org.apache.log4j.Logger#info() Source Code Examples

The following lists example code for org.apache.log4j.Logger#info(), collected from various open source projects.
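Before the project examples, here is a minimal, self-contained sketch of the pattern most of the snippets below share: obtain a Logger, optionally guard with isInfoEnabled(), and call info(). The class name and the configuration file path are assumptions for illustration, not taken from any of the projects.

import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;

public class InfoDemo {

    // Loggers are typically obtained once per class and reused.
    private static final Logger LOG = Logger.getLogger(InfoDemo.class);

    public static void main(String[] args) {
        // Load a log4j 1.x properties file (hypothetical path).
        PropertyConfigurator.configure("log4j.properties");

        // Simple INFO message.
        LOG.info("Application started");

        // Guard expensive message construction, as in the ModelValidator example below.
        if (LOG.isInfoEnabled()) {
            LOG.info("Processed " + 42 + " records");
        }
    }
}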

Source code 1   Project: dsl-devkit   File: ModelValidator.java
/**
 * Log issue.
 *
 * @param resource
 *          the resource
 * @param issue
 *          the issue
 * @param logger
 *          the logger
 */
private void logIssue(final Resource resource, final Issue issue, final Logger logger) {
  final String message = NLS.bind(MESSAGE_TEMPLATE, new Object[] {resource.getURI().lastSegment(), issue.getLineNumber(), issue.getMessage()});
  final Severity severity = issue.getSeverity();
  switch (severity) {
  case ERROR:
    logger.error(message);
    break;
  case WARNING:
    logger.warn(message);
    break;
  case INFO:
    if (logger.isInfoEnabled()) {
      logger.info(message);
    }
    break;

  default:
    break;
  }
}
 
Source code 2   Project: ipmilib   File: ParallelTest.java
@Override
public void run() {
	Logger logger = Logger.getLogger(getClass());
	try {
		for (int i = 0; i < cnt; ++i) {
			GetChassisStatusResponseData responseData = (GetChassisStatusResponseData) connector
					.sendMessage(connection,
							new GetChassisStatus(IpmiVersion.V20,
									connection.getCipherSuite(),
									AuthenticationType.RMCPPlus));
			logger.info("[" + id + "] Received message " + msgCnt++ + " from " + id + ": " + responseData.toString());
		}
	} catch (Exception e) {
		e.printStackTrace();
		succeeded = false;
	}
}
 
/**
 * Fills the consumed message fields into the given properties.
 * @param props properties to receive the message body
 * @param log logger for diagnostic output
 * @param consumedMsgInfo array of {messageID, messageName, sender, messageBody}
 * @return the ID of the consumed message, or an empty string if none was consumed
 */
String setConsumedMsg(Properties props, Logger log, String[] consumedMsgInfo){
    String vNewMsgID = "";
    try {
        if(consumedMsgInfo!=null && consumedMsgInfo.length == 4){
            vNewMsgID = consumedMsgInfo[0];
            String vMsgName = consumedMsgInfo[1];
            String vSender = consumedMsgInfo[2];
            String vMsg = consumedMsgInfo[3];
            if (null == vMsg) {
                props.put(EventChecker.MSG, "NULL");
            } else {
                props.put(EventChecker.MSG, vMsg);
            }
            log.info("Received message : messageID: " + vNewMsgID + ", messageName: " + vMsgName + ", receiver: " + vSender
                    + ", messageBody: " + vMsg);
        }
    }catch (Exception e) {
        log.error("Error set consumed message failed {} setConsumedMsg failed" + e);
        return vNewMsgID;
    }
    return vNewMsgID;
}
 
Source code 4   Project: mt-flume   File: TestLog4jAppenderWithAvro.java
@Test
public void testAvroReflect() throws IOException {
  loadProperties("flume-log4jtest-avro-reflect.properties");
  PropertyConfigurator.configure(props);
  Logger logger = LogManager.getLogger(TestLog4jAppenderWithAvro.class);
  String msg = "This is log message number " + String.valueOf(0);

  AppEvent appEvent = new AppEvent();
  appEvent.setMessage(msg);

  logger.info(appEvent);

  Transaction transaction = ch.getTransaction();
  transaction.begin();
  Event event = ch.take();
  Assert.assertNotNull(event);

  Schema schema = ReflectData.get().getSchema(appEvent.getClass());

  ReflectDatumReader<AppEvent> reader = new ReflectDatumReader<AppEvent>(AppEvent.class);
  BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(event.getBody(), null);
  AppEvent recordFromEvent = reader.read(null, decoder);
  Assert.assertEquals(msg, recordFromEvent.getMessage());

  Map<String, String> hdrs = event.getHeaders();

  Assert.assertNull(hdrs.get(Log4jAvroHeaders.MESSAGE_ENCODING.toString()));

  Assert.assertNull("Schema URL should not be set",
      hdrs.get(Log4jAvroHeaders.AVRO_SCHEMA_URL.toString()));
  Assert.assertEquals("Schema string should be set", schema.toString(),
      hdrs.get(Log4jAvroHeaders.AVRO_SCHEMA_LITERAL.toString()));

  transaction.commit();
  transaction.close();

}
 
/**
 * Fetches an HCat token as per the specified Hive configuration and then
 * stores the token into the specified credential store.
 *
 * @param userToProxy String value indicating the name of the user the token
 *          will be fetched for.
 * @param hiveConf the configuration based off which the hive client will be
 *          initialized.
 * @param logger the logger instance which writes the logging content to the
 *          job logs.
 *
 * @throws IOException
 * @throws TException
 * @throws MetaException
 *
 * */
private Token<DelegationTokenIdentifier> fetchHcatToken(String userToProxy,
    HiveConf hiveConf, String tokenSignatureOverwrite, final Logger logger)
    throws IOException, MetaException, TException {

  logger.info(HiveConf.ConfVars.METASTOREURIS.varname + ": "
      + hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname));

  logger.info(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname + ": "
      + hiveConf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname));

  logger.info(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname + ": "
      + hiveConf.get(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname));

  HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(hiveConf);
  String hcatTokenStr =
      hiveClient.getDelegationToken(userToProxy, UserGroupInformation
          .getLoginUser().getShortUserName());
  Token<DelegationTokenIdentifier> hcatToken =
      new Token<DelegationTokenIdentifier>();
  hcatToken.decodeFromUrlString(hcatTokenStr);

  // overwrite the value of the service property of the token if the signature
  // override is specified.
  if (tokenSignatureOverwrite != null
      && tokenSignatureOverwrite.trim().length() > 0) {
    hcatToken.setService(new Text(tokenSignatureOverwrite.trim()
        .toLowerCase()));

    logger.info(HIVE_TOKEN_SIGNATURE_KEY + ":"
        + (tokenSignatureOverwrite == null ? "" : tokenSignatureOverwrite));
  }

  logger.info("Created hive metastore token: " + hcatTokenStr);
  logger.info("Token kind: " + hcatToken.getKind());
  logger.info("Token id: " + hcatToken.getIdentifier());
  logger.info("Token service: " + hcatToken.getService());
  return hcatToken;
}
 
Source code 6   Project: kite   File: TestLog4jAppenderWithAvro.java
@Test
public void testAvroReflect() throws IOException {
  loadProperties("flume-log4jtest-avro-reflect.properties");
  PropertyConfigurator.configure(props);
  Logger logger = LogManager.getLogger(TestLog4jAppenderWithAvro.class);
  String msg = "This is log message number " + String.valueOf(0);

  AppEvent appEvent = new AppEvent();
  appEvent.setMessage(msg);

  logger.info(appEvent);

  Transaction transaction = ch.getTransaction();
  transaction.begin();
  Event event = ch.take();
  Assert.assertNotNull(event);

  Schema schema = ReflectData.get().getSchema(appEvent.getClass());

  ReflectDatumReader<AppEvent> reader = new ReflectDatumReader<AppEvent>(AppEvent.class);
  BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(event.getBody(), null);
  AppEvent recordFromEvent = reader.read(null, decoder);
  Assert.assertEquals(msg, recordFromEvent.getMessage());

  Map<String, String> hdrs = event.getHeaders();

  Assert.assertNull(hdrs.get(Log4jAvroHeaders.MESSAGE_ENCODING.toString()));

  Assert.assertNull("Schema URL should not be set",
      hdrs.get(Log4jAvroHeaders.AVRO_SCHEMA_URL.toString()));
  Assert.assertEquals("Schema string should be set", schema.toString(),
      hdrs.get(Log4jAvroHeaders.AVRO_SCHEMA_LITERAL.toString()));

  transaction.commit();
  transaction.close();

}
 
public static HConnection createConnection(Configuration conf)
    throws IOException {

  Logger LOG = Logger.getLogger(HConnectionManagerMultiClusterWrapper.class);

  Collection < String > failoverClusters = conf
          .getStringCollection(ConfigConst.HBASE_FAILOVER_CLUSTERS_CONFIG);

  if (failoverClusters.size() == 0) {
    LOG.info(" -- Getting a signle cluster connection !!");
    return HConnectionManager.createConnection(conf);
  } else {

    Map<String, Configuration> configMap = HBaseMultiClusterConfigUtil
        .splitMultiConfigFile(conf);

    LOG.info(" -- Getting primary Connction");
    HConnection primaryConnection = HConnectionManager
        .createConnection(configMap
            .get(HBaseMultiClusterConfigUtil.PRIMARY_NAME));
    LOG.info(" --- Got primary Connction");

    ArrayList<HConnection> failoverConnections = new ArrayList<HConnection>();

    for (Entry<String, Configuration> entry : configMap.entrySet()) {
      if (!entry.getKey().equals(HBaseMultiClusterConfigUtil.PRIMARY_NAME)) {
        LOG.info(" -- Getting failure Connction");
        failoverConnections.add(HConnectionManager.createConnection(entry
            .getValue()));
        LOG.info(" --- Got failover Connction");
      }
    }
    
    return new HConnectionMultiCluster(conf, primaryConnection,
        failoverConnections.toArray(new HConnection[0]));
  }
}
 
Source code 8   Project: openemm   File: UserFormController.java
private void writeUserActivityLog(ComAdmin admin, UserAction userAction, Logger logger) {
	if (userActivityLogService != null) {
		userActivityLogService.writeUserActivityLog(admin, userAction, logger);
	} else {
		logger.error("Missing userActivityLogService in " + this.getClass().getSimpleName());
		logger.info("Userlog: " + admin.getUsername() + " " + userAction.getAction() + " " +  userAction.getDescription());
	}
}
 
Source code 9   Project: olca-app   File: LoggerConfig.java
private static void setLogLevel(Logger logger) {
	String level = AppArg.LOG_LEVEL.getValue();
	if (level != null) {
		setLevelFromCommandLine(logger, level);
	} else {
		logger.setLevel(LoggerPreference.getLogLevel());
	}
	logger.info("Log-level=" + logger.getLevel());
}
 
@Override
public void cancelTokens(File tokenFile, String userToProxy, Logger logger)
    throws HadoopSecurityManagerException {
  // nntoken
  Credentials cred = null;
  try {
    cred =
        Credentials.readTokenStorageFile(new Path(tokenFile.toURI()),
            new Configuration());
    for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {
      logger.info("Got token: " + t.toString());
      logger.info("Token kind: " + t.getKind());
      logger.info("Token id: " + new String(t.getIdentifier()));
      logger.info("Token service: " + t.getService());
      if (t.getKind().equals(new Text("HIVE_DELEGATION_TOKEN"))) {
        logger.info("Cancelling hive token " + new String(t.getIdentifier()));
        cancelHiveToken(t, userToProxy);
      } else if (t.getKind().equals(new Text("MAPREDUCE_DELEGATION_TOKEN"))) {
        logger.info("Cancelling mr job tracker token "
            + new String(t.getIdentifier()));
        cancelMRJobTrackerToken(t, userToProxy);
      } else if (t.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
        logger.info("Cancelling namenode token "
            + new String(t.getIdentifier()));
        cancelNameNodeToken(t, userToProxy);
      } else {
        logger.info("unknown token type " + t.getKind());
      }
    }
  } catch (Exception e) {
    e.printStackTrace();
  }

}
 
Source code 11   Project: kite   File: TestLog4jAppenderWithAvro.java
@Test
public void testAvroGeneric() throws IOException {
  loadProperties("flume-log4jtest-avro-generic.properties");
  PropertyConfigurator.configure(props);
  Logger logger = LogManager.getLogger(TestLog4jAppenderWithAvro.class);
  String msg = "This is log message number " + String.valueOf(0);

  Schema schema = new Schema.Parser().parse(
      getClass().getClassLoader().getResource("myrecord.avsc").openStream());
  GenericRecordBuilder builder = new GenericRecordBuilder(schema);
  GenericRecord record = builder.set("message", msg).build();

  logger.info(record);

  Transaction transaction = ch.getTransaction();
  transaction.begin();
  Event event = ch.take();
  Assert.assertNotNull(event);

  GenericDatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>(schema);
  BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(event.getBody(), null);
  GenericRecord recordFromEvent = reader.read(null, decoder);
  Assert.assertEquals(msg, recordFromEvent.get("message").toString());

  Map<String, String> hdrs = event.getHeaders();

  Assert.assertNull(hdrs.get(Log4jAvroHeaders.MESSAGE_ENCODING.toString()));

  Assert.assertEquals("Schema URL should be set",
      "file:///tmp/myrecord.avsc", hdrs.get(Log4jAvroHeaders.AVRO_SCHEMA_URL.toString()));
  Assert.assertNull("Schema string should not be set",
      hdrs.get(Log4jAvroHeaders.AVRO_SCHEMA_LITERAL.toString()));

  transaction.commit();
  transaction.close();

}
 
public static void main(String[] args) {
    
    Logger log = Logger.getLogger(TestDb.class);
    
    TypedProperties props = new TypedProperties(
            new HashMap<String, String>(), null);
    EmailAddressBatchUpdater updater = new EmailAddressBatchUpdater(props);
 
    String command = readInput("Enter command (A = add, D = delete) > ");
    
    String fileName = readInput("Enter file name > ");
   
    try {
        if (command.toUpperCase().startsWith("A")) {
            log.info("Starting adding of addresses");
            updater.addAddresses(fileName);
            log.info("Done with adding of addresses");
        }
        else if (command.toUpperCase().startsWith("D")) {
            log.info("Starting removal of addresses");
            updater.deleteAddresses(fileName);
            log.info("Done with removal of addresses");
        }
    } catch (MegatronException e) {
        log.error("Exception, could not perform " + command + " using file " + fileName + ".");
        e.printStackTrace();
    }
}
 
Source code 13   Project: OpenCue   File: CueExceptionUtil.java
/**
 * Logs an error message along with the stack trace of the given throwable,
 * including the stack trace of its cause when one is present.
 *
 * @param msg
 * @param aThrowable
 */
public static void logStackTrace(String msg, Throwable aThrowable) {
    Logger error_logger = Logger.getLogger(CueExceptionUtil.class);
    error_logger.info("Caught unexpected exception caused by: " + aThrowable);
    error_logger.info("StackTrace: \n" + getStackTrace(aThrowable));
    if (aThrowable.getCause() != null) {
        error_logger.info("Caused By: " + getStackTrace(aThrowable.getCause()));
    }
}
 
Source code 14   Project: EdgeSim   File: Controller.java
/**
 * Appends a log entry to the log panel and writes it to the log file.
 * @param type:	"debug", "info" or "error"
 * @param text: contents of the log entry
 * @param logger: the logger of the class that is writing this entry,
 * 				  so the originating class can be identified from the log
 */
public void appendLog(String type,String text,Logger logger){
	this.logPanel.append(text);
	if(logger != null){
		if(type.equals("debug")){
			logger.debug(text);
		}else if(type.equals("info")){
			logger.info(text);
		}else if(type.equals("error")){
			logger.error(text);
		}
	}
}
 
Source code 15   Project: openxds   File: XdsConfigurationLoader.java
/** testing only **/
public static void main(String args[]) {
	XdsConfigurationLoader.getInstance().setLoggingFile("testfile.txt", null, null);
	Logger log = Logger.getLogger("mylogger");
	log.fatal("**my fatal error**");
	log.error("**my error error**");
	log.warn("**my warn error**");
	log.info("**my info error**");
	log.debug("**my debug error**");
}
 
Source code 16   Project: DataSphereStudio   File: EventCheckSender.java
@Override
public boolean sendMsg(int jobId, Properties props, Logger log) {
        boolean result = false;
        PreparedStatement pstmt = null;
        Connection msgConn = null;
        String sendTime = DateFormatUtils.format(new Date(), "yyyy-MM-dd HH:mm:ss");
        String sqlForSendMsg = "INSERT INTO event_queue (sender,send_time,topic,msg_name,msg,send_ip) VALUES(?,?,?,?,?,?)";
        try {
            String vIP = getLinuxLocalIp(log);
            msgConn = getEventCheckerConnection(props,log);
            if(msgConn==null) return false;
            pstmt = msgConn.prepareCall(sqlForSendMsg);
            pstmt.setString(1, sender);
            pstmt.setString(2, sendTime);
            pstmt.setString(3, topic);
            pstmt.setString(4, msgName);
            pstmt.setString(5, msg);
            pstmt.setString(6, vIP);
            int rs = pstmt.executeUpdate();
            if (rs == 1) {
                result = true;
                log.info("Send msg success!");
            } else {
                log.error("Send msg failed for update database!");
            }
        } catch (SQLException e) {
            throw new RuntimeException("Send EventChecker msg failed!" + e);
        } finally {
            closeQueryStmt(pstmt, log);
            closeConnection(msgConn, log);
        }
        return result;
}
 
Source code 17   Project: hazelcast-simulator   File: MapStoreUtils.java
private static void assertMapStoreConfig(String expectedMapStoreName, String mapName, MapStoreConfig mapStoreConfig,
                                         Logger logger) {
    if (mapStoreConfig == null) {
        throw new TestException("MapStore for map %s needs to be configured with class %s, but was not configured at all",
                mapName, expectedMapStoreName);
    }
    logger.info(format("MapStore configuration for map %s: %s", mapName, mapStoreConfig));
    if (!mapStoreConfig.isEnabled()) {
        throw new TestException("MapStore for map %s needs to be configured with class %s, but was not enabled", mapName,
                expectedMapStoreName);
    }
}
 
Source code 18   Project: rocketmq   File: Log4jTest.java
@Test
public void testLog4j() {
    clear();
    Logger logger = Logger.getLogger("testLogger");
    for (int i = 0; i < 10; i++) {
        logger.info("log4j " + this.getType() + " simple test message " + i);
    }
    int received = consumeMessages(10, "log4j", 10);
    Assert.assertTrue(received > 5);
}
 
Source code 19   Project: openemm   File: InfoLog4JTag.java
@Override
protected final void logMessage(final Logger logger, final String msg) {
	logger.info(msg);
}
 
Source code 20   Project: datawave   File: AbstractTableConfigHelper.java
/**
 * Sets the aggregator configuration on table {@code tableName} to that contained in {@code aggregators}, if {@code tableName} is not already configured
 * with the specified aggregators.
 * 
 * @param tableName
 *            the name of the table whose configuration is to be modified
 * @param aggregators
 *            the aggregators that should be set on {@code tableName}
 * @param tops
 *            accumulo table operations helper for configuring tables
 * @param log
 *            a {@link Logger} for diagnostic messages
 * 
 * @throws AccumuloException
 * @throws AccumuloSecurityException
 * @throws TableNotFoundException
 */
protected void setAggregatorConfigurationIfNecessary(String tableName, List<CombinerConfiguration> aggregators, TableOperations tops, Logger log)
                throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    if (areAggregatorsConfigured(tableName, aggregators, tops)) {
        log.debug(tableName + " appears to have its aggregators configured already.");
        return;
    }
    
    log.info("Configuring aggregators for " + tableName);
    Map<String,String> props = generateInitialTableProperties();
    props.putAll(generateAggTableProperties(aggregators));
    for (Entry<String,String> prop : props.entrySet()) {
        tops.setProperty(tableName, prop.getKey(), prop.getValue());
    }
}