下面列出了如何使用 org.apache.log4j.Level API 类的实例代码及写法，也可以点击链接到 GitHub 查看完整源代码。
// Builds the shared fixtures: a "Black Lotus" MagicCard plus its
// Limited Edition Alpha printing, with TRACE logging for diagnostics.
@Before
public void createCards()
{
MTGLogger.changeLevel(Level.TRACE); // most verbose level while tests run
mc = new MagicCard();
mc.setName("Black Lotus");
mc.setLayout(MTGLayout.NORMAL);
mc.setCost("{0}");
mc.setCmc(0); // converted mana cost of "{0}"
mc.getTypes().add("Artifact");
mc.setReserved(true);
mc.setText("{T}, Sacrifice Black Lotus: Add three mana of any one color to your mana pool.");
mc.setRarity(MTGRarity.RARE);
mc.setArtist("Christopher Rush");
// Edition (printing) metadata for Limited Edition Alpha ("lea").
ed = new MagicEdition();
ed.setId("lea");
ed.setSet("Limited Edition Alpha");
ed.setBorder("Black");
ed.setRarity("Rare");
ed.setArtist("Christopher Rush");
ed.setMultiverseid("3");
mc.getEditions().add(ed);
// NOTE(review): getCurrentSet() presumably resolves to the edition just
// added above — confirm it cannot return null before setNumber is called.
mc.getCurrentSet().setNumber("232");
}
/**
 * Installs a daily-rolling file appender writing to {@code logDir/celos.log}
 * and sets the root logger to INFO.
 *
 * @param logDir directory that receives the active log and rolled copies
 */
public static void setupLogging(File logDir) {
    // Suppress log4j's default auto-configuration so only this appender runs.
    System.getProperties().setProperty("log4j.defaultInitOverride", "true");
    RollingFileAppender appender = new RollingFileAppender();
    appender.setFile(new File(logDir, "celos.log").getAbsolutePath());
    appender.setAppend(true);
    // Roll the file once per day; rolled files carry the date in their name.
    TimeBasedRollingPolicy rollingPolicy = new TimeBasedRollingPolicy();
    rollingPolicy.setFileNamePattern(new File(logDir, "celos-%d{yyyy-MM-dd}.log").getAbsolutePath());
    appender.setRollingPolicy(rollingPolicy);
    PatternLayout patternLayout = new PatternLayout();
    // BUG FIX: use calendar year "yyyy", not week-based year "YYYY" —
    // SimpleDateFormat's week-year prints the wrong year for dates near the
    // year boundary (e.g. Dec 29-31 can render as the next year).
    patternLayout.setConversionPattern("[%d{yyyy-MM-dd HH:mm:ss.SSS}] %-5p: %m%n");
    appender.setLayout(patternLayout);
    appender.activateOptions();
    Logger.getRootLogger().addAppender(appender);
    Logger.getRootLogger().setLevel(Level.INFO);
}
// Verifies that the appender, configured with a file-backed async queue and
// a static load balancer, sends exactly one message for one appended event.
@Test
public void testFile() throws Exception {
appender.setAsyncQueueType("file"); // persist the queue on disk, not in memory
appender.setAsyncFileQueuePath(tempDir.newFolder().getAbsolutePath());
appender.setLoadBalancerType("static");
appender.setLoadBalancerServer(TestConnectionPool.createConnectionString(servers));
appender.activateOptions();
// Stub a minimal INFO-level logging event carrying the test payload map.
LoggingEvent event = mock(LoggingEvent.class);
when(event.getMessage()).thenReturn(createEventMap());
when(event.getLevel()).thenReturn(Level.INFO);
appender.append(event);
// Make sure client has enough time to drain the intermediary message queue
waitAndVerify(15000, new Runnable() {
public void run() {
assertEquals(appender.getSentMessageCount(), 1);
}
});
appender.close();
}
/**
 * Attaches a file appender to {@code stcLogger}, truncating any existing
 * file, and sets both the appender threshold and the logger level to TRACE
 * (when {@code trace}) or DEBUG. Exits the JVM on I/O failure.
 */
private static void setupLoggingToFile(Logger stcLogger, String logfile,
boolean trace) {
    // Simple "LEVEL message" layout for the dedicated log file.
    Layout messageLayout = new PatternLayout("%-5p %m%n");
    try {
        // false: truncate rather than append to a pre-existing file.
        FileAppender fileAppender = new FileAppender(messageLayout, logfile, false);
        Level threshold = trace ? Level.TRACE : Level.DEBUG;
        fileAppender.setThreshold(threshold);
        stcLogger.addAppender(fileAppender);
        stcLogger.setLevel(threshold);
    } catch (IOException e) {
        // Could not open the log file: report and abort with the I/O exit code.
        System.out.println(e.getMessage());
        System.exit(ExitCode.ERROR_IO.code());
    }
}
/**
 * Sets the logger level from a numeric code: 1=TRACE, 2=DEBUG, 3=INFO,
 * 4=WARN, 5=ERROR, 6=FATAL.
 *
 * @throws IllegalArgumentException for any code outside 1..6
 */
public static void setLogLevel(int log_level) throws IllegalArgumentException {
    // Codes 1..6 map onto the log4j levels in order of increasing severity.
    final Level[] levels = {
        Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR, Level.FATAL
    };
    if (log_level < 1 || log_level > levels.length) {
        throw new IllegalArgumentException("Illegal log level: " + log_level);
    }
    Level l = levels[log_level - 1];
    _logger.setLevel(l);
    // Announce the change on stdout and through the logger itself.
    String inf = "Set log level to " + l;
    System.out.println(inf);
    _logger.info(inf);
}
/**
 * Persists a new initialRecordTime on the subscription document with the
 * given ID; does nothing (other than logging) when no such subscription
 * exists.
 */
private void updateInitialRecordTime(String subscriptionID, String initialRecordTime) {
    MongoCollection<BsonDocument> collection = Configuration.mongoDatabase.getCollection("Subscription",
            BsonDocument.class);
    BsonDocument subscription = collection.find(new BsonDocument("subscriptionID", new BsonString(subscriptionID)))
            .first();
    // BUG FIX: guard BEFORE dereferencing. find(...).first() returns null
    // when nothing matches; the original called subscription.put(...) first
    // and threw a NullPointerException for unknown subscription IDs.
    if (subscription != null) {
        subscription.put("initialRecordTime", new BsonString(initialRecordTime));
        collection.findOneAndReplace(new BsonDocument("subscriptionID", new BsonString(subscriptionID)),
                subscription);
    }
    Configuration.logger.log(Level.INFO,
            "InitialRecordTime of Subscription ID: " + subscriptionID + " is updated to DB. ");
}
/**
 * Timed benchmark step: runs a "payloadField[any] == key" predicate query
 * against the map, records the latency, optionally logs the result count,
 * and validates every returned sequence.
 */
@TimeStep(prob = -1)
public void query(ThreadState state, Probe probe, @StartNanos long startNanos) {
    int randomKey = state.getRandomKey();
    // Match entries whose payloadField collection contains the random key.
    Predicate anyElementEquals = Predicates.equal("payloadField[any]", randomKey);
    Collection<Object> matches = null;
    try {
        matches = map.values(anyElementEquals);
    } finally {
        // Record the latency even if the query throws.
        probe.done(startNanos);
    }
    // Throttled logging keeps output readable under high throughput.
    if (throttlingLogger.requestLogSlot()) {
        throttlingLogger.logInSlot(Level.INFO,
                format("Query 'payloadField[any]= %d' returned %d results.", randomKey, matches.size()));
    }
    for (Object sequence : matches) {
        state.assertValidSequence(sequence);
    }
}
/**
 * Converts {@code sArg} to a Level, returning {@code defaultLevel} when the
 * string is null or unparseable. The pseudo-level "SYSTEM" (any case) maps
 * to {@code AutoLevel.SYSTEM}.
 */
public static Level toLevel(
String sArg,
Level defaultLevel ) {
    if (sArg == null) {
        return defaultLevel;
    }
    // "SYSTEM" is handled here; everything else is delegated to log4j's own
    // parser together with the caller-supplied fallback.
    return "SYSTEM".equals(sArg.toUpperCase())
            ? AutoLevel.SYSTEM
            : Level.toLevel(sArg, defaultLevel);
}
/** Entry point: runs the Ignite/Spark DataFrame examples against a local Spark session. */
public static void main(String args[]) {
setupServerAndData();
//Creating spark session.
SparkSession spark = SparkSession
.builder()
.appName("JavaIgniteDataFrameExample")
.master("local")
.config("spark.executor.instances", "2")
.getOrCreate();
// Adjust the logger to exclude the logs of no interest.
Logger.getRootLogger().setLevel(Level.ERROR);
Logger.getLogger("org.apache.ignite").setLevel(Level.INFO);
// Executing examples.
sparkDSLExample(spark);
nativeSparkSqlExample(spark);
Ignition.stop(false); // false: do not cancel running Ignite jobs on stop
}
/**
 * Sets the level of an existing logger.
 *
 * @param loggerName name of the logger to modify
 * @param levelName  level name (case-insensitive, surrounding whitespace
 *                   ignored); must be one of LOG4JLEVELS
 * @throws IllegalArgumentException for an invalid level or unknown logger
 */
@Override
public void setLoggerLevel(String loggerName, String levelName) {
    // Normalize once; validation and conversion must use the SAME string.
    String prospectiveLevel = null;
    if (levelName != null) {
        prospectiveLevel = levelName.trim();
    }
    if (prospectiveLevel == null ||
            prospectiveLevel.isEmpty() ||
            !LOG4JLEVELS.contains(prospectiveLevel.toUpperCase())) {
        throw new IllegalArgumentException("Log level \"" + levelName +
                "\" is not valid.");
    }
    Logger logger = Logger.getLogger(loggerName);
    if (logger == null) {
        throw new IllegalArgumentException("Logger \"" + loggerName +
                "\" does not exist");
    }
    // BUG FIX: convert the trimmed name, not the raw input. Level.toLevel()
    // silently falls back to DEBUG for unrecognized strings, so a value like
    // " INFO " passed validation but used to set DEBUG instead of INFO.
    Level newLevel = Level.toLevel(prospectiveLevel);
    logger.setLevel(newLevel);
}
// Exercises group lookup through the JNI netgroup mapping with Java
// fallback; the JNI path requires the native build ('mvn -Pnative').
@Test
public void testNetgroupWithFallback() throws Exception {
LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
"test the normal path and 'mvn -DTestGroupFallback clear test' will" +
" test the fall back functionality");
Logger.getRootLogger().setLevel(Level.DEBUG); // surface mapping internals in the log
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
"org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMappingWithFallback");
Groups groups = new Groups(conf);
String username = System.getProperty("user.name");
List<String> groupList = groups.getGroups(username);
LOG.info(username + " has GROUPS: " + groupList.toString());
// Every real user should belong to at least one group.
assertTrue(groupList.size() > 0);
}
/**
 * Routes everything at DEBUG and above to the console for the whole test
 * class by attaching a console appender to the root logger.
 */
@BeforeClass
public static void beforeClass() {
    ConsoleAppender consoleAppender = new ConsoleAppender();
    // Timestamp, level, simple class name, then the message.
    consoleAppender.setLayout(new PatternLayout("%d [%p] %C{1}: %m%n"));
    consoleAppender.setThreshold(Level.DEBUG);
    consoleAppender.activateOptions();
    // Root logger so every category inherits the appender.
    Logger.getRootLogger().addAppender(consoleAppender);
}
// Enables unit-test-only scanner settings and opens all block/volume
// scanner loggers up to ALL before each test.
@Before
public void before() {
BlockScanner.Conf.allowUnitTestSettings = true;
GenericTestUtils.setLogLevel(BlockScanner.LOG, Level.ALL);
GenericTestUtils.setLogLevel(VolumeScanner.LOG, Level.ALL);
GenericTestUtils.setLogLevel(FsVolumeImpl.LOG, Level.ALL);
}
// Attaches a rolling file appender to the (already existing) logger for the
// given object and records the service -> logger association. Registration
// failures are logged and swallowed; a missing logger is a hard error.
@Override
public void registerLogger(@NotNull Object obj, @NotNull ServiceName serviceName) {
String loggerName = ILoggingConfigurator.getLoggerName(obj);
// LogManager.exists returns null when the logger was never configured.
Logger log = LogManager.exists(loggerName);
if (log == null) {
throw new EPSCommonException(String.format("Logger with name '%s' doesn`t exists", loggerName));
}
try {
// Appender creation and bookkeeping must be atomic w.r.t. other
// register/unregister calls sharing the same lock.
synchronized (lock) {
DailyMaxRollingFileAppender appender;
if (loggingConfiguration.isIndividualAppendersEnabled()) {
appender = getOrCreateServiceAppender(serviceName); // per-service log file
} else {
appender = getOrCreateMainAppender(); // shared log file
}
log.addAppender(appender);
serviceLoggers.put(serviceName, loggerName);
}
} catch (RuntimeException e) {
// Best effort: a failed registration must not take the service down.
if (logger.isEnabledFor(Level.ERROR)) {
logger.error(String.format("Failed to register logger '%s' for service: %s", loggerName, serviceName), e);
}
}
}
/**
 * REST endpoint: sets the log level of the given package's logger (default:
 * the autoscaler root package). An unrecognized level name leaves the logger
 * untouched; the endpoint always responds OK.
 */
@PUT
@Path("/log/{level}")
public Response setLogLevel( @PathParam("level") final String level, @QueryParam("package") final String packageName) {
    // Default to the autoscaler root package unless one was supplied.
    String targetLoggerName = "org.cloudfoundry.autoscaler";
    if (packageName != null && !packageName.isEmpty()) {
        targetLoggerName = packageName;
    }
    Logger targetLogger = LogManager.getLogger(targetLoggerName);
    // Case-insensitive match against the standard log4j levels; on no match
    // the logger's current level is preserved (same as the original chain).
    Level[] knownLevels = {
        Level.INFO, Level.DEBUG, Level.ERROR, Level.WARN,
        Level.FATAL, Level.TRACE, Level.OFF, Level.ALL
    };
    for (Level candidate : knownLevels) {
        if (level.equalsIgnoreCase(candidate.toString())) {
            targetLogger.setLevel(candidate);
            break;
        }
    }
    logger.info("Log level " + level.toUpperCase() + " is set");
    return RestApiResponseHandler.getResponseOk();
}
/**
 * Writes {@code message} to the logger at the level configured for this
 * writer (a log4j integer level code). Null/empty messages are ignored.
 *
 * @throws IllegalStateException when the configured level code is unknown
 */
private void writeToLog(final String message) {
    // Nothing to emit for null or empty input.
    if (message == null || message.isEmpty()) {
        return;
    }
    // Dispatch on the log4j *_INT constants held in the 'level' field.
    switch (level) {
        case Level.TRACE_INT:
            logger.trace(message);
            break;
        case Level.DEBUG_INT:
            logger.debug(message);
            break;
        case Level.INFO_INT:
            logger.info(message);
            break;
        case Level.WARN_INT:
            logger.warn(message);
            break;
        case Level.ERROR_INT:
            logger.error(message);
            break;
        default:
            // Any other code means the writer was misconfigured.
            throw new IllegalStateException();
    }
}
/**
 * Translates {@code key} with the given arguments, falling back to the
 * default locale and finally to an error message or the raw key when no
 * translation exists. Missing translations are logged at
 * {@code missingTranslationLogLevel} (ERROR, WARN or INFO handled; OFF or
 * null suppresses logging).
 */
@Override
public String translate(String key, String[] args, Level missingTranslationLogLevel) {
String val = translate(key, args, false);
// if still null -> fallback to default locale (if not in debug mode)
if (val == null) {
if (Settings.isDebuging()) {
val = getErrorMessage(key);
} else {
// try with fallBackToDefaultLocale
val = translate(key, args, true);
}
}
// else value got translated or there is at least an error message telling
// which key was not found.
// Note: val may be null if there is a localstrings file missing in the default language. use the online translation tool to double-check
// Error: ! even in default language: missing translation key!
if (val == null) {
val = getErrorMessage(key);
// TODO: 13.02.2009 Workaround to fix shibboleth-attribute WARN : 'no translation ... in org.olat.presentation.course.condition...'
if (!packageName.startsWith("org.olat.presentation.course.condition")) {
if (missingTranslationLogLevel != null && !missingTranslationLogLevel.equals(Level.OFF)) {
if (missingTranslationLogLevel.equals(Level.ERROR)) {
log.error(val, null);
} else if (missingTranslationLogLevel.equals(Level.WARN)) {
log.warn(val, null);
} else if (missingTranslationLogLevel.equals(Level.INFO)) {
log.info(val, null);
}
}
}
// don't use error message in GUI for production, use key instead (OLAT-5896)
if (!Settings.isDebuging()) {
val = key;
}
}
return val;
}
/**
 * Counts captured log records whose level equals {@code expectedLevel} and
 * whose message contains {@code expectedMessage}.
 */
public static int findMessage(Level expectedLevel, String expectedMessage) {
    int matches = 0;
    for (Log record : DubboAppender.logList) {
        // Both the level and the message substring must match.
        if (record.getLogLevel().equals(expectedLevel)
                && record.getLogMessage().contains(expectedMessage)) {
            matches++;
        }
    }
    return matches;
}
// Applies logLevel to the NameNode loggers plus the topology and group
// mapping loggers the NameNode relies on.
static void setNameNodeLoggingLevel(Level logLevel) {
// FATAL so the announcement is visible regardless of the level being set.
LOG.fatal("Log level = " + logLevel.toString());
// change log level to NameNode logs
DFSTestUtil.setNameNodeLogLevel(logLevel);
GenericTestUtils.setLogLevel(LogManager.getLogger(
NetworkTopology.class.getName()), logLevel);
GenericTestUtils.setLogLevel(LogManager.getLogger(
Groups.class.getName()), logLevel);
}
/**
 * Opens the test class's logger up to ALL for maximum diagnostics, saving
 * the previous level in testDriverLevel so it can be restored afterwards.
 */
@BeforeClass
public static void adjustLogLevels() {
    Logger testLogger = Logger.getLogger(AbstractTableConfigHelperTest.class);
    // Remember the prior level before widening it.
    AbstractTableConfigHelperTest.testDriverLevel = testLogger.getLevel();
    testLogger.setLevel(Level.ALL);
}
// Forces ten successive leader changes in a 3-server Raft cluster and
// checks that no server terminated unexpectedly during the transitions.
@Test
public void testChangeLeader() throws Exception {
RaftStorageTestUtils.setRaftLogWorkerLogLevel(Level.TRACE); // verbose during the churn
LOG.info("Running testChangeLeader");
final MiniRaftCluster cluster = newCluster(3);
cluster.start();
RaftPeerId leader = RaftTestUtil.waitForLeader(cluster).getId();
for(int i = 0; i < 10; i++) {
leader = RaftTestUtil.changeLeader(cluster, leader);
ExitUtils.assertNotTerminated();
}
RaftStorageTestUtils.setRaftLogWorkerLogLevel(Level.INFO); // restore normal verbosity
cluster.shutdown();
}
/**
 * Logs {@code numberOfMsgs} numbered messages through the fixture, cycling
 * the numeric level argument through 0..4.
 */
private void send(int numberOfMsgs) throws EventDeliveryException {
    // NOTE: 0..4 are not standard log4j level ints, so Level.toLevel resolves
    // them via its own fallback rules — behavior kept exactly as before.
    for (int i = 0; i < numberOfMsgs; i++) {
        String message = "This is log message number" + i;
        fixture.log(Level.toLevel(i % 5), message);
    }
}
// Provisions one additional Kubernetes master VM that joins the control
// plane at joinIp. additionalMasterNodeInstance is 0-based; the hostname
// suffix uses index + 1.
private UserVm createKubernetesAdditionalMaster(final String joinIp, final int additionalMasterNodeInstance) throws ManagementServerException,
ResourceUnavailableException, InsufficientCapacityException {
UserVm additionalMasterVm = null;
DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
VirtualMachineTemplate template = templateDao.findById(kubernetesCluster.getTemplateId());
List<Long> networkIds = new ArrayList<Long>();
networkIds.add(kubernetesCluster.getNetworkId());
Network.IpAddresses addrs = new Network.IpAddresses(null, null);
long rootDiskSize = kubernetesCluster.getNodeRootDiskSize();
Map<String, String> customParameterMap = new HashMap<String, String>();
if (rootDiskSize > 0) {
// Only override the template's root disk size when explicitly configured.
customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
}
String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-master-%d", kubernetesClusterNodeNamePrefix, additionalMasterNodeInstance + 1));
String k8sMasterConfig = null;
try {
// VMware templates receive a slightly different bootstrap configuration.
k8sMasterConfig = getKubernetesAdditionalMasterConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(template.getHypervisorType()));
} catch (IOException e) {
// NOTE(review): logAndThrow presumably always throws; if it can return
// normally, k8sMasterConfig stays null and getBytes below NPEs — confirm.
logAndThrow(Level.ERROR, "Failed to read Kubernetes master configuration file", e);
}
// User data must be base64-encoded for the VM bootstrap.
String base64UserData = Base64.encodeBase64String(k8sMasterConfig.getBytes(StringUtils.getPreferredCharset()));
additionalMasterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner,
hostName, hostName, null, null, null,
null, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(),
null, addrs, null, null, null, customParameterMap, null, null, null, null);
if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("Created master VM ID: %s, %s in the Kubernetes cluster ID: %s", additionalMasterVm.getUuid(), hostName, kubernetesCluster.getUuid()));
}
return additionalMasterVm;
}
// Initializes benchmark defaults; the subclass supplies the operation name
// used to derive the working directory.
OperationStatsBase() {
baseDir = BASE_DIR_NAME + "/" + getOpName();
// Replication factor from configuration, defaulting to 3.
replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
numOpsRequired = 10;
numThreads = 3;
logLevel = Level.ERROR; // keep benchmark output quiet by default
ugcRefreshCount = Integer.MAX_VALUE; // effectively: never refresh
}
/**
 * Verifies that SessionFilter survives repeated downstream IOExceptions
 * without deadlocking the Hibernate session: all five exceptions must
 * propagate and a transaction must still be obtainable afterwards.
 */
public void testDeadlockFilter() throws Exception {
    // Make sure the chain blows up.
    chain = new MockFilterChain() {
        public void doFilter(ServletRequest req, ServletResponse resp)
                throws IOException, ServletException {
            throw new IOException("Test IOException");
        }
    };
    SessionFilter filter = new SessionFilter();
    HibernateFactory.getSession();
    int caughtCount = 0;
    Logger log = Logger.getLogger(SessionFilter.class);
    Level orig = log.getLevel();
    log.setLevel(Level.OFF); // silence the expected exception noise
    try {
        for (int i = 0; i < 5; i++) {
            try {
                filter.doFilter(request, response, chain);
            }
            catch (IOException ioe) {
                caughtCount++;
            }
        }
    } finally {
        // BUG FIX: restore in finally — an unexpected exception previously
        // left the SessionFilter logger silenced (OFF) for all later tests.
        log.setLevel(orig);
    }
    assertTrue(caughtCount == 5);
    HibernateFactory.getSession();
    assertTrue(HibernateFactory.inTransaction());
}
/**
 * Raises all short-circuit-read related loggers to TRACE for debugging.
 */
public static void enableBlockReaderFactoryTracing() {
    // Same four loggers as before, driven from a single table.
    Class<?>[] tracedClasses = {
        BlockReaderFactory.class, ShortCircuitCache.class,
        ShortCircuitReplica.class, BlockReaderLocal.class
    };
    for (Class<?> tracedClass : tracedClasses) {
        LogManager.getLogger(tracedClass.getName()).setLevel(Level.TRACE);
    }
}
/**
 * Turns DEBUG output off for this package by raising its logger to INFO.
 *
 * @throws StorageCommonException
 */
public void debugOff() {
    LOG.info("debug Off");
    Logger log = org.apache.log4j.Logger.getLogger(PACKAGE_NAME);
    // Keep the cheap null guard; the original empty 'else' branch was dead
    // code and has been removed.
    if (log != null) {
        log.setLevel(Level.INFO);
    }
}
/**
 * Logs {@code logcontent} at DEBUG on the root logger, prefixed with the
 * caller's "package-tail.ClassName", line number and method name.
 */
public static void debug(String logcontent)
{
    // Identify the caller: one frame above this helper on the stack.
    StackTraceElement caller = new Throwable().getStackTrace()[1];
    String fullName = caller.getClassName();
    // Keep the last package segment plus the simple class name, e.g. "pkg.Cls".
    int lastDot = fullName.lastIndexOf(".");
    String shortName = fullName.substring(fullName.lastIndexOf(".", lastDot - 1) + 1, fullName.length());
    String simpleName = fullName.substring(lastDot + 1, fullName.length());
    if (simpleName.isEmpty()) {
        return;
    }
    Logger rootLogger = Logger.getRootLogger();
    if (rootLogger == null) {
        return;
    }
    rootLogger.log(Level.DEBUG, " (" + shortName + "," + caller.getLineNumber() + "," + caller.getMethodName() + " )" + logcontent);
}
/**
 * Raises this node's level to {@code level} if it is currently lower, and
 * propagates the raise up the parent chain.
 *
 * @param level the minimum level this node (and its ancestors) should have
 */
private void tryRaiseLevel(Level level) {
    // Already at or above the requested level: nothing to do.
    if (this.getLevel().isGreaterOrEqual(level)) {
        return;
    }
    this.level = level;
    // Bubble the raise upward until the root is reached.
    if (this.getParent() != null) {
        this.getParent().tryRaiseLevel(level);
    }
}
/**
 * Converts the current SQL to a relational expression while capturing root
 * logger output, then asserts that {@code expectWarning} appears among the
 * WARN-level messages emitted during the conversion.
 */
void warns(String expectWarning) {
    MockAppender capture = new MockAppender();
    Logger rootLogger = Logger.getRootLogger();
    rootLogger.addAppender(capture);
    try {
        tester.convertSqlToRel(sql);
    } finally {
        // Always detach, even when the conversion throws.
        rootLogger.removeAppender(capture);
    }
    // log4j Level instances are singletons, so identity comparison is safe.
    List<String> warnings = capture.loggingEvents.stream()
            .filter(event -> Level.WARN == event.getLevel())
            .map(LoggingEvent::getRenderedMessage)
            .collect(Collectors.toList());
    assertThat(expectWarning, is(in(warnings)));
}