org.apache.log4j.Logger#error() Source Code Examples

The examples below show how org.apache.log4j.Logger#error() is used in real open-source projects; follow each example's project link to view the full source on GitHub.
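Two overloads recur throughout these examples: error(Object message), which logs just the message, and error(Object message, Throwable t), which also records the throwable's stack trace. A minimal self-contained sketch (class name and message text are illustrative):

import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.Logger;

public class ErrorDemo {
    private static final Logger LOG = Logger.getLogger(ErrorDemo.class);

    public static void main(String[] args) {
        BasicConfigurator.configure();              // simple console appender
        LOG.error("plain error message");           // error(Object)
        LOG.error("error with cause",
                new IllegalStateException("boom")); // error(Object, Throwable): prints the stack trace
    }
}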

Example 1  Project: util  File: Closeables2.java
/**
 * Close all {@link Closeable} objects provided in the {@code closeables}
 * iterable. When an error occurs while closing, write the message out
 * to the provided {@code log}.
 *
 * @param closeables The closeables that we want to close.
 * @param log The log where we will write error messages when failing to
 *            close a closeable.
 */
public static void closeAll(
        @Nonnull final Iterable<? extends Closeable> closeables,
        @Nonnull final Logger log
) {
    Throwable throwable = null;
    for (Closeable closeable : closeables) {
        try {
            closeQuietly(closeable, log);
        } catch (Throwable e) {
            if (throwable == null) {
                throwable = e;
            } else {
                log.error("Suppressing throwable thrown when closing "+closeable, e);
            }
        }
    }
    if (throwable != null) {
        throw Throwables.propagate(throwable);
    }
}
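A hypothetical caller might look like the following; the resource list is an assumption for illustration, and Closeables2 is assumed to be on the classpath:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.util.Arrays;
import java.util.List;
import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.Logger;

public class CloseAllDemo {
    private static final Logger LOG = Logger.getLogger(CloseAllDemo.class);

    public static void main(String[] args) {
        BasicConfigurator.configure();
        List<Closeable> resources = Arrays.asList(
                new ByteArrayInputStream(new byte[0]),
                new ByteArrayOutputStream());
        // Per-resource failures are logged; the first failure is rethrown after all are attempted.
        Closeables2.closeAll(resources, LOG);
    }
}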
 
Example 2  Project: serianalyzer  File: Serianalyzer.java
/**
 * @param cl logger used to report missing class data
 * @param ref the method reference whose type initiated the check
 * @param superRef the superclass method reference whose bytecode is scanned
 * @return whether the target method was found
 * @throws IOException if the class data cannot be read
 */
private boolean doCheckClassInternal(Logger cl, MethodReference ref, MethodReference superRef) throws IOException {
    // MethodReference scmp = superRef.comparable();
    // Boolean cached = this.getState().getCheckedCache().get(scmp);
    // if ( cached != null ) {
    // return cached;
    // }
    boolean found = false;
    SerianalyzerClassMethodVisitor visitor;
    try ( InputStream superData = this.input.getClassData(superRef.getTypeNameString()) ) {
        if ( superData == null ) {
            cl.error("No class data for " + superRef.getTypeNameString()); //$NON-NLS-1$
            return false;
        }

        this.state.trackKnown(superRef);
        ClassReader sr = new ClassReader(superData);
        visitor = new SerianalyzerClassMethodVisitor(this, superRef, ref.getTypeName());
        sr.accept(visitor, 0);
        if ( visitor.isFound() ) {
            found = true;
        }
    }
    // this.getState().getCheckedCache().put(scmp, found);
    return found;
}
 
Example 3  Project: sldeditor  File: ConsoleManager.java
/**
 * Internal method for logging an exception.
 *
 * @param e the exception to log
 * @param logger the logger to write to
 */
private void internalLogException(Exception e, Logger logger) {
    logger.error(e.getMessage());
    panel.addErrorMessage(e.getMessage());

    StackTraceElement[] stackTrace = e.getStackTrace();

    StringBuilder sb = new StringBuilder();

    for (StackTraceElement t : stackTrace) {
        sb.append(t.toString());
        sb.append("\n");
    }

    logger.error(sb.toString());
}
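Since error(Object, Throwable) already renders the stack trace through the appender layout, the manual StringBuilder loop above could arguably be collapsed into one call; a hedged drop-in sketch for the method body, not the project's actual code:

logger.error(e.getMessage(), e);       // log4j prints the full stack trace itself
panel.addErrorMessage(e.getMessage());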
 
Example 4  Project: Babler  File: DAO.java
/**
 * Saves an entry to the database.
 * @param entry the entry to save
 * @param dbName usually scraping
 * @return true if successful
 */
public static boolean saveEntry(DBEntry entry, String dbName) {

    if(entry == null || !entry.isValid())
        return false;

    Logger log = Logger.getLogger(DAO.class);

    MongoDatabase db = MongoDB.INSTANCE.getDatabase(dbName);

    String collectionName = getCollectionName(entry);

    MongoCollection collection = db.getCollection(collectionName, BasicDBObject.class);

    try {
        collection.insertOne(entry);
        return true;
    }
    catch (MongoWriteException ex){
        if (ex.getCode() != 11000) // Ignore errors about duplicates
            log.error(ex.getError().getMessage());
        return false;
    }

}
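Two details worth noting: MongoDB signals duplicate-key violations with error code 11000, which is why that code is filtered out above, and error(Object) records only the message text. If the stack trace mattered, passing the exception in the Throwable slot would be a small hedged change:

log.error(ex.getError().getMessage(), ex); // message plus the write exception's stack trace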
 
Example 5  Project: datawave  File: ShardedTableMapFile.java
/**
 * Continually scans the metadata table attempting to get the split locations for the shard table.
 *
 * @param log
 *            logger for reporting errors
 * @param accumuloHelper
 *            Accumulo helper to query shard locations
 * @param shardedTableName
 *            name of the shard table--the table whose locations we are querying
 * @return a map of split (endRow) to the location of those tablets in accumulo
 */
public static Map<Text,String> getLocations(Logger log, AccumuloHelper accumuloHelper, String shardedTableName) {
    // split (endRow) -> String location mapping
    Map<Text,String> splitToLocation = new TreeMap<>();
    
    boolean keepRetrying = true;
    int attempts = 0;
    while (keepRetrying && attempts < MAX_RETRY_ATTEMPTS) {
        try {
            TableOperations tableOps = accumuloHelper.getConnector().tableOperations();
            attempts++;
            // if the table does not exist, log and skip rather than retrying forever in this loop
            if (!tableOps.exists(shardedTableName)) {
                log.error("Table " + shardedTableName + " not found, skipping split locations for missing table");
            } else {
                Range range = new Range();
                Locations locations = tableOps.locate(shardedTableName, Collections.singletonList(range));
                List<TabletId> tabletIds = locations.groupByRange().get(range);
                
                tabletIds.stream().filter(tId -> tId.getEndRow() != null)
                                .forEach(tId -> splitToLocation.put(tId.getEndRow(), locations.getTabletLocation(tId)));
            }
            // made it here, no errors so break out
            keepRetrying = false;
        } catch (Exception e) {
            log.warn(e.getClass().getName() + ":" + e.getMessage() + " ... retrying ...", e);
            UtilWaitThread.sleep(3000);
            splitToLocation.clear();
        }
    }
    
    return splitToLocation;
}
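The loop above follows a reusable bounded-retry shape: cap the attempts, warn and sleep on failure, clear partial state, and stop on success. A generic self-contained sketch under hypothetical names (flakyOperation stands in for the Accumulo calls):

import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.Logger;

public class RetryDemo {
    private static final Logger LOG = Logger.getLogger(RetryDemo.class);
    private static final int MAX_RETRY_ATTEMPTS = 3;
    private static int failures = 0;

    public static void main(String[] args) throws InterruptedException {
        BasicConfigurator.configure();
        boolean keepRetrying = true;
        int attempts = 0;
        while (keepRetrying && attempts < MAX_RETRY_ATTEMPTS) {
            attempts++;
            try {
                flakyOperation();     // hypothetical transiently-failing call
                keepRetrying = false; // success: stop retrying
            } catch (Exception e) {
                LOG.warn(e.getMessage() + " ... retrying ...", e);
                Thread.sleep(3000);   // back off before the next attempt
            }
        }
    }

    private static void flakyOperation() {
        if (failures++ < 2) {
            throw new IllegalStateException("transient failure " + failures);
        }
    }
}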
 
Example 6  Project: datawave  File: JobSetupUtil.java
/**
 * Changes the priority of a MapReduce job to VERY_HIGH. There doesn't seem to be a good way using the Hadoop API to do this, so we wait until there is map
 * progress reported (indicating the job is actually running) and then update its priority by executing a system command.
 *
 * @param job
 *            The {@link Job} whose priority is to be increased
 * @param log
 *            The logger used to report the outcome of the priority change
 */
public static void changeJobPriority(Job job, Logger log) throws IOException {
    // Spin until we get some map progress, so that we can be sure the job is
    // registered with hadoop when we go to change its priority through the
    // command-line below.
    while (job.mapProgress() == 0) {
        // block
    }
    
    try {
        StringBuilder cmd = new StringBuilder();
        String hadoopHome = System.getProperty("HADOOP_HOME");
        if (hadoopHome == null) {
            log.debug("$HADOOP_HOME is not set; hopefully `hadoop` is on the classpath.");
        } else {
            cmd.append(hadoopHome).append('/');
        }
        cmd.append("hadoop job -set-priority ").append(job.getJobID()).append(" VERY_HIGH");
        
        log.info("Executing: " + cmd);
        Process pr = Runtime.getRuntime().exec(cmd.toString());
        
        if (log.isInfoEnabled()) {
            BufferedReader in = new BufferedReader(new InputStreamReader(pr.getInputStream()));
            while (in.ready()) {
                log.info(in.readLine());
            }
        }
        
        int retCode = pr.waitFor();
        if (retCode == 0) {
            log.info("Successfully upgraded job priority.");
        } else {
            log.error("Hadoop process exited abnormally-- job may take a long time if system is saturated.");
        }
    } catch (Exception e) {
        log.error("This job may take a while on a system running at full ingest load.", e);
    }
}
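One caveat about the code above: while (job.mapProgress() == 0) spins at full CPU until progress appears. If adapting this, a short sleep in the loop body is a reasonable hedge (a sketch, not the project's actual code):

while (job.mapProgress() == 0) {
    try {
        Thread.sleep(1000);                  // poll once per second instead of busy-waiting
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();  // restore the interrupt flag and give up waiting
        break;
    }
}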
 
Example 7  Project: kfs  File: BatchContainerDirectory.java
/**
 * Reads the contents of the semaphore file (normally the error file), and writes the contents to the requested Logger.
 *
 * @param batchStepFile the descriptor whose semaphore file's contents should be written to the Logger
 * @param log the log to write the file contents to
 */
public void logFileContents(BatchStepFileDescriptor batchStepFile, Logger log) {
    File resultFile = batchStepFile.getStepFile();
    if (resultFile != null) {
        List<String> contents = getFileContents(resultFile);
        StringBuilder toLog = new StringBuilder();

        for (String line : contents) {
            toLog.append(line).append("\n");
        }

        log.error("Exception found in " + resultFile.getName() + "\n" + toLog);
    }
}
 
Example 8  Project: openemm  File: DbUserActivityLogServiceImpl.java
@Override
public void writeUserActivityLog(ComAdmin admin, String action, String description, Logger callerLog) {
    try {
        this.writeUserActivityLog(admin, action, description);
    } catch (Exception e) {
        callerLog.error("Error writing ActivityLog: " + e.getMessage(), e);
        callerLog.info("Userlog: " + admin.getUsername() + " " + action + " " +  description);
    }
}
 
Example 9  Project: stendhal  File: Registrator.java
/**
 * Registers an observer for notifications.
 * @param observer
 *            the observer to add
 */
public void setObserver(final Observer observer) {
	if(observer != null) {
		addObserver(observer);
	} else {
		// log it.
		final Logger logger = Logger.getLogger(Registrator.class);
		logger.error("null observer was not added.", new Throwable());
	}
}
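Passing a freshly created Throwable, as above, is a common trick to make log4j print the call-site stack trace even though no exception was thrown; a minimal hedged sketch:

import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.Logger;

public class CallSiteTraceDemo {
    private static final Logger LOG = Logger.getLogger(CallSiteTraceDemo.class);

    public static void main(String[] args) {
        BasicConfigurator.configure();
        // No exception occurred; the Throwable exists only to capture where we are.
        LOG.error("null observer was not added.", new Throwable());
    }
}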
 
Example 10  Project: uncc2014watsonsim  File: ParallelStats.java
/**
 * @param args the command line arguments
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
    BasicConfigurator.configure();
    Logger.getRootLogger().setLevel(Level.WARN);
    Logger log = Logger.getLogger(ParallelStats.class);

    //String mode = System.console().readLine("Train or test [test]:");
    System.out.print("Train, test, minitrain or minitest [minitest]: ");
    BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
    String mode = br.readLine();
    String sql;
    if (mode.equals("test")) {
        sql = String.format("ORDER BY permute LIMIT %d OFFSET %d", 2000, 0);
    } else if (mode.equals("train")) {
        sql = String.format("ORDER BY permute LIMIT %d OFFSET %d", 10000, 2000);
    } else if (mode.equals("minitrain")) {
        sql = String.format("ORDER BY permute LIMIT %d OFFSET %d", 1000, 0);
    } else {
        sql = String.format("ORDER BY permute LIMIT %d OFFSET %d", 1000, 2000);
    }

    System.out.print("Describe the setup: ");
    String description = br.readLine();
    try {
        new StatsGenerator(description + ": " + mode, sql).run();
    } catch (SQLException e) {
        e.printStackTrace();
        log.error("Database missing, invalid, or out of date. Check that you "
                + "have the latest version.", e);
    }

    System.out.println("Done.");
}
 
Example 11  Project: openemm  File: UserFormController.java
private void writeUserActivityLog(ComAdmin admin, UserAction userAction, Logger logger) {
	if (userActivityLogService != null) {
		userActivityLogService.writeUserActivityLog(admin, userAction, logger);
	} else {
		logger.error("Missing userActivityLogService in " + this.getClass().getSimpleName());
		logger.info("Userlog: " + admin.getUsername() + " " + userAction.getAction() + " " +  userAction.getDescription());
	}
}
 
Example 12  Project: openemm  File: BirtStatisticsServiceImpl.java
@Override
public File getBirtReportTmpFile(int birtReportId, String birtUrl, HttpClient httpClient, Logger loggerParameter) {
    try {
        return exportBirtStatistic(String.format(BIRT_REPORT_TEMP_FILE_PATTERN, birtReportId), ".tmp",
                BIRT_REPORT_TEMP_DIR, birtUrl, httpClient, loggerParameter);
    } catch (Exception e) {
        loggerParameter.error("Cannot get birt report file: " + e.getMessage());
        return null;
    }
}
 
Example 13  Project: 07kit  File: Application.java
public static void main(String[] args) throws IOException {
    Logger.getRootLogger().addAppender(new Appender(new SimpleLayout()));
    final Logger logger = Logger.getLogger(Application.class);
    try {
        if (args.length > 0 && args[0] != null && args[0].trim().equals("-dev")) {
            devMode = true;
        }

        setOSXDockIcon();
        prepareEnvironment();

        SwingUtilities.invokeAndWait(() -> {
            IconFontSwing.register(FontAwesome.getIconFont());
            COLOUR_SCHEME.init();
            new SidebarController();
            new MainController();
            new LoginController();
            new SettingsDebugController();
            new WidgetDebugController();
            new SettingsController();
            new GalleryController();

            Session.get().onAuthenticated();
            ControllerManager.get(MainController.class).show();
        });
    } catch (Throwable t) {
        logger.error("Initialization failed.", t);
    }
}
 
Example 14  Project: DataSphereStudio  File: AbstractEventCheck.java
void closeQueryStmt(PreparedStatement stmt, Logger log) {
    if (stmt != null) {
        try {
            stmt.close();
        } catch (SQLException e) {
            log.error("Error closing result stmt", e);
        }
    }

}
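On Java 7 and later, the same close-and-log concern is often avoided entirely with try-with-resources, which closes the statement automatically; a hedged sketch assuming connection and sql are in scope:

try (PreparedStatement stmt = connection.prepareStatement(sql)) {
    stmt.executeQuery();
} // stmt.close() runs automatically, even when the query throws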
 
Example 15  Project: Babler  File: LanguageClassifier.java
/**
 * Builds the list of language codes supported by this classifier.
 * @param langs a list of language names (not codes), for example English, Hebrew, Spanish
 */
protected void buildListOfSupportedLanguageCodesFromLanguageNames(String[] langs) {
    this.supportedLanguages = new ArrayList<>();
    Logger log = Logger.getLogger(LanguageClassifier.class);
    for(String lang : langs){
        try {
            supportedLanguages.add(LanguageCode.convertLanguageNameToCode(lang));
        } catch (Exception e) {
            log.error(e);
        }
    }
}
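A caveat about log.error(e) above: the exception is passed in the message slot, so log4j renders only e.toString() and the stack trace is lost. Passing it in the Throwable slot preserves the trace; a hedged drop-in for the catch block:

log.error("Failed to convert language name: " + lang, e); // message plus full stack trace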
 
Example 16  Project: olca-app  File: Console.java
@Override
public void close() {
	Logger logger = Logger.getLogger("org.openlca");
	logger.removeAppender(this);
	if (!stream.isClosed()) {
		try {
			stream.flush();
			stream.close();
		} catch (Exception e) {
			logger.error("Cannot close console stream.", e);
		}
	}
}
 
Example 17  Project: swift-t  File: STCompiler.java
public static void reportInternalError(Logger logger, Throwable e) {
  logger.error("STC internal error: please report this", e);
}
 
Example 18  Project: DataSphereStudio  File: EventDruidFactory.java
private static DruidDataSource createDataSource(Properties props, Logger log, String type) {
    String name = null;
    String url = null;
    String username = null;
    String password = null;

    if (type.equals("Msg")) {
        name = props.getProperty("msg.eventchecker.jdo.option.name");
        url = props.getProperty("msg.eventchecker.jdo.option.url");
        username = props.getProperty("msg.eventchecker.jdo.option.username");
        try {
//          password = new String(Base64.getDecoder().decode(props.getProperty("msg.eventchecker.jdo.option.password").getBytes()),"UTF-8");
            password = props.getProperty("msg.eventchecker.jdo.option.password");
        } catch (Exception e) {
            log.error("password decode failed", e);
        }
    }

    int initialSize = Integer.valueOf(props.getProperty("option.initial.size", "1"));
    int maxActive = Integer.valueOf(props.getProperty("option.max.active", "100"));
    int minIdle = Integer.valueOf(props.getProperty("option.min.idle", "1"));
    long maxWait = Long.valueOf(props.getProperty("option.max.wait", "60000"));
    String validationQuery = props.getProperty("option.validation.quert", "SELECT 'x'");
    long timeBetweenEvictionRunsMillis = Long.valueOf(props.getProperty("option.time.between.eviction.runs.millis", "6000"));
    long minEvictableIdleTimeMillis = Long.valueOf(props.getProperty("option.evictable.idle,time.millis", "300000"));
    boolean testOnBorrow = Boolean.valueOf(props.getProperty("option.test.on.borrow", "true"));
    int maxOpenPreparedStatements = Integer.valueOf(props.getProperty("option.max.open.prepared.statements", "-1"));

    if (timeBetweenEvictionRunsMillis > minEvictableIdleTimeMillis) {
        timeBetweenEvictionRunsMillis = minEvictableIdleTimeMillis;
    }

    DruidDataSource ds = new DruidDataSource();

    if (StringUtils.isNotBlank(name)) {
        ds.setName(name);
    }

    ds.setUrl(url);
    ds.setDriverClassName("com.mysql.jdbc.Driver");
    ds.setUsername(username);
    ds.setPassword(password);
    ds.setInitialSize(initialSize);
    ds.setMinIdle(minIdle);
    ds.setMaxActive(maxActive);
    ds.setMaxWait(maxWait);
    ds.setTestOnBorrow(testOnBorrow);
    ds.setValidationQuery(validationQuery);
    ds.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis);
    ds.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis);
    if (maxOpenPreparedStatements > 0) {
        ds.setPoolPreparedStatements(true);
        ds.setMaxPoolPreparedStatementPerConnectionSize(maxOpenPreparedStatements);
    } else {
        ds.setPoolPreparedStatements(false);
    }
    log.info("Druid data source initialized!");
    return ds;
}
 
Example 19  Project: datawave  File: JobSetupUtil.java
/**
 * Computes a set of ranges to scan over entries whose row is a sharded day (yyyyMMdd_shardNum), one range per day. We calculate a range per day so that we
 * can assume that all entries for a particular day go to the same mapper in a MapReduce job.
 *
 * This uses the start and end date parameters that can be supplied at job start time; if none are supplied, it uses the current day. The end of the range
 * is always set exclusively to the start of the day following the end of the supplied day (or the beginning of tomorrow if no end was supplied).
 */
public static Collection<Range> computeShardedDayRange(Configuration conf, Logger log) {
    String start = conf.get(MetricsConfig.START);
    String end = conf.get(MetricsConfig.END);
    
    GregorianCalendar from = new GregorianCalendar();
    if (start != null)
        from.setTime(DateConverter.convert(start));
    from.set(Calendar.HOUR_OF_DAY, 0);
    from.set(Calendar.MINUTE, 0);
    from.set(Calendar.SECOND, 0);
    from.set(Calendar.MILLISECOND, 0);
    if (log.isDebugEnabled() && start == null)
        log.debug("Defaulting start to the beginning of today: " + from);
    
    GregorianCalendar until = new GregorianCalendar();
    if (end != null)
        until.setTimeInMillis(DateConverter.convert(end).getTime() + TimeUnit.DAYS.toMillis(1));
    until.set(Calendar.HOUR_OF_DAY, 0);
    until.set(Calendar.MINUTE, 0);
    until.set(Calendar.SECOND, 0);
    until.set(Calendar.MILLISECOND, 0);
    until.add(Calendar.DAY_OF_YEAR, 1);
    if (log.isDebugEnabled() && end == null)
        log.debug("Defaulting end to the beginning of tomorrow: " + until);
    
    if (until.compareTo(from) <= 0) {
        log.error("Warning: end date (" + until + ") is after begin date (" + from + "), swapping!");
        GregorianCalendar tmp = until;
        until = from;
        from = tmp;
    }
    
    ArrayList<Range> ranges = new ArrayList<>();
    while (from.compareTo(until) < 0) {
        String rangeStart = DateHelper.format(from.getTime());
        from.add(GregorianCalendar.DAY_OF_YEAR, 1);
        String rangeEnd = DateHelper.format(from.getTime());
        ranges.add(new Range(rangeStart, true, rangeEnd, false));
    }
    
    return ranges;
}
 
Example 20  Project: DataSphereStudio  File: EventCheckerService.java
/**
 * Receives a message. Receiving first queries the consumption record: if one exists,
 * consumption resumes from the last consumed position; otherwise it starts from the
 * time the job was launched. Receiving is performed by active polling, repeatedly
 * querying for the target message until the configured time limit is exceeded.
 */
public boolean reciveMsg(int jobId, Properties props, Logger log) {
    if (props != null) {
        return new DefaultEventcheckReceiver(props).reciveMsg(jobId, props, log);
    } else {
        log.error("create EventCheckSender failed: props is null");
        return false;
    }
}