Source Code Examples for org.apache.hadoop.io.MultipleIOException

The following examples show how the org.apache.hadoop.io.MultipleIOException API is used in practice; each is excerpted from an open-source project's source on GitHub.
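All of the examples follow the same aggregate-then-throw pattern: run a best-effort loop over many resources, collect every IOException into a list, and finally call MultipleIOException.createIOException(exceptions) to fail once with all of the causes. Note that createIOException returns the single exception unchanged when the list has exactly one entry, and wraps two or more into a MultipleIOException. Below is a minimal, self-contained sketch of the pattern (the CloseAllDemo class and its resource list are illustrative, not taken from any of the projects below):

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.MultipleIOException;

public class CloseAllDemo {
  /** Close every resource; keep going on failure and report all causes at the end. */
  static void closeAll(List<? extends Closeable> resources) throws IOException {
    List<IOException> exceptions = new ArrayList<>();
    for (Closeable resource : resources) {
      try {
        resource.close();
      } catch (IOException ioe) {
        // Remember the failure but keep closing the remaining resources.
        exceptions.add(ioe);
      }
    }
    if (!exceptions.isEmpty()) {
      // One exception is rethrown as-is; two or more become a MultipleIOException.
      throw MultipleIOException.createIOException(exceptions);
    }
  }
}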

Example 1  Project: hadoop  File: DFSClientCache.java
/**
 * Close all DFSClient instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();

  ConcurrentMap<String, DFSClient> map = clientCache.asMap();

  for (Entry<String, DFSClient> item : map.entrySet()) {
    final DFSClient client = item.getValue();
    if (client != null) {
      try {
        client.close();
      } catch (IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
 
Example 2  Project: big-c  File: DFSClientCache.java
(Identical to the code shown in Example 1.)
 
Example 3  Project: hbase  File: BoundedRecoveredEditsOutputSink.java
@Override
public void append(EntryBuffers.RegionEntryBuffer buffer)
    throws IOException {
  List<WAL.Entry> entries = buffer.entryBuffer;
  if (entries.isEmpty()) {
    LOG.warn("got an empty buffer, skipping");
    return;
  }
  // The key point is to create a new writer, write the edits, then close the writer.
  RecoveredEditsWriter writer =
    createRecoveredEditsWriter(buffer.tableName, buffer.encodedRegionName,
      entries.get(0).getKey().getSequenceId());
  if (writer != null) {
    openingWritersNum.incrementAndGet();
    writer.writeRegionEntries(entries);
    regionEditsWrittenMap.compute(Bytes.toString(buffer.encodedRegionName),
      (k, v) -> v == null ? writer.editsWritten : v + writer.editsWritten);
    List<IOException> thrown = new ArrayList<>();
    Path dst = closeRecoveredEditsWriter(writer, thrown);
    splits.add(dst);
    openingWritersNum.decrementAndGet();
    if (!thrown.isEmpty()) {
      throw MultipleIOException.createIOException(thrown);
    }
  }
}
 
Example 4  Project: hbase  File: HFileArchiver.java
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. All files will be attempted to deleted before
 *           throwing the exception, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<HStoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting files without archiving.");
  List<IOException> errors = new ArrayList<>(0);
  for (HStoreFile hsf : compactedFiles) {
    try {
      hsf.deleteStoreFile();
    } catch (IOException e) {
      LOG.error("Failed to delete {}", hsf.getPath());
      errors.add(e);
    }
  }
  if (errors.size() > 0) {
    throw MultipleIOException.createIOException(errors);
  }
}
 
Example 5  Project: hadoop-gpu  File: FileSystem.java
synchronized void closeAll() throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();
  for(; !map.isEmpty(); ) {
    Map.Entry<Key, FileSystem> e = map.entrySet().iterator().next();
    final Key key = e.getKey();
    final FileSystem fs = e.getValue();

    //remove from cache
    remove(key, fs);

    if (fs != null) {
      try {
        fs.close();
      }
      catch(IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
 
Example 6  Project: hadoop  File: TestFsDatasetImpl.java
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
  FsDatasetImpl spyDataset = spy(dataset);
  FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
  File badDir = new File(BASE_DIR, "bad");
  badDir.mkdirs();
  doReturn(mockVolume).when(spyDataset)
      .createFsVolume(anyString(), any(File.class), any(StorageType.class));
  doThrow(new IOException("Failed to getVolumeMap()"))
    .when(mockVolume).getVolumeMap(
      anyString(),
      any(ReplicaMap.class),
      any(RamDiskReplicaLruTracker.class));

  Storage.StorageDirectory sd = createStorageDirectory(badDir);
  sd.lock();
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(badDir.getAbsoluteFile()),
      Matchers.<List<NamespaceInfo>>any()))
      .thenReturn(builder);

  StorageLocation location = StorageLocation.parse(badDir.toString());
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }

  try {
    spyDataset.addVolume(location, nsInfos);
    fail("Expect to throw MultipleIOException");
  } catch (MultipleIOException e) {
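    // Expected path: addVolume fails and throws the aggregated MultipleIOException.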
  }

  FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
 
Example 7  Project: hadoop  File: FileSystem.java
/**
 * Close all FileSystem instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();

  // Make a copy of the keys in the map since we'll be modifying
  // the map while iterating over it, which isn't safe.
  List<Key> keys = new ArrayList<Key>();
  keys.addAll(map.keySet());

  for (Key key : keys) {
    final FileSystem fs = map.get(key);

    if (onlyAutomatic && !toAutoClose.contains(key)) {
      continue;
    }

    //remove from cache
    remove(key, fs);

    if (fs != null) {
      try {
        fs.close();
      }
      catch(IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
 
Example 8  Project: dremio-oss  File: DremioFileSystemCache.java
/**
 * Close all FileSystem instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
public synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<>();

  // Make a copy of the keys in the map since we'll be modifying
  // the map while iterating over it, which isn't safe.
  List<Key> keys = new ArrayList<Key>();
  keys.addAll(map.keySet());

  for (Key key : keys) {
    final FileSystem fs = map.get(key);

    if (onlyAutomatic && !toAutoClose.contains(key)) {
      continue;
    }

    //remove from cache
    map.remove(key);
    toAutoClose.remove(key);

    if (fs != null) {
      try {
        fs.close();
      }
      catch(IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
 
Example 9  Project: big-c  File: TestFsDatasetImpl.java
(Identical to the code shown in Example 6.)
 
Example 10  Project: big-c  File: FileSystem.java
(Identical to the code shown in Example 7.)
 
Example 11  Project: lucene-solr  File: BlockPoolSlice.java
/**
 * Wait till all the recursive task for add replica to volume completed.
 *
 * @param subTaskQueue
 *          {@link AddReplicaProcessor} tasks list.
 * @param exceptions
 *          exceptions occurred in sub tasks.
 * @throws IOException
 *           throw if any sub task or multiple sub tasks failed.
 */
private void waitForSubTaskToFinish(Queue<RecursiveAction> subTaskQueue,
                                    List<IOException> exceptions) throws IOException {
  while (!subTaskQueue.isEmpty()) {
    RecursiveAction task = subTaskQueue.poll();
    if (task != null) {
      task.join();
    }
  }
  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
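
In this example the exceptions list is filled in by the parallel subtasks themselves; waitForSubTaskToFinish only joins them and aggregates at the end. A hedged sketch of that division of labor, with a trivial failing task standing in for AddReplicaProcessor:

import java.io.IOException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;

import org.apache.hadoop.io.MultipleIOException;

public class SubTaskAggregationDemo {
  public static void main(String[] args) throws IOException {
    // Shared, thread-safe sink for failures produced by the parallel subtasks.
    List<IOException> exceptions = Collections.synchronizedList(new ArrayList<>());
    Queue<RecursiveAction> subTaskQueue = new ArrayDeque<>();
    ForkJoinPool pool = new ForkJoinPool();

    for (int i = 0; i < 4; i++) {
      final int id = i;
      RecursiveAction task = new RecursiveAction() {
        @Override
        protected void compute() {
          // A real task would scan a directory; here odd-numbered tasks just fail.
          if (id % 2 == 1) {
            exceptions.add(new IOException("subtask " + id + " failed"));
          }
        }
      };
      subTaskQueue.add(task);
      pool.execute(task);
    }

    // Same shape as waitForSubTaskToFinish above: join all tasks, then aggregate.
    while (!subTaskQueue.isEmpty()) {
      RecursiveAction task = subTaskQueue.poll();
      if (task != null) {
        task.join();
      }
    }
    if (!exceptions.isEmpty()) {
      throw MultipleIOException.createIOException(exceptions);
    }
  }
}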
 
Example 12  Project: hbase-operator-tools  File: OfflineMetaRepair.java
public static void main(String[] args) throws Exception {
  // create a fsck object
  Configuration conf = HBaseConfiguration.create();
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  CommonFSUtils.setFsDefault(conf, CommonFSUtils.getRootDir(conf));
  HBaseFsck fsck = new HBaseFsck(conf);

  // Process command-line args.
  for (int i = 0; i < args.length; i++) {
    String cmd = args[i];
    if (cmd.equals("-details")) {
      HBaseFsck.setDisplayFullReport();
    } else if (cmd.equals("-base")) {
      if (i == args.length - 1) {
        System.err.println("OfflineMetaRepair: -base needs an HDFS path.");
        printUsageAndExit();
      }
      // update hbase root dir to user-specified base
      i++;
      CommonFSUtils.setRootDir(conf, new Path(args[i]));
      CommonFSUtils.setFsDefault(conf, CommonFSUtils.getRootDir(conf));
    } else if (cmd.equals("-sidelineDir")) {
      if (i == args.length - 1) {
        System.err.println("OfflineMetaRepair: -sidelineDir needs an HDFS path.");
        printUsageAndExit();
      }
      // set the hbck sideline dir to user-specified one
      i++;
      fsck.setSidelineDir(args[i]);
    } else {
      String str = "Unknown command line option : " + cmd;
      LOG.info(str);
      System.out.println(str);
      printUsageAndExit();
    }
  }

  System.out.println("OfflineMetaRepair command line options: " + String.join(" ", args));

  // Fsck doesn't shut down and doesn't provide a way to shut down its
  // threads cleanly, so we do a System.exit.
  boolean success = false;
  try {
    success = fsck.rebuildMeta();
  } catch (MultipleIOException mioes) {
    for (IOException ioe : mioes.getExceptions()) {
      LOG.error("Bailed out due to:", ioe);
    }
  } catch (Exception e) {
    LOG.error("Bailed out due to: ", e);
  } finally {
    System.exit(success ? 0 : 1);
  }
}
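
Example 12 also shows the consuming side: MultipleIOException.getExceptions() hands back the underlying list so each cause can be logged individually. Since createIOException rethrows a lone exception unchanged, a caller that wants per-cause handling should catch plain IOException as well. A small illustrative sketch (the two synthetic failures are placeholders):

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.io.MultipleIOException;

public class CatchSideDemo {
  public static void main(String[] args) {
    List<IOException> failures = Arrays.asList(
        new IOException("first close failed"),
        new IOException("second close failed"));
    try {
      throw MultipleIOException.createIOException(failures);
    } catch (MultipleIOException mioe) {
      // Two or more causes: unwrap and report each one.
      for (IOException cause : mioe.getExceptions()) {
        System.err.println("close failed: " + cause.getMessage());
      }
    } catch (IOException ioe) {
      // With exactly one cause, createIOException returns it directly.
      System.err.println("close failed: " + ioe.getMessage());
    }
  }
}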
 