Source code examples for the class org.apache.hadoop.fs.StorageType

Listed below are examples showing how to use the org.apache.hadoop.fs.StorageType API; you can also click through to GitHub to view the original source code.
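
Before the project snippets, here is a minimal, self-contained sketch of the StorageType enum itself. It is not taken from any of the projects below; it only exercises methods that the snippets rely on (parseStorageType, supportTypeQuota, getTypesSupportingQuota), and it assumes the String overload of parseStorageType is available in your Hadoop version.

import java.util.List;

import org.apache.hadoop.fs.StorageType;

public class StorageTypeDemo {
  public static void main(String[] args) {
    // Parse a storage type from its name (String overload assumed here;
    // Example 21 below uses the int overload when reading an edit log).
    StorageType ssd = StorageType.parseStorageType("SSD");
    System.out.println("Parsed type: " + ssd);

    // DEFAULT is the storage type used when none is specified explicitly.
    System.out.println("Default type: " + StorageType.DEFAULT);

    // Only some storage types can carry a per-type quota.
    List<StorageType> quotaTypes = StorageType.getTypesSupportingQuota();
    for (StorageType t : quotaTypes) {
      System.out.println(t + " supports type quota: " + t.supportTypeQuota());
    }
  }
}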


/**
 * {@inheritDoc}
 */
@Override
protected void chooseRemoteRack(int numOfReplicas,
    DatanodeDescriptor localMachine, Set<Node> excludedNodes,
    long blocksize, int maxReplicasPerRack, List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes)
    throws NotEnoughReplicasException {
  int oldNumOfReplicas = results.size();

  final String rackLocation = NetworkTopology.getFirstHalf(
      localMachine.getNetworkLocation());
  try {
    // randomly choose from remote racks
    chooseRandom(numOfReplicas, "~" + rackLocation, excludedNodes, blocksize,
        maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e) {
    // fall back to the local rack
    chooseRandom(numOfReplicas - (results.size() - oldNumOfReplicas),
        rackLocation, excludedNodes, blocksize,
        maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 

private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo,
    StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs,
    DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Channel> promise)
    throws IOException {
  Promise<Void> saslPromise = channel.eventLoop().newPromise();
  trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
  saslPromise.addListener(new FutureListener<Void>() {

    @Override
    public void operationComplete(Future<Void> future) throws Exception {
      if (future.isSuccess()) {
        // set up the response processing pipeline first, then send the request.
        processWriteBlockResponse(channel, dnInfo, promise, timeoutMs);
        requestWriteBlock(channel, storageType, writeBlockProtoBuilder);
      } else {
        promise.tryFailure(future.cause());
      }
    }
  });
}
 
Example 3, project: big-c, file: FSDirectory.java

private void verifyQuotaForTruncate(INodesInPath iip, INodeFile file,
    long newLength, QuotaCounts delta) throws QuotaExceededException {
  if (!getFSNamesystem().isImageLoaded() || shouldSkipQuotaChecks()) {
    // Do not check quota if edit log is still being processed
    return;
  }
  final long diff = file.computeQuotaDeltaForTruncate(newLength);
  final short repl = file.getBlockReplication();
  delta.addStorageSpace(diff * repl);
  final BlockStoragePolicy policy = getBlockStoragePolicySuite()
      .getPolicy(file.getStoragePolicyID());
  List<StorageType> types = policy.chooseStorageTypes(repl);
  for (StorageType t : types) {
    if (t.supportTypeQuota()) {
      delta.addTypeSpace(t, diff);
    }
  }
  if (diff > 0) {
    readLock();
    try {
      verifyQuota(iip, iip.length() - 1, delta, null);
    } finally {
      readUnlock();
    }
  }
}
 
Example 4, project: hadoop, file: TestQuotaByStorageType.java

@Before
public void setUp() throws Exception {
  conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);

  // Set up a 3-node cluster and configure
  // each node with 1 SSD and 1 DISK without capacity limitation
  cluster = new MiniDFSCluster
      .Builder(conf)
      .numDataNodes(REPLICATION)
      .storageTypes(new StorageType[]{StorageType.SSD, StorageType.DEFAULT})
      .build();
  cluster.waitActive();

  fsdir = cluster.getNamesystem().getFSDirectory();
  dfs = cluster.getFileSystem();
  fsn = cluster.getNamesystem();
}
 
Example 5, project: big-c, file: DirectoryWithQuotaFeature.java

private void verifyQuotaByStorageType(EnumCounters<StorageType> typeDelta)
    throws QuotaByStorageTypeExceededException {
  if (!isQuotaByStorageTypeSet()) {
    return;
  }
  for (StorageType t: StorageType.getTypesSupportingQuota()) {
    if (!isQuotaByStorageTypeSet(t)) {
      continue;
    }
    if (Quota.isViolated(quota.getTypeSpace(t), usage.getTypeSpace(t),
        typeDelta.get(t))) {
      throw new QuotaByStorageTypeExceededException(
        quota.getTypeSpace(t), usage.getTypeSpace(t) + typeDelta.get(t), t);
    }
  }
}
 

private DatanodeStorageInfo chooseFromNextRack(Node next,
    Set<Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
  final String nextRack = next.getNetworkLocation();
  try {
    return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch(NotEnoughReplicasException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Failed to choose from the next rack (location = " + nextRack
          + "), retry choosing ramdomly", e);
    }
    //otherwise randomly choose one from the network
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 
Example 7, project: hadoop, file: SimulatedFSDataset.java

@Override // FsDatasetSpi
public synchronized ReplicaHandler createTemporary(
    StorageType storageType, ExtendedBlock b) throws IOException {
  if (isValidBlock(b)) {
    throw new ReplicaAlreadyExistsException("Block " + b +
        " is valid, and cannot be written to.");
  }
  if (isValidRbw(b)) {
    throw new ReplicaAlreadyExistsException("Block " + b +
        " is being written, and cannot be written to.");
  }
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = new BInfo(b.getBlockPoolId(), b.getLocalBlock(), true);
  map.put(binfo.theBlock, binfo);
  return new ReplicaHandler(binfo, null);
}
 
Example 8, project: big-c, file: TestQuotaByStorageType.java

@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOffChildOn() throws Exception {
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(child, StorageType.SSD, 2 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Since the child directory has an SSD quota of 2 * BLOCKSIZE,
  // expect an exception when creating files under child directory.
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  try {
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
        REPLICATION, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
 
Example 9, project: hadoop, file: Mover.java

boolean scheduleMoves4Block(StorageTypeDiff diff, LocatedBlock lb) {
  final List<MLocation> locations = MLocation.toLocations(lb);
  Collections.shuffle(locations);
  final DBlock db = newDBlock(lb.getBlock().getLocalBlock(), locations);

  for (final StorageType t : diff.existing) {
    for (final MLocation ml : locations) {
      final Source source = storages.getSource(ml);
      if (ml.storageType == t && source != null) {
        // try to schedule one replica move.
        if (scheduleMoveReplica(db, source, diff.expected)) {
          return true;
        }
      }
    }
  }
  return false;
}
 
Example 10, project: hadoop, file: TestQuotaByStorageType.java

@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOn() throws Exception {
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(parent, StorageType.SSD, 2 * BLOCKSIZE);
  dfs.setQuotaByStorageType(child, StorageType.SSD, 3 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Verify parent Quota applies
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  try {
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
        REPLICATION, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
 
Example 11, project: big-c, file: FSImageFormatPBINode.java

private void loadRootINode(INodeSection.INode p) {
  INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
  final QuotaCounts q = root.getQuotaCounts();
  final long nsQuota = q.getNameSpace();
  final long dsQuota = q.getStorageSpace();
  if (nsQuota != -1 || dsQuota != -1) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
  }
  final EnumCounters<StorageType> typeQuotas = q.getTypeSpaces();
  if (typeQuotas.anyGreaterOrEqual(0)) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(typeQuotas);
  }
  dir.rootDir.cloneModificationTime(root);
  dir.rootDir.clonePermissionStatus(root);
  // root dir supports having extended attributes according to POSIX
  final XAttrFeature f = root.getXAttrFeature();
  if (f != null) {
    dir.rootDir.addXAttrFeature(f);
  }
}
 
Example 12, project: big-c, file: DFSInputStream.java

/**
 * Get the best node from which to stream the data.
 * @param block LocatedBlock, containing nodes in priority order.
 * @param ignoredNodes Do not choose nodes in this array (may be null)
 * @return The DNAddrPair of the best node.
 * @throws IOException
 */
private DNAddrPair getBestNodeDNAddrPair(LocatedBlock block,
    Collection<DatanodeInfo> ignoredNodes) throws IOException {
  DatanodeInfo[] nodes = block.getLocations();
  StorageType[] storageTypes = block.getStorageTypes();
  DatanodeInfo chosenNode = null;
  StorageType storageType = null;
  if (nodes != null) {
    for (int i = 0; i < nodes.length; i++) {
      if (!deadNodes.containsKey(nodes[i])
          && (ignoredNodes == null || !ignoredNodes.contains(nodes[i]))) {
        chosenNode = nodes[i];
        // Storage types are ordered to correspond with nodes, so use the same
        // index to get storage type.
        if (storageTypes != null && i < storageTypes.length) {
          storageType = storageTypes[i];
        }
        break;
      }
    }
  }
  if (chosenNode == null) {
    throw new IOException("No live nodes contain block " + block.getBlock() +
        " after checking nodes = " + Arrays.toString(nodes) +
        ", ignoredNodes = " + ignoredNodes);
  }
  final String dnAddr =
      chosenNode.getXferAddr(dfsClient.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
  return new DNAddrPair(chosenNode, targetAddr, storageType);
}
 
Example 13, project: big-c, file: Dispatcher.java

/**
 * Decide if the block is a good candidate to be moved from source to target.
 * A block is a good candidate if
 * 1. the block is not in the process of being moved/has not been moved;
 * 2. the block does not have a replica on the target;
 * 3. doing the move does not reduce the number of racks that the block has
 */
private boolean isGoodBlockCandidate(StorageGroup source, StorageGroup target,
    StorageType targetStorageType, DBlock block) {
  if (source.equals(target)) {
    return false;
  }
  if (target.storageType != targetStorageType) {
    return false;
  }
  // check if the block is moved or not
  if (movedBlocks.contains(block.getBlock())) {
    return false;
  }
  final DatanodeInfo targetDatanode = target.getDatanodeInfo();
  if (source.getDatanodeInfo().equals(targetDatanode)) {
    // the block is moved inside same DN
    return true;
  }

  // check if block has replica in target node
  for (StorageGroup blockLocation : block.getLocations()) {
    if (blockLocation.getDatanodeInfo().equals(targetDatanode)) {
      return false;
    }
  }

  if (cluster.isNodeGroupAware()
      && isOnSameNodeGroupWithReplicas(source, target, block)) {
    return false;
  }
  if (reduceNumOfRacks(source, target, block)) {
    return false;
  }
  return true;
}
 
Example 14, project: big-c, file: TestMover.java

@Test
public void testScheduleSameBlock() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(4).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testScheduleSameBlock/file";
    
    {
      final FSDataOutputStream out = dfs.create(new Path(file));
      out.writeChars("testScheduleSameBlock");
      out.close();
    }

    final Mover mover = newMover(conf);
    mover.init();
    final Mover.Processor processor = mover.new Processor();

    final LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    final List<MLocation> locations = MLocation.toLocations(lb);
    final MLocation ml = locations.get(0);
    final DBlock db = mover.newDBlock(lb.getBlock().getLocalBlock(), locations);

    final List<StorageType> storageTypes = new ArrayList<StorageType>(
        Arrays.asList(StorageType.DEFAULT, StorageType.DEFAULT));
    Assert.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes));
    Assert.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes));
  } finally {
    cluster.shutdown();
  }
}
 
Example 15, project: hadoop, file: BlockReportTestBase.java

/**
 * Test writes a file and closes it.
 * A block report is generated with an extra block.
 * The block report is forced and the number of pending-deletion
 * blocks is checked.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_04() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs, filePath,
                         FILE_SIZE, REPL_FACTOR, rand.nextLong());


  DataNode dn = cluster.getDataNodes().get(DN_N0);
  // all blocks belong to the same file, hence same BP
  String poolId = cluster.getNamesystem().getBlockPoolId();

  // Create a bogus new block which will not be present on the namenode.
  ExtendedBlock b = new ExtendedBlock(
      poolId, rand.nextLong(), 1024L, rand.nextLong());
  dn.getFSDataset().createRbw(StorageType.DEFAULT, b, false);

  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
             cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
  assertThat("Wrong number of PendingDeletion blocks",
             cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
}
 
Example 16, project: hadoop-ozone, file: VolumeInfo.java

private VolumeInfo(Builder b) throws IOException {

  this.rootDir = b.rootDir;
  File root = new File(this.rootDir);

  boolean succeeded = root.isDirectory() || root.mkdirs();

  if (!succeeded) {
    LOG.error("Unable to create the volume root dir at : {}", root);
    throw new IOException("Unable to create the volume root dir at " + root);
  }

  this.storageType = (b.storageType != null ?
      b.storageType : StorageType.DEFAULT);

  this.configuredCapacity = (b.configuredCapacity != 0 ?
      b.configuredCapacity : -1);

  SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory;
  if (usageCheckFactory == null) {
    usageCheckFactory = SpaceUsageCheckFactory.create(b.conf);
  }
  SpaceUsageCheckParams checkParams =
      usageCheckFactory.paramsFor(root);

  this.usage = new VolumeUsage(checkParams);
}
 

private StorageLocationReport(String id, boolean failed, long capacity,
    long scmUsed, long remaining, StorageType storageType,
    String storageLocation) {
  this.id = id;
  this.failed = failed;
  this.capacity = capacity;
  this.scmUsed = scmUsed;
  this.remaining = remaining;
  this.storageType = storageType;
  this.storageLocation = storageLocation;
}
 
Example 18, project: hadoop, file: TestStoragePolicySummary.java

@Test
public void testSortInDescendingOrder() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  BlockStoragePolicy warm = bsps.getPolicy("WARM");
  BlockStoragePolicy cold = bsps.getPolicy("COLD");
  //DISK:3
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
  //DISK:1,ARCHIVE:2
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.ARCHIVE,StorageType.ARCHIVE},warm);
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.DISK,StorageType.ARCHIVE},warm);
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.ARCHIVE,StorageType.DISK},warm);
  //ARCHIVE:3
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.ARCHIVE,StorageType.ARCHIVE},cold);
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.ARCHIVE,StorageType.ARCHIVE},cold);
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.ARCHIVE,StorageType.ARCHIVE},cold);
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.ARCHIVE,StorageType.ARCHIVE},cold);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(3,actualOutput.size());
  Map<String, Long>  expectedOutput = new LinkedHashMap<>();
  expectedOutput.put("COLD|ARCHIVE:3(COLD)", 4l);
  expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3l);
  expectedOutput.put("HOT|DISK:3(HOT)", 2l);
  Assert.assertEquals(expectedOutput.toString(),actualOutput.toString());
}
 
Example 19, project: big-c, file: TestBlockStoragePolicy.java

private void checkLocatedBlocks(HdfsLocatedFileStatus status, int blockNum,
                                int replicaNum, StorageType... types) {
  List<StorageType> typeList = Lists.newArrayList();
  Collections.addAll(typeList, types);
  LocatedBlocks lbs = status.getBlockLocations();
  Assert.assertEquals(blockNum, lbs.getLocatedBlocks().size());
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    Assert.assertEquals(replicaNum, lb.getStorageTypes().length);
    for (StorageType type : lb.getStorageTypes()) {
      Assert.assertTrue(typeList.remove(type));
    }
  }
  Assert.assertTrue(typeList.isEmpty());
}
 
Example 20, project: hadoop, file: Dispatcher.java

/** Decide if the given block is a good candidate to move or not */
private boolean isGoodBlockCandidate(DBlock block) {
  // source and target must have the same storage type
  final StorageType sourceStorageType = getStorageType();
  for (Task t : tasks) {
    if (Dispatcher.this.isGoodBlockCandidate(this, t.target,
        sourceStorageType, block)) {
      return true;
    }
  }
  return false;
}
 
Example 21, project: big-c, file: FSEditLogOp.java

@Override
void readFields(DataInputStream in, int logVersion)
  throws IOException {
  this.src = FSImageSerialization.readString(in);
  this.type = StorageType.parseStorageType(FSImageSerialization.readInt(in));
  this.dsQuota = FSImageSerialization.readLong(in);
}
 
Example 22, project: big-c, file: TestQuotaByStorageType.java

@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateTruncate() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on directory "foo"
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify SSD consumed before truncate
  long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // Truncate file to 1 * BLOCKSIZE
  int newFile1Len = BLOCKSIZE * 1;
  dfs.truncate(createdFile1, newFile1Len);

  // Verify SSD consumed after truncate
  ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(newFile1Len, ssdConsumed);

  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), newFile1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), newFile1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), newFile1Len * 2);
}
 
Example 23, project: hadoop, file: NameNodeRpcServer.java

@Override // ClientProtocol
public void setQuota(String path, long namespaceQuota, long storagespaceQuota,
                     StorageType type)
    throws IOException {
  checkNNStartup();
  namesystem.setQuota(path, namespaceQuota, storagespaceQuota, type);
}
 
Example 24, project: hadoop, file: TestBlockStoragePolicy.java

@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
    short replication, List<StorageType> chosen, StorageType... expected) {
  final List<StorageType> types = p.chooseStorageTypes(replication,
      chosen, none, false);
  assertStorageTypes(types, expected);
}
 
Example 25, project: big-c, file: TestBlockStoragePolicy.java

/**
 * Consider a File with Cold temperature. Increase replication factor of
 * that file from 3 to 5. Make sure all replicas are created in ARCHIVE.
 */
@Test
public void testChangeColdRep() throws Exception {
  testChangeFileRep(HdfsConstants.COLD_STORAGE_POLICY_NAME, COLD,
      new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
          StorageType.ARCHIVE},
      new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE});
}
 
Example 26, project: big-c, file: TestStorageReport.java

@Before
public void startUpCluster() throws IOException {
  conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPL_FACTOR)
      .storageTypes(new StorageType[] { storageType, storageType } )
      .build();
  fs = cluster.getFileSystem();
  bpid = cluster.getNamesystem().getBlockPoolId();
}
 
Example 27, project: hadoop, file: TestBlockStoragePolicy.java

@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
    short replication, List<StorageType> chosen, StorageType... expected) {
  final List<StorageType> types = p.chooseStorageTypes(replication,
      chosen, both, false);
  assertStorageTypes(types, expected);
}
 
Example 28, project: hadoop, file: FsDatasetImpl.java

private void addVolume(Collection<StorageLocation> dataLocations,
    Storage.StorageDirectory sd) throws IOException {
  final File dir = sd.getCurrentDir();
  final StorageType storageType =
      getStorageTypeFromLocations(dataLocations, sd.getRoot());

  // If IOException raises from FsVolumeImpl() or getVolumeMap(), there is
  // nothing needed to be rolled back to make various data structures, e.g.,
  // storageMap and asyncDiskService, consistent.
  FsVolumeImpl fsVolume = new FsVolumeImpl(
      this, sd.getStorageUuid(), dir, this.conf, storageType);
  FsVolumeReference ref = fsVolume.obtainReference();
  ReplicaMap tempVolumeMap = new ReplicaMap(this);
  fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);

  synchronized (this) {
    volumeMap.addAll(tempVolumeMap);
    storageMap.put(sd.getStorageUuid(),
        new DatanodeStorage(sd.getStorageUuid(),
            DatanodeStorage.State.NORMAL,
            storageType));
    asyncDiskService.addVolume(sd.getCurrentDir());
    volumes.addVolume(ref);
  }

  LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
}
 
Example 29, project: hadoop, file: BlockStoragePolicy.java

/**
 * Compute the difference between two lists t and c so that after the diff
 * computation we have: t = t - c;
 * Further, if e is not null, set e = e + c - t;
 */
private static void diff(List<StorageType> t, Iterable<StorageType> c,
    List<StorageType> e) {
  for(StorageType storagetype : c) {
    final int i = t.indexOf(storagetype);
    if (i >= 0) {
      t.remove(i);
    } else if (e != null) {
      e.add(storagetype);
    }
  }
}
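
The Javadoc above states the contract of diff in terms of list arithmetic: after the call, t = t - c, and if e is non-null, e = e + (c - t). Since the method is private to BlockStoragePolicy, the following is only an illustrative, self-contained sketch (not part of any project above) that copies the same loop to make that behavior concrete.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.fs.StorageType;

public class DiffSemanticsSketch {

  // Same logic as the private BlockStoragePolicy#diff shown above:
  // remove each element of c from t once if present; otherwise,
  // when e is non-null, record the unmatched element in e.
  static void diff(List<StorageType> t, Iterable<StorageType> c,
      List<StorageType> e) {
    for (StorageType storagetype : c) {
      final int i = t.indexOf(storagetype);
      if (i >= 0) {
        t.remove(i);
      } else if (e != null) {
        e.add(storagetype);
      }
    }
  }

  public static void main(String[] args) {
    // t = [DISK, DISK, ARCHIVE], c = [DISK, SSD]
    List<StorageType> t = new ArrayList<>(
        Arrays.asList(StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE));
    List<StorageType> e = new ArrayList<>();
    diff(t, Arrays.asList(StorageType.DISK, StorageType.SSD), e);

    // One DISK matched and was removed from t; SSD had no match, so it went to e.
    System.out.println("t after diff: " + t);  // [DISK, ARCHIVE]
    System.out.println("e after diff: " + e);  // [SSD]
  }
}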
 
Example 30, project: hadoop, file: PBHelper.java

private static List<StorageTypesProto> convert(StorageType[][] types) {
  List<StorageTypesProto> list = Lists.newArrayList();
  if (types != null) {
    for (StorageType[] ts : types) {
      StorageTypesProto.Builder builder = StorageTypesProto.newBuilder();
      builder.addAllStorageTypes(convertStorageTypes(ts));
      list.add(builder.build());
    }
  }
  return list;
}
 