Code examples for class org.apache.hadoop.util.LightWeightGSet

The examples below show how the org.apache.hadoop.util.LightWeightGSet API is used in real projects; each listing names the GitHub project and source file it was taken from.
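A quick orientation before the listings: LightWeightGSet is an intrusive hash set. It allocates no per-entry wrapper objects; instead, every element must implement LightWeightGSet.LinkedElement and carry the hash-chain pointer itself, which is what makes it cheap enough to hold every block and inode in the NameNode heap. The minimal sketch below (the DemoElement class and the main method are hypothetical, for illustration only) shows the pattern all the examples on this page rely on:

import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet;

// Hypothetical element type: LightWeightGSet requires each entry to
// implement LightWeightGSet.LinkedElement so that the set can thread
// its hash-bucket chains through the entries themselves.
class DemoElement implements LightWeightGSet.LinkedElement {
  final long id;
  private LightWeightGSet.LinkedElement next; // intrusive chain pointer

  DemoElement(long id) {
    this.id = id;
  }

  @Override
  public void setNext(LightWeightGSet.LinkedElement next) {
    this.next = next;
  }

  @Override
  public LightWeightGSet.LinkedElement getNext() {
    return next;
  }

  @Override
  public int hashCode() {
    return (int) (id ^ (id >>> 32));
  }

  @Override
  public boolean equals(Object obj) {
    return obj instanceof DemoElement && ((DemoElement) obj).id == id;
  }
}

public class LightWeightGSetDemo {
  public static void main(String[] args) {
    // Size the table at 1% of the maximum JVM heap, as INodeMap does below.
    int capacity = LightWeightGSet.computeCapacity(1.0, "demo");
    GSet<DemoElement, DemoElement> set =
        new LightWeightGSet<DemoElement, DemoElement>(capacity);
    set.put(new DemoElement(42));
    System.out.println(set.contains(new DemoElement(42))); // true
    System.out.println(set.size());                        // 1
  }
}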

Example 1  Project: hadoop  File: BlocksMap.java
BlocksMap(int capacity) {
  // Use 2% of total memory to size the GSet capacity
  this.capacity = capacity;
  this.blocks = new LightWeightGSet<Block, BlockInfoContiguous>(capacity) {
    @Override
    public Iterator<BlockInfoContiguous> iterator() {
      SetIterator iterator = new SetIterator();
      /*
       * Not tracking any modifications to set. As this set will be used
       * always under FSNameSystem lock, modifications will not cause any
       * ConcurrentModificationExceptions. But there is a chance of missing
       * newly added elements during iteration.
       */
      iterator.setTrackModification(false);
      return iterator;
    }
  };
}
 
Example 2  Project: big-c  File: BlocksMap.java
BlocksMap(int capacity) {
  // Use 2% of total memory to size the GSet capacity
  this.capacity = capacity;
  this.blocks = new LightWeightGSet<Block, BlockInfoContiguous>(capacity) {
    @Override
    public Iterator<BlockInfoContiguous> iterator() {
      SetIterator iterator = new SetIterator();
      /*
       * Not tracking any modifications to set. As this set will be used
       * always under FSNameSystem lock, modifications will not cause any
       * ConcurrentModificationExceptions. But there is a chance of missing
       * newly added elements during iteration.
       */
      iterator.setTrackModification(false);
      return iterator;
    }
  };
}
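A note on the two identical listings above (big-c is a fork of hadoop, so several examples on this page appear twice): SetIterator is the iterator class LightWeightGSet itself provides, and the anonymous subclass only switches off its modification tracking. As the inline comment says, this is safe solely because BlocksMap is always used under the FSNamesystem lock; without that external lock, a concurrent put or remove during iteration would go undetected (no ConcurrentModificationException), and elements added mid-iteration may be silently skipped.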
 
Example 3  Project: hadoop  File: INodeMap.java
static INodeMap newInstance(INodeDirectory rootDir) {
  // Compute the map capacity by allocating 1% of total memory
  int capacity = LightWeightGSet.computeCapacity(1, "INodeMap");
  GSet<INode, INodeWithAdditionalFields> map
      = new LightWeightGSet<INode, INodeWithAdditionalFields>(capacity);
  map.put(rootDir);
  return new INodeMap(map);
}
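computeCapacity(percentage, mapName) derives the table size from the JVM's maximum heap: it takes the requested percentage of Runtime.getRuntime().maxMemory(), accounts for the VM's reference size (4 or 8 bytes), and rounds to a power of two; the mapName argument only labels the log line it emits. A small probe, a sketch rather than part of the original listings, makes the scaling visible:

// Run with e.g. -Xmx1g; exact values depend on the VM's word size.
int onePercent = LightWeightGSet.computeCapacity(1.0, "probe-1");
int twoPercent = LightWeightGSet.computeCapacity(2.0, "probe-2");
System.out.println(onePercent);                   // some power of two
System.out.println(twoPercent == 2 * onePercent); // true unless clamped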
 
Example 4  Project: hadoop  File: CacheManager.java
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  float cachedBlocksPercent = conf.getFloat(
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (cachedBlocksPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    cachedBlocksPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  this.cachedBlocks = new LightWeightGSet<CachedBlock, CachedBlock>(
        LightWeightGSet.computeCapacity(cachedBlocksPercent,
            "cachedBlocks"));

}
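Note the type arguments here: cachedBlocks is a LightWeightGSet<CachedBlock, CachedBlock>, so the element is its own key. That is the usual shape of the GSet interface, which is declared as GSet<K, E extends K>: K is the lookup type and E the stored type, and when no separate key class is needed (as with CachedBlock here, or CacheEntry in the next example) the same class plays both roles.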
 
Example 5  Project: hadoop  File: RetryCache.java
/**
 * Constructor
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  capacity = capacity > 16 ? capacity : 16;
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
  this.cacheName = cacheName;
  this.retryCacheMetrics =  RetryCacheMetrics.create(this);
}
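LightWeightCache extends LightWeightGSet with time-based eviction. Reading the four constructor arguments against the signature in the Hadoop source (recommendedLength, sizeLimit, creationExpirationPeriod, deletionExpirationPeriod): the cache is created at capacity entries, hard-capped at that same capacity, expires each entry expirationTime nanoseconds after creation, and passes 0 to disable the separate deletion-expiration period. The ternary on the second line simply enforces a floor of 16 entries (Math.max(capacity, 16) would read more directly).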
 
Example 6  Project: big-c  File: INodeMap.java
static INodeMap newInstance(INodeDirectory rootDir) {
  // Compute the map capacity by allocating 1% of total memory
  int capacity = LightWeightGSet.computeCapacity(1, "INodeMap");
  GSet<INode, INodeWithAdditionalFields> map
      = new LightWeightGSet<INode, INodeWithAdditionalFields>(capacity);
  map.put(rootDir);
  return new INodeMap(map);
}
 
Example 7  Project: big-c  File: CacheManager.java
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  float cachedBlocksPercent = conf.getFloat(
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (cachedBlocksPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    cachedBlocksPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  this.cachedBlocks = new LightWeightGSet<CachedBlock, CachedBlock>(
        LightWeightGSet.computeCapacity(cachedBlocksPercent,
            "cachedBlocks"));

}
 
Example 8  Project: big-c  File: RetryCache.java
/**
 * Constructor
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  capacity = capacity > 16 ? capacity : 16;
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
  this.cacheName = cacheName;
  this.retryCacheMetrics =  RetryCacheMetrics.create(this);
}
 
Example 9  Project: NNAnalytics  File: GSetGeneratorBase.java
public static GSet<INode, INodeWithAdditionalFields> getEmptyGSet() {
  return new LightWeightGSet<>(LightWeightGSet.computeCapacity(1, "test"));
}
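A minimal factory from outside Hadoop itself: the diamond operator works because both type parameters are inferred from the declared return type, and the "test" string is only the name computeCapacity uses when logging the capacity it picked.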
 
Example 10  Project: hadoop  File: BlockManager.java
public BlockManager(final Namesystem namesystem, final Configuration conf)
  throws IOException {
  this.namesystem = namesystem;
  datanodeManager = new DatanodeManager(this, namesystem, conf);
  heartbeatManager = datanodeManager.getHeartbeatManager();

  startupDelayBlockDeletionInMs = conf.getLong(
      DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
      DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L;
  invalidateBlocks = new InvalidateBlocks(
      datanodeManager.blockInvalidateLimit, startupDelayBlockDeletionInMs);

  // Compute the map capacity by allocating 2% of total memory
  blocksMap = new BlocksMap(
      LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
  blockplacement = BlockPlacementPolicy.getInstance(
    conf, datanodeManager.getFSClusterStats(),
    datanodeManager.getNetworkTopology(),
    datanodeManager.getHost2DatanodeMap());
  storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite();
  pendingReplications = new PendingReplicationBlocks(conf.getInt(
    DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
    DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);

  blockTokenSecretManager = createBlockTokenSecretManager(conf);

  this.maxCorruptFilesReturned = conf.getInt(
    DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY,
    DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED);
  this.defaultReplication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
                                        DFSConfigKeys.DFS_REPLICATION_DEFAULT);

  final int maxR = conf.getInt(DFSConfigKeys.DFS_REPLICATION_MAX_KEY, 
                               DFSConfigKeys.DFS_REPLICATION_MAX_DEFAULT);
  final int minR = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
                               DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
  if (minR <= 0)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
        + " = " + minR + " <= 0");
  if (maxR > Short.MAX_VALUE)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
        + " = " + maxR + " > " + Short.MAX_VALUE);
  if (minR > maxR)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
        + " = " + minR + " > "
        + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
        + " = " + maxR);
  this.minReplication = (short)minR;
  this.maxReplication = (short)maxR;

  this.maxReplicationStreams =
      conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
  this.replicationStreamsHardLimit =
      conf.getInt(
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
  this.shouldCheckForEnoughRacks =
      conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null
          ? false : true;

  this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);

  this.replicationRecheckInterval = 
    conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
                DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
  
  this.encryptDataTransfer =
      conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY,
          DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
  
  this.maxNumBlocksToLog =
      conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
          DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
  this.numBlocksPerIteration = conf.getInt(
      DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT,
      DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT);
  
  LOG.info("defaultReplication         = " + defaultReplication);
  LOG.info("maxReplication             = " + maxReplication);
  LOG.info("minReplication             = " + minReplication);
  LOG.info("maxReplicationStreams      = " + maxReplicationStreams);
  LOG.info("shouldCheckForEnoughRacks  = " + shouldCheckForEnoughRacks);
  LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
  LOG.info("encryptDataTransfer        = " + encryptDataTransfer);
  LOG.info("maxNumBlocksToLog          = " + maxNumBlocksToLog);
}
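This constructor is where the capacity passed into Example 1 originates: computeCapacity(2.0, "BlocksMap") reserves roughly 2% of the heap for the block map, matching the comment inside the BlocksMap constructor itself. Sizing up front matters because LightWeightGSet uses a fixed-size bucket array and never rehashes, so the capacity chosen here stays in force for the lifetime of the NameNode.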
 
Example 11  Project: hadoop  File: BlockInfoContiguous.java
@Override
public LightWeightGSet.LinkedElement getNext() {
  return nextLinkedElement;
}
 
Example 12  Project: hadoop  File: BlockInfoContiguous.java
@Override
public void setNext(LightWeightGSet.LinkedElement next) {
  this.nextLinkedElement = next;
}
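Examples 11 and 12 together are the entire per-element cost of joining a LightWeightGSet: one nextLinkedElement reference field per block, exposed through the two LinkedElement methods. Where java.util.HashMap would allocate a separate entry object per mapping, this intrusive design keeps millions of BlockInfoContiguous instances affordable in the NameNode heap; it is the same pattern as the DemoElement sketch at the top of the page.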
 
Example 13  Project: hadoop  File: RetryCache.java
@VisibleForTesting
public LightWeightGSet<CacheEntry, CacheEntry> getCacheSet() {
  return set;
}
 
Example 14  Project: big-c  File: BlockManager.java
public BlockManager(final Namesystem namesystem, final Configuration conf)
  throws IOException {
  this.namesystem = namesystem;
  datanodeManager = new DatanodeManager(this, namesystem, conf);
  heartbeatManager = datanodeManager.getHeartbeatManager();

  startupDelayBlockDeletionInMs = conf.getLong(
      DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
      DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L;
  invalidateBlocks = new InvalidateBlocks(
      datanodeManager.blockInvalidateLimit, startupDelayBlockDeletionInMs);

  // Compute the map capacity by allocating 2% of total memory
  blocksMap = new BlocksMap(
      LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
  blockplacement = BlockPlacementPolicy.getInstance(
    conf, datanodeManager.getFSClusterStats(),
    datanodeManager.getNetworkTopology(),
    datanodeManager.getHost2DatanodeMap());
  storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite();
  pendingReplications = new PendingReplicationBlocks(conf.getInt(
    DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
    DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);

  blockTokenSecretManager = createBlockTokenSecretManager(conf);

  this.maxCorruptFilesReturned = conf.getInt(
    DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY,
    DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED);
  this.defaultReplication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
                                        DFSConfigKeys.DFS_REPLICATION_DEFAULT);

  final int maxR = conf.getInt(DFSConfigKeys.DFS_REPLICATION_MAX_KEY, 
                               DFSConfigKeys.DFS_REPLICATION_MAX_DEFAULT);
  final int minR = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
                               DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
  if (minR <= 0)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
        + " = " + minR + " <= 0");
  if (maxR > Short.MAX_VALUE)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
        + " = " + maxR + " > " + Short.MAX_VALUE);
  if (minR > maxR)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
        + " = " + minR + " > "
        + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
        + " = " + maxR);
  this.minReplication = (short)minR;
  this.maxReplication = (short)maxR;

  this.maxReplicationStreams =
      conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
  this.replicationStreamsHardLimit =
      conf.getInt(
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
  this.shouldCheckForEnoughRacks =
      conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null
          ? false : true;

  this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);

  this.replicationRecheckInterval = 
    conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
                DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
  
  this.encryptDataTransfer =
      conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY,
          DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
  
  this.maxNumBlocksToLog =
      conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
          DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
  this.numBlocksPerIteration = conf.getInt(
      DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT,
      DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT);
  
  LOG.info("defaultReplication         = " + defaultReplication);
  LOG.info("maxReplication             = " + maxReplication);
  LOG.info("minReplication             = " + minReplication);
  LOG.info("maxReplicationStreams      = " + maxReplicationStreams);
  LOG.info("shouldCheckForEnoughRacks  = " + shouldCheckForEnoughRacks);
  LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
  LOG.info("encryptDataTransfer        = " + encryptDataTransfer);
  LOG.info("maxNumBlocksToLog          = " + maxNumBlocksToLog);
}
 
Example 15  Project: big-c  File: BlockInfoContiguous.java
@Override
public LightWeightGSet.LinkedElement getNext() {
  return nextLinkedElement;
}
 
Example 16  Project: big-c  File: BlockInfoContiguous.java
@Override
public void setNext(LightWeightGSet.LinkedElement next) {
  this.nextLinkedElement = next;
}
 
Example 17  Project: big-c  File: RetryCache.java
@VisibleForTesting
public LightWeightGSet<CacheEntry, CacheEntry> getCacheSet() {
  return set;
}
 