org.apache.hadoop.hbase.util.EnvironmentEdgeManager#injectEdge() source code examples

Listed below are example usages of org.apache.hadoop.hbase.util.EnvironmentEdgeManager#injectEdge(), drawn from open-source test code; follow the project links to view the full source on GitHub.
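
Before the individual examples, here is a minimal sketch of the pattern they all share: inject a ManualEnvironmentEdge so HBase code reads a controllable mock clock, advance that clock explicitly, and restore the default edge in a finally block. ManualEnvironmentEdge, injectEdge, setValue, incValue, currentTime and reset are the HBase utilities used in the examples below; the surrounding class and main method are purely illustrative.

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class InjectEdgeSketch {
  public static void main(String[] args) {
    ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
    edge.setValue(1234);                     // pin the mock clock to a known timestamp
    EnvironmentEdgeManager.injectEdge(edge); // HBase code now reads time from this edge
    try {
      System.out.println(EnvironmentEdgeManager.currentTime()); // prints 1234
      edge.incValue(10000);                  // move the mock clock forward by 10 seconds
      System.out.println(EnvironmentEdgeManager.currentTime()); // prints 11234
    } finally {
      EnvironmentEdgeManager.reset();        // always restore the default system clock
    }
  }
}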

private DateTieredCompactionRequest getRequest(long now, ArrayList<HStoreFile> candidates,
    boolean isMajor, boolean toCompact) throws IOException {
  ManualEnvironmentEdge timeMachine = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(timeMachine);
  timeMachine.setValue(now);
  DateTieredCompactionRequest request;
  DateTieredCompactionPolicy policy =
    (DateTieredCompactionPolicy) store.storeEngine.getCompactionPolicy();
  if (isMajor) {
    for (HStoreFile file : candidates) {
      ((MockHStoreFile) file).setIsMajor(true);
    }
    assertEquals(toCompact, policy.shouldPerformMajorCompaction(candidates));
    request = (DateTieredCompactionRequest) policy.selectMajorCompaction(candidates);
  } else {
    assertEquals(toCompact, policy.needsCompaction(candidates, ImmutableList.of()));
    request =
      (DateTieredCompactionRequest) policy.selectMinorCompaction(candidates, false, false);
  }
  return request;
}
 
Example 2, project: hbase, file: TestDefaultMemStore.java
protected void checkShouldFlush(Configuration conf, boolean expected) throws Exception {
  try {
    EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
    EnvironmentEdgeManager.injectEdge(edge);
    HBaseTestingUtility hbaseUtility = new HBaseTestingUtility(conf);
    String cf = "foo";
    HRegion region =
        hbaseUtility.createTestRegion("foobar", ColumnFamilyDescriptorBuilder.of(cf));

    edge.setCurrentTimeMillis(1234);
    Put p = new Put(Bytes.toBytes("r"));
    p.add(KeyValueTestUtil.create("r", cf, "q", 100, "v"));
    region.put(p);
    edge.setCurrentTimeMillis(1234 + 100);
    StringBuilder sb = new StringBuilder();
    assertTrue(!region.shouldFlush(sb));
    edge.setCurrentTimeMillis(1234 + 10000);
    assertTrue(region.shouldFlush(sb) == expected);
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
 
Example 3, project: hbase, file: TestRegionServerReportForDuty.java
/**
 * Tests region server reportForDuty with manual environment edge
 */
@Test
public void testReportForDutyWithEnvironmentEdge() throws Exception {
  // Start a master and wait for it to become the active/primary master.
  // Use a random unique port
  cluster.getConfiguration().setInt(HConstants.MASTER_PORT, HBaseTestingUtility.randomFreePort());
  // Set the dispatch and retry delay to 0 since we want the rpc request to be sent immediately
  cluster.getConfiguration().setInt("hbase.procedure.remote.dispatcher.delay.msec", 0);
  cluster.getConfiguration().setLong("hbase.regionserver.rpc.retry.interval", 0);

  // master has a rs. defaultMinToStart = 2
  boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(testUtil.getConfiguration());
  cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
    tablesOnMaster ? 2 : 1);
  cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART,
    tablesOnMaster ? 2 : 1);

  // Inject manual environment edge for clock skew computation between RS and master
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  master = cluster.addMaster();
  rs = cluster.addRegionServer();
  LOG.debug("Starting master: " + master.getMaster().getServerName());
  master.start();
  rs.start();

  waitForClusterOnline(master);
}
 
Example 4, project: hbase, file: TestRateLimiter.java
@Test
public void testOverconsumptionFixedIntervalRefillStrategy() throws InterruptedException {
  RateLimiter limiter = new FixedIntervalRateLimiter();
  limiter.set(10, TimeUnit.SECONDS);

  // fix the current time in order to get the precise value of interval
  EnvironmentEdge edge = new EnvironmentEdge() {
    private final long ts = System.currentTimeMillis();

    @Override
    public long currentTime() {
      return ts;
    }
  };
  EnvironmentEdgeManager.injectEdge(edge);
  // 10 resources are available, but we consume 20, putting the limiter into debt
  // Verify that we then have to wait a full refill interval before resources become available
  assertTrue(limiter.canExecute());
  limiter.consume(20);
  // To consume 1 resource we have to wait the full 1000ms refill interval
  assertEquals(1000, limiter.waitInterval(1));
  // With the fixed-interval strategy, consuming 10 resources requires the same 1000ms wait
  assertEquals(1000, limiter.waitInterval(10));
  EnvironmentEdgeManager.reset();

  limiter.setNextRefillTime(limiter.getNextRefillTime() - 900);
  // Verify that after 0.9 sec there is still no resource available
  assertFalse(limiter.canExecute(1));
  limiter.setNextRefillTime(limiter.getNextRefillTime() - 100);

  // Verify that after a full second the 10 resources are available again
  assertTrue(limiter.canExecute());
  assertEquals(0, limiter.waitInterval());
}
 
Example 5, project: hbase, file: TestRateLimiter.java
@Test
public void testUnconfiguredLimiters() throws InterruptedException {

  ManualEnvironmentEdge testEdge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(testEdge);
  long limit = Long.MAX_VALUE;

  // An unconfigured limiter is supposed to allow consuming as much as possible
  RateLimiter avgLimiter = new AverageIntervalRateLimiter();
  RateLimiter fixLimiter = new FixedIntervalRateLimiter();

  assertEquals(limit, avgLimiter.getAvailable());
  assertEquals(limit, fixLimiter.getAvailable());

  assertTrue(avgLimiter.canExecute(limit));
  avgLimiter.consume(limit);

  assertTrue(fixLimiter.canExecute(limit));
  fixLimiter.consume(limit);

  // Make sure that available is Long.MAX_VALUE
  assertTrue(limit == avgLimiter.getAvailable());
  assertTrue(limit == fixLimiter.getAvailable());

  // after 100 milliseconds, it should still be able to execute the full limit
  testEdge.incValue(100);

  assertTrue(avgLimiter.canExecute(limit));
  avgLimiter.consume(limit);

  assertTrue(fixLimiter.canExecute(limit));
  fixLimiter.consume(limit);

  // Make sure that available is Long.MAX_VALUE
  assertTrue(limit == avgLimiter.getAvailable());
  assertTrue(limit == fixLimiter.getAvailable());

  EnvironmentEdgeManager.reset();
}
 
Example 6, project: hbase, file: TestIncrementTimeRange.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      MyObserver.class.getName());
  // Make general delay zero rather than default. Timing is off in this
  // test that depends on an environment edge that is manually moved forward.
  util.getConfiguration().setInt(RemoteProcedureDispatcher.DISPATCH_DELAY_CONF_KEY, 0);
  util.startMiniCluster();
  EnvironmentEdgeManager.injectEdge(mee);
}
 
Example 7, project: hbase, file: TestAppendTimeRange.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      MyObserver.class.getName());
  // Make general delay zero rather than default. Timing is off in this
  // test that depends on an environment edge that is manually moved forward.
  util.getConfiguration().setInt(RemoteProcedureDispatcher.DISPATCH_DELAY_CONF_KEY, 0);
  util.startMiniCluster();
  EnvironmentEdgeManager.injectEdge(mee);
}
 
Example 8, project: hbase, file: TestDefaultMemStore.java
/**
 * Tests that the timeOfOldestEdit is updated correctly for the
 * various edit operations in memstore.
 * @throws Exception
 */
@Test
public void testUpdateToTimeOfOldestEdit() throws Exception {
  try {
    EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
    EnvironmentEdgeManager.injectEdge(edge);
    DefaultMemStore memstore = new DefaultMemStore();
    long t = memstore.timeOfOldestEdit();
    assertEquals(Long.MAX_VALUE, t);

    // test the case that the timeOfOldestEdit is updated after a KV add
    memstore.add(KeyValueTestUtil.create("r", "f", "q", 100, "v"), null);
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
    // snapshot() will reset timeOfOldestEdit. The method will also assert the
    // value is reset to Long.MAX_VALUE
    t = runSnapshot(memstore);

    // test the case that the timeOfOldestEdit is updated after a KV delete
    memstore.add(KeyValueTestUtil.create("r", "f", "q", 100, KeyValue.Type.Delete, "v"), null);
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
    t = runSnapshot(memstore);

    // test the case that the timeOfOldestEdit is updated after a KV upsert
    List<Cell> l = new ArrayList<>();
    KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
    kv1.setSequenceId(100);
    l.add(kv1);
    memstore.upsert(l, 1000, null);
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
 
Example 9, project: hbase, file: TestDefaultMemStore.java
@Test
public void testShouldFlushMeta() throws Exception {
  // write an edit in the META and ensure the shouldFlush (that the periodic memstore
  // flusher invokes) returns true after SYSTEM_CACHE_FLUSH_INTERVAL (even though
  // the MEMSTORE_PERIODIC_FLUSH_INTERVAL is set to a higher value)
  Configuration conf = new Configuration();
  conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, HRegion.SYSTEM_CACHE_FLUSH_INTERVAL * 10);
  HBaseTestingUtility hbaseUtility = new HBaseTestingUtility(conf);
  Path testDir = hbaseUtility.getDataTestDir();
  EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
  EnvironmentEdgeManager.injectEdge(edge);
  edge.setCurrentTimeMillis(1234);
  WALFactory wFactory = new WALFactory(conf, "1234");
  TableDescriptors tds = new FSTableDescriptors(conf);
  FSTableDescriptors.tryUpdateMetaTableDescriptor(conf);
  HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir,
      conf, tds.get(TableName.META_TABLE_NAME),
      wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO));
  // Parameterized tests add a [#] suffix; strip the [ and ] characters.
  TableDescriptor desc = TableDescriptorBuilder
      .newBuilder(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_")))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("foo")).build();
  RegionInfo hri = RegionInfoBuilder.newBuilder(desc.getTableName())
      .setStartKey(Bytes.toBytes("row_0200")).setEndKey(Bytes.toBytes("row_0300")).build();
  HRegion r = HRegion.createHRegion(hri, testDir, conf, desc, wFactory.getWAL(hri));
  addRegionToMETA(meta, r);
  edge.setCurrentTimeMillis(1234 + 100);
  StringBuilder sb = new StringBuilder();
  assertTrue(meta.shouldFlush(sb) == false);
  edge.setCurrentTimeMillis(edge.currentTime() + HRegion.SYSTEM_CACHE_FLUSH_INTERVAL + 1);
  assertTrue(meta.shouldFlush(sb) == true);
}
 
Example 10, project: hbase, file: TestStripeCompactionPolicy.java
@SuppressWarnings("unchecked")
@Test
public void testMergeExpiredFiles() throws Exception {
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  long now = defaultTtl + 2;
  edge.setValue(now);
  EnvironmentEdgeManager.injectEdge(edge);
  try {
    HStoreFile expiredFile = createFile(), notExpiredFile = createFile();
    when(expiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
    when(notExpiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
    List<HStoreFile> expired = Lists.newArrayList(expiredFile, expiredFile);
    List<HStoreFile> notExpired = Lists.newArrayList(notExpiredFile, notExpiredFile);
    List<HStoreFile> mixed = Lists.newArrayList(expiredFile, notExpiredFile);

    StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create(),
        defaultSplitSize, defaultSplitCount, defaultInitialCount, true);
    // Merge expired if there are eligible stripes.
    StripeCompactionPolicy.StripeInformationProvider si =
        createStripesWithFiles(expired, expired, expired);
    verifyWholeStripesCompaction(policy, si, 0, 2, null, 1, Long.MAX_VALUE, false);
    // Don't merge if nothing expired.
    si = createStripesWithFiles(notExpired, notExpired, notExpired);
    assertNull(policy.selectCompaction(si, al(), false));
    // Merge one expired stripe with next.
    si = createStripesWithFiles(notExpired, expired, notExpired);
    verifyWholeStripesCompaction(policy, si, 1, 2, null, 1, Long.MAX_VALUE, false);
    // Merge the biggest run out of multiple options.
    // Merge one expired stripe with next.
    si = createStripesWithFiles(notExpired, expired, notExpired, expired, expired, notExpired);
    verifyWholeStripesCompaction(policy, si, 3, 4, null, 1, Long.MAX_VALUE, false);
    // Stripe with a subset of expired files is not merged.
    si = createStripesWithFiles(expired, expired, notExpired, expired, mixed);
    verifyWholeStripesCompaction(policy, si, 0, 1, null, 1, Long.MAX_VALUE, false);
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
 
Example 11, project: hbase, file: TestStripeCompactionPolicy.java
@SuppressWarnings("unchecked")
@Test
public void testMergeExpiredStripes() throws Exception {
  // HBASE-11397
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  long now = defaultTtl + 2;
  edge.setValue(now);
  EnvironmentEdgeManager.injectEdge(edge);
  try {
    HStoreFile expiredFile = createFile(), notExpiredFile = createFile();
    when(expiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
    when(notExpiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
    List<HStoreFile> expired = Lists.newArrayList(expiredFile, expiredFile);
    List<HStoreFile> notExpired = Lists.newArrayList(notExpiredFile, notExpiredFile);

    StripeCompactionPolicy policy =
        createPolicy(HBaseConfiguration.create(), defaultSplitSize, defaultSplitCount,
          defaultInitialCount, true);

    // Merge all three expired stripes into one.
    StripeCompactionPolicy.StripeInformationProvider si =
        createStripesWithFiles(expired, expired, expired);
    verifyMergeCompatcion(policy, si, 0, 2);

    // Merge two adjacent expired stripes into one.
    si = createStripesWithFiles(notExpired, expired, notExpired, expired, expired, notExpired);
    verifyMergeCompatcion(policy, si, 3, 4);
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
 
Example 12, project: hbase, file: TestFIFOCompactionPolicy.java
@BeforeClass
public static void setEnvironmentEdge() throws Exception {
  EnvironmentEdge ee = new TimeOffsetEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(ee);
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  TEST_UTIL.startMiniCluster(1);
}
 
Example 13, project: hbase, file: TestServerNonceManager.java
@Test
public void testCleanup() throws Exception {
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  try {
    ServerNonceManager nm = createManager(6);
    ScheduledChore cleanup = nm.createCleanupScheduledChore(Mockito.mock(Stoppable.class));
    edge.setValue(1);
    assertTrue(nm.startOperation(NO_NONCE, 1, createStoppable()));
    assertTrue(nm.startOperation(NO_NONCE, 2, createStoppable()));
    assertTrue(nm.startOperation(NO_NONCE, 3, createStoppable()));
    edge.setValue(2);
    nm.endOperation(NO_NONCE, 1, true);
    edge.setValue(4);
    nm.endOperation(NO_NONCE, 2, true);
    edge.setValue(9);
    cleanup.choreForTesting();
    // Nonce 1 has been cleaned up.
    assertTrue(nm.startOperation(NO_NONCE, 1, createStoppable()));
    // Nonce 2 has not been cleaned up.
    assertFalse(nm.startOperation(NO_NONCE, 2, createStoppable()));
    // Nonce 3 was active and active ops should never be cleaned up; try to end and start.
    nm.endOperation(NO_NONCE, 3, false);
    assertTrue(nm.startOperation(NO_NONCE, 3, createStoppable()));
    edge.setValue(11);
    cleanup.choreForTesting();
    // Now, nonce 2 has been cleaned up.
    assertTrue(nm.startOperation(NO_NONCE, 2, createStoppable()));
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
 
/**
 * @param tableName name of the table to create for the test
 * @return the supporting state for the test
 */
private TestState setupTest(String tableName) throws IOException {
  byte[] tableNameBytes = Bytes.toBytes(tableName);
  HTableDescriptor desc = new HTableDescriptor(tableNameBytes);
  desc.addFamily(FAM1);
  // add the necessary simple options to create the builder
  Map<String, String> indexerOpts = new HashMap<String, String>();
  // just need to set the codec - we are going to set it later, but we need something here or the
  // initializer blows up.
  indexerOpts.put(CoveredColumnsIndexBuilder.CODEC_CLASS_NAME_KEY,
    CoveredIndexCodecForTesting.class.getName());
  Indexer.enableIndexing(desc, CoveredColumnsIndexBuilder.class, indexerOpts);

  // create the table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.createTable(desc);
  HTable primary = new HTable(UTIL.getConfiguration(), tableNameBytes);

  // overwrite the codec so we can verify the current state
  HRegion region = UTIL.getMiniHBaseCluster().getRegions(tableNameBytes).get(0);
  Indexer indexer =
      (Indexer) region.getCoprocessorHost().findCoprocessor(Indexer.class.getName());
  CoveredColumnsIndexBuilder builder =
      (CoveredColumnsIndexBuilder) indexer.getBuilderForTesting();
  VerifyingIndexCodec codec = new VerifyingIndexCodec();
  builder.setIndexCodecForTesting(codec);

  // setup the Puts we want to write
  final long ts = System.currentTimeMillis();
  EnvironmentEdge edge = new EnvironmentEdge() {

    @Override
    public long currentTimeMillis() {
      return ts;
    }
  };
  EnvironmentEdgeManager.injectEdge(edge);

  return new TestState(primary, codec, ts);
}
 
Example 15, project: hbase, file: TestCompactingMemStore.java
/**
 * Tests that the timeOfOldestEdit is updated correctly for the
 * various edit operations in memstore.
 */
@Override
@Test
public void testUpdateToTimeOfOldestEdit() throws Exception {
  try {
    EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
    EnvironmentEdgeManager.injectEdge(edge);
    long t = memstore.timeOfOldestEdit();
    assertEquals(Long.MAX_VALUE, t);

    // test the case that the timeOfOldestEdit is updated after a KV add
    memstore.add(KeyValueTestUtil.create("r", "f", "q", 100, "v"), null);
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
    // The method will also assert
    // the value is reset to Long.MAX_VALUE
    t = runSnapshot(memstore, true);

    // test the case that the timeOfOldestEdit is updated after a KV delete
    memstore.add(KeyValueTestUtil.create("r", "f", "q", 100, KeyValue.Type.Delete, "v"), null);
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
    t = runSnapshot(memstore, true);

    // test the case that the timeOfOldestEdit is updated after a KV upsert
    List<Cell> l = new ArrayList<>();
    KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
    kv1.setSequenceId(100);
    l.add(kv1);
    memstore.upsert(l, 1000, null);
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
 
Example 16, project: hbase, file: TestConnection.java
/**
 * Test that connection can become idle without breaking everything.
 */
@Test
public void testConnectionIdle() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  TEST_UTIL.createTable(tableName, FAM_NAM).close();
  int idleTime = 20000;
  boolean previousBalance = TEST_UTIL.getAdmin().balancerSwitch(false, true);

  Configuration c2 = new Configuration(TEST_UTIL.getConfiguration());
  // We want to work on a separate connection.
  c2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1));
  c2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); // Don't retry: retry = test failed
  c2.setInt(RpcClient.IDLE_TIME, idleTime);

  Connection connection = ConnectionFactory.createConnection(c2);
  final Table table = connection.getTable(tableName);

  Put put = new Put(ROW);
  put.addColumn(FAM_NAM, ROW, ROW);
  table.put(put);

  ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
  mee.setValue(System.currentTimeMillis());
  EnvironmentEdgeManager.injectEdge(mee);
  LOG.info("first get");
  table.get(new Get(ROW));

  LOG.info("first get - changing the time & sleeping");
  mee.incValue(idleTime + 1000);
  Thread.sleep(1500); // we need to wait a little for the connection to be seen as idle.
                      // 1500 = sleep time in RpcClient#waitForWork + a margin

  LOG.info("second get - connection has been marked idle in the middle");
  // To check that the connection actually became idle would need to read some private
  // fields of RpcClient.
  table.get(new Get(ROW));
  mee.incValue(idleTime + 1000);

  LOG.info("third get - connection is idle, but the reader doesn't know yet");
  // We're testing here a special case:
  // time limit reached BUT connection not yet reclaimed AND a new call.
  // in this situation, we don't close the connection, instead we use it immediately.
  // If we're very unlucky we can have a race condition in the test: the connection is already
  // under closing when we do the get, so we have an exception, and we don't retry as the
  // retry number is 1. The probability is very very low, and seems acceptable for now. It's
  // a test issue only.
  table.get(new Get(ROW));

  LOG.info("we're done - time will change back");

  table.close();

  connection.close();
  EnvironmentEdgeManager.reset();
  TEST_UTIL.getAdmin().balancerSwitch(previousBalance, true);
}
 
Example 17, project: hbase, file: TestRateLimiter.java
@Test
public void testExtremeLimiters() throws InterruptedException {

  ManualEnvironmentEdge testEdge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(testEdge);
  long limit = Long.MAX_VALUE - 1;

  RateLimiter avgLimiter = new AverageIntervalRateLimiter();
  avgLimiter.set(limit, TimeUnit.SECONDS);
  RateLimiter fixLimiter = new FixedIntervalRateLimiter();
  fixLimiter.set(limit, TimeUnit.SECONDS);

  assertEquals(limit, avgLimiter.getAvailable());
  assertEquals(limit, fixLimiter.getAvailable());

  assertTrue(avgLimiter.canExecute(limit / 2));
  avgLimiter.consume(limit / 2);

  assertTrue(fixLimiter.canExecute(limit / 2));
  fixLimiter.consume(limit / 2);

  // Make sure that what is available is whatever is left
  assertTrue((limit - (limit / 2)) == avgLimiter.getAvailable());
  assertTrue((limit - (limit / 2)) == fixLimiter.getAvailable());

  // after 100 milliseconds, both should not be able to execute the limit
  testEdge.incValue(100);

  assertFalse(avgLimiter.canExecute(limit));
  assertFalse(fixLimiter.canExecute(limit));

  // after 500 milliseconds, the average interval limiter should be able to execute the limit
  testEdge.incValue(500);
  assertTrue(avgLimiter.canExecute(limit));
  assertFalse(fixLimiter.canExecute(limit));

  // Make sure that available is correct
  assertTrue(limit == avgLimiter.getAvailable());
  assertTrue((limit - (limit / 2)) == fixLimiter.getAvailable());

  // after 500 milliseconds, both should be able to execute
  testEdge.incValue(500);
  assertTrue(avgLimiter.canExecute(limit));
  assertTrue(fixLimiter.canExecute(limit));

  // Make sure that the full limit is available again
  assertTrue(limit == avgLimiter.getAvailable());
  assertTrue(limit == fixLimiter.getAvailable());

  EnvironmentEdgeManager.reset();
}
 
Example 18, project: hbase, file: TestHBaseClient.java
@Test
public void testFailedServer(){
  ManualEnvironmentEdge ee = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(ee);
  FailedServers fs = new FailedServers(new Configuration());
  Throwable testThrowable = new Throwable(); // throwable already tested in TestFailedServers.java

  InetSocketAddress ia = InetSocketAddress.createUnresolved("bad", 12);
  // same server as ia
  InetSocketAddress ia2 = InetSocketAddress.createUnresolved("bad", 12);
  InetSocketAddress ia3 = InetSocketAddress.createUnresolved("badtoo", 12);
  InetSocketAddress ia4 = InetSocketAddress.createUnresolved("badtoo", 13);


  Assert.assertFalse(fs.isFailedServer(ia));

  fs.addToFailedServers(ia, testThrowable);
  Assert.assertTrue(fs.isFailedServer(ia));
  Assert.assertTrue(fs.isFailedServer(ia2));

  ee.incValue(1);
  Assert.assertTrue(fs.isFailedServer(ia));
  Assert.assertTrue(fs.isFailedServer(ia2));

  ee.incValue(RpcClient.FAILED_SERVER_EXPIRY_DEFAULT + 1);
  Assert.assertFalse(fs.isFailedServer(ia));
  Assert.assertFalse(fs.isFailedServer(ia2));

  fs.addToFailedServers(ia, testThrowable);
  fs.addToFailedServers(ia3, testThrowable);
  fs.addToFailedServers(ia4, testThrowable);

  Assert.assertTrue(fs.isFailedServer(ia));
  Assert.assertTrue(fs.isFailedServer(ia2));
  Assert.assertTrue(fs.isFailedServer(ia3));
  Assert.assertTrue(fs.isFailedServer(ia4));

  ee.incValue(RpcClient.FAILED_SERVER_EXPIRY_DEFAULT + 1);
  Assert.assertFalse(fs.isFailedServer(ia));
  Assert.assertFalse(fs.isFailedServer(ia2));
  Assert.assertFalse(fs.isFailedServer(ia3));
  Assert.assertFalse(fs.isFailedServer(ia4));


  fs.addToFailedServers(ia3,testThrowable);
  Assert.assertFalse(fs.isFailedServer(ia4));
}
 
Example 19, project: hbase, file: TestFlushRegionEntry.java
@BeforeClass
public static void setUp() throws Exception {
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  edge.setValue(12345);
  EnvironmentEdgeManager.injectEdge(edge);
}
 
Example 20, project: hbase, file: TestDefaultCompactSelection.java
@Test
public void testCompactionRatio() throws IOException {
  TimeOffsetEnvironmentEdge edge = new TimeOffsetEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  /**
   * NOTE: these tests are specific to describe the implementation of the
   * current compaction algorithm.  Developed to ensure that refactoring
   * doesn't implicitly alter this.
   */
  long tooBig = maxSize + 1;

  // default case. preserve user ratio on size
  compactEquals(sfCreate(100,50,23,12,12), 23, 12, 12);
  // less than compact threshold = don't compact
  compactEquals(sfCreate(100,50,25,12,12) /* empty */);
  // greater than compact size = skip those
  compactEquals(sfCreate(tooBig, tooBig, 700, 700, 700), 700, 700, 700);
  // big size + threshold
  compactEquals(sfCreate(tooBig, tooBig, 700,700) /* empty */);
  // small files = don't care about ratio
  compactEquals(sfCreate(7,1,1), 7,1,1);

  // don't exceed max file compact threshold
  // note:  file selection starts with largest to smallest.
  compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);

  compactEquals(sfCreate(50, 10, 10 ,10, 10), 10, 10, 10, 10);

  compactEquals(sfCreate(10, 10, 10, 10, 50), 10, 10, 10, 10);

  compactEquals(sfCreate(251, 253, 251, maxSize -1), 251, 253, 251);

  compactEquals(sfCreate(maxSize -1,maxSize -1,maxSize -1) /* empty */);

  // Always try and compact something to get below blocking storefile count
  this.conf.setLong("hbase.hstore.compaction.min.size", 1);
  store.storeEngine.getCompactionPolicy().setConf(conf);
  compactEquals(sfCreate(512,256,128,64,32,16,8,4,2,1), 4,2,1);
  this.conf.setLong("hbase.hstore.compaction.min.size", minSize);
  store.storeEngine.getCompactionPolicy().setConf(conf);

  /* MAJOR COMPACTION */
  // if a major compaction has been forced, then compact everything
  compactEquals(sfCreate(50,25,12,12), true, 50, 25, 12, 12);
  // also choose files < threshold on major compaction
  compactEquals(sfCreate(12,12), true, 12, 12);
  // even if one of those files is too big
  compactEquals(sfCreate(tooBig, 12,12), true, tooBig, 12, 12);
  // don't exceed max file compact threshold, even with major compaction
  store.forceMajor = true;
  compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);
  store.forceMajor = false;
  // if we exceed maxCompactSize, downgrade to minor
  // if not, it creates a 'snowball effect' when files >> maxCompactSize:
  // the last file in compaction is the aggregate of all previous compactions
  compactEquals(sfCreate(100,50,23,12,12), true, 23, 12, 12);
  conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1);
  conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
  store.storeEngine.getCompactionPolicy().setConf(conf);
  try {
    // The modTime of the mocked store file is currentTimeMillis, so we need to increase the
    // timestamp a bit to make sure that now - lowestModTime is greater than major compaction
    // period(1ms).
    // trigger an aged major compaction
    List<HStoreFile> candidates = sfCreate(50, 25, 12, 12);
    edge.increment(2);
    compactEquals(candidates, 50, 25, 12, 12);
    // make sure that exceeding maxCompactSize also downgrades aged minors
    candidates = sfCreate(100, 50, 23, 12, 12);
    edge.increment(2);
    compactEquals(candidates, 23, 12, 12);
  } finally {
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
  }

  /* REFERENCES == file is from a region that was split */
  // treat storefiles that have references like a major compaction
  compactEquals(sfCreate(true, 100,50,25,12,12), 100, 50, 25, 12, 12);
  // reference files shouldn't obey max threshold
  compactEquals(sfCreate(true, tooBig, 12,12), tooBig, 12, 12);
  // reference files should obey max file compact to avoid OOM
  compactEquals(sfCreate(true, 7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3);

  // empty case
  compactEquals(new ArrayList<>() /* empty */);
  // empty case (because all files are too big)
  compactEquals(sfCreate(tooBig, tooBig) /* empty */);
}