com.codahale.metrics.graphite.Graphite#org.apache.commons.configuration.PropertiesConfiguration Source Code Examples

Listed below are example usages of com.codahale.metrics.graphite.Graphite#org.apache.commons.configuration.PropertiesConfiguration; you can also click through to GitHub to view the full source code.
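
Before the individual examples, here is a minimal, self-contained sketch of the basic PropertiesConfiguration usage pattern most of the snippets below rely on (Commons Configuration 1.x; the property names and the output file path are invented for illustration):

import java.io.File;

import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;

public class PropertiesConfigurationBasics {
  public static void main(String[] args) throws ConfigurationException {
    // Build a configuration purely in memory.
    PropertiesConfiguration config = new PropertiesConfiguration();
    config.setProperty("api.admanager.clientId", "clientId");
    config.addProperty("server.port", 8080);

    // Typed getters, optionally with a default value.
    String clientId = config.getString("api.admanager.clientId");
    int port = config.getInt("server.port", 9090);
    System.out.println(clientId + " / " + port);

    // Optionally persist the in-memory properties to disk (hypothetical path).
    config.save(new File("example.properties"));
  }
}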

/**
 * Tests that the builder correctly leaves out the file path.
 */
@Test
public void testReadPropertiesFromFile_clientSecretNoFilePath() throws Exception {
  PropertiesConfiguration config = new PropertiesConfiguration();
  config.setProperty("api.admanager.clientId", "clientId");
  config.setProperty("api.admanager.refreshToken", "refreshToken");

  GoogleClientSecretsForApiBuilder builder = new GoogleClientSecretsForApiBuilder(
      configurationHelper, GoogleClientSecretsBuilder.Api.AD_MANAGER);

  thrown.expect(ValidationException.class);
  thrown.expectMessage("Client secret must be set."
      + "\nIf you do not have a client ID or secret, please create one in the API "
      + "console: https://console.developers.google.com");
  builder.from(config).build();
}
 
Example 2 (Project: AisAbnormal, File: PacketHandlerImplTest.java)
@Test
public void initNoAnalyses() throws Exception {
    PropertiesConfiguration configuration = new PropertiesConfiguration();
    configuration.addProperty(CONFKEY_ANALYSIS_COG_ENABLED, false);
    configuration.addProperty(CONFKEY_ANALYSIS_SOG_ENABLED, false);
    configuration.addProperty(CONFKEY_ANALYSIS_TYPESIZE_ENABLED, false);
    configuration.addProperty(CONFKEY_ANALYSIS_DRIFT_ENABLED, false);
    configuration.addProperty(CONFKEY_ANALYSIS_SUDDENSPEEDCHANGE_ENABLED, false);
    configuration.addProperty(CONFKEY_ANALYSIS_CLOSEENCOUNTER_ENABLED, false);
    configuration.addProperty(CONFKEY_ANALYSIS_FREEFLOW_ENABLED, false);

    final JUnit4Mockery context = new JUnit4Mockery();
    Injector injectorMock = context.mock(Injector.class);

    context.checking(new Expectations() {{
    }});

    PacketHandlerImpl sut = new PacketHandlerImpl(configuration, injectorMock, null, null, null, null);
    Set<Analysis> analyses = sut.getAnalyses();

    assertEquals(0, analyses.size());
}
 
@Before
public void setUp() throws Exception {

	// host name and port: see arangodb.properties
	PropertiesConfiguration configuration = new PropertiesConfiguration();
	configuration.setProperty("arangodb.hosts", "127.0.0.1:8529");
	configuration.setProperty("arangodb.user", "gremlin");
	configuration.setProperty("arangodb.password", "gremlin");
	Properties arangoProperties = ConfigurationConverter.getProperties(configuration);
	
	client = new ArangoDBGraphClient(null, arangoProperties, "tinkerpop", 30000, true);
	
	client.deleteGraph(graphName);
	client.deleteCollection(vertices);
	client.deleteCollection(edges);
	
}
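
The ConfigurationConverter.getProperties call above is the standard way to hand a Commons Configuration object to an API that only accepts java.util.Properties. A minimal sketch of just that conversion, reusing the dummy values from the test:

import java.util.Properties;

import org.apache.commons.configuration.ConfigurationConverter;
import org.apache.commons.configuration.PropertiesConfiguration;

public class ConfigurationConverterSketch {
  public static void main(String[] args) {
    PropertiesConfiguration configuration = new PropertiesConfiguration();
    configuration.setProperty("arangodb.hosts", "127.0.0.1:8529");
    configuration.setProperty("arangodb.user", "gremlin");

    // Flatten the Configuration into a plain java.util.Properties object.
    Properties arangoProperties = ConfigurationConverter.getProperties(configuration);
    System.out.println(arangoProperties.getProperty("arangodb.hosts")); // 127.0.0.1:8529
  }
}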
 
/**
 * Tests that the builder correctly leaves out the file path.
 */
@Test
public void testReadPropertiesFromFile_clientIdNoFilePath() throws Exception {
  PropertiesConfiguration config = new PropertiesConfiguration();
  config.setProperty("api.admanager.clientSecret", "clientSecret");
  config.setProperty("api.admanager.refreshToken", "refreshToken");

  ForApiBuilder builder = new OfflineCredentials.ForApiBuilder(
      configurationHelper, OfflineCredentials.Api.AD_MANAGER, oAuth2Helper);

  thrown.expect(ValidationException.class);
  thrown.expectMessage("Client ID must be set."
      + "\nIf you do not have a client ID or secret, please create one in the API "
      + "console: https://console.developers.google.com/project");
  builder.from(config).build();
}
 
Example 5 (Project: pinlater, File: PinLaterRedisBackendTest.java)
@BeforeClass
public static void beforeClass() throws Exception {
  QUEUE_NAME = "pinlater_redis_backend_test";
  // If there is no local Redis, skip this test.
  Assume.assumeTrue(LocalRedisChecker.isRunning(REDIS_PORT));

  configuration = new PropertiesConfiguration();
  try {
    configuration.load(ClassLoader.getSystemResourceAsStream("pinlater.redis.test.properties"));
  } catch (ConfigurationException e) {
    throw new RuntimeException(e);
  }
  InputStream redisConfigStream = ClassLoader.getSystemResourceAsStream("redis.local.json");

  backend = new PinLaterRedisBackend(
      configuration, redisConfigStream, "localhost", System.currentTimeMillis());
}
 
Example 6 (Project: singer, File: LogConfigUtils.java)
/**
 * get the PropertiesConfiguration for a specific topic from the file
 * datapipelines.properties
 *
 * @param config    - the properties configuration for the entire
 *                  datapipelines.properties
 * @param topicName - the name of the desired topic
 */
public static PropertiesConfiguration getTopicConfig(Configuration config, String topicName) {
  // Get the default values, then copy the user-specified values over them;
  // user values overwrite any default values.

  // get default settings
  PropertiesConfiguration topicConfig = toPropertiesConfiguration(
      config.subset("singer.default"));

  // add topic-specific default values
  topicConfig.setProperty("logfile_regex", topicName + "_(\\\\d+).log");
  topicConfig.setProperty("writer.kafka.topic", topicName);

  // get user values
  PropertiesConfiguration topicConfigOverrides = toPropertiesConfiguration(
      config.subset(topicName));

  // copy user settings into default values
  ConfigurationUtils.copy(topicConfigOverrides, topicConfig);
  return topicConfig;
}
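
The default-then-override behaviour in getTopicConfig comes from Configuration.subset plus ConfigurationUtils.copy, which writes every key from the source into the target and replaces keys that already exist there. A self-contained sketch of the same pattern, with invented property names:

import org.apache.commons.configuration.ConfigurationUtils;
import org.apache.commons.configuration.PropertiesConfiguration;

public class DefaultsWithOverrides {
  public static void main(String[] args) {
    // Default settings, e.g. taken from a "singer.default" subset.
    PropertiesConfiguration defaults = new PropertiesConfiguration();
    defaults.setProperty("writer.type", "kafka");
    defaults.setProperty("batch.size", 100);

    // Topic-specific overrides, e.g. taken from a topic-name subset.
    PropertiesConfiguration overrides = new PropertiesConfiguration();
    overrides.setProperty("batch.size", 500);

    // copy() overwrites any key already present in the target.
    ConfigurationUtils.copy(overrides, defaults);
    System.out.println(defaults.getInt("batch.size"));     // 500
    System.out.println(defaults.getString("writer.type")); // kafka
  }
}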
 
/**
 * Tests that the builder correctly reads from a file.
 */
@Test
public void testReadPropertiesFromFile() throws Exception {
  PropertiesConfiguration config = new PropertiesConfiguration();
  config.setProperty("api.admanager.clientId", "clientId");
  config.setProperty("api.admanager.clientSecret", "clientSecret");
  config.setProperty("api.admanager.refreshToken", "refreshToken");

  when(configurationHelper.fromFile("path")).thenReturn(config);

  ForApiBuilder builder = new OfflineCredentials.ForApiBuilder(
      configurationHelper, OfflineCredentials.Api.AD_MANAGER, oAuth2Helper);

  OfflineCredentials offlineCredentials = builder.fromFile("path").build();

  assertEquals("clientId", offlineCredentials.getClientId());
  assertEquals("clientSecret", offlineCredentials.getClientSecret());
  assertEquals("refreshToken", offlineCredentials.getRefreshToken());
}
 
Example 8 (Project: StatsAgg, File: PropertiesConfigurationWrapper.java)
private void readPropertiesConfigurationFile(InputStream configurationInputStream) {
    
    if (configurationInputStream == null) {
        return;
    }
    
    try {
        configurationInputStream_ = configurationInputStream;
        
        propertiesConfiguration_ = new PropertiesConfiguration();
        propertiesConfiguration_.setDelimiterParsingDisabled(true);
        propertiesConfiguration_.setAutoSave(false);
        propertiesConfiguration_.load(configurationInputStream, null);
    }
    catch (Exception e) {
        logger.error(e.toString() + System.lineSeparator() + StackTrace.getStringFromStackTrace(e));
        
        configurationInputStream_ = null;
        propertiesConfiguration_ = null;
    }
}
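
The setDelimiterParsingDisabled(true) call above is significant: by default, Commons Configuration 1.x splits string values on commas into multi-valued properties, and the flag must be set before anything is loaded or added. A small sketch of the difference (the host list is invented):

import org.apache.commons.configuration.PropertiesConfiguration;

public class DelimiterParsingSketch {
  public static void main(String[] args) {
    // Default behaviour: the comma-separated value becomes a two-element list.
    PropertiesConfiguration split = new PropertiesConfiguration();
    split.addProperty("hosts", "a.example.com,b.example.com");
    System.out.println(split.getList("hosts").size()); // 2

    // With delimiter parsing disabled, the value stays one literal string.
    PropertiesConfiguration literal = new PropertiesConfiguration();
    literal.setDelimiterParsingDisabled(true); // must happen before load()/addProperty()
    literal.addProperty("hosts", "a.example.com,b.example.com");
    System.out.println(literal.getString("hosts")); // a.example.com,b.example.com
  }
}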
 
Example 9 (Project: incubator-pinot, File: PinotConfigUtils.java)
public static Configuration generateServerConf(String serverHost, int serverPort, int serverAdminPort,
    String serverDataDir, String serverSegmentDir)
    throws SocketException, UnknownHostException {
  if (serverHost == null) {
    serverHost = NetUtil.getHostAddress();
  }
  if (serverPort == 0) {
    serverPort = getAvailablePort();
  }
  if (serverAdminPort == 0) {
    serverAdminPort = getAvailablePort();
  }
  if (serverDataDir == null) {
    serverDataDir = TMP_DIR + String.format("Server_%s_%d/server/data", serverHost, serverPort);
  }
  if (serverSegmentDir == null) {
    serverSegmentDir = TMP_DIR + String.format("Server_%s_%d/server/segment", serverHost, serverPort);
  }
  Configuration serverConf = new PropertiesConfiguration();
  serverConf.addProperty(CommonConstants.Helix.KEY_OF_SERVER_NETTY_HOST, serverHost);
  serverConf.addProperty(CommonConstants.Helix.KEY_OF_SERVER_NETTY_PORT, serverPort);
  serverConf.addProperty(CommonConstants.Server.CONFIG_OF_ADMIN_API_PORT, serverAdminPort);
  serverConf.addProperty(CommonConstants.Server.CONFIG_OF_INSTANCE_DATA_DIR, serverDataDir);
  serverConf.addProperty(CommonConstants.Server.CONFIG_OF_INSTANCE_SEGMENT_TAR_DIR, serverSegmentDir);
  return serverConf;
}
 
/**
 * Tests that the builder correctly reads from a file.
 */
@Test
public void testGoogleSecretsReadPropertiesFromFile() throws Exception {
  PropertiesConfiguration config = new PropertiesConfiguration();
  config.setProperty("api.admanager.clientId", "clientId");
  config.setProperty("api.admanager.clientSecret", "clientSecret");

  when(configurationHelper.fromFile("path")).thenReturn(config);

  GoogleClientSecretsForApiBuilder builder = new GoogleClientSecretsForApiBuilder(
      configurationHelper, GoogleClientSecretsBuilder.Api.AD_MANAGER);

  GoogleClientSecrets googleClientSecrets = builder.fromFile("path").build();

  assertEquals("clientId", googleClientSecrets.getInstalled().getClientId());
  assertEquals("clientSecret", googleClientSecrets.getInstalled().getClientSecret());
}
 
Example 11 (Project: hugegraph, File: CassandraTest.java)
@Test
public void testParseRepilcaWithSimpleStrategyAndDoubleReplica() {
    String strategy = CassandraOptions.CASSANDRA_STRATEGY.name();
    String replica = CassandraOptions.CASSANDRA_REPLICATION.name();

    Configuration conf = Mockito.mock(PropertiesConfiguration.class);
    Mockito.when(conf.getKeys())
           .thenReturn(ImmutableList.of(strategy, replica).iterator());
    Mockito.when(conf.getProperty(strategy))
           .thenReturn("SimpleStrategy");
    Mockito.when(conf.getProperty(replica))
           .thenReturn(ImmutableList.of("1.5"));
    HugeConfig config = new HugeConfig(conf);

    Assert.assertThrows(RuntimeException.class, () -> {
        Whitebox.invokeStatic(CassandraStore.class, "parseReplica", config);
    });
}
 
Example 12 (Project: pinlater, File: RedisBackendUtils.java)
/**
 * Creates the Redis shard map from config.
 *
 * @param redisConfigStream InputStream containing the Redis config json.
 * @param configuration PropertiesConfiguration object.
 * @return A map shardName -> RedisPools.
 */
public static ImmutableMap<String, RedisPools> buildShardMap(
    InputStream redisConfigStream,
    PropertiesConfiguration configuration) {
  RedisConfigSchema redisConfig;
  try {
    redisConfig = RedisConfigSchema.read(Preconditions.checkNotNull(redisConfigStream));
  } catch (IOException e) {
    LOG.error("Failed to load redis configuration", e);
    throw new RuntimeException(e);
  }

  ImmutableMap.Builder<String, RedisPools> shardMapBuilder =
      new ImmutableMap.Builder<String, RedisPools>();
  for (RedisConfigSchema.Shard shard : redisConfig.shards) {
    shardMapBuilder.put(
        shard.name,
        new RedisPools(
            configuration,
            shard.shardConfig.master.host,
            shard.shardConfig.master.port,
            shard.shardConfig.dequeueOnly));
  }
  return shardMapBuilder.build();
}
 
Example 13 (Project: ankush, File: PropertyFileManipulator.java)
/**
 * Edits the conf value.
 * 
 * @param file
 *            the file
 * @param propertyName
 *            the property name
 * @param newPropertyValue
 *            the new property value
 * @return true, if successful
 */
@Override
public boolean editConfValue(String file, String propertyName,
		String newPropertyValue) {
	boolean status = false;
	try {
		// read conf file
		File confFile = new File(file);

		if (!confFile.exists()) {
			System.err.println("File " + file + " does not exists.");
			status = false;
		}
		PropertiesConfiguration props = new PropertiesConfiguration(file);
		props.setProperty(propertyName, newPropertyValue);
		props.getLayout().setSeparator(propertyName, "=");
		props.save();
		status = true;
	} catch (Exception e) {
		System.err.println(e.getMessage());
	}
	return status;
}
 
/**
 * Tests that the builder correctly reads properties from a configuration.
 */
@Test
public void testReadPropertiesFromConfiguration_adWordsServiceAccount() 
    throws ValidationException {
  PropertiesConfiguration config = new PropertiesConfiguration();
  String jsonKeyFilePath = "someJsonKeyFilePath";
  config.setProperty("api.adwords.jsonKeyFilePath", jsonKeyFilePath);
  
  OfflineCredentials offlineCredentials = new OfflineCredentials.Builder()
      .forApi(OfflineCredentials.Api.ADWORDS)
      .from(config)
      .build();

  assertEquals(jsonKeyFilePath, offlineCredentials.getJsonKeyFilePath());
  assertNull("service account user should be null", offlineCredentials.getServiceAccountUser());

  // Create another credential with the service account user set.
  String serviceAccountUser = "[email protected]";
  offlineCredentials = new OfflineCredentials.Builder()
      .forApi(OfflineCredentials.Api.ADWORDS)
      .from(config)
      .withServiceAccountUser(serviceAccountUser)
      .build();
  assertEquals(jsonKeyFilePath, offlineCredentials.getJsonKeyFilePath());
  assertEquals(serviceAccountUser, offlineCredentials.getServiceAccountUser());
}
 
Example 15 (Project: incubator-pinot, File: PerfBenchmarkDriver.java)
private void startServer()
    throws Exception {
  if (!_conf.shouldStartServer()) {
    LOGGER.info("Skipping start server step. Assumes server is already started.");
    return;
  }
  Configuration serverConfiguration = new PropertiesConfiguration();
  serverConfiguration.addProperty(CommonConstants.Server.CONFIG_OF_INSTANCE_DATA_DIR, _serverInstanceDataDir);
  serverConfiguration
      .addProperty(CommonConstants.Server.CONFIG_OF_INSTANCE_SEGMENT_TAR_DIR, _serverInstanceSegmentTarDir);
  serverConfiguration.addProperty(CommonConstants.Helix.KEY_OF_SERVER_NETTY_HOST, "localhost");
  if (_segmentFormatVersion != null) {
    serverConfiguration.setProperty(CommonConstants.Server.CONFIG_OF_SEGMENT_FORMAT_VERSION, _segmentFormatVersion);
  }
  serverConfiguration.setProperty(CommonConstants.Server.CONFIG_OF_INSTANCE_ID, _serverInstanceName);
  LOGGER.info("Starting server instance: {}", _serverInstanceName);
  HelixServerStarter helixServerStarter = new HelixServerStarter(_clusterName, _zkAddress, serverConfiguration);
  helixServerStarter.start();
}
 
/**
 * Tests that the builder correctly leaves out the file path.
 */
@Test
public void testReadPropertiesFromFile_clientIdNoFilePath() throws Exception {
  PropertiesConfiguration config = new PropertiesConfiguration();
  config.setProperty("api.admanager.clientSecret", "clientSecret");
  config.setProperty("api.admanager.refreshToken", "refreshToken");

  GoogleClientSecretsForApiBuilder builder = new GoogleClientSecretsForApiBuilder(
      configurationHelper, GoogleClientSecretsBuilder.Api.AD_MANAGER);

  thrown.expect(ValidationException.class);
  thrown.expectMessage("Client ID must be set."
      + "\nIf you do not have a client ID or secret, please create one in the API "
      + "console: https://console.developers.google.com");
  builder.from(config).build();
}
 
@Test
public void testBrokerRequestHandlerWithAsFunction()
    throws Exception {
  SingleConnectionBrokerRequestHandler requestHandler =
      new SingleConnectionBrokerRequestHandler(new PropertiesConfiguration(), null, null, null,
          new BrokerMetrics("", new MetricsRegistry(), false), null);
  long currentTsMin = System.currentTimeMillis();
  JsonNode request = new ObjectMapper().readTree(
      "{\"sql\":\"SELECT now() as currentTs, fromDateTime('2020-01-01 UTC', 'yyyy-MM-dd z') as firstDayOf2020\"}");
  RequestStatistics requestStats = new RequestStatistics();
  BrokerResponseNative brokerResponse =
      (BrokerResponseNative) requestHandler.handleRequest(request, null, requestStats);
  long currentTsMax = System.currentTimeMillis();
  Assert.assertEquals(brokerResponse.getResultTable().getDataSchema().getColumnName(0), "currentTs");
  Assert.assertEquals(brokerResponse.getResultTable().getDataSchema().getColumnDataType(0),
      DataSchema.ColumnDataType.LONG);
  Assert.assertEquals(brokerResponse.getResultTable().getDataSchema().getColumnName(1), "firstDayOf2020");
  Assert.assertEquals(brokerResponse.getResultTable().getDataSchema().getColumnDataType(1),
      DataSchema.ColumnDataType.LONG);
  Assert.assertEquals(brokerResponse.getResultTable().getRows().size(), 1);
  Assert.assertEquals(brokerResponse.getResultTable().getRows().get(0).length, 2);
  Assert.assertTrue(Long.parseLong(brokerResponse.getResultTable().getRows().get(0)[0].toString()) > currentTsMin);
  Assert.assertTrue(Long.parseLong(brokerResponse.getResultTable().getRows().get(0)[0].toString()) < currentTsMax);
  Assert.assertEquals(brokerResponse.getResultTable().getRows().get(0)[1], 1577836800000L);
  Assert.assertEquals(brokerResponse.getTotalDocs(), 0);
}
 
Example 18 (Project: pinlater, File: PinLaterMySQLBackend.java)
/**
 * Creates an instance of the PinLaterMySQLBackend.
 *
 * @param configuration          configuration parameters for the backend.
 * @param serverHostName         hostname of the PinLater server.
 * @param serverStartTimeMillis  start time of the PinLater server.
 */
public PinLaterMySQLBackend(PropertiesConfiguration configuration,
                            String serverHostName,
                            long serverStartTimeMillis) throws Exception {
  super(configuration, "MySQL", serverHostName, serverStartTimeMillis);
  this.configuration = Preconditions.checkNotNull(configuration);
  this.countLimit = configuration.getInt("MYSQL_COUNT_LIMIT");
  this.numDbPerQueue = configuration.getInt("MYSQL_NUM_DB_PER_QUEUE", 1);
  this.mySQLHealthMonitor = new MySQLHealthMonitor(new HashSet<String>());

  // Start the JobQueueMonitor scheduled task.
  this.queueMonitorService = Executors.newSingleThreadScheduledExecutor(
      new ThreadFactoryBuilder().setDaemon(true).setNameFormat("MySQLJobQueueMonitor-%d")
          .build());

  // Call Base class's initialization function to initialize the shardMap, futurePool and dequeue
  // semaphoreMap.
  initialize();
}
 
Example 19 (Project: bioasq, File: GoPubMedConceptRetrievalExecutor.java)
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
  super.initialize(context);
  String conf = UimaContextHelper.getConfigParameterStringValue(context, "conf");
  PropertiesConfiguration gopubmedProperties = new PropertiesConfiguration();
  try {
    gopubmedProperties.load(getClass().getResourceAsStream(conf));
  } catch (ConfigurationException e) {
    throw new ResourceInitializationException(e);
  }
  service = new GoPubMedService(gopubmedProperties);
  pages = UimaContextHelper.getConfigParameterIntValue(context, "pages", 1);
  hits = UimaContextHelper.getConfigParameterIntValue(context, "hits", 100);
  bopQueryStringConstructor = new BagOfPhraseQueryStringConstructor();
  timeout = UimaContextHelper.getConfigParameterIntValue(context, "timeout", 4);
  limit = UimaContextHelper.getConfigParameterIntValue(context, "limit", Integer.MAX_VALUE);
}
 
Example 20 (Project: incubator-pinot, File: StarTreeIndexMapUtils.java)
/**
 * Stores the index maps for multiple star-trees into a file.
 */
public static void storeToFile(List<Map<IndexKey, IndexValue>> indexMaps, File indexMapFile)
    throws ConfigurationException {
  Preconditions.checkState(!indexMapFile.exists(), "Star-tree index map file already exists");

  PropertiesConfiguration configuration = new PropertiesConfiguration(indexMapFile);
  int numStarTrees = indexMaps.size();
  for (int i = 0; i < numStarTrees; i++) {
    Map<IndexKey, IndexValue> indexMap = indexMaps.get(i);
    for (Map.Entry<IndexKey, IndexValue> entry : indexMap.entrySet()) {
      IndexKey key = entry.getKey();
      IndexValue value = entry.getValue();
      configuration.addProperty(key.getPropertyName(i, OFFSET_SUFFIX), value._offset);
      configuration.addProperty(key.getPropertyName(i, SIZE_SUFFIX), value._size);
    }
  }
  configuration.save();
}
 
Example 21 (Project: terrapin, File: TerrapinControllerServiceImpl.java)
public TerrapinControllerServiceImpl(PropertiesConfiguration configuration,
                                     ZooKeeperManager zkManager,
                                     DFSClient hdfsClient,
                                     HelixAdmin helixAdmin,
                                     String clusterName) {
  this.configuration = configuration;
  this.zkManager = zkManager;
  this.hdfsClient = hdfsClient;
  this.helixAdmin = helixAdmin;
  this.clusterName = clusterName;

  ExecutorService threadPool = new ThreadPoolExecutor(100,
      100,
      0,
      TimeUnit.SECONDS,
      new LinkedBlockingDeque<Runnable>(1000),
      new ThreadFactoryBuilder().setDaemon(false)
                    .setNameFormat("controller-pool-%d")
                    .build());
  this.futurePool = new ExecutorServiceFuturePool(threadPool);
}
 
Example 22 (Project: atlas, File: KafkaNotificationMockTest.java)
@Test
public void testSetKafkaJAASPropertiesForMissingLoginModuleName() {
    Properties properties = new Properties();
    Configuration configuration = new PropertiesConfiguration();

    final String loginModuleControlFlag = "required";
    final String optionUseKeyTab = "false";
    final String optionStoreKey = "true";
    final String optionServiceName = "kafka";

    configuration.setProperty("atlas.jaas.KafkaClient.loginModuleControlFlag", loginModuleControlFlag);
    configuration.setProperty("atlas.jaas.KafkaClient.option.useKeyTab", optionUseKeyTab);
    configuration.setProperty("atlas.jaas.KafkaClient.option.storeKey", optionStoreKey);
    configuration.setProperty("atlas.jaas.KafkaClient.option.serviceName",optionServiceName);

    try {
        KafkaNotification kafkaNotification = new KafkaNotification(configuration);
        kafkaNotification.setKafkaJAASProperties(configuration, properties);
        String newPropertyValue = properties.getProperty(KafkaNotification.KAFKA_SASL_JAAS_CONFIG_PROPERTY);

        assertNull(newPropertyValue);
    } catch (AtlasException e) {
        fail("Failed while creating KafkaNotification object with exception : " + e.getMessage());
    }

}
 
Example 23 (Project: ankush, File: PropertyFileManipulator.java)
/**
 * Delete conf value.
 * 
 * @param file
 *            the file
 * @param propertyName
 *            the property name
 * @return true, if successful
 */
@Override
public boolean deleteConfValue(String file, String propertyName) {
	boolean status = false;
	try {
		// read conf file
		File confFile = new File(file);

		if (!confFile.exists()) {
			System.err.println("File " + file + " does not exists.");
			status = false;
		}
		PropertiesConfiguration props = new PropertiesConfiguration(file);
		props.getLayout().setSeparator(propertyName, "=");
		if (props.getProperty(propertyName) != null) {
			props.clearProperty(propertyName);
			props.save();
			status = true;
		}
	} catch (Exception e) {
		System.err.println(e.getMessage());
	}
	return status;

}
 
@Test
public void testPutOutOfCapacity()
    throws OutOfCapacityException {
  PropertiesConfiguration conf = new PropertiesConfiguration();
  conf.setProperty(MultiLevelPriorityQueue.MAX_PENDING_PER_GROUP_KEY, 2);
  ResourceManager rm = new UnboundedResourceManager(conf);
  MultiLevelPriorityQueue queue = createQueue(conf, rm);
  queue.put(createQueryRequest(groupOne, metrics));
  groupFactory.groupMap.get(groupOne).addReservedThreads(rm.getTableThreadsHardLimit());
  // we should still be able to add one more waiting query
  queue.put(createQueryRequest(groupOne, metrics));
  // this assert verifies that the put() call above is not the one
  // throwing the exception
  assertTrue(true);
  // it should throw now
  try {
    queue.put(createQueryRequest(groupOne, metrics));
  } catch (OutOfCapacityException e) {
    assertTrue(true);
    return;
  }
  assertTrue(false);
}
 
Example 25 (Project: incubator-pinot, File: SegmentMetadataImpl.java)
/**
 * Helper method to set time related information:
 * <ul>
 *   <li> Time column Name. </li>
 *   <li> Tine Unit. </li>
 *   <li> Time Interval. </li>
 *   <li> Start and End time. </li>
 * </ul>
 */
private void setTimeInfo(PropertiesConfiguration segmentMetadataPropertiesConfiguration) {
  _timeColumn = segmentMetadataPropertiesConfiguration.getString(TIME_COLUMN_NAME);
  if (segmentMetadataPropertiesConfiguration.containsKey(SEGMENT_START_TIME) && segmentMetadataPropertiesConfiguration
      .containsKey(SEGMENT_END_TIME) && segmentMetadataPropertiesConfiguration.containsKey(TIME_UNIT)) {
    try {
      _timeUnit = TimeUtils.timeUnitFromString(segmentMetadataPropertiesConfiguration.getString(TIME_UNIT));
      assert _timeUnit != null;
      _timeGranularity = new Duration(_timeUnit.toMillis(1));
      String startTimeString = segmentMetadataPropertiesConfiguration.getString(SEGMENT_START_TIME);
      String endTimeString = segmentMetadataPropertiesConfiguration.getString(SEGMENT_END_TIME);
      _segmentStartTime = Long.parseLong(startTimeString);
      _segmentEndTime = Long.parseLong(endTimeString);
      _timeInterval =
          new Interval(_timeUnit.toMillis(_segmentStartTime), _timeUnit.toMillis(_segmentEndTime), DateTimeZone.UTC);
    } catch (Exception e) {
      LOGGER.warn("Caught exception while setting time interval and granularity", e);
      _timeInterval = null;
      _timeGranularity = null;
      _segmentStartTime = Long.MAX_VALUE;
      _segmentEndTime = Long.MIN_VALUE;
    }
  }
}
 
Example 26 (Project: hadoop, File: MetricsConfig.java)
/**
 * Load configuration from a list of files until the first successful load
 * @param prefix    the metrics prefix to use
 * @param fileNames the list of filenames to try
 * @return  the configuration object
 */
static MetricsConfig loadFirst(String prefix, String... fileNames) {
  for (String fname : fileNames) {
    try {
      Configuration cf = new PropertiesConfiguration(fname)
          .interpolatedConfiguration();
      LOG.info("loaded properties from "+ fname);
      LOG.debug(toString(cf));
      MetricsConfig mc = new MetricsConfig(cf, prefix);
      LOG.debug(mc);
      return mc;
    }
    catch (ConfigurationException e) {
      if (e.getMessage().startsWith("Cannot locate configuration")) {
        continue;
      }
      throw new MetricsConfigException(e);
    }
  }
  LOG.warn("Cannot locate configuration: tried "+
           Joiner.on(",").join(fileNames));
  // default to an empty configuration
  return new MetricsConfig(new PropertiesConfiguration(), prefix);
}
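
The interpolatedConfiguration() call used above returns a copy of the configuration in which ${...} variable references have already been substituted. A brief sketch with invented keys (getString() also interpolates on access; the copy simply makes the substitution permanent):

import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.PropertiesConfiguration;

public class InterpolationSketch {
  public static void main(String[] args) {
    PropertiesConfiguration raw = new PropertiesConfiguration();
    raw.setProperty("base.dir", "/tmp/metrics");
    raw.setProperty("sink.file", "${base.dir}/out.log");

    // Lookups on the original already resolve the variable reference...
    System.out.println(raw.getString("sink.file"));         // /tmp/metrics/out.log

    // ...while interpolatedConfiguration() bakes the resolved values into a new object.
    Configuration resolved = raw.interpolatedConfiguration();
    System.out.println(resolved.getProperty("sink.file"));  // /tmp/metrics/out.log
  }
}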
 
/**
 * Tests that the builder correctly reads properties from a configuration.
 */
@Test
public void testReadPropertiesFromConfiguration_properPrefixServiceAccount()
    throws ValidationException {
  PropertiesConfiguration config = new PropertiesConfiguration();
  config.setProperty("api.admanager.jsonKeyFilePath", "jsonKeyFilePathDfp");
  config.setProperty("api.adwords.jsonKeyFilePath", "jsonKeyFilePathAdWords");

  OfflineCredentials offlineCredentials = new OfflineCredentials.Builder()
      .forApi(OfflineCredentials.Api.AD_MANAGER)
      .from(config)
      .build();

  assertEquals("jsonKeyFilePathDfp", offlineCredentials.getJsonKeyFilePath());
}
 
Example 28 (Project: smaker, File: GenUtils.java)
/**
 * Gets the configuration.
 */
private static Configuration getConfig() {
	try {
		return new PropertiesConfiguration("generator.properties");
	} catch (ConfigurationException e) {
		throw new CheckedException("Failed to load the configuration file, ", e);
	}
}
 
Example 29 (Project: sqlg, File: TestShardingGremlin.java)
@SuppressWarnings("Duplicates")
@BeforeClass
public static void beforeClass() {
    URL sqlProperties = Thread.currentThread().getContextClassLoader().getResource("sqlg.properties");
    try {
        configuration = new PropertiesConfiguration(sqlProperties);
        Assume.assumeTrue(isPostgres());
        configuration.addProperty("distributed", true);
        if (!configuration.containsKey("jdbc.url"))
            throw new IllegalArgumentException(String.format("SqlGraph configuration requires that the %s be set", "jdbc.url"));

    } catch (ConfigurationException e) {
        throw new RuntimeException(e);
    }
}
 
/**
 * Tests that the builder correctly fails on a bad configuration.
 */
@Test
public void testReadPropertiesFromConfiguration_missingClientId() throws Exception {
  PropertiesConfiguration config = new PropertiesConfiguration();
  config.setProperty("api.admanager.clientSecret", "clientSecret");
  config.setProperty("api.admanager.refreshToken", "refreshToken");

  thrown.expect(ValidationException.class);
  new OfflineCredentials.Builder()
      .forApi(OfflineCredentials.Api.AD_MANAGER)
      .from(config)
      .build();
}