Listed below are example usages of com.google.common.collect.Maps#newHashMapWithExpectedSize(), drawn from open-source projects; follow the linked GitHub sources for full context.
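Before the project examples, a minimal sketch (not taken from any of the projects below) of why the factory exists: Maps.newHashMapWithExpectedSize(n) sizes the backing table so that n entries fit without rehashing, whereas new HashMap<>(n) treats n as a raw table capacity, so with the default 0.75 load factor the map may still rehash before n entries are inserted (it does for n = 100).
import com.google.common.collect.Maps;

import java.util.HashMap;
import java.util.Map;

public class ExpectedSizeDemo {
    public static void main(String[] args) {
        int expectedSize = 100;
        // Guava chooses a capacity large enough that 100 puts never resize.
        Map<String, Integer> sized = Maps.newHashMapWithExpectedSize(expectedSize);
        // The constructor argument is a capacity, not an expected size: 100
        // rounds up to a table of 128 with a resize threshold of 96, so this
        // map rehashes once more than 96 entries are inserted.
        Map<String, Integer> naive = new HashMap<>(expectedSize);
        for (int i = 0; i < expectedSize; i++) {
            sized.put("key" + i, i);
            naive.put("key" + i, i);
        }
    }
}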
public static <T> Map<String, T> transformHexThreadId(Map<Integer, T> value) {
if (value == null || value.isEmpty()) {
return new HashMap<>(0);
}
Map<String, T> result = Maps.newHashMapWithExpectedSize(value.size());
for (Map.Entry<Integer, T> entry : value.entrySet()) {
String threadId = "0x" + Integer.toHexString(entry.getKey());
result.put(threadId, entry.getValue());
}
return result;
}
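A hypothetical call, with an invented thread-ID-to-name map, to show the transformation (ImmutableMap here is Guava's com.google.common.collect.ImmutableMap):
// Hypothetical input; 26500 is 0x6784 in hex.
Map<Integer, String> byDecimalId = ImmutableMap.of(26500, "worker-1");
Map<String, String> byHexId = transformHexThreadId(byDecimalId);
// byHexId is {"0x6784" -> "worker-1"}.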
public Database(Plugin plugin, Logger logger) {
this.plugin = plugin;
this.logger = logger;
this.dbConfig = new DatabaseConfiguration(plugin);
this.toplist = Maps.newHashMapWithExpectedSize(Settings.getTopitems());
}
public Map<String, ExpectedAttributeValue> build(final KCVMutation mutation) {
final Map<String, ExpectedAttributeValue> expected = Maps.newHashMapWithExpectedSize(mutation.getTotalMutations());
for (Entry addedColumn : mutation.getAdditions()) {
final StaticBuffer columnKey = addedColumn.getColumn();
addExpectedValueIfPresent(columnKey, expected);
}
for (StaticBuffer deletedKey : mutation.getDeletions()) {
addExpectedValueIfPresent(deletedKey, expected);
}
return expected;
}
private TreeArtifactValue constructTreeArtifactValueFromFilesystem(SpecialArtifact parent)
throws IOException {
Preconditions.checkState(parent.isTreeArtifact(), parent);
// Make sure the tree artifact root is a regular directory. Note that this is how the Action
// is initialized, so this should hold unless the Action itself has deleted the root.
if (!artifactPathResolver.toPath(parent).isDirectory(Symlinks.NOFOLLOW)) {
return TreeArtifactValue.MISSING_TREE_ARTIFACT;
}
Set<PathFragment> paths =
TreeArtifactValue.explodeDirectory(artifactPathResolver.toPath(parent));
Map<TreeFileArtifact, FileArtifactValue> values = Maps.newHashMapWithExpectedSize(paths.size());
for (PathFragment path : paths) {
TreeFileArtifact treeFileArtifact = TreeFileArtifact.createTreeOutput(parent, path);
FileArtifactValue fileMetadata = store.getArtifactData(treeFileArtifact);
if (fileMetadata == null) {
try {
fileMetadata = constructFileArtifactValueFromFilesystem(treeFileArtifact);
} catch (FileNotFoundException e) {
String errorMessage =
String.format(
"Failed to resolve relative path %s inside TreeArtifact %s. "
+ "The associated file is either missing or is an invalid symlink.",
treeFileArtifact.getParentRelativePath(),
treeFileArtifact.getParent().getExecPathString());
throw new IOException(errorMessage, e);
}
}
values.put(treeFileArtifact, fileMetadata);
}
return TreeArtifactValue.create(values);
}
public static Map<String, String> str2map(String str, String tokenStr, String splitStr) {
StringTokenizer tokenSTK = new StringTokenizer(str, tokenStr);
Map<String, String> result = Maps.newHashMapWithExpectedSize(tokenSTK.countTokens());
while (tokenSTK.hasMoreTokens()) {
String nextStr = tokenSTK.nextToken();
StringTokenizer splitSTK = new StringTokenizer(nextStr, splitStr);
if (splitSTK.countTokens() == 2) {
result.put(splitSTK.nextToken(), splitSTK.nextToken());
}
}
return result;
}
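A usage sketch with invented input and separators, showing that tokens which do not split into exactly two parts are silently dropped:
Map<String, String> parsed = str2map("host=localhost&port=8080&bare", "&", "=");
// parsed is {"host" -> "localhost", "port" -> "8080"}; "bare" is skipped
// because it splits into a single token rather than two.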
protected ImmutableLeafSetNodeBuilder(final int sizeHint) {
if (sizeHint >= 0) {
value = Maps.newHashMapWithExpectedSize(sizeHint);
} else {
value = new HashMap<>(DEFAULT_CAPACITY);
}
}
@Nonnull
private static RecordStoreState stateOf(@Nonnull Object... values) {
if (values.length % 2 != 0) {
throw new RecordCoreArgumentException("odd number of values given to create record store state");
}
Map<String, IndexState> indexStateMap = Maps.newHashMapWithExpectedSize(values.length / 2);
for (int i = 0; i < values.length; i += 2) {
String indexName = (String)values[i];
IndexState indexState = (IndexState)values[i + 1];
indexStateMap.put(indexName, indexState);
}
return new RecordStoreState(null, indexStateMap);
}
@Test
public void testShouldAlert() {
when(consoleConfig.getNoAlarmMinutesForClusterUpdate()).thenReturn(15);
Map<String, Date> map = Maps.newHashMapWithExpectedSize(1);
map.put("cluster", new Date());
alertManager.setClusterCreateTime(map);
alertManager.setAlertClusterWhiteList(Sets.newHashSet());
Assert.assertFalse(alertManager.shouldAlert("cluster"));
Assert.assertTrue(alertManager.shouldAlert("test"));
}
@Override
protected Map<Class<?>, RefreshableContainer> getSupportedPushEvents() {
final Map<Class<?>, RefreshableContainer> supportedEvents = Maps.newHashMapWithExpectedSize(4);
supportedEvents.put(DistributionSetCreatedEventContainer.class, distributionTableLayout.getTable());
supportedEvents.put(DistributionSetDeletedEventContainer.class, distributionTableLayout.getTable());
supportedEvents.put(SoftwareModuleCreatedEventContainer.class, softwareModuleTableLayout.getTable());
supportedEvents.put(SoftwareModuleDeletedEventContainer.class, softwareModuleTableLayout.getTable());
return supportedEvents;
}
/**
* Creates SASL properties required for an encrypted SASL negotiation.
*
* @param encryptionAlgorithm to use for SASL negotiation
* @return properties of encrypted SASL negotiation
*/
public static Map<String, String> createSaslPropertiesForEncryption(
String encryptionAlgorithm) {
Map<String, String> saslProps = Maps.newHashMapWithExpectedSize(3);
saslProps.put(Sasl.QOP, QualityOfProtection.PRIVACY.getSaslQop());
saslProps.put(Sasl.SERVER_AUTH, "true");
saslProps.put("com.sun.security.sasl.digest.cipher", encryptionAlgorithm);
return saslProps;
}
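The keys above are standard javax.security.sasl properties, so the map can be handed straight to the JDK SASL factory. The following is only an illustrative sketch, not the actual HDFS code path; the mechanism, protocol, and server name are placeholders:
// Hypothetical wiring; assumes imports of javax.security.sasl.Sasl,
// javax.security.sasl.SaslClient, javax.security.sasl.SaslException,
// and javax.security.auth.callback.CallbackHandler.
static SaslClient newEncryptedClient(CallbackHandler handler) throws SaslException {
    Map<String, String> saslProps = createSaslPropertiesForEncryption("3des");
    return Sasl.createSaslClient(
            new String[] {"DIGEST-MD5"}, // placeholder mechanism
            null,                        // no authorization id
            "hdfs",                      // placeholder protocol
            "namenode.example.com",      // placeholder server name
            saslProps,                   // QOP, server auth, and cipher from above
            handler);
}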
@Override
public Map<GraphNode, Point2D> getPositions(Collection<GraphNode> nodes) {
// Collect the positions from the Jung layout tool.
Map<GraphNode, Point2D> result =
Maps.newHashMapWithExpectedSize(nodes.size());
for (GraphNode node : nodes) {
Point2D position = jungLayout.apply(node);
result.put(node, position);
}
Point2dUtils.translatePos(region, nodes, result);
return result;
}
@BeforeClass
public static void doSetup() throws Exception {
Map<String,String> props = Maps.newHashMapWithExpectedSize(4);
// Must update config before starting server
props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(50));
props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(1));
props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
props.put(QueryServices.SEQUENCE_SALT_BUCKETS_ATTRIB, Integer.toString(0)); // Prevents RejectedExecutionException when deleting sequences
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
public Map<String, ValueDifference<String>> getPreferenceChanges() {
Map<String, String> currentSettings = Maps.newHashMapWithExpectedSize(keys.length);
for (String key : keys) {
currentSettings.put(key, preferenceStore.getString(key));
}
MapDifference<String, String> mapDifference = Maps.difference(currentSettings, originalSettings);
Map<String, ValueDifference<String>> entriesDiffering = mapDifference.entriesDiffering();
return entriesDiffering;
}
@BeforeClass
public static void doSetup() throws Exception {
Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
// Drop the HBase table metadata for this test
props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
// Must update config before starting server
startServer(getUrl(), new ReadOnlyProps(props.entrySet().iterator()));
}
@Step
private void addSecondAttributeAndVerify(final String controllerId) {
final Map<String, String> testData = Maps.newHashMapWithExpectedSize(2);
testData.put("test2", "testdata20");
controllerManagement.updateControllerAttributes(controllerId, testData, null);
testData.put("test1", "testdata1");
assertThat(targetManagement.getControllerAttributes(controllerId)).as("Controller Attributes are wrong")
.isEqualTo(testData);
}
protected void init(OptionsHelper optionsHelper) throws Exception {
if (optionsHelper.hasOption(OPTION_UPDATE_MAPPING)) {
File mappingFile = new File(optionsHelper.getOptionValue(OPTION_UPDATE_MAPPING));
String content = new String(Files.readAllBytes(mappingFile.toPath()), Charset.defaultCharset());
Map<String, TableSchemaUpdateMapping> tmpMappings = JsonUtil.readValue(content,
new TypeReference<Map<String, TableSchemaUpdateMapping>>() {
});
mappings = Maps.newHashMapWithExpectedSize(tmpMappings.size());
for (Map.Entry<String, TableSchemaUpdateMapping> entry : tmpMappings.entrySet()) {
mappings.put(entry.getKey().toUpperCase(Locale.ROOT), entry.getValue());
}
}
ifDstHiveCheck = optionsHelper.hasOption(OPTION_DST_HIVE_CHECK)
? Boolean.valueOf(optionsHelper.getOptionValue(OPTION_DST_HIVE_CHECK))
: true;
ifSchemaOnly = optionsHelper.hasOption(OPTION_SCHEMA_ONLY)
? Boolean.valueOf(optionsHelper.getOptionValue(OPTION_SCHEMA_ONLY))
: true;
ifOverwrite = optionsHelper.hasOption(OPTION_OVERWRITE)
? Boolean.valueOf(optionsHelper.getOptionValue(OPTION_OVERWRITE))
: false;
ifExecute = optionsHelper.hasOption(OPTION_EXECUTE)
? Boolean.valueOf(optionsHelper.getOptionValue(OPTION_EXECUTE))
: false;
codeOfFSHAEnabled = optionsHelper.hasOption(OPTION_FS_HA_ENABLED_CODE)
? Integer.valueOf(optionsHelper.getOptionValue(OPTION_FS_HA_ENABLED_CODE))
: 3;
String srcConfigURI = optionsHelper.getOptionValue(OPTION_KYLIN_URI_SRC);
srcCluster = new SrcClusterUtil(srcConfigURI, ifFSHAEnabled(codeOfFSHAEnabled, 0),
ifFSHAEnabled(codeOfFSHAEnabled, 1));
String dstConfigURI = optionsHelper.getOptionValue(OPTION_KYLIN_URI_DST);
dstCluster = new DstClusterUtil(dstConfigURI, ifFSHAEnabled(codeOfFSHAEnabled, 2),
ifFSHAEnabled(codeOfFSHAEnabled, 3), ifExecute);
distCpConf = new Configuration(srcCluster.jobConf);
if (optionsHelper.hasOption(OPTION_DISTCP_JOB_QUEUE)) {
distCpConf.set("mapreduce.job.queuename", optionsHelper.getOptionValue(OPTION_DISTCP_JOB_QUEUE));
}
int distCpMemory = optionsHelper.hasOption(OPTION_DISTCP_JOB_MEMORY)
? Integer.valueOf(optionsHelper.getOptionValue(OPTION_DISTCP_JOB_MEMORY))
: 1500;
int distCpJVMMemory = distCpMemory * 4 / 5;
distCpConf.set("mapreduce.map.memory.mb", "" + distCpMemory);
distCpConf.set("mapreduce.map.java.opts",
"-server -Xmx" + distCpJVMMemory + "m -Djava.net.preferIPv4Stack=true");
nThread = optionsHelper.hasOption(OPTION_THREAD_NUM)
? Integer.valueOf(optionsHelper.getOptionValue(OPTION_THREAD_NUM))
: 8;
coprocessorJarPath = optionsHelper.hasOption(OPTION_COPROCESSOR_PATH)
? optionsHelper.getOptionValue(OPTION_COPROCESSOR_PATH)
: srcCluster.getDefaultCoprocessorJarPath();
}
protected RawResolvedFeatures(LightweightTypeReference type, OverrideTester overrideTester) {
super(type, overrideTester);
this.featureIndex = Maps.newHashMapWithExpectedSize(4);
}
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
MultiRowMutationState multiRowMutationState;
Map<TableInfo, List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
multiRowMutationState = mutations.get(tableRef);
if (multiRowMutationState == null || multiRowMutationState.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null
        ? validateAndGetServerTimestamp(tableRef, multiRowMutationState)
        : serverTimeStamps[i++];
final PTable table = tableRef.getTable();
Long scn = connection.getSCN();
long mutationTimestamp = scn == null
        ? (table.isTransactional() ? HConstants.LATEST_TIMESTAMP : EnvironmentEdgeManager.currentTimeMillis())
        : scn;
Iterator<Pair<PTable, List<Mutation>>> mutationsIterator = addRowMutations(
        tableRef, multiRowMutationState, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PTable, List<Mutation>> pair = mutationsIterator.next();
PTable logicalTable = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, logicalTable.getPhysicalName(),
tableRef, logicalTable);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList != null) {
    mutationList.addAll(0, oldMutationList);
}
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(multiRowMutationState.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
// This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), multiRowMutationState, txMutations);
}
}
Map<TableInfo, List<Mutation>> unverifiedIndexMutations = new LinkedHashMap<>();
Map<TableInfo, List<Mutation>> verifiedOrDeletedIndexMutations = new LinkedHashMap<>();
filterIndexCheckerMutations(physicalTableMutationMap, unverifiedIndexMutations,
verifiedOrDeletedIndexMutations);
// Phase 1: Send index mutations with the empty column value = "unverified"
sendMutations(unverifiedIndexMutations.entrySet().iterator(), span, indexMetaDataPtr, false);
// Phase 2: Send data table and other indexes
sendMutations(physicalTableMutationMap.entrySet().iterator(), span, indexMetaDataPtr, false);
// Phase 3: Send put index mutations with the empty column value = "verified" and/or delete index mutations
try {
sendMutations(verifiedOrDeletedIndexMutations.entrySet().iterator(), span, indexMetaDataPtr, true);
} catch (SQLException ex) {
LOGGER.warn(
"Ignoring exception that happened during setting index verified value to verified=TRUE ",
ex);
}
}
}
boolean hasSourceRetention(@NonNull String fqn, @Nullable Annotation annotation) {
if (sourceRetention == null) {
sourceRetention = Maps.newHashMapWithExpectedSize(20);
// The @IntDef and @StringDef annotations have always had source retention,
// and always must (because we can't express fully qualified field references
// in a .class file).
sourceRetention.put(INT_DEF_ANNOTATION, true);
sourceRetention.put(STRING_DEF_ANNOTATION, true);
// The @Nullable and @NonNull annotations have always had class retention
sourceRetention.put(SUPPORT_NOTNULL, false);
sourceRetention.put(SUPPORT_NULLABLE, false);
// TODO: Look at support library statistics and put the other most
// frequently referenced annotations in here statically
// The resource annotations vary: up until 22.0.1 they had source
// retention but then switched to class retention.
}
Boolean source = sourceRetention.get(fqn);
if (source != null) {
return source;
}
if (annotation == null || annotation.type == null
|| annotation.type.resolvedType == null) {
// Assume it's class retention: that's what nearly all annotations
// currently are. (We do dynamic lookup of unknown ones to allow this
// version of the Gradle plugin to work on future versions of the
// support library with new annotations, where it's possible some
// annotations need to use source retention.)
sourceRetention.put(fqn, false);
return false;
} else if (annotation.type.resolvedType.getAnnotations() != null) {
for (AnnotationBinding binding : annotation.type.resolvedType.getAnnotations()) {
if (hasSourceRetention(binding)) {
sourceRetention.put(fqn, true);
return true;
}
}
}
sourceRetention.put(fqn, false);
return false;
}
@BeforeClass
public static synchronized void doSetup() throws Exception {
Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}