Below are example usages of java.util.Collections#emptySet(). Click the link to view the source code on GitHub, or leave a comment on the right.
/**
 * Test to make sure we honor the size of buckets when
 * choosing which buckets to move.
 * @throws Exception
 */
public void testMoveBucketsWithSizes() throws Exception {
    PartitionedRegionLoadModel model = new PartitionedRegionLoadModel(
            bucketOperator, 0, false, false, true, true, 6,
            getAddressComparor(false), logger, true,
            Collections.<InternalDistributedMember>emptySet(), null);

    InternalDistributedMember member1 =
            new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
    InternalDistributedMember member2 =
            new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);

    // Create some imbalanced nodes: member1 holds all six buckets
    // (one of size 3, five of size 1); member2 holds nothing.
    PartitionMemberInfoImpl details1 = buildDetails(member1, 500, 500,
            new int[] {3,1,1,1,1,1}, new int[] {1,1,1,1,1,1});
    PartitionMemberInfoImpl details2 = buildDetails(member2, 500, 500,
            new int[] {0,0,0,0,0,0}, new int[] {0,0,0,0,0,0});
    model.addRegion("a", Arrays.asList(details1, details2), new FakeOfflineDetails());

    // Only bucket moves are expected -- no creates and no primary moves.
    assertEquals(4, doMoves(model));
    assertEquals(Collections.emptyList(), bucketOperator.creates);
    assertEquals(Collections.emptyList(), bucketOperator.primaryMoves);

    // Four of the buckets should move to member2, because
    // member1 has 1 bucket that is size 3.
    List<Move> expectedMoves = new ArrayList<Move>();
    for (int i = 0; i < 4; i++) {
        expectedMoves.add(new Move(member1, member2));
    }
    assertEquals(expectedMoves, bucketOperator.bucketMoves);
}
@Test
public void testLoadJsonTestMatrixWithMetaTags() throws IOException {
    // Loader restricted to tests carrying one of the given meta tags.
    proctorLoader = new ExampleJsonProctorLoader(
            Collections.emptySet(),
            ImmutableSet.of("sometag", "example_tag")
    );
    final String path = getClass().getResource("example-test-matrix.json").getPath();
    final File testMatrixFile = new File(path);
    // FIX: the original never closed the Reader, leaking a file handle on
    // every run (and on every assertion failure). try-with-resources closes
    // it on all paths.
    try (Reader reader = new FileReader(testMatrixFile)) {
        final TestMatrixArtifact testMatrixArtifact = proctorLoader.loadJsonTestMatrix(reader);
        // only verify test names because other checks are done in testLoadJsonTestMatrix()
        Assertions.assertThat(testMatrixArtifact.getTests().keySet())
                .containsExactlyInAnyOrder("sometst", "meta_tags_tst");
    }
}
/**
 * Returns the single-element set containing this domain when user deletion
 * is allowed; otherwise an immutable empty set. Never returns null.
 */
public Set<String> getDomainsThatAllowUserDeletion()
{
    return getAllowsUserDeletion()
            ? Collections.singleton(getDomain())
            : Collections.<String>emptySet();
}
@Override
void initTestValues() {
// Baseline state before any update is applied: an immutable empty set.
emptyValue = Collections.emptySet();
// Updates for the empty / unexpired / expired scenarios. A null value
// presumably represents a removal or tombstone entry -- TODO confirm
// against the base test class this overrides.
updateEmpty = mapOf(Tuple2.of(3, "3"), Tuple2.of(5, "5"), Tuple2.of(23, null), Tuple2.of(10, "10"));
updateUnexpired = mapOf(Tuple2.of(12, "12"), Tuple2.of(24, null), Tuple2.of(7, "7"));
updateExpired = mapOf(Tuple2.of(15, "15"), Tuple2.of(25, null), Tuple2.of(4, "4"));
// Expected read results: the entry sets of the corresponding update maps.
getUpdateEmpty = updateEmpty.entrySet();
getUnexpired = updateUnexpired.entrySet();
getUpdateExpired = updateExpired.entrySet();
}
@Test
public void testSerializeEmptyAddressList() throws IOException {
    // An AddressList built from an empty set must round-trip through JSON
    // with an explicit (empty) "items" array rather than omitting the field.
    AddressList emptyList = new AddressList(Collections.emptySet());
    ObjectMapper jsonMapper = new ObjectMapper();

    String json = jsonMapper.writeValueAsString(emptyList);
    assertTrue(json.matches(".*\"items\"\\s*:\\s*\\[\\s*\\].*"),
            "Serialized form '" + json + "' does not include empty items list");

    AddressList roundTripped = jsonMapper.readValue(json, AddressList.class);
    assertThat(roundTripped.getItems(), is(emptyList.getItems()));
}
/**
 * Extracts a mutable set of ids from a query value: either a plain String
 * id, or a Mongo DBObject carrying a list of ids under the "$in" operator.
 * Anything else yields the immutable empty set.
 */
@SuppressWarnings("unchecked")
private Set<String> extractIdSet(final Object obj) {
    // A bare String id becomes a single-element mutable set.
    if (obj instanceof String) {
        return new HashSet<String>(Arrays.asList((String) obj));
    }
    if (obj instanceof DBObject) {
        final Object inClause = ((DBObject) obj).get("$in");
        if (inClause instanceof List) {
            // NOTE(review): unchecked cast assumes the list holds Strings -- verify against callers.
            return new HashSet<String>((List<String>) inClause);
        }
    }
    return Collections.emptySet();
}
@Test
public void searchWithEntityTypesAndEntityFilters() throws AtlasBaseException {
    // Search across both entity types, restricted to owners containing "ETL".
    SearchParameters searchParams = new SearchParameters();
    searchParams.setTypeName(DATABASE_TYPE + "," + HIVE_TABLE_TYPE);
    SearchParameters.FilterCriteria ownerFilter =
            getSingleFilterCondition("owner", SearchParameters.Operator.CONTAINS, "ETL");
    searchParams.setEntityFilters(ownerFilter);
    searchParams.setLimit(20);

    SearchContext searchContext =
            new SearchContext(searchParams, typeRegistry, graph, Collections.<String>emptySet());
    EntitySearchProcessor searchProcessor = new EntitySearchProcessor(searchContext);

    // Exactly four entities in the fixture data match the combined criteria.
    assertEquals(searchProcessor.execute().size(), 4);
}
/**
 * Default: An empty set.
 * @return User task ids.
 */
public static Set<UUID> userTaskIds(HttpServletRequest request) throws UnsupportedEncodingException {
    String parameterName = caseSensitiveParameterName(request.getParameterMap(), USER_TASK_IDS_PARAM);
    // Parameter absent: fall back to the documented default, an empty set.
    if (parameterName == null) {
        return Collections.emptySet();
    }
    String decoded = urlDecode(request.getParameter(parameterName));
    return Arrays.stream(decoded.split(","))
            .map(UUID::fromString)
            .collect(Collectors.toSet());
}
/**
 * Collects the type names of all classifications attached to the entity.
 * Returns the immutable empty set when the entity is null or has no
 * classifications; otherwise a mutable set of type names.
 */
public Set<String> getClassificationNames(AtlasEntityHeader entity) {
    if (entity == null || entity.getClassifications() == null) {
        return Collections.emptySet();
    }
    Set<String> names = new HashSet<>();
    for (AtlasClassification classification : entity.getClassifications()) {
        names.add(classification.getTypeName());
    }
    return names;
}
/** Deserializer: decodes {@code length} bytes from the buffer into a set of enum flags. */
@Nonnull
public static <T extends Enum<T> & Wrapper> Set<T> fromBuffer(@Nonnull Class<T> type, @Nonnull ByteBuffer buffer, @Nonnegative int length) {
    // Fail fast if the caller asked for more bytes than the buffer holds.
    Preconditions.checkPositionIndex(length, buffer.remaining(), "Insufficient bytes remaining in buffer");
    if (length == 0) {
        return Collections.emptySet();
    }
    if (length == 1) {
        // Single-byte fast path avoids the intermediate array copy.
        return fromByte(type, buffer.get());
    }
    byte[] encoded = new byte[length];
    buffer.get(encoded);
    return fromBytes(type, encoded);
}
protected Set<RTLMemoryLocation> initUsedMemoryLocations() {
    // Default: no memory locations are used; subclasses may override.
    return Collections.<RTLMemoryLocation>emptySet();
}
/**
 * Sets the key fields. A null argument clears to the immutable empty set;
 * otherwise a defensive immutable copy of the argument is stored.
 */
public void setKeyFields(Set<String> keyFields) {
    if (keyFields == null) {
        this.keyFields = Collections.emptySet();
    } else {
        this.keyFields = ImmutableSet.copyOf(keyFields);
    }
}
@Test
void testEquals_DifferentBaseImplValues() {
// Baseline element with all-default values; each assertion below varies
// exactly one constructor argument and must break equality.
ElementImpl e1 = new ElementImpl(DFLT_MIN_OCCURS, DFLT_MAX_OCCURS, DFLT_ID, DFLT_POSITION, DFLT_VALUES, DFLT_TITLE, DFLT_DESCR);
// Differs in minOccurs
assertNotEquals(e1, new ElementImpl(2, DFLT_MAX_OCCURS, DFLT_ID, DFLT_POSITION, DFLT_VALUES, DFLT_TITLE, DFLT_DESCR));
// Differs in maxOccurs
assertNotEquals(e1, new ElementImpl(DFLT_MIN_OCCURS, 10, DFLT_ID, DFLT_POSITION, DFLT_VALUES, DFLT_TITLE, DFLT_DESCR));
// Differs in id
assertNotEquals(e1, new ElementImpl(DFLT_MIN_OCCURS, DFLT_MAX_OCCURS, "E0002", DFLT_POSITION, DFLT_VALUES, DFLT_TITLE, DFLT_DESCR));
// Differs in title
assertNotEquals(e1, new ElementImpl(DFLT_MIN_OCCURS, DFLT_MAX_OCCURS, DFLT_ID, DFLT_POSITION, DFLT_VALUES, "Another Title", DFLT_DESCR));
// Differs in description
assertNotEquals(e1, new ElementImpl(DFLT_MIN_OCCURS, DFLT_MAX_OCCURS, DFLT_ID, DFLT_POSITION, DFLT_VALUES, DFLT_TITLE, "Do not use!"));
// Minimal anonymous EDISimpleType used only as the referenced standard
// type for the reference constructed below.
EDISimpleType standard = new EDISimpleType() {
@Override
public String getId() {
return "E0003";
}
@Override
public String getCode() {
return "3";
}
@Override
public Type getType() {
return Type.ELEMENT;
}
@Override
public Base getBase() {
return Base.STRING;
}
@Override
public int getNumber() {
return 3;
}
@Override
public long getMinLength() {
return 0;
}
@Override
public long getMaxLength() {
return 5;
}
@Override
public Set<String> getValueSet() {
// No enumerated values for this synthetic standard type.
return Collections.emptySet();
}
};
// Reference wrapping the standard type, reusing the default occurrence bounds.
EDIReference reference = new EDIReference() {
@Override
public EDIType getReferencedType() {
return standard;
}
@Override
public int getMinOccurs() {
return DFLT_MIN_OCCURS;
}
@Override
public int getMaxOccurs() {
return DFLT_MAX_OCCURS;
}
};
// An otherwise-identical element must become unequal to the baseline once
// a standard reference is attached.
ElementImpl e3 = new ElementImpl(DFLT_MIN_OCCURS, DFLT_MAX_OCCURS, DFLT_ID, DFLT_POSITION, DFLT_VALUES, DFLT_TITLE, DFLT_DESCR);
e3.setStandardReference(reference);
assertNotEquals(e1, e3);
}
/**
 * Creates a new Builder instance allowing any origin, "*" which is the
 * wildcard origin.
 */
public Builder() {
    // Wildcard mode: no explicit origins are recorded.
    this.anyOrigin = true;
    this.origins = Collections.emptySet();
}
/**
 * Runs the chained filters and materializes the matches. A null pipeline
 * (no filter produced any stream) yields the immutable empty collection.
 */
public Collection<Person> search() {
    Stream<Person> pipeline =
            filterSocialSecurityNumber(filterEmail(filterDocumentIdNumber(filterName(filterUsername(null)))));
    if (pipeline == null) {
        return Collections.emptySet();
    }
    return pipeline.collect(Collectors.toSet());
}
@Override
public Set<PlayerId> friends() {
    // This implementation has no friends list.
    return Collections.<PlayerId>emptySet();
}
@Override
public Set<Relationship> getRelationships() {
    // This implementation exposes no relationships.
    return Collections.<Relationship>emptySet();
}
/**
 * Returns a set of regions that are live but are not empty nor have a prune upper bound recorded. These regions
 * will stop the progress of pruning.
 * <p/>
 * Note that this can return false positives in the following case -
 * At time 't' empty regions were recorded, and time 't+1' prune iteration was invoked.
 * Since a new set of regions was recorded at time 't+1', all regions recorded as empty before time 't + 1' will
 * now be reported as blocking the pruning, even though they are empty. This is because we cannot tell if those
 * regions got any new data between time 't' and 't + 1'.
 *
 * @param numRegions maximum number of regions to return; a negative value (or a value greater than or equal
 *                   to the result size) returns all of them
 * @param time time in milliseconds or relative time, regions recorded before the given time are returned
 * @return {@link Set} of regions that needs to be compacted and flushed
 */
@Override
@SuppressWarnings("WeakerAccess")
public Set<String> getRegionsToBeCompacted(Integer numRegions, String time) throws IOException {
    // Fetch the live regions at the given time
    RegionsAtTime timeRegion = getRegionsOnOrBeforeTime(time);
    if (timeRegion.getRegions().isEmpty()) {
        return Collections.emptySet();
    }

    Long timestamp = timeRegion.getTime();
    SortedSet<String> regions = timeRegion.getRegions();

    // Retain only the regions that are still live right now
    SortedSet<String> liveRegions = getRegionsOnOrBeforeTime(NOW).getRegions();
    regions = Sets.newTreeSet(Sets.intersection(liveRegions, regions));

    // Collect the names of regions recorded as empty after 'timestamp'
    SortedSet<byte[]> emptyRegions = dataJanitorState.getEmptyRegionsAfterTime(timestamp, null);
    SortedSet<String> emptyRegionNames = new TreeSet<>();
    Iterable<String> regionStrings = Iterables.transform(emptyRegions, TimeRegions.BYTE_ARR_TO_STRING_FN);
    for (String regionString : regionStrings) {
        emptyRegionNames.add(regionString);
    }

    Set<String> nonEmptyRegions = Sets.newHashSet(Sets.difference(regions, emptyRegionNames));

    // Get all pruned regions for the current time and remove them from the nonEmptyRegions,
    // resulting in a set of regions that are not empty and have not been registered prune upper bound
    List<RegionPruneInfo> prunedRegions = dataJanitorState.getPruneInfoForRegions(null);
    for (RegionPruneInfo prunedRegion : prunedRegions) {
        // FIX: Set.remove is already a no-op for absent elements; the former
        // contains() pre-check was redundant.
        nonEmptyRegions.remove(prunedRegion.getRegionNameAsString());
    }

    // NOTE(review): numRegions is auto-unboxed here and would NPE on null --
    // confirm callers never pass null.
    if ((numRegions < 0) || (numRegions >= nonEmptyRegions.size())) {
        return nonEmptyRegions;
    }

    // Return an arbitrary subset of the requested size.
    Set<String> subsetRegions = new HashSet<>(numRegions);
    for (String regionName : nonEmptyRegions) {
        if (subsetRegions.size() == numRegions) {
            break;
        }
        subsetRegions.add(regionName);
    }
    return subsetRegions;
}
@Override
public Set<RuntimePackageDependency> getAdditionalRuntimePackages() {
    // This implementation requires no extra runtime packages.
    return Collections.<RuntimePackageDependency>emptySet();
}
/**
 * <p>Return an iterable object that iterates over this node's children.
 * The iterator does not support the optional operation
 * {@link Iterator#remove()}.</p>
 *
 * <p>To iterate over a node's siblings, one can write</p>
 * <pre>Node n = ...;
 * for (Node child : n.children()) { ...</pre>
 */
public Iterable<Node> children() {
    // A node with no first child has no children at all.
    if (first != null) {
        return new SiblingNodeIterable(first);
    }
    return Collections.emptySet();
}