com.google.common.collect.Multiset#add() Source Code Examples

Listed below are example usages of com.google.common.collect.Multiset#add(); you can also follow each example's link to view the full source code on GitHub.
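Before the per-project examples, here is a minimal, self-contained sketch of the two add overloads exercised throughout this page (the class name MultisetAddDemo and the sample values are illustrative only, not taken from any of the listed projects):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MultisetAddDemo {
    public static void main(String[] args) {
        Multiset<String> words = HashMultiset.create();

        // add(E) adds a single occurrence and returns true, per the Collection contract
        words.add("guava");
        words.add("guava");

        // add(E, int) adds the given number of occurrences and returns the count
        // the element had before the call
        int before = words.add("guava", 3);

        System.out.println(words.count("guava")); // 5
        System.out.println(before);               // 2
    }
}

The sorted and insertion-ordered variants used below (TreeMultiset in Example 1, LinkedHashMultiset in Example 14) expose the same add overloads and differ only in iteration order.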

Example 1   Project: naturalize   File: AbstractIdentifierRenamings.java
/**
 * Computes alternative names for the given identifier from the n-gram
 * language model, keeping only the highest-count candidates.
 *
 * @param relevantNgrams the n-grams that provide context for the identifier
 * @param currentName the identifier's current name
 * @return a multiset of candidate names, pruned to the most frequent
 *         alternatives plus the UNK symbol
 */
public Multiset<String> getAlternativeNames(
		final Multiset<NGram<String>> relevantNgrams,
		final String currentName) {
	// Get all alternative namings
	final Multiset<String> nameAlternatives = ngramLM
			.getAlternativeNamings(relevantNgrams, WILDCARD_TOKEN);
	nameAlternatives.add(currentName); // Give the current identifier a
										// chance...

	// Prune naming alternatives
	final Multiset<String> toKeep = TreeMultiset.create();

	int seen = 0;
	for (final Entry<String> ent : Multisets.copyHighestCountFirst(
			nameAlternatives).entrySet()) {
		if (seen > 1000) {
			break;
		}
		toKeep.add(ent.getElement(), ent.getCount());
		seen++;
	}
	toKeep.add(AbstractNGramLM.UNK_SYMBOL);
	return toKeep;
}
 
Example 2   Project: ProjectAres   File: EntropyTest.java
@Test
public void intRange() throws Exception {
    Entropy e = new MutableEntropy(SEED);
    Range<Integer> range = Range.closedOpen(-5, 5);
    Multiset<Integer> distribution = HashMultiset.create();

    // Choose 10k values and check that they are in the range
    for(int i = 0; i < 10000; i++) {
        final int value = e.randomInt(range);
        assertContains(range, value);
        distribution.add(value);
        e.advance();
    }

    // Assert that each of the 10 values was chosen ~1000 times
    Ranges.forEach(range, value -> {
        assertEquals(1000D, distribution.count(value), 50D);
    });
}
 
Example 3   Project: batfish   File: NodePropertiesAnswerer.java
/**
 * Gets properties of nodes.
 *
 * @param propertySpecifier Specifies which properties to get
 * @param ctxt Specifier context to use in extractions
 * @param nodeSpecifier Specifies the set of nodes to focus on
 * @param columns a map from column name to {@link ColumnMetadata}
 * @return A multiset of {@link Row}s where each row corresponds to a node and columns correspond
 *     to property values.
 */
public static Multiset<Row> getProperties(
    NodePropertySpecifier propertySpecifier,
    SpecifierContext ctxt,
    NodeSpecifier nodeSpecifier,
    Map<String, ColumnMetadata> columns) {
  Multiset<Row> rows = HashMultiset.create();

  for (String nodeName : nodeSpecifier.resolve(ctxt)) {
    RowBuilder row = Row.builder(columns).put(COL_NODE, new Node(nodeName));

    for (String property : propertySpecifier.getMatchingProperties()) {
      PropertySpecifier.fillProperty(
          NodePropertySpecifier.getPropertyDescriptor(property),
          ctxt.getConfigs().get(nodeName),
          property,
          row);
    }

    rows.add(row.build());
  }

  return rows;
}
 
Example 4   Project: mango   File: DatabaseShardingTest.java
private void check(List<Msg> msgs, MsgDao dao) {
  List<Msg> dbMsgs = new ArrayList<Msg>();
  Multiset<Integer> ms = HashMultiset.create();
  for (Msg msg : msgs) {
    ms.add(msg.getUid());
  }
  for (Multiset.Entry<Integer> entry : ms.entrySet()) {
    dbMsgs.addAll(dao.getMsgs(entry.getElement()));
  }
  assertThat(dbMsgs, hasSize(msgs.size()));
  assertThat(dbMsgs, containsInAnyOrder(msgs.toArray()));
}
 
Example 5   Project: tutorials   File: GuavaMultiSetUnitTest.java
@Test
public void givenMultiSet_whenSettingCountWithCorrectValue_shouldBeSuccessful() {
    Multiset<String> bookStore = HashMultiset.create();
    bookStore.add("Potter");
    bookStore.add("Potter");

    assertThat(bookStore.setCount("Potter", 2, 52)).isTrue();
}
 
Example 6   Project: log-synth   File: HeaderSamplerTest.java
@Test
public void testEncoding() throws IOException {
    HeaderSampler s = new HeaderSampler();
    s.setType("normal");
    Multiset<String> encodings = HashMultiset.create();
    for (int i = 0; i < 1000; i++) {
        encodings.add(s.encoding());
    }
    assertEquals(3, encodings.elementSet().size());
    assertEquals(333.0, encodings.count("deflate"), 90);
    assertEquals(333.0, encodings.count("gzip"), 90);
    assertEquals(333.0, encodings.count("gzip, deflate"), 90);

    s.setType("mal1");
    encodings = HashMultiset.create();
    for (int i = 0; i < 1000; i++) {
        encodings.add(s.encoding());
    }
    assertEquals(1, encodings.elementSet().size());
    assertEquals(1000, encodings.count("identity"), 90);

    s.setType("mal2");
    encodings = HashMultiset.create();
    for (int i = 0; i < 1000; i++) {
        encodings.add(s.encoding());
    }
    assertEquals(1, encodings.elementSet().size());
}
 
Example 7
private static void addNerTypeLabel(String token, BmeowTypePair pair) {
	Multiset<BmeowTypePair> labelCount = classTypeCounts.get(token);
	
	if(labelCount == null){
		labelCount = HashMultiset.create();
	}
	
	labelCount.add(pair);
	classTypeCounts.put(token, labelCount);
}
 
Example 8   Project: tutorials   File: Anagram.java
public boolean isAnagramMultiset(String string1, String string2) {
    if (string1.length() != string2.length()) {
        return false;
    }
    Multiset<Character> multiset1 = HashMultiset.create();
    Multiset<Character> multiset2 = HashMultiset.create();
    for (int i = 0; i < string1.length(); i++) {
        multiset1.add(string1.charAt(i));
        multiset2.add(string2.charAt(i));
    }
    return multiset1.equals(multiset2);
}
 
Example 9   Project: tac-kbp-eal   File: ByEventTypeResultWriter.java
static Multiset<Symbol> gatherEventTypesSeen(
    final Iterable<EALScorer2015Style.Result> perDocResults) {
  final Multiset<Symbol> eventTypesSeen = HashMultiset.create();
  for (final EALScorer2015Style.Result perDocResult : perDocResults) {
    for (final TypeRoleFillerRealis trfr : perDocResult.argResult().argumentScoringAlignment()
        .allEquivalenceClassess()) {
      eventTypesSeen.add(trfr.type());
    }
  }
  return eventTypesSeen;
}
 
Example 10   Project: javers   File: MultisetType.java
/**
 * @return immutable Multiset
 */
@Override
public Object map(Object sourceEnumerable, EnumerableFunction mapFunction, OwnerContext owner) {
    Validate.argumentIsNotNull(mapFunction);
    Multiset sourceMultiset = toNotNullMultiset(sourceEnumerable);
    Multiset targetMultiset = HashMultiset.create();

    EnumerationAwareOwnerContext enumeratorContext = new EnumerationAwareOwnerContext(owner, true);
    for (Object sourceVal : sourceMultiset) {
        targetMultiset.add(mapFunction.apply(sourceVal, enumeratorContext));
    }
    return Multisets.unmodifiableMultiset(targetMultiset);
}
 
Example 11   Project: log-synth   File: TermGeneratorTest.java
@Test
    public void generateTerms() {
        TermGenerator x = new TermGenerator(WORDS, 1, 0.8);
        final Multiset<String> counts = HashMultiset.create();
        for (int i = 0; i < 10000; i++) {
            counts.add(x.sample());
        }

        assertEquals(10000, counts.size());
        assertTrue("Should have some common words", counts.elementSet().size() < 10000);
        List<Integer> k = Lists.newArrayList(
                counts.elementSet().stream()
                        .map(counts::count)
                        .collect(Collectors.toList()));
//        System.out.printf("%s\n", Ordering.natural().reverse().sortedCopy(k).subList(0, 30));
//        System.out.printf("%s\n", Iterables.transform(Iterables.filter(counts.elementSet(), new Predicate<String>() {
//            public boolean apply(String s) {
//                return counts.count(s) > 100;
//            }
//        }), new Function<String, String>() {
//            public String apply(String s) {
//                return s + ":" + counts.count(s);
//            }
//        }));
        assertEquals(1, Ordering.natural().leastOf(k, 1).get(0).intValue());
        assertTrue(Ordering.natural().greatestOf(k, 1).get(0) > 300);
        assertTrue(counts.count("the") > 300);
    }
 
Example 12   Project: angelix   File: TestTreeBoundedSynthesis.java
@Test
public void testForbiddenNonexistent() {
    Multiset<Node> components = HashMultiset.create();
    components.add(x);
    components.add(y);
    components.add(Library.ADD);

    ArrayList<TestCase> testSuite = new ArrayList<>();
    Map<ProgramVariable, Node> assignment1 = new HashMap<>();
    assignment1.put(x, IntConst.of(1));
    assignment1.put(y, IntConst.of(1));
    testSuite.add(TestCase.ofAssignment(assignment1, IntConst.of(2)));

    Map<ProgramVariable, Node> assignment2 = new HashMap<>();
    assignment2.put(x, IntConst.of(1));
    assignment2.put(y, IntConst.of(2));
    testSuite.add(TestCase.ofAssignment(assignment2, IntConst.of(3)));

    List<Expression> forbidden = new ArrayList<>();
    Map<Hole, Expression> args = new HashMap<>();
    args.put((Hole) Library.ADD.getLeft(), Expression.leaf(x));
    args.put((Hole) Library.ADD.getRight(), Expression.leaf(y));
    forbidden.add(Expression.app(Library.SUB, args));

    Synthesis synthesizerWithForbidden =
            new Synthesis(new BoundedShape(2, forbidden), new TreeBoundedEncoder(false));
    Optional<Pair<Expression, Map<Parameter, Constant>>> result = synthesizerWithForbidden.synthesize(testSuite, components);
    assertTrue(result.isPresent());
    Node node = result.get().getLeft().getSemantics(result.get().getRight());
    assertTrue(node.equals(new Add(x, y)) || node.equals(new Add(y, x)));
}
 
Example 13   Project: log-synth   File: FlatSequenceTest.java
@Test
public void testCrossProduct() throws IOException {
    // here we verify the cross product result when we have multiple flattened sequences in a record
    //noinspection UnstableApiUsage
    SchemaSampler s1 = new SchemaSampler(Resources.asCharSource(Resources.getResource("schema035.json"), Charsets.UTF_8).read());
    Multiset<String> count = HashMultiset.create();

    for (int i = 0; i < 150; i++) {
        JsonNode r = s1.sample();
        assertEquals(r.get("a").asInt() + 100, r.get("b").asInt());
        assertEquals(r.get("foo").get("a").asInt() + 100, r.get("foo").get("b").asInt());

        count.add(String.format("a=%d", r.get("a").asInt()));
        count.add(String.format("a=%d, b=%d", r.get("a").asInt(), r.get("b").asInt()));
        count.add(String.format("a=%d, foo.a=%d", r.get("a").asInt(), r.get("foo").get("a").asInt()));
        count.add(String.format("foo.a=%d", r.get("foo").get("a").asInt()));
        count.add(String.format("foo.a=%d, foo.b=%d", r.get("foo").get("a").asInt(), r.get("foo").get("b").asInt()));
    }

    assertEquals(50, count.count("a=1"), 1.5);
    assertEquals(50, count.count("a=2"), 1.5);
    assertEquals(0, count.count("a=3"));
    assertEquals(75, count.count("foo.a=0"));
    assertEquals(75, count.count("foo.a=1"));
    assertEquals(0, count.count("foo.a=3"));
    assertEquals(25, count.count("a=0, foo.a=0"));
    assertEquals(75, count.count("foo.a=0, foo.b=100"));
    assertEquals(25, count.count("a=0, foo.a=1"));
    assertEquals(50, count.count("a=0, b=100"));
}
 
Example 14   Project: registry   File: Schema.java
private static Multiset<Field> parseArray(List<Object> array) throws ParserException {
    Multiset<Field> members = LinkedHashMultiset.create();
    for(Object member: array) {
        members.add(parseField(null, member));
    }
    return members;
}
 
Example 15   Project: presto   File: BucketBalancer.java
private static Multimap<String, BucketAssignment> computeAssignmentChanges(ClusterState clusterState)
{
    Multimap<String, BucketAssignment> sourceToAllocationChanges = HashMultimap.create();

    Map<String, Long> allocationBytes = new HashMap<>(clusterState.getAssignedBytes());
    Set<String> activeNodes = clusterState.getActiveNodes();

    for (Distribution distribution : clusterState.getDistributionAssignments().keySet()) {
        // number of buckets in this distribution assigned to a node
        Multiset<String> allocationCounts = HashMultiset.create();
        Collection<BucketAssignment> distributionAssignments = clusterState.getDistributionAssignments().get(distribution);
        distributionAssignments.stream()
                .map(BucketAssignment::getNodeIdentifier)
                .forEach(allocationCounts::add);

        int currentMin = allocationBytes.keySet().stream()
                .mapToInt(allocationCounts::count)
                .min()
                .getAsInt();
        int currentMax = allocationBytes.keySet().stream()
                .mapToInt(allocationCounts::count)
                .max()
                .getAsInt();

        int numBuckets = distributionAssignments.size();
        int targetMin = (int) Math.floor((numBuckets * 1.0) / clusterState.getActiveNodes().size());
        int targetMax = (int) Math.ceil((numBuckets * 1.0) / clusterState.getActiveNodes().size());

        log.info("Distribution %s: Current bucket skew: min %s, max %s. Target bucket skew: min %s, max %s", distribution.getId(), currentMin, currentMax, targetMin, targetMax);

        for (String source : ImmutableSet.copyOf(allocationCounts)) {
            List<BucketAssignment> existingAssignments = distributionAssignments.stream()
                    .filter(assignment -> assignment.getNodeIdentifier().equals(source))
                    .collect(toList());

            for (BucketAssignment existingAssignment : existingAssignments) {
                if (activeNodes.contains(source) && allocationCounts.count(source) <= targetMin) {
                    break;
                }

                // identify nodes with bucket counts lower than the computed target, and greedily select from this set based on projected disk utilization.
                // greediness means that this may produce decidedly non-optimal results if one looks at the global distribution of buckets->nodes.
                // also, this assumes that nodes in a cluster have identical storage capacity
                String target = activeNodes.stream()
                        .filter(candidate -> !candidate.equals(source) && allocationCounts.count(candidate) < targetMax)
                        .sorted(comparingInt(allocationCounts::count))
                        .min(Comparator.comparingDouble(allocationBytes::get))
                        .orElseThrow(() -> new VerifyException("unable to find target for rebalancing"));

                long bucketSize = clusterState.getDistributionBucketSize().get(distribution);

                // only move bucket if it reduces imbalance
                if (activeNodes.contains(source) && (allocationCounts.count(source) == targetMax && allocationCounts.count(target) == targetMin)) {
                    break;
                }

                allocationCounts.remove(source);
                allocationCounts.add(target);
                allocationBytes.compute(source, (k, v) -> v - bucketSize);
                allocationBytes.compute(target, (k, v) -> v + bucketSize);

                sourceToAllocationChanges.put(
                        existingAssignment.getNodeIdentifier(),
                        new BucketAssignment(existingAssignment.getDistributionId(), existingAssignment.getBucketNumber(), target));
            }
        }
    }

    return sourceToAllocationChanges;
}
 
Example 16   Project: google-oauth-java-client   File: OAuthParameters.java
private void putParameter(Multiset<Parameter> parameters, String key, Object value) {
  parameters.add(new Parameter(escape(key), value == null ? null : escape(value.toString())));
}
 
Example 17   Project: batfish   File: BgpPeerConfigurationAnswererTest.java
@Test
public void testAnswer() {
  MockSpecifierContext ctxt =
      MockSpecifierContext.builder().setConfigs(ImmutableMap.of("c", _c)).build();
  Multiset<Row> rows =
      BgpPeerConfigurationAnswerer.getAnswerRows(
          ctxt,
          new NameNodeSpecifier("c"),
          BgpPeerConfigurationAnswerer.createTableMetadata(
                  new BgpPeerConfigurationQuestion(null, BgpPeerPropertySpecifier.ALL))
              .toColumnMap(),
          BgpPeerPropertySpecifier.ALL);

  Node node = new Node("c");
  Multiset<Row> expected = HashMultiset.create();
  expected.add(
      Row.builder()
          .put(COL_NODE, node)
          .put(COL_VRF, "v")
          .put(COL_REMOTE_IP, new SelfDescribingObject(Schema.IP, Ip.parse("2.2.2.2")))
          .put(getColumnName(LOCAL_AS), 100L)
          .put(COL_LOCAL_INTERFACE, null)
          .put(getColumnName(REMOTE_AS), LongSpace.of(200L).toString())
          .put(getColumnName(LOCAL_IP), Ip.parse("1.1.1.1"))
          .put(getColumnName(CONFEDERATION), 1L)
          .put(getColumnName(IS_PASSIVE), false)
          .put(getColumnName(ROUTE_REFLECTOR_CLIENT), false)
          .put(getColumnName(CLUSTER_ID), null)
          .put(getColumnName(PEER_GROUP), "g1")
          .put(getColumnName(IMPORT_POLICY), ImmutableSet.of("p1"))
          .put(getColumnName(EXPORT_POLICY), ImmutableSet.of("p2"))
          .put(getColumnName(SEND_COMMUNITY), false)
          .build());
  expected.add(
      Row.builder()
          .put(COL_NODE, node)
          .put(COL_VRF, "v")
          .put(getColumnName(LOCAL_AS), 100L)
          .put(COL_LOCAL_INTERFACE, null)
          .put(
              COL_REMOTE_IP,
              new SelfDescribingObject(Schema.PREFIX, Prefix.create(Ip.parse("3.3.3.0"), 24)))
          .put(getColumnName(REMOTE_AS), LongSpace.of(300L).toString())
          .put(getColumnName(LOCAL_IP), Ip.parse("1.1.1.2"))
          .put(getColumnName(CONFEDERATION), 2L)
          .put(getColumnName(IS_PASSIVE), true)
          .put(getColumnName(ROUTE_REFLECTOR_CLIENT), true)
          .put(getColumnName(CLUSTER_ID), Ip.parse("5.5.5.5"))
          .put(getColumnName(PEER_GROUP), "g2")
          .put(getColumnName(IMPORT_POLICY), ImmutableSet.of("p3"))
          .put(getColumnName(EXPORT_POLICY), ImmutableSet.of("p4"))
          .put(getColumnName(SEND_COMMUNITY), false)
          .build());
  expected.add(
      Row.builder()
          .put(COL_NODE, node)
          .put(COL_VRF, "v")
          .put(getColumnName(LOCAL_AS), 100L)
          .put(COL_LOCAL_INTERFACE, "iface")
          .put(COL_REMOTE_IP, null)
          .put(getColumnName(REMOTE_AS), LongSpace.of(400L).toString())
          .put(getColumnName(LOCAL_IP), null)
          .put(getColumnName(CONFEDERATION), 3L)
          .put(getColumnName(IS_PASSIVE), false)
          .put(getColumnName(ROUTE_REFLECTOR_CLIENT), true)
          .put(getColumnName(CLUSTER_ID), Ip.parse("6.6.6.6"))
          .put(getColumnName(PEER_GROUP), "g3")
          .put(getColumnName(IMPORT_POLICY), ImmutableSet.of("p5"))
          .put(getColumnName(EXPORT_POLICY), ImmutableSet.of("p6"))
          .put(getColumnName(SEND_COMMUNITY), false)
          .build());

  assertThat(rows, equalTo(expected));
}
 
Example 18   Project: gef   File: ObservableMultisetTests.java
@Test
public void replaceAll() {
	// initialize multiset with some values
	observable.add(1, 1);
	observable.add(2, 2);
	observable.add(3, 3);
	observable.add(4, 4);

	// prepare backup multiset
	Multiset<Integer> backupMultiset = HashMultiset.create();
	backupMultiset.add(1, 1);
	backupMultiset.add(2, 2);
	backupMultiset.add(3, 3);
	backupMultiset.add(4, 4);
	check(observable, backupMultiset);

	// register listeners
	registerListeners();

	// replaceAll
	invalidationListener.expect(1);
	multisetChangeListener.addAtomicExpectation();
	multisetChangeListener.addElementaryExpection(2, 1, 0); // decrease
															// count
	multisetChangeListener.addElementaryExpection(4, 4, 0); // remove
	multisetChangeListener.addElementaryExpection(3, 0, 3); // increase
															// count
	multisetChangeListener.addElementaryExpection(5, 0, 5); // add

	Multiset<Integer> toReplace = HashMultiset.create();
	toReplace.add(1);
	toReplace.add(2, 1);
	toReplace.add(3, 6);
	toReplace.add(5, 5);

	observable.replaceAll(toReplace);
	backupMultiset.clear();
	backupMultiset.addAll(toReplace);
	check(observable, backupMultiset);
	checkListeners();

	// replace with same contents (should not have any effect)
	invalidationListener.expect(0);
	observable.replaceAll(toReplace);
	check(observable, backupMultiset);
	checkListeners();
}
 
Example 19
private static void importSystemOutputToAnnotationStore(Set<SystemOutputStore> argumentStores,
    Set<AnnotationStore> annotationStores,
    Function<DocumentSystemOutput, DocumentSystemOutput> filter, Predicate<Symbol> docIdFilter)
    throws IOException {
  log.info("Loading system outputs from {}",
      StringUtils.unixNewlineJoiner().join(argumentStores));
  log.info("Using assessment stores at {}",
      StringUtils.unixNewlineJoiner().join(annotationStores));

  final Multiset<AnnotationStore> totalNumAdded = HashMultiset.create();
  final Multiset<AnnotationStore> totalAlreadyThere = HashMultiset.create();

  for (final SystemOutputStore systemOutput : argumentStores) {
    log.info("Processing system output from {}", systemOutput);

    for (final Symbol docid : filter(systemOutput.docIDs(), docIdFilter)) {
      final DocumentSystemOutput docOutput = filter.apply(systemOutput.read(docid));
      log.info("Processing {} responses for document {}", docid, docOutput.arguments().size());

      for (final AnnotationStore annStore : annotationStores) {
        final AnswerKey currentAnnotation = annStore.readOrEmpty(docid);
        final int numAnnotatedResponsesInCurrentAnnotation =
            currentAnnotation.annotatedResponses().size();
        final int numUnannotatedResponsesInCurrentAnnotation =
            currentAnnotation.unannotatedResponses().size();

        final AnswerKey newAnswerKey = currentAnnotation.copyAddingPossiblyUnannotated(
            docOutput.arguments().responses());
        final int numAdded =
            newAnswerKey.unannotatedResponses().size() - numUnannotatedResponsesInCurrentAnnotation;
        final int numAlreadyKnown = docOutput.arguments().responses().size() - numAdded;
        log.info(
            "Annotation store {} has {} annotated and {} unannotated; added {} for assessment",
            annStore, numAnnotatedResponsesInCurrentAnnotation,
            numUnannotatedResponsesInCurrentAnnotation, numAdded);
        annStore.write(newAnswerKey);
        totalNumAdded.add(annStore, numAdded);
        totalAlreadyThere.add(annStore, numAlreadyKnown);
      }
    }
  }

  log.info("Total number of responses added: {}", totalNumAdded);
  log.info("Total number of responses already known: {}", totalAlreadyThere);
}
 
Example 20   Project: gef   File: ObservableMultisetTests.java
@Test
public void remove_withCount() {
	// initialize multiset with some values
	observable.add(1, 1);
	observable.add(2, 2);
	observable.add(3, 3);

	// prepare backup multiset
	Multiset<Integer> backupMultiset = HashMultiset.create();
	backupMultiset.add(1, 1);
	backupMultiset.add(2, 2);
	backupMultiset.add(3, 3);
	check(observable, backupMultiset);

	// register listeners
	registerListeners();

	// remove zero occurrences (no change expected)
	assertEquals(backupMultiset.remove(3, 0), observable.remove(3, 0));
	check(observable, backupMultiset);
	checkListeners();

	// remove (two occurrences of) value
	invalidationListener.expect(1);
	multisetChangeListener.addAtomicExpectation();
	multisetChangeListener.addElementaryExpection(3, 2, 0);
	assertEquals(backupMultiset.remove(3, 2), observable.remove(3, 2));
	check(observable, backupMultiset);
	checkListeners();

	// remove more occurrences than contained (change contains fewer
	// occurrences)
	invalidationListener.expect(1);
	multisetChangeListener.addAtomicExpectation();
	multisetChangeListener.addElementaryExpection(3, 1, 0);
	assertEquals(backupMultiset.remove(3, 2), observable.remove(3, 2));
	check(observable, backupMultiset);
	checkListeners();

	// remove not contained value (no change expected)
	assertEquals(backupMultiset.remove(3, 1), observable.remove(3, 1));
	check(observable, backupMultiset);
	checkListeners();
}