java.util.Map#size() source code examples

Listed below are example usages of java.util.Map#size(), taken from open-source projects on GitHub; follow each project to view the full source file.
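
Before the project examples, here is a minimal, self-contained sketch (not from any of the projects below; the class name MapSizeDemo is made up for illustration) showing the basic contract of Map#size(): it returns the current number of key/value mappings and changes as entries are added or removed.

import java.util.HashMap;
import java.util.Map;

public class MapSizeDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        System.out.println(map.size());      // 0 for an empty map

        map.put("a", 1);
        map.put("b", 2);
        map.put("a", 3);                      // overwriting an existing key does not grow the map
        System.out.println(map.size());      // 2

        map.remove("b");
        System.out.println(map.size());      // 1
        System.out.println(map.isEmpty());   // false; isEmpty() is usually preferred over size() == 0
    }
}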

Example 1  Project: onos   File: PersistentMap.java
@Override
public boolean equals(Object map) {
    //This is not threadsafe and on larger maps incurs a significant processing cost
    if (!(map instanceof Map)) {
        return false;
    }
    Map asMap = (Map) map;
    if (this.size() != asMap.size()) {
        return false;
    }
    for (Entry entry : this.entrySet()) {
        Object key = entry.getKey();
        if (!asMap.containsKey(key) || !asMap.get(key).equals(entry.getValue())) {
            return false;
        }
    }
    return true;
}
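
The size() comparison above is the cheap O(1) short-circuit: if the two maps hold different numbers of entries they cannot be equal, so the O(n) entry-by-entry walk is skipped. A minimal sketch of the same pattern with two hypothetical maps (plain JDK, not onos code):

Map<String, Integer> a = Map.of("x", 1, "y", 2);
Map<String, Integer> b = Map.of("x", 1);

// Different sizes: no need to inspect individual entries.
if (a.size() != b.size()) {
    System.out.println("maps differ");   // prints: maps differ
}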
 
Example 2  Project: MorphoLibJ   File: Morphology.java
/**
 * Performs morphological gradient on each channel, and reconstitutes
 * the resulting color image.
 */
private static ImageProcessor gradientRGB(ImageProcessor image, Strel strel)
{
	// extract channels and allocate memory for result
	Map<String, ByteProcessor> channels = ColorImages.mapChannels(image);
	Collection<ImageProcessor> res = new ArrayList<ImageProcessor>(channels.size());
	
	// Process each channel individually
	for (String name : new String[]{"red", "green", "blue"})
	{
		strel.setChannelName(name);
		res.add(gradient(channels.get(name), strel));
	}
	
	return ColorImages.mergeChannels(res);
}
 
Example 3  Project: boubei-tss   File: _Recorder.java
@RequestMapping(value = "/approve/{record}/{id}", method = RequestMethod.POST)
public void approve(HttpServletRequest request, HttpServletResponse response, @PathVariable("record") Object record, @PathVariable("id") Long id) {

	Long recordId = recordService.getRecordID(record, false);
	Map<String, String> requestMap = prepareParams(request, recordId);
	
	String opinion = requestMap.remove("opinion");
	
	// During approval, the approver may additionally fill in table columns prefixed with wf_
	if( requestMap.size() > 0 ) {
		_Database db = recordService.getDB(recordId);
		db.update(id, requestMap);
	}
			
	String msg = "审批成功"; // "approval succeeded"
	String wfStatus = wfService.approve(recordId, id, opinion);
	if( WFStatus.PASSED.equals(wfStatus) ) {
		msg = "审批通过"; // "approval passed"
	}
	
	printJSON(msg);
}
 
Example 4  Project: presto   File: DistributedExecutionPlanner.java
@Override
public Map<PlanNodeId, SplitSource> visitSample(SampleNode node, Void context)
{
    switch (node.getSampleType()) {
        case BERNOULLI:
            return node.getSource().accept(this, context);
        case SYSTEM:
            Map<PlanNodeId, SplitSource> nodeSplits = node.getSource().accept(this, context);
            // TODO: when this happens we should switch to either BERNOULLI or page sampling
            if (nodeSplits.size() == 1) {
                PlanNodeId planNodeId = getOnlyElement(nodeSplits.keySet());
                SplitSource sampledSplitSource = new SampledSplitSource(nodeSplits.get(planNodeId), node.getSampleRatio());
                return ImmutableMap.of(planNodeId, sampledSplitSource);
            }
            // table sampling on a sub query without splits is meaningless
            return nodeSplits;

        default:
            throw new UnsupportedOperationException("Sampling is not supported for type " + node.getSampleType());
    }
}
 
Example 5  Project: offheap-store   File: EvictionListenerIT.java
@Test
public void testEvictionListenerSeesStealingEventsReadWriteLocked() {
  MonitoringEvictionListener listener = new MonitoringEvictionListener();
  PageSource source = new UpfrontAllocatingPageSource(new OffHeapBufferSource(), 16 * 4096, 16 * 4096);
  Map<Long, String> victim = new ConcurrentOffHeapClockCache<>(source, LongStorageEngine.createFactory(OffHeapBufferHalfStorageEngine
    .createFactory(source, 128, StringPortability.INSTANCE, false, true)), listener);

  long i = 0;
  while (listener.evictedKeys().isEmpty()) {
    victim.put(i, Long.toString(i));
    i++;
  }
  listener.evictedKeys().clear();
  long victimSize = victim.size();

  Map<Long, String> thief = new ConcurrentOffHeapHashMap<>(source, true, LongStorageEngine.createFactory(OffHeapBufferHalfStorageEngine
    .createFactory(source, 128, StringPortability.INSTANCE, true, false)));

  try {
    i = 0;
    while (true) {
      thief.put(i, Long.toString(i));
      i++;
    }
  } catch (OversizeMappingException e) {
    //ignore
  }

  Assert.assertFalse(listener.evictedKeys().isEmpty());

  Assert.assertEquals(victimSize, victim.size() + listener.evictedKeys().size());
}
 
Example 6  Project: gemfirexd-oss   File: ClientCacheCreation.java
public String getDefaultPoolName() {
  String result = null;
  Map m = getPools();
  if (m.size() == 1) {
    Pool p = (Pool)m.values().iterator().next();
    result = p.getName();
  } else if (m.isEmpty()) {
    result = "DEFAULT";
  }
  return result;
}
 
Example 7  Project: ranger   File: RangerValidator.java
/**
 * Returns a copy of the policy resource map where all keys (resource-names) are lowercase
 * @param input the policy resource map to copy; may be null
 * @return a new map with all keys lower-cased, or null if the input was null
 */
Map<String, RangerPolicyResource> getPolicyResourceWithLowerCaseKeys(Map<String, RangerPolicyResource> input) {
	if (input == null) {
		return null;
	}
	Map<String, RangerPolicyResource> output = new HashMap<String, RangerPolicyResource>(input.size());
	for (Map.Entry<String, RangerPolicyResource> entry : input.entrySet()) {
		output.put(entry.getKey().toLowerCase(), entry.getValue());
	}
	return output;
}
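
Note that new HashMap<>(input.size()) sets the initial capacity, not the expected entry count; with the default load factor of 0.75 the table may still be resized once while copying. Below is a small sketch (a hypothetical helper, not Ranger code) of pre-sizing the map so that input.size() entries fit without a resize:

// Capacity large enough to hold input.size() entries at the default 0.75 load factor.
static <V> Map<String, V> lowerCaseKeys(Map<String, V> input) {
    Map<String, V> output = new HashMap<>((int) (input.size() / 0.75f) + 1);
    for (Map.Entry<String, V> entry : input.entrySet()) {
        output.put(entry.getKey().toLowerCase(), entry.getValue());
    }
    return output;
}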
 
Example 8  Project: PoseidonX   File: DataSourceContainer.java
/**
 * Validates that the number of query arguments matches the definition
 */
private void validateEvaluateArguments(Map<String, Object> cqlExpressionValues)
{
    if (cqlExpressionValues.size() != getEvaluateArgumentsLength())
    {
        LOG.error("evaluate arguments size doesn't same with dataSource. dataSource : {}, evaluate size : {}",
            getEvaluateArgumentsLength(),
            cqlExpressionValues.size());
        throw new StreamingRuntimeException("evaluate arguments size doesn't same with dataSource. dataSource : "
            + getEvaluateArgumentsLength() + ", evaluate size : " + cqlExpressionValues.size());
    }
}
 
Example 9  Project: carbon-apimgt   File: SettingsMappingUtil.java
/**
 * This method feeds data into the settingsDTO
 * @param isUserAvailable whether a user is currently logged in
 * @return SettingsDTO
 * @throws APIManagementException
 */
public SettingsDTO fromSettingstoDTO(Boolean isUserAvailable) throws APIManagementException {
    SettingsDTO settingsDTO = new SettingsDTO();
    EnvironmentListDTO environmentListDTO = new EnvironmentListDTO();
    if (isUserAvailable) {
        Map<String, Environment> environments = APIUtil.getEnvironments();
        if (environments != null) {
            environmentListDTO = EnvironmentMappingUtil.fromEnvironmentCollectionToDTO(environments.values());
        }
        settingsDTO.setEnvironment(environmentListDTO.getList());
        String storeUrl = APIUtil.getStoreUrl();
        String loggedInUserTenantDomain = RestApiUtil.getLoggedInUserTenantDomain();
        Map<String, String> domainMappings =
                APIUtil.getDomainMappings(loggedInUserTenantDomain, APIConstants.API_DOMAIN_MAPPINGS_STORE);
        if (domainMappings.size() != 0) {
            Iterator entries = domainMappings.entrySet().iterator();
            while (entries.hasNext()) {
                Map.Entry thisEntry = (Map.Entry) entries.next();
                storeUrl = "https://" + thisEntry.getValue();
                break;
            }
        }
        settingsDTO.setStoreUrl(storeUrl);
        settingsDTO.setMonetizationAttributes(getMonetizationAttributes());
        settingsDTO.setSecurityAuditProperties(getSecurityAuditProperties());
        settingsDTO.setExternalStoresEnabled(
                APIUtil.isExternalStoresEnabled(RestApiUtil.getLoggedInUserTenantDomain()));
        settingsDTO.setDocVisibilityEnabled(APIUtil.isDocVisibilityLevelsEnabled());
    }
    settingsDTO.setScopes(GetScopeList());
    return settingsDTO;
}
 
Example 10  Project: gemfirexd-oss   File: TIntLongHashMapDecorator.java
/**
 * Copies the key/value mappings in <tt>map</tt> into this map.
 * Note that this will be a <b>deep</b> copy, as storage is by
 * primitive value.
 *
 * @param map a <code>Map</code> value
 */
@Override // GemStoneAddition
public void putAll(Map map) {
    Iterator it = map.entrySet().iterator();
    for (int i = map.size(); i-- > 0;) {
        Map.Entry e = (Map.Entry) it.next();
        this.put(e.getKey(), e.getValue());
    }
}
 
Example 11
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
    GRpcApi grpcApi = bean.getClass().getAnnotation(GRpcApi.class);
    if (grpcApi == null) {
        return bean;
    }
    String scheme = grpcApi.value();
    // Check whether the scheme is already registered
    if (bindServiceAdapterList.stream().anyMatch(item -> item.getScheme().equals(scheme))) {
        throw new GRpcServerCreateException("The scheme [" + scheme + "] already exists. Please check your configuration.");
    }
    }
    Class<?> targetClass = AopUtils.getTargetClass(bean);
    Map<Method, GRpcMethod> annotatedMethods = MethodIntrospector.selectMethods(targetClass,
            (MethodIntrospector.MetadataLookup<GRpcMethod>) method -> AnnotatedElementUtils.findMergedAnnotation(method, GRpcMethod.class));
    if (annotatedMethods.size() == 0) {
        return bean;
    }
    List<MethodCallProperty> methodCallPropertyList = new ArrayList<>();
    annotatedMethods.forEach((method, v) -> {
        MethodCallProperty methodCallProperty = new MethodCallProperty();
        methodCallProperty.setScheme(scheme);
        methodCallProperty.setMethod(method);
        methodCallProperty.setProxyTarget(bean);
        methodCallProperty.setMethodName(StringUtils.isEmpty(v.value()) ? method.getName() : v.value());
        methodCallProperty.setMethodType(v.type());
        methodCallPropertyList.add(methodCallProperty);
    });
    BindServiceAdapter bindServiceAdapter = new BindServiceAdapter(scheme, methodCallPropertyList, marshallerFactory);
    bindServiceAdapterList.add(bindServiceAdapter);
    return bean;
}
 
Example 12
public void addResult(final String rowKey, final Map<String, String> cells, final long timestamp) {
    final byte[] rowArray = rowKey.getBytes(StandardCharsets.UTF_8);

    final Cell[] cellArray = new Cell[cells.size()];
    int i = 0;
    for (final Map.Entry<String, String> cellEntry : cells.entrySet()) {
        final Cell cell = Mockito.mock(Cell.class);
        when(cell.getRowArray()).thenReturn(rowArray);
        when(cell.getRowOffset()).thenReturn(0);
        when(cell.getRowLength()).thenReturn((short) rowArray.length);

        final String cellValue = cellEntry.getValue();
        final byte[] valueArray = cellValue.getBytes(StandardCharsets.UTF_8);
        when(cell.getValueArray()).thenReturn(valueArray);
        when(cell.getValueOffset()).thenReturn(0);
        when(cell.getValueLength()).thenReturn(valueArray.length);

        final byte[] familyArray = "nifi".getBytes(StandardCharsets.UTF_8);
        when(cell.getFamilyArray()).thenReturn(familyArray);
        when(cell.getFamilyOffset()).thenReturn(0);
        when(cell.getFamilyLength()).thenReturn((byte) familyArray.length);

        final String qualifier = cellEntry.getKey();
        final byte[] qualifierArray = qualifier.getBytes(StandardCharsets.UTF_8);
        when(cell.getQualifierArray()).thenReturn(qualifierArray);
        when(cell.getQualifierOffset()).thenReturn(0);
        when(cell.getQualifierLength()).thenReturn(qualifierArray.length);

        when(cell.getTimestamp()).thenReturn(timestamp);

        cellArray[i++] = cell;
    }

    final Result result = Mockito.mock(Result.class);
    when(result.getRow()).thenReturn(rowArray);
    when(result.rawCells()).thenReturn(cellArray);
    results.add(result);
}
 
Example 13  Project: wildfly-core   File: GlobalOperationHandlers.java
@Override
public void handleResult(OperationContext.ResultAction resultAction, OperationContext context, ModelNode operation) {
    if(fakeOperationResponse != null && fakeOperationResponse.hasDefined(FAILURE_DESCRIPTION)) {
        context.getFailureDescription().set(fakeOperationResponse.get(FAILURE_DESCRIPTION));
        return;
    }
    // Report on filtering
    if (localFilteredData.hasFilteredData()) {
        context.getResponseHeaders().get(ACCESS_CONTROL).set(localFilteredData.toModelNode());
    }

    // Extract any failure info from the individual results and use them
    // to construct an overall failure description if necessary
    if (resultAction == OperationContext.ResultAction.ROLLBACK
            && !context.hasFailureDescription() && result.isDefined()) {
        String op = operation.require(OP).asString();
        Map<PathAddress, ModelNode> failures = new HashMap<PathAddress, ModelNode>();
        for (ModelNode resultItem : result.asList()) {
            if (resultItem.hasDefined(FAILURE_DESCRIPTION)) {
                final PathAddress failedAddress = PathAddress.pathAddress(resultItem.get(ADDRESS));
                ModelNode failedDesc = resultItem.get(FAILURE_DESCRIPTION);
                failures.put(failedAddress, failedDesc);
            }
        }

        if (failures.size() == 1) {
            Map.Entry<PathAddress, ModelNode> entry = failures.entrySet().iterator().next();
            if (entry.getValue().getType() == ModelType.STRING) {
                context.getFailureDescription().set(ControllerLogger.ROOT_LOGGER.wildcardOperationFailedAtSingleAddress(op, entry.getKey(), entry.getValue().asString()));
            } else {
                context.getFailureDescription().set(ControllerLogger.ROOT_LOGGER.wildcardOperationFailedAtSingleAddressWithComplexFailure(op, entry.getKey()));
            }
        } else if (failures.size() > 1) {
            context.getFailureDescription().set(ControllerLogger.ROOT_LOGGER.wildcardOperationFailedAtMultipleAddresses(op, failures.keySet()));
        }
    }
}
 
Example 14  Project: openjdk-8   File: MapBinToFromTreeTest.java
void remove(Map<HashCodeInteger, Integer> m, BiConsumer<Integer, Integer> c) {
    int size = m.size();
    // Remove all elements thus ensuring at some point trees will be
    // converting back to bins
    for (int i = 0; i < size; i++) {
        m.remove(new HashCodeInteger(i));

        c.accept(i, m.size());
    }
}
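
Here size() is captured once before the loop because each remove() shrinks the map, while the callback receives the live, decreasing size after every removal. A tiny sketch of that behavior with a plain HashMap (hypothetical, not the JDK test harness):

Map<Integer, Integer> m = new HashMap<>();
for (int i = 0; i < 3; i++) {
    m.put(i, i);
}

int size = m.size();                 // capture before mutating
for (int i = 0; i < size; i++) {
    m.remove(i);
    System.out.println(m.size());    // prints 2, then 1, then 0
}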
 
Example 15  Project: flink   File: RocksIncrementalSnapshotStrategy.java
@Override
protected SnapshotResult<KeyedStateHandle> callInternal() throws Exception {

	boolean completed = false;

	// Handle to the meta data file
	SnapshotResult<StreamStateHandle> metaStateHandle = null;
	// Handles to new sst files since the last completed checkpoint will go here
	final Map<StateHandleID, StreamStateHandle> sstFiles = new HashMap<>();
	// Handles to the misc files in the current snapshot will go here
	final Map<StateHandleID, StreamStateHandle> miscFiles = new HashMap<>();

	try {

		metaStateHandle = materializeMetaData();

		// Sanity checks - they should never fail
		Preconditions.checkNotNull(metaStateHandle, "Metadata was not properly created.");
		Preconditions.checkNotNull(metaStateHandle.getJobManagerOwnedSnapshot(),
			"Metadata for job manager was not properly created.");

		uploadSstFiles(sstFiles, miscFiles);

		synchronized (materializedSstFiles) {
			materializedSstFiles.put(checkpointId, sstFiles.keySet());
		}

		final IncrementalRemoteKeyedStateHandle jmIncrementalKeyedStateHandle =
			new IncrementalRemoteKeyedStateHandle(
				backendUID,
				keyGroupRange,
				checkpointId,
				sstFiles,
				miscFiles,
				metaStateHandle.getJobManagerOwnedSnapshot());

		final DirectoryStateHandle directoryStateHandle = localBackupDirectory.completeSnapshotAndGetHandle();
		final SnapshotResult<KeyedStateHandle> snapshotResult;
		if (directoryStateHandle != null && metaStateHandle.getTaskLocalSnapshot() != null) {

			IncrementalLocalKeyedStateHandle localDirKeyedStateHandle =
				new IncrementalLocalKeyedStateHandle(
					backendUID,
					checkpointId,
					directoryStateHandle,
					keyGroupRange,
					metaStateHandle.getTaskLocalSnapshot(),
					sstFiles.keySet());

			snapshotResult = SnapshotResult.withLocalState(jmIncrementalKeyedStateHandle, localDirKeyedStateHandle);
		} else {
			snapshotResult = SnapshotResult.of(jmIncrementalKeyedStateHandle);
		}

		completed = true;

		return snapshotResult;
	} finally {
		if (!completed) {
			final List<StateObject> statesToDiscard =
				new ArrayList<>(1 + miscFiles.size() + sstFiles.size());
			statesToDiscard.add(metaStateHandle);
			statesToDiscard.addAll(miscFiles.values());
			statesToDiscard.addAll(sstFiles.values());
			cleanupIncompleteSnapshot(statesToDiscard);
		}
	}
}
 
Example 16  Project: radiance   File: SubstanceIconFactory.java
/**
 * Retrieves icon that matches the specified state of the slider thumb.
 * 
 * @param slider
 *            The slider itself.
 * @param stateTransitionTracker
 *            State transition tracker of the slider thumb.
 * @return Icon that matches the specified state of the slider thumb.
 */
private ImageWrapperIcon getIcon(JSlider slider,
        StateTransitionTracker stateTransitionTracker) {
    StateTransitionTracker.ModelStateInfo modelStateInfo = stateTransitionTracker
            .getModelStateInfo();
    Map<ComponentState, StateTransitionTracker.StateContributionInfo> activeStates = modelStateInfo
            .getStateContributionMap();
    ComponentState currState = stateTransitionTracker.getModelStateInfo()
            .getCurrModelState();

    float activeStrength = stateTransitionTracker.getActiveStrength();
    int height = (int) (this.size * (2.0 + activeStrength) / 3.0);
    height = Math.min(height, this.size - 2);
    int delta = (this.size - height) / 2 - 1;

    SubstanceFillPainter fillPainter = SubstanceCoreUtilities.getFillPainter(slider);
    SubstanceBorderPainter borderPainter = SubstanceCoreUtilities.getBorderPainter(slider);

    SubstanceColorScheme baseFillScheme = SubstanceColorSchemeUtilities
            .getColorScheme(slider, currState);
    SubstanceColorScheme baseBorderScheme = SubstanceColorSchemeUtilities
            .getColorScheme(slider, ColorSchemeAssociationKind.BORDER, currState);

    HashMapKey baseKey = SubstanceCoreUtilities.getHashKey(this.size, height,
            slider.getComponentOrientation(), baseFillScheme.getDisplayName(),
            baseBorderScheme.getDisplayName(), fillPainter.getDisplayName(),
            borderPainter.getDisplayName(), this.isMirrorred);

    ImageWrapperIcon baseLayer = SliderVerticalIcon.icons.get(baseKey);
    if (baseLayer == null) {
        baseLayer = getSingleLayer(slider, height, delta, fillPainter, borderPainter,
                baseFillScheme, baseBorderScheme);
        SliderVerticalIcon.icons.put(baseKey, baseLayer);
    }

    if (currState.isDisabled() || (activeStates.size() == 1))
        return baseLayer;

    BufferedImage result = SubstanceCoreUtilities.getBlankImage(baseLayer.getIconWidth(),
            baseLayer.getIconHeight());
    Graphics2D g2d = result.createGraphics();
    baseLayer.paintIcon(slider, g2d, 0, 0);

    for (Map.Entry<ComponentState, StateTransitionTracker.StateContributionInfo> activeEntry : activeStates
            .entrySet()) {
        ComponentState activeState = activeEntry.getKey();
        if (activeState == currState)
            continue;

        float contribution = activeEntry.getValue().getContribution();
        if (contribution == 0.0f)
            continue;

        SubstanceColorScheme fillScheme = SubstanceColorSchemeUtilities
                .getColorScheme(slider, activeState);
        SubstanceColorScheme borderScheme = SubstanceColorSchemeUtilities
                .getColorScheme(slider, ColorSchemeAssociationKind.BORDER, activeState);

        HashMapKey key = SubstanceCoreUtilities.getHashKey(this.size, height,
                slider.getComponentOrientation(), fillScheme.getDisplayName(),
                borderScheme.getDisplayName(), fillPainter.getDisplayName(),
                borderPainter.getDisplayName(), this.isMirrorred);

        ImageWrapperIcon layer = SliderVerticalIcon.icons.get(key);
        if (layer == null) {
            layer = getSingleLayer(slider, height, delta, fillPainter, borderPainter,
                    fillScheme, borderScheme);
            SliderVerticalIcon.icons.put(key, layer);
        }

        g2d.setComposite(AlphaComposite.SrcOver.derive(contribution));
        layer.paintIcon(slider, g2d, 0, 0);
    }

    g2d.dispose();
    return new ImageWrapperIcon(result);
}
 
Example 17  Project: greycat   File: BioInputNeuralNode.java
public void learn(final double value, final int spikeLimit, final double threshold, final Callback callback) {
    final Map<Long, List<Tuple<Long, Double>>> loop = new HashMap<Long, List<Tuple<Long, Double>>>();
    LongLongMap firstHiddenLayer = (LongLongMap) get(BioNeuralNetwork.RELATION_OUTPUTS);
    //fill with first layer
    firstHiddenLayer.each((key, value1) -> {
        List<Tuple<Long, Double>> previous = loop.get(key);
        if (previous == null) {
            previous = new ArrayList<Tuple<Long, Double>>();
            loop.put(key, previous);
        }
        previous.add(new Tuple<Long, Double>(this.id(), value));
    });
    final Job[] job = new Job[1];
    job[0] = new Job() {
        @Override
        public void run() {
            long[] keys = new long[loop.size()];
            //ugly !!!
            int index = 0;
            for (Long v : loop.keySet()) {
                keys[index] = v;
                index++;
            }
            graph().lookupAll(world(), time(), keys, nextLayers -> {
                for (int i = 0; i < nextLayers.length; i++) {
                    if (nextLayers[i] instanceof BioOutputNeuralNode) {
                        BioOutputNeuralNode neural = (BioOutputNeuralNode) nextLayers[i];
                        long currentId = neural.id();
                        List<Tuple<Long, Double>> signals = loop.get(currentId);
                        loop.remove(currentId);
                        for (int j = 0; j < signals.size(); j++) {
                            double nextSignal = neural.learn(signals.get(j).left(), signals.get(j).right(), spikeLimit, threshold);
                            //TODO signal
                        }
                    } else {
                        BioNeuralNode neural = (BioNeuralNode) nextLayers[i];
                        long currentId = neural.id();
                        List<Tuple<Long, Double>> signals = loop.get(currentId);
                        loop.remove(currentId);
                        final LongLongArrayMap outputs = (LongLongArrayMap) neural.get(BioNeuralNetwork.RELATION_OUTPUTS);
                        for (int j = 0; j < signals.size(); j++) {
                            double nextSignal = neural.learn(signals.get(j).left(), signals.get(j).right(), spikeLimit, threshold);
                            if (nextSignal != 0) {
                                outputs.each((key, value1) -> {
                                    List<Tuple<Long, Double>> previous = loop.get(key);
                                    if (previous == null) {
                                        previous = new ArrayList<Tuple<Long, Double>>();
                                        loop.put(key, previous);
                                    }
                                    previous.add(new Tuple<Long, Double>(currentId, nextSignal));
                                });
                                //stack
                            }
                        }
                    }
                    //in any case free
                    nextLayers[i].free();
                }
                if (loop.size() > 0) {
                    graph().scheduler().dispatch(SchedulerAffinity.SAME_THREAD, job[0]);
                }
            });
        }
    };
    graph().scheduler().dispatch(SchedulerAffinity.SAME_THREAD, job[0]);


}
 
Example 18  Project: kcache   File: OffsetCheckpoint.java
/**
 * Reads the offsets from the local checkpoint file, skipping any negative offsets it finds.
 *
 * @throws IOException if any file operation fails with an IO exception
 * @throws IllegalArgumentException if the offset checkpoint version is unknown
 */
public Map<TopicPartition, Long> read() throws IOException {
    synchronized (lock) {
        try (final BufferedReader reader = Files.newBufferedReader(file.toPath())) {
            final int version = readInt(reader);
            switch (version) {
                case 0:
                    final int expectedSize = readInt(reader);
                    final Map<TopicPartition, Long> offsets = new HashMap<>();
                    String line = reader.readLine();
                    while (line != null) {
                        final String[] pieces = WHITESPACE_MINIMUM_ONCE.split(line);
                        if (pieces.length != 3) {
                            throw new IOException(
                                String.format("Malformed line in offset checkpoint file: '%s'.", line));
                        }

                        final String topic = pieces[0];
                        final int partition = Integer.parseInt(pieces[1]);
                        final TopicPartition tp = new TopicPartition(topic, partition);
                        final long offset = Long.parseLong(pieces[2]);
                        if (offset >= 0L) {
                            offsets.put(tp, offset);
                        } else {
                            LOG.warn("Read offset={} from checkpoint file for {}", offset, tp);
                        }

                        line = reader.readLine();
                    }
                    if (offsets.size() != expectedSize) {
                        throw new IOException(
                            String.format("Expected %d entries but found only %d", expectedSize, offsets.size()));
                    }
                    return offsets;

                default:
                    throw new IllegalArgumentException("Unknown offset checkpoint version: " + version);
            }
        } catch (final NoSuchFileException e) {
            return Collections.emptyMap();
        }
    }
}
 
Example 19  Project: oryx   File: RDFUpdate.java
@Override
public PMML buildModel(JavaSparkContext sparkContext,
                       JavaRDD<String> trainData,
                       List<?> hyperParameters,
                       Path candidatePath) {

  int maxSplitCandidates = (Integer) hyperParameters.get(0);
  int maxDepth = (Integer) hyperParameters.get(1);
  String impurity = (String) hyperParameters.get(2);
  Preconditions.checkArgument(maxSplitCandidates >= 2,
                              "max-split-candidates must be at least 2");
  Preconditions.checkArgument(maxDepth > 0,
                              "max-depth must be at least 1");

  JavaRDD<String[]> parsedRDD = trainData.map(MLFunctions.PARSE_FN);
  CategoricalValueEncodings categoricalValueEncodings =
      new CategoricalValueEncodings(getDistinctValues(parsedRDD));
  JavaRDD<LabeledPoint> trainPointData =
      parseToLabeledPointRDD(parsedRDD, categoricalValueEncodings);

  Map<Integer,Integer> categoryInfo = categoricalValueEncodings.getCategoryCounts();
  categoryInfo.remove(inputSchema.getTargetFeatureIndex()); // Don't specify target count
  // Need to translate indices to predictor indices
  Map<Integer,Integer> categoryInfoByPredictor = new HashMap<>(categoryInfo.size());
  categoryInfo.forEach((k, v) -> categoryInfoByPredictor.put(inputSchema.featureToPredictorIndex(k), v));

  int seed = RandomManager.getRandom().nextInt();

  RandomForestModel model;
  if (inputSchema.isClassification()) {
    int numTargetClasses =
        categoricalValueEncodings.getValueCount(inputSchema.getTargetFeatureIndex());
    model = RandomForest.trainClassifier(trainPointData,
                                         numTargetClasses,
                                         categoryInfoByPredictor,
                                         numTrees,
                                         "auto",
                                         impurity,
                                         maxDepth,
                                         maxSplitCandidates,
                                         seed);
  } else {
    model = RandomForest.trainRegressor(trainPointData,
                                        categoryInfoByPredictor,
                                        numTrees,
                                        "auto",
                                        impurity,
                                        maxDepth,
                                        maxSplitCandidates,
                                        seed);
  }

  List<IntLongHashMap> treeNodeIDCounts = treeNodeExampleCounts(trainPointData, model);
  IntLongHashMap predictorIndexCounts = predictorExampleCounts(trainPointData, model);

  return rdfModelToPMML(model,
                        categoricalValueEncodings,
                        maxDepth,
                        maxSplitCandidates,
                        impurity,
                        treeNodeIDCounts,
                        predictorIndexCounts);
}
 
Example 20  Project: Utils   File: MapHelper.java
/**
 * is null or its size is 0
 * <p/>
 * <pre>
 * isEmpty(null)   =   true;
 * isEmpty({})     =   true;
 * isEmpty({1, 2})    =   false;
 * </pre>
 *
 * @param sourceMap the map to check; may be null
 * @return true if the map is null or its size is 0, false otherwise.
 */
public static <K, V> boolean isEmpty(Map<K, V> sourceMap) {
    return (sourceMap == null || sourceMap.size() == 0);
}
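
A short usage example of the helper above (assuming MapHelper is on the classpath); note that when the reference is known to be non-null, the JDK's own Map.isEmpty() expresses the same check directly:

Map<String, Integer> nullMap = null;
Map<String, Integer> emptyMap = new HashMap<>();
Map<String, Integer> filledMap = new HashMap<>();
filledMap.put("a", 1);

System.out.println(MapHelper.isEmpty(nullMap));   // true
System.out.println(MapHelper.isEmpty(emptyMap));  // true
System.out.println(MapHelper.isEmpty(filledMap)); // false
System.out.println(filledMap.isEmpty());          // false; equivalent to size() == 0 for non-null maps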