Class org.apache.commons.lang3.mutable.MutableInt source code examples (Demo)

The examples below show how the org.apache.commons.lang3.mutable.MutableInt API is used in real projects, with the relevant code and usage patterns; follow each project link to view the full source on GitHub.
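
For orientation, here is a minimal sketch of the core MutableInt API used throughout the examples; this is illustrative usage, not taken from any of the projects below.

import org.apache.commons.lang3.mutable.MutableInt;

public class MutableIntBasics {
    public static void main(String[] args) {
        MutableInt counter = new MutableInt();   // starts at 0
        counter.increment();                     // 1
        counter.add(4);                          // 5
        counter.setValue(10);                    // overwrite the value
        counter.decrement();                     // 9
        System.out.println(counter.intValue());  // prints 9
    }
}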

Example 1  Project: attic-apex-malhar   File: GPOUtils.java
/**
 * Deserializes a {@link GPOMutable} object from the given byte array at the given offset, which was
 * serialized with the given {@link FieldsDescriptor} with the given fields excluded.
 * @param fieldsDescriptor The {@link FieldsDescriptor} object corresponding to the {@link GPOMutable}.
 * @param excludedFields The fields to exclude from serializing the {@link GPOMutable}.
 * @param serializedGPO The array containing the serialized {@link GPOMutable}.
 * @param offset The offset in the provided array to start deserializing from.
 * @return The deserialized {@link GPOMutable}.
 */
public static GPOMutable deserialize(FieldsDescriptor fieldsDescriptor, Fields excludedFields, byte[] serializedGPO,
    int offset)
{
  GPOMutable gpo = new GPOMutable(fieldsDescriptor);
  MutableInt offsetM = new MutableInt(offset);

  Set<String> exFieldsSet = excludedFields.getFields();

  for (String field : fieldsDescriptor.getFields().getFields()) {
    if (exFieldsSet.contains(field)) {
      continue;
    }

    Type type = fieldsDescriptor.getType(field);
    GPOType gpoType = GPOType.GPO_TYPE_ARRAY[type.ordinal()];
    gpoType.deserialize(gpo, field, serializedGPO, offsetM);
  }

  return gpo;
}
 
Example 2  Project: attic-apex-malhar   File: GPOUtils.java
/**
 * This method deserializes a double from the given byte array from the given offset,
 * and increments the offset appropriately.
 * @param buffer The byte buffer to deserialize from.
 * @param offset The offset to deserialize from.
 * @return The deserialized double.
 */
public static double deserializeDouble(byte[] buffer, MutableInt offset)
{
  int offsetInt = offset.intValue();
  long val = (((long)buffer[0 + offsetInt]) & 0xFFL) << 56 |
      ((((long)buffer[1 + offsetInt]) & 0xFFL) << 48) |
      ((((long)buffer[2 + offsetInt]) & 0xFFL) << 40) |
      ((((long)buffer[3 + offsetInt]) & 0xFFL) << 32) |
      ((((long)buffer[4 + offsetInt]) & 0xFFL) << 24) |
      ((((long)buffer[5 + offsetInt]) & 0xFFL) << 16) |
      ((((long)buffer[6 + offsetInt]) & 0xFFL) << 8) |
      (((long)buffer[7 + offsetInt]) & 0xFFL);

  offset.add(Type.DOUBLE.getByteSize());
  return Double.longBitsToDouble(val);
}
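
All of the GPOUtils (de)serialization helpers share this cursor pattern: the MutableInt offset is a moving read position that each call advances by the size of the value it consumed. A minimal usage sketch, assuming Type.DOUBLE.getByteSize() is 8; the GPOUtils import is omitted since its package depends on the project.

import java.nio.ByteBuffer;
import org.apache.commons.lang3.mutable.MutableInt;

public class DeserializeDoubleSketch {
    public static void main(String[] args) {
        // ByteBuffer writes big-endian by default, matching the layout deserializeDouble expects.
        byte[] buffer = ByteBuffer.allocate(16).putDouble(1.5).putDouble(-2.25).array();
        MutableInt offset = new MutableInt(0);
        double first = GPOUtils.deserializeDouble(buffer, offset);   // offset advances to 8
        double second = GPOUtils.deserializeDouble(buffer, offset);  // offset advances to 16
        System.out.println(first + " " + second + " offset=" + offset.intValue());
    }
}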
 
Example 3  Project: james-project   File: ExternalSession.java
private boolean tryReadFromSocket() throws IOException, InterruptedException {
    final MutableInt status = new MutableInt(0);
    Awaitility
        .waitAtMost(Duration.ONE_MINUTE)
        .pollDelay(new Duration(10, TimeUnit.MILLISECONDS))
        .until(() -> {
            int read = socket.read(readBuffer);
            status.setValue(read);
            return read != 0;
        });
    if (status.intValue() == -1) {
        monitor.debug("Error reading, got -1");
        return false;
    }
    return true;
}
 
Example 4  Project: sqlg   File: GraphStrategy.java
void combineSteps() {
    @SuppressWarnings("unchecked")
    List<Step<?, ?>> steps = new ArrayList(this.traversal.asAdmin().getSteps());
    ListIterator<Step<?, ?>> stepIterator = steps.listIterator();
    MutableInt pathCount = new MutableInt(0);
    while (stepIterator.hasNext()) {
        Step<?, ?> step = stepIterator.next();
        if (isReplaceableStep(step.getClass())) {
            stepIterator.previous();
            boolean keepGoing = handleStep(stepIterator, pathCount);
            if (!keepGoing) {
                break;
            }
        } else {
            //If a step cannot be replaced then it's the end of the optimization.
            break;
        }
    }
}
 
Example 5  Project: count-db   File: TestDataInterface.java
@Test
public void testIteratorWithFilter() {
    DataInterface<Long> dataInterface = createCountDataInterface("testIteratorWithFilter");
    int numOfItems = 100;
    for (int i = 0; i < 100; i++) {
        dataInterface.write(i, (long) i);
    }
    dataInterface.flush();
    //Try with stream
    MutableInt numOfValuesRead = new MutableInt();
    dataInterface.stream(new EvenKeysFilter()).forEach((v) -> numOfValuesRead.increment());
    Assert.assertEquals(numOfItems / 2, numOfValuesRead.intValue());
    //Try with iterator
    numOfValuesRead.setValue(0);
    CloseableIterator<KeyValue<Long>> closeableIterator = dataInterface.iterator(new EvenKeysFilter());
    while (closeableIterator.hasNext()) {
        closeableIterator.next();
        numOfValuesRead.increment();
    }
    closeableIterator.close();
    Assert.assertEquals(numOfItems / 2, numOfValuesRead.intValue());
}
 
Example 6  Project: hbase   File: IPCUtil.java
static void execute(EventLoop eventLoop, Runnable action) {
  if (eventLoop.inEventLoop()) {
    // this is used to prevent stack overflow, you can see the same trick in netty's LocalChannel
    // implementation.
    MutableInt depth = DEPTH.get();
    if (depth.intValue() < MAX_DEPTH) {
      depth.increment();
      try {
        action.run();
      } finally {
        depth.decrement();
      }
    } else {
      eventLoop.execute(action);
    }
  } else {
    eventLoop.execute(action);
  }
}
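
DEPTH and MAX_DEPTH are not part of the snippet above. A plausible shape for those declarations (an assumption for illustration, not the actual HBase source) pairs a ThreadLocal with a MutableInt so each thread tracks its own re-entrancy depth:

// Hypothetical declarations matching the usage in execute(...) above.
private static final int MAX_DEPTH = 4;
private static final ThreadLocal<MutableInt> DEPTH =
    ThreadLocal.withInitial(() -> new MutableInt(0));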
 
/**
 * @throws Exception if an error occurs
 */
@Test
public void addJob_multipleExecution_removeJob() throws Exception {
    final MutableInt id = new MutableInt();
    final MutableInt count = new MutableInt(0);
    final JavaScriptJob job = new BasicJavaScriptJob(50, Integer.valueOf(50)) {
        @Override
        public void run() {
            count.increment();
            if (count.intValue() >= 5) {
                manager_.removeJob(id.intValue());
            }
        }
    };
    id.setValue(manager_.addJob(job, page_));
    manager_.waitForJobs(1000);
    assertEquals(5, count.intValue());
}
 
/**
 * @throws Exception if an error occurs
 */
@Test
public void addJob_multipleExecution_removeAllJobs() throws Exception {
    final MutableInt count = new MutableInt(0);
    final JavaScriptJob job = new BasicJavaScriptJob(50, Integer.valueOf(50)) {
        @Override
        public void run() {
            count.increment();
            if (count.intValue() >= 5) {
                manager_.removeAllJobs();
            }
        }
    };
    manager_.addJob(job, page_);
    manager_.waitForJobs(1000);
    assertEquals(5, count.intValue());
}
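
Tests like the two above use MutableInt rather than a plain int because a local variable captured by an anonymous class or lambda must be effectively final; the final holder reference stays fixed while its value changes. A stand-alone sketch of the same idiom:

import org.apache.commons.lang3.mutable.MutableInt;

public class CapturedCounterSketch {
    public static void main(String[] args) {
        final MutableInt count = new MutableInt(0);
        Runnable task = count::increment;   // the reference is final, the value is not
        for (int i = 0; i < 5; i++) {
            task.run();
        }
        System.out.println(count.intValue());  // prints 5
    }
}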
 
Example 9  Project: sqlg   File: VertexStrategy.java
@SuppressWarnings("unchecked")
void combineSteps() {
    List<Step<?, ?>> steps = new ArrayList(this.traversal.asAdmin().getSteps());
    ListIterator<Step<?, ?>> stepIterator = steps.listIterator();
    MutableInt pathCount = new MutableInt(0);
    while (stepIterator.hasNext()) {
        Step<?, ?> step = stepIterator.next();
        if (this.reset) {
            this.reset = false;
            this.sqlgStep = null;
        }
        if (isReplaceableStep(step.getClass())) {
            stepIterator.previous();
            boolean keepGoing = handleStep(stepIterator, pathCount);
            if (!keepGoing) {
                break;
            }
        } else {
            //restart
            this.sqlgStep = null;
        }
    }
}
 
Example 10  Project: flux   File: WorkflowInterceptorTest.java
@Test
public void testWorkflowInterception_WithActualParameters() throws Throwable {
    /* setup */
    final MutableInt getEventNameCall = new MutableInt(0);
    doAnswer(invocation -> {
        Event argument = (Event) invocation.getArguments()[0];
        final int currentValue = getEventNameCall.intValue();
        getEventNameCall.increment();
        return argument.name() + currentValue;
    }).when(localContext).generateEventName(any(Event.class));

    final Method invokedMethod = simpleWorkflowForTest.getClass().getDeclaredMethod("simpleDummyWorkflow", StringEvent.class, IntegerEvent.class);
    final StringEvent testStringEvent = new StringEvent("someEvent");
    final IntegerEvent testIntegerEvent = new IntegerEvent(1);
    /* invoke method */
    workflowInterceptor.invoke(dummyInvocation(invokedMethod,new Object[]{testStringEvent,testIntegerEvent}));
    /* verifications */
    verify(localContext,times(1)).addEvents(
        new EventData(SimpleWorkflowForTest.STRING_EVENT_NAME + "0", StringEvent.class.getName(), objectMapper.writeValueAsString(testStringEvent), CLIENT),
        new EventData(SimpleWorkflowForTest.INTEGER_EVENT_NAME + "1", IntegerEvent.class.getName(), objectMapper.writeValueAsString(testIntegerEvent), CLIENT)
    );
}
 
Example 11  Project: count-db   File: TestDataInterface.java
@Test
public void testValuesIteratorWithFilter() {
    DataInterface<Long> dataInterface = createCountDataInterface("testValuesIteratorWithFilter");
    int numOfItems = 100;
    for (int i = 0; i < 100; i++) {
        dataInterface.write(i, (long) i);
    }
    dataInterface.flush();
    //Try with stream
    MutableInt numOfValuesRead = new MutableInt();
    dataInterface.streamValues(new EvenKeysFilter()).forEach((v) -> numOfValuesRead.increment());
    Assert.assertEquals(numOfItems / 2, numOfValuesRead.intValue());
    //Try with iterator
    numOfValuesRead.setValue(0);
    CloseableIterator<Long> closeableIterator = dataInterface.valueIterator(new EvenKeysFilter());
    while (closeableIterator.hasNext()) {
        closeableIterator.next();
        numOfValuesRead.increment();
    }
    closeableIterator.close();
    Assert.assertEquals(numOfItems / 2, numOfValuesRead.intValue());
}
 
Example 12  Project: gatk   File: AllelePileupCounter.java
/**
 *
 * @param referenceAllele allele to treat as reference.  Create with {@link Allele#create(String, boolean)}, where
 *                  second parameter is {@code true}.  Never {@code null}.   If the reference is symbolic, exception will be thrown.
 * @param alternateAlleles List of alleles to treat as the alternates.  Easy to create with {@link Allele#create(String, boolean)}, where
 *                  second parameter is {@code false}.  Never {@code null}
 * @param minBaseQualityCutoff minimum base quality for the bases that match the allele in order to be counted.
 *                             Must be positive or zero.
 */
public AllelePileupCounter(final Allele referenceAllele, final List<Allele> alternateAlleles, int minBaseQualityCutoff) {

    this.referenceAllele = Utils.nonNull(referenceAllele);
    this.alternateAlleles = Utils.nonNull(alternateAlleles);

    // Additional checks
    if (referenceAllele.isSymbolic()) {
        throw new UserException.BadInput("A symbolic reference allele was specified.");
    }

    Utils.validateArg(!referenceAllele.isNonReference(), "Reference allele was non-reference: " + referenceAllele);
    Utils.validateArg(alternateAlleles.stream().allMatch(a -> a.isNonReference()),
            "One or more alternate alleles were reference: " + alternateAlleles.stream().map(a-> a.toString()).collect(Collectors.joining(", ")));

    this.minBaseQualityCutoff = ParamUtils.isPositiveOrZero(minBaseQualityCutoff, "Minimum base quality must be positive or zero.");

    alternateAlleles.forEach(a -> countMap.put(a, new MutableInt(0)));
    countMap.put(referenceAllele, new MutableInt(0));
}
 
/**
 * Checks whether the given word contains any character outside the allowed set.
 * @param anno the word annotation to check
 * @param badChars the bad-character counter, incremented once for every disallowed character found
 * @return true if the word contains at least one disallowed character, false otherwise
 */
private boolean isBadWord(WordAnnotation anno, MutableInt badChars) {
	final String coveredText = anno.getCoveredText();
	boolean foundOneBadChar = false;
	for(int i=0; i< coveredText.length(); i++) {
		boolean found = false;
		char c = coveredText.charAt(i);
		for(char a:this.allowedChars) {
			if(a==c) 
				found = true;
		}
		if(!found) {
			badChars.increment();
			foundOneBadChar = true;
		}
	}
	return foundOneBadChar;
}
 
Example 14  Project: hbase   File: ZkSplitLogWorkerCoordination.java
/**
 * Submit a log split task to executor service
 * @param curTask task to submit
 * @param curTaskZKVersion current version of task
 */
void submitTask(final String curTask, final int curTaskZKVersion, final int reportPeriod) {
  final MutableInt zkVersion = new MutableInt(curTaskZKVersion);

  CancelableProgressable reporter = new CancelableProgressable() {
    private long last_report_at = 0;

    @Override
    public boolean progress() {
      long t = EnvironmentEdgeManager.currentTime();
      if ((t - last_report_at) > reportPeriod) {
        last_report_at = t;
        int latestZKVersion =
            attemptToOwnTask(false, watcher, server.getServerName(), curTask,
              zkVersion.intValue());
        if (latestZKVersion < 0) {
          LOG.warn("Failed to heartbeat the task" + curTask);
          return false;
        }
        zkVersion.setValue(latestZKVersion);
      }
      return true;
    }
  };
  ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails =
      new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails();
  splitTaskDetails.setTaskNode(curTask);
  splitTaskDetails.setCurTaskZKVersion(zkVersion);

  WALSplitterHandler hsh =
      new WALSplitterHandler(server, this, splitTaskDetails, reporter,
          this.tasksInProgress, splitTaskExecutor);
  server.getExecutorService().submit(hsh);
}
 
Example 15  Project: Flink-CEPplus   File: KinesisProxyTest.java
@Test
public void testGetRecordsRetry() throws Exception {
	Properties kinesisConsumerConfig = new Properties();
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_REGION, "us-east-1");

	final GetRecordsResult expectedResult = new GetRecordsResult();
	MutableInt retries = new MutableInt();
	final Throwable[] retriableExceptions = new Throwable[] {
		new AmazonKinesisException("mock"),
	};

	AmazonKinesisClient mockClient = mock(AmazonKinesisClient.class);
	Mockito.when(mockClient.getRecords(any())).thenAnswer(new Answer<GetRecordsResult>() {
		@Override
		public GetRecordsResult answer(InvocationOnMock invocation) throws Throwable{
			if (retries.intValue() < retriableExceptions.length) {
				retries.increment();
				throw retriableExceptions[retries.intValue() - 1];
			}
			return expectedResult;
		}
	});

	KinesisProxy kinesisProxy = new KinesisProxy(kinesisConsumerConfig);
	Whitebox.getField(KinesisProxy.class, "kinesisClient").set(kinesisProxy, mockClient);

	GetRecordsResult result = kinesisProxy.getRecords("fakeShardIterator", 1);
	assertEquals(retriableExceptions.length, retries.intValue());
	assertEquals(expectedResult, result);
}
 
/**
 * The stream calls this to report to the strategy how many blocks are currently free.
 * @param freeBlockNum the current number of free blocks; must not be negative
 */
@Override
public void currentFreeBlocks(int freeBlockNum)
{
  if (freeBlockNum < 0) {
    throw new IllegalArgumentException("The number of free blocks cannot be less than zero.");
  }
  freeBlockNumQueue.add(new MutableInt(freeBlockNum));
}
 
Example 17  Project: attic-apex-malhar   File: GPOUtils.java
/**
 * This method deserializes a short from the given byte array from the given offset,
 * and increments the offset appropriately.
 * @param buffer The byte buffer to deserialize from.
 * @param offset The offset to deserialize from.
 * @return The deserialized short.
 */
public static short deserializeShort(byte[] buffer, MutableInt offset)
{
  int offsetInt = offset.intValue();
  short val = (short)(((((int)buffer[0 + offsetInt]) & 0xFF) << 8) |
      (((int)buffer[1 + offsetInt]) & 0xFF));

  offset.add(Type.SHORT.getByteSize());
  return val;
}
 
Example 18  Project: astor   File: ObjectUtils.java
/**
 * Find the most frequently occurring item.
 * 
 * @param <T> type of values processed by this method
 * @param items to check
 * @return most populous T, {@code null} if non-unique or no items supplied
 * @since 3.0.1
 */
public static <T> T mode(T... items) {
    if (ArrayUtils.isNotEmpty(items)) {
        HashMap<T, MutableInt> occurrences = new HashMap<T, MutableInt>(items.length);
        for (T t : items) {
            MutableInt count = occurrences.get(t);
            if (count == null) {
                occurrences.put(t, new MutableInt(1));
            } else {
                count.increment();
            }
        }
        T result = null;
        int max = 0;
        for (Map.Entry<T, MutableInt> e : occurrences.entrySet()) {
            int cmp = e.getValue().intValue();
            if (cmp == max) {
                result = null;
            } else if (cmp > max) {
                max = cmp;
                result = e.getKey();
            }
        }
        return result;
    }
    return null;
}
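
The HashMap<T, MutableInt> idiom above avoids allocating a new boxed Integer on every increment. A short usage sketch, following the behavior of the code shown (the same method ships in commons-lang3 since 3.0.1):

import org.apache.commons.lang3.ObjectUtils;

public class ModeSketch {
    public static void main(String[] args) {
        System.out.println(ObjectUtils.mode("a", "b", "a", "c"));  // a
        System.out.println(ObjectUtils.mode("a", "b"));            // null: no unique most-frequent item
    }
}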
 
Example 19  Project: teku   File: SimpleOffsetSerializer.java
private static void deserializeVariableElementList(
    SSZReader reader,
    MutableInt bytesPointer,
    Class listElementType,
    int bytesEndByte,
    SSZMutableList newSSZList)
    throws InstantiationException, InvocationTargetException, IllegalAccessException {

  int currentObjectStartByte = bytesPointer.intValue();

  if (currentObjectStartByte == bytesEndByte) {
    return;
  }

  List<Integer> offsets = new ArrayList<>();

  int variablePartStartByte = currentObjectStartByte + readOffset(reader, bytesPointer);
  offsets.add(variablePartStartByte);

  while (bytesPointer.intValue() < variablePartStartByte) {
    offsets.add(readOffset(reader, bytesPointer));
  }

  for (int i = 0; i < offsets.size(); i++) {
    // Get the end byte of current variable size container either using offset
    // or the end of the outer object you're in
    int currentObjectEndByte =
        (i + 1) == offsets.size() ? bytesEndByte : currentObjectStartByte + offsets.get(i + 1);
    newSSZList.add(
        deserializeContainer(listElementType, reader, bytesPointer, currentObjectEndByte));
  }
}
 
Example 20  Project: attic-apex-malhar   File: GPOUtils.java
/**
 * This method serializes the given long to the given byte buffer to the given offset,
 * the method also increments the offset appropriately.
 * @param val The value to serialize.
 * @param buffer The byte buffer to serialize to.
 * @param offset The offset in the buffer to serialize to and also to increment appropriately.
 */
public static void serializeLong(long val, byte[] buffer, MutableInt offset)
{
  int offsetInt = offset.intValue();
  buffer[0 + offsetInt] = (byte)((val >> 56) & 0xFFL);
  buffer[1 + offsetInt] = (byte)((val >> 48) & 0xFFL);
  buffer[2 + offsetInt] = (byte)((val >> 40) & 0xFFL);
  buffer[3 + offsetInt] = (byte)((val >> 32) & 0xFFL);
  buffer[4 + offsetInt] = (byte)((val >> 24) & 0xFFL);
  buffer[5 + offsetInt] = (byte)((val >> 16) & 0xFFL);
  buffer[6 + offsetInt] = (byte)((val >> 8) & 0xFFL);
  buffer[7 + offsetInt] = (byte)(val & 0xFFL);

  offset.add(Type.LONG.getByteSize());
}
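
The write-side helpers advance the same MutableInt cursor. A minimal sketch of packing two longs into one buffer with the method above, assuming Type.LONG.getByteSize() is 8; as before, the GPOUtils import is omitted.

import org.apache.commons.lang3.mutable.MutableInt;

public class SerializeLongSketch {
    public static void main(String[] args) {
        byte[] buffer = new byte[16];
        MutableInt offset = new MutableInt(0);
        GPOUtils.serializeLong(42L, buffer, offset);   // offset advances to 8
        GPOUtils.serializeLong(-7L, buffer, offset);   // offset advances to 16
        System.out.println("offset=" + offset.intValue());
    }
}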
 
Example 21  Project: attic-apex-malhar   File: GPOUtils.java
/**
 * This method serializes the given float to the given byte buffer to the given offset,
 * the method also increments the offset appropriately.
 * @param valf The value to serialize.
 * @param buffer The byte buffer to serialize to.
 * @param offset The offset in the buffer to serialize to and also to increment appropriately.
 */
public static void serializeFloat(float valf, byte[] buffer, MutableInt offset)
{
  int offsetInt = offset.intValue();
  int val = Float.floatToIntBits(valf);

  buffer[0 + offsetInt] = (byte)((val >> 24) & 0xFF);
  buffer[1 + offsetInt] = (byte)((val >> 16) & 0xFF);
  buffer[2 + offsetInt] = (byte)((val >> 8) & 0xFF);
  buffer[3 + offsetInt] = (byte)(val & 0xFF);

  offset.add(Type.FLOAT.getByteSize());
}
 
Example 22  Project: hbase   File: ProcedureTree.java
private void checkReady(Entry rootEntry, Map<Long, Entry> remainingProcMap) {
  if (ProcedureUtil.isFinished(rootEntry.proc)) {
    if (!rootEntry.subProcs.isEmpty()) {
      LOG.error("unexpected active children for root-procedure: {}", rootEntry);
      rootEntry.subProcs.forEach(e -> LOG.error("unexpected active children: {}", e));
      addAllToCorruptedAndRemoveFromProcMap(rootEntry, remainingProcMap);
    } else {
      addAllToValidAndRemoveFromProcMap(rootEntry, remainingProcMap);
    }
    return;
  }
  Map<Integer, List<Entry>> stackId2Proc = new HashMap<>();
  MutableInt maxStackId = new MutableInt(Integer.MIN_VALUE);
  collectStackId(rootEntry, stackId2Proc, maxStackId);
  // the stack ids should start from 0 and increase by one every time
  boolean valid = true;
  for (int i = 0; i <= maxStackId.intValue(); i++) {
    List<Entry> entries = stackId2Proc.get(i);
    if (entries == null) {
      LOG.error("Missing stack id {}, max stack id is {}, root procedure is {}", i, maxStackId,
        rootEntry);
      valid = false;
    } else if (entries.size() > 1) {
      LOG.error("Multiple procedures {} have the same stack id {}, max stack id is {}," +
        " root procedure is {}", entries, i, maxStackId, rootEntry);
      valid = false;
    }
  }
  if (valid) {
    addAllToValidAndRemoveFromProcMap(rootEntry, remainingProcMap);
  } else {
    addAllToCorruptedAndRemoveFromProcMap(rootEntry, remainingProcMap);
  }
}
 
Example 23  Project: systemds   File: TemplateUtils.java
public static boolean isValidNumVectorIntermediates(CNode node, CNode main, Map<Long, Set<Long>> parents, Map<Long, Pair<Long, MutableInt>> inUse, Set<Long> inUse2, int count) {
	if( count <= 1 ) return false;
	IDSequence buff = new IDSequence(true, count-1); //zero based
	inUse.clear(); inUse2.clear();
	node.resetVisitStatus();
	return rIsValidNumVectorIntermediates(node, main, parents, inUse, inUse2, buff);
}
 
Example 24  Project: hbase   File: ZKConnectionRegistry.java
private static void tryComplete(MutableInt remaining, HRegionLocation[] locs,
    CompletableFuture<RegionLocations> future) {
  remaining.decrement();
  if (remaining.intValue() > 0) {
    return;
  }
  future.complete(new RegionLocations(locs));
}
 
Example 25  Project: spliceengine   File: ConcatenationOperatorNode.java
/**
  * Check if this node always evaluates to the same value. If so, return
  * a constant node representing the known result.
  *
  * @return a constant node representing the result of this concatenation
  * operation, or {@code this} if the result is not known up front
  */
 ValueNode evaluateConstantExpressions() throws StandardException {
     if (leftOperand instanceof CharConstantNode &&
             rightOperand instanceof CharConstantNode) {
         CharConstantNode leftOp = (CharConstantNode) leftOperand;
         CharConstantNode rightOp = (CharConstantNode) rightOperand;
         StringDataValue leftValue = (StringDataValue) leftOp.getValue();
         StringDataValue rightValue = (StringDataValue) rightOp.getValue();

         StringDataValue resultValue = null;
         DataTypeDescriptor resultDTD = getTypeServices();
         if (resultDTD == null) {
             TypeId resultTypeId =
                 resolveConcatOperationResultType(leftOp.getTypeServices(),
                                                  rightOp.getTypeServices(),
                                                  new MutableInt());
             resultValue = (StringDataValue)
                 DataValueFactoryImpl.getNullDVD(resultTypeId.getTypeFormatId());
         } else {
             resultValue = (StringDataValue) resultDTD.getNull();
         }

         resultValue.concatenate(leftValue, rightValue, resultValue);

         return (ValueNode) getNodeFactory().getNode(
                 C_NodeTypes.CHAR_CONSTANT_NODE,
                 resultValue.getString(),
                 getContextManager());
     }

     return this;
 }
 
Example 26  Project: systemds   File: Explain.java
private static String explainLineageItemNR(LineageItem item, int level) {
	//NOTE: in contrast to similar non-recursive functions like resetVisitStatusNR,
	// we maintain a more complex stack to ensure DFS ordering where current nodes
	// are added after the subtree underneath is processed (backwards compatibility)
	Stack<LineageItem> stackItem = new Stack<>();
	Stack<MutableInt> stackPos = new Stack<>();
	stackItem.push(item); stackPos.push(new MutableInt(0));
	StringBuilder sb = new StringBuilder();
	while( !stackItem.empty() ) {
		LineageItem tmpItem = stackItem.peek();
		MutableInt tmpPos = stackPos.peek();
		//check ascent condition - no item processing
		if( tmpItem.isVisited() ) {
			stackItem.pop(); stackPos.pop();
		}
		//check ascent condition - append item
		else if( tmpItem.getInputs() == null 
			|| tmpItem.getInputs().length <= tmpPos.intValue() ) {
			sb.append(createOffset(level));
			sb.append(tmpItem.toString());
			sb.append('\n');
			stackItem.pop(); stackPos.pop();
			tmpItem.setVisited();
		}
		//check descent condition
		else if( tmpItem.getInputs() != null ) {
			stackItem.push(tmpItem.getInputs()[tmpPos.intValue()]);
			tmpPos.increment();
			stackPos.push(new MutableInt(0));
		}
	}
	return sb.toString();
}
 
Example 27  Project: alfresco-repository   File: BehaviourFilterImpl.java
private ClassFilter getClassFilter(QName className)
{
    ParameterCheck.mandatory("className", className);

    // Check the global, first
    if (!isEnabled())
    {
        return null;
    }

    if (!TransactionalResourceHelper.isResourcePresent(KEY_CLASS_FILTERS))
    {
        // Nothing was disabled
        return null;
    }
    Map<ClassFilter, MutableInt> classFilters = TransactionalResourceHelper.getMap(KEY_CLASS_FILTERS);
    for (ClassFilter classFilter : classFilters.keySet())
    {
        if (classFilter.getClassName().equals(className))
        {
            MutableInt filterNumber = classFilters.get(classFilter);
            if (filterNumber != null && filterNumber.intValue() > 0 )
            {
                return classFilter;
            }
            break;
        }
    }
    return null;
}
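
Here each MutableInt is a per-class disable counter: a filter is considered active only while its count is positive, so nested disable/enable calls balance out. A framework-free sketch of that reference-counting idiom (class and method names are illustrative, not the Alfresco API):

import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.mutable.MutableInt;

public class DisableCounterSketch {
    private final Map<String, MutableInt> disabled = new HashMap<>();

    public void disable(String className) {
        disabled.computeIfAbsent(className, k -> new MutableInt(0)).increment();
    }

    public void enable(String className) {
        MutableInt count = disabled.get(className);
        if (count != null && count.intValue() > 0) {
            count.decrement();
        }
    }

    public boolean isDisabled(String className) {
        MutableInt count = disabled.get(className);
        return count != null && count.intValue() > 0;
    }
}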
 
private void loadData(final int maxCount)
{
    final MutableInt doneCount = new MutableInt(0);
    // Batches of 1000 objects
    RetryingTransactionCallback<Integer> makeNodesCallback = new RetryingTransactionCallback<Integer>()
    {
        public Integer execute() throws Throwable
        {
            for (int i = 0; i < 1000; i++)
            {
                // We don't need to write anything
                String contentUrl = FileContentStore.createNewFileStoreUrl();
                ContentData contentData = new ContentData(contentUrl, MimetypeMap.MIMETYPE_TEXT_PLAIN, 10, "UTF-8");
                nodeHelper.makeNode(contentData);
                
                int count = doneCount.intValue();
                count++;
                doneCount.setValue(count);
                
                // Do some reporting
                if (count % 1000 == 0)
                {
                    System.out.println(String.format("   " + (new Date()) + " Total created: %6d", count));
                }
                
                // Double check for shutdown
                if (vmShutdownListener.isVmShuttingDown())
                {
                    break;
                }
            }
            return maxCount;
        }
    };
    int repetitions = (int) Math.floor((double)maxCount / 1000.0);
    for (int i = 0; i < repetitions; i++)
    {
        transactionService.getRetryingTransactionHelper().doInTransaction(makeNodesCallback);
    }
}
 
@Before
public void setUp() throws Exception
{
    failCount = new MutableInt(0);
    transactionService = ctx.getBean("transactionComponent", TransactionService.class);
    properties = ctx.getBean("global-properties", Properties.class);

    String dbPoolMaxProp = properties.getProperty("db.pool.max");
    if (PropertyCheck.isValidPropertyString(dbPoolMaxProp))
    {
        dbPoolMax = Integer.parseInt(dbPoolMaxProp);
    }
    else
    {
        throw new IllegalArgumentException("The db.pool.max property is not valid.");
    }
    
    String dbPoolWaitMaxProp = properties.getProperty("db.pool.wait.max");
    if (PropertyCheck.isValidPropertyString(dbPoolWaitMaxProp))
    {
        dbPoolWaitMax = Integer.parseInt(dbPoolWaitMaxProp);
    }
    else
    {
        throw new IllegalArgumentException("The db.pool.wait.max property is not valid.");
    }
    
    dbPoolWaitMax = dbPoolWaitMax == -1 ? 100 : dbPoolWaitMax;
}
 
Example 30  Project: obevo   File: SameSchemaDeployExecutionDao.java
public SameSchemaDeployExecutionDao(SqlExecutor sqlExecutor, DbMetadataManager dbMetadataManager, DbPlatform platform, ImmutableSet<PhysicalSchema> physicalSchemas, String tableSqlSuffix, DbEnvironment env, ChangeTypeBehaviorRegistry changeTypeBehaviorRegistry) {
    this.sqlExecutor = sqlExecutor;
    this.jdbc = sqlExecutor.getJdbcTemplate();
    this.dbMetadataManager = dbMetadataManager;
    this.platform = platform;
    this.physicalSchemas = physicalSchemas;
    this.nextIdBySchema = physicalSchemas.toMap(Functions.<PhysicalSchema>getPassThru(), new Function<PhysicalSchema, MutableInt>() {
        @Override
        public MutableInt valueOf(PhysicalSchema object) {
            return new MutableInt(1);
        }
    }).toImmutable();
    this.tableSqlSuffix = tableSqlSuffix;
    this.env = env;
    this.changeTypeBehaviorRegistry = changeTypeBehaviorRegistry;

    Function<String, String> convertDbObjectName = platform.convertDbObjectName();
    this.deployExecutionTableName = convertDbObjectName.valueOf(DEPLOY_EXECUTION_TABLE_NAME);
    this.deployExecutionAttributeTableName = convertDbObjectName.valueOf(DEPLOY_EXECUTION_ATTRIBUTE_TABLE_NAME);
    this.idColName = convertDbObjectName.valueOf("ID");
    this.statusColName = convertDbObjectName.valueOf("STATUS");
    this.deployTimeColName = convertDbObjectName.valueOf("DEPLOYTIME");
    this.executorIdColName = convertDbObjectName.valueOf("EXECUTORID");
    this.toolVersionColName = convertDbObjectName.valueOf("TOOLVERSION");
    this.initCommandColName = convertDbObjectName.valueOf("INIT_COMMAND");
    this.rollbackCommandColName = convertDbObjectName.valueOf("ROLLBACK_COMMAND");
    this.requesterIdColName = convertDbObjectName.valueOf("REQUESTERID");
    this.reasonColName = convertDbObjectName.valueOf("REASON");
    this.productVersionColName = convertDbObjectName.valueOf("PRODUCTVERSION");
    this.dbSchemaColName = convertDbObjectName.valueOf("DBSCHEMA");
    this.allMainColumns = Lists.immutable.with(idColName, statusColName, deployTimeColName, executorIdColName, toolVersionColName, initCommandColName, rollbackCommandColName, requesterIdColName, reasonColName, dbSchemaColName, productVersionColName);

    this.deployExecutionIdColName = convertDbObjectName.valueOf("DEPLOYEXECUTIONID");
    this.attrNameColName = convertDbObjectName.valueOf("ATTRNAME");
    this.attrValueColName = convertDbObjectName.valueOf("ATTRVALUE");
    this.allAttrColumns = Lists.immutable.with(deployExecutionIdColName, attrNameColName, attrValueColName);
}
 