The following are examples of com.google.common.base.Optional#isPresent() in use, collected from various open-source projects.
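Before the project samples, here is a minimal, self-contained sketch (plain Guava; the class and variable names are illustrative) of the isPresent() idiom that all of the examples below revolve around:

import com.google.common.base.Optional;

public class IsPresentDemo {
    public static void main(String[] args) {
        Optional<String> present = Optional.of("value");
        Optional<String> absent = Optional.absent();
        // isPresent() guards get(), which throws IllegalStateException when absent
        if (present.isPresent()) {
            System.out.println(present.get()); // prints "value"
        }
        System.out.println(absent.isPresent()); // false
        // fromNullable() lifts a possibly-null reference into an Optional
        Optional<String> maybe = Optional.fromNullable(System.getenv("SOME_UNSET_VAR"));
        System.out.println(maybe.isPresent()); // false when the variable is unset
    }
}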
/**
* Returns whether the source has zero bytes. The default implementation returns true if
* {@link #sizeIfKnown} returns zero, falling back to opening a stream and checking for EOF if the
* size is not known.
*
* <p>Note that, in cases where {@code sizeIfKnown} returns zero, it is <i>possible</i> that bytes
* are actually available for reading. (For example, some special files may return a size of 0
* despite actually having content when read.) This means that a source may return {@code true}
* from {@code isEmpty()} despite having readable content.
*
* @throws IOException if an I/O error occurs
* @since 15.0
*/
public boolean isEmpty() throws IOException {
Optional<Long> sizeIfKnown = sizeIfKnown();
if (sizeIfKnown.isPresent() && sizeIfKnown.get() == 0L) {
return true;
}
Closer closer = Closer.create();
try {
InputStream in = closer.register(openStream());
return in.read() == -1;
} catch (Throwable e) {
throw closer.rethrow(e);
} finally {
closer.close();
}
}
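For context, a short usage sketch of the isEmpty() method above, using Guava's standard ByteSource.wrap():

import java.io.IOException;
import com.google.common.io.ByteSource;

public class IsEmptyDemo {
    public static void main(String[] args) throws IOException {
        // wrap() produces a source with a known size, so isEmpty() short-circuits
        // on sizeIfKnown() without ever opening a stream
        System.out.println(ByteSource.wrap(new byte[0]).isEmpty());       // true
        System.out.println(ByteSource.wrap(new byte[] {1, 2}).isEmpty()); // false
    }
}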
@Override
protected boolean isResponsible(Map<Object, Object> context, EObject eObject) {
if (eObject.eClass().getEPackage() != JSONPackage.eINSTANCE) {
return false;
}
// this validator extension only applies to package.json files located in the root of a project
URI pckjsonUri = eObject.eResource().getURI();
String fileName = fileExtensionCalculator.getFilenameWithoutXpectExtension(pckjsonUri);
if (!fileName.equals(IN4JSProject.PACKAGE_JSON)) {
return false;
}
Optional<? extends IN4JSProject> optProject = n4jsCore.findProject(pckjsonUri);
if (!optProject.isPresent()) {
LOGGER.error("no containing project found for package.json URI: " + pckjsonUri);
return false;
}
IN4JSProject project = optProject.get();
URI expectedLocation = project.getLocation().appendSegment(IN4JSProject.PACKAGE_JSON).toURI();
// In test Xpect scenarios (see bundle packagejson.xpect.tests) package.json files can be named package.json.xt
URI pckjsonUriWithoutXpectExtension = pckjsonUri.trimSegments(1).appendSegment(fileName);
return expectedLocation.equals(pckjsonUriWithoutXpectExtension);
}
/**
* Convert a given {@link Config} instance to a {@link Properties} instance.
* If a config value is not of String type, the method falls back to reading it as a generic
* object via {@link com.typesafe.config.Config#getAnyRef()} and returns its JSON representation as a string.
*
* @param config the given {@link Config} instance
* @param prefix an optional prefix; if present, only properties whose name starts with the prefix
* will be returned.
* @return a {@link Properties} instance
*/
public static Properties configToProperties(Config config, Optional<String> prefix) {
Properties properties = new Properties();
if (config != null) {
Config resolvedConfig = config.resolve();
for (Map.Entry<String, ConfigValue> entry : resolvedConfig.entrySet()) {
if (!prefix.isPresent() || entry.getKey().startsWith(prefix.get())) {
String propKey = desanitizeKey(entry.getKey());
String propVal;
try {
propVal = resolvedConfig.getString(entry.getKey());
} catch (ConfigException.WrongType wrongType) {
propVal = new Gson().toJson(resolvedConfig.getAnyRef(entry.getKey()));
}
properties.setProperty(propKey, propVal);
}
}
}
return properties;
}
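A hedged usage sketch for configToProperties(), assuming desanitizeKey() leaves ordinary dotted keys unchanged (the keys and prefix are illustrative):

import java.util.Properties;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

Config config = ConfigFactory.parseString(
        "db.host=localhost\ndb.port=5432\napp.name=demo");
// Keep only the keys under "db."; app.name is filtered out by the prefix check
Properties props = configToProperties(config, Optional.of("db."));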
private TaskResult run(HttpClient httpClient)
{
Optional<String> secretUri = httpSecrets.getSecretOptional("uri");
String rawUri;
boolean uriIsSecret;
if (secretUri.isPresent()) {
uriIsSecret = true;
rawUri = secretUri.get();
}
else {
UserSecretTemplate uriTemplate = UserSecretTemplate.of(params.get("_command", String.class));
uriIsSecret = uriTemplate.containsSecrets();
rawUri = uriTemplate.format(context.getSecrets());
}
URI uri = URI.create(rawUri);
boolean storeContent = params.get("store_content", boolean.class, false);
ContentResponse response = runHttp(httpClient, uri, uriIsSecret);
return result(response, storeContent);
}
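As an aside, when no extra bookkeeping like uriIsSecret is needed, Guava's Optional.or() collapses the same present/absent fallback into a single call (the fallback URI below is hypothetical):

import com.google.common.base.Optional;

public class OrFallbackDemo {
    public static void main(String[] args) {
        Optional<String> secretUri = Optional.absent();
        // or() returns the contained value when present, else the supplied default
        System.out.println(secretUri.or("https://example.com/fallback"));
    }
}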
@Override
public void onCallback(String childId, boolean success) {
Optional<Entity> child = Iterables.tryFind(getChildren(), EntityPredicates.idEqualTo(childId));
if (child.isPresent()) {
((AsyncEntity)child.get()).onCallback(success);
} else {
LOG.warn("Child not found with resourceId '"+childId+"'; not injecting state from callback");
}
Optional<Entity> unstartedVm = Iterables.tryFind(getChildren(), EntityPredicates.attributeSatisfies(Attributes.SERVICE_STATE_EXPECTED,
new Predicate<Lifecycle.Transition>() {
@Override public boolean apply(Transition input) {
return input == null || input.getState() == Lifecycle.STARTING;
}}));
if (!unstartedVm.isPresent()) {
// No VMs are still starting; we are finished starting
ServiceStateLogic.ServiceNotUpLogic.clearNotUpIndicator(this, START.getName());
ServiceStateLogic.setExpectedState(this, Lifecycle.RUNNING);
}
}
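The Iterables.tryFind() calls above are the usual way a Guava Optional enters this pattern; a minimal standalone sketch:

import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

public class TryFindDemo {
    public static void main(String[] args) {
        Optional<String> hit = Iterables.tryFind(
                ImmutableList.of("alpha", "beta", "gamma"),
                new Predicate<String>() {
                    @Override public boolean apply(String s) {
                        return s.startsWith("b");
                    }
                });
        // tryFind() returns the first match, or absent() if nothing matches
        System.out.println(hit.isPresent() ? hit.get() : "none"); // prints "beta"
    }
}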
/**
* Returns the singular descriptor used by all non-null messages in the list.
*
* <p>If there is no descriptor, or more than one, returns {@code Optional.absent()}.
*/
static Optional<Descriptor> getSingleDescriptor(Iterable<? extends Message> messages) {
Optional<Descriptor> optDescriptor = Optional.absent();
for (Message message : messages) {
if (message != null) {
Descriptor descriptor = message.getDescriptorForType();
if (!optDescriptor.isPresent()) {
optDescriptor = Optional.of(descriptor);
} else if (descriptor != optDescriptor.get()) {
// Two different descriptors - abandon ship.
return Optional.absent();
}
}
}
return optDescriptor;
}
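A sketch of the contract, using two protobuf well-known types purely as stand-ins and assuming getSingleDescriptor() is in scope:

import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.Descriptors.Descriptor;
import com.google.protobuf.Duration;
import com.google.protobuf.Timestamp;

// All non-null messages share one type -> that type's descriptor is returned
Optional<Descriptor> one = getSingleDescriptor(ImmutableList.of(
        Timestamp.getDefaultInstance(), Timestamp.getDefaultInstance()));
// Two different message types -> absent()
Optional<Descriptor> mixed = getSingleDescriptor(ImmutableList.of(
        Timestamp.getDefaultInstance(), Duration.getDefaultInstance()));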
@Override
public TypeRef caseIntersectionTypeExpression(IntersectionTypeExpression intersection) {
final Optional<List<TypeRef>> typeRefsBounds = mapAndCompare(intersection.getTypeRefs(), this::bound);
if (typeRefsBounds.isPresent()) {
final IntersectionTypeExpression result = TypeUtils
.createNonSimplifiedIntersectionType(typeRefsBounds.get());
TypeUtils.copyTypeModifiers(result, intersection);
return result;
}
return intersection;
}
@NotNull
public static Optional<Double> resourceValueDouble(@NotNull final Optional<Resource> resource) {
if (resource.isPresent()) {
if (resource.get().getType() != Value.Type.SCALAR) {
throw new IllegalArgumentException("Resource must be of type SCALAR");
}
return Optional.of(resource.get().getScalar().getValue());
} else {
return Optional.absent();
}
}
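Assuming the Resource type here is Mesos's org.apache.mesos.Protos.Resource, a usage sketch:

import com.google.common.base.Optional;
import org.apache.mesos.Protos;

Protos.Resource cpus = Protos.Resource.newBuilder()
        .setName("cpus")
        .setType(Protos.Value.Type.SCALAR)
        .setScalar(Protos.Value.Scalar.newBuilder().setValue(4.0))
        .build();
Optional<Double> value = resourceValueDouble(Optional.of(cpus));                  // Optional.of(4.0)
Optional<Double> none = resourceValueDouble(Optional.<Protos.Resource>absent()); // absent()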
@Override
public Void call(Optional<PersistentContainer> oContainer) {
if (oContainer.isPresent()) {
JsonObject jsonObject = new JsonObject()
.put("message", "Container exist");
throw new HttpRequestValidationException(HTTP_NOT_FOUND, jsonObject);
}
return null;
}
@Override
public TaskResult runTask()
{
Optional<String> paramServerType = systemConfig.getOptional("param_server.type", String.class);
if (!paramServerType.isPresent()) {
throw new ConfigException("param_server.type is required to use this operator.");
}
Config localParams = request.getLocalConfig();
List<String> keys = localParams.getKeys();
if (keys.size() == 0) {
throw new ConfigException("no key is set.");
}
paramServerClient.doTransaction(client -> {
for (String key : keys) {
// This operator is expected to take String-like scalar values as parameters.
// e.g.
// param_set>:
//   key1: value1
//   key2: value2
//
// So if the user specifies an array-like value, `localParams.getOptional(key, String.class)` throws an error.
// If we ever want to support array-like values, we would need to create another operator.
Optional<String> value = localParams.getOptional(key, String.class);
if (value.isPresent()) {
paramServerClient.set(key, value.get(), request.getSiteId());
}
}
});
return TaskResult.empty(request);
}
@Timed
public void updateKnownAgentLastSeenAt(String clusterName, String agentId, long time) {
Optional<BaragonKnownAgentMetadata> maybeAgent = getKnownAgentMetadata(clusterName, agentId);
if (maybeAgent.isPresent()) {
maybeAgent.get().setLastSeenAt(time);
writeToZk(String.format(KNOWN_AGENTS_GROUP_HOST_FORMAT, clusterName, maybeAgent.get().getAgentId()), maybeAgent.get());
} else {
LOG.error("Could not fetch known agent metadata to update lastSeenAt time");
}
}
private static boolean floatsEqual(float x, float y, Optional<Offset<Float>> correspondence) {
if (correspondence.isPresent()) {
try {
assertThat(x).isCloseTo(y, correspondence.get());
} catch (AssertionError e) {
return false;
}
return true;
} else {
return Float.compare(x, y) == 0;
}
}
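A usage sketch with AssertJ's Offset (tolerance values are illustrative; assumes floatsEqual() is in scope, since it is private in its own class):

import com.google.common.base.Optional;
import org.assertj.core.data.Offset;

boolean close = floatsEqual(1.0f, 1.0005f, Optional.of(Offset.offset(0.001f))); // true: within tolerance
boolean exact = floatsEqual(1.0f, 1.0005f, Optional.<Offset<Float>>absent());   // false: exact Float.compare()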
@Override
public void endElement(String uri, String localName, String qName) {
StringBuilder buffer = textBuffers.removeLast();
Optional<String> text = fromNullable(buffer.length() > 0 ? buffer.toString() : null);
for (XPath xPath : pathsToMatch) {
Optional<String> dependee = xPath.matchAndExtract(xmlElements, text);
if (dependee.isPresent()) {
analysisContext.addDependencies(dependerId, dependee.get().trim());
}
}
xmlElements.removeLast();
}
@Override
public Void visitEnhancedForLoop(EnhancedForLoopTree expected, Tree actual) {
Optional<EnhancedForLoopTree> other = checkTypeAndCast(expected, actual);
if (!other.isPresent()) {
addTypeMismatch(expected, actual);
return null;
}
scan(expected.getVariable(), other.get().getVariable());
scan(expected.getExpression(), other.get().getExpression());
scan(expected.getStatement(), other.get().getStatement());
return null;
}
@Provides
@Singleton
protected ScanCountListener provideScanCountListener(MetricsScanCountListener metricsListener,
Optional<CloudWatchScanCountListener> cloudWatchScanCountListener) {
List<ScanCountListener> listeners = Lists.newArrayListWithCapacity(2);
listeners.add(metricsListener);
if (cloudWatchScanCountListener.isPresent()) {
listeners.add(cloudWatchScanCountListener.get());
}
return MultiScanCountListener.combine(listeners);
}
private static Config resolveConfig(JobSpec jobSpec, JobCatalog catalog)
throws SpecNotFoundException, JobTemplate.TemplateException {
Optional<URI> templateURIOpt = jobSpec.getTemplateURI();
if (templateURIOpt.isPresent()) {
JobCatalogWithTemplates catalogWithTemplates = new PackagedTemplatesJobCatalogDecorator(catalog);
JobTemplate template = catalogWithTemplates.getTemplate(templateURIOpt.get());
return template.getResolvedConfig(jobSpec.getConfig()).resolve();
} else {
return jobSpec.getConfig().resolve();
}
}
public static ScanRanges create(RowKeySchema schema, List<List<KeyRange>> ranges, int[] slotSpan, Integer nBuckets, boolean useSkipScan, int rowTimestampColIndex, Optional<byte[]> scanMinOffset) {
int offset = nBuckets == null ? 0 : SaltingUtil.NUM_SALTING_BYTES;
int nSlots = ranges.size();
if (nSlots == offset && !scanMinOffset.isPresent()) {
return EVERYTHING;
} else if ((nSlots == 1 + offset && ranges.get(offset).size() == 1 && ranges.get(offset).get(0) == KeyRange.EMPTY_RANGE)) {
return NOTHING;
}
TimeRange rowTimestampRange = getRowTimestampColumnRange(ranges, schema, rowTimestampColIndex);
boolean isPointLookup = isPointLookup(schema, ranges, slotSpan, useSkipScan);
if (isPointLookup) {
// TODO: consider keeping original to use for serialization as it would be smaller?
List<byte[]> keys = ScanRanges.getPointKeys(ranges, slotSpan, schema, nBuckets);
List<KeyRange> keyRanges = Lists.newArrayListWithExpectedSize(keys.size());
// We have full keys here, so use field from our varbinary schema
for (byte[] key : keys) {
keyRanges.add(KeyRange.getKeyRange(key));
}
// While doing a point lookup, if no key ranges remain after intersecting
// with the min/max range, just return NOTHING.
if (keyRanges.isEmpty()) {
return NOTHING;
}
ranges = Collections.singletonList(keyRanges);
useSkipScan = keyRanges.size() > 1;
// Treat as binary if descending because we've got a separator byte at the end
// which is not part of the value.
if (keys.size() > 1 || SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), false, schema.getField(schema.getFieldCount()-1)) == QueryConstants.DESC_SEPARATOR_BYTE) {
schema = SchemaUtil.VAR_BINARY_SCHEMA;
slotSpan = ScanUtil.SINGLE_COLUMN_SLOT_SPAN;
} else {
// Keep original schema and don't use skip scan as it's not necessary
// when there's a single key.
slotSpan = new int[] {schema.getMaxFields()-1};
}
}
List<List<KeyRange>> sortedRanges = Lists.newArrayListWithExpectedSize(ranges.size());
for (int i = 0; i < ranges.size(); i++) {
Field f = schema.getField(i);
List<KeyRange> sorted = Lists.newArrayList(ranges.get(i));
Collections.sort(sorted, f.getSortOrder() == SortOrder.ASC ? KeyRange.COMPARATOR : KeyRange.DESC_COMPARATOR);
sortedRanges.add(ImmutableList.copyOf(sorted));
}
// Don't set minMaxRange for point lookup because it causes issues during intersect
// by going across region boundaries
KeyRange scanRange = KeyRange.EVERYTHING_RANGE;
// if (!isPointLookup && (nBuckets == null || !useSkipScanFilter)) {
// if (! ( isPointLookup || (nBuckets != null && useSkipScanFilter) ) ) {
// if (nBuckets == null || (nBuckets != null && (!isPointLookup || !useSkipScanFilter))) {
if (nBuckets == null || !isPointLookup || !useSkipScan) {
byte[] minKey = ScanUtil.getMinKey(schema, sortedRanges, slotSpan);
byte[] maxKey = ScanUtil.getMaxKey(schema, sortedRanges, slotSpan);
// If the maxKey has crossed the salt byte boundary, then we do not
// have anything to filter at the upper end of the range
if (ScanUtil.crossesPrefixBoundary(maxKey, ScanUtil.getPrefix(minKey, offset), offset)) {
maxKey = KeyRange.UNBOUND;
}
// We won't filter anything at the low end of the range if we just have the salt byte
if (minKey.length <= offset) {
minKey = KeyRange.UNBOUND;
}
// Handle the offset by pushing it into the scanRange
if (scanMinOffset.isPresent()) {
byte[] minOffset = scanMinOffset.get();
// If we are salted, we have to normalize the salt byte: scans use 0 for the
// salt bucket. This should be safe for an RVC offset since we specify a full
// row key, which is by definition unique, so duplicating the salt bucket is fine.
if (nBuckets != null && nBuckets > 0) {
minOffset[0] = 0; // We use 0 for the salt bucket for scans
}
// If the offset is more selective than the existing lower bound, use it
if (Bytes.BYTES_COMPARATOR.compare(minOffset, minKey) > 0) {
minKey = minOffset;
}
}
scanRange = KeyRange.getKeyRange(minKey, maxKey);
}
if (scanRange == KeyRange.EMPTY_RANGE) {
return NOTHING;
}
return new ScanRanges(schema, slotSpan, sortedRanges, scanRange, useSkipScan, isPointLookup, nBuckets, rowTimestampRange);
}
@Override
public AsyncFuture<Void> run(final ShellIO io, TaskParameters base) throws Exception {
final MetadataLoadParameters params = (MetadataLoadParameters) base;
final SuggestBackend target = suggest.useGroup(params.getTarget());
final Optional<RateLimiter> rateLimiter = params.getRate() <= 0 ? Optional.absent()
: Optional.of(RateLimiter.create(params.getRate()));
io.out().println("Loading suggest data:");
io.out().println(" from (file): " + params.getFile());
io.out().println(" to (suggest): " + target);
io.out().println(" rate-limit:" +
(rateLimiter.isPresent() ? params.getRate() : "disabled"));
io.out().flush();
long total = 0;
long failed = 0;
long ratePosition = 0;
long rateStart = clock.currentTimeMillis();
final DateRange now = DateRange.now(clock);
try (final BufferedReader input = new BufferedReader(open(io, params.getFile()))) {
String line;
while ((line = input.readLine()) != null) {
if (rateLimiter.isPresent()) {
rateLimiter.get().acquire();
}
final Series series = mapper.readValue(line, Series.class);
total++;
try {
target.write(new WriteSuggest.Request(series, now)).get();
} catch (Exception e) {
failed++;
}
if (total % OUTPUT_STEP == 0) {
if (failed > 0) {
io.out().print('!');
failed = 0;
} else {
io.out().print('#');
}
if (total % (OUTPUT_STEP * 20) == 0) {
long rateNow = clock.currentTimeMillis();
final long rate;
if (rateNow == rateStart) {
rate = -1;
} else {
rate = ((total - ratePosition) * 1000) / (rateNow - rateStart);
}
io
.out()
.println(
String.format(" %d (%s/s)", total, rate == -1 ? "infinite" : rate));
ratePosition = total;
rateStart = rateNow;
}
io.out().flush();
}
}
}
io.out().println();
io.out().println("Allegedly successful writes: " + (total - failed));
io.out().println("Allegedly failed writes: " + failed);
io.out().flush();
return async.resolved();
}
/**
* The main thing this method handles is splitting SSTables using their
* index if available. The general algorithm is that if the file is larger
* than the block size plus a fuzzy factor (20%), it is split at index
* boundaries into chunks of at most roughly one block size each.
*/
List<InputSplit> getSSTableSplitsForFile(JobContext job, FileStatus file) throws IOException {
long length = file.getLen();
if (length == 0) {
LOG.info("skipping zero length file: {}", file.getPath());
return Collections.emptyList();
}
Path path = file.getPath();
Configuration conf = job.getConfiguration();
FileSystem fs = path.getFileSystem(conf);
BlockLocation[] blkLocations = fs.getFileBlockLocations(file, 0, length);
Optional<Path> compressionPath = getCompressionPath(fs, path);
if (compressionPath.isPresent()) {
return ImmutableList.of((InputSplit) AegCompressedSplit.createAegCompressedSplit(path, 0, length,
blkLocations[blkLocations.length - 1].getHosts(), compressionPath.get(), conf));
}
long blockSize = file.getBlockSize();
String aegisthusBlockSize = conf.get(Aegisthus.Feature.CONF_BLOCKSIZE);
if (!Strings.isNullOrEmpty(aegisthusBlockSize)) {
blockSize = Long.valueOf(aegisthusBlockSize);
}
long maxSplitSize = (long) (blockSize * .99);
long fuzzySplit = (long) (blockSize * 1.2);
long bytesRemaining = length;
List<InputSplit> splits = Lists.newArrayList();
IndexDatabaseScanner scanner = null;
// Only initialize if we are going to have more than a single split
if (fuzzySplit < length) {
Path indexPath = new Path(path.getParent(), path.getName().replaceAll("-Data.db", "-Index.db"));
if (!fs.exists(indexPath)) {
fuzzySplit = length;
} else {
FSDataInputStream fileIn = fs.open(indexPath);
scanner = new IndexDatabaseScanner(new BufferedInputStream(fileIn));
}
}
long splitStart = 0;
while (splitStart + fuzzySplit < length && scanner != null && scanner.hasNext()) {
long splitSize = 0;
// The scanner returns an offset from the start of the file.
while (splitSize < maxSplitSize && scanner.hasNext()) {
IndexDatabaseScanner.OffsetInfo offsetInfo = scanner.next();
splitSize = offsetInfo.getDataFileOffset() - splitStart;
}
int blkIndex = getBlockIndex(blkLocations, splitStart + (splitSize / 2));
LOG.debug("split path: {}:{}:{}", path.getName(), splitStart, splitSize);
splits.add(AegSplit.createSplit(path, splitStart, splitSize, blkLocations[blkIndex].getHosts()));
bytesRemaining -= splitSize;
splitStart += splitSize;
}
if (scanner != null) {
scanner.close();
}
if (bytesRemaining != 0) {
LOG.debug("end path: {}:{}:{}", path.getName(), length - bytesRemaining, bytesRemaining);
splits.add(AegSplit.createSplit(path, length - bytesRemaining, bytesRemaining,
blkLocations[blkLocations.length - 1].getHosts()));
}
return splits;
}
@Override
public void deleteProperty( EntityRef entityRef, String propertyName ) throws Exception {
Id entityId = new SimpleId( entityRef.getUuid(), entityRef.getType() );
// if ( !UUIDUtils.isTimeBased( entityId.getUuid() ) ) {
// throw new IllegalArgumentException(
// "Entity Id " + entityId.getType() + ":"+entityId.getUuid() +" uuid not time based");
// }
org.apache.usergrid.persistence.model.entity.Entity cpEntity =
load( entityId );
cpEntity.removeField( propertyName );
if(logger.isTraceEnabled()){
logger.trace( "About to Write {}:{} version {}",
cpEntity.getId().getType(), cpEntity.getId().getUuid(), cpEntity.getVersion() );
}
String region = null;
String collectionName = Schema.defaultCollectionName( entityRef.getType() );
CollectionSettings collectionSettings = collectionSettingsFactory
.getInstance( new CollectionSettingsScopeImpl( getAppIdObject(), collectionName) );
Optional<Map<String, Object>> existingSettings =
collectionSettings.getCollectionSettings( collectionName );
if ( existingSettings.isPresent() ) {
region = existingSettings.get().get(AUTHORITATIVE_REGION_SETTING).toString();
}
//TODO: does this call and others like it need a graphite reporter?
cpEntity = ecm.write( cpEntity, region ).toBlocking().last();
if(logger.isTraceEnabled()){
logger.trace("Wrote {}:{} version {}",
cpEntity.getId().getType(), cpEntity.getId().getUuid(), cpEntity.getVersion() );
}
//Adding graphite metrics
if ( !skipIndexingForType( cpEntity.getId().getType() ) ) {
indexService.queueEntityIndexUpdate( applicationScope, cpEntity, 0 , null);
}
}