org.apache.flink.util.Preconditions Code Examples

The following examples show how to use org.apache.flink.util.Preconditions. You can also follow each project link to view the full source file on GitHub.
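
To ground the snippets below, here is a minimal self-contained sketch of the three checks that recur throughout. The demo class and its values are hypothetical; the calls themselves are the flink-core API, which mirrors Guava's Preconditions so that Flink's core does not need a Guava dependency.

import org.apache.flink.util.Preconditions;

public class PreconditionsDemo {
	public static void main(String[] args) {
		// checkNotNull returns its argument, so it can wrap a field assignment.
		String name = Preconditions.checkNotNull("job-1", "name must not be null");

		// checkArgument validates caller-supplied values (IllegalArgumentException on failure).
		Preconditions.checkArgument(!name.isEmpty(), "name must not be empty");

		// checkState validates the object's own life-cycle state (IllegalStateException on failure).
		boolean running = true;
		Preconditions.checkState(running, "client is shut down");
	}
}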

Example 1 | Project: Flink-CEPplus | File: AbstractHeapState.java
@Override
public byte[] getSerializedValue(
		final byte[] serializedKeyAndNamespace,
		final TypeSerializer<K> safeKeySerializer,
		final TypeSerializer<N> safeNamespaceSerializer,
		final TypeSerializer<SV> safeValueSerializer) throws Exception {

	Preconditions.checkNotNull(serializedKeyAndNamespace);
	Preconditions.checkNotNull(safeKeySerializer);
	Preconditions.checkNotNull(safeNamespaceSerializer);
	Preconditions.checkNotNull(safeValueSerializer);

	Tuple2<K, N> keyAndNamespace = KvStateSerializer.deserializeKeyAndNamespace(
			serializedKeyAndNamespace, safeKeySerializer, safeNamespaceSerializer);

	SV result = stateTable.get(keyAndNamespace.f0, keyAndNamespace.f1);

	if (result == null) {
		return null;
	}
	return KvStateSerializer.serializeValue(result, safeValueSerializer);
}
 
Example 2 | Project: flink | File: StreamExecutionEnvironment.java
/**
 * Creates a data stream from the given non-empty collection. The type of the data stream is that of the
 * elements in the collection.
 *
 * <p>The framework will try and determine the exact type from the collection elements. In case of generic
 * elements, it may be necessary to manually supply the type information via
 * {@link #fromCollection(java.util.Collection, org.apache.flink.api.common.typeinfo.TypeInformation)}.
 *
 * <p>Note that this operation will result in a non-parallel data stream source, i.e. a data stream source with
 * parallelism one.
 *
 * @param data
 * 		The collection of elements to create the data stream from.
 * @param <OUT>
 *     The generic type of the returned data stream.
 * @return
 *     The data stream representing the given collection
 */
public <OUT> DataStreamSource<OUT> fromCollection(Collection<OUT> data) {
	Preconditions.checkNotNull(data, "Collection must not be null");
	if (data.isEmpty()) {
		throw new IllegalArgumentException("Collection must not be empty");
	}

	OUT first = data.iterator().next();
	if (first == null) {
		throw new IllegalArgumentException("Collection must not contain null elements");
	}

	TypeInformation<OUT> typeInfo;
	try {
		typeInfo = TypeExtractor.getForObject(first);
	}
	catch (Exception e) {
		throw new RuntimeException("Could not create TypeInformation for type " + first.getClass()
				+ "; please specify the TypeInformation manually via "
				+ "StreamExecutionEnvironment#fromElements(Collection, TypeInformation)", e);
	}
	return fromCollection(data, typeInfo);
}
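
A hedged usage sketch of the method above; the String elements and local environment are illustrative assumptions, not part of the original source.

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
DataStreamSource<String> letters = env.fromCollection(java.util.Arrays.asList("a", "b", "c"));
// For generic element types, supply the type information explicitly, e.g.:
// env.fromCollection(pairs, TypeInformation.of(new TypeHint<Tuple2<String, Integer>>() {}));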
 
Example 3 | Project: flink | File: CheckpointCoordinatorConfiguration.java
public CheckpointCoordinatorConfiguration(
		long checkpointInterval,
		long checkpointTimeout,
		long minPauseBetweenCheckpoints,
		int maxConcurrentCheckpoints,
		CheckpointRetentionPolicy checkpointRetentionPolicy,
		boolean isExactlyOnce,
		boolean isPreferCheckpointForRecovery,
		int tolerableCpFailureNumber) {

	// sanity checks
	if (checkpointInterval < MINIMAL_CHECKPOINT_TIME || checkpointTimeout < MINIMAL_CHECKPOINT_TIME ||
		minPauseBetweenCheckpoints < 0 || maxConcurrentCheckpoints < 1 ||
		tolerableCpFailureNumber < 0) {
		throw new IllegalArgumentException();
	}

	this.checkpointInterval = checkpointInterval;
	this.checkpointTimeout = checkpointTimeout;
	this.minPauseBetweenCheckpoints = minPauseBetweenCheckpoints;
	this.maxConcurrentCheckpoints = maxConcurrentCheckpoints;
	this.checkpointRetentionPolicy = Preconditions.checkNotNull(checkpointRetentionPolicy);
	this.isExactlyOnce = isExactlyOnce;
	this.isPreferCheckpointForRecovery = isPreferCheckpointForRecovery;
	this.tolerableCheckpointFailureNumber = tolerableCpFailureNumber;
}
 
Example 4 | Project: flink | File: CollectSinkFunction.java
private InetAddress getBindAddress() {
	RuntimeContext context = getRuntimeContext();
	Preconditions.checkState(
		context instanceof StreamingRuntimeContext,
		"CollectSinkFunction can only be used in StreamTask");
	StreamingRuntimeContext streamingContext = (StreamingRuntimeContext) context;
	String bindAddress = streamingContext.getTaskManagerRuntimeInfo().getTaskManagerBindAddress();

	if (bindAddress != null) {
		try {
			return InetAddress.getByName(bindAddress);
		} catch (UnknownHostException e) {
			return null;
		}
	}
	return null;
}
 
Example 5 | Project: Flink-CEPplus | File: HeartbeatManagerImpl.java
public HeartbeatManagerImpl(
		long heartbeatTimeoutIntervalMs,
		ResourceID ownResourceID,
		HeartbeatListener<I, O> heartbeatListener,
		ScheduledExecutor mainThreadExecutor,
		Logger log) {
	Preconditions.checkArgument(heartbeatTimeoutIntervalMs > 0L, "The heartbeat timeout has to be larger than 0.");

	this.heartbeatTimeoutIntervalMs = heartbeatTimeoutIntervalMs;
	this.ownResourceID = Preconditions.checkNotNull(ownResourceID);
	this.heartbeatListener = Preconditions.checkNotNull(heartbeatListener, "heartbeatListener");
	this.mainThreadExecutor = Preconditions.checkNotNull(mainThreadExecutor);
	this.log = Preconditions.checkNotNull(log);
	this.heartbeatTargets = new ConcurrentHashMap<>(16);

	stopped = false;
}
 
Example 6 | Project: flink | File: JobDetailsHandler.java
public JobDetailsHandler(
		GatewayRetriever<? extends RestfulGateway> leaderRetriever,
		Time timeout,
		Map<String, String> responseHeaders,
		MessageHeaders<EmptyRequestBody, JobDetailsInfo, JobMessageParameters> messageHeaders,
		ExecutionGraphCache executionGraphCache,
		Executor executor,
		MetricFetcher metricFetcher) {
	super(
		leaderRetriever,
		timeout,
		responseHeaders,
		messageHeaders,
		executionGraphCache,
		executor);

	this.metricFetcher = Preconditions.checkNotNull(metricFetcher);
}
 
Example 7 | Project: flink | File: DefaultConfigurableOptionsFactory.java
/**
 * Returns the value in string format with the given key.
 *
 * @param key The configuration-key to query in string format.
 */
private String getInternal(String key) {
	Preconditions.checkArgument(configuredOptions.containsKey(key),
		"The configuration " + key + " has not been configured.");

	return configuredOptions.get(key);
}
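
Note that the message above is concatenated eagerly, even when the check passes. A minimal alternative sketch using the template overload of checkArgument, which formats the message only on failure (%s placeholders, as in Guava):

Preconditions.checkArgument(configuredOptions.containsKey(key),
	"The configuration %s has not been configured.", key);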
 
Example 8 | Project: Flink-CEPplus | File: DoubleValueArray.java
/**
 * Initializes the array with the provided number of bytes.
 *
 * @param bytes initial size of the encapsulated array in bytes
 */
private void initialize(int bytes) {
	int capacity = bytes / ELEMENT_LENGTH_IN_BYTES;

	Preconditions.checkArgument(capacity > 0, "Requested array with zero capacity");
	Preconditions.checkArgument(capacity <= MAX_ARRAY_SIZE, "Requested capacity exceeds limit of " + MAX_ARRAY_SIZE);

	data = new double[capacity];
}
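
As a worked example, assuming ELEMENT_LENGTH_IN_BYTES is 8 (one double), initialize(100) allocates a 12-element array, while any request below 8 bytes computes a capacity of 0 and fails the first checkArgument.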
 
Example 9 | Project: flink | File: ElasticsearchSink.java
/**
 * Sets the maximum number of retries for a backoff attempt when flushing bulk requests.
 *
 * @param maxRetries the maximum number of retries for a backoff attempt when flushing bulk requests
 */
public void setBulkFlushBackoffRetries(int maxRetries) {
	Preconditions.checkArgument(
		maxRetries > 0,
		"Max number of backoff attempts must be larger than 0.");

	this.bulkRequestsConfig.put(CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES, String.valueOf(maxRetries));
}
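
A hedged usage sketch of this setter; httpHosts and elasticsearchSinkFunction are placeholders for a real host list and sink function, not part of the original snippet.

ElasticsearchSink.Builder<String> builder =
	new ElasticsearchSink.Builder<>(httpHosts, elasticsearchSinkFunction);
builder.setBulkFlushBackoffRetries(3); // 0 or a negative value would fail the checkArgument above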
 
Example 10 | Project: flink | File: RandomSamplerTest.java
private double[] getWrongSampler(int fixSize) {
	Preconditions.checkArgument(fixSize > 0, "Sample size must be positive.");
	int halfSourceSize = SOURCE_SIZE / 2;
	double[] wrongSampler = new double[fixSize];
	for (int i = 0; i < fixSize; i++) {
		wrongSampler[i] = (double) i % halfSourceSize;
	}

	return wrongSampler;
}
 
Example 11 | Project: bahir-flink | File: StreamSchema.java
public StreamSchema(TypeInformation<T> typeInfo, String... fieldNames) {
    Preconditions.checkNotNull(fieldNames, "Field name is required");
    this.typeInfo = typeInfo;
    this.fieldNames = fieldNames;
    this.fieldIndexes = getFieldIndexes(typeInfo, fieldNames);
    this.fieldTypes = getFieldTypes(typeInfo, fieldIndexes, fieldNames);
    this.streamSerializer = new StreamSerializer<>(this);
}
 
Example 12 | Project: flink | File: ScatterGatherIteration.java
private ScatterGatherIteration(ScatterFunction<K, VV, Message, EV> sf,
		GatherFunction<K, VV, Message> gf,
		DataSet<Edge<K, EV>> edgesWithValue,
		int maximumNumberOfIterations) {
	Preconditions.checkNotNull(sf);
	Preconditions.checkNotNull(gf);
	Preconditions.checkNotNull(edgesWithValue);
	Preconditions.checkArgument(maximumNumberOfIterations > 0, "The maximum number of iterations must be at least one.");

	this.scatterFunction = sf;
	this.gatherFunction = gf;
	this.edgesWithValue = edgesWithValue;
	this.maximumNumberOfIterations = maximumNumberOfIterations;
	this.messageType = getMessageType(sf);
}
 
Example 13 | Project: flink | File: BucketStateSerializer.java
BucketStateSerializer(
		final SimpleVersionedSerializer<RecoverableWriter.ResumeRecoverable> resumableSerializer,
		final SimpleVersionedSerializer<RecoverableWriter.CommitRecoverable> commitableSerializer,
		final SimpleVersionedSerializer<BucketID> bucketIdSerializer
) {
	this.resumableSerializer = Preconditions.checkNotNull(resumableSerializer);
	this.commitableSerializer = Preconditions.checkNotNull(commitableSerializer);
	this.bucketIdSerializer = Preconditions.checkNotNull(bucketIdSerializer);
}
 
Example 14 | Project: flink | File: PathResolutionTest.java
public TestSpec expectPath(String... expectedPath) {
	Preconditions.checkArgument(
		sqlPathToLookup != null && tableApiLookupPath != null,
		"Both sql & table API versions of path lookups required. Remember expectPath needs to be called last");

	Preconditions.checkArgument(
		catalogManager != null,
		"A catalog manager needs to provided. Remember expectPath needs to be called last"
	);

	this.expectedPath = asList(expectedPath);
	return this;
}
 
Example 15 | Project: Alink | File: StreamOperator.java
private static <T> StreamExecutionEnvironment getExecutionEnvironment(
	Function <T, StreamExecutionEnvironment> getFunction, T[] types) {
	Preconditions.checkState(types != null && types.length > 0,
		"The operators must not be empty when getting the StreamExecutionEnvironment");

	StreamExecutionEnvironment env = null;

	for (T type : types) {
		if (type == null) {
			continue;
		}

		StreamExecutionEnvironment executionEnv = getFunction.apply(type);

		if (env != null && env != executionEnv) {
			throw new RuntimeException("The operators must be runing in the same StreamExecutionEnvironment");
		}

		env = executionEnv;
	}

	Preconditions.checkNotNull(env,
		"Could not find the StreamExecutionEnvironment in the operators. " +
			"There is a bug. Please contact the developer.");

	return env;
}
 
Example 16 | Project: flink | File: ClusterConfigHandler.java
public ClusterConfigHandler(
		GatewayRetriever<? extends RestfulGateway> leaderRetriever,
		Time timeout,
		Map<String, String> responseHeaders,
		MessageHeaders<EmptyRequestBody, ClusterConfigurationInfo, EmptyMessageParameters> messageHeaders,
		Configuration configuration) {
	super(leaderRetriever, timeout, responseHeaders, messageHeaders);

	Preconditions.checkNotNull(configuration);
	this.clusterConfig = ClusterConfigurationInfo.from(configuration);
}
 
Example 17 | Project: Flink-CEPplus | File: DataSink.java
/**
 * Sets the resources for this data sink, and the minimum and preferred resources are the same by default.
 *
 * @param resources The resources for this data sink.
 * @return The data sink with set minimum and preferred resources.
 */
private DataSink<T> setResources(ResourceSpec resources) {
	Preconditions.checkNotNull(resources, "The resources must be not null.");
	Preconditions.checkArgument(resources.isValid(), "The values in resources must be not less than 0.");

	this.minResources = resources;
	this.preferredResources = resources;

	return this;
}
 
Example 18 | Project: stateful-functions | File: TaggedBootstrapData.java
public TaggedBootstrapData(Address target, Object payload, int unionIndex) {
  this.target = Objects.requireNonNull(target);
  this.payload = Objects.requireNonNull(payload);

  Preconditions.checkArgument(unionIndex >= 0);
  this.unionIndex = unionIndex;
}
 
Example 19 | Project: flink | File: TaskState.java
public TaskState(JobVertexID jobVertexID, int parallelism, int maxParallelism, int chainLength) {
	Preconditions.checkArgument(
			parallelism <= maxParallelism,
			"Parallelism " + parallelism + " is not smaller or equal to max parallelism " + maxParallelism + ".");
	Preconditions.checkArgument(chainLength > 0, "There has to be at least one operator in the operator chain.");

	this.jobVertexID = jobVertexID;

	this.subtaskStates = new HashMap<>(parallelism);

	this.parallelism = parallelism;
	this.maxParallelism = maxParallelism;
	this.chainLength = chainLength;
}
 
Example 20 | Project: Alink | File: HaversineDistance.java
/**
 * The shortest distance between two points on the surface of the Earth
 *
 * @param vec1 [latitude, longitude] of the first point.
 * @param vec2 [latitude, longitude] of the second point.
 * @return distance.
 */
@Override
public double calc(Vector vec1, Vector vec2) {
    Preconditions.checkState(vec1.size() == VECTOR_SIZE && vec2.size() == VECTOR_SIZE,
        "HaversineDistance only supports vector size 2, the first value is latitude and the second value is "
            + "longitude");

    return calc(vec1.get(0), vec1.get(1), vec2.get(0), vec2.get(1));
}
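
The four-argument overload delegated to above implements the haversine formula. A minimal standalone sketch, assuming a mean Earth radius of 6371 km (the helper name is hypothetical):

static double haversine(double lat1, double lon1, double lat2, double lon2) {
	final double earthRadiusKm = 6371.0;
	double dLat = Math.toRadians(lat2 - lat1);
	double dLon = Math.toRadians(lon2 - lon1);
	double a = Math.sin(dLat / 2) * Math.sin(dLat / 2)
		+ Math.cos(Math.toRadians(lat1)) * Math.cos(Math.toRadians(lat2))
		* Math.sin(dLon / 2) * Math.sin(dLon / 2);
	return 2 * earthRadiusKm * Math.asin(Math.sqrt(a));
}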
 
Example 21 | Project: Flink-CEPplus | File: QueryableStateClient.java
/**
 * Returns a future holding the request result.
 * @param jobId                     JobID of the job the queryable state belongs to.
 * @param queryableStateName        Name under which the state is queryable.
 * @param key			            The key we are interested in.
 * @param keyTypeHint				A {@link TypeHint} used to extract the type of the key.
 * @param stateDescriptor			The {@link StateDescriptor} of the state we want to query.
 * @return Future holding the immutable {@link State} object containing the result.
 */
@PublicEvolving
public <K, S extends State, V> CompletableFuture<S> getKvState(
		final JobID jobId,
		final String queryableStateName,
		final K key,
		final TypeHint<K> keyTypeHint,
		final StateDescriptor<S, V> stateDescriptor) {

	Preconditions.checkNotNull(keyTypeHint);

	TypeInformation<K> keyTypeInfo = keyTypeHint.getTypeInfo();
	return getKvState(jobId, queryableStateName, key, keyTypeInfo, stateDescriptor);
}
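
A hedged usage sketch of the query path; the host, port, key, state name and descriptor are illustrative assumptions (9069 is the default queryable state proxy port), and jobId is assumed to be the JobID of the running job.

QueryableStateClient client = new QueryableStateClient("localhost", 9069);
ValueStateDescriptor<Long> descriptor = new ValueStateDescriptor<>("count", Long.class);
CompletableFuture<ValueState<Long>> result = client.getKvState(
	jobId, "count-query", "some-key", new TypeHint<String>() {}, descriptor);
Long value = result.get().value(); // blocks until the state server responds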
 
Example 22 | Project: Flink-CEPplus | File: WorkerRegistration.java
public WorkerRegistration(
		TaskExecutorGateway taskExecutorGateway,
		WorkerType worker,
		int dataPort,
		HardwareDescription hardwareDescription) {

	super(worker.getResourceID(), taskExecutorGateway);

	this.worker = Preconditions.checkNotNull(worker);
	this.dataPort = dataPort;
	this.hardwareDescription = Preconditions.checkNotNull(hardwareDescription);
}
 
Example 23 | Project: flink | File: StreamSourceContexts.java
private ManualWatermarkContext(
		final Output<StreamRecord<T>> output,
		final ProcessingTimeService timeService,
		final Object checkpointLock,
		final StreamStatusMaintainer streamStatusMaintainer,
		final long idleTimeout) {

	super(timeService, checkpointLock, streamStatusMaintainer, idleTimeout);

	this.output = Preconditions.checkNotNull(output, "The output cannot be null.");
	this.reuse = new StreamRecord<>(null);
}
 
Example 24 | Project: beam | File: FlinkExecutableStageFunction.java
private void processElements(Iterable<WindowedValue<InputT>> iterable, RemoteBundle bundle)
    throws Exception {
  Preconditions.checkArgument(bundle != null, "RemoteBundle must not be null");

  FnDataReceiver<WindowedValue<?>> mainReceiver =
      Iterables.getOnlyElement(bundle.getInputReceivers().values());
  for (WindowedValue<InputT> input : iterable) {
    mainReceiver.accept(input);
  }
}
 
Example 25 | Project: flink | File: OperatorState.java
public void putState(int subtaskIndex, OperatorSubtaskState subtaskState) {
	Preconditions.checkNotNull(subtaskState);

	if (subtaskIndex < 0 || subtaskIndex >= parallelism) {
		throw new IndexOutOfBoundsException("The given sub task index " + subtaskIndex +
			" exceeds the maximum number of sub tasks " + parallelism);
	} else {
		operatorSubtaskStates.put(subtaskIndex, subtaskState);
	}
}
 
Example 26 | Project: flink | File: CompositeSerializer.java
@Override
public T copy(T from, T reuse) {
	Preconditions.checkNotNull(from);
	Preconditions.checkNotNull(reuse);
	if (isImmutableType()) {
		return from;
	}
	Object[] fields = new Object[fieldSerializers.length];
	for (int index = 0; index < fieldSerializers.length; index++) {
		fields[index] = fieldSerializers[index].copy(getField(from, index), getField(reuse, index));
	}
	return createInstanceWithReuse(fields, reuse);
}
 
Example 27 | Project: flink | File: StateReaderOperator.java
protected StateReaderOperator(F function, TypeInformation<KEY> keyType, TypeSerializer<N> namespaceSerializer) {
	Preconditions.checkNotNull(function, "The user function must not be null");
	Preconditions.checkNotNull(keyType, "The key type must not be null");
	Preconditions.checkNotNull(namespaceSerializer, "The namespace serializer must not be null");

	this.function = function;
	this.keyType = keyType;
	this.namespaceSerializer = namespaceSerializer;
}
 
Example 28 | Project: flink | File: StateDescriptor.java
/**
 * Sets the name for queries of state created from this descriptor.
 *
 * <p>If a name is set, the created state will be published for queries
 * during runtime. The name needs to be unique per job. If there is another
 * state instance published under the same name, the job will fail during runtime.
 *
 * @param queryableStateName State name for queries (unique name per job)
 * @throws IllegalStateException If queryable state name already set
 */
public void setQueryable(String queryableStateName) {
	Preconditions.checkArgument(
		ttlConfig.getUpdateType() == StateTtlConfig.UpdateType.Disabled,
		"Queryable state is currently not supported with TTL");
	if (this.queryableStateName == null) {
		this.queryableStateName = Preconditions.checkNotNull(queryableStateName, "Registration name");
	} else {
		throw new IllegalStateException("Queryable state name already set");
	}
}
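
A hedged usage sketch pairing with the client call in Example 21; the descriptor and query names are illustrative assumptions.

ValueStateDescriptor<Long> descriptor = new ValueStateDescriptor<>("count", Long.class);
descriptor.setQueryable("count-query"); // publishes the state under a job-unique name
// descriptor.setQueryable("other"); // a second call would throw IllegalStateException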
 
Example 29 | Project: flink | File: FileUploads.java
public FileUploads(@Nonnull Path uploadDirectory) {
	Preconditions.checkNotNull(uploadDirectory, "UploadDirectory must not be null.");
	Preconditions.checkArgument(Files.exists(uploadDirectory), "UploadDirectory does not exist.");
	Preconditions.checkArgument(Files.isDirectory(uploadDirectory), "UploadDirectory is not a directory.");
	Preconditions.checkArgument(uploadDirectory.isAbsolute(), "UploadDirectory is not absolute.");
	this.uploadDirectory = uploadDirectory;
}
 
Example 30 | Project: flink | File: Transformation.java
/**
 * Creates a new {@code Transformation} with the given name, output type and parallelism.
 *
 * @param name The name of the {@code Transformation}, this will be shown in Visualizations and the Log
 * @param outputType The output type of this {@code Transformation}
 * @param parallelism The parallelism of this {@code Transformation}
 */
public Transformation(String name, TypeInformation<T> outputType, int parallelism) {
	this.id = getNewNodeId();
	this.name = Preconditions.checkNotNull(name);
	this.outputType = outputType;
	this.parallelism = parallelism;
	this.slotSharingGroup = null;
}