下面列出了 org.apache.commons.lang3.ArrayUtils#getLength() 的实例代码;也可以点击链接到 GitHub 查看源代码,或在右侧发表评论。
/**
 * Validates the number of recipient columns selected on the form.
 * An empty selection or one exceeding {@code MAX_SELECTED_FIELDS_COUNT}
 * is replaced by the column selection persisted in web storage; exceeding
 * the maximum additionally registers a global validation error.
 *
 * @param aForm  form whose selected fields are validated (may be mutated)
 * @param errors collector that receives validation error messages
 */
protected void validateSelectedFields(RecipientForm aForm, ActionMessages errors) {
// getLength() is null-safe: a null selection counts as 0 columns.
int selectedCount = ArrayUtils.getLength(aForm.getSelectedFields());
boolean tooManyColumns = selectedCount > MAX_SELECTED_FIELDS_COUNT;
if (selectedCount < 1 || tooManyColumns) {
// Fall back to the selection stored for the recipient overview.
String[] storedColumns = webStorage.get(WebStorage.RECIPIENT_OVERVIEW)
.getSelectedFields()
.toArray(ArrayUtils.EMPTY_STRING_ARRAY);
aForm.setSelectedFields(storedColumns);
}
if (tooManyColumns) {
logger.error("Error in RecipientAction: error.maximum.recipient.columns: count > " + MAX_SELECTED_FIELDS_COUNT);
errors.add(ActionMessages.GLOBAL_MESSAGE, new ActionMessage("error.maximum.recipient.columns"));
aForm.addErrors(errors);
}
}
/**
 * Asserts that the notification requests captured so far match the
 * expectations recorded in the given test step, then clears the capture.
 * Zero requests are expected when no message key is configured; otherwise
 * one request per expected priority, in order.
 *
 * @param testStep test step carrying the expected message key and priorities
 */
private void assertNotificationRequests(TestStep testStep)
{
List<MessageBody> capturedRequests = requests.getValues();
// (msgKey == null ? 0 : 1) * priorities.length, written out explicitly.
int expectedCount = testStep.expectedNotificationMsgKey == null
? 0
: ArrayUtils.getLength(testStep.expectedNotificationPriorities);
assertThat(capturedRequests, hasSize(expectedCount));
int index = 0;
for (MessageBody request : capturedRequests)
{
assertThat(request.getMessageType(), equalTo(NotifyRequest.NAME));
if (testStep.expectedNotificationMsgKey != null)
{
assertThat(NotifyRequest.getMsgKey(request), equalTo(testStep.expectedNotificationMsgKey));
assertThat(NotifyRequest.getPriority(request), equalTo(testStep.expectedNotificationPriorities[index]));
}
index++;
}
requests.reset();
}
/**
 * Validates the messages of a produce request and stamps each message with
 * the connection address and transaction id.
 *
 * @param connection         client connection supplying the address
 * @param produceMessageData produce payload whose messages are checked
 * @throws JoyQueueException CN_PARAM_ERROR if the message list is empty,
 *         the messages span more than one partition, or a body/businessId
 *         exceeds the configured size limits
 */
protected void checkAndFillMessage(Connection connection, ProduceMessageData produceMessageData) throws JoyQueueException {
if (CollectionUtils.isEmpty(produceMessageData.getMessages())) {
// FIX: message text was garbled ("messages not empty")
throw new JoyQueueException(JoyQueueCode.CN_PARAM_ERROR, "messages can not be empty");
}
byte[] address = connection.getAddress();
String txId = produceMessageData.getTxId();
// All messages in a single produce command must target the same partition.
int partition = produceMessageData.getMessages().get(0).getPartition();
for (BrokerMessage brokerMessage : produceMessageData.getMessages()) {
if (brokerMessage.getPartition() != partition) {
throw new JoyQueueException(JoyQueueCode.CN_PARAM_ERROR, "the put message command has multi partition");
}
// getLength()/length() are null-safe: null body or businessId counts as 0.
if (ArrayUtils.getLength(brokerMessage.getByteBody()) > produceConfig.getBodyLength()) {
// FIX: "out of rage" -> "out of range"
throw new JoyQueueException(JoyQueueCode.CN_PARAM_ERROR, "message body out of range");
}
if (StringUtils.length(brokerMessage.getBusinessId()) > produceConfig.getBusinessIdLength()) {
// FIX: "out of rage" -> "out of range"
throw new JoyQueueException(JoyQueueCode.CN_PARAM_ERROR, "message businessId out of range");
}
brokerMessage.setClientIp(address);
brokerMessage.setTxId(txId);
}
}
/**
 * Parses the heartbeat key into an {@link HBKey}.
 * Expected layout: 10 dot-separated fields, with field 8 holding three
 * pipe-separated values (checkpointMs|txTimeMs|status).
 *
 * @return a populated HBKey, or {@code new HBKey(false)} if the key does
 *         not match the expected layout
 */
@Override
public HBKey get() {
// BUG FIX: the original called StringUtils.split("."), which tokenizes the
// literal string "." by whitespace and always yields one element, so the
// arity check below could never pass. The key itself (used at the bottom)
// must be split on '.'.
String[] vals = StringUtils.split(key, ".");
if (ArrayUtils.getLength(vals) != 10)
return new HBKey(false);
String type = vals[0];
String dbType = vals[1];
String ds = vals[2];
String schema = vals[3];
String table = vals[4];
// parseInt/parseLong avoid needless boxing compared to valueOf.
int partition = Integer.parseInt(vals[6]);
String[] times = StringUtils.split(vals[8], "|");
if (ArrayUtils.getLength(times) != 3)
return new HBKey(false);
long checkpointMs = Long.parseLong(times[0]);
long txTimeMs = Long.parseLong(times[1]);
String status = times[2];
return new HBKey(type, dbType, ds, schema, table, partition, checkpointMs, txTimeMs, status, true, key);
}
/**
 * Build multiple paths to URI.
 *
 * @param paths
 *            multiple paths; a null or empty array yields the root slash
 * @return URI
 */
public static String buildURI(String... paths) {
// getLength() is null-safe: null or empty input maps to the root path.
if (ArrayUtils.getLength(paths) < 1) {
return PathConstants.SLASH;
}
// Leading slash, then the segments joined with slashes.
String joined = String.join(PathConstants.SLASH, paths);
return resolvePath(PathConstants.SLASH + joined);
}
/**
 * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
 * This accommodates differences between hadoop versions, where hadoop 1
 * does not throw a FileNotFoundException, and return an empty FileStatus[]
 * while Hadoop 2 will throw FileNotFoundException.
 *
 * @param fs file system
 * @param dir directory
 * @param filter file status filter
 * @return null if dir is empty or doesn't exist, otherwise FileStatus list
 */
public static List<FileStatus> listStatusWithStatusFilter(final FileSystem fs,
final Path dir, final FileStatusFilter filter) throws IOException {
final FileStatus[] listing;
try {
listing = fs.listStatus(dir);
} catch (FileNotFoundException fnfe) {
LOG.trace("{} does not exist", dir);
return null;
}
// getLength() is null-safe: hadoop 1 may return null for a missing dir.
if (ArrayUtils.getLength(listing) == 0) {
return null;
}
if (filter == null) {
return Arrays.asList(listing);
}
final List<FileStatus> filtered = filterFileStatuses(listing, filter);
return (filtered == null || filtered.isEmpty()) ? null : filtered;
}
/**
 * Validates a date rule node. The node's value must hold at least two
 * entries; for TODAY-based rules with more than two entries the offset
 * component is additionally parsed for validity.
 *
 * @param node     rule node being validated
 * @param dataType data type of the rule (not inspected here)
 * @param operator operator of the rule (not inspected here)
 * @throws QueryBuilderToEqlConversionException if the value array is too
 *         short or the offset cannot be parsed
 */
@Override
protected void validate(QueryBuilderRuleNode node, DataType dataType, String operator) throws QueryBuilderToEqlConversionException {
Object[] values = QueryBuilderUtil.getRuleNodeValueAsArray(node);
// getLength() is null-safe: a null value array fails the arity check.
if (ArrayUtils.getLength(values) < 2) {
throw new QueryBuilderToEqlConversionException("Invalid rule value for node " + node);
}
if (TODAY.equalsIgnoreCase((String) values[0]) && ArrayUtils.getLength(values) > 2) {
// validate if values contains ['TODAY', '%operator%', '%offset%', '%dateformat%'] data
// NOTE(review): per the layout described above the offset would be
// values[2], yet values[1] (the operator slot) is passed here —
// confirm which index parseOffset actually expects.
parseOffset(node, (String) values[1]);
}
}
/**
 * Ensures the rule node carries exactly two non-empty values.
 *
 * @param ruleNode rule node to validate
 * @throws QueryBuilderToEqlConversionException if the value count differs
 *         from two or any value is empty
 */
@Override
protected void validate(QueryBuilderRuleNode ruleNode) throws QueryBuilderToEqlConversionException {
Object[] ruleValues = QueryBuilderUtil.getRuleNodeValueAsArray(ruleNode);
// getLength() is null-safe; the emptiness check only runs on correct arity.
boolean wrongArity = ArrayUtils.getLength(ruleValues) != 2;
if (wrongArity || QueryBuilderUtil.containsAnyEmptyValue(ruleValues)) {
throw new QueryBuilderToEqlConversionException("Invalid value for node " + ruleNode);
}
}
/**
 * Returns a short human-readable summary of this record: topic, key and
 * the (null-safe) length of the value payload.
 */
public String info() {
StringBuilder summary = new StringBuilder("{");
summary.append("topic='").append(topic).append('\'');
summary.append(", key='").append(key).append('\'');
summary.append(", value.length=").append(ArrayUtils.getLength(value));
summary.append('}');
return summary.toString();
}
/**
 * Checks that every message's key and value fit within the configured
 * limits. Lengths are computed null-safely (a null array counts as 0).
 *
 * @param messages messages to validate
 * @return {@code MESSAGE_TOO_LARGE} on the first violation, otherwise {@code NONE}
 */
protected short checkAndFillMessages(List<KafkaBrokerMessage> messages) {
final int maxKeyLength = produceConfig.getBusinessIdLength();
final int maxBodyLength = produceConfig.getBodyLength();
for (KafkaBrokerMessage message : messages) {
if (ArrayUtils.getLength(message.getKey()) > maxKeyLength) {
return KafkaErrorCode.MESSAGE_TOO_LARGE.getCode();
}
if (ArrayUtils.getLength(message.getValue()) > maxBodyLength) {
return KafkaErrorCode.MESSAGE_TOO_LARGE.getCode();
}
}
return KafkaErrorCode.NONE.getCode();
}
/**
 * Summarizes this record for logging: topic, key and the null-safe length
 * of the value payload.
 */
public String info() {
return String.format("{topic='%s', key='%s', value.length=%d}",
topic, key, ArrayUtils.getLength(value));
}
/**
 * Converts an array of {@link ExtFilter}s into a Matcher that combines
 * them with OR semantics.
 *
 * @param extFilterArray array of extended filters; null-safe — a null
 *                       array yields an empty OR group
 * @return an OR-group Matcher over the given filters
 */
public static Matcher toOrGroupMatcher(final ExtFilter[] extFilterArray) {
// getLength() is null-safe, so a null input produces a zero-length group.
final int filterCount = ArrayUtils.getLength(extFilterArray);
final Matcher[] matchers = new Matcher[filterCount];
for (int i = 0; i < filterCount; i++) {
matchers[i] = new ExtFilterMatcher(extFilterArray[i]);
}
return new GroupMatcher.Or(matchers);
}
/**
 * Handle individual shard failures that can occur even when the response is OK. These
 * can indicate misconfiguration of the search indices.
 * @param request The search request.
 * @param response The search response.
 */
private void handleShardFailures(
org.elasticsearch.action.search.SearchRequest request,
org.elasticsearch.action.search.SearchResponse response) {
/*
 * shard failures are only logged. the search itself is not failed. this approach
 * assumes that a user is interested in partial search results, even if the
 * entire search result set cannot be produced.
 *
 * for example, assume the user adds an additional sensor and the telemetry
 * is indexed into a new search index. if that search index is misconfigured,
 * it can result in partial shard failures. rather than failing the entire search,
 * we log the error and allow the results to be returned from shards that
 * are correctly configured.
 */
int errors = ArrayUtils.getLength(response.getShardFailures());
LOG.error("Search resulted in {}/{} shards failing; errors={}, search={}",
response.getFailedShards(),
response.getTotalShards(),
errors,
ElasticsearchUtils.toJSON(request).orElse("???"));
// log each reported failure
int failureCount=1;
for(ShardSearchFailure fail: response.getShardFailures()) {
String msg = String.format(
"Shard search failure [%s/%s]; reason=%s, index=%s, shard=%s, status=%s, nodeId=%s",
failureCount,
errors,
ExceptionUtils.getRootCauseMessage(fail.getCause()),
fail.index(),
fail.shardId(),
fail.status(),
fail.shard().getNodeId());
LOG.error(msg, fail.getCause());
// FIX: counter was never incremented, so every failure logged as [1/N]
failureCount++;
}
}
/**
 * Upload the profiler configuration to Zookeeper. The configuration is
 * read from a file when a path is configured, otherwise serialized from
 * the in-memory profiler config; nothing is written when neither source
 * yields any bytes.
 * @param client The zookeeper client.
 */
private void uploadProfilerConfig(CuratorFramework client) throws Exception {
final byte[] configBytes;
if (profilerConfigurationPath != null) {
configBytes = readProfilerConfigFromFile(profilerConfigurationPath);
} else {
configBytes = profilerConfig == null
? null
: profilerConfig.toJSON().getBytes(StandardCharsets.UTF_8);
}
// getLength() is null-safe: skip the write when no config was produced.
if (ArrayUtils.getLength(configBytes) > 0) {
writeProfilerConfigToZookeeper(configBytes, client);
}
}
/**
 * Verifies current version of file system
 *
 * @param fs filesystem object
 * @param rootdir root hbase directory
 * @return null if no version file exists, version string otherwise
 * @throws IOException if the version file fails to open
 * @throws DeserializationException if the version data cannot be translated into a version
 */
public static String getVersion(FileSystem fs, Path rootdir)
throws IOException, DeserializationException {
final Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
FileStatus[] status = null;
try {
// hadoop 2.0 throws FNFE if directory does not exist.
// hadoop 1.0 returns null if directory does not exist.
status = fs.listStatus(versionFile);
} catch (FileNotFoundException fnfe) {
return null;
}
// getLength() is null-safe: covers both the hadoop-1 null and empty cases.
if (ArrayUtils.getLength(status) == 0) {
return null;
}
String version = null;
byte [] content = new byte [(int)status[0].getLen()];
// IMPROVED: try-with-resources replaces the manual finally/close, matching
// the style already used for the DataInputStream below.
try (FSDataInputStream s = fs.open(versionFile)) {
IOUtils.readFully(s, content, 0, content.length);
if (ProtobufUtil.isPBMagicPrefix(content)) {
version = parseVersionFrom(content);
} else {
// Presume it pre-pb format.
try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(content))) {
version = dis.readUTF();
}
}
} catch (EOFException eof) {
LOG.warn("Version file was empty, odd, will try to set it.");
}
return version;
}
/**
 * Constructs a cell backed by whole byte arrays: each of row, family,
 * qualifier, value and tags is used in full, starting at offset 0.
 * ArrayUtils.getLength() is null-safe, so any of the array arguments may
 * be null (treated as length 0).
 *
 * @param row       row key bytes (may be null)
 * @param family    column family bytes (may be null)
 * @param qualifier column qualifier bytes (may be null)
 * @param timestamp cell timestamp
 * @param type      KeyValue type of the cell
 * @param seqId     sequence id of the cell
 * @param value     cell value bytes (may be null)
 * @param tags      cell tag bytes (may be null)
 */
public IndividualBytesFieldCell(byte[] row, byte[] family, byte[] qualifier, long timestamp,
KeyValue.Type type, long seqId, byte[] value, byte[] tags) {
// Delegate to the fully-qualified (array, offset, length) constructor.
this(row, 0, ArrayUtils.getLength(row),
family, 0, ArrayUtils.getLength(family),
qualifier, 0, ArrayUtils.getLength(qualifier),
timestamp, type, seqId,
value, 0, ArrayUtils.getLength(value),
tags, 0, ArrayUtils.getLength(tags));
}
/**
 * Returns the number of distinct values recorded in the column statistics,
 * or {@code Constants.UNKNOWN_CARDINALITY} when no unique-value set is
 * available.
 */
public int getDistinctValueCount() {
// NOTE(review): getLength() reflects over its argument and throws if the
// object is not an array — presumably getUniqueValuesSet() always returns
// an array; confirm against ColumnStatistics.
final Object uniqueValues = columnStatistics.getUniqueValuesSet();
return uniqueValues == null
? Constants.UNKNOWN_CARDINALITY
: ArrayUtils.getLength(uniqueValues);
}
/**
 * Stores each address under the key {@code attributePrefix + "." + index}
 * in the attribute map. A null address array adds no entries.
 *
 * @param attributes      destination map receiving one entry per address
 * @param attributePrefix key prefix for each address entry
 * @param addresses       addresses to store; may be null
 */
private static void putAddressListInAttributes(
Map<String, String> attributes,
final String attributePrefix,
Address[] addresses) {
// IMPROVED: hoist the loop-invariant length out of the loop condition.
// getLength() is null-safe (null -> 0), which also subsumes the old
// explicit null check with identical behavior.
final int addressCount = ArrayUtils.getLength(addresses);
for (int index = 0; index < addressCount; index++) {
attributes.put(attributePrefix + "." + index, addresses[index].toString());
}
}
/**
 * Validates that the rule node holds exactly three non-empty values.
 *
 * @param ruleNode node whose value array is checked
 * @throws QueryBuilderToEqlConversionException if the value count differs
 *         from three or any value is empty
 */
private void validate(QueryBuilderRuleNode ruleNode) throws QueryBuilderToEqlConversionException {
final Object[] nodeValues = QueryBuilderUtil.getRuleNodeValueAsArray(ruleNode);
// getLength() is null-safe; emptiness is only checked on correct arity.
final boolean validArity = ArrayUtils.getLength(nodeValues) == 3;
if (!validArity || QueryBuilderUtil.containsAnyEmptyValue(nodeValues)) {
throw new QueryBuilderToEqlConversionException();
}
}
/**
 * Extracts the module ID from a servlet path. The module ID is expected
 * to be the first path segment.
 *
 * @param path servlet request path
 * @return the module ID if the path could be parsed, otherwise null
 */
private String parseUniqueId(final String path) {
// split() is null-safe and drops empty segments, so a leading '/' is fine.
final String[] segments = StringUtils.split(path, "/");
if (ArrayUtils.getLength(segments) < 1) {
return null;
}
return segments[0];
}