Source Code Examples of the org.apache.hadoop.util.VersionUtil Class

The following examples show how the org.apache.hadoop.util.VersionUtil API is used in practice. Each snippet is taken from the open-source project and file named above it; the full sources can be found on GitHub in the respective projects.
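VersionUtil.compareVersions(String, String) follows the usual compareTo convention: the result is negative, zero, or positive depending on whether the first version sorts before, equal to, or after the second. A minimal standalone sketch of that contract (the class name VersionUtilDemo is illustrative, not from any of the projects below):

import org.apache.hadoop.util.VersionUtil;

public class VersionUtilDemo {
  public static void main(String[] args) {
    // Negative result: 2.7.3 sorts before 2.8.0.
    System.out.println(VersionUtil.compareVersions("2.7.3", "2.8.0") < 0);  // true

    // Zero: identical version strings compare equal.
    System.out.println(VersionUtil.compareVersions("3.0.0", "3.0.0") == 0); // true

    // Components are compared numerically, so 2.10.0 sorts after 2.9.2.
    System.out.println(VersionUtil.compareVersions("2.10.0", "2.9.2") > 0); // true
  }
}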

Example 1  Project: hadoop   File: BPServiceActor.java
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
  // build and layout versions should match
  String nnVersion = nsInfo.getSoftwareVersion();
  String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
  if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
    IncorrectVersionException ive = new IncorrectVersionException(
        minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
    LOG.warn(ive.getMessage());
    throw ive;
  }
  String dnVersion = VersionInfo.getVersion();
  if (!nnVersion.equals(dnVersion)) {
    LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
        "DataNode version '" + dnVersion + "' but is within acceptable " +
        "limits. Note: This is normal during a rolling upgrade.");
  }
}
 
Example 2  Project: big-c   File: BPServiceActor.java
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
  // build and layout versions should match
  String nnVersion = nsInfo.getSoftwareVersion();
  String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
  if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
    IncorrectVersionException ive = new IncorrectVersionException(
        minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
    LOG.warn(ive.getMessage());
    throw ive;
  }
  String dnVersion = VersionInfo.getVersion();
  if (!nnVersion.equals(dnVersion)) {
    LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
        "DataNode version '" + dnVersion + "' but is within acceptable " +
        "limits. Note: This is normal during a rolling upgrade.");
  }
}
 
Example 3  Project: flink   File: HBaseTestingClusterAutoStarter.java
@BeforeClass
public static void setUp() throws Exception {
	// HBase 1.4 does not work with Hadoop 3
	// because it uses Guava 12.0.1, Hadoop 3 uses Guava 27.0-jre.
	// There is no Guava version in between that works with both.
	Assume.assumeTrue("This test is skipped for Hadoop versions above 3", VersionUtil.compareVersions(System.getProperty("hadoop.version"), "3.0.0") < 0);

	LOG.info("HBase minicluster: Starting");

	TEST_UTIL.startMiniCluster(1);

	// https://issues.apache.org/jira/browse/HBASE-11711
	TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", -1);

	// Make sure the zookeeper quorum value contains the right port number (varies per run).
	LOG.info("Hbase minicluster client port: " + TEST_UTIL.getZkCluster().getClientPort());
	TEST_UTIL.getConfiguration().set("hbase.zookeeper.quorum", "localhost:" + TEST_UTIL.getZkCluster().getClientPort());

	conf = initialize(TEST_UTIL.getConfiguration());
	LOG.info("HBase minicluster: Running");
}
 
Example 4  Project: flink   File: YarnFileStageTestS3ITCase.java
@Test
@RetryOnFailure(times = 3)
public void testRecursiveUploadForYarnS3n() throws Exception {
	// skip test on Hadoop 3: https://issues.apache.org/jira/browse/HADOOP-14738
	Assume.assumeTrue("This test is skipped for Hadoop versions above 3", VersionUtil.compareVersions(System.getProperty("hadoop.version"), "3.0.0") < 0);

	try {
		Class.forName("org.apache.hadoop.fs.s3native.NativeS3FileSystem");
	} catch (ClassNotFoundException e) {
		// not in the classpath, cannot run this test
		String msg = "Skipping test because NativeS3FileSystem is not in the class path";
		log.info(msg);
		assumeNoException(msg, e);
	}
	testRecursiveUploadForYarn("s3n", "testYarn-s3n");
}
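Both Flink examples guard the test the same way: JUnit's Assume.assumeTrue aborts (rather than fails) the test unless the hadoop.version system property sorts strictly below 3.0.0. A minimal sketch of the pattern; the fallback version here is my own assumption, since the real tests rely on the build setting hadoop.version, and passing a null version string into compareVersions would fail:

import org.apache.hadoop.util.VersionUtil;
import org.junit.Assume;
import org.junit.Test;

public class HadoopVersionGuardTest {

	@Test
	public void runsOnlyOnHadoop2() {
		// The "2.8.5" default is illustrative; Flink's build sets the property.
		String hadoopVersion = System.getProperty("hadoop.version", "2.8.5");
		Assume.assumeTrue("This test is skipped for Hadoop versions above 3",
				VersionUtil.compareVersions(hadoopVersion, "3.0.0") < 0);

		// ... test body that only runs against Hadoop 2.x ...
	}
}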
 
Example 5  Project: zeppelin   File: SparkShims.java
/**
 * This is a temporary patch to support old versions of YARN that have not adopted YARN-6615.
 *
 * @return true if YARN-6615 is patched in the given Hadoop version, false otherwise
 */
protected boolean supportYarn6615(String version) {
  return (VersionUtil.compareVersions(HADOOP_VERSION_2_6_6, version) <= 0
          && VersionUtil.compareVersions(HADOOP_VERSION_2_7_0, version) > 0)
      || (VersionUtil.compareVersions(HADOOP_VERSION_2_7_4, version) <= 0
          && VersionUtil.compareVersions(HADOOP_VERSION_2_8_0, version) > 0)
      || (VersionUtil.compareVersions(HADOOP_VERSION_2_8_2, version) <= 0
          && VersionUtil.compareVersions(HADOOP_VERSION_2_9_0, version) > 0)
      || (VersionUtil.compareVersions(HADOOP_VERSION_2_9_0, version) <= 0
          && VersionUtil.compareVersions(HADOOP_VERSION_3_0_0, version) > 0)
      || (VersionUtil.compareVersions(HADOOP_VERSION_3_0_0_ALPHA4, version) <= 0)
      || (VersionUtil.compareVersions(HADOOP_VERSION_3_0_0, version) <= 0);
}
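Written out as ranges, and assuming the HADOOP_VERSION_* constants hold the literal version strings their names suggest (e.g. HADOOP_VERSION_2_7_4 = "2.7.4"), the method accepts 2.6.6 up to but not including 2.7.0, 2.7.4 up to 2.8.0, and everything from 2.8.2 upward, since the last three clauses jointly cover all versions from 2.9.0 on. A hedged standalone sketch of the same acceptance set with the constants inlined:

import org.apache.hadoop.util.VersionUtil;

public class Yarn6615Check {

  /** Inclusive-exclusive range test: minInclusive <= v < maxExclusive. */
  static boolean inRange(String v, String minInclusive, String maxExclusive) {
    return VersionUtil.compareVersions(minInclusive, v) <= 0
        && VersionUtil.compareVersions(maxExclusive, v) > 0;
  }

  /** Same acceptance set as supportYarn6615 above, simplified. */
  static boolean supportYarn6615(String version) {
    return inRange(version, "2.6.6", "2.7.0")
        || inRange(version, "2.7.4", "2.8.0")
        || VersionUtil.compareVersions("2.8.2", version) <= 0;
  }

  public static void main(String[] args) {
    System.out.println(supportYarn6615("2.7.3")); // false: falls in the 2.7.0-2.7.4 gap
    System.out.println(supportYarn6615("2.7.4")); // true
    System.out.println(supportYarn6615("3.1.0")); // true: everything from 2.8.2 on is accepted
  }
}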
 
Example 6  Project: geowave   File: MiniAccumuloClusterFactory.java
protected static boolean isYarn() {
  return VersionUtil.compareVersions(VersionInfo.getVersion(), "2.2.0") >= 0;
}
 
Example 7  Project: geowave   File: AccumuloMiniCluster.java
protected static boolean isYarn() {
  return VersionUtil.compareVersions(VersionInfo.getVersion(), "2.2.0") >= 0;
}
 
Example 8  Project: geowave   File: TestUtils.java
public static boolean isYarn() {
  return VersionUtil.compareVersions(VersionInfo.getVersion(), "2.2.0") >= 0;
}
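All three geowave variants share the same guard: any runtime Hadoop of 2.2.0 or newer is treated as a YARN deployment, presumably because 2.2.0 was the first GA release of the Hadoop 2 / YARN line. VersionInfo.getVersion() reports the version of the hadoop-common artifact actually on the classpath, so the check needs no external configuration. A minimal standalone sketch (the class name YarnProbe is mine):

import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.util.VersionUtil;

public class YarnProbe {
  public static void main(String[] args) {
    // Version of the hadoop-common jar on the classpath, e.g. "2.8.5".
    String runtime = VersionInfo.getVersion();
    boolean isYarn = VersionUtil.compareVersions(runtime, "2.2.0") >= 0;
    System.out.println("Hadoop " + runtime + (isYarn ? ": YARN-based" : ": pre-YARN (MR1)"));
  }
}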
 
Example 9  Project: Bats   File: ParquetReaderUtility.java
/**
 * Binary metadata written prior to Drill version {@link #ALLOWED_DRILL_VERSION_FOR_BINARY}
 * may contain incorrectly defined min / max values.
 * If the given version is null, it is assumed to predate {@link #ALLOWED_DRILL_VERSION_FOR_BINARY};
 * in that case such metadata may be read only if {@link ParquetReaderConfig#enableStringsSignedMinMax()} is true.
 *
 * @param drillVersion Drill version used to create the metadata file
 * @param readerConfig Parquet reader configuration
 * @return true if reading binary min / max values is allowed, false otherwise
 */
private static boolean allowBinaryMetadata(String drillVersion, ParquetReaderConfig readerConfig) {
  return readerConfig.enableStringsSignedMinMax() ||
    (drillVersion != null && VersionUtil.compareVersions(ALLOWED_DRILL_VERSION_FOR_BINARY, drillVersion) <= 0);
}
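Note the argument order: the constant comes first, so compareVersions(ALLOWED_DRILL_VERSION_FOR_BINARY, drillVersion) <= 0 reads as "the metadata was written by the allowed version or a later one". A standalone sketch of the same decision, with ParquetReaderConfig reduced to a boolean and the constant's value assumed to be "1.15.0" (check Drill's sources for the real value):

import org.apache.hadoop.util.VersionUtil;

public class BinaryMetadataCheck {
  // Assumed value for illustration; the real constant lives in Drill's ParquetReaderUtility.
  static final String ALLOWED_DRILL_VERSION_FOR_BINARY = "1.15.0";

  static boolean allowBinaryMetadata(String drillVersion, boolean stringsSignedMinMaxEnabled) {
    // A null version is treated as pre-fix metadata, so only the config override lets it through.
    return stringsSignedMinMaxEnabled
        || (drillVersion != null
            && VersionUtil.compareVersions(ALLOWED_DRILL_VERSION_FOR_BINARY, drillVersion) <= 0);
  }

  public static void main(String[] args) {
    System.out.println(allowBinaryMetadata("1.16.0", false)); // true: written after the fix
    System.out.println(allowBinaryMetadata("1.14.0", false)); // false: needs the override
    System.out.println(allowBinaryMetadata(null, true));      // true: override always allows reading
  }
}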
 