Usage examples for the org.apache.hadoop.util.VersionInfo class

The examples below show how the org.apache.hadoop.util.VersionInfo API is used in real open-source projects; you can also follow the project links to read the full source on GitHub.
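
As a quick orientation before the project examples, here is a minimal, self-contained sketch that prints the build metadata exposed by VersionInfo. It is not taken from any of the projects below; the class name ShowHadoopVersion and the printed labels are illustrative, while the accessors (getVersion, getBuildVersion, getDate, getRevision) are the ones that recur throughout this page.

import org.apache.hadoop.util.VersionInfo;

// Minimal sketch: print the build metadata of the Hadoop artifacts on the classpath.
public class ShowHadoopVersion {
  public static void main(String[] args) {
    // These are the same accessors used by the project examples below.
    System.out.println("Hadoop version : " + VersionInfo.getVersion());      // e.g. "2.7.3"
    System.out.println("Build version  : " + VersionInfo.getBuildVersion()); // version plus revision, user and source checksum
    System.out.println("Built on       : " + VersionInfo.getDate());
    System.out.println("Source revision: " + VersionInfo.getRevision());
  }
}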

Example 1  Project: hadoop  File: ClusterInfo.java
public ClusterInfo(ResourceManager rm) {
  long ts = ResourceManager.getClusterTimeStamp();

  this.id = ts;
  this.state = rm.getServiceState();
  this.haState = rm.getRMContext().getHAServiceState();
  this.rmStateStoreName = rm.getRMContext().getStateStore().getClass()
      .getName();
  this.startedOn = ts;
  this.resourceManagerVersion = YarnVersionInfo.getVersion();
  this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion();
  this.resourceManagerVersionBuiltOn = YarnVersionInfo.getDate();
  this.hadoopVersion = VersionInfo.getVersion();
  this.hadoopBuildVersion = VersionInfo.getBuildVersion();
  this.hadoopVersionBuiltOn = VersionInfo.getDate();
}
 
Example 2  Project: hadoop  File: ListPathsServlet.java
/**
 * Build a map from the query string, setting values and defaults.
 */
protected Map<String,String> buildRoot(HttpServletRequest request,
    XMLOutputter doc) {
  final String path = ServletUtil.getDecodedPath(request, "/listPaths");
  final String exclude = request.getParameter("exclude") != null
    ? request.getParameter("exclude") : "";
  final String filter = request.getParameter("filter") != null
    ? request.getParameter("filter") : ".*";
  final boolean recur = request.getParameter("recursive") != null
    && "yes".equals(request.getParameter("recursive"));

  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
 
Example 3  Project: hadoop  File: BPServiceActor.java
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
  // build and layout versions should match
  String nnVersion = nsInfo.getSoftwareVersion();
  String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
  if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
    IncorrectVersionException ive = new IncorrectVersionException(
        minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
    LOG.warn(ive.getMessage());
    throw ive;
  }
  String dnVersion = VersionInfo.getVersion();
  if (!nnVersion.equals(dnVersion)) {
    LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
        "DataNode version '" + dnVersion + "' but is within acceptable " +
        "limits. Note: This is normal during a rolling upgrade.");
  }
}
 
Example 4  Project: hadoop  File: NNThroughputBenchmark.java
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Example 5  Project: hadoop  File: TestDatanodeRegister.java
@Before
public void setUp() throws IOException {
  mockDnConf = mock(DNConf.class);
  doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();
  
  DataNode mockDN = mock(DataNode.class);
  doReturn(true).when(mockDN).shouldRun();
  doReturn(mockDnConf).when(mockDN).getDnConf();
  
  BPOfferService mockBPOS = mock(BPOfferService.class);
  doReturn(mockDN).when(mockBPOS).getDataNode();
  
  actor = new BPServiceActor(INVALID_ADDR, mockBPOS);

  fakeNsInfo = mock(NamespaceInfo.class);
  // Return a good software version.
  doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
  // Return a good layout version for now.
  doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
      .getLayoutVersion();
  
  DatanodeProtocolClientSideTranslatorPB fakeDnProt = 
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
  actor.setNameNode(fakeDnProt);
}
 
Example 6  Project: hadoop  File: TestDatanodeRegister.java
@Test
public void testSoftwareVersionDifferences() throws Exception {
  // We expect no exception to be thrown when the software versions match.
  assertEquals(VersionInfo.getVersion(),
      actor.retrieveNamespaceInfo().getSoftwareVersion());
  
  // We expect no exception to be thrown when the min NN version is below the
  // reported NN version.
  doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion());
  
  // When the NN reports a version that's too low, throw an exception.
  doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  try {
    actor.retrieveNamespaceInfo();
    fail("Should have thrown an exception for NN with too-low version");
  } catch (IncorrectVersionException ive) {
    GenericTestUtils.assertExceptionContains(
        "The reported NameNode version is too low", ive);
    LOG.info("Got expected exception", ive);
  }
}
 
Example 7  Project: dremio-oss  File: ShimLoader.java
/**
 * Return the "major" version of Hadoop currently on the classpath.
 * Releases in the 2.x and 3.x series are mapped to the legacy
 * "0.23" shim name (HADOOP23VERSIONNAME); any other major
 * version is rejected.
 */
public static String getMajorVersion() {
  String vers = VersionInfo.getVersion();

  String[] parts = vers.split("\\.");
  if (parts.length < 2) {
    throw new RuntimeException("Illegal Hadoop Version: " + vers +
        " (expected A.B.* format)");
  }

  switch (Integer.parseInt(parts[0])) {
  case 2:
  case 3:
    return HADOOP23VERSIONNAME;
  default:
    throw new IllegalArgumentException("Unrecognized Hadoop major version number: " + vers);
  }
}
 
Example 8  Project: big-c  File: NodeInfo.java
public NodeInfo(final Context context, final ResourceView resourceView) {

    this.id = context.getNodeId().toString();
    this.nodeHostName = context.getNodeId().getHost();
    this.totalVmemAllocatedContainersMB = resourceView
        .getVmemAllocatedForContainers() / BYTES_IN_MB;
    this.vmemCheckEnabled = resourceView.isVmemCheckEnabled();
    this.totalPmemAllocatedContainersMB = resourceView
        .getPmemAllocatedForContainers() / BYTES_IN_MB;
    this.pmemCheckEnabled = resourceView.isPmemCheckEnabled();
    this.totalVCoresAllocatedContainers = resourceView
        .getVCoresAllocatedForContainers();
    this.nodeHealthy = context.getNodeHealthStatus().getIsNodeHealthy();
    this.lastNodeUpdateTime = context.getNodeHealthStatus()
        .getLastHealthReportTime();

    this.healthReport = context.getNodeHealthStatus().getHealthReport();

    this.nodeManagerVersion = YarnVersionInfo.getVersion();
    this.nodeManagerBuildVersion = YarnVersionInfo.getBuildVersion();
    this.nodeManagerVersionBuiltOn = YarnVersionInfo.getDate();
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
  }
 
Example 9  Project: big-c  File: ClusterInfo.java
public ClusterInfo(ResourceManager rm) {
  long ts = ResourceManager.getClusterTimeStamp();

  this.id = ts;
  this.state = rm.getServiceState();
  this.haState = rm.getRMContext().getHAServiceState();
  this.rmStateStoreName = rm.getRMContext().getStateStore().getClass()
      .getName();
  this.startedOn = ts;
  this.resourceManagerVersion = YarnVersionInfo.getVersion();
  this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion();
  this.resourceManagerVersionBuiltOn = YarnVersionInfo.getDate();
  this.hadoopVersion = VersionInfo.getVersion();
  this.hadoopBuildVersion = VersionInfo.getBuildVersion();
  this.hadoopVersionBuiltOn = VersionInfo.getDate();
}
 
Example 10  Project: big-c  File: ListPathsServlet.java
/**
 * Build a map from the query string, setting values and defaults.
 */
protected Map<String,String> buildRoot(HttpServletRequest request,
    XMLOutputter doc) {
  final String path = ServletUtil.getDecodedPath(request, "/listPaths");
  final String exclude = request.getParameter("exclude") != null
    ? request.getParameter("exclude") : "";
  final String filter = request.getParameter("filter") != null
    ? request.getParameter("filter") : ".*";
  final boolean recur = request.getParameter("recursive") != null
    && "yes".equals(request.getParameter("recursive"));

  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
 
Example 11  Project: big-c  File: BPServiceActor.java
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
  // build and layout versions should match
  String nnVersion = nsInfo.getSoftwareVersion();
  String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
  if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
    IncorrectVersionException ive = new IncorrectVersionException(
        minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
    LOG.warn(ive.getMessage());
    throw ive;
  }
  String dnVersion = VersionInfo.getVersion();
  if (!nnVersion.equals(dnVersion)) {
    LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
        "DataNode version '" + dnVersion + "' but is within acceptable " +
        "limits. Note: This is normal during a rolling upgrade.");
  }
}
 
Example 12  Project: big-c  File: NNThroughputBenchmark.java
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Example 13  Project: big-c  File: TestDatanodeRegister.java
@Before
public void setUp() throws IOException {
  mockDnConf = mock(DNConf.class);
  doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();
  
  DataNode mockDN = mock(DataNode.class);
  doReturn(true).when(mockDN).shouldRun();
  doReturn(mockDnConf).when(mockDN).getDnConf();
  
  BPOfferService mockBPOS = mock(BPOfferService.class);
  doReturn(mockDN).when(mockBPOS).getDataNode();
  
  actor = new BPServiceActor(INVALID_ADDR, mockBPOS);

  fakeNsInfo = mock(NamespaceInfo.class);
  // Return a good software version.
  doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
  // Return a good layout version for now.
  doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
      .getLayoutVersion();
  
  DatanodeProtocolClientSideTranslatorPB fakeDnProt = 
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
  actor.setNameNode(fakeDnProt);
}
 
Example 14  Project: big-c  File: TestDatanodeRegister.java
@Test
public void testSoftwareVersionDifferences() throws Exception {
  // We expect no exception to be thrown when the software versions match.
  assertEquals(VersionInfo.getVersion(),
      actor.retrieveNamespaceInfo().getSoftwareVersion());
  
  // We expect no exception to be thrown when the min NN version is below the
  // reported NN version.
  doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion());
  
  // When the NN reports a version that's too low, throw an exception.
  doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
  doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
  try {
    actor.retrieveNamespaceInfo();
    fail("Should have thrown an exception for NN with too-low version");
  } catch (IncorrectVersionException ive) {
    GenericTestUtils.assertExceptionContains(
        "The reported NameNode version is too low", ive);
    LOG.info("Got expected exception", ive);
  }
}
 
Example 15  Project: zeppelin  File: SparkShims.java
protected void buildSparkJobUrl(String master,
                                String sparkWebUrl,
                                int jobId,
                                Properties jobProperties,
                                InterpreterContext context) {
  String jobUrl = sparkWebUrl + "/jobs/job?id=" + jobId;
  String version = VersionInfo.getVersion();
  if (master.toLowerCase().contains("yarn") && !supportYarn6615(version)) {
    jobUrl = sparkWebUrl + "/jobs";
  }
  String jobGroupId = jobProperties.getProperty("spark.jobGroup.id");

  Map<String, String> infos = new java.util.HashMap<String, String>();
  infos.put("jobUrl", jobUrl);
  infos.put("label", "SPARK JOB");
  infos.put("tooltip", "View in Spark web UI");
  infos.put("noteId", getNoteId(jobGroupId));
  infos.put("paraId", getParagraphId(jobGroupId));
  LOGGER.debug("Send spark job url: " + infos);
  context.getIntpEventClient().onParaInfosReceived(infos);
}
 
Example 16
private static boolean truncate(final FileSystem hadoopFs, final Path file, final long length) throws IOException {
	if (!HadoopUtils.isMinHadoopVersion(2, 7)) {
		throw new IllegalStateException("Truncation is not available in hadoop version < 2.7 , You are on Hadoop " + VersionInfo.getVersion());
	}

	if (truncateHandle != null) {
		try {
			return (Boolean) truncateHandle.invoke(hadoopFs, file, length);
		}
		catch (InvocationTargetException e) {
			ExceptionUtils.rethrowIOException(e.getTargetException());
		}
		catch (Throwable t) {
			throw new IOException(
					"Truncation of file failed because of access/linking problems with Hadoop's truncate call. " +
							"This is most likely a dependency conflict or class loading problem.");
		}
	}
	else {
		throw new IllegalStateException("Truncation handle has not been initialized");
	}
	return false;
}
 
Example 17  Project: flink  File: HadoopRecoverableWriter.java
/**
 * Creates a new Recoverable writer.
 * @param fs The Hadoop file system on which the writer operates.
 */
public HadoopRecoverableWriter(org.apache.hadoop.fs.FileSystem fs) {
	this.fs = checkNotNull(fs);

	// This writer is only supported on a subset of file systems
	if (!"hdfs".equalsIgnoreCase(fs.getScheme())) {
		throw new UnsupportedOperationException(
				"Recoverable writers on Hadoop are only supported for HDFS");
	}

	// Part of functionality depends on specific versions. We check these schemes and versions eagerly for
	// better error messages.
	if (!HadoopUtils.isMinHadoopVersion(2, 7)) {
		LOG.warn("WARNING: You are running on hadoop version " + VersionInfo.getVersion() + "." +
				" If your RollingPolicy does not roll on every checkpoint/savepoint, the StreamingFileSink will throw an exception upon recovery.");
	}
}
 
Example 18
/**
 * Set the number of locations in the split to SPLIT_MAX_NUM_LOCATIONS if it is larger than
 * SPLIT_MAX_NUM_LOCATIONS (MAPREDUCE-5186).
 */
private static List<InputSplit> cleanSplits(List<InputSplit> splits) throws IOException {
  if (VersionInfo.getVersion().compareTo("2.3.0") >= 0) {
    // This issue was fixed in 2.3.0, if newer version, no need to clean up splits
    return splits;
  }

  List<InputSplit> cleanedSplits = Lists.newArrayList();

  for (int i = 0; i < splits.size(); i++) {
    CombineFileSplit oldSplit = (CombineFileSplit) splits.get(i);
    String[] locations = oldSplit.getLocations();

    Preconditions.checkNotNull(locations, "CombineFileSplit.getLocations() returned null");

    if (locations.length > SPLIT_MAX_NUM_LOCATIONS) {
      locations = Arrays.copyOf(locations, SPLIT_MAX_NUM_LOCATIONS);
    }

    cleanedSplits.add(new CombineFileSplit(oldSplit.getPaths(), oldSplit.getStartOffsets(), oldSplit.getLengths(),
        locations));
  }
  return cleanedSplits;
}
 
Example 19  Project: hadoop-gpu  File: ListPathsServlet.java
/**
 * Build a map from the query string, setting values and defaults.
 */
protected Map<String,String> buildRoot(HttpServletRequest request,
    XMLOutputter doc) {
  final String path = request.getPathInfo() != null
    ? request.getPathInfo() : "/";
  final String exclude = request.getParameter("exclude") != null
    ? request.getParameter("exclude") : "\\..*\\.crc";
  final String filter = request.getParameter("filter") != null
    ? request.getParameter("filter") : ".*";
  final boolean recur = request.getParameter("recursive") != null
    && "yes".equals(request.getParameter("recursive"));

  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
 
Example 20  Project: RDFS  File: ListPathsServlet.java
/**
 * Build a map from the query string, setting values and defaults.
 */
protected Map<String,String> buildRoot(HttpServletRequest request,
    XMLOutputter doc) {
  final String path = request.getPathInfo() != null
    ? request.getPathInfo() : "/";
  final String exclude = request.getParameter("exclude") != null
    ? request.getParameter("exclude") : "\\..*\\.crc";
  final String filter = request.getParameter("filter") != null
    ? request.getParameter("filter") : ".*";
  final boolean recur = request.getParameter("recursive") != null
    && "yes".equals(request.getParameter("recursive"));

  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
 
Example 21  Project: RDFS  File: FSDataset.java
/**
 * Register the FSDataset MBean using the name
 *        "hadoop:service=DataNode,name=FSDatasetState-<storageid>"
 */
void registerMBean(final String storageId) {
  // We wrap to bypass the standard MBean naming convention.
  // This wrapping can be removed in Java 6, which is more flexible about
  // package naming for MBeans and their implementations.
  StandardMBean bean;
  String storageName;
  if (storageId == null || storageId.equals("")) {// Temp fix for the uninitialized storage
    storageName = "UndefinedStorageId" + rand.nextInt();
  } else {
    storageName = storageId;
  }
  try {
    bean = new StandardMBean(this,FSDatasetMBean.class);
    mbeanName = MBeanUtil.registerMBean("DataNode", "FSDatasetState-" + storageName, bean);
    versionBeanName = VersionInfo.registerJMX("DataNode");
  } catch (NotCompliantMBeanException e) {
    e.printStackTrace();
  }

  DataNode.LOG.info("Registered FSDatasetStatusMBean");
}
 
Example 22
/**
 * Add a file path to the current set of classpath entries. It adds the file to the cache as well.
 * <p/>
 * This is copied from Hadoop 0.20.2 o.a.h.filecache.DistributedCache so we can inject the correct path separator for
 * the environment the cluster is executing in. See {@link #getClusterPathSeparator()}.
 *
 * @param file Path of the file to be added
 * @param conf Configuration that contains the classpath setting
 */
public void addFileToClassPath( Path file, Configuration conf )
  throws IOException {

  // Save off the classloader, to make sure the version info can be loaded successfully from the hadoop-common JAR
  ClassLoader cl = Thread.currentThread().getContextClassLoader();
  Thread.currentThread().setContextClassLoader( VersionInfo.class.getClassLoader() );

  // Restore the original classloader
  Thread.currentThread().setContextClassLoader( cl );

  String classpath = conf.get( "mapred.job.classpath.files" );
  conf.set( "mapred.job.classpath.files", classpath == null ? file.toString()
    : classpath + getClusterPathSeparator() + file.toString() );
  FileSystem fs = FileSystem.get( conf );
  URI uri = fs.makeQualified( file ).toUri();

  org.apache.hadoop.mapreduce.filecache.DistributedCache.addCacheFile( uri, conf );
}
 
Example 23  Project: Flink-CEPplus  File: RollingSinkSecuredITCase.java
/**
 * Skips all tests if the Hadoop version doesn't match.
 * We can't run this test class until HDFS-9213 is fixed which allows a secure DataNode
 * to bind to non-privileged ports for testing.
 * For now, we skip this test class until Hadoop version 3.x.x.
 */
private static void skipIfHadoopVersionIsNotAppropriate() {
	// Skips all tests if the Hadoop version doesn't match
	String hadoopVersionString = VersionInfo.getVersion();
	String[] split = hadoopVersionString.split("\\.");
	if (split.length != 3) {
		throw new IllegalStateException("Hadoop version was not of format 'X.X.X': " + hadoopVersionString);
	}
	Assume.assumeTrue(
		// check whether we're running Hadoop version >= 3.x.x
		Integer.parseInt(split[0]) >= 3
	);
}
 
Example 24
@Test
public void testGetMethodReflectiveHadoop22() {
	assumeTrue(
		"Method getContainersFromPreviousAttempts is not supported by Hadoop: " +
			VersionInfo.getVersion(),
		isHadoopVersionGreaterThanOrEquals(2, 2));

	final RegisterApplicationMasterResponseReflector registerApplicationMasterResponseReflector =
		new RegisterApplicationMasterResponseReflector(LOG);

	final Method method = registerApplicationMasterResponseReflector.getMethod();
	assertThat(method, notNullValue());
}
 
Example 25  Project: Flink-CEPplus  File: HadoopUtils.java
/**
 * Checks if the Hadoop dependency is at least of the given version.
 */
public static boolean isMinHadoopVersion(int major, int minor) throws FlinkRuntimeException {
	String versionString = VersionInfo.getVersion();
	String[] versionParts = versionString.split("\\.");

	if (versionParts.length < 2) {
		throw new FlinkRuntimeException(
				"Cannot determine version of Hadoop, unexpected version string: " + versionString);
	}

	int maj = Integer.parseInt(versionParts[0]);
	int min = Integer.parseInt(versionParts[1]);

	return maj > major || (maj == major && min >= minor);
}
 
Example 26
/**
 * This test needs to be skipped for earlier Hadoop versions because those
 * have a bug.
 */
@Override
public void testMkdirsFailsForExistingFile() throws Exception {
	final String versionString = VersionInfo.getVersion();
	final String prefix = versionString.substring(0, 3);
	final float version = Float.parseFloat(prefix);
	Assume.assumeTrue("Cannot execute this test on Hadoop prior to 2.8", version >= 2.8f);

	super.testMkdirsFailsForExistingFile();
}
 
Example 27
@Test
public void testGetMethodReflectiveHadoop22() {
	assumeTrue(
		"Method getContainersFromPreviousAttempts is not supported by Hadoop: " +
			VersionInfo.getVersion(),
		isHadoopVersionGreaterThanOrEquals(2, 2));

	final RegisterApplicationMasterResponseReflector registerApplicationMasterResponseReflector =
		new RegisterApplicationMasterResponseReflector(LOG);

	final Method method = registerApplicationMasterResponseReflector.getMethod();
	assertThat(method, notNullValue());
}
 
Example 28  Project: flink  File: HadoopUtils.java
/**
 * Checks if the Hadoop dependency is at least of the given version.
 */
public static boolean isMinHadoopVersion(int major, int minor) throws FlinkRuntimeException {
	String versionString = VersionInfo.getVersion();
	String[] versionParts = versionString.split("\\.");

	if (versionParts.length < 2) {
		throw new FlinkRuntimeException(
				"Cannot determine version of Hadoop, unexpected version string: " + versionString);
	}

	int maj = Integer.parseInt(versionParts[0]);
	int min = Integer.parseInt(versionParts[1]);

	return maj > major || (maj == major && min >= minor);
}
 
Example 29  Project: flink  File: HadoopLocalFileSystemBehaviorTest.java
/**
 * This test needs to be skipped for earlier Hadoop versions because those
 * have a bug.
 */
@Override
public void testMkdirsFailsForExistingFile() throws Exception {
	final String versionString = VersionInfo.getVersion();
	final String prefix = versionString.substring(0, 3);
	final float version = Float.parseFloat(prefix);
	Assume.assumeTrue("Cannot execute this test on Hadoop prior to 2.8", version >= 2.8f);

	super.testMkdirsFailsForExistingFile();
}
 
Example 30  Project: hadoop  File: NodeInfo.java
public NodeInfo(final Context context, final ResourceView resourceView) {

    this.id = context.getNodeId().toString();
    this.nodeHostName = context.getNodeId().getHost();
    this.totalVmemAllocatedContainersMB = resourceView
        .getVmemAllocatedForContainers() / BYTES_IN_MB;
    this.vmemCheckEnabled = resourceView.isVmemCheckEnabled();
    this.totalPmemAllocatedContainersMB = resourceView
        .getPmemAllocatedForContainers() / BYTES_IN_MB;
    this.pmemCheckEnabled = resourceView.isPmemCheckEnabled();
    this.totalVCoresAllocatedContainers = resourceView
        .getVCoresAllocatedForContainers();
    this.totalGCoresAllocatedContainers = resourceView
        .getGCoresAllocatedForContainers();
    this.nodeHealthy = context.getNodeHealthStatus().getIsNodeHealthy();
    this.lastNodeUpdateTime = context.getNodeHealthStatus()
        .getLastHealthReportTime();

    this.healthReport = context.getNodeHealthStatus().getHealthReport();

    this.nodeManagerVersion = YarnVersionInfo.getVersion();
    this.nodeManagerBuildVersion = YarnVersionInfo.getBuildVersion();
    this.nodeManagerVersionBuiltOn = YarnVersionInfo.getDate();
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
  }
 