org.junit.Assume#assumeTrue() Code Examples

Listed below are usage examples of org.junit.Assume#assumeTrue(), collected from open-source projects on GitHub.
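
For background: in JUnit 4, a failed assumption does not fail a test. Assume.assumeTrue throws an AssumptionViolatedException when its condition is false, and the runner then reports the test as skipped rather than failed. A minimal sketch of the two common overloads (the class and test names here are illustrative only):

import org.junit.Assume;
import org.junit.Test;

public class AssumeSketchTest {

    @Test
    public void runsOnlyOffWindows() {
        // Without a message: if the condition is false, the test is skipped.
        Assume.assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
        // ... body runs only when the assumption holds ...
    }

    @Test
    public void runsOnlyOnJava8() {
        // With a message: the reason is attached to the skip report.
        Assume.assumeTrue("requires Java 1.8",
                System.getProperty("java.version").startsWith("1.8"));
    }
}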

Example 1  Project: flink  File: ContinuousFileProcessingTest.java
@BeforeClass
public static void createHDFS() {
	Assume.assumeTrue("HDFS cluster cannot be start on Windows without extensions.", !OperatingSystem.isWindows());

	try {
		File hdfsDir = tempFolder.newFolder();

		org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
		hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDir.getAbsolutePath());
		hdConf.set("dfs.block.size", String.valueOf(1048576)); // this is the minimum we can set.

		MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
		hdfsCluster = builder.build();

		hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";
		hdfs = new org.apache.hadoop.fs.Path(hdfsURI).getFileSystem(hdConf);

	} catch (Throwable e) {
		e.printStackTrace();
		Assert.fail("Test failed " + e.getMessage());
	}
}
 
Example 2  Project: jcifs-ng  File: EnumTest.java
@Ignore ( "This causes a connection to whatever local master browser is available, config may be incompatible with it" )
@Test
public void testBrowse () throws MalformedURLException, CIFSException {
    CIFSContext ctx = withAnonymousCredentials();
    try ( SmbFile smbFile = new SmbFile("smb://", ctx) ) {
        try ( CloseableIterator<SmbResource> it = smbFile.children() ) {
            while ( it.hasNext() ) {
                try ( SmbResource serv = it.next() ) {
                    System.err.println(serv.getName());
                }
            }
        }
    }
    catch ( SmbUnsupportedOperationException e ) {
        Assume.assumeTrue("Browsing unsupported", false);
    }
}
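
The Assume.assumeTrue("Browsing unsupported", false) call in the catch block above is a common idiom: an expected, environment-dependent exception is converted into a skipped test instead of a failure. A sketch of the more direct equivalent, assuming JUnit 4.12+ where the exception type is public API:

    catch ( SmbUnsupportedOperationException e ) {
        // Skip the test with an explicit reason; equivalent to
        // Assume.assumeTrue("Browsing unsupported", false).
        throw new org.junit.AssumptionViolatedException("Browsing unsupported");
    }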
 
Example 3  Project: JQF  File: PngReaderTest.java
@Fuzz
public void read(ImageInputStream input) throws IOException {
    // Decode image from input stream
    reader.setInput(input);
    // Bound dimensions
    Assume.assumeTrue(reader.getHeight(0) < 1024);
    Assume.assumeTrue(reader.getWidth(0) < 1024);
    // Parse PNG
    reader.read(0);
}
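
In this JQF fuzzing harness, Assume serves a different purpose than environment checks: a violated assumption tells the fuzzing engine to discard the generated input as invalid rather than report a failure, which is how the test above bounds image dimensions before attempting the expensive decode. A minimal sketch of the same input-filtering pattern on a primitive argument (the class, method, and property are hypothetical):

import static org.junit.Assert.assertTrue;

import org.junit.Assume;
import org.junit.runner.RunWith;

import edu.berkeley.cs.jqf.fuzz.Fuzz;
import edu.berkeley.cs.jqf.fuzz.JQF;

@RunWith(JQF.class)
public class AbsPropertyTest {

    @Fuzz
    public void absIsNonNegative(int x) {
        // Filter out the one input for which the property does not hold
        // (Math.abs(Integer.MIN_VALUE) overflows and stays negative);
        // the engine counts this as an invalid input, not a test failure.
        Assume.assumeTrue(x != Integer.MIN_VALUE);
        assertTrue(Math.abs(x) >= 0);
    }
}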
 
Example 4  Project: big-c  File: DFSTestUtil.java
public ShortCircuitTestContext(String testName) {
  this.testName = testName;
  this.sockDir = new TemporarySocketDirectory();
  DomainSocket.disableBindPathValidation();
  formerTcpReadsDisabled = DFSInputStream.tcpReadsDisabledForTesting;
  Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}
 
Example 5  Project: jcifs  File: EnumTest.java
@Test
public void testDomainSeverEnum () throws MalformedURLException, CIFSException {
    try ( SmbFile smbFile = new SmbFile("smb://" + getTestDomain(), withTestNTLMCredentials(getContext())) ) {
        String[] list = smbFile.list();
        assertNotNull(list);
        log.debug(Arrays.toString(list));
    }
    catch ( SmbUnsupportedOperationException e ) {
        Assume.assumeTrue(false);
    }
}
 
Example 6  Project: iceberg  File: TestSparkParquetReader.java
protected void writeAndValidate(Schema schema) throws IOException {
  Assume.assumeTrue("Parquet Avro cannot write non-string map keys", null == TypeUtil.find(schema,
      type -> type.isMapType() && type.asMapType().keyType() != Types.StringType.get()));

  List<GenericData.Record> expected = RandomData.generateList(schema, 100, 0L);

  File testFile = temp.newFile();
  Assert.assertTrue("Delete should succeed", testFile.delete());

  try (FileAppender<GenericData.Record> writer = Parquet.write(Files.localOutput(testFile))
      .schema(schema)
      .named("test")
      .build()) {
    writer.addAll(expected);
  }

  try (CloseableIterable<InternalRow> reader = Parquet.read(Files.localInput(testFile))
      .project(schema)
      .createReaderFunc(type -> SparkParquetReaders.buildReader(schema, type))
      .build()) {
    Iterator<InternalRow> rows = reader.iterator();
    for (int i = 0; i < expected.size(); i += 1) {
      Assert.assertTrue("Should have expected number of rows", rows.hasNext());
      assertEqualsUnsafe(schema.asStruct(), expected.get(i), rows.next());
    }
    Assert.assertFalse("Should not have extra rows", rows.hasNext());
  }
}
 
Example 7  Project: spydra  File: LifecycleIT.java
@Test
public void testLifecycle() throws Exception {
  // The Hadoop credentials provider is buggy: it tries to contact the metadata
  // service in GCP if we use the default account
  Assume.assumeTrue("Skipping lifecycle test, not running on gce and "
                    + "GOOGLE_APPLICATION_CREDENTIALS not set",
      hasApplicationJsonOrRunningOnGce());
  SpydraArgument testArgs = SpydraArgumentUtil.loadArguments("integration-test-config.json");
  SpydraArgument arguments = SpydraArgumentUtil
      .dataprocConfiguration(CLIENT_ID, testArgs.getLogBucket(), testArgs.getRegion());
  arguments.getCluster().numWorkers(3);
  arguments.getSubmit().jar(getExamplesJarPath());
  arguments.getSubmit().setJobArgs(Arrays.asList("pi", "1", "1"));

  // TODO We should test the init action as well, but uploading it before running the test is tricky
  // We could upload it manually to a test bucket here and set the right things
  arguments.getCluster().getOptions().remove(SpydraArgument.OPTION_INIT_ACTIONS);

  // Merge to get all other custom test arguments
  arguments = SpydraArgument.merge(arguments, testArgs);

  LOGGER.info("Using following service account to run gcloud commands locally: " +
      arguments.getCluster().getOptions().get(SpydraArgument.OPTION_ACCOUNT));
  Submitter submitter = Submitter.getSubmitter(arguments);
  assertTrue("job wasn't successful", submitter.executeJob(arguments));

  assertTrue(isClusterCollected(arguments));

  URI doneUri = URI.create(arguments.clusterProperties().getProperty(
      "mapred:mapreduce.jobhistory.done-dir"));
  LOGGER.info("Checking that we have two files in: " + doneUri);
  assertEquals(2, getFileCount(doneUri));
  URI intermediateUri = URI.create(arguments.clusterProperties().getProperty(
      "mapred:mapreduce.jobhistory.intermediate-done-dir"));
  LOGGER.info("Checking that we do not have any files in: " + intermediateUri);
  assertEquals(0, getFileCount(intermediateUri));
}
 
Example 8  Project: hadoop  File: TestLinuxContainerExecutor.java
@Test(timeout = 10000)
public void testPostExecuteAfterReacquisition() throws Exception {
  Assume.assumeTrue(shouldRun());
  // make up some bogus container ID
  ApplicationId appId = ApplicationId.newInstance(12345, 67890);
  ApplicationAttemptId attemptId =
      ApplicationAttemptId.newInstance(appId, 54321);
  ContainerId cid = ContainerId.newContainerId(attemptId, 9876);

  Configuration conf = new YarnConfiguration();
  conf.setClass(YarnConfiguration.NM_LINUX_CONTAINER_RESOURCES_HANDLER,
    TestResourceHandler.class, LCEResourcesHandler.class);
  LinuxContainerExecutor lce = new LinuxContainerExecutor();
  lce.setConf(conf);
  try {
    lce.init();
  } catch (IOException e) {
    // expected if LCE isn't set up right, but not necessary for this test
  }

  Container container = mock(Container.class);
  ContainerLaunchContext context = mock(ContainerLaunchContext.class);
  HashMap<String, String> env = new HashMap<>();

  when(container.getLaunchContext()).thenReturn(context);
  when(context.getEnvironment()).thenReturn(env);

  lce.reacquireContainer(new ContainerReacquisitionContext.Builder()
      .setContainer(container)
      .setUser("foouser")
      .setContainerId(cid)
      .build());
  assertTrue("postExec not called after reacquisition",
      TestResourceHandler.postExecContainers.contains(cid));
}
 
Example 9  Project: hadoop  File: TestBlockReaderLocalLegacy.java
@Test
public void testBothOldAndNewShortCircuitConfigured() throws Exception {
  final short REPL_FACTOR = 1;
  final int FILE_LENGTH = 512;
  Assume.assumeTrue(null == DomainSocket.getLoadingFailureReason());
  TemporarySocketDirectory socketDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = getConfiguration(socketDir);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  socketDir.close();
  FileSystem fs = cluster.getFileSystem();

  Path path = new Path("/foo");
  byte orig[] = new byte[FILE_LENGTH];
  for (int i = 0; i < orig.length; i++) {
    orig[i] = (byte)(i%10);
  }
  FSDataOutputStream fos = fs.create(path, (short)1);
  fos.write(orig);
  fos.close();
  DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
  FSDataInputStream fis = cluster.getFileSystem().open(path);
  byte buf[] = new byte[FILE_LENGTH];
  IOUtils.readFully(fis, buf, 0, FILE_LENGTH);
  fis.close();
  Assert.assertArrayEquals(orig, buf);
  cluster.shutdown();
}
 
Example 10  Project: wildfly-openssl  File: ALPNTest.java
@Test
public void testALPNFailure() throws IOException, NoSuchAlgorithmException, InterruptedException {
    Assume.assumeTrue(OpenSSLEngine.isAlpnSupported());
    try (ServerSocket serverSocket = SSLTestUtils.createServerSocket()) {
        final AtomicReference<byte[]> sessionID = new AtomicReference<>();
        final SSLContext sslContext = SSLTestUtils.createSSLContext("openssl.TLSv1.2");
        final SSLContext clientSslContext = SSLTestUtils.createClientSSLContext("openssl.TLSv1.2");
        final AtomicReference<OpenSSLEngine> engineAtomicReference = new AtomicReference<>();
        Thread acceptThread = new Thread(new EchoRunnable(serverSocket, sslContext, sessionID, (engine -> {
            OpenSSLEngine openSSLEngine = (OpenSSLEngine) engine;
            openSSLEngine.setApplicationProtocols("h2", "h2/13", "http");
            engineAtomicReference.set(openSSLEngine);
            return openSSLEngine;
        })));
        acceptThread.start();
        final OpenSSLSocket socket = (OpenSSLSocket) clientSslContext.getSocketFactory().createSocket();
        socket.connect(SSLTestUtils.createSocketAddress());
        socket.getOutputStream().write(MESSAGE.getBytes(StandardCharsets.US_ASCII));
        byte[] data = new byte[100];
        int read = socket.getInputStream().read(data);

        Assert.assertEquals(MESSAGE, new String(data, 0, read));
        Assert.assertNull("server side", engineAtomicReference.get().getSelectedApplicationProtocol());
        Assert.assertNull("client side", socket.getSelectedApplicationProtocol());
        serverSocket.close();
        acceptThread.join();
    }
}
 
Example 11  Project: jcifs  File: FileAttributesTest.java
@Test
public void testGetGroup () throws IOException {
    try ( SmbResource f = getDefaultShareRoot() ) {
        try {
            jcifs.SID security = f.getOwnerGroup();
            assertNotNull(security);
        }
        catch ( SmbUnsupportedOperationException e ) {
            Assume.assumeTrue("No Ntsmbs", false);
        }
    }
}
 
Example 12  Project: opencensus-java  File: JaxrsClientFilterTest.java
@Before
public void setUp() {
  // Mockito in this test depends on some class that's only available on JDK 1.8:
  // TypeDescription$Generic$AnnotationReader$Dispatcher$ForJava8CapableVm
  Assume.assumeTrue(System.getProperty("java.version").startsWith("1.8"));
}
 
Example 13  Project: Flink-CEPplus  File: HadoopMapredITCase.java
@Before
public void checkOperatingSystem() {
	// FLINK-5164 - see https://wiki.apache.org/hadoop/WindowsProblems
	Assume.assumeTrue("This test can't run successfully on Windows.", !OperatingSystem.isWindows());
}
 
Example 14  Project: netty-4.1.22  File: SniClientTest.java
@Test(timeout = 30000)
public void testSniSNIMatcherMatchesClientOpenSslServerOpenSsl() throws Exception {
    Assume.assumeTrue(PlatformDependent.javaVersion() >= 8);
    Assume.assumeTrue(OpenSsl.isAvailable());
    SniClientJava8TestUtil.testSniClient(SslProvider.OPENSSL, SslProvider.OPENSSL, true);
}
 
Example 15  Project: Tomcat7.0.67  File: TestCometProcessor.java
@Test
public void testAsyncClose() throws Exception {
    Assume.assumeTrue(
            "This test is skipped, because this connector does not support Comet.",
            isCometSupported());

    // Setup Tomcat instance
    Tomcat tomcat = getTomcatInstance();
    // No file system docBase required
    Context root = tomcat.addContext("", null);
    Tomcat.addServlet(root, "comet", new SimpleCometServlet());
    root.addServletMapping("/comet", "comet");
    Tomcat.addServlet(root, "hello", new HelloWorldServlet());
    root.addServletMapping("/hello", "hello");
    root.getPipeline().addValve(new AsyncCometCloseValve());
    tomcat.getConnector().setProperty("connectionTimeout", "5000");
    tomcat.start();

    // Create connection to Comet servlet
    final Socket socket =
        SocketFactory.getDefault().createSocket("localhost", getPort());
    socket.setSoTimeout(5000);

    final OutputStream os = socket.getOutputStream();
    String requestLine = "POST http://localhost:" + getPort() +
            "/comet HTTP/1.1\r\n";
    os.write(requestLine.getBytes());
    os.write("transfer-encoding: chunked\r\n".getBytes());
    os.write("\r\n".getBytes());

    InputStream is = socket.getInputStream();
    ResponseReaderThread readThread = new ResponseReaderThread(is);
    readThread.start();

    // Wait for the comet request/response to finish
    int count = 0;
    while (count < 10 && !readThread.getResponse().endsWith("0\r\n\r\n")) {
        Thread.sleep(500);
        count++;
    }

    if (count == 10) {
        fail("Comet request did not complete");
    }

    // Send a standard HTTP request on the same connection
    requestLine = "GET http://localhost:" + getPort() +
            "/hello HTTP/1.1\r\n";
    os.write(requestLine.getBytes());
    os.write("\r\n".getBytes());

    // Check for the expected response
    count = 0;
    while (count < 10 && !readThread.getResponse().contains(
            HelloWorldServlet.RESPONSE_TEXT)) {
        Thread.sleep(500);
        count++;
    }

    if (count == 10) {
        fail("Non-comet request did not complete");
    }

    readThread.join();
    os.close();
    is.close();
}
 
Example 16
@BeforeClass
public static void verifyOS() {
	Assume.assumeTrue("HDFS cluster cannot be started on Windows without extensions.", !OperatingSystem.isWindows());
}
 
Example 17  Project: Flink-CEPplus  File: OSSTestCredentials.java
public static void assumeCredentialsAvailable() {
	Assume.assumeTrue("No OSS credentials available in this test's environment", credentialsAvailable());
}
 
Example 18
@Test
public void testAes256() throws Exception {
    Assume.assumeTrue(Cipher.getMaxAllowedKeyLength("AES") >= 256);
    testBasic("", 256, -1);
}
 
Example 19  Project: big-c  File: TestSecureNameNodeWithExternalKdc.java
@Before
public void testExternalKdcRunning() {
  // Tests are skipped if external KDC is not running.
  Assume.assumeTrue(isExternalKdcRunning());
}
 
Example 20
@Before
@Override
public void init() {
    Assume.assumeTrue("sun.misc.Unsafe not found, skip tests", PlatformDependent.hasUnsafe());
    super.init();
}