Code examples for com.amazonaws.services.s3.model.InitiateMultipartUploadRequest

The examples below show how to use the com.amazonaws.services.s3.model.InitiateMultipartUploadRequest API; you can also follow each project's link to view the full source code on GitHub.
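
Before the individual examples, here is a minimal end-to-end sketch of the lifecycle that all of these snippets share: initiate the upload to obtain an upload ID, upload the parts, then complete the upload (or abort it on failure). This is an illustrative sketch only; the bucket name, key, and file path are placeholder values, and everything else uses the standard AWS SDK for Java (v1) API.

import java.io.File;
import java.util.ArrayList;
import java.util.List;

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

public class MultipartUploadSketch {

  public static void main(String[] args) {
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    String bucket = "example-bucket";      // placeholder
    String key = "example/object/key";     // placeholder
    File file = new File("/tmp/data.bin"); // placeholder

    // 1. Initiate: S3 returns an upload ID that ties all parts together.
    String uploadId = s3.initiateMultipartUpload(
        new InitiateMultipartUploadRequest(bucket, key)).getUploadId();

    try {
      // 2. Upload parts. Every part except the last must be at least 5 MB.
      List<PartETag> partETags = new ArrayList<>();
      long partSize = 5L * 1024 * 1024;
      long position = 0;
      for (int partNumber = 1; position < file.length(); partNumber++) {
        long size = Math.min(partSize, file.length() - position);
        UploadPartRequest part = new UploadPartRequest()
            .withBucketName(bucket)
            .withKey(key)
            .withUploadId(uploadId)
            .withPartNumber(partNumber)
            .withFile(file)
            .withFileOffset(position)
            .withPartSize(size);
        partETags.add(s3.uploadPart(part).getPartETag());
        position += size;
      }

      // 3. Complete: the collected part ETags tell S3 how to assemble the object.
      s3.completeMultipartUpload(
          new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
    } catch (AmazonClientException e) {
      // 4. Abort on failure so S3 does not retain (and bill for) orphaned parts.
      s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
      throw e;
    }
  }
}

Most of the examples below are variations on this skeleton: attaching ObjectMetadata (content type, SSE settings, If-Match headers) or a storage class to the InitiateMultipartUploadRequest, or mocking initiateMultipartUpload in tests.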

Example 1 - Project: data-highway, File: S3MultipartUploadTest.java
@Test
public void complete() {
  InitiateMultipartUploadResult response = mock(InitiateMultipartUploadResult.class);
  when(response.getUploadId()).thenReturn(UPLOAD_ID);
  when(s3.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(response);

  ArgumentCaptor<CompleteMultipartUploadRequest> request = ArgumentCaptor
      .forClass(CompleteMultipartUploadRequest.class);
  @SuppressWarnings("unchecked")
  List<PartETag> partETags = mock(List.class);

  when(s3.completeMultipartUpload(request.capture())).thenReturn(null);

  underTest.start();
  underTest.complete(UPLOAD_ID, partETags);

  assertThat(request.getValue().getBucketName(), is(BUCKET));
  assertThat(request.getValue().getKey(), is(KEY));
  assertThat(request.getValue().getUploadId(), is(UPLOAD_ID));
  assertThat(request.getValue().getPartETags(), is(partETags));
}
 
Example 2 - Project: hadoop, File: S3AFastOutputStream.java
private MultiPartUpload initiateMultiPartUpload() throws IOException {
  final ObjectMetadata om = createDefaultMetadata();
  final InitiateMultipartUploadRequest initiateMPURequest =
      new InitiateMultipartUploadRequest(bucket, key, om);
  initiateMPURequest.setCannedACL(cannedACL);
  try {
    return new MultiPartUpload(
        client.initiateMultipartUpload(initiateMPURequest).getUploadId());
  } catch (AmazonServiceException ase) {
    throw new IOException("Unable to initiate MultiPartUpload (server side)" +
        ": " + ase, ase);
  } catch (AmazonClientException ace) {
    throw new IOException("Unable to initiate MultiPartUpload (client side)" +
        ": " + ace, ace);
  }
}
 
Example 3 - Project: big-c, File: S3AFastOutputStream.java
private MultiPartUpload initiateMultiPartUpload() throws IOException {
  final ObjectMetadata om = createDefaultMetadata();
  final InitiateMultipartUploadRequest initiateMPURequest =
      new InitiateMultipartUploadRequest(bucket, key, om);
  initiateMPURequest.setCannedACL(cannedACL);
  try {
    return new MultiPartUpload(
        client.initiateMultipartUpload(initiateMPURequest).getUploadId());
  } catch (AmazonServiceException ase) {
    throw new IOException("Unable to initiate MultiPartUpload (server side)" +
        ": " + ase, ase);
  } catch (AmazonClientException ace) {
    throw new IOException("Unable to initiate MultiPartUpload (client side)" +
        ": " + ace, ace);
  }
}
 
Example 4 - Project: stocator, File: COSAPIClient.java
/**
 * Start the multipart upload process.
 * @return the upload result containing the ID
 * @throws IOException IO problem
 */
String initiateMultiPartUpload(Boolean atomicWrite, String etag) throws IOException {
  LOG.debug("Initiating Multipart upload");
  ObjectMetadata om = newObjectMetadata(-1);
  // if atomic write is enabled use the etag to ensure put request is atomic
  if (atomicWrite) {
    if (etag != null) {
      LOG.debug("Atomic write - setting If-Match header");
      om.setHeader("If-Match", etag);
    } else {
      LOG.debug("Atomic write - setting If-None-Match header");
      om.setHeader("If-None-Match", "*");
    }
  }
  final InitiateMultipartUploadRequest initiateMPURequest =
      new InitiateMultipartUploadRequest(mBucket,
          key,
          om);
  try {
    return mClient.initiateMultipartUpload(initiateMPURequest)
        .getUploadId();
  } catch (AmazonClientException ace) {
    throw translateException("initiate MultiPartUpload", key, ace);
  }
}
 
Example 5
@Test
public void canUploadMultipleOutputArtifacts() throws IOException {
    // given
    jenkinsOutputs.clear();
    jenkinsOutputs.add(new OutputArtifact(TEST_FILE, "dummyArtifact"));
    jenkinsOutputs.add(new OutputArtifact("Dir1", "dummyArtifact1"));
    jenkinsOutputs.add(new OutputArtifact("Dir2", "dummyArtifact2"));

    outputArtifacts.clear();
    outputArtifacts.add(outputArtifact);
    outputArtifacts.add(outputArtifact1);
    outputArtifacts.add(outputArtifact2);

    // when
    publisher.invoke(workspace, null);

    // then
    verify(s3Client, times(3)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
}
 
Example 6
/**
 * For the input file, initiate the upload and emit the UploadFileMetadata through the fileMetadataOutput,
 * uploadMetadataOutput ports.
 * @param tuple given tuple
 */
protected void processTuple(AbstractFileSplitter.FileMetadata tuple)
{
  if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
    return;
  }
  String keyName = getKeyName(tuple.getFilePath());
  String uploadId = "";
  if (tuple.getNumberOfBlocks() > 1) {
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
    initRequest.setObjectMetadata(createObjectMetadata());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    uploadId = initResponse.getUploadId();
  }
  UploadFileMetadata uploadFileMetadata = new UploadFileMetadata(tuple, uploadId, keyName);
  fileMetadataOutput.emit(uploadFileMetadata);
  uploadMetadataOutput.emit(uploadFileMetadata);
  currentWindowRecoveryState.add(uploadFileMetadata);
}
 
Example 7
@Test
public void testInitiateUpload()
{
  InitiateMultipartUploadResult result = new InitiateMultipartUploadResult();
  result.setUploadId(uploadId);

  MockitoAnnotations.initMocks(this);
  when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(result);
  when(fileMetadata.getFilePath()).thenReturn("/tmp/file1.txt");
  when(fileMetadata.getNumberOfBlocks()).thenReturn(4);

  S3InitiateFileUploadTest operator = new S3InitiateFileUploadTest();
  operator.setBucketName("testbucket");
  operator.setup(context);

  CollectorTestSink<S3InitiateFileUploadOperator.UploadFileMetadata> fileSink = new CollectorTestSink<>();
  CollectorTestSink<Object> tmp = (CollectorTestSink)fileSink;
  operator.fileMetadataOutput.setSink(tmp);
  operator.beginWindow(0);
  operator.processTuple(fileMetadata);
  operator.endWindow();

  S3InitiateFileUploadOperator.UploadFileMetadata emitted = (S3InitiateFileUploadOperator.UploadFileMetadata)tmp.collectedTuples.get(0);
  Assert.assertEquals("Upload ID :", uploadId, emitted.getUploadId());
}
 
Example 8 - Project: s3proxy, File: AwsSdkTest.java
@Test
public void testAtomicMpuAbort() throws Exception {
    String key = "testAtomicMpuAbort";
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(BYTE_SOURCE.size());
    client.putObject(containerName, key, BYTE_SOURCE.openStream(),
            metadata);

    InitiateMultipartUploadRequest initRequest =
            new InitiateMultipartUploadRequest(containerName, key);
    InitiateMultipartUploadResult initResponse =
            client.initiateMultipartUpload(initRequest);
    String uploadId = initResponse.getUploadId();

    client.abortMultipartUpload(new AbortMultipartUploadRequest(
                containerName, key, uploadId));

    S3Object object = client.getObject(containerName, key);
    assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
            BYTE_SOURCE.size());
    try (InputStream actual = object.getObjectContent();
            InputStream expected = BYTE_SOURCE.openStream()) {
        assertThat(actual).hasContentEqualTo(expected);
    }
}
 
Example 9 - Project: s3proxy, File: AwsSdkTest.java
@Test
public void testPartNumberMarker() throws Exception {
    String blobName = "foo";
    InitiateMultipartUploadResult result = client.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(containerName, blobName));
    ListPartsRequest request = new ListPartsRequest(containerName,
            blobName, result.getUploadId());

    client.listParts(request.withPartNumberMarker(0));

    try {
        client.listParts(request.withPartNumberMarker(1));
        Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
    } catch (AmazonS3Exception e) {
        assertThat(e.getErrorCode()).isEqualTo("NotImplemented");
    }
}
 
Example 10 - Project: nifi, File: TestStandardS3EncryptionService.java
@Test
public void testRequests() {
    final ObjectMetadata metadata = new ObjectMetadata();
    final GetObjectRequest getObjectRequest = new GetObjectRequest("", "");
    final InitiateMultipartUploadRequest initUploadRequest = new InitiateMultipartUploadRequest("", "");
    final PutObjectRequest putObjectRequest = new PutObjectRequest("", "", "");
    final UploadPartRequest uploadPartRequest = new UploadPartRequest();

    service.configureGetObjectRequest(getObjectRequest, metadata);
    Assert.assertNull(getObjectRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());

    service.configureUploadPartRequest(uploadPartRequest, metadata);
    Assert.assertNull(uploadPartRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());

    service.configurePutObjectRequest(putObjectRequest, metadata);
    Assert.assertNull(putObjectRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());

    service.configureInitiateMultipartUploadRequest(initUploadRequest, metadata);
    Assert.assertNull(initUploadRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());
}
 
Example 11 - Project: stratosphere, File: S3DataOutputStream.java
private String initiateMultipartUpload() throws IOException {

  boolean operationSuccessful = false;
  final InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(this.bucket, this.object);
  if (this.useRRS) {
    request.setStorageClass(StorageClass.ReducedRedundancy);
  } else {
    request.setStorageClass(StorageClass.Standard);
  }

  try {
    final InitiateMultipartUploadResult result = this.s3Client.initiateMultipartUpload(request);
    operationSuccessful = true;
    return result.getUploadId();
  } catch (AmazonServiceException e) {
    throw new IOException(StringUtils.stringifyException(e));
  } finally {
    if (!operationSuccessful) {
      abortUpload();
    }
  }
}
 
Example 12 - Project: data-highway, File: S3MultipartUpload.java
String start() {
  bytes = 0;
  InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucket, key);

  if (enableServerSideEncryption) {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    request.setObjectMetadata(objectMetadata);
  }

  String uploadId = s3.initiateMultipartUpload(request).getUploadId();
  stopwatch.start();
  log.info("Starting upload to s3://{}/{}.", bucket, key);
  return uploadId;
}
 
Example 13 - Project: data-highway, File: S3MultipartUploadTest.java
@Test
public void start() {
  ArgumentCaptor<InitiateMultipartUploadRequest> request = ArgumentCaptor
      .forClass(InitiateMultipartUploadRequest.class);
  InitiateMultipartUploadResult response = mock(InitiateMultipartUploadResult.class);
  when(response.getUploadId()).thenReturn(UPLOAD_ID);
  when(s3.initiateMultipartUpload(request.capture())).thenReturn(response);

  String result = underTest.start();

  assertThat(result, is(UPLOAD_ID));
  assertThat(request.getValue().getBucketName(), is(BUCKET));
  assertThat(request.getValue().getKey(), is(KEY));
}
 
Example 14 - Project: s3committer, File: TestS3PartitionedTaskCommit.java
@Test
public void testAppend() throws Exception {
  FileSystem mockS3 = getMockS3();

  getTAC().getConfiguration()
      .set(S3Committer.CONFLICT_MODE, "append");

  S3PartitionedOutputCommitter committer = newTaskCommitter();

  committer.setupTask(getTAC());
  TestUtil.createTestOutputFiles(relativeFiles,
      committer.getTaskAttemptPath(getTAC()), getTAC().getConfiguration());

  // test success when one partition already exists
  reset(mockS3);
  when(mockS3
      .exists(new Path(OUTPUT_PATH, relativeFiles.get(2)).getParent()))
      .thenReturn(true);

  committer.commitTask(getTAC());
  Set<String> files = Sets.newHashSet();
  for (InitiateMultipartUploadRequest request : getMockResults().getRequests().values()) {
    Assert.assertEquals(MockS3FileSystem.BUCKET, request.getBucketName());
    files.add(request.getKey());
  }
  Assert.assertEquals("Should have the right number of uploads",
      relativeFiles.size(), files.size());

  Set<String> expected = Sets.newHashSet();
  for (String relative : relativeFiles) {
    expected.add(OUTPUT_PREFIX +
        "/" + Paths.addUUID(relative, committer.getUUID()));
  }

  Assert.assertEquals("Should have correct paths", expected, files);
}
 
Example 15 - Project: s3committer, File: TestS3PartitionedTaskCommit.java
@Test
public void testReplace() throws Exception {
  // TODO: this committer needs to delete the data that already exists
  // This test should assert that the delete was done
  FileSystem mockS3 = getMockS3();

  getTAC().getConfiguration()
      .set(S3Committer.CONFLICT_MODE, "replace");

  S3PartitionedOutputCommitter committer = newTaskCommitter();

  committer.setupTask(getTAC());
  TestUtil.createTestOutputFiles(relativeFiles,
      committer.getTaskAttemptPath(getTAC()), getTAC().getConfiguration());

  // test success when one partition already exists
  reset(mockS3);
  when(mockS3
      .exists(new Path(OUTPUT_PATH, relativeFiles.get(3)).getParent()))
      .thenReturn(true);

  committer.commitTask(getTAC());
  Set<String> files = Sets.newHashSet();
  for (InitiateMultipartUploadRequest request : getMockResults().getRequests().values()) {
    Assert.assertEquals(MockS3FileSystem.BUCKET, request.getBucketName());
    files.add(request.getKey());
  }
  Assert.assertEquals("Should have the right number of uploads",
      relativeFiles.size(), files.size());

  Set<String> expected = Sets.newHashSet();
  for (String relative : relativeFiles) {
    expected.add(OUTPUT_PREFIX +
        "/" + Paths.addUUID(relative, committer.getUUID()));
  }

  Assert.assertEquals("Should have correct paths", expected, files);
}
 
Example 16 - Project: nifi-minifi, File: S3OutputStream.java
private MultipartUpload newMultipartUpload() throws IOException {
  InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, key, new ObjectMetadata());
  try {
    return new MultipartUpload(s3.initiateMultipartUpload(initRequest).getUploadId());
  } catch (AmazonClientException e) {
    throw new IOException("Unable to initiate MultipartUpload: " + e, e);
  }
}
 
Example 17 - Project: bender, File: S3TransporterTest.java
private AmazonS3Client getMockClient() {
  AmazonS3Client mockClient = spy(AmazonS3Client.class);
  UploadPartResult uploadResult = new UploadPartResult();
  uploadResult.setETag("foo");
  doReturn(uploadResult).when(mockClient).uploadPart(any(UploadPartRequest.class));

  InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
  initUploadResult.setUploadId("123");
  doReturn(initUploadResult).when(mockClient)
      .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

  return mockClient;
}
 
Example 18 - Project: beam, File: S3WritableByteChannel.java
S3WritableByteChannel(AmazonS3 amazonS3, S3ResourceId path, String contentType, S3Options options)
    throws IOException {
  this.amazonS3 = checkNotNull(amazonS3, "amazonS3");
  this.options = checkNotNull(options);
  this.path = checkNotNull(path, "path");
  checkArgument(
      atMostOne(
          options.getSSECustomerKey() != null,
          options.getSSEAlgorithm() != null,
          options.getSSEAwsKeyManagementParams() != null),
      "Either SSECustomerKey (SSE-C) or SSEAlgorithm (SSE-S3)"
          + " or SSEAwsKeyManagementParams (SSE-KMS) must not be set at the same time.");
  // Amazon S3 API docs: Each part must be at least 5 MB in size, except the last part.
  checkArgument(
      options.getS3UploadBufferSizeBytes()
          >= S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES,
      "S3UploadBufferSizeBytes must be at least %s bytes",
      S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES);
  this.uploadBuffer = ByteBuffer.allocate(options.getS3UploadBufferSizeBytes());
  eTags = new ArrayList<>();

  ObjectMetadata objectMetadata = new ObjectMetadata();
  objectMetadata.setContentType(contentType);
  if (options.getSSEAlgorithm() != null) {
    objectMetadata.setSSEAlgorithm(options.getSSEAlgorithm());
  }
  InitiateMultipartUploadRequest request =
      new InitiateMultipartUploadRequest(path.getBucket(), path.getKey())
          .withStorageClass(options.getS3StorageClass())
          .withObjectMetadata(objectMetadata);
  request.setSSECustomerKey(options.getSSECustomerKey());
  request.setSSEAwsKeyManagementParams(options.getSSEAwsKeyManagementParams());
  InitiateMultipartUploadResult result;
  try {
    result = amazonS3.initiateMultipartUpload(request);
  } catch (AmazonClientException e) {
    throw new IOException(e);
  }
  uploadId = result.getUploadId();
}
 
Example 19 - Project: modeldb, File: S3Service.java
@Override
public Optional<String> initiateMultipart(String s3Key) throws ModelDBException {
  // Validate bucket
  boolean exists = doesBucketExist(bucketName);
  if (!exists) {
    throw new ModelDBException("Bucket does not exist", io.grpc.Status.Code.INTERNAL);
  }
  InitiateMultipartUploadRequest initiateMultipartUploadRequest =
      new InitiateMultipartUploadRequest(bucketName, s3Key);
  InitiateMultipartUploadResult result =
      s3Client.initiateMultipartUpload(initiateMultipartUploadRequest);
  return Optional.ofNullable(result.getUploadId());
}
 
Example 20 - Project: nexus-public, File: ParallelRequester.java
protected void parallelRequests(final AmazonS3 s3,
                                final String bucket,
                                final String key,
                                final Supplier<IOFunction<String, List<PartETag>>> operations)
{
  InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, key);
  String uploadId = s3.initiateMultipartUpload(initiateRequest).getUploadId();

  CompletionService<List<PartETag>> completionService = new ExecutorCompletionService<>(executorService);
  try {
    for (int i = 0; i < parallelism; i++) {
      completionService.submit(() -> operations.get().apply(uploadId));
    }

    List<PartETag> partETags = new ArrayList<>();
    for (int i = 0; i < parallelism; i++) {
      partETags.addAll(completionService.take().get());
    }

    s3.completeMultipartUpload(new CompleteMultipartUploadRequest()
        .withBucketName(bucket)
        .withKey(key)
        .withUploadId(uploadId)
        .withPartETags(partETags));
  }
  catch (InterruptedException interrupted) {
    s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
    Thread.currentThread().interrupt();
  }
  catch (CancellationException | ExecutionException ex) {
    s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
    throw new BlobStoreException(
        format("Error executing parallel requests for bucket:%s key:%s with uploadId:%s", bucket, key, uploadId), ex,
        null);
  }
}
 
Example 21
public static void uploadFile(
        final File file,
        final Artifact artifact,
        final CompressionType compressionType,
        final EncryptionKey encryptionKey,
        final AmazonS3 amazonS3,
        final BuildListener listener) throws IOException {

    LoggingHelper.log(listener, "Uploading artifact: " + artifact + ", file: " + file);

    final String bucketName = artifact.getLocation().getS3Location().getBucketName();
    final String objectKey  = artifact.getLocation().getS3Location().getObjectKey();
    final List<PartETag> partETags = new ArrayList<>();

    final InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(
            bucketName,
            objectKey,
            createObjectMetadata(compressionType))
        .withSSEAwsKeyManagementParams(toSSEAwsKeyManagementParams(encryptionKey));

    final InitiateMultipartUploadResult initiateMultipartUploadResult
            = amazonS3.initiateMultipartUpload(initiateMultipartUploadRequest);

    final long contentLength = file.length();
    long filePosition = 0;
    long partSize = 5 * 1024 * 1024; // Set part size to 5 MB

    for (int i = 1; filePosition < contentLength; i++) {
        partSize = Math.min(partSize, (contentLength - filePosition));

        final UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(objectKey)
                .withUploadId(initiateMultipartUploadResult.getUploadId())
                .withPartNumber(i)
                .withFileOffset(filePosition)
                .withFile(file)
                .withPartSize(partSize);

        partETags.add(amazonS3.uploadPart(uploadPartRequest).getPartETag());

        filePosition += partSize;
    }

    final CompleteMultipartUploadRequest completeMultipartUpload
            = new CompleteMultipartUploadRequest(
                bucketName,
                objectKey,
                initiateMultipartUploadResult.getUploadId(),
                partETags);

    amazonS3.completeMultipartUpload(completeMultipartUpload);

    LoggingHelper.log(listener, "Upload successful");
}
 
Example 22
@Test
public void uploadsArtifactToS3() throws IOException {
    // when
    publisher.invoke(workspace, null);

    // then
    final InOrder inOrder = inOrder(clientFactory, awsClients, s3Client);
    inOrder.verify(clientFactory).getAwsClient(ACCESS_KEY, SECRET_KEY, PROXY_HOST, PROXY_PORT, REGION, PLUGIN_VERSION);
    inOrder.verify(awsClients).getCodePipelineClient();
    inOrder.verify(awsClients).getS3Client(credentialsProviderCaptor.capture());
    inOrder.verify(s3Client).initiateMultipartUpload(initiateMultipartUploadRequestCaptor.capture());
    inOrder.verify(s3Client).uploadPart(uploadPartRequestCaptor.capture());

    final com.amazonaws.auth.AWSSessionCredentials credentials
        = (com.amazonaws.auth.AWSSessionCredentials) credentialsProviderCaptor.getValue().getCredentials();
    assertEquals(JOB_ACCESS_KEY, credentials.getAWSAccessKeyId());
    assertEquals(JOB_SECRET_KEY, credentials.getAWSSecretKey());
    assertEquals(JOB_SESSION_TOKEN, credentials.getSessionToken());

    verify(codePipelineClient).getJobDetails(getJobDetailsRequestCaptor.capture());
    assertEquals(JOB_ID, getJobDetailsRequestCaptor.getValue().getJobId());

    final InitiateMultipartUploadRequest initRequest = initiateMultipartUploadRequestCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, initRequest.getBucketName());
    assertEquals(S3_OBJECT_KEY, initRequest.getKey());

    final UploadPartRequest uploadRequest = uploadPartRequestCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, uploadRequest.getBucketName());
    assertEquals(S3_OBJECT_KEY, uploadRequest.getKey());
    assertEquals(UPLOAD_ID, uploadRequest.getUploadId());

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());
}
 
Example 23
@Before
public void setUp() {
    MockitoAnnotations.initMocks(this);

    when(mockS3Client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class)))
            .thenReturn(mockUploadResult);
    when(mockS3Client.uploadPart(any(UploadPartRequest.class))).thenReturn(mockPartRequest);
    when(mockUploadResult.getUploadId()).thenReturn("123");
    when(mockArtifact.getLocation()).thenReturn(mockLocation);
    when(mockLocation.getS3Location()).thenReturn(s3ArtifactLocation);
    when(s3ArtifactLocation.getBucketName()).thenReturn("Bucket");
    when(s3ArtifactLocation.getObjectKey()).thenReturn("Key");

    outContent = TestUtils.setOutputStream();
}
 
Example 24
@Test
public void uploadFileSuccess() throws IOException {
    TestUtils.initializeTestingFolders();

    final File compressedFile = CompressionTools.compressFile(
            "ZipProject",
            PATH_TO_COMPRESS,
            CompressionType.Zip,
            null);

    PublisherTools.uploadFile(
            compressedFile,
            mockArtifact,
            CompressionType.Zip,
            null, // No custom encryption key
            mockS3Client,
            null); // Listener

    final InOrder inOrder = inOrder(mockS3Client);
    inOrder.verify(mockS3Client, times(1)).initiateMultipartUpload(initiateCaptor.capture());
    // Total size is less than 5MB, should only be one upload
    inOrder.verify(mockS3Client, times(1)).uploadPart(any(UploadPartRequest.class));
    inOrder.verify(mockS3Client, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertNull(encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
 
Example 25
@Test
public void uploadWithCustomKmsEncryptionKey() throws IOException {
    TestUtils.initializeTestingFolders();

    when(mockEncryptionKey.getId()).thenReturn("KMS-KEY-ARN");
    when(mockEncryptionKey.getType()).thenReturn(EncryptionKeyType.KMS.toString());

    final File compressedFile = CompressionTools.compressFile(
            "ZipProject",
            PATH_TO_COMPRESS,
            CompressionType.Zip,
            null);

    PublisherTools.uploadFile(
            compressedFile,
            mockArtifact,
            CompressionType.Zip,
            mockEncryptionKey,
            mockS3Client,
            null); // Listener

    verify(mockS3Client).initiateMultipartUpload(initiateCaptor.capture());

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertEquals("KMS-KEY-ARN", encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
 
Example 26
@Test
public void uploadWithUnknownEncryptionKeyType() throws IOException {
    TestUtils.initializeTestingFolders();

    when(mockEncryptionKey.getId()).thenReturn("KMS-KEY-ARN");
    when(mockEncryptionKey.getType()).thenReturn("Custom");

    final File compressedFile = CompressionTools.compressFile(
            "ZipProject",
            PATH_TO_COMPRESS,
            CompressionType.Zip,
            null);

    PublisherTools.uploadFile(
            compressedFile,
            mockArtifact,
            CompressionType.Zip,
            mockEncryptionKey,
            mockS3Client,
            null); // Listener

    verify(mockS3Client).initiateMultipartUpload(initiateCaptor.capture());

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertNull(encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
 
Example 27 - Project: spring-cloud-aws, File: SimpleStorageResource.java
private void initiateMultiPartIfNeeded() {
	if (this.multiPartUploadResult == null) {
		this.multiPartUploadResult = SimpleStorageResource.this.amazonS3
				.initiateMultipartUpload(new InitiateMultipartUploadRequest(
						SimpleStorageResource.this.bucketName,
						SimpleStorageResource.this.objectName));
	}
}
 
Example 28 - Project: nifi, File: TestS3EncryptionStrategies.java
@Before
public void setup() {
    byte[] keyRawBytes = new byte[32];
    SecureRandom secureRandom = new SecureRandom();
    secureRandom.nextBytes(keyRawBytes);
    randomKeyMaterial = Base64.encodeBase64String(keyRawBytes);

    metadata = new ObjectMetadata();
    putObjectRequest = new PutObjectRequest("", "", "");
    initUploadRequest = new InitiateMultipartUploadRequest("", "");
    getObjectRequest = new GetObjectRequest("", "");
    uploadPartRequest = new UploadPartRequest();
}
 
Example 29 - Project: crate, File: OutputS3.java
private S3OutputStream(Executor executor, URI uri, S3ClientHelper s3ClientHelper) throws IOException {
    this.executor = executor;
    bucketName = uri.getHost();
    key = uri.getPath().substring(1);
    outputStream = new ByteArrayOutputStream();
    client = s3ClientHelper.client(uri);
    multipartUpload = client.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, key));
}
 
Example 30 - Project: hadoop-ozone, File: S3KeyGenerator.java
private void createKey(long counter) throws Exception {
  timer.time(() -> {
    if (multiPart) {

      final String keyName = generateObjectName(counter);
      final InitiateMultipartUploadRequest initiateRequest =
          new InitiateMultipartUploadRequest(bucketName, keyName);

      final InitiateMultipartUploadResult initiateMultipartUploadResult =
          s3.initiateMultipartUpload(initiateRequest);
      final String uploadId = initiateMultipartUploadResult.getUploadId();

      List<PartETag> parts = new ArrayList<>();
      for (int i = 1; i <= numberOfParts; i++) {

        final UploadPartRequest uploadPartRequest = new UploadPartRequest()
            .withBucketName(bucketName)
            .withKey(keyName)
            .withPartNumber(i)
            .withLastPart(i == numberOfParts)
            .withUploadId(uploadId)
            .withPartSize(fileSize)
            .withInputStream(new ByteArrayInputStream(content.getBytes(
                StandardCharsets.UTF_8)));

        final UploadPartResult uploadPartResult =
            s3.uploadPart(uploadPartRequest);
        parts.add(uploadPartResult.getPartETag());
      }

      s3.completeMultipartUpload(
          new CompleteMultipartUploadRequest(bucketName, keyName, uploadId,
              parts));

    } else {
      s3.putObject(bucketName, generateObjectName(counter),
          content);
    }

    return null;
  });
}
 