Source code examples for the class com.amazonaws.services.s3.model.UploadPartRequest

The examples below show how the com.amazonaws.services.s3.model.UploadPartRequest API is used in real projects. Each snippet notes the project and file it was taken from; the full sources can be viewed on GitHub.

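Before the individual project snippets, here is a minimal, self-contained sketch of where UploadPartRequest sits in a complete multipart upload: initiate the upload, send each part, then complete it with the collected part ETags. The bucket, key, file path, and part size below are illustrative placeholders, not taken from any of the projects listed.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class MultipartUploadSketch {

  public static void main(String[] args) {
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    String bucket = "example-bucket";          // placeholder
    String key = "example/key.bin";            // placeholder
    File file = new File("/tmp/example.bin");  // placeholder

    // 1. Initiate the multipart upload and remember the upload id.
    InitiateMultipartUploadResult init =
        s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));
    String uploadId = init.getUploadId();

    // 2. Upload the file in 5 MB parts (the S3 minimum part size, except for the last part),
    //    collecting the PartETag returned for each part.
    long partSize = 5L * 1024 * 1024;
    long contentLength = file.length();
    List<PartETag> partETags = new ArrayList<>();
    long filePosition = 0;
    for (int partNumber = 1; filePosition < contentLength; partNumber++) {
      long size = Math.min(partSize, contentLength - filePosition);
      UploadPartRequest request = new UploadPartRequest()
          .withBucketName(bucket)
          .withKey(key)
          .withUploadId(uploadId)
          .withPartNumber(partNumber)
          .withFile(file)
          .withFileOffset(filePosition)
          .withPartSize(size)
          .withLastPart(filePosition + size >= contentLength);
      partETags.add(s3.uploadPart(request).getPartETag());
      filePosition += size;
    }

    // 3. Complete the upload with the collected part ETags.
    s3.completeMultipartUpload(
        new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
  }
}

Production code should additionally abort the upload on failure (see the abortMultipartUpload call in Example 28) so that incomplete parts do not accumulate in the bucket.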
Example 1  Project: data-highway   File: S3MultipartUpload.java
PartETag upload(String uploadId, S3Part part) {
  Object[] logParams = new Object[] { part.getSize(), part.getNumber(), bucket, key };
  log.debug("Uploading {} bytes for part {} to s3://{}/{}.", logParams);
  UploadPartRequest request = new UploadPartRequest()
      .withUploadId(uploadId)
      .withBucketName(bucket)
      .withKey(key)
      .withPartNumber(part.getNumber())
      .withPartSize(part.getSize())
      .withMD5Digest(part.getMd5())
      .withInputStream(part.getInputStream());
  UploadPartResult result = s3.uploadPart(request);
  log.debug("Uploaded {} bytes for part {} to s3://{}/{}.", logParams);
  bytes += part.getSize();
  return result.getPartETag();
}
 
Example 2  Project: data-highway   File: S3MultipartUploadTest.java
@Test
public void upload() {
  ArgumentCaptor<UploadPartRequest> request = ArgumentCaptor.forClass(UploadPartRequest.class);
  UploadPartResult response = mock(UploadPartResult.class);
  PartETag partETag = mock(PartETag.class);
  when(response.getPartETag()).thenReturn(partETag);
  when(s3.uploadPart(request.capture())).thenReturn(response);
  InputStream inputStream = mock(InputStream.class);
  S3Part part = new S3Part(1, 2, "md5", inputStream);

  PartETag result = underTest.upload(UPLOAD_ID, part);

  assertThat(result, is(partETag));
  assertThat(request.getValue().getBucketName(), is(BUCKET));
  assertThat(request.getValue().getKey(), is(KEY));
  assertThat(request.getValue().getPartNumber(), is(1));
  assertThat(request.getValue().getPartSize(), is(2L));
  assertThat(request.getValue().getMd5Digest(), is("md5"));
  assertThat(request.getValue().getInputStream(), is(inputStream));
}
 
Example 3  Project: hadoop   File: S3AFastOutputStream.java
public void uploadPartAsync(ByteArrayInputStream inputStream,
    int partSize) {
  final int currentPartNumber = partETagsFutures.size() + 1;
  final UploadPartRequest request =
      new UploadPartRequest().withBucketName(bucket).withKey(key)
          .withUploadId(uploadId).withInputStream(inputStream)
          .withPartNumber(currentPartNumber).withPartSize(partSize);
  request.setGeneralProgressListener(progressListener);
  ListenableFuture<PartETag> partETagFuture =
      executorService.submit(new Callable<PartETag>() {
        @Override
        public PartETag call() throws Exception {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Uploading part {} for id '{}'", currentPartNumber,
                uploadId);
          }
          return client.uploadPart(request).getPartETag();
        }
      });
  partETagsFutures.add(partETagFuture);
}
 
Example 4  Project: big-c   File: S3AFastOutputStream.java
public void uploadPartAsync(ByteArrayInputStream inputStream,
    int partSize) {
  final int currentPartNumber = partETagsFutures.size() + 1;
  final UploadPartRequest request =
      new UploadPartRequest().withBucketName(bucket).withKey(key)
          .withUploadId(uploadId).withInputStream(inputStream)
          .withPartNumber(currentPartNumber).withPartSize(partSize);
  request.setGeneralProgressListener(progressListener);
  ListenableFuture<PartETag> partETagFuture =
      executorService.submit(new Callable<PartETag>() {
        @Override
        public PartETag call() throws Exception {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Uploading part {} for id '{}'", currentPartNumber,
                uploadId);
          }
          return client.uploadPart(request).getPartETag();
        }
      });
  partETagsFutures.add(partETagFuture);
}
 
Example 5  Project: beam   File: S3WritableByteChannel.java
private void flush() throws IOException {
  uploadBuffer.flip();
  ByteArrayInputStream inputStream = new ByteArrayInputStream(uploadBuffer.array());

  UploadPartRequest request =
      new UploadPartRequest()
          .withBucketName(path.getBucket())
          .withKey(path.getKey())
          .withUploadId(uploadId)
          .withPartNumber(partNumber++)
          .withPartSize(uploadBuffer.remaining())
          .withMD5Digest(Base64.encodeAsString(md5.digest()))
          .withInputStream(inputStream);
  request.setSSECustomerKey(options.getSSECustomerKey());

  UploadPartResult result;
  try {
    result = amazonS3.uploadPart(request);
  } catch (AmazonClientException e) {
    throw new IOException(e);
  }
  uploadBuffer.clear();
  md5.reset();
  eTags.add(result.getPartETag());
}
 
Example 6  Project: nexus-public   File: ParallelUploader.java
private List<PartETag> uploadChunks(final AmazonS3 s3,
                                    final String bucket,
                                    final String key,
                                    final String uploadId,
                                    final ChunkReader chunkReader)
    throws IOException
{
  List<PartETag> tags = new ArrayList<>();
  Optional<Chunk> chunk;

  while ((chunk = chunkReader.readChunk(chunkSize)).isPresent()) {
    UploadPartRequest request = new UploadPartRequest()
        .withBucketName(bucket)
        .withKey(key)
        .withUploadId(uploadId)
        .withPartNumber(chunk.get().chunkNumber)
        .withInputStream(new ByteArrayInputStream(chunk.get().data, 0, chunk.get().dataLength))
        .withPartSize(chunk.get().dataLength);

    tags.add(s3.uploadPart(request).getPartETag());
  }

  return tags;
}
 
Example 7  Project: spring-cloud-aws   File: SimpleStorageResource.java
@Override
public UploadPartResult call() throws Exception {
	try {
		return this.amazonS3.uploadPart(new UploadPartRequest()
				.withBucketName(this.bucketName).withKey(this.key)
				.withUploadId(this.uploadId)
				.withInputStream(new ByteArrayInputStream(this.content))
				.withPartNumber(this.partNumber).withLastPart(this.last)
				.withPartSize(this.contentLength));
	}
	finally {
		// Release the memory, as the callable may still live inside the
		// CompletionService which would cause
		// an exhaustive memory usage
		this.content = null;
	}
}
 
Example 8  Project: nifi   File: TestStandardS3EncryptionService.java
@Test
public void testRequests() {
    final ObjectMetadata metadata = new ObjectMetadata();
    final GetObjectRequest getObjectRequest = new GetObjectRequest("", "");
    final InitiateMultipartUploadRequest initUploadRequest = new InitiateMultipartUploadRequest("", "");
    final PutObjectRequest putObjectRequest = new PutObjectRequest("", "", "");
    final UploadPartRequest uploadPartRequest = new UploadPartRequest();

    service.configureGetObjectRequest(getObjectRequest, metadata);
    Assert.assertNull(getObjectRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());

    service.configureUploadPartRequest(uploadPartRequest, metadata);
    Assert.assertNull(uploadPartRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());

    service.configurePutObjectRequest(putObjectRequest, metadata);
    Assert.assertNull(putObjectRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());

    service.configureInitiateMultipartUploadRequest(initUploadRequest, metadata);
    Assert.assertNull(initUploadRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());
}
 
Example 9  Project: crate   File: OutputS3.java
private void doUploadIfNeeded() throws IOException {
    if (currentPartBytes >= PART_SIZE) {
        final ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
        final int currentPart = partNumber;
        final long currentPartSize = currentPartBytes;

        outputStream.close();
        outputStream = new ByteArrayOutputStream();
        partNumber++;
        pendingUploads.add(CompletableFutures.supplyAsync(() -> {
            UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(key)
                .withPartNumber(currentPart)
                .withPartSize(currentPartSize)
                .withUploadId(multipartUpload.getUploadId())
                .withInputStream(inputStream);
            return client.uploadPart(uploadPartRequest).getPartETag();
        }, executor));
        currentPartBytes = 0;
    }
}
 
Example 10  Project: s3committer   File: TestUtil.java
private static UploadPartResult newResult(UploadPartRequest request,
                                          String etag) {
  UploadPartResult result = new UploadPartResult();
  result.setPartNumber(request.getPartNumber());
  result.setETag(etag);
  return result;
}
 
Example 11  Project: nifi-minifi   File: S3OutputStream.java
public void uploadPart(ByteArrayInputStream inputStream, int partSize) {
  int currentPartNumber = partETags.size() + 1;
  UploadPartRequest request = new UploadPartRequest()
                                        .withBucketName(bucket)
                                        .withKey(key)
                                        .withUploadId(uploadId)
                                        .withInputStream(inputStream)
                                        .withPartNumber(currentPartNumber)
                                        .withPartSize(partSize)
                                        .withGeneralProgressListener(progressListener);
  log.debug("Uploading part {} for id '{}'", currentPartNumber, uploadId);
  partETags.add(s3.uploadPart(request).getPartETag());
}
 
Example 12  Project: bender   File: S3TransporterTest.java
private AmazonS3Client getMockClient() {
  AmazonS3Client mockClient = spy(AmazonS3Client.class);
  UploadPartResult uploadResult = new UploadPartResult();
  uploadResult.setETag("foo");
  doReturn(uploadResult).when(mockClient).uploadPart(any(UploadPartRequest.class));

  InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
  initUploadResult.setUploadId("123");
  doReturn(initUploadResult).when(mockClient)
      .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

  return mockClient;
}
 
Example 13  Project: bender   File: S3TransporterTest.java
@Test
public void testGzFilename() throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath/", true, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename.gz");
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  transport.sendBatch(buffer, partitions, new TestContext());
  verify(mockClient).uploadPart(argument.capture());

  /*
   * Check results
   */
  assertEquals("basepath/a_filename.bz2", argument.getValue().getKey());
}
 
Example 14  Project: bender   File: S3TransporterTest.java
@Test
public void testContextBasedFilename()
    throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath/", true, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  TestContext context = new TestContext();
  context.setAwsRequestId("request_id");
  transport.sendBatch(buffer, partitions, context);
  verify(mockClient).uploadPart(argument.capture());

  /*
   * Check results
   */
  assertEquals("basepath/request_id.bz2", argument.getValue().getKey());
}
 
Example 15  Project: stocator   File: COSAPIClient.java
/**
 * Upload part of a multi-partition file.
 * <i>Important: this call does not close any input stream in the request.</i>
 * @param request request
 * @return the result of the operation
 * @throws AmazonClientException on problems
 */
public UploadPartResult uploadPart(UploadPartRequest request)
    throws AmazonClientException {
  try {
    UploadPartResult uploadPartResult = mClient.uploadPart(request);
    return uploadPartResult;
  } catch (AmazonClientException e) {
    throw e;
  }
}
 
Example 16  Project: stocator   File: COSAPIClient.java
/**
 * Create and initialize a part request of a multipart upload.
 * Exactly one of: {@code uploadStream} or {@code sourceFile}
 * must be specified.
 * @param uploadId ID of ongoing upload
 * @param partNumber current part number of the upload
 * @param size amount of data
 * @param uploadStream source of data to upload
 * @param sourceFile optional source file
 * @return the request
 */
UploadPartRequest newUploadPartRequest(String uploadId,
    int partNumber, int size, InputStream uploadStream, File sourceFile) {
  Preconditions.checkNotNull(uploadId);
  // exactly one source must be set; xor verifies this
  Preconditions.checkArgument((uploadStream != null) ^ (sourceFile != null),
      "Data source");
  Preconditions.checkArgument(size > 0, "Invalid partition size %s", size);
  Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000,
      "partNumber must be between 1 and 10000 inclusive, but is %s",
      partNumber);

  LOG.debug("Creating part upload request for {} #{} size {}",
      uploadId, partNumber, size);
  UploadPartRequest request = new UploadPartRequest()
      .withBucketName(mBucket)
      .withKey(key)
      .withUploadId(uploadId)
      .withPartNumber(partNumber)
      .withPartSize(size);
  if (uploadStream != null) {
    // there's an upload stream. Bind to it.
    request.setInputStream(uploadStream);
  } else {
    request.setFile(sourceFile);
  }
  return request;
}
 
Example 17  Project: stocator   File: COSBlockOutputStream.java
/**
 * Upload a block of data. This will take the block
 *
 * @param block block to upload
 * @throws IOException upload failure
 */
private void uploadBlockAsync(final COSDataBlocks.DataBlock block) throws IOException {
  LOG.debug("Queueing upload of {}", block);
  final int size = block.dataSize();
  final COSDataBlocks.BlockUploadData uploadData = block.startUpload();
  final int currentPartNumber = partETagsFutures.size() + 1;
  final UploadPartRequest request = writeOperationHelper.newUploadPartRequest(uploadId,
      currentPartNumber, size,
      uploadData.getUploadStream(), uploadData.getFile());

  ListenableFuture<PartETag> partETagFuture = executorService.submit(new Callable<PartETag>() {
    @Override
    public PartETag call() throws Exception {
      // this is the queued upload operation
      LOG.debug("Uploading part {} for id '{}'", currentPartNumber, uploadId);
      // do the upload
      PartETag partETag;
      try {
        partETag = fs.uploadPart(request).getPartETag();
        LOG.debug("Completed upload of {} to part {}", block, partETag.getETag());
      } finally {
        // close the stream and block
        closeAll(LOG, uploadData, block);
      }
      return partETag;
    }
  });
  partETagsFutures.add(partETagFuture);
}
 
public static void uploadFile(
        final File file,
        final Artifact artifact,
        final CompressionType compressionType,
        final EncryptionKey encryptionKey,
        final AmazonS3 amazonS3,
        final BuildListener listener) throws IOException {

    LoggingHelper.log(listener, "Uploading artifact: " + artifact + ", file: " + file);

    final String bucketName = artifact.getLocation().getS3Location().getBucketName();
    final String objectKey  = artifact.getLocation().getS3Location().getObjectKey();
    final List<PartETag> partETags = new ArrayList<>();

    final InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(
            bucketName,
            objectKey,
            createObjectMetadata(compressionType))
        .withSSEAwsKeyManagementParams(toSSEAwsKeyManagementParams(encryptionKey));

    final InitiateMultipartUploadResult initiateMultipartUploadResult
            = amazonS3.initiateMultipartUpload(initiateMultipartUploadRequest);

    final long contentLength = file.length();
    long filePosition = 0;
    long partSize = 5 * 1024 * 1024; // Set part size to 5 MB

    for (int i = 1; filePosition < contentLength; i++) {
        partSize = Math.min(partSize, (contentLength - filePosition));

        final UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(objectKey)
                .withUploadId(initiateMultipartUploadResult.getUploadId())
                .withPartNumber(i)
                .withFileOffset(filePosition)
                .withFile(file)
                .withPartSize(partSize);

        partETags.add(amazonS3.uploadPart(uploadPartRequest).getPartETag());

        filePosition += partSize;
    }

    final CompleteMultipartUploadRequest completeMultipartUpload
            = new CompleteMultipartUploadRequest(
                bucketName,
                objectKey,
                initiateMultipartUploadResult.getUploadId(),
                partETags);

    amazonS3.completeMultipartUpload(completeMultipartUpload);

    LoggingHelper.log(listener, "Upload successful");
}
 
@Test
public void uploadsArtifactToS3() throws IOException {
    // when
    publisher.invoke(workspace, null);

    // then
    final InOrder inOrder = inOrder(clientFactory, awsClients, s3Client);
    inOrder.verify(clientFactory).getAwsClient(ACCESS_KEY, SECRET_KEY, PROXY_HOST, PROXY_PORT, REGION, PLUGIN_VERSION);
    inOrder.verify(awsClients).getCodePipelineClient();
    inOrder.verify(awsClients).getS3Client(credentialsProviderCaptor.capture());
    inOrder.verify(s3Client).initiateMultipartUpload(initiateMultipartUploadRequestCaptor.capture());
    inOrder.verify(s3Client).uploadPart(uploadPartRequestCaptor.capture());

    final com.amazonaws.auth.AWSSessionCredentials credentials
        = (com.amazonaws.auth.AWSSessionCredentials) credentialsProviderCaptor.getValue().getCredentials();
    assertEquals(JOB_ACCESS_KEY, credentials.getAWSAccessKeyId());
    assertEquals(JOB_SECRET_KEY, credentials.getAWSSecretKey());
    assertEquals(JOB_SESSION_TOKEN, credentials.getSessionToken());

    verify(codePipelineClient).getJobDetails(getJobDetailsRequestCaptor.capture());
    assertEquals(JOB_ID, getJobDetailsRequestCaptor.getValue().getJobId());

    final InitiateMultipartUploadRequest initRequest = initiateMultipartUploadRequestCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, initRequest.getBucketName());
    assertEquals(S3_OBJECT_KEY, initRequest.getKey());

    final UploadPartRequest uploadRequest = uploadPartRequestCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, uploadRequest.getBucketName());
    assertEquals(S3_OBJECT_KEY, uploadRequest.getKey());
    assertEquals(UPLOAD_ID, uploadRequest.getUploadId());

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());
}
 
@Before
public void setUp() {
    MockitoAnnotations.initMocks(this);

    when(mockS3Client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class)))
            .thenReturn(mockUploadResult);
    when(mockS3Client.uploadPart(any(UploadPartRequest.class))).thenReturn(mockPartRequest);
    when(mockUploadResult.getUploadId()).thenReturn("123");
    when(mockArtifact.getLocation()).thenReturn(mockLocation);
    when(mockLocation.getS3Location()).thenReturn(s3ArtifactLocation);
    when(s3ArtifactLocation.getBucketName()).thenReturn("Bucket");
    when(s3ArtifactLocation.getObjectKey()).thenReturn("Key");

    outContent = TestUtils.setOutputStream();
}
 
@Test
public void uploadFileSuccess() throws IOException {
    TestUtils.initializeTestingFolders();

    final File compressedFile = CompressionTools.compressFile(
            "ZipProject",
            PATH_TO_COMPRESS,
            CompressionType.Zip,
            null);

    PublisherTools.uploadFile(
            compressedFile,
            mockArtifact,
            CompressionType.Zip,
            null, // No custom encryption key
            mockS3Client,
            null); // Listener

    final InOrder inOrder = inOrder(mockS3Client);
    inOrder.verify(mockS3Client, times(1)).initiateMultipartUpload(initiateCaptor.capture());
    // Total size is less than 5MB, should only be one upload
    inOrder.verify(mockS3Client, times(1)).uploadPart(any(UploadPartRequest.class));
    inOrder.verify(mockS3Client, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertNull(encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
 
Example 22  Project: nifi   File: TestS3EncryptionStrategies.java
@Before
public void setup() {
    byte[] keyRawBytes = new byte[32];
    SecureRandom secureRandom = new SecureRandom();
    secureRandom.nextBytes(keyRawBytes);
    randomKeyMaterial = Base64.encodeBase64String(keyRawBytes);

    metadata = new ObjectMetadata();
    putObjectRequest = new PutObjectRequest("", "", "");
    initUploadRequest = new InitiateMultipartUploadRequest("", "");
    getObjectRequest = new GetObjectRequest("", "");
    uploadPartRequest = new UploadPartRequest();
}
 
Example 23  Project: stratosphere   File: S3DataOutputStream.java
private void uploadPartAndFlushBuffer() throws IOException {

		boolean operationSuccessful = false;

		if (this.uploadId == null) {
			this.uploadId = initiateMultipartUpload();
		}

		try {

			if (this.partNumber >= MAX_PART_NUMBER) {
				throw new IOException("Cannot upload any more data: maximum part number reached");
			}

			final InputStream inputStream = new InternalUploadInputStream(this.buf, this.bytesWritten);
			final UploadPartRequest request = new UploadPartRequest();
			request.setBucketName(this.bucket);
			request.setKey(this.object);
			request.setInputStream(inputStream);
			request.setUploadId(this.uploadId);
			request.setPartSize(this.bytesWritten);
			request.setPartNumber(this.partNumber++);

			final UploadPartResult result = this.s3Client.uploadPart(request);
			this.partETags.add(result.getPartETag());

			this.bytesWritten = 0;
			operationSuccessful = true;

		} catch (AmazonServiceException e) {
			throw new IOException(StringUtils.stringifyException(e));
		} finally {
			if (!operationSuccessful) {
				abortUpload();
			}
		}
	}
 
Example 24  Project: crate   File: OutputS3.java
@Override
public void close() throws IOException {
    UploadPartRequest uploadPartRequest = new UploadPartRequest()
        .withBucketName(bucketName)
        .withKey(key)
        .withPartNumber(partNumber)
        .withPartSize(outputStream.size())
        .withUploadId(multipartUpload.getUploadId())
        .withInputStream(new ByteArrayInputStream(outputStream.toByteArray()));
    UploadPartResult uploadPartResult = client.uploadPart(uploadPartRequest);

    List<PartETag> partETags;
    try {
        partETags = CompletableFutures.allAsList(pendingUploads).get();
    } catch (InterruptedException | ExecutionException e) {
        throw new IOException(e);
    }
    partETags.add(uploadPartResult.getPartETag());
    client.completeMultipartUpload(
        new CompleteMultipartUploadRequest(
            bucketName,
            key,
            multipartUpload.getUploadId(),
            partETags)
    );
    super.close();
}
 
Example 25  Project: hadoop-ozone   File: S3KeyGenerator.java
private void createKey(long counter) throws Exception {
  timer.time(() -> {
    if (multiPart) {

      final String keyName = generateObjectName(counter);
      final InitiateMultipartUploadRequest initiateRequest =
          new InitiateMultipartUploadRequest(bucketName, keyName);

      final InitiateMultipartUploadResult initiateMultipartUploadResult =
          s3.initiateMultipartUpload(initiateRequest);
      final String uploadId = initiateMultipartUploadResult.getUploadId();

      List<PartETag> parts = new ArrayList<>();
      for (int i = 1; i <= numberOfParts; i++) {

        final UploadPartRequest uploadPartRequest = new UploadPartRequest()
            .withBucketName(bucketName)
            .withKey(keyName)
            .withPartNumber(i)
            .withLastPart(i == numberOfParts)
            .withUploadId(uploadId)
            .withPartSize(fileSize)
            .withInputStream(new ByteArrayInputStream(content.getBytes(
                StandardCharsets.UTF_8)));

        final UploadPartResult uploadPartResult =
            s3.uploadPart(uploadPartRequest);
        parts.add(uploadPartResult.getPartETag());
      }

      s3.completeMultipartUpload(
          new CompleteMultipartUploadRequest(bucketName, keyName, uploadId,
              parts));

    } else {
      s3.putObject(bucketName, generateObjectName(counter),
          content);
    }

    return null;
  });
}
 
Example 26  Project: Flink-CEPplus   File: HadoopS3AccessHelper.java
@Override
public UploadPartResult uploadPart(String key, String uploadId, int partNumber, File inputFile, long length) throws IOException {
	final UploadPartRequest uploadRequest = s3accessHelper.newUploadPartRequest(
		key, uploadId, partNumber, MathUtils.checkedDownCast(length), null, inputFile, 0L);
	return s3accessHelper.uploadPart(uploadRequest);
}
 
Example 27  Project: flink   File: HadoopS3AccessHelper.java
@Override
public UploadPartResult uploadPart(String key, String uploadId, int partNumber, File inputFile, long length) throws IOException {
	final UploadPartRequest uploadRequest = s3accessHelper.newUploadPartRequest(
		key, uploadId, partNumber, MathUtils.checkedDownCast(length), null, inputFile, 0L);
	return s3accessHelper.uploadPart(uploadRequest);
}
 
Example 28  Project: s3committer   File: S3Util.java
public static PendingUpload multipartUpload(
    AmazonS3 client, File localFile, String partition,
    String bucket, String key, long uploadPartSize) {

  InitiateMultipartUploadResult initiate = client.initiateMultipartUpload(
      new InitiateMultipartUploadRequest(bucket, key));
  String uploadId = initiate.getUploadId();

  boolean threw = true;
  try {
    Map<Integer, String> etags = Maps.newLinkedHashMap();

    long offset = 0;
    long numParts = (localFile.length() / uploadPartSize +
        ((localFile.length() % uploadPartSize) > 0 ? 1 : 0));

    Preconditions.checkArgument(numParts > 0,
        "Cannot upload 0 byte file: " + localFile);

    for (int partNumber = 1; partNumber <= numParts; partNumber += 1) {
      long size = Math.min(localFile.length() - offset, uploadPartSize);
      UploadPartRequest part = new UploadPartRequest()
          .withBucketName(bucket)
          .withKey(key)
          .withPartNumber(partNumber)
          .withUploadId(uploadId)
          .withFile(localFile)
          .withFileOffset(offset)
          .withPartSize(size)
          .withLastPart(partNumber == numParts);

      UploadPartResult partResult = client.uploadPart(part);
      PartETag etag = partResult.getPartETag();
      etags.put(etag.getPartNumber(), etag.getETag());

      offset += uploadPartSize;
    }

    PendingUpload pending = new PendingUpload(
        partition, bucket, key, uploadId, etags);

    threw = false;

    return pending;

  } finally {
    if (threw) {
      try {
        client.abortMultipartUpload(
            new AbortMultipartUploadRequest(bucket, key, uploadId));
      } catch (AmazonClientException e) {
        LOG.error("Failed to abort multi-part upload", e);
      }
    }
  }
}
 
Example 29  Project: s3committer   File: TestUtil.java
public List<UploadPartRequest> getParts() {
  return parts;
}
 
Example 30  Project: bender   File: MultiPartUpload.java
public UploadPartRequest getUploadPartRequest() {
  return new UploadPartRequest().withBucketName(this.bucketName).withKey(this.key)
      .withPartNumber(this.partCount.incrementAndGet()).withUploadId(this.uploadId);
}
 