类com.amazonaws.services.s3.model.PutObjectRequest源码实例Demo

下面列出了怎么用com.amazonaws.services.s3.model.PutObjectRequest的API类实例代码及写法,或者点击链接到github查看源代码。

源代码1 项目: herd   文件: S3DaoTest.java
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreNotInitiated()
{
    // Stage a one-byte object in the Glacier storage class with no restore
    // request on it, so the OngoingRestore flag stays null.
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    PutObjectRequest putObjectRequest =
        new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), objectMetadata);
    s3Operations.putObject(putObjectRequest, null);

    // Validation must reject the file, since no restore was ever initiated.
    S3FileTransferRequestParamsDto requestParams = new S3FileTransferRequestParamsDto();
    requestParams.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    requestParams.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    try
    {
        s3Dao.validateGlacierS3FilesRestored(requestParams);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String
            .format("Archived S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {null}, S3 bucket name {%s}",
                TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
 
源代码2 项目: pocket-etl   文件: S3FastLoader.java
private void writeBufferToS3(byte[] toWrite, int limit) {
    // Profile the whole upload, including request construction.
    try (EtlProfilingScope scope = new EtlProfilingScope(parentMetrics, "S3FastLoader.writeToS3")) {
        // Each part file gets the next key in the sequence.
        String partKey = s3PartFileKeyGenerator.apply(++fileSequenceNumber);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(limit);
        PutObjectRequest request =
                new PutObjectRequest(s3Bucket, partKey, new ByteArrayInputStream(toWrite, 0, limit), objectMetadata);

        // Apply KMS server-side encryption only when a key ARN was configured.
        if (sseKmsArn != null && !sseKmsArn.isEmpty()) {
            request.setSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(sseKmsArn));
        }

        try {
            amazonS3.putObject(request);
            emitSuccessAndFailureMetrics(scope, true);
        } catch (AmazonClientException e) {
            // Record the failure in metrics, then abort the stream: a lost part
            // cannot be recovered by the caller.
            logger.error(e);
            scope.addCounter(e.getClass().getSimpleName(), 1);
            emitSuccessAndFailureMetrics(scope, false);
            throw new UnrecoverableStreamFailureException("Exception caught trying to write object to S3: ", e);
        }
    }
}
 
源代码3 项目: tutorials   文件: MultipartUploadLiveTest.java
@Test
public void whenUploadingFileWithTransferManager_thenVerifyUploadRequested() {
    // Stub out the file and the client so no real I/O happens.
    File mockFile = mock(File.class);
    PutObjectResult putResult = mock(PutObjectResult.class);
    when(amazonS3.putObject(anyString(), anyString(), (File) any())).thenReturn(putResult);
    when(mockFile.getName()).thenReturn(KEY_NAME);

    PutObjectRequest putRequest = new PutObjectRequest(BUCKET_NAME, KEY_NAME, mockFile);
    putRequest.setGeneralProgressListener(progressListener);

    Upload upload = tm.upload(putRequest);

    // The transfer manager must hand the exact request object to the S3 client.
    assertThat(upload).isNotNull();
    verify(amazonS3).putObject(putRequest);
}
 
源代码4 项目: herd   文件: S3DaoTest.java
@Test
public void testTagObjects()
{
    // Parameters DTO pointing at the bucket that holds the object to tag.
    S3FileTransferRequestParamsDto requestParams = new S3FileTransferRequestParamsDto();
    requestParams.setS3BucketName(S3_BUCKET_NAME);

    // Summary describing the object to tag, plus the tag to apply.
    S3ObjectSummary objectSummary = new S3ObjectSummary();
    objectSummary.setKey(TARGET_S3_KEY);
    Tag objectTag = new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE);

    // Upload a one-byte object, then tag it through the DAO.
    s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), new ObjectMetadata()), null);
    s3Dao.tagObjects(requestParams, new S3FileTransferRequestParamsDto(), Collections.singletonList(objectSummary), objectTag);

    // The object's tag set should now contain exactly the applied tag.
    GetObjectTaggingResult taggingResult = s3Operations.getObjectTagging(new GetObjectTaggingRequest(S3_BUCKET_NAME, TARGET_S3_KEY), null);
    assertEquals(Collections.singletonList(objectTag), taggingResult.getTagSet());
}
 
源代码5 项目: herd   文件: S3DaoTest.java
@Test
public void testValidateGlacierS3FilesRestored()
{
    // Stage a one-byte Glacier object whose restore has already completed
    // (OngoingRestore is explicitly false).
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    objectMetadata.setOngoingRestore(false);
    s3Operations.putObject(
        new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), objectMetadata), null);

    // Validation should complete without raising for an already-restored file.
    S3FileTransferRequestParamsDto requestParams = new S3FileTransferRequestParamsDto();
    requestParams.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    requestParams.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.validateGlacierS3FilesRestored(requestParams);
}
 
源代码6 项目: datacollector   文件: FileHelper.java
Upload doUpload(String bucket, String fileName, InputStream is, ObjectMetadata metadata) {
  final PutObjectRequest putObjectRequest = new PutObjectRequest(
      bucket,
      fileName,
      is,
      metadata
  );
  final String object = bucket + s3TargetConfigBean.s3Config.delimiter + fileName;
  Upload upload = transferManager.upload(putObjectRequest);
  upload.addProgressListener((ProgressListener) progressEvent -> {
    switch (progressEvent.getEventType()) {
      case TRANSFER_STARTED_EVENT:
        LOG.debug("Started uploading object {} into Amazon S3", object);
        break;
      case TRANSFER_COMPLETED_EVENT:
        LOG.debug("Completed uploading object {} into Amazon S3", object);
        break;
      case TRANSFER_FAILED_EVENT:
        LOG.debug("Failed uploading object {} into Amazon S3", object);
        break;
      default:
        break;
    }
  });
  return upload;
}
 
源代码7 项目: konker-platform   文件: AwsUploadRepository.java
public String upload(InputStream is, String fileKey, String fileName, String suffix, Boolean isPublic) throws Exception {
    // Uploads the stream contents to the configured S3 bucket under fileKey and
    // returns "fileName.suffix". A null isPublic defaults to a public upload.
    validateFile(is, suffix);
    if (isPublic == null) {
        isPublic = Boolean.TRUE;
    }
    if ((is != null) && (fileKey != null)) {
        try {
            // Buffer the whole stream so metadata (e.g. length) can be derived from the bytes.
            byte[] bytes = IOUtils.toByteArray(is);
            s3Client.putObject(
                    new PutObjectRequest(
                    		s3BucketConfig.getName(),
                            fileKey,
                            new ByteArrayInputStream(bytes),
                            S3ObjectMetadata.getObjectMetadata(bytes)
                    ).withCannedAcl(isPublic ? CannedAccessControlList.PublicRead : CannedAccessControlList.AuthenticatedRead)
            );
            return fileName + '.' + suffix;
        } catch (AmazonServiceException | IOException e) {
            // NOTE(review): the original cause 'e' is dropped here; if BusinessException
            // accepts a cause argument, chaining it would greatly aid debugging — confirm.
            throw new BusinessException(Validations.INVALID_S3_BUCKET_CREDENTIALS.getCode());
        } finally {
            // The input stream is always closed, even on failure.
            is.close();
        }
    } else {
        throw new BusinessException(Validations.INVALID_PARAMETERS.getCode());
    }
}
 
源代码8 项目: nifi   文件: TestPutS3Object.java
@Test
public void testPutSinglePart() {
    runner.setProperty("x-custom-prop", "hello");
    prepareTest();

    runner.run(1);

    // The processor must issue exactly one putObject against the expected bucket.
    ArgumentCaptor<PutObjectRequest> requestCaptor = ArgumentCaptor.forClass(PutObjectRequest.class);
    Mockito.verify(mockS3Client, Mockito.times(1)).putObject(requestCaptor.capture());
    assertEquals("test-bucket", requestCaptor.getValue().getBucketName());

    runner.assertAllFlowFilesTransferred(PutS3Object.REL_SUCCESS, 1);

    // The success flow file carries the filename plus the S3 etag/version attributes.
    List<MockFlowFile> successFlowFiles = runner.getFlowFilesForRelationship(PutS3Object.REL_SUCCESS);
    MockFlowFile flowFile = successFlowFiles.get(0);
    flowFile.assertAttributeEquals(CoreAttributes.FILENAME.key(), "testfile.txt");
    flowFile.assertAttributeEquals(PutS3Object.S3_ETAG_ATTR_KEY, "test-etag");
    flowFile.assertAttributeEquals(PutS3Object.S3_VERSION_ATTR_KEY, "test-version");
}
 
源代码9 项目: big-c   文件: S3AFileSystem.java
private void createEmptyObject(final String bucketName, final String objectName)
    throws AmazonClientException, AmazonServiceException {
  // A stream that is immediately at end-of-file, so the object body is empty.
  final InputStream emptyStream = new InputStream() {
    @Override
    public int read() throws IOException {
      return -1;
    }
  };

  final ObjectMetadata metadata = new ObjectMetadata();
  metadata.setContentLength(0L);
  // Propagate the configured server-side encryption algorithm, if any.
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    metadata.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }

  final PutObjectRequest request = new PutObjectRequest(bucketName, objectName, emptyStream, metadata);
  request.setCannedAcl(cannedACL);
  s3.putObject(request);
  statistics.incrementWriteOps(1);
}
 
源代码10 项目: mojito   文件: S3BlobStorage.java
void put(String name, byte[] content, Retention retention, ObjectMetadata objectMetadata) {
    // Fail fast on missing metadata, then record the exact payload size.
    Preconditions.checkNotNull(objectMetadata);
    objectMetadata.setContentLength(content.length);

    PutObjectRequest request = new PutObjectRequest(
            s3BlobStorageConfigurationProperties.getBucket(),
            getFullName(name),
            new ByteArrayInputStream(content),
            objectMetadata);

    // Tag the object with its retention policy so lifecycle rules can act on it.
    List<Tag> retentionTags = new ArrayList<Tag>();
    retentionTags.add(new Tag("retention", retention.toString()));
    request.setTagging(new ObjectTagging(retentionTags));

    amazonS3.putObject(request);
}
 
@Before
public void setup() throws Exception
{
  // Create the test bucket and seed it with two small input files.
  client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
  client.createBucket(testMeta.bucketKey);
  inputDir = testMeta.baseDirectory + File.separator + "input";

  File firstFile = new File(inputDir + File.separator + FILE_1);
  File secondFile = new File(inputDir + File.separator + FILE_2);
  FileUtils.writeStringToFile(firstFile, FILE_1_DATA);
  FileUtils.writeStringToFile(secondFile, FILE_2_DATA);

  client.putObject(new PutObjectRequest(testMeta.bucketKey, "input/" + FILE_1, firstFile));
  client.putObject(new PutObjectRequest(testMeta.bucketKey, "input/" + FILE_2, secondFile));

  // URI pointing the module under test at the seeded input directory.
  files = SCHEME + "://" + accessKey + ":" + secretKey + "@" + testMeta.bucketKey + "/input";
}
 
@Test
public void testUploadLocalSourceWithNoSSEAlgorithm() throws Exception {
    File sourceFile = new File(mockWorkspaceDir + "/source-file");
    FileUtils.write(sourceFile, "contents");

    // Stub the S3 client to return a versioned result for any put.
    PutObjectResult stubbedResult = new PutObjectResult();
    stubbedResult.setVersionId("some-version-id");
    when(s3Client.putObject(any(PutObjectRequest.class))).thenReturn(stubbedResult);

    S3DataManager dataManager = new S3DataManager(s3Client, s3InputBucketName, s3InputKeyName, "", sourceFile.getPath(), "");
    UploadToS3Output uploadResult = dataManager.uploadSourceToS3(listener, testWorkSpace);
    assertEquals(uploadResult.getSourceLocation(), s3InputBucketName + "/" + s3InputKeyName);

    // The captured request must target the right bucket/key, carry the zip's MD5
    // and length, and have no server-side encryption algorithm configured.
    ArgumentCaptor<PutObjectRequest> requestCaptor = ArgumentCaptor.forClass(PutObjectRequest.class);
    verify(s3Client).putObject(requestCaptor.capture());
    assertEquals(requestCaptor.getValue().getBucketName(), s3InputBucketName);
    assertEquals(requestCaptor.getValue().getKey(), s3InputKeyName);
    assertEquals(requestCaptor.getValue().getMetadata().getContentMD5(), S3DataManager.getZipMD5(sourceFile));
    assertEquals(requestCaptor.getValue().getMetadata().getContentLength(), sourceFile.length());
    assertNull(requestCaptor.getValue().getMetadata().getSSEAlgorithm());
}
 
源代码13 项目: chancery   文件: S3Archiver.java
private void upload(@NotNull File src, @NotNull String key, @NotNull CallbackPayload payload) {
    log.info("Uploading {} to {} in {}", src, key, bucketName);
    final PutObjectRequest request = new PutObjectRequest(bucketName, key, src);
    final ObjectMetadata metadata = request.getMetadata();

    // Record provenance from the webhook payload as user metadata, when present.
    final String commitId = payload.getAfter();
    if (commitId != null) {
        metadata.addUserMetadata("commit-id", commitId);
    }
    final DateTime timestamp = payload.getTimestamp();
    if (timestamp != null) {
        metadata.addUserMetadata("hook-timestamp", ISODateTimeFormat.basicTime().print(timestamp));
    }

    // Time the transfer whether it succeeds or fails; failures propagate after logging.
    final TimerContext uploadTime = uploadTimer.time();
    try {
        s3Client.putObject(request);
    } catch (Exception e) {
        log.error("Couldn't upload to {} in {}", key, bucketName, e);
        throw e;
    } finally {
        uploadTime.stop();
    }
    log.info("Uploaded to {} in {}", key, bucketName);
}
 
源代码14 项目: entrada   文件: S3FileManagerImpl.java
private boolean uploadFile(File src, S3Details dstDetails, boolean archive) {
  // Target key = destination key + source file name.
  PutObjectRequest request =
      new PutObjectRequest(dstDetails.getBucket(), FileUtil.appendPath(dstDetails.getKey(), src.getName()), src);

  ObjectMetadata metadata = new ObjectMetadata();
  if (archive) {
    // Archived uploads go to the configured archival storage class.
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.fromValue(StringUtils.upperCase(archiveStorageClass)));
  }
  if (encrypt) {
    metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
  }
  request.setMetadata(metadata);

  try {
    amazonS3.putObject(request);
    return true;
  } catch (Exception e) {
    // Deliberate best-effort: log and report failure instead of propagating.
    log.error("Error while uploading file: {}", src, e);
    return false;
  }
}
 
源代码15 项目: herd   文件: S3DaoTest.java
@Test
public void testTagVersionsTargetTagKeyAlreadyExists()
{
    // Two tags sharing one key: applying the second should replace the first.
    List<Tag> objectTags = Arrays.asList(new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE), new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE_2));

    // Upload a file already carrying the first tag into a versioning-enabled bucket.
    PutObjectRequest request =
        new PutObjectRequest(MockS3OperationsImpl.MOCK_S3_BUCKET_NAME_VERSIONING_ENABLED, TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]),
            new ObjectMetadata());
    request.setTagging(new ObjectTagging(Collections.singletonList(objectTags.get(0))));
    s3Operations.putObject(request, null);

    // Exactly one version of the test key should exist, with a non-null version id.
    VersionListing versions = s3Operations.listVersions(
        new ListVersionsRequest().withBucketName(MockS3OperationsImpl.MOCK_S3_BUCKET_NAME_VERSIONING_ENABLED).withPrefix(TARGET_S3_KEY), null);
    assertEquals(1, CollectionUtils.size(versions.getVersionSummaries()));
    assertEquals(TARGET_S3_KEY, versions.getVersionSummaries().get(0).getKey());
    assertNotNull(versions.getVersionSummaries().get(0).getVersionId());

    // Before tagging via the DAO, only the first tag is present on the version.
    GetObjectTaggingResult taggingResult = s3Operations.getObjectTagging(
        new GetObjectTaggingRequest(MockS3OperationsImpl.MOCK_S3_BUCKET_NAME_VERSIONING_ENABLED, TARGET_S3_KEY,
            versions.getVersionSummaries().get(0).getVersionId()), null);
    assertEquals(Collections.singletonList(objectTags.get(0)), taggingResult.getTagSet());

    // Apply the second tag (same key) to all listed versions.
    S3FileTransferRequestParamsDto requestParams = new S3FileTransferRequestParamsDto();
    requestParams.setS3BucketName(MockS3OperationsImpl.MOCK_S3_BUCKET_NAME_VERSIONING_ENABLED);
    s3Dao.tagVersions(requestParams, new S3FileTransferRequestParamsDto(), versions.getVersionSummaries(), objectTags.get(1));

    // The duplicate key means the second tag fully replaces the first.
    taggingResult = s3Operations.getObjectTagging(
        new GetObjectTaggingRequest(MockS3OperationsImpl.MOCK_S3_BUCKET_NAME_VERSIONING_ENABLED, TARGET_S3_KEY,
            versions.getVersionSummaries().get(0).getVersionId()), null);
    assertEquals(Collections.singletonList(objectTags.get(1)), taggingResult.getTagSet());
}
 
源代码16 项目: data-highway   文件: S3ConnectivityCheckTest.java
@Test
public void checkS3() {
  // Stub a successful put; the connectivity check should complete without throwing.
  Mockito.when(s3.putObject(Mockito.any(PutObjectRequest.class))).thenReturn(new PutObjectResult());

  underTest.checkS3Put(s3, "bucket", "key");
}
 
源代码17 项目: data-highway   文件: S3ConnectivityCheckTest.java
@Test(expected = SdkClientException.class)
public void checkS3WithError() {
  // Stub the put to fail; the SdkClientException must propagate out of the check.
  Mockito.when(s3.putObject(Mockito.any(PutObjectRequest.class))).thenThrow(new SdkClientException(""));

  underTest.checkS3Put(s3, "bucket", "key");
}
 
源代码18 项目: gocd-s3-artifacts   文件: PublishExecutorTest.java
@Test
public void shouldUploadALocalFileToS3WithDestinationPrefixUsingEnvVariable() {
    AmazonS3Client mockClient = mockClient();

    // The destination prefix contains ${GO_PIPELINE_COUNTER}, which the publish
    // task is expected to resolve from the environment (to "pipelineCounter" here).
    Config config = new Config(Maps.builder()
            .with(Constants.SOURCEDESTINATIONS, Maps.builder().with("value", "[{\"source\": \"target/*\", \"destination\": \"\"}]").build())
            .with(Constants.DESTINATION_PREFIX, Maps.builder().with("value", "test/${GO_PIPELINE_COUNTER}/").build())
            .with(Constants.ARTIFACTS_BUCKET, Maps.builder().with("value", "").build())
            .build());

    TaskExecutionResult result = executeMockPublish(
            mockClient,
            config,
            new String[]{"README.md", "s3publish-0.1.31.jar"}
    );

    assertTrue(result.isSuccessful());

    // One put per published file, each keyed under the resolved prefix.
    final List<PutObjectRequest> allPutObjectRequests = getPutObjectRequests(mockClient, 2);

    PutObjectRequest readmePutRequest = allPutObjectRequests.get(0);
    assertThat(readmePutRequest.getBucketName(), is("testS3Bucket"));
    assertThat(readmePutRequest.getKey(), is("test/pipelineCounter/README.md"));
    assertNull(readmePutRequest.getMetadata());

    // Fix: the original asserted getMetadata() null twice for the jar request;
    // keep a single assertion, ordered to mirror the README checks above.
    PutObjectRequest jarPutRequest = allPutObjectRequests.get(1);
    assertThat(jarPutRequest.getBucketName(), is("testS3Bucket"));
    assertThat(jarPutRequest.getKey(), is("test/pipelineCounter/s3publish-0.1.31.jar"));
    assertNull(jarPutRequest.getMetadata());
}
 
源代码19 项目: nifi   文件: AbstractS3IT.java
/**
 * Uploads the given file to the integration-test bucket with SSE-S3 (AES-256) enabled.
 *
 * @param key the S3 object key to upload under
 * @param file the local file whose contents are uploaded
 * @throws AmazonS3Exception if the S3 put fails
 * @throws FileNotFoundException if the local file cannot be opened
 */
protected void putTestFileEncrypted(String key, File file) throws AmazonS3Exception, FileNotFoundException {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    FileInputStream fileStream = new FileInputStream(file);
    try {
        client.putObject(new PutObjectRequest(BUCKET_NAME, key, fileStream, objectMetadata));
    } finally {
        // The SDK normally closes request content streams itself; close defensively
        // so the descriptor is released even if the put fails before consuming it.
        try {
            fileStream.close();
        } catch (Exception ignored) {
            // best-effort close of a test fixture stream; upload outcome already determined
        }
    }
}
 
源代码20 项目: hop   文件: S3FileObjectTest.java
@Test
public void testDoCreateFolder() throws Exception {
  // A folder under a bucket (not the bucket root) is materialized as an object
  // whose key ends with "/".
  S3FileObject folderObject = spy(
    new S3FileObject( new S3FileName( SCHEME, BUCKET_NAME, BUCKET_NAME + "/" + origKey, FileType.IMAGINARY ),
      fileSystemSpy ) );
  folderObject.createFolder();

  ArgumentCaptor<PutObjectRequest> requestCaptor = ArgumentCaptor.forClass( PutObjectRequest.class );
  verify( s3ServiceMock ).putObject( requestCaptor.capture() );
  assertEquals( BUCKET_NAME, requestCaptor.getValue().getBucketName() );
  assertEquals( "some/key/", requestCaptor.getValue().getKey() );
}
 
源代码21 项目: gocd-s3-artifacts   文件: S3ArtifactStoreTest.java
@Test
public void shouldUseStandardStorageClassAsDefault() {
    // With no storage class configured on the store, puts should go out as STANDARD.
    S3ArtifactStore artifactStore = new S3ArtifactStore(mockClient, "foo-bar");
    artifactStore.put(new PutObjectRequest("foo-bar", "key", new File("/tmp/baz")));

    verify(mockClient, times(1)).putObject(putCaptor.capture());
    assertThat(putCaptor.getValue().getStorageClass(), is("STANDARD"));
}
 
@Test
public void testWhenSendLargeMessageThenPayloadIsStoredInS3() {
    // A body over the SQS size limit must be offloaded to S3 exactly once.
    String oversizedBody = generateStringWithLength(MORE_THAN_SQS_SIZE_LIMIT);
    extendedSqsWithDefaultConfig.sendMessage(new SendMessageRequest(SQS_QUEUE_URL, oversizedBody));

    verify(mockS3, times(1)).putObject(isA(PutObjectRequest.class));
}
 
源代码23 项目: pocket-etl   文件: S3FastLoaderTest.java
@Before
public void initializeS3StreamCapture() {
    // Capture every uploaded payload into s3StreamCapture instead of hitting S3,
    // and hand back an empty result as a real put would.
    when(s3Client.putObject(any(PutObjectRequest.class))).thenAnswer(invocation -> {
        PutObjectRequest capturedRequest = (PutObjectRequest) invocation.getArguments()[0];
        s3StreamCapture.add(byteArrayToList(toByteArray(capturedRequest.getInputStream())));
        return new PutObjectResult();
    });
}
 
源代码24 项目: StormCV   文件: S3Connector.java
@Override
public void copyFile(File localFile, boolean delete) throws IOException {
	// Uploading requires a target location to have been set via moveTo.
	if (s3URI == null) {
		throw new FileNotFoundException("No location set, use moveTo first!");
	}
	String[] bucketAndKey = getBucketAndKey();
	s3.putObject(new PutObjectRequest(bucketAndKey[0], bucketAndKey[1], localFile));
	if (delete) {
		localFile.delete();
	}
}
 
源代码25 项目: aws-big-data-blog   文件: MCAWS.java
/**
 * Uploads a local image file to the given S3 bucket/key in us-west-2.
 * Failures are reported but not propagated (best-effort, matching the original contract).
 *
 * @param bucketName the target S3 bucket
 * @param key the S3 object key
 * @param fileName path of the local file to upload
 */
public static void putImageS3(String bucketName, String key, String fileName) {
    AmazonS3 s3 = new AmazonS3Client();
    s3.setRegion(Region.getRegion(Regions.US_WEST_2));
    try {
        s3.putObject(new PutObjectRequest(bucketName, key, new File(fileName)));
    } catch (Exception e) {
        // Fix: the original swallowed all failure detail; include the file and
        // the exception message so upload problems are diagnosable.
        System.err.println("ERROR ON IMAGE FILE " + fileName + ": " + e.getMessage());
    }
}
 
@Test
public void testWhenSendMessageWithLargePayloadSupportDisabledThenS3IsNotUsedAndSqsBackendIsResponsibleToFailIt() {
    // With large-payload support disabled, an oversized body must bypass S3
    // entirely and be handed to the SQS backend unchanged.
    String oversizedBody = generateStringWithLength(MORE_THAN_SQS_SIZE_LIMIT);
    ExtendedClientConfiguration configuration = new ExtendedClientConfiguration()
            .withLargePayloadSupportDisabled();
    AmazonSQS sqsExtended = spy(new AmazonSQSExtendedClient(mockSqsBackend, configuration));

    SendMessageRequest messageRequest = new SendMessageRequest(SQS_QUEUE_URL, oversizedBody);
    sqsExtended.sendMessage(messageRequest);

    verify(mockS3, never()).putObject(isA(PutObjectRequest.class));
    verify(mockSqsBackend).sendMessage(eq(messageRequest));
}
 
源代码27 项目: pocket-etl   文件: S3FastLoaderTest.java
@Test
public void writeCallsS3WithContentStreamLength() {
    // 15 characters in -> a content length of 15 bytes on the S3 request metadata.
    testS3LoaderWithStrings("123456789ABCDEF");

    verify(s3Client, times(1)).putObject(putObjectRequestArgumentCaptor.capture());
    assertThat(putObjectRequestArgumentCaptor.getValue().getMetadata().getContentLength(), equalTo(15L));
}
 
源代码28 项目: pocket-etl   文件: S3FastLoaderTest.java
@Test
public void s3WritesHaveCorrectKeysWithIncrementingSequenceNumber() {
    // Three writes -> three puts whose keys carry sequence numbers 1, 2, 3.
    testS3LoaderWithStrings("123456789ABCDEF", "123456789ABCDEF", "123456789ABCDEF");
    verify(s3Client, times(3)).putObject(putObjectRequestArgumentCaptor.capture());

    List<PutObjectRequest> capturedRequests = putObjectRequestArgumentCaptor.getAllValues();
    for (int i = 0; i < 3; i++) {
        assertThat(capturedRequests.get(i).getKey(), equalTo(S3_PREFIX + "/" + (i + 1) + S3_SUFFIX));
    }
}
 
源代码29 项目: pocket-etl   文件: S3FastLoaderTest.java
@Test
public void writeWithExceptionThrowsUnrecoverableStreamFailureException() {
    // Any client-side S3 failure must surface as an unrecoverable stream failure
    // with the original exception attached as the cause.
    AmazonClientException underlyingFailure = new AmazonClientException("oh no!");
    reset(s3Client);
    when(s3Client.putObject(any(PutObjectRequest.class))).thenThrow(underlyingFailure);

    try {
        testS3LoaderWithStrings("123456789ABCDEF");
        fail("Expected UnrecoverableStreamFailureException");
    } catch (UnrecoverableStreamFailureException e) {
        assertThat(e.getCause(), is(underlyingFailure));
    }

    verify(s3Client).putObject(any());
}
 
源代码30 项目: herd   文件: JobServiceTest.java
/**
 * Puts a Java properties file content into S3. The properties content is a single key-value pair built from the given parameter.
 *
 * @param s3BucketName the S3 bucket name
 * @param s3ObjectKey the S3 object key
 * @param parameter the parameter providing the property name and value
 */
private void putParameterIntoS3(String s3BucketName, String s3ObjectKey, Parameter parameter)
{
    String content = parameter.getName() + "=" + parameter.getValue();
    byte[] bytes = content.getBytes();
    ByteArrayInputStream inputStream = new ByteArrayInputStream(bytes);

    ObjectMetadata metadata = new ObjectMetadata();
    // Fix: use the encoded byte count, not the character count — the two differ
    // for non-ASCII content, and S3 expects the exact stream length in bytes.
    metadata.setContentLength(bytes.length);

    PutObjectRequest putObjectRequest = new PutObjectRequest(s3BucketName, s3ObjectKey, inputStream, metadata);
    s3Operations.putObject(putObjectRequest, null);
}
 
 同包方法