Listed below are example usages of com.amazonaws.services.s3.model.PutObjectRequest. You can also follow the links to view the full source code on GitHub, or leave a comment on the right.
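Before the collected examples, here is a minimal, self-contained sketch of the two most common PutObjectRequest constructors: uploading a local file, and uploading an in-memory stream with explicit metadata. The bucket name, keys, and file paths are placeholders, not taken from any example below.

import java.io.ByteArrayInputStream;
import java.io.File;
import java.nio.charset.StandardCharsets;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class PutObjectRequestSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // File-based upload: the SDK derives the content length from the file.
        // "example-bucket" and the keys are placeholder names.
        s3.putObject(new PutObjectRequest("example-bucket", "docs/report.txt", new File("/tmp/report.txt")));

        // Stream-based upload: set the content length explicitly so the SDK
        // does not have to buffer the whole stream to size it.
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length);
        s3.putObject(new PutObjectRequest("example-bucket", "docs/hello.txt",
                new ByteArrayInputStream(payload), metadata));
    }
}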
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreNotInitiated()
{
    // Put a 1 byte Glacier storage class file in S3 that has no restore initiated (OngoingRestore flag is null).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Try to validate if the Glacier S3 file is already restored.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String
            .format("Archived S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {null}, S3 bucket name {%s}",
                TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
private void writeBufferToS3(byte[] toWrite, int limit) {
    try (EtlProfilingScope scope = new EtlProfilingScope(parentMetrics, "S3FastLoader.writeToS3")) {
        // Wrap only the first `limit` bytes of the buffer and generate the next part-file key.
        InputStream inputStream = new ByteArrayInputStream(toWrite, 0, limit);
        String s3Key = s3PartFileKeyGenerator.apply(++fileSequenceNumber);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(limit);
        PutObjectRequest putObjectRequest = new PutObjectRequest(s3Bucket, s3Key, inputStream, metadata);
        // Request SSE-KMS encryption when a KMS key ARN is configured.
        if (sseKmsArn != null && !sseKmsArn.isEmpty()) {
            putObjectRequest.setSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(sseKmsArn));
        }
        try {
            amazonS3.putObject(putObjectRequest);
            emitSuccessAndFailureMetrics(scope, true);
        } catch (AmazonClientException e) {
            // Record the failure and abort the stream; S3 write errors are not retried here.
            logger.error(e);
            scope.addCounter(e.getClass().getSimpleName(), 1);
            emitSuccessAndFailureMetrics(scope, false);
            throw new UnrecoverableStreamFailureException("Exception caught trying to write object to S3: ", e);
        }
    }
}
@Test
public void whenUploadingFileWithTransferManager_thenVerifyUploadRequested() {
    File file = mock(File.class);
    PutObjectResult s3Result = mock(PutObjectResult.class);
    when(amazonS3.putObject(anyString(), anyString(), (File) any())).thenReturn(s3Result);
    when(file.getName()).thenReturn(KEY_NAME);

    PutObjectRequest request = new PutObjectRequest(BUCKET_NAME, KEY_NAME, file);
    request.setGeneralProgressListener(progressListener);
    Upload upload = tm.upload(request);

    assertThat(upload).isNotNull();
    verify(amazonS3).putObject(request);
}
@Test
public void testTagObjects()
{
    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(S3_BUCKET_NAME);

    // Create an S3 object summary.
    S3ObjectSummary s3ObjectSummary = new S3ObjectSummary();
    s3ObjectSummary.setKey(TARGET_S3_KEY);

    // Create an S3 object tag.
    Tag tag = new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE);

    // Put a file in S3.
    s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), new ObjectMetadata()), null);

    // Tag the file with an S3 object tag.
    s3Dao.tagObjects(params, new S3FileTransferRequestParamsDto(), Collections.singletonList(s3ObjectSummary), tag);

    // Validate that the object got tagged.
    GetObjectTaggingResult getObjectTaggingResult = s3Operations.getObjectTagging(new GetObjectTaggingRequest(S3_BUCKET_NAME, TARGET_S3_KEY), null);
    assertEquals(Collections.singletonList(tag), getObjectTaggingResult.getTagSet());
}
@Test
public void testValidateGlacierS3FilesRestored()
{
    // Put a 1 byte already restored Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Validate the file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.validateGlacierS3FilesRestored(params);
}
Upload doUpload(String bucket, String fileName, InputStream is, ObjectMetadata metadata) {
    final PutObjectRequest putObjectRequest = new PutObjectRequest(
        bucket,
        fileName,
        is,
        metadata
    );
    final String object = bucket + s3TargetConfigBean.s3Config.delimiter + fileName;
    Upload upload = transferManager.upload(putObjectRequest);
    // Log the lifecycle of the asynchronous transfer as it progresses.
    upload.addProgressListener((ProgressListener) progressEvent -> {
        switch (progressEvent.getEventType()) {
            case TRANSFER_STARTED_EVENT:
                LOG.debug("Started uploading object {} into Amazon S3", object);
                break;
            case TRANSFER_COMPLETED_EVENT:
                LOG.debug("Completed uploading object {} into Amazon S3", object);
                break;
            case TRANSFER_FAILED_EVENT:
                LOG.debug("Failed uploading object {} into Amazon S3", object);
                break;
            default:
                break;
        }
    });
    return upload;
}
public String upload(InputStream is, String fileKey, String fileName, String suffix, Boolean isPublic) throws Exception {
    validateFile(is, suffix);
    // Default to a publicly readable object when no visibility is specified.
    if (isPublic == null) {
        isPublic = Boolean.TRUE;
    }
    if ((is != null) && (fileKey != null)) {
        try {
            byte[] bytes = IOUtils.toByteArray(is);
            s3Client.putObject(
                new PutObjectRequest(
                    s3BucketConfig.getName(),
                    fileKey,
                    new ByteArrayInputStream(bytes),
                    S3ObjectMetadata.getObjectMetadata(bytes)
                ).withCannedAcl(isPublic ? CannedAccessControlList.PublicRead : CannedAccessControlList.AuthenticatedRead)
            );
            return fileName + '.' + suffix;
        } catch (AmazonServiceException | IOException e) {
            throw new BusinessException(Validations.INVALID_S3_BUCKET_CREDENTIALS.getCode());
        } finally {
            is.close();
        }
    } else {
        throw new BusinessException(Validations.INVALID_PARAMETERS.getCode());
    }
}
@Test
public void testPutSinglePart() {
    runner.setProperty("x-custom-prop", "hello");
    prepareTest();

    runner.run(1);

    ArgumentCaptor<PutObjectRequest> captureRequest = ArgumentCaptor.forClass(PutObjectRequest.class);
    Mockito.verify(mockS3Client, Mockito.times(1)).putObject(captureRequest.capture());
    PutObjectRequest request = captureRequest.getValue();
    assertEquals("test-bucket", request.getBucketName());

    runner.assertAllFlowFilesTransferred(PutS3Object.REL_SUCCESS, 1);
    List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(PutS3Object.REL_SUCCESS);
    MockFlowFile ff0 = flowFiles.get(0);
    ff0.assertAttributeEquals(CoreAttributes.FILENAME.key(), "testfile.txt");
    ff0.assertAttributeEquals(PutS3Object.S3_ETAG_ATTR_KEY, "test-etag");
    ff0.assertAttributeEquals(PutS3Object.S3_VERSION_ATTR_KEY, "test-version");
}
private void createEmptyObject(final String bucketName, final String objectName)
        throws AmazonClientException, AmazonServiceException {
    // A stream that is already at EOF, used as the body of a zero-byte object.
    final InputStream im = new InputStream() {
        @Override
        public int read() throws IOException {
            return -1;
        }
    };

    final ObjectMetadata om = new ObjectMetadata();
    om.setContentLength(0L);
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, im, om);
    putObjectRequest.setCannedAcl(cannedACL);
    s3.putObject(putObjectRequest);
    statistics.incrementWriteOps(1);
}
void put(String name, byte[] content, Retention retention, ObjectMetadata objectMetadata) {
    Preconditions.checkNotNull(objectMetadata);
    objectMetadata.setContentLength(content.length);
    PutObjectRequest putRequest = new PutObjectRequest(
        s3BlobStorageConfigurationProperties.getBucket(),
        getFullName(name),
        new ByteArrayInputStream(content),
        objectMetadata);
    // Tag the object with its retention policy so lifecycle rules can act on it.
    List<Tag> tags = new ArrayList<>();
    tags.add(new Tag("retention", retention.toString()));
    putRequest.setTagging(new ObjectTagging(tags));
    amazonS3.putObject(putRequest);
}
@Before
public void setup() throws Exception
{
    client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
    client.createBucket(testMeta.bucketKey);
    inputDir = testMeta.baseDirectory + File.separator + "input";

    // Create two local test files and upload them under the "input/" prefix.
    File file1 = new File(inputDir + File.separator + FILE_1);
    File file2 = new File(inputDir + File.separator + FILE_2);
    FileUtils.writeStringToFile(file1, FILE_1_DATA);
    FileUtils.writeStringToFile(file2, FILE_2_DATA);
    client.putObject(new PutObjectRequest(testMeta.bucketKey, "input/" + FILE_1, file1));
    client.putObject(new PutObjectRequest(testMeta.bucketKey, "input/" + FILE_2, file2));
    files = SCHEME + "://" + accessKey + ":" + secretKey + "@" + testMeta.bucketKey + "/input";
}
@Test
public void testUploadLocalSourceWithNoSSEAlgorithm() throws Exception {
    File file = new File(mockWorkspaceDir + "/source-file");
    FileUtils.write(file, "contents");

    PutObjectResult mockedResponse = new PutObjectResult();
    mockedResponse.setVersionId("some-version-id");
    when(s3Client.putObject(any(PutObjectRequest.class))).thenReturn(mockedResponse);

    S3DataManager d = new S3DataManager(s3Client, s3InputBucketName, s3InputKeyName, "", file.getPath(), "");
    ArgumentCaptor<PutObjectRequest> savedPutObjectRequest = ArgumentCaptor.forClass(PutObjectRequest.class);
    UploadToS3Output result = d.uploadSourceToS3(listener, testWorkSpace);
    assertEquals(result.getSourceLocation(), s3InputBucketName + "/" + s3InputKeyName);

    verify(s3Client).putObject(savedPutObjectRequest.capture());
    assertEquals(savedPutObjectRequest.getValue().getBucketName(), s3InputBucketName);
    assertEquals(savedPutObjectRequest.getValue().getKey(), s3InputKeyName);
    assertEquals(savedPutObjectRequest.getValue().getMetadata().getContentMD5(), S3DataManager.getZipMD5(file));
    assertEquals(savedPutObjectRequest.getValue().getMetadata().getContentLength(), file.length());
    assertNull(savedPutObjectRequest.getValue().getMetadata().getSSEAlgorithm());
}
private void upload(@NotNull File src, @NotNull String key, @NotNull CallbackPayload payload) {
    log.info("Uploading {} to {} in {}", src, key, bucketName);
    final PutObjectRequest request = new PutObjectRequest(bucketName, key, src);
    // A file-based PutObjectRequest starts with null metadata, so create and attach our own
    // before adding user metadata (calling request.getMetadata() here would yield null).
    final ObjectMetadata metadata = new ObjectMetadata();
    request.setMetadata(metadata);
    final String commitId = payload.getAfter();
    if (commitId != null) {
        metadata.addUserMetadata("commit-id", commitId);
    }
    final DateTime timestamp = payload.getTimestamp();
    if (timestamp != null) {
        metadata.addUserMetadata("hook-timestamp",
            ISODateTimeFormat.basicTime().print(timestamp));
    }

    final TimerContext time = uploadTimer.time();
    try {
        s3Client.putObject(request);
    } catch (Exception e) {
        log.error("Couldn't upload to {} in {}", key, bucketName, e);
        throw e;
    } finally {
        time.stop();
    }
    log.info("Uploaded to {} in {}", key, bucketName);
}
private boolean uploadFile(File src, S3Details dstDetails, boolean archive) {
    PutObjectRequest request = new PutObjectRequest(dstDetails.getBucket(),
        FileUtil.appendPath(dstDetails.getKey(), src.getName()), src);
    ObjectMetadata meta = new ObjectMetadata();
    // Send archived uploads straight to the configured archive storage class.
    if (archive) {
        meta.setHeader(Headers.STORAGE_CLASS,
            StorageClass.fromValue(StringUtils.upperCase(archiveStorageClass)));
    }
    if (encrypt) {
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    request.setMetadata(meta);
    try {
        amazonS3.putObject(request);
        return true;
    } catch (Exception e) {
        log.error("Error while uploading file: {}", src, e);
    }
    return false;
}
@Test
public void testTagVersionsTargetTagKeyAlreadyExists()
{
    // Create two S3 object tags having the same tag key.
    List<Tag> tags = Arrays.asList(new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE), new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE_2));

    // Put an S3 file that is already tagged with the first S3 object tag in an S3 bucket that has versioning enabled.
    PutObjectRequest putObjectRequest =
        new PutObjectRequest(MockS3OperationsImpl.MOCK_S3_BUCKET_NAME_VERSIONING_ENABLED, TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]),
            new ObjectMetadata());
    putObjectRequest.setTagging(new ObjectTagging(Collections.singletonList(tags.get(0))));
    s3Operations.putObject(putObjectRequest, null);

    // List S3 versions that match the test S3 key.
    ListVersionsRequest listVersionsRequest =
        new ListVersionsRequest().withBucketName(MockS3OperationsImpl.MOCK_S3_BUCKET_NAME_VERSIONING_ENABLED).withPrefix(TARGET_S3_KEY);
    VersionListing versionListing = s3Operations.listVersions(listVersionsRequest, null);
    assertEquals(1, CollectionUtils.size(versionListing.getVersionSummaries()));
    assertEquals(TARGET_S3_KEY, versionListing.getVersionSummaries().get(0).getKey());
    assertNotNull(versionListing.getVersionSummaries().get(0).getVersionId());

    // Validate that the S3 object is tagged with the first tag only.
    GetObjectTaggingResult getObjectTaggingResult = s3Operations.getObjectTagging(
        new GetObjectTaggingRequest(MockS3OperationsImpl.MOCK_S3_BUCKET_NAME_VERSIONING_ENABLED, TARGET_S3_KEY,
            versionListing.getVersionSummaries().get(0).getVersionId()), null);
    assertEquals(Collections.singletonList(tags.get(0)), getObjectTaggingResult.getTagSet());

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(MockS3OperationsImpl.MOCK_S3_BUCKET_NAME_VERSIONING_ENABLED);

    // Tag the S3 version with the second S3 object tag.
    s3Dao.tagVersions(params, new S3FileTransferRequestParamsDto(), versionListing.getVersionSummaries(), tags.get(1));

    // Validate that the S3 object is now tagged with the second tag only.
    getObjectTaggingResult = s3Operations.getObjectTagging(
        new GetObjectTaggingRequest(MockS3OperationsImpl.MOCK_S3_BUCKET_NAME_VERSIONING_ENABLED, TARGET_S3_KEY,
            versionListing.getVersionSummaries().get(0).getVersionId()), null);
    assertEquals(Collections.singletonList(tags.get(1)), getObjectTaggingResult.getTagSet());
}
@Test
public void checkS3() {
    Mockito.when(s3.putObject(Mockito.any(PutObjectRequest.class)))
        .thenReturn(new PutObjectResult());
    underTest.checkS3Put(s3, "bucket", "key");
}
@Test(expected = SdkClientException.class)
public void checkS3WithError() {
    Mockito.when(s3.putObject(Mockito.any(PutObjectRequest.class)))
        .thenThrow(new SdkClientException(""));
    underTest.checkS3Put(s3, "bucket", "key");
}
@Test
public void shouldUploadALocalFileToS3WithDestinationPrefixUsingEnvVariable() {
    AmazonS3Client mockClient = mockClient();
    Config config = new Config(Maps.builder()
        .with(Constants.SOURCEDESTINATIONS, Maps.builder().with("value", "[{\"source\": \"target/*\", \"destination\": \"\"}]").build())
        .with(Constants.DESTINATION_PREFIX, Maps.builder().with("value", "test/${GO_PIPELINE_COUNTER}/").build())
        .with(Constants.ARTIFACTS_BUCKET, Maps.builder().with("value", "").build())
        .build());

    TaskExecutionResult result = executeMockPublish(
        mockClient,
        config,
        new String[]{"README.md", "s3publish-0.1.31.jar"}
    );

    assertTrue(result.isSuccessful());
    final List<PutObjectRequest> allPutObjectRequests = getPutObjectRequests(mockClient, 2);

    PutObjectRequest readmePutRequest = allPutObjectRequests.get(0);
    assertThat(readmePutRequest.getBucketName(), is("testS3Bucket"));
    assertThat(readmePutRequest.getKey(), is("test/pipelineCounter/README.md"));
    assertNull(readmePutRequest.getMetadata());

    PutObjectRequest jarPutRequest = allPutObjectRequests.get(1);
    assertThat(jarPutRequest.getBucketName(), is("testS3Bucket"));
    assertThat(jarPutRequest.getKey(), is("test/pipelineCounter/s3publish-0.1.31.jar"));
    assertNull(jarPutRequest.getMetadata());
}
protected void putTestFileEncrypted(String key, File file) throws AmazonS3Exception, FileNotFoundException {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    PutObjectRequest putRequest = new PutObjectRequest(BUCKET_NAME, key, new FileInputStream(file), objectMetadata);
    client.putObject(putRequest);
}
@Test
public void testDoCreateFolder() throws Exception {
    S3FileObject notRootBucket = spy(
        new S3FileObject( new S3FileName( SCHEME, BUCKET_NAME, BUCKET_NAME + "/" + origKey, FileType.IMAGINARY ),
            fileSystemSpy ) );
    notRootBucket.createFolder();

    ArgumentCaptor<PutObjectRequest> putObjectRequestArgumentCaptor = ArgumentCaptor.forClass( PutObjectRequest.class );
    verify( s3ServiceMock ).putObject( putObjectRequestArgumentCaptor.capture() );
    assertEquals( BUCKET_NAME, putObjectRequestArgumentCaptor.getValue().getBucketName() );
    assertEquals( "some/key/", putObjectRequestArgumentCaptor.getValue().getKey() );
}
@Test
public void shouldUseStandardStorageClassAsDefault() {
    S3ArtifactStore store = new S3ArtifactStore(mockClient, "foo-bar");
    store.put(new PutObjectRequest("foo-bar", "key", new File("/tmp/baz")));

    verify(mockClient, times(1)).putObject(putCaptor.capture());
    PutObjectRequest putRequest = putCaptor.getValue();
    assertThat(putRequest.getStorageClass(), is("STANDARD"));
}
@Test
public void testWhenSendLargeMessageThenPayloadIsStoredInS3() {
    String messageBody = generateStringWithLength(MORE_THAN_SQS_SIZE_LIMIT);
    SendMessageRequest messageRequest = new SendMessageRequest(SQS_QUEUE_URL, messageBody);
    extendedSqsWithDefaultConfig.sendMessage(messageRequest);
    verify(mockS3, times(1)).putObject(isA(PutObjectRequest.class));
}
@Before
public void initializeS3StreamCapture() {
    // Capture every byte stream written to S3 so tests can assert on the uploaded content.
    when(s3Client.putObject(any(PutObjectRequest.class))).thenAnswer(invocation -> {
        PutObjectRequest putObjectRequest = (PutObjectRequest) invocation.getArguments()[0];
        InputStream inputStream = putObjectRequest.getInputStream();
        s3StreamCapture.add(byteArrayToList(toByteArray(inputStream)));
        return new PutObjectResult();
    });
}
@Override
public void copyFile(File localFile, boolean delete) throws IOException {
    if (s3URI == null) {
        throw new FileNotFoundException("No location set, use moveTo first!");
    }
    String[] bucketKey = getBucketAndKey();
    s3.putObject(new PutObjectRequest(bucketKey[0], bucketKey[1], localFile));
    if (delete) {
        localFile.delete();
    }
}
public static void putImageS3(String bucketName, String key, String fileName) {
    AmazonS3 s3 = new AmazonS3Client();
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    //Region usWest2 = Region.getRegion(s3Region);
    s3.setRegion(usWest2);
    try {
        File file = new File(fileName);
        s3.putObject(new PutObjectRequest(bucketName, key, file));
    } catch (Exception e) {
        System.out.println("ERROR ON IMAGE FILE: " + e.getMessage());
    }
}
@Test
public void testWhenSendMessageWithLargePayloadSupportDisabledThenS3IsNotUsedAndSqsBackendIsResponsibleToFailIt() {
    int messageLength = MORE_THAN_SQS_SIZE_LIMIT;
    String messageBody = generateStringWithLength(messageLength);
    ExtendedClientConfiguration extendedClientConfiguration = new ExtendedClientConfiguration()
        .withLargePayloadSupportDisabled();
    AmazonSQS sqsExtended = spy(new AmazonSQSExtendedClient(mockSqsBackend, extendedClientConfiguration));

    SendMessageRequest messageRequest = new SendMessageRequest(SQS_QUEUE_URL, messageBody);
    sqsExtended.sendMessage(messageRequest);

    verify(mockS3, never()).putObject(isA(PutObjectRequest.class));
    verify(mockSqsBackend).sendMessage(eq(messageRequest));
}
@Test
public void writeCallsS3WithContentStreamLength() {
    testS3LoaderWithStrings("123456789ABCDEF");
    verify(s3Client, times(1)).putObject(putObjectRequestArgumentCaptor.capture());
    final PutObjectRequest request = putObjectRequestArgumentCaptor.getValue();
    assertThat(request.getMetadata().getContentLength(), equalTo(15L));
}
@Test
public void s3WritesHaveCorrectKeysWithIncrementingSequenceNumber() {
    testS3LoaderWithStrings("123456789ABCDEF", "123456789ABCDEF", "123456789ABCDEF");
    verify(s3Client, times(3)).putObject(putObjectRequestArgumentCaptor.capture());
    List<PutObjectRequest> requests = putObjectRequestArgumentCaptor.getAllValues();
    assertThat(requests.get(0).getKey(), equalTo(S3_PREFIX + "/1" + S3_SUFFIX));
    assertThat(requests.get(1).getKey(), equalTo(S3_PREFIX + "/2" + S3_SUFFIX));
    assertThat(requests.get(2).getKey(), equalTo(S3_PREFIX + "/3" + S3_SUFFIX));
}
@Test
public void writeWithExceptionThrowsUnrecoverableStreamFailureException() {
    AmazonClientException s3Exception = new AmazonClientException("oh no!");
    reset(s3Client);
    when(s3Client.putObject(any(PutObjectRequest.class))).thenThrow(s3Exception);
    try {
        testS3LoaderWithStrings("123456789ABCDEF");
        fail("Expected UnrecoverableStreamFailureException");
    } catch (UnrecoverableStreamFailureException e) {
        assertThat(e.getCause(), is(s3Exception));
    }
    verify(s3Client).putObject(any());
}
/**
 * Puts a Java properties file content into S3. The Properties contains a single key-value defined by the given parameter object.
 *
 * @param s3BucketName the S3 bucket name
 * @param s3ObjectKey the S3 object key
 * @param parameter the parameter
 */
private void putParameterIntoS3(String s3BucketName, String s3ObjectKey, Parameter parameter)
{
    String content = parameter.getName() + "=" + parameter.getValue();
    byte[] bytes = content.getBytes();
    ByteArrayInputStream inputStream = new ByteArrayInputStream(bytes);
    ObjectMetadata metadata = new ObjectMetadata();
    // Use the byte count, not the character count, so multi-byte characters are sized correctly.
    metadata.setContentLength(bytes.length);
    PutObjectRequest putObjectRequest = new PutObjectRequest(s3BucketName, s3ObjectKey, inputStream, metadata);
    s3Operations.putObject(putObjectRequest, null);
}