Java code examples for com.amazonaws.services.s3.Headers

Listed below are example usages of com.amazonaws.services.s3.Headers collected from open-source projects; the original source for each example can be viewed on GitHub.
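
Before the project examples, here is a minimal, self-contained sketch of the pattern most of them share: using the Headers constants to set raw S3 object headers (storage class, server-side encryption) on an upload. This sketch is not taken from any project below; the client setup, bucket, and key are placeholder assumptions.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.Headers;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.StorageClass;

import java.io.ByteArrayInputStream;

public class HeadersDemo
{
    public static void main(String[] args)
    {
        // Placeholder client; real code would configure region and credentials.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        byte[] content = new byte[] {0};
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(content.length);

        // Headers.STORAGE_CLASS is the raw "x-amz-storage-class" header.
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier.toString());

        // Request SSE-S3 (AES-256) server-side encryption.
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

        s3.putObject(new PutObjectRequest("example-bucket", "example/key", new ByteArrayInputStream(content), metadata));
    }
}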

Example 1 · Project: presto · File: PrestoS3FileSystem.java
private boolean deleteObject(String key)
{
    try {
        DeleteObjectRequest deleteObjectRequest = new DeleteObjectRequest(getBucketName(uri), key);
        if (requesterPaysEnabled) {
            // TODO use deleteObjectRequest.setRequesterPays() when https://github.com/aws/aws-sdk-java/issues/1219 is fixed
            // currently the method exists, but is ineffective (doesn't set the required HTTP header)
            deleteObjectRequest.putCustomRequestHeader(Headers.REQUESTER_PAYS_HEADER, Constants.REQUESTER_PAYS);
        }

        s3.deleteObject(deleteObjectRequest);
        return true;
    }
    catch (AmazonClientException e) {
        return false;
    }
}
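
A follow-up note on the workaround above (this sketch is not part of the Presto source): putCustomRequestHeader() is inherited from AmazonWebServiceRequest, so the same requester-pays header can be attached to read requests as well. The bucket and key below are placeholders.

// Attach the "x-amz-request-payer: requester" header to a read, mirroring the delete above.
GetObjectRequest getObjectRequest = new GetObjectRequest("example-bucket", "example/key");
getObjectRequest.putCustomRequestHeader(Headers.REQUESTER_PAYS_HEADER, Constants.REQUESTER_PAYS);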
 
Example 2 · Project: entrada · File: S3FileManagerImpl.java
private boolean uploadFile(File src, S3Details dstDetails, boolean archive) {
  PutObjectRequest request = new PutObjectRequest(dstDetails.getBucket(),
      FileUtil.appendPath(dstDetails.getKey(), src.getName()), src);
  ObjectMetadata meta = new ObjectMetadata();

  if (archive) {
    meta.setHeader(Headers.STORAGE_CLASS,
        StorageClass.fromValue(StringUtils.upperCase(archiveStorageClass)));
  }

  if (encrypt) {
    meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
  }

  request.setMetadata(meta);
  try {
    amazonS3.putObject(request);
    return true;
  } catch (Exception e) {
    log.error("Error while uploading file: {}", src, e);
  }

  return false;
}
 
Example 3 · Project: herd · File: S3DaoTest.java
@Test
public void testRestoreObjects()
{
    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Initiate a restore request for the test S3 file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);

    // Validate that there is an ongoing restore request for this object.
    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null);
    assertTrue(objectMetadata.getOngoingRestore());
}
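
herd's s3Dao.restoreObjects() wraps the plain SDK restore call. For reference, here is a minimal sketch of initiating the same kind of restore directly with RestoreObjectRequest; the client handle, bucket, key, expiration, and tier are illustrative assumptions, not herd's actual values.

// Request that an archived object be restored for 7 days using the Bulk retrieval tier.
RestoreObjectRequest restoreRequest = new RestoreObjectRequest("example-bucket", "example/key", 7)
    .withGlacierJobParameters(new GlacierJobParameters().withTier(Tier.Bulk));

// restoreObjectV2 is available on newer 1.11.x SDKs; older SDKs expose restoreObject instead.
s3Client.restoreObjectV2(restoreRequest);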
 
Example 4 · Project: herd · File: S3DaoTest.java
@Test
public void testRestoreObjectsGlacierObjectAlreadyBeingRestored()
{
    // Put a 1 byte Glacier storage class file in S3 flagged as already being restored.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(true);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Initiate a restore request for the test S3 file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);

    // Validate that there is still an ongoing restore request for this object.
    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null);
    assertTrue(objectMetadata.getOngoingRestore());
}
 
Example 5 · Project: herd · File: S3DaoTest.java
@Test
public void testValidateGlacierS3FilesRestored()
{
    // Put a 1 byte already restored Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Validate the file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.validateGlacierS3FilesRestored(params);
}
 
Example 6 · Project: herd · File: S3DaoTest.java
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreNotInitiated()
{
    // Put a 1 byte Glacier storage class file in S3 that has no restore initiated (OngoingRestore flag is null).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Try to validate if the Glacier S3 file is already restored.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String
            .format("Archived S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {null}, S3 bucket name {%s}",
                TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
 
Example 7 · Project: datacollector · File: AmazonS3Util.java
static Map<String, Object> getMetaData(S3Object s3Object) {
  Map<String, Object> metaDataMap = new HashMap<>();

  // Put the standard S3 object metadata.
  metaDataMap.put(Headers.CACHE_CONTROL, s3Object.getObjectMetadata().getCacheControl());
  metaDataMap.put(Headers.CONTENT_DISPOSITION, s3Object.getObjectMetadata().getContentDisposition());
  metaDataMap.put(Headers.CONTENT_ENCODING, s3Object.getObjectMetadata().getContentEncoding());
  metaDataMap.put(Headers.CONTENT_LENGTH, s3Object.getObjectMetadata().getContentLength());
  metaDataMap.put(Headers.CONTENT_RANGE, s3Object.getObjectMetadata().getInstanceLength());
  metaDataMap.put(Headers.CONTENT_MD5, s3Object.getObjectMetadata().getContentMD5());
  metaDataMap.put(Headers.CONTENT_TYPE, s3Object.getObjectMetadata().getContentType());
  metaDataMap.put(Headers.EXPIRES, s3Object.getObjectMetadata().getExpirationTime());
  metaDataMap.put(Headers.ETAG, s3Object.getObjectMetadata().getETag());
  metaDataMap.put(Headers.LAST_MODIFIED, s3Object.getObjectMetadata().getLastModified());

  // Put user metadata, skipping null values.
  Map<String, String> userMetaMap = s3Object.getObjectMetadata().getUserMetadata();
  if (userMetaMap != null) {
    for (Map.Entry<String, String> entry : userMetaMap.entrySet()) {
      if (entry.getValue() != null) {
        metaDataMap.put(entry.getKey(), entry.getValue());
      }
    }
  }
  return metaDataMap;
}
 
Example 8
private static Map<String, Object> getS3Metadata(S3Object s3Object) {
  Map<String, Object> metaDataMap = new HashMap<>();
  metaDataMap.put(Headers.CACHE_CONTROL, s3Object.getObjectMetadata().getCacheControl());
  metaDataMap.put(Headers.CONTENT_DISPOSITION, s3Object.getObjectMetadata().getContentDisposition());
  metaDataMap.put(Headers.CONTENT_ENCODING, s3Object.getObjectMetadata().getContentEncoding());
  metaDataMap.put(Headers.CONTENT_LENGTH, s3Object.getObjectMetadata().getContentLength());
  metaDataMap.put(Headers.CONTENT_RANGE, s3Object.getObjectMetadata().getInstanceLength());
  metaDataMap.put(Headers.CONTENT_MD5, s3Object.getObjectMetadata().getContentMD5());
  metaDataMap.put(Headers.CONTENT_TYPE, s3Object.getObjectMetadata().getContentType());
  metaDataMap.put(Headers.EXPIRES, s3Object.getObjectMetadata().getExpirationTime());
  metaDataMap.put(Headers.ETAG, s3Object.getObjectMetadata().getETag());
  metaDataMap.put(Headers.LAST_MODIFIED, s3Object.getObjectMetadata().getLastModified());
  metaDataMap.put("bucket", s3Object.getBucketName());
  metaDataMap.put("objectKey", s3Object.getKey());
  metaDataMap.put("size", s3Object.getObjectMetadata().getContentLength());
  return metaDataMap;
}
 
Example 9 · Project: hawkbit-extensions · File: S3Repository.java
private ObjectMetadata createObjectMetadata(final String mdMD5Hash16, final String contentType, final File file) {
    final ObjectMetadata objectMetadata = new ObjectMetadata();
    final String mdMD5Hash64 = BaseEncoding.base64().encode(BaseEncoding.base16().lowerCase().decode(mdMD5Hash16));
    objectMetadata.setContentMD5(mdMD5Hash64);
    objectMetadata.setContentType(contentType);
    objectMetadata.setContentLength(file.length());
    if (s3Properties.isServerSideEncryption()) {
        objectMetadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION, s3Properties.getServerSideEncryptionAlgorithm());
    }
    return objectMetadata;
}
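
A side note on the encoding step above: S3's Content-MD5 header expects the base64 encoding of the raw 16-byte digest, while hashes are often carried around as hex strings, hence the base16-decode / base64-encode round trip. A hedged sketch of computing the value directly from the file with Guava (the file path is a placeholder, and the variable mirrors the method above):

// Uses com.google.common.io.Files and com.google.common.hash.Hashing.
// Compute the Content-MD5 value S3 expects: base64 of the raw MD5 digest bytes.
HashCode md5 = Files.asByteSource(new File("/tmp/artifact.bin")).hash(Hashing.md5());
objectMetadata.setContentMD5(BaseEncoding.base64().encode(md5.asBytes()));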
 
Example 10 · Project: entrada · File: S3FileManagerImpl.java
private boolean uploadDirectory(File location, S3Details dstDetails, boolean archive) {

    ObjectMetadataProvider metaDataProvider = (file, meta) -> {

      if (archive) {
        meta.setHeader(Headers.STORAGE_CLASS,
            StorageClass.fromValue(StringUtils.upperCase(archiveStorageClass)));
      }

      if (encrypt) {
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
      }
    };

    MultipleFileUpload upload = transferManager
        .uploadDirectory(dstDetails.getBucket(), dstDetails.getKey(), location, true,
            metaDataProvider);


    if (log.isDebugEnabled()) {
      ProgressListener progressListener = progressEvent -> log
          .debug("S3 Transferred bytes: " + progressEvent.getBytesTransferred());
      upload.addProgressListener(progressListener);
    }

    try {
      upload.waitForCompletion();
      return true;
    } catch (Exception e) {
      log.error("Error while uploading directory: {}", location, e);
    }

    return false;
  }
 
Example 11 · Project: herd · File: S3DaoImpl.java
/**
 * Prepares the object metadata for server side encryption and reduced redundancy storage.
 *
 * @param params the parameters.
 * @param metadata the metadata to prepare.
 */
private void prepareMetadata(final S3FileTransferRequestParamsDto params, ObjectMetadata metadata)
{
    // Set the server side encryption
    if (params.getKmsKeyId() != null)
    {
        /*
         * TODO Use proper way to set KMS once AWS provides a way.
         * We are modifying the raw headers directly since TransferManager's uploadFileList operation does not provide a way to set a KMS key ID.
         * This would normally cause some issues when uploading where an MD5 checksum validation exception will be thrown, even though the object is
         * correctly uploaded.
         * To get around this, a system property defined at
         * com.amazonaws.services.s3.internal.SkipMd5CheckStrategy.DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY must be set.
         */
        metadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
        metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID, params.getKmsKeyId().trim());
    }
    else
    {
        metadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
    }

    // If specified, set the metadata to use RRS.
    if (Boolean.TRUE.equals(params.isUseRrs()))
    {
        // TODO: For a single-file upload, RRS can be set directly on the PutObjectRequest. For uploadDirectory, setting
        // the storage class header on the metadata is the only way, even though setHeader() is flagged as For Internal Use Only.
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.ReducedRedundancy.toString());
    }
}
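
Companion sketch for the TODO above (the placement is an assumption; herd may set this elsewhere): the client-side MD5 validation that the manually-set KMS header trips can be disabled through the system property named in the comment.

// Disable PUT-object MD5 validation, which the raw KMS header would otherwise fail.
System.setProperty(
    com.amazonaws.services.s3.internal.SkipMd5CheckStrategy.DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY,
    "true");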
 
Example 12 · Project: herd · File: S3DaoTest.java
@Test
public void testPrepareMetadataAssertSetKmsHeaders()
{
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);

    try
    {
        String s3BucketName = "s3BucketName";
        String s3KeyPrefix = "s3KeyPrefix";
        String kmsKeyId = "kmsKeyId";

        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(s3BucketName);
        s3FileTransferRequestParamsDto.setS3KeyPrefix(s3KeyPrefix);
        s3FileTransferRequestParamsDto.setKmsKeyId(kmsKeyId);

        when(mockS3Operations.putObject(any(), any())).then(new Answer<PutObjectResult>()
        {
            @Override
            public PutObjectResult answer(InvocationOnMock invocation) throws Throwable
            {
                PutObjectRequest putObjectRequest = invocation.getArgument(0);
                ObjectMetadata metadata = putObjectRequest.getMetadata();
                assertEquals("aws:kms", metadata.getSSEAlgorithm());
                assertEquals(kmsKeyId, metadata.getRawMetadata().get(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID));
                return new PutObjectResult();
            }
        });

        s3Dao.createDirectory(s3FileTransferRequestParamsDto);
    }
    finally
    {
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
    }
}
 
Example 13 · Project: herd · File: S3DaoTest.java
@Test
public void testRestoreObjectsAmazonServiceException()
{
    // Build a mock file path that triggers an Amazon service exception when we request to restore an object.
    String testKey = String.format("%s/%s", TEST_S3_KEY_PREFIX, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION);

    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), testKey, new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to initiate a restore request for a mocked S3 file that would trigger an Amazon service exception when we request to restore an object.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(testKey)));
        s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
        fail("Should throw an IllegalStateException when an S3 restore object operation fails.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
                "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", testKey,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
 
Example 14 · Project: herd · File: S3DaoTest.java
@Test
public void testRestoreObjectsNonGlacierNonDeepArchiveObject()
{
    // Put a 1 byte non-Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Standard);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Try to initiate a restore request for a non-Glacier file.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
        fail("Should throw an IllegalStateException when file has a non-Glacier storage class.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
                "Reason: object is not in Glacier or DeepArchive (Service: null; Status Code: 0; Error Code: null; Request ID: null)", TARGET_S3_KEY,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
 
Example 15 · Project: herd · File: S3DaoTest.java
@Test
public void testValidateGlacierS3FilesRestoredAmazonServiceException()
{
    // Build a mock file path that triggers an Amazon service exception when we request S3 metadata for the object.
    String testKey = String.format("%s/%s", TEST_S3_KEY_PREFIX, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION);

    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), testKey, new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to validate if the Glacier S3 file is already restored for a mocked S3 file
    // that triggers an Amazon service exception when we request S3 metadata for the object.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(testKey)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalStateException when Glacier S3 object validation fails due to an Amazon service exception.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Fail to check restore status for \"%s\" key in \"%s\" bucket. " +
                "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", testKey,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
 
Example 16 · Project: herd · File: S3DaoTest.java
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreInProgress()
{
    // Put a 1 byte Glacier storage class file in S3 that is still being restored (OngoingRestore flag is true).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(true);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Try to validate if the Glacier S3 file is already restored.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String
            .format("Archived S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {true}, S3 bucket name {%s}",
                TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
 
Example 17 · Project: pipeline-aws-plugin · File: S3UploadStep.java
@Override
public Void invoke(File localFile, VirtualChannel channel) throws IOException, InterruptedException {
	TransferManager mgr = TransferManagerBuilder.standard()
			.withS3Client(AWSClientFactory.create(this.amazonS3ClientOptions.createAmazonS3ClientBuilder(), this.envVars))
			.build();
	final MultipleFileUpload fileUpload;
	ObjectMetadataProvider metadatasProvider = (file, meta) -> {
		if (meta != null) {
			if (RemoteListUploader.this.metadatas != null && RemoteListUploader.this.metadatas.size() > 0) {
				meta.setUserMetadata(RemoteListUploader.this.metadatas);
			}
			if (RemoteListUploader.this.acl != null) {
				meta.setHeader(Headers.S3_CANNED_ACL, RemoteListUploader.this.acl);
			}
			if (RemoteListUploader.this.cacheControl != null && !RemoteListUploader.this.cacheControl.isEmpty()) {
				meta.setCacheControl(RemoteListUploader.this.cacheControl);
			}
			if (RemoteListUploader.this.contentEncoding != null && !RemoteListUploader.this.contentEncoding.isEmpty()) {
				meta.setContentEncoding(RemoteListUploader.this.contentEncoding);
			}
			if (RemoteListUploader.this.contentType != null && !RemoteListUploader.this.contentType.isEmpty()) {
				meta.setContentType(RemoteListUploader.this.contentType);
			}
			if (RemoteListUploader.this.sseAlgorithm != null && !RemoteListUploader.this.sseAlgorithm.isEmpty()) {
				meta.setSSEAlgorithm(RemoteListUploader.this.sseAlgorithm);
			}
			if (RemoteListUploader.this.kmsId != null && !RemoteListUploader.this.kmsId.isEmpty()) {
				final SSEAwsKeyManagementParams sseAwsKeyManagementParams = new SSEAwsKeyManagementParams(RemoteListUploader.this.kmsId);
				// The SSE algorithm header must carry "aws:kms"; the key id itself belongs in the dedicated KMS key header.
				meta.setSSEAlgorithm(sseAwsKeyManagementParams.getEncryption());
				meta.setHeader(
						Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID,
						sseAwsKeyManagementParams.getAwsKmsKeyId()
				);
			}

		}
	};

	ObjectTaggingProvider objectTaggingProvider = (uploadContext) -> {
		List<Tag> tagList = new ArrayList<>();

		// Add a tag for each configured key/value pair.
		if (tags != null) {
			for (Map.Entry<String, String> entry : tags.entrySet()) {
				Tag tag = new Tag(entry.getKey(), entry.getValue());
				tagList.add(tag);
			}
		}
		return new ObjectTagging(tagList);
	};

	try {
		fileUpload = mgr.uploadFileList(this.bucket, this.path, localFile, this.fileList, metadatasProvider, objectTaggingProvider);
		for (final Upload upload : fileUpload.getSubTransfers()) {
			upload.addProgressListener((ProgressListener) progressEvent -> {
				if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
					RemoteListUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());
				}
			});
		}
		fileUpload.waitForCompletion();
	}
	finally {
		mgr.shutdownNow();
	}
	return null;
}
 
Example 18 · Project: herd
@Test
public void testRestoreBusinessObjectDataAmazonServiceException() throws Exception
{
    // Create S3FileTransferRequestParamsDto to access the S3 bucket location.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(S3_BUCKET_NAME + "/" + TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE,
            NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Create database entities required for testing.
    BusinessObjectDataEntity businessObjectDataEntity =
        businessObjectDataServiceTestHelper.createDatabaseEntitiesForInitiateRestoreTesting(businessObjectDataKey);

    // Get the storage unit entity.
    StorageUnitEntity storageUnitEntity = storageUnitDaoHelper.getStorageUnitEntity(STORAGE_NAME, businessObjectDataEntity);

    // Get the expected S3 key prefix for the business object data key.
    String s3KeyPrefix = AbstractServiceTest
        .getExpectedS3KeyPrefix(businessObjectDataKey, AbstractServiceTest.DATA_PROVIDER_NAME, AbstractServiceTest.PARTITION_KEY,
            AbstractServiceTest.NO_SUB_PARTITION_KEYS);

    // Add a mocked S3 file name to the storage unit that would trigger an Amazon service exception when we request to restore objects.
    storageFileDaoTestHelper
        .createStorageFileEntity(storageUnitEntity, String.format("%s/%s", s3KeyPrefix, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION),
            FILE_SIZE_1_KB, ROW_COUNT);

    try
    {
        // Put relative Glacier storage class files into the Glacier S3 bucket flagged as not being currently restored.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles())
        {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFileEntity.getPath(),
                new ByteArrayInputStream(new byte[storageFileEntity.getFileSizeBytes().intValue()]), metadata), NO_S3_CLIENT);
        }

        // Try to initiate a restore request for the business object data when S3 restore object operation fails with an Amazon service exception.
        try
        {
            businessObjectDataService.restoreBusinessObjectData(businessObjectDataKey, EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
            fail();
        }
        catch (IllegalStateException e)
        {
            assertEquals(String.format("Failed to initiate a restore request for \"%s/%s\" key in \"%s\" bucket. " +
                    "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", s3KeyPrefix,
                MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION, S3_BUCKET_NAME), e.getMessage());
        }

        // Validate that the storage unit status is still ARCHIVED.
        assertEquals(StorageUnitStatusEntity.ARCHIVED, storageUnitEntity.getStatus().getCode());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
 
Example 19 · Project: herd
@Test
public void testRestoreBusinessObjectDataNonGlacierNonDeepArchiveStorageClass() throws Exception
{
    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto glacierS3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(S3_BUCKET_NAME + "/" + TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE,
            NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Create database entities required for testing.
    BusinessObjectDataEntity businessObjectDataEntity = businessObjectDataServiceTestHelper
        .createDatabaseEntitiesForInitiateRestoreTesting(businessObjectDataKey, AbstractServiceTest.STORAGE_NAME, AbstractServiceTest.S3_BUCKET_NAME,
            StorageUnitStatusEntity.ARCHIVED, Collections.singletonList(LOCAL_FILE));

    // Get the storage unit entity.
    StorageUnitEntity storageUnitEntity = storageUnitDaoHelper.getStorageUnitEntity(STORAGE_NAME, businessObjectDataEntity);

    try
    {
        // Put relative non-Glacier non-DeepArchive storage class files into the S3 bucket.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles())
        {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Standard);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFileEntity.getPath(),
                new ByteArrayInputStream(new byte[storageFileEntity.getFileSizeBytes().intValue()]), metadata), NO_S3_CLIENT);
        }

        // Try to initiate a restore request for the business object data.
        try
        {
            businessObjectDataService.restoreBusinessObjectData(businessObjectDataKey, EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
            fail();
        }
        catch (IllegalArgumentException e)
        {
            assertEquals(String.format("S3 file \"%s\" is not archived (found %s storage class when expecting %s or %s). S3 Bucket Name: \"%s\"",
                Iterables.get(storageUnitEntity.getStorageFiles(), 0).getPath(), StorageClass.Standard.toString(), StorageClass.Glacier.toString(),
                StorageClass.DeepArchive.toString(), S3_BUCKET_NAME), e.getMessage());
        }

        // Validate that the storage unit status is still ARCHIVED.
        assertEquals(StorageUnitStatusEntity.ARCHIVED, storageUnitEntity.getStatus().getCode());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(glacierS3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(glacierS3FileTransferRequestParamsDto);
        }

        s3Operations.rollback();
    }
}
 
Example 20 · Project: herd
@Test
public void testFinalizeRestoreAmazonServiceException() throws Exception
{
    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE,
            NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Get the expected S3 key prefix for the business object data key.
    String s3KeyPrefix = AbstractServiceTest
        .getExpectedS3KeyPrefix(businessObjectDataKey, AbstractServiceTest.DATA_PROVIDER_NAME, AbstractServiceTest.PARTITION_KEY,
            AbstractServiceTest.NO_SUB_PARTITION_KEYS);

    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(s3KeyPrefix + "/").build();

    // Create database entities required for testing.
    BusinessObjectDataEntity businessObjectDataEntity =
        businessObjectDataServiceTestHelper.createDatabaseEntitiesForFinalizeRestoreTesting(businessObjectDataKey);

    // Get the storage unit entity.
    StorageUnitEntity storageUnitEntity = storageUnitDaoHelper.getStorageUnitEntity(STORAGE_NAME, businessObjectDataEntity);

    // Add a mocked S3 file name to the storage unit that would trigger an Amazon service exception when we try to get metadata for the object.
    storageFileDaoTestHelper
        .createStorageFileEntity(storageUnitEntity, String.format("%s/%s", s3KeyPrefix, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION),
            FILE_SIZE_1_KB, ROW_COUNT);

    // Create a storage unit key.
    BusinessObjectDataStorageUnitKey storageUnitKey = storageUnitHelper.createStorageUnitKey(businessObjectDataKey, STORAGE_NAME);

    try
    {
        // Put relative "already restored" Glacier storage class S3 files in the S3 bucket.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles())
        {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFileEntity.getPath(),
                new ByteArrayInputStream(new byte[storageFileEntity.getFileSizeBytes().intValue()]), metadata), null);
        }

        // Try to finalize a restore for the storage unit when get S3 object metadata operation fails with an Amazon service exception.
        try
        {
            businessObjectDataFinalizeRestoreService.finalizeRestore(storageUnitKey);
            fail("Should throw an IllegalStateException when a get S3 object metadata operation fails.");
        }
        catch (IllegalStateException e)
        {
            assertEquals(String.format("Fail to check restore status for \"%s/%s\" key in \"%s\" bucket. " +
                    "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", s3KeyPrefix,
                MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION, S3_BUCKET_NAME), e.getMessage());
        }

        // Validate that the storage unit status is still in RESTORING state.
        assertEquals(StorageUnitStatusEntity.RESTORING, storageUnitEntity.getStatus().getCode());

        // Validate that we have the S3 files at the expected S3 location.
        assertEquals(storageUnitEntity.getStorageFiles().size(), s3Dao.listDirectory(s3FileTransferRequestParamsDto).size());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
 
Example 21 · Project: herd
@Test
public void testExecuteS3SpecificSteps() throws Exception
{
    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE, SUBPARTITION_VALUES,
            DATA_VERSION);

    // Create a list of storage files.
    List<StorageFile> storageFiles = new ArrayList<>();
    for (String filePath : LOCAL_FILES)
    {
        storageFiles.add(new StorageFile(TEST_S3_KEY_PREFIX + "/" + filePath, FILE_SIZE_1_KB, NO_ROW_COUNT));
    }

    // Create a business object data restore DTO.
    BusinessObjectDataRestoreDto businessObjectDataRestoreDto =
        new BusinessObjectDataRestoreDto(businessObjectDataKey, STORAGE_NAME, NO_S3_ENDPOINT, S3_BUCKET_NAME, TEST_S3_KEY_PREFIX, NO_STORAGE_UNIT_STATUS,
            NO_STORAGE_UNIT_STATUS, storageFiles, NO_EXCEPTION, ARCHIVE_RETRIEVAL_OPTION);

    try
    {
        // Put relative Glacier storage class S3 files in the S3 bucket.
        for (StorageFile storageFile : storageFiles)
        {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFile.getFilePath(),
                new ByteArrayInputStream(new byte[storageFile.getFileSizeBytes().intValue()]), metadata), NO_S3_CLIENT);
        }

        // Execute S3 specific steps to finalize a restore for the Glacier storage unit.
        businessObjectDataFinalizeRestoreHelperService.executeS3SpecificSteps(businessObjectDataRestoreDto);

        // Validate that we have the restored S3 files at the expected S3 location.
        assertEquals(storageFiles.size(), s3Dao.listDirectory(s3FileTransferRequestParamsDto).size());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }

        s3Operations.rollback();
    }
}
 
Example 22 · Project: herd
@Test
public void testExecuteS3SpecificStepsGlacierS3FileStillRestoring() throws Exception
{
    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE, SUBPARTITION_VALUES,
            DATA_VERSION);

    // Create a business object data restore DTO.
    BusinessObjectDataRestoreDto businessObjectDataRestoreDto =
        new BusinessObjectDataRestoreDto(businessObjectDataKey, STORAGE_NAME, NO_S3_ENDPOINT, S3_BUCKET_NAME, TEST_S3_KEY_PREFIX, NO_STORAGE_UNIT_STATUS,
            NO_STORAGE_UNIT_STATUS, Arrays.asList(new StorageFile(TEST_S3_KEY_PREFIX + "/" + LOCAL_FILE, FILE_SIZE_1_KB, NO_ROW_COUNT)),
            NO_EXCEPTION, ARCHIVE_RETRIEVAL_OPTION);

    try
    {
        // Put a "still restoring" Glacier storage class S3 file in the Glacier S3 bucket.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
        metadata.setOngoingRestore(true);
        s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, String.format("%s/%s", TEST_S3_KEY_PREFIX, LOCAL_FILE),
            new ByteArrayInputStream(new byte[(int) FILE_SIZE_1_KB]), metadata), NO_S3_CLIENT);

        // Try to execute S3 specific steps to finalize a restore for the storage unit when Glacier S3 file is still restoring.
        try
        {
            businessObjectDataFinalizeRestoreHelperService.executeS3SpecificSteps(businessObjectDataRestoreDto);
            fail();
        }
        catch (IllegalArgumentException e)
        {
            assertEquals(String
                .format("Archived S3 file \"%s/%s\" is not restored. " + "StorageClass {GLACIER}, OngoingRestore flag {true}, S3 bucket name {%s}",
                    TEST_S3_KEY_PREFIX, LOCAL_FILE, S3_BUCKET_NAME), e.getMessage());
        }

        // Validate that we have a Glacier S3 file at the expected S3 location.
        assertEquals(1, s3Dao.listDirectory(s3FileTransferRequestParamsDto).size());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }

        s3Operations.rollback();
    }
}
 
Example 23 · Project: herd · File: S3DaoImplTest.java
@Test
public void testRestoreObjectsInDeepArchiveWithExpeditedArchiveRetrievalOption()
{
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy =
        new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an Object Metadata with DeepArchive storage class.
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.DeepArchive);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);

    doThrow(new AmazonServiceException("Retrieval option is not supported by this storage class")).when(s3Operations)
        .restoreObject(any(RestoreObjectRequest.class), any(AmazonS3.class));

    try
    {
        s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, Tier.Expedited.toString());
        fail();
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
                "Reason: Retrieval option is not supported by this storage class (Service: null; Status Code: 0; Error Code: null; Request ID: null)",
            TEST_FILE, S3_BUCKET_NAME), e.getMessage());
    }
}
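
For context on why the Expedited request fails here: S3 Glacier Deep Archive supports only the Standard and Bulk retrieval tiers. A sketch of a valid restore request for the same object, reusing the test's constants (s3Client is an assumed real client handle; the test itself goes through the mocked s3Operations):

// Deep Archive accepts the Standard or Bulk tier; Expedited is rejected.
s3Client.restoreObjectV2(new RestoreObjectRequest(S3_BUCKET_NAME, TEST_FILE, EXPIRATION_IN_DAYS)
    .withGlacierJobParameters(new GlacierJobParameters().withTier(Tier.Standard)));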
 
Example 24 · Project: herd · File: S3DaoImplTest.java
private void testRestoreObjectsWithS3Exception(String exceptionMessage, int statusCode)
{
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy =
        new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create object metadata with the DeepArchive storage class.
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.DeepArchive);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<RestoreObjectRequest> requestStoreCaptor = ArgumentCaptor.forClass(RestoreObjectRequest.class);

    // Create an Amazon S3 Exception
    AmazonS3Exception amazonS3Exception = new AmazonS3Exception(exceptionMessage);
    amazonS3Exception.setStatusCode(statusCode);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
    doThrow(amazonS3Exception).when(s3Operations).restoreObject(requestStoreCaptor.capture(), s3ClientCaptor.capture());

    try
    {
        // Call the method under test.
        s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, Tier.Standard.toString());

        // If this is not the "restore already in progress" (409) case, the restore call above should have thrown,
        // so reaching this point is a failure. For the 409 case, execution continues normally.
        if (!exceptionMessage.equals(RESTORE_ALREADY_IN_PROGRESS_EXCEPTION_MESSAGE))
        {
            // Should not be here. Fail!
            fail();
        }
        else
        {
            RestoreObjectRequest requestStore = requestStoreCaptor.getValue();
            assertEquals(S3_BUCKET_NAME, s3BucketNameCaptor.getValue());
            assertEquals(TEST_FILE, keyCaptor.getValue());

            // Verify the expected retrieval tier was set on the request (Bulk would be the default if no option were provided).
            assertEquals(StringUtils.isNotEmpty(Tier.Standard.toString())
                ? Tier.Standard.toString() : Tier.Bulk.toString(), requestStore.getGlacierJobParameters().getTier());
        }
    }
    catch (IllegalStateException illegalStateException)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
                "Reason: com.amazonaws.services.s3.model.AmazonS3Exception: %s " +
                "(Service: null; Status Code: %s; Error Code: null; Request ID: null; S3 Extended Request ID: null), S3 Extended Request ID: null",
            TEST_FILE, S3_BUCKET_NAME, exceptionMessage, statusCode), illegalStateException.getMessage());
    }

    // Verify the external calls
    verify(retryPolicyFactory).getRetryPolicy();
    verify(s3Operations).getObjectMetadata(anyString(), anyString(), any(AmazonS3Client.class));
    verify(s3Operations).restoreObject(any(RestoreObjectRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}
 
Example 25 · Project: herd · File: S3DaoImplTest.java
/**
 * Runs the restore objects method under test and verifies the resulting restore request.
 *
 * @param archiveRetrievalOption the archive retrieval option
 * @param storageClass the storage class to return in the mocked S3 object metadata
 */
private void runRestoreObjects(String archiveRetrievalOption, StorageClass storageClass)
{
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy =
        new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create object metadata with the specified storage class.
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, storageClass);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<RestoreObjectRequest> requestStoreCaptor = ArgumentCaptor.forClass(RestoreObjectRequest.class);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
    doNothing().when(s3Operations).restoreObject(requestStoreCaptor.capture(), s3ClientCaptor.capture());

    s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, archiveRetrievalOption);

    RestoreObjectRequest requestStore = requestStoreCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, s3BucketNameCaptor.getValue());
    assertEquals(TEST_FILE, keyCaptor.getValue());

    // Verify Bulk option is used when the option is not provided
    assertEquals(StringUtils.isNotEmpty(archiveRetrievalOption)
        ? archiveRetrievalOption : Tier.Bulk.toString(), requestStore.getGlacierJobParameters().getTier());

    // Verify the external calls
    verify(retryPolicyFactory).getRetryPolicy();
    verify(s3Operations).getObjectMetadata(anyString(), anyString(), any(AmazonS3Client.class));
    verify(s3Operations).restoreObject(any(RestoreObjectRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}