com.amazonaws.services.s3.model.ObjectMetadata#setOngoingRestore() code examples

The following examples show how to use com.amazonaws.services.s3.model.ObjectMetadata#setOngoingRestore(); you can also follow each project link to browse the full source on GitHub.
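
In production code you rarely call setOngoingRestore() yourself: Amazon S3 derives the ongoing-restore flag from the x-amz-restore response header, and the AWS SDK for Java v1 exposes it through ObjectMetadata#getOngoingRestore(). The setter is mainly useful for priming test doubles, which is exactly how the herd tests below use it. As a minimal sketch of the read side (bucket and key names are placeholders):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class RestoreStatusCheck
{
    public static void main(String[] args)
    {
        // Hypothetical bucket and key; substitute your own values.
        String bucketName = "my-bucket";
        String key = "my-archived-object";

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // HEAD the object; S3 populates the ongoing-restore flag from the
        // x-amz-restore header of the HEAD Object response.
        ObjectMetadata metadata = s3.getObjectMetadata(bucketName, key);

        // getOngoingRestore() returns null if no restore was ever initiated,
        // TRUE while a restore is in progress, and FALSE once the temporary
        // restored copy is available.
        Boolean ongoingRestore = metadata.getOngoingRestore();
        if (Boolean.FALSE.equals(ongoingRestore))
        {
            System.out.println("Restored copy expires at: " + metadata.getRestoreExpirationTime());
        }
        else
        {
            System.out.println("Ongoing restore flag: " + ongoingRestore);
        }
    }
}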

Example 1 (project: herd, file: S3DaoTest.java)
@Test
public void testRestoreObjects()
{
    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Initiate a restore request for the test S3 file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);

    // Validate that there is an ongoing restore request for this object.
    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null);
    assertTrue(objectMetadata.getOngoingRestore());
}
 
Example 2 (project: herd, file: S3DaoTest.java)
@Test
public void testRestoreObjectsGlacierObjectAlreadyBeingRestored()
{
    // Put a 1 byte Glacier storage class file in S3 flagged as already being restored.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(true);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Initiate a restore request for the test S3 file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);

    // Validate that there is still an ongoing restore request for this object.
    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null);
    assertTrue(objectMetadata.getOngoingRestore());
}
 
Example 3 (project: herd, file: S3DaoTest.java)
@Test
public void testValidateGlacierS3FilesRestored()
{
    // Put a 1 byte already restored Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Validate the file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.validateGlacierS3FilesRestored(params);
}
 
Example 4 (project: herd, file: S3DaoTest.java)
@Test
public void testRestoreObjectsAmazonServiceException()
{
    // Build a mock file path that triggers an Amazon service exception when we request to restore an object.
    String testKey = String.format("%s/%s", TEST_S3_KEY_PREFIX, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION);

    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), testKey, new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to initiate a restore request for a mocked S3 file that would trigger an Amazon service exception when we request to restore an object.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(testKey)));
        s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
        fail("Should throw an IllegalStateException when an S3 restore object operation fails.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
                "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", testKey,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
 
Example 5 (project: herd, file: S3DaoTest.java)
@Test
public void testRestoreObjectsNonGlacierNonDeepArchiveObject()
{
    // Put a 1 byte non-Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Standard);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Try to initiate a restore request for a non-Glacier file.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
        fail("Should throw an IllegalStateException when file has a non-Glacier storage class.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
                "Reason: object is not in Glacier or DeepArchive (Service: null; Status Code: 0; Error Code: null; Request ID: null)", TARGET_S3_KEY,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
 
Example 6 (project: herd, file: S3DaoTest.java)
@Test
public void testValidateGlacierS3FilesRestoredAmazonServiceException()
{
    // Build a mock file path that triggers an Amazon service exception when we request S3 metadata for the object.
    String testKey = String.format("%s/%s", TEST_S3_KEY_PREFIX, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION);

    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), testKey, new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to validate if the Glacier S3 file is already restored for a mocked S3 file
    // that triggers an Amazon service exception when we request S3 metadata for the object.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(testKey)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalStateException when Glacier S3 object validation fails due to an Amazon service exception.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Fail to check restore status for \"%s\" key in \"%s\" bucket. " +
                "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", testKey,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
 
Example 7 (project: herd, file: S3DaoTest.java)
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreInProgress()
{
    // Put a 1 byte Glacier storage class file in S3 that is still being restored (OngoingRestore flag is true).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(true);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Try to validate if the Glacier S3 file is already restored.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String
            .format("Archived S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {true}, S3 bucket name {%s}",
                TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
 
Example 8 (project: herd)
@Test
public void testRestoreBusinessObjectDataAmazonServiceException() throws Exception
{
    // Create S3FileTransferRequestParamsDto to access the S3 bucket location.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(S3_BUCKET_NAME + "/" + TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE,
            NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Create database entities required for testing.
    BusinessObjectDataEntity businessObjectDataEntity =
        businessObjectDataServiceTestHelper.createDatabaseEntitiesForInitiateRestoreTesting(businessObjectDataKey);

    // Get the storage unit entity.
    StorageUnitEntity storageUnitEntity = storageUnitDaoHelper.getStorageUnitEntity(STORAGE_NAME, businessObjectDataEntity);

    // Get the expected S3 key prefix for the business object data key.
    String s3KeyPrefix = AbstractServiceTest
        .getExpectedS3KeyPrefix(businessObjectDataKey, AbstractServiceTest.DATA_PROVIDER_NAME, AbstractServiceTest.PARTITION_KEY,
            AbstractServiceTest.NO_SUB_PARTITION_KEYS);

    // Add a mocked S3 file name to the storage unit that would trigger an Amazon service exception when we request to restore objects.
    storageFileDaoTestHelper
        .createStorageFileEntity(storageUnitEntity, String.format("%s/%s", s3KeyPrefix, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION),
            FILE_SIZE_1_KB, ROW_COUNT);

    try
    {
        // Put relative Glacier storage class files into the Glacier S3 bucket flagged as not being currently restored.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles())
        {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFileEntity.getPath(),
                new ByteArrayInputStream(new byte[storageFileEntity.getFileSizeBytes().intValue()]), metadata), NO_S3_CLIENT);
        }

        // Try to initiate a restore request for the business object data when S3 restore object operation fails with an Amazon service exception.
        try
        {
            businessObjectDataService.restoreBusinessObjectData(businessObjectDataKey, EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
            fail();
        }
        catch (IllegalStateException e)
        {
            assertEquals(String.format("Failed to initiate a restore request for \"%s/%s\" key in \"%s\" bucket. " +
                    "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", s3KeyPrefix,
                MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION, S3_BUCKET_NAME), e.getMessage());
        }

        // Validate that the storage unit status is still ARCHIVED.
        assertEquals(StorageUnitStatusEntity.ARCHIVED, storageUnitEntity.getStatus().getCode());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
 
Example 9 (project: herd)
@Test
public void testRestoreBusinessObjectDataNonGlacierNonDeepArchiveStorageClass() throws Exception
{
    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto glacierS3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(S3_BUCKET_NAME + "/" + TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE,
            NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Create database entities required for testing.
    BusinessObjectDataEntity businessObjectDataEntity = businessObjectDataServiceTestHelper
        .createDatabaseEntitiesForInitiateRestoreTesting(businessObjectDataKey, AbstractServiceTest.STORAGE_NAME, AbstractServiceTest.S3_BUCKET_NAME,
            StorageUnitStatusEntity.ARCHIVED, Collections.singletonList(LOCAL_FILE));

    // Get the storage unit entity.
    StorageUnitEntity storageUnitEntity = storageUnitDaoHelper.getStorageUnitEntity(STORAGE_NAME, businessObjectDataEntity);

    try
    {
        // Put relative non-Glacier non-DeepArchive storage class files into the S3 bucket.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles())
        {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Standard);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFileEntity.getPath(),
                new ByteArrayInputStream(new byte[storageFileEntity.getFileSizeBytes().intValue()]), metadata), NO_S3_CLIENT);
        }

        // Try to initiate a restore request for the business object data.
        try
        {
            businessObjectDataService.restoreBusinessObjectData(businessObjectDataKey, EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
            fail();
        }
        catch (IllegalArgumentException e)
        {
            assertEquals(String.format("S3 file \"%s\" is not archived (found %s storage class when expecting %s or %s). S3 Bucket Name: \"%s\"",
                Iterables.get(storageUnitEntity.getStorageFiles(), 0).getPath(), StorageClass.Standard.toString(), StorageClass.Glacier.toString(),
                StorageClass.DeepArchive.toString(), S3_BUCKET_NAME), e.getMessage());
        }

        // Validate that the storage unit status is still ARCHIVED.
        assertEquals(StorageUnitStatusEntity.ARCHIVED, storageUnitEntity.getStatus().getCode());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(glacierS3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(glacierS3FileTransferRequestParamsDto);
        }

        s3Operations.rollback();
    }
}
 
Example 10 (project: herd)
@Test
public void testFinalizeRestoreAmazonServiceException() throws Exception
{
    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE,
            NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Get the expected S3 key prefix for the business object data key.
    String s3KeyPrefix = AbstractServiceTest
        .getExpectedS3KeyPrefix(businessObjectDataKey, AbstractServiceTest.DATA_PROVIDER_NAME, AbstractServiceTest.PARTITION_KEY,
            AbstractServiceTest.NO_SUB_PARTITION_KEYS);

    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(s3KeyPrefix + "/").build();

    // Create database entities required for testing.
    BusinessObjectDataEntity businessObjectDataEntity =
        businessObjectDataServiceTestHelper.createDatabaseEntitiesForFinalizeRestoreTesting(businessObjectDataKey);

    // Get the storage unit entity.
    StorageUnitEntity storageUnitEntity = storageUnitDaoHelper.getStorageUnitEntity(STORAGE_NAME, businessObjectDataEntity);

    // Add a mocked S3 file name to the storage unit that would trigger an Amazon service exception when we try to get metadata for the object.
    storageFileDaoTestHelper
        .createStorageFileEntity(storageUnitEntity, String.format("%s/%s", s3KeyPrefix, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION),
            FILE_SIZE_1_KB, ROW_COUNT);

    // Create a storage unit key.
    BusinessObjectDataStorageUnitKey storageUnitKey = storageUnitHelper.createStorageUnitKey(businessObjectDataKey, STORAGE_NAME);

    try
    {
        // Put relative "already restored" Glacier storage class S3 files in the S3 bucket.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles())
        {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFileEntity.getPath(),
                new ByteArrayInputStream(new byte[storageFileEntity.getFileSizeBytes().intValue()]), metadata), null);
        }

        // Try to finalize a restore for the storage unit when get S3 object metadata operation fails with an Amazon service exception.
        try
        {
            businessObjectDataFinalizeRestoreService.finalizeRestore(storageUnitKey);
            fail("Should throw an IllegalStateException when a get S3 object metadata operation fails.");
        }
        catch (IllegalStateException e)
        {
            assertEquals(String.format("Fail to check restore status for \"%s/%s\" key in \"%s\" bucket. " +
                    "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", s3KeyPrefix,
                MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION, S3_BUCKET_NAME), e.getMessage());
        }

        // Validate that the storage unit status is still in RESTORING state.
        assertEquals(StorageUnitStatusEntity.RESTORING, storageUnitEntity.getStatus().getCode());

        // Validate that we have the S3 files at the expected S3 location.
        assertEquals(storageUnitEntity.getStorageFiles().size(), s3Dao.listDirectory(s3FileTransferRequestParamsDto).size());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
 
Example 11 (project: herd)
@Test
public void testExecuteS3SpecificSteps() throws Exception
{
    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE, SUBPARTITION_VALUES,
            DATA_VERSION);

    // Create a list of storage files.
    List<StorageFile> storageFiles = new ArrayList<>();
    for (String filePath : LOCAL_FILES)
    {
        storageFiles.add(new StorageFile(TEST_S3_KEY_PREFIX + "/" + filePath, FILE_SIZE_1_KB, NO_ROW_COUNT));
    }

    // Create a business object data restore DTO.
    BusinessObjectDataRestoreDto businessObjectDataRestoreDto =
        new BusinessObjectDataRestoreDto(businessObjectDataKey, STORAGE_NAME, NO_S3_ENDPOINT, S3_BUCKET_NAME, TEST_S3_KEY_PREFIX, NO_STORAGE_UNIT_STATUS,
            NO_STORAGE_UNIT_STATUS, storageFiles, NO_EXCEPTION, ARCHIVE_RETRIEVAL_OPTION);

    try
    {
        // Put relative Glacier storage class S3 files in the S3 bucket.
        for (StorageFile storageFile : storageFiles)
        {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFile.getFilePath(),
                new ByteArrayInputStream(new byte[storageFile.getFileSizeBytes().intValue()]), metadata), NO_S3_CLIENT);
        }

        // Execute S3 specific steps to finalize a restore for the Glacier storage unit.
        businessObjectDataFinalizeRestoreHelperService.executeS3SpecificSteps(businessObjectDataRestoreDto);

        // Validate that we have the restored S3 files at the expected S3 location.
        assertEquals(storageFiles.size(), s3Dao.listDirectory(s3FileTransferRequestParamsDto).size());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }

        s3Operations.rollback();
    }
}
 
Example 12 (project: herd)
@Test
public void testExecuteS3SpecificStepsGlacierS3FileStillRestoring() throws Exception
{
    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE, SUBPARTITION_VALUES,
            DATA_VERSION);

    // Create a business object data restore DTO.
    BusinessObjectDataRestoreDto businessObjectDataRestoreDto =
        new BusinessObjectDataRestoreDto(businessObjectDataKey, STORAGE_NAME, NO_S3_ENDPOINT, S3_BUCKET_NAME, TEST_S3_KEY_PREFIX, NO_STORAGE_UNIT_STATUS,
            NO_STORAGE_UNIT_STATUS, Arrays.asList(new StorageFile(TEST_S3_KEY_PREFIX + "/" + LOCAL_FILE, FILE_SIZE_1_KB, NO_ROW_COUNT)),
            NO_EXCEPTION, ARCHIVE_RETRIEVAL_OPTION);

    try
    {
        // Put a "still restoring" Glacier storage class S3 file in the Glacier S3 bucket.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
        metadata.setOngoingRestore(true);
        s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, String.format("%s/%s", TEST_S3_KEY_PREFIX, LOCAL_FILE),
            new ByteArrayInputStream(new byte[(int) FILE_SIZE_1_KB]), metadata), NO_S3_CLIENT);

        // Try to execute S3 specific steps to finalize a restore for the storage unit when Glacier S3 file is still restoring.
        try
        {
            businessObjectDataFinalizeRestoreHelperService.executeS3SpecificSteps(businessObjectDataRestoreDto);
            fail();
        }
        catch (IllegalArgumentException e)
        {
            assertEquals(String
                .format("Archived S3 file \"%s/%s\" is not restored. " + "StorageClass {GLACIER}, OngoingRestore flag {true}, S3 bucket name {%s}",
                    TEST_S3_KEY_PREFIX, LOCAL_FILE, S3_BUCKET_NAME), e.getMessage());
        }

        // Validate that we have a Glacier S3 file at the expected S3 location.
        assertEquals(1, s3Dao.listDirectory(s3FileTransferRequestParamsDto).size());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }

        s3Operations.rollback();
    }
}
 
Example 13 (project: herd, file: S3DaoImplTest.java)
@Test
public void testRestoreObjectsInDeepArchiveWithExpeditedArchiveRetrievalOption()
{
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy =
        new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an Object Metadata with DeepArchive storage class.
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.DeepArchive);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);

    doThrow(new AmazonServiceException("Retrieval option is not supported by this storage class")).when(s3Operations)
        .restoreObject(any(RestoreObjectRequest.class), any(AmazonS3.class));

    try
    {
        s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, Tier.Expedited.toString());
        fail();
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
                "Reason: Retrieval option is not supported by this storage class (Service: null; Status Code: 0; Error Code: null; Request ID: null)",
            TEST_FILE, S3_BUCKET_NAME), e.getMessage());
    }
}
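
The failure asserted above mirrors real S3 behavior: DEEP_ARCHIVE objects support only the Standard and Bulk retrieval tiers, so an Expedited request is rejected. For reference, a restore request with an explicit tier can be built like this with the SDK; bucket, key, and the 7-day expiration below are placeholder values:

import com.amazonaws.services.s3.model.GlacierJobParameters;
import com.amazonaws.services.s3.model.RestoreObjectRequest;
import com.amazonaws.services.s3.model.Tier;

// Restore a placeholder archived object for 7 days. DEEP_ARCHIVE objects
// accept only Tier.Standard and Tier.Bulk; Tier.Expedited is limited to
// the GLACIER storage class.
RestoreObjectRequest restoreObjectRequest =
    new RestoreObjectRequest("my-bucket", "my-archived-object", 7)
        .withGlacierJobParameters(new GlacierJobParameters().withTier(Tier.Standard));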
 
Example 14 (project: herd, file: S3DaoImplTest.java)
private void testRestoreObjectsWithS3Exception(String exceptionMessage, int statusCode)
{
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy =
        new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an Object Metadata
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.DeepArchive);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<RestoreObjectRequest> requestStoreCaptor = ArgumentCaptor.forClass(RestoreObjectRequest.class);

    // Create an Amazon S3 Exception
    AmazonS3Exception amazonS3Exception = new AmazonS3Exception(exceptionMessage);
    amazonS3Exception.setStatusCode(statusCode);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
    doThrow(amazonS3Exception).when(s3Operations).restoreObject(requestStoreCaptor.capture(), s3ClientCaptor.capture());

    try
    {
        // Call the method under test.
        s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, Tier.Standard.toString());

        // If this is not a restore already in progress exception message (409) then we should have caught an exception.
        // Else if this is a restore already in progress message (409) then continue as usual.
        if (!exceptionMessage.equals(RESTORE_ALREADY_IN_PROGRESS_EXCEPTION_MESSAGE))
        {
            // Should not be here. Fail!
            fail();
        }
        else
        {
            RestoreObjectRequest requestStore = requestStoreCaptor.getValue();
            assertEquals(S3_BUCKET_NAME, s3BucketNameCaptor.getValue());
            assertEquals(TEST_FILE, keyCaptor.getValue());

            // Verify Bulk option is used when the option is not provided
            assertEquals(StringUtils.isNotEmpty(Tier.Standard.toString())
                ? Tier.Standard.toString() : Tier.Bulk.toString(), requestStore.getGlacierJobParameters().getTier());
        }
    }
    catch (IllegalStateException illegalStateException)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
                "Reason: com.amazonaws.services.s3.model.AmazonS3Exception: %s " +
                "(Service: null; Status Code: %s; Error Code: null; Request ID: null; S3 Extended Request ID: null), S3 Extended Request ID: null",
            TEST_FILE, S3_BUCKET_NAME, exceptionMessage, statusCode), illegalStateException.getMessage());
    }

    // Verify the external calls
    verify(retryPolicyFactory).getRetryPolicy();
    verify(s3Operations).getObjectMetadata(anyString(), anyString(), any(AmazonS3Client.class));
    verify(s3Operations).restoreObject(any(RestoreObjectRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}
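
The 409 branch above captures a useful detail: when a restore is already in progress, S3 rejects a duplicate restore request with HTTP 409 ("RestoreAlreadyInProgress"), and herd's DAO treats that specific failure as success. A minimal sketch of the same pattern, assuming an AmazonS3 client named s3 and a prepared restoreObjectRequest:

try
{
    // restoreObjectV2 supersedes the deprecated void restoreObject method.
    s3.restoreObjectV2(restoreObjectRequest);
}
catch (AmazonS3Exception e)
{
    // HTTP 409 means a restore was already initiated for this object;
    // the desired end state is the same, so treat it as success.
    if (e.getStatusCode() != 409)
    {
        throw new IllegalStateException("Failed to initiate a restore request.", e);
    }
}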
 
Example 15 (project: herd, file: S3DaoImplTest.java)
/**
 * Runs the restore objects method.
 *
 * @param archiveRetrievalOption the archive retrieval option
 * @param storageClass the S3 storage class of the archived object
 */
private void runRestoreObjects(String archiveRetrievalOption, StorageClass storageClass)
{
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy =
        new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an Object Metadata
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, storageClass);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<RestoreObjectRequest> requestStoreCaptor = ArgumentCaptor.forClass(RestoreObjectRequest.class);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
    doNothing().when(s3Operations).restoreObject(requestStoreCaptor.capture(), s3ClientCaptor.capture());

    s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, archiveRetrievalOption);

    RestoreObjectRequest requestStore = requestStoreCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, s3BucketNameCaptor.getValue());
    assertEquals(TEST_FILE, keyCaptor.getValue());

    // Verify Bulk option is used when the option is not provided
    assertEquals(StringUtils.isNotEmpty(archiveRetrievalOption)
        ? archiveRetrievalOption : Tier.Bulk.toString(), requestStore.getGlacierJobParameters().getTier());

    // Verify the external calls
    verify(retryPolicyFactory).getRetryPolicy();
    verify(s3Operations).getObjectMetadata(anyString(), anyString(), any(AmazonS3Client.class));
    verify(s3Operations).restoreObject(any(RestoreObjectRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}
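
The S3DaoImplTest helpers above stub getObjectMetadata, so the ongoing-restore flag is whatever the test primed with setOngoingRestore(). Against real S3, finalizing a restore amounts to polling that flag until it flips to false. A rough polling loop, assuming the same hypothetical s3 client, bucketName, and key as in the first sketch, run inside a method that declares throws InterruptedException:

// Poll until the ongoing-restore flag clears. Real restores take minutes
// (Expedited) to many hours (Bulk from DEEP_ARCHIVE), so poll sparingly.
ObjectMetadata metadata = s3.getObjectMetadata(bucketName, key);
while (Boolean.TRUE.equals(metadata.getOngoingRestore()))
{
    Thread.sleep(TimeUnit.MINUTES.toMillis(5));
    metadata = s3.getObjectMetadata(bucketName, key);
}
// At this point a temporary restored copy is readable until
// metadata.getRestoreExpirationTime().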