The following are usage examples of com.amazonaws.retry.PredefinedRetryPolicies, collected from various open-source projects.
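Before the project-specific examples, here is a minimal, self-contained sketch of the most common usage pattern for this class: building a RetryPolicy from one of the predefined factory methods and attaching it to a client through ClientConfiguration. The retry count, the choice of an S3 client, and the main method are illustrative placeholders (they assume default credentials and region from the environment), not code taken from any of the projects listed here.
import com.amazonaws.ClientConfiguration;
import com.amazonaws.retry.PredefinedRetryPolicies;
import com.amazonaws.retry.RetryPolicy;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class PredefinedRetryPoliciesExample {
    public static void main(String[] args) {
        // SDK default retry condition and backoff strategy, capped at 5 retries.
        RetryPolicy retryPolicy = PredefinedRetryPolicies.getDefaultRetryPolicyWithCustomMaxRetries(5);
        ClientConfiguration clientConfiguration = new ClientConfiguration().withRetryPolicy(retryPolicy);
        // Any AWS SDK for Java v1 client builder accepts the configuration the same way.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withClientConfiguration(clientConfiguration)
                .build();
        System.out.println("Max error retries: " + retryPolicy.getMaxErrorRetry());
    }
}
A DynamoDB-specific variant, PredefinedRetryPolicies.getDynamoDBDefaultRetryPolicyWithCustomMaxRetries(int), is applied the same way, as the DynamoDB client configuration snippet further down shows.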
@Bean
public AmazonSNSAsync snsClient(
@Value("${notification.sns.region}") String region,
@Value("${notification.sns.endpointUrl:disabled}") String snsEndpointUrl) {
if ("disabled".equalsIgnoreCase(snsEndpointUrl)) {
return null;
}
AmazonSNSAsync client = AmazonSNSAsyncClientBuilder
.standard()
.withClientConfiguration(
new ClientConfiguration().withRetryPolicy(PredefinedRetryPolicies.getDefaultRetryPolicy()))
.withExecutorFactory(() -> Executors.newSingleThreadScheduledExecutor())
.withEndpointConfiguration(new EndpointConfiguration(snsEndpointUrl, region))
.build();
return client;
}
private static ClientConfiguration createGatewayTimeoutRetryableConfiguration() {
ClientConfiguration retryableConfig = new ClientConfiguration();
RetryPolicy.RetryCondition retryCondition = new PredefinedRetryPolicies.SDKDefaultRetryCondition() {
@Override
public boolean shouldRetry(AmazonWebServiceRequest originalRequest, AmazonClientException exception,
int retriesAttempted) {
if (super.shouldRetry(originalRequest, exception, retriesAttempted)) {
return true;
}
if (exception instanceof AmazonServiceException) {
AmazonServiceException ase = (AmazonServiceException) exception;
if (ase.getStatusCode() == SC_GATEWAY_TIMEOUT) {
return true;
}
}
return false;
}
};
RetryPolicy retryPolicy = new RetryPolicy(retryCondition, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY,
PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY, true);
retryableConfig.setRetryPolicy(retryPolicy);
return retryableConfig;
}
protected AmazonApiGateway getApiGatewayClient() {
if (apiGatewayClient != null) {
return apiGatewayClient;
}
RetryPolicy.RetryCondition retryCondition = new RetryPolicy.RetryCondition() {
@Override
public boolean shouldRetry(AmazonWebServiceRequest amazonWebServiceRequest, AmazonClientException amazonClientException, int i) {
if (amazonClientException instanceof TooManyRequestsException) {
return true;
}
return PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION.shouldRetry(amazonWebServiceRequest,
amazonClientException, i);
}
};
RetryPolicy retryPolicy = new RetryPolicy(retryCondition,
PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY,
10, true);
ClientConfiguration clientConfig = new ClientConfiguration()
.withRetryPolicy(retryPolicy);
apiGatewayClient = new AmazonApiGatewayClient(getAWSCredentialsProvideChain(), clientConfig)
.withRegion(Region.getRegion(Regions.fromName(deployment.getRegion())));
return apiGatewayClient;
}
/**
* Asserts that the retry policy generated by the factory is configured with the correct values.
*/
@Test
public void assertResultRetryPolicyConfiguredCorrectly()
{
int expectedMaxErrorRetry = 12345;
when(configurationHelper.getProperty(any(), eq(Integer.class))).thenReturn(expectedMaxErrorRetry);
RetryPolicy retryPolicy = retryPolicyFactory.getRetryPolicy();
assertEquals(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, retryPolicy.getRetryCondition());
assertEquals(backoffStrategy, retryPolicy.getBackoffStrategy());
assertEquals(expectedMaxErrorRetry, retryPolicy.getMaxErrorRetry());
}
@Test
public void testGetTemporarySecurityCredentials()
{
// Create an AWS parameters DTO with proxy settings.
AwsParamsDto awsParamsDto = new AwsParamsDto();
awsParamsDto.setHttpProxyHost(HTTP_PROXY_HOST);
awsParamsDto.setHttpProxyPort(HTTP_PROXY_PORT);
// Specify the duration, in seconds, of the role session.
int awsRoleDurationSeconds = INTEGER_VALUE;
// Create an IAM policy.
Policy policy = new Policy(STRING_VALUE);
// Create a retry policy.
RetryPolicy retryPolicy =
new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);
// Create the expected assume role request.
AssumeRoleRequest assumeRoleRequest = new AssumeRoleRequest().withRoleArn(AWS_ROLE_ARN).withRoleSessionName(SESSION_NAME).withPolicy(policy.toJson())
.withDurationSeconds(awsRoleDurationSeconds);
// Create AWS credentials for API authentication.
Credentials credentials = new Credentials();
credentials.setAccessKeyId(AWS_ASSUMED_ROLE_ACCESS_KEY);
credentials.setSecretAccessKey(AWS_ASSUMED_ROLE_SECRET_KEY);
credentials.setSessionToken(AWS_ASSUMED_ROLE_SESSION_TOKEN);
// Create an assume role result.
AssumeRoleResult assumeRoleResult = new AssumeRoleResult();
assumeRoleResult.setCredentials(credentials);
// Mock the external calls.
when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
when(stsOperations.assumeRole(any(AWSSecurityTokenServiceClient.class), eq(assumeRoleRequest))).thenReturn(assumeRoleResult);
// Call the method under test.
Credentials result = stsDaoImpl.getTemporarySecurityCredentials(awsParamsDto, SESSION_NAME, AWS_ROLE_ARN, awsRoleDurationSeconds, policy);
// Verify the external calls.
verify(retryPolicyFactory).getRetryPolicy();
verify(stsOperations).assumeRole(any(AWSSecurityTokenServiceClient.class), eq(assumeRoleRequest));
verifyNoMoreInteractionsHelper();
// Validate the returned object.
assertEquals(credentials, result);
}
@Test
public void testGetTemporarySecurityCredentialsMissingOptionalParameters()
{
// Create an AWS parameters DTO without proxy settings.
AwsParamsDto awsParamsDto = new AwsParamsDto();
// Specify the duration, in seconds, of the role session.
int awsRoleDurationSeconds = INTEGER_VALUE;
// Create a retry policy.
RetryPolicy retryPolicy =
new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);
// Create the expected assume role request.
AssumeRoleRequest assumeRoleRequest =
new AssumeRoleRequest().withRoleArn(AWS_ROLE_ARN).withRoleSessionName(SESSION_NAME).withDurationSeconds(awsRoleDurationSeconds);
// Create AWS credentials for API authentication.
Credentials credentials = new Credentials();
credentials.setAccessKeyId(AWS_ASSUMED_ROLE_ACCESS_KEY);
credentials.setSecretAccessKey(AWS_ASSUMED_ROLE_SECRET_KEY);
credentials.setSessionToken(AWS_ASSUMED_ROLE_SESSION_TOKEN);
// Create an assume role result.
AssumeRoleResult assumeRoleResult = new AssumeRoleResult();
assumeRoleResult.setCredentials(credentials);
// Mock the external calls.
when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
when(stsOperations.assumeRole(any(AWSSecurityTokenServiceClient.class), eq(assumeRoleRequest))).thenReturn(assumeRoleResult);
// Call the method under test. Please note that we do not specify an IAM policy.
Credentials result = stsDaoImpl.getTemporarySecurityCredentials(awsParamsDto, SESSION_NAME, AWS_ROLE_ARN, awsRoleDurationSeconds, null);
// Verify the external calls.
verify(retryPolicyFactory).getRetryPolicy();
verify(stsOperations).assumeRole(any(AWSSecurityTokenServiceClient.class), eq(assumeRoleRequest));
verifyNoMoreInteractionsHelper();
// Validate the returned object.
assertEquals(credentials, result);
}
@Test
public void testDeleteDirectoryNoS3VersionsExist()
{
// Create an S3 file transfer request parameters DTO to access S3 objects.
S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
// Create a retry policy.
RetryPolicy retryPolicy =
new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);
// Create an empty version listing.
VersionListing versionListing = new VersionListing();
// Mock the external calls.
when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
when(s3Operations.listVersions(any(ListVersionsRequest.class), any(AmazonS3Client.class))).thenReturn(versionListing);
// Call the method under test.
s3DaoImpl.deleteDirectory(s3FileTransferRequestParamsDto);
// Verify the external calls.
verify(retryPolicyFactory).getRetryPolicy();
verify(s3Operations).listVersions(any(ListVersionsRequest.class), any(AmazonS3Client.class));
verifyNoMoreInteractionsHelper();
}
@Provides
protected ApiGateway provideAmazonApiGateway(AWSCredentialsProvider credsProvider,
RetryPolicy.BackoffStrategy backoffStrategy,
@Named("region") String region) {
final RetryPolicy retryPolicy = new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, backoffStrategy, 5, true);
final ClientConfiguration clientConfig = new ClientConfiguration().withUserAgent(USER_AGENT).withRetryPolicy(retryPolicy);
return new AmazonApiGateway(getEndpoint(region)).with(credsProvider).with(clientConfig).getApiGateway();
}
/**
* Create a named {@link RetryPolicy} to be used by the {@link AmazonSNS} client, unless a bean by that name
* already exists in context.
*
* @param retryProperties The retry properties
* @return a named {@link RetryPolicy}
*/
@Bean(name = SNS_CLIENT_RETRY_POLICY_BEAN_NAME)
@ConditionalOnMissingBean(name = SNS_CLIENT_RETRY_POLICY_BEAN_NAME)
public RetryPolicy jobNotificationsSNSClientRetryPolicy(
final RetryProperties retryProperties
) {
return PredefinedRetryPolicies.getDefaultRetryPolicyWithCustomMaxRetries(
retryProperties.getSns().getNoOfRetries()
);
}
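As a hedged sketch of how the named policy bean above could be consumed (the bean method, the @Qualifier wiring, and the region property are assumptions for illustration, placed in the same Spring @Configuration class rather than taken from the original project), the policy can be attached to the SNS client's ClientConfiguration:
@Bean
@ConditionalOnMissingBean(AmazonSNS.class)
public AmazonSNS amazonSns(
    @Qualifier(SNS_CLIENT_RETRY_POLICY_BEAN_NAME) final RetryPolicy snsClientRetryPolicy,
    @Value("${notification.sns.region}") final String region) {
    // Hypothetical wiring: the named retry policy bean drives the SNS client's retry behavior.
    return AmazonSNSClientBuilder.standard()
        .withClientConfiguration(new ClientConfiguration().withRetryPolicy(snsClientRetryPolicy))
        .withRegion(region)
        .build();
}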
@Test
public void testDeleteDirectoryMultiObjectDeleteException()
{
// Create an S3 file transfer request parameters DTO to access S3 objects.
S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
// Create a retry policy.
RetryPolicy retryPolicy =
new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);
// Create an S3 version summary.
S3VersionSummary s3VersionSummary = new S3VersionSummary();
s3VersionSummary.setKey(S3_KEY);
s3VersionSummary.setVersionId(S3_VERSION_ID);
// Create a version listing.
VersionListing versionListing = new VersionListing();
versionListing.setVersionSummaries(Collections.singletonList(s3VersionSummary));
// Create a delete error.
MultiObjectDeleteException.DeleteError deleteError = new MultiObjectDeleteException.DeleteError();
deleteError.setKey(S3_KEY);
deleteError.setVersionId(S3_VERSION_ID);
deleteError.setCode(ERROR_CODE);
deleteError.setMessage(ERROR_MESSAGE);
// Create a multi object delete exception.
MultiObjectDeleteException multiObjectDeleteException = new MultiObjectDeleteException(Collections.singletonList(deleteError), new ArrayList<>());
// Mock the external calls.
when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
when(s3Operations.listVersions(any(ListVersionsRequest.class), any(AmazonS3Client.class))).thenReturn(versionListing);
when(s3Operations.deleteObjects(any(DeleteObjectsRequest.class), any(AmazonS3Client.class))).thenThrow(multiObjectDeleteException);
// Try to call the method under test.
try
{
s3DaoImpl.deleteDirectory(s3FileTransferRequestParamsDto);
}
catch (IllegalStateException e)
{
assertEquals(String.format(
"Failed to delete keys/key versions with prefix \"%s\" from bucket \"%s\". Reason: One or more objects could not be deleted " +
"(Service: null; Status Code: 0; Error Code: null; Request ID: null; S3 Extended Request ID: null)", S3_KEY_PREFIX, S3_BUCKET_NAME),
e.getMessage());
}
// Verify the external calls.
verify(retryPolicyFactory, times(2)).getRetryPolicy();
verify(s3Operations).listVersions(any(ListVersionsRequest.class), any(AmazonS3Client.class));
verify(s3Operations).deleteObjects(any(DeleteObjectsRequest.class), any(AmazonS3Client.class));
verifyNoMoreInteractionsHelper();
}
@Test
public void testRestoreObjectsInDeepArchiveWithExpeditedArchiveRetrievalOption()
{
List<File> files = Collections.singletonList(new File(TEST_FILE));
// Create an S3 file transfer request parameters DTO to access S3 objects.
S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
s3FileTransferRequestParamsDto.setFiles(files);
// Create a retry policy.
RetryPolicy retryPolicy =
new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);
// Create an Object Metadata with DeepArchive storage class.
ObjectMetadata objectMetadata = new ObjectMetadata();
objectMetadata.setOngoingRestore(false);
objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.DeepArchive);
ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
// Mock the external calls.
when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
doThrow(new AmazonServiceException("Retrieval option is not supported by this storage class")).when(s3Operations)
.restoreObject(any(RestoreObjectRequest.class), any(AmazonS3.class));
try
{
s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, Tier.Expedited.toString());
fail();
}
catch (IllegalArgumentException e)
{
assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
"Reason: Retrieval option is not supported by this storage class (Service: null; Status Code: 0; Error Code: null; Request ID: null)",
TEST_FILE, S3_BUCKET_NAME), e.getMessage());
}
}
private void testRestoreObjectsWithS3Exception(String exceptionMessage, int statusCode)
{
List<File> files = Collections.singletonList(new File(TEST_FILE));
// Create an S3 file transfer request parameters DTO to access S3 objects.
S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
s3FileTransferRequestParamsDto.setFiles(files);
// Create a retry policy.
RetryPolicy retryPolicy =
new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);
// Create object metadata with the DeepArchive storage class.
ObjectMetadata objectMetadata = new ObjectMetadata();
objectMetadata.setOngoingRestore(false);
objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.DeepArchive);
ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<RestoreObjectRequest> requestStoreCaptor = ArgumentCaptor.forClass(RestoreObjectRequest.class);
// Create an Amazon S3 exception with the given message and status code.
AmazonS3Exception amazonS3Exception = new AmazonS3Exception(exceptionMessage);
amazonS3Exception.setStatusCode(statusCode);
// Mock the external calls.
when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
doThrow(amazonS3Exception).when(s3Operations).restoreObject(requestStoreCaptor.capture(), s3ClientCaptor.capture());
try
{
// Call the method under test.
s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, Tier.Standard.toString());
// If this is not a "restore already in progress" (409) exception message, restoreObjects should have thrown, so we should never reach this point.
// If it is a "restore already in progress" (409) message, the exception is swallowed and execution continues as usual.
if (!exceptionMessage.equals(RESTORE_ALREADY_IN_PROGRESS_EXCEPTION_MESSAGE))
{
// Should not be here. Fail!
fail();
}
else
{
RestoreObjectRequest requestStore = requestStoreCaptor.getValue();
assertEquals(S3_BUCKET_NAME, s3BucketNameCaptor.getValue());
assertEquals(TEST_FILE, keyCaptor.getValue());
// Verify the requested Standard retrieval option is carried through on the restore request.
assertEquals(StringUtils.isNotEmpty(Tier.Standard.toString())
? Tier.Standard.toString() : Tier.Bulk.toString(), requestStore.getGlacierJobParameters().getTier());
}
}
catch (IllegalStateException illegalStateException)
{
assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
"Reason: com.amazonaws.services.s3.model.AmazonS3Exception: %s " +
"(Service: null; Status Code: %s; Error Code: null; Request ID: null; S3 Extended Request ID: null), S3 Extended Request ID: null",
TEST_FILE, S3_BUCKET_NAME, exceptionMessage, statusCode), illegalStateException.getMessage());
}
// Verify the external calls
verify(retryPolicyFactory).getRetryPolicy();
verify(s3Operations).getObjectMetadata(anyString(), anyString(), any(AmazonS3Client.class));
verify(s3Operations).restoreObject(any(RestoreObjectRequest.class), any(AmazonS3Client.class));
verifyNoMoreInteractionsHelper();
}
/**
* Runs the restore objects method.
*
* @param archiveRetrievalOption the archive retrieval option
* @param storageClass the storage class to set on the object metadata
*/
private void runRestoreObjects(String archiveRetrievalOption, StorageClass storageClass)
{
List<File> files = Collections.singletonList(new File(TEST_FILE));
// Create an S3 file transfer request parameters DTO to access S3 objects.
S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
s3FileTransferRequestParamsDto.setFiles(files);
// Create a retry policy.
RetryPolicy retryPolicy =
new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);
// Create object metadata with the given storage class.
ObjectMetadata objectMetadata = new ObjectMetadata();
objectMetadata.setOngoingRestore(false);
objectMetadata.setHeader(Headers.STORAGE_CLASS, storageClass);
ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<RestoreObjectRequest> requestStoreCaptor = ArgumentCaptor.forClass(RestoreObjectRequest.class);
// Mock the external calls.
when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
doNothing().when(s3Operations).restoreObject(requestStoreCaptor.capture(), s3ClientCaptor.capture());
s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, archiveRetrievalOption);
RestoreObjectRequest requestStore = requestStoreCaptor.getValue();
assertEquals(S3_BUCKET_NAME, s3BucketNameCaptor.getValue());
assertEquals(TEST_FILE, keyCaptor.getValue());
// Verify Bulk option is used when the option is not provided
assertEquals(StringUtils.isNotEmpty(archiveRetrievalOption)
? archiveRetrievalOption : Tier.Bulk.toString(), requestStore.getGlacierJobParameters().getTier());
// Verify the external calls
verify(retryPolicyFactory).getRetryPolicy();
verify(s3Operations).getObjectMetadata(anyString(), anyString(), any(AmazonS3Client.class));
verify(s3Operations).restoreObject(any(RestoreObjectRequest.class), any(AmazonS3Client.class));
verifyNoMoreInteractionsHelper();
}
private void runTagObjectsTest()
{
// Create an S3 file transfer request parameters DTO to access S3 objects.
S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
// Create an S3 file transfer request parameters DTO to tag S3 objects.
S3FileTransferRequestParamsDto s3ObjectTaggerParamsDto = new S3FileTransferRequestParamsDto();
s3ObjectTaggerParamsDto.setAwsAccessKeyId(AWS_ASSUMED_ROLE_ACCESS_KEY);
s3ObjectTaggerParamsDto.setAwsSecretKey(AWS_ASSUMED_ROLE_SECRET_KEY);
s3ObjectTaggerParamsDto.setSessionToken(AWS_ASSUMED_ROLE_SESSION_TOKEN);
// Create an S3 object summary.
S3ObjectSummary s3ObjectSummary = new S3ObjectSummary();
s3ObjectSummary.setKey(S3_KEY);
// Create an S3 object tag.
Tag tag = new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE);
// Create a retry policy.
RetryPolicy retryPolicy =
new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);
// Create a get object tagging result.
GetObjectTaggingResult getObjectTaggingResult = new GetObjectTaggingResult(null);
// Create a set object tagging result.
SetObjectTaggingResult setObjectTaggingResult = new SetObjectTaggingResult();
// Mock the external calls.
when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
when(s3Operations.getObjectTagging(any(GetObjectTaggingRequest.class), any(AmazonS3Client.class))).thenReturn(getObjectTaggingResult);
when(s3Operations.setObjectTagging(any(SetObjectTaggingRequest.class), any(AmazonS3Client.class))).thenReturn(setObjectTaggingResult);
// Call the method under test.
s3DaoImpl.tagObjects(s3FileTransferRequestParamsDto, s3ObjectTaggerParamsDto, Collections.singletonList(s3ObjectSummary), tag);
// Verify the external calls.
verify(retryPolicyFactory, times(2)).getRetryPolicy();
verify(s3Operations).getObjectTagging(any(GetObjectTaggingRequest.class), any(AmazonS3Client.class));
verify(s3Operations).setObjectTagging(any(SetObjectTaggingRequest.class), any(AmazonS3Client.class));
verifyNoMoreInteractionsHelper();
}
private void runTagVersionsTest()
{
// Create an S3 file transfer request parameters DTO to access S3 objects.
S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
// Create an S3 file transfer request parameters DTO to tag S3 objects.
S3FileTransferRequestParamsDto s3ObjectTaggerParamsDto = new S3FileTransferRequestParamsDto();
s3ObjectTaggerParamsDto.setAwsAccessKeyId(AWS_ASSUMED_ROLE_ACCESS_KEY);
s3ObjectTaggerParamsDto.setAwsSecretKey(AWS_ASSUMED_ROLE_SECRET_KEY);
s3ObjectTaggerParamsDto.setSessionToken(AWS_ASSUMED_ROLE_SESSION_TOKEN);
// Create an S3 version summary.
S3VersionSummary s3VersionSummary = new S3VersionSummary();
s3VersionSummary.setKey(S3_KEY);
s3VersionSummary.setVersionId(S3_VERSION_ID);
// Create an S3 object tag.
Tag tag = new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE);
// Create a retry policy.
RetryPolicy retryPolicy =
new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);
// Create a get object tagging result.
GetObjectTaggingResult getObjectTaggingResult = new GetObjectTaggingResult(null);
// Create a set object tagging result.
SetObjectTaggingResult setObjectTaggingResult = new SetObjectTaggingResult();
// Mock the external calls.
when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
when(s3Operations.getObjectTagging(any(GetObjectTaggingRequest.class), any(AmazonS3Client.class))).thenReturn(getObjectTaggingResult);
when(s3Operations.setObjectTagging(any(SetObjectTaggingRequest.class), any(AmazonS3Client.class))).thenReturn(setObjectTaggingResult);
// Call the method under test.
s3DaoImpl.tagVersions(s3FileTransferRequestParamsDto, s3ObjectTaggerParamsDto, Collections.singletonList(s3VersionSummary), tag);
// Verify the external calls.
verify(retryPolicyFactory, times(2)).getRetryPolicy();
verify(s3Operations).getObjectTagging(any(GetObjectTaggingRequest.class), any(AmazonS3Client.class));
verify(s3Operations).setObjectTagging(any(SetObjectTaggingRequest.class), any(AmazonS3Client.class));
verifyNoMoreInteractionsHelper();
}
private static ClientConfiguration getClientConfiguration() {
ClientConfiguration clientConfiguration = new ClientConfiguration();
clientConfiguration.setRetryPolicy(PredefinedRetryPolicies.getDefaultRetryPolicyWithCustomMaxRetries(MAX_ERROR_RETRY));
return clientConfiguration;
}
public InstrumentedRetryCondition(Registry registry) {
this(registry, PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION);
}
public InstrumentedBackoffStrategy(Registry registry) {
this(registry, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY);
}
/**
* Configures this appender instance and makes it ready for use by consumers.
* It validates mandatory parameters and confirms that the configured stream
* is ready for publishing data.
*
* Error details are made available through the fallback handler for this
* appender.
*
* @throws IllegalStateException
* if we encounter issues configuring this appender instance
*/
@Override
public void activateOptions() {
if (streamName == null) {
initializationFailed = true;
error("Invalid configuration - streamName cannot be null for appender: " + name);
}
if (layout == null) {
initializationFailed = true;
error("Invalid configuration - No layout for appender: " + name);
}
ClientConfiguration clientConfiguration = new ClientConfiguration();
clientConfiguration = setProxySettingsFromSystemProperties(clientConfiguration);
clientConfiguration.setMaxErrorRetry(maxRetries);
clientConfiguration.setRetryPolicy(new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION,
PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, maxRetries, true));
clientConfiguration.setUserAgent(AppenderConstants.USER_AGENT_STRING);
BlockingQueue<Runnable> taskBuffer = new LinkedBlockingDeque<Runnable>(bufferSize);
ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(threadCount, threadCount,
AppenderConstants.DEFAULT_THREAD_KEEP_ALIVE_SEC, TimeUnit.SECONDS, taskBuffer, new BlockFastProducerPolicy());
threadPoolExecutor.prestartAllCoreThreads();
kinesisClient = new AmazonKinesisAsyncClient(new CustomCredentialsProviderChain(), clientConfiguration,
threadPoolExecutor);
boolean regionProvided = !Validator.isBlank(region);
if (!regionProvided) {
region = AppenderConstants.DEFAULT_REGION;
}
if (!Validator.isBlank(endpoint)) {
if (regionProvided) {
LOGGER
.warn("Received configuration for both region as well as Amazon Kinesis endpoint. ("
+ endpoint
+ ") will be used as endpoint instead of default endpoint for region ("
+ region + ")");
}
kinesisClient.setEndpoint(endpoint,
AppenderConstants.DEFAULT_SERVICE_NAME, region);
} else {
kinesisClient.setRegion(Region.getRegion(Regions.fromName(region)));
}
DescribeStreamResult describeResult = null;
try {
describeResult = kinesisClient.describeStream(streamName);
String streamStatus = describeResult.getStreamDescription().getStreamStatus();
if (!StreamStatus.ACTIVE.name().equals(streamStatus) && !StreamStatus.UPDATING.name().equals(streamStatus)) {
initializationFailed = true;
error("Stream " + streamName + " is not ready (in active/updating status) for appender: " + name);
}
} catch (ResourceNotFoundException rnfe) {
initializationFailed = true;
error("Stream " + streamName + " doesn't exist for appender: " + name, rnfe);
}
asyncCallHander = new AsyncPutCallStatsReporter(name);
}
private ClientConfiguration getDefaultClientConfiguration() {
return new ClientConfiguration()
.withThrottledRetries(true)
.withMaxConsecutiveRetriesBeforeThrottling(MAX_CONSECUTIVE_RETRIES_BEFORE_THROTTLING)
.withRetryPolicy(PredefinedRetryPolicies.getDefaultRetryPolicyWithCustomMaxRetries(MAX_CLIENT_RETRIES));
}
private ClientConfiguration getDynamoDbClientConfiguration() {
return new ClientConfiguration()
.withRetryPolicy(PredefinedRetryPolicies.getDynamoDBDefaultRetryPolicyWithCustomMaxRetries(MAX_CLIENT_RETRIES));
}
/**
* Constructor with given parameters, used when MultiRegionEmitter creates emitters dynamically.
*
* @param applicationName
* The name of the application using this emitter
* @param endpoint
* The endpoint of the emitter
* @param region
* The region of the emitter
* @param tableName
* The table name the emitter should emit to
* @param cloudwatch
* The CloudWatch client used for this application
* @param credentialProvider
* The credential provider used for the DynamoDB client
* @deprecated Deprecated by {@link #DynamoDBReplicationEmitter(String, String, String, String, AmazonDynamoDBAsync, AmazonCloudWatchAsync)}
*/
@Deprecated
public DynamoDBReplicationEmitter(final String applicationName, final String endpoint, final String region, final String tableName,
final AmazonCloudWatchAsync cloudwatch, final AWSCredentialsProvider credentialProvider) {
this(applicationName, endpoint, region, tableName,
new AmazonDynamoDBAsyncClient(credentialProvider, new ClientConfiguration().withMaxConnections(MAX_THREADS).withRetryPolicy(PredefinedRetryPolicies.DYNAMODB_DEFAULT), Executors.newFixedThreadPool(MAX_THREADS)),
cloudwatch);
}
/**
* Gets the application's default retry policy. The policy uses PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION and a SimpleExponentialBackoffStrategy; the
* maximum number of retry attempts is dynamically configurable through AWS_MAX_RETRY_ATTEMPT.
*
* @return the retry policy
*/
public RetryPolicy getRetryPolicy()
{
return new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, backoffStrategy,
configurationHelper.getProperty(ConfigurationValue.AWS_MAX_RETRY_ATTEMPT, Integer.class), true);
}