Listed below are example usages of com.amazonaws.services.s3.model.CompleteMultipartUploadRequest with com.amazonaws.services.s3.AmazonS3Client; you can also follow the source links to view the originals on GitHub.
@Override
protected void doStart() {
    AWSCredentials myCredentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3 s3Client = new AmazonS3Client(myCredentials);
    ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucket);
    ObjectListing objectListing = s3Client.listObjects(listObjectsRequest);
    ChannelProcessor channelProcessor = getChannelProcessor();
    for (S3ObjectSummary s3ObjectSummary : objectListing.getObjectSummaries()) {
        String file = s3ObjectSummary.getKey();
        LOGGER.info("Read the content of {}", file);
        GetObjectRequest objectRequest = new GetObjectRequest(bucket, file);
        S3Object objectPortion = s3Client.getObject(objectRequest);
        try {
            long startTime = System.currentTimeMillis();
            processLines(channelProcessor, objectPortion.getObjectContent());
            LOGGER.info("Processing of {} took {} ms", file, System.currentTimeMillis() - startTime);
        } catch (IOException e) {
            LOGGER.warn("Cannot process the {}, skipping", file, e);
        }
    }
}
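Note that the doStart() above only reads the first page of results; listObjects returns at most 1,000 keys per call. A minimal paging sketch, assuming the same s3Client and bucket fields, that drains the whole listing:

ObjectListing listing = s3Client.listObjects(new ListObjectsRequest().withBucketName(bucket));
while (true) {
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        LOGGER.info("Read the content of {}", summary.getKey()); // process each key as above
    }
    if (!listing.isTruncated()) {
        break; // no more pages
    }
    listing = s3Client.listNextBatchOfObjects(listing); // fetch the next page
}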
@Inject
public KinesisClientManager(KinesisConfig config)
{
    if (!isNullOrEmpty(config.getAccessKey()) && !isNullOrEmpty(config.getSecretKey())) {
        BasicAWSCredentials awsCredentials = new BasicAWSCredentials(config.getAccessKey(), config.getSecretKey());
        this.client = new AmazonKinesisClient(awsCredentials);
        this.amazonS3Client = new AmazonS3Client(awsCredentials);
        this.dynamoDbClient = new AmazonDynamoDBClient(awsCredentials);
    }
    else {
        DefaultAWSCredentialsProviderChain defaultChain = new DefaultAWSCredentialsProviderChain();
        this.client = new AmazonKinesisClient(defaultChain);
        this.amazonS3Client = new AmazonS3Client(defaultChain);
        this.dynamoDbClient = new AmazonDynamoDBClient(defaultChain);
    }
    this.client.setEndpoint("kinesis." + config.getAwsRegion() + ".amazonaws.com");
    this.dynamoDbClient.setEndpoint("dynamodb." + config.getAwsRegion() + ".amazonaws.com");
}
@Test
public void getAllSummaries() {
    answer = true;
    AmazonS3Client client = mock(AmazonS3Client.class);
    ObjectListing objectListing = mock(ObjectListing.class);
    when(client.listObjects(any(ListObjectsRequest.class))).thenReturn(objectListing);
    // Report the listing as truncated exactly once, so getAllSummaries pages twice.
    when(objectListing.isTruncated()).thenAnswer(__ -> {
        try {
            return answer;
        } finally {
            answer = false;
        }
    });
    S3Utils utils = new S3Utils(client, null, null, false, null);
    utils.getAllSummaries(new ListObjectsRequest());
    verify(objectListing, times(2)).getObjectSummaries();
}
/** Returns an S3 client given the configuration. */
public static AmazonS3Client getS3Client(Map conf) {
    AWSCredentialsProvider provider = new DefaultAWSCredentialsProviderChain();
    AWSCredentials credentials = provider.getCredentials();
    ClientConfiguration config = new ClientConfiguration();
    AmazonS3Client client = new AmazonS3Client(credentials, config);
    String regionName = ConfUtils.getString(conf, REGION);
    if (StringUtils.isNotBlank(regionName)) {
        client.setRegion(RegionUtils.getRegion(regionName));
    }
    String endpoint = ConfUtils.getString(conf, ENDPOINT);
    if (StringUtils.isNotBlank(endpoint)) {
        client.setEndpoint(endpoint);
    }
    return client;
}
private void copyToS3(String fileName) {
    String bucketName = (String) properties.get(BUCKET_PROPNAME);
    String accessId = (String) properties.get(ACCESS_ID_PROPNAME);
    String secretKey = (String) properties.get(SECRET_KEY_PROPNAME);
    // Note: the jclouds overrides and MODULES below are never used by the AWS SDK client created afterwards.
    Properties overrides = new Properties();
    overrides.setProperty("s3.identity", accessId);
    overrides.setProperty("s3.credential", secretKey);
    final Iterable<? extends Module> MODULES = ImmutableSet
            .of(new JavaUrlHttpCommandExecutorServiceModule(), new Log4JLoggingModule(),
                    new NettyPayloadModule());
    AWSCredentials credentials = new BasicAWSCredentials(accessId, secretKey);
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProtocol(Protocol.HTTP);
    AmazonS3Client s3Client = new AmazonS3Client(credentials, clientConfig);
    s3Client.createBucket(bucketName);
    File uploadFile = new File(fileName);
    PutObjectResult putObjectResult = s3Client.putObject(bucketName, uploadFile.getName(), uploadFile);
    logger.info("Uploaded file etag={}", putObjectResult.getETag());
}
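copyToS3 calls createBucket unconditionally; outside us-east-1, S3 typically rejects re-creating a bucket you already own. A hedged guard, assuming the same s3Client (this check is not part of the original source):

// Create the bucket only if it does not already exist (doesBucketExistV2 in newer SDK versions).
if (!s3Client.doesBucketExist(bucketName)) {
    s3Client.createBucket(bucketName);
}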
@Before
public void setUp()
        throws Exception
{
    assumeThat(TEST_S3_ENDPOINT, not(isEmptyOrNullString()));
    AWSCredentials credentials = new BasicAWSCredentials(TEST_S3_ACCESS_KEY_ID, TEST_S3_SECRET_ACCESS_KEY);
    AmazonS3Client s3 = new AmazonS3Client(credentials);
    s3.setEndpoint(TEST_S3_ENDPOINT);
    String bucket = UUID.randomUUID().toString();
    s3.createBucket(bucket);
    ConfigFactory cf = new ConfigFactory(objectMapper());
    Config config = cf.create()
            .set("endpoint", TEST_S3_ENDPOINT)
            .set("bucket", bucket) // use unique bucket name
            .set("credentials.access-key-id", TEST_S3_ACCESS_KEY_ID)
            .set("credentials.secret-access-key", TEST_S3_SECRET_ACCESS_KEY);
    storage = new S3StorageFactory().newStorage(config);
}
/**
 * Checks the bucket ACL for grants that open the given access type to everyone.
 *
 * @param awsS3Client the S3 client used to fetch the ACL
 * @param s3BucketName the name of the bucket to inspect
 * @param accessType the access type (read or write) to check for
 * @return the set of permissions found to be openly granted
 */
public static Set<Permission> checkACLPermissions(AmazonS3Client awsS3Client, String s3BucketName, String accessType) {
    AccessControlList bucketAcl;
    Set<Permission> permissionList = new HashSet<>();
    try {
        bucketAcl = awsS3Client.getBucketAcl(s3BucketName);
        List<Grant> grants = bucketAcl.getGrantsAsList();
        if (!CollectionUtils.isNullOrEmpty(grants)) {
            permissionList = checkAnyGrantHasOpenToReadOrWriteAccess(grants, accessType);
        }
    } catch (AmazonS3Exception s3Exception) {
        logger.error("error : ", s3Exception);
        throw new RuleExecutionFailedExeption(s3Exception.getMessage());
    }
    return permissionList;
}
protected synchronized MultipartState getLocalStateIfInS3(final AmazonS3Client s3, final String bucket,
        final String s3ObjectKey) throws IOException {
    MultipartState currState = getLocalState(s3ObjectKey);
    if (currState == null) {
        return null;
    }
    if (localUploadExistsInS3(s3, bucket, currState)) {
        getLogger().info("Local state for {} loaded with uploadId {} and {} partETags",
                new Object[]{s3ObjectKey, currState.getUploadId(), currState.getPartETags().size()});
        return currState;
    } else {
        getLogger().info("Local state for {} with uploadId {} does not exist in S3, deleting local state",
                new Object[]{s3ObjectKey, currState.getUploadId()});
        persistLocalState(s3ObjectKey, null);
        return null;
    }
}
/**
 * Return an AmazonS3Client set up with the proper endpoint
 * defined in core-site.xml using a property like fs.s3a.endpoint.
 * This mimics code found in S3AFileSystem.
 *
 * @param conf the Hadoop configuration to read the endpoint from
 * @param scheme the filesystem scheme (e.g. s3a) used to build the endpoint key
 * @return an AmazonS3Client configured with the endpoint, if one is set
 */
private static AmazonS3Client getS3Client(Configuration conf, String scheme)
{
    AmazonS3Client s3Client = new AmazonS3Client(new DefaultAWSCredentialsProviderChain());
    String endpointKey = "fs." + scheme.toLowerCase() + ".endpoint";
    String endPoint = conf.getTrimmed(endpointKey, "");
    log.debug("Using endpoint setting " + endpointKey);
    if (!endPoint.isEmpty()) {
        try {
            log.debug("Setting S3 client endpoint to " + endPoint);
            s3Client.setEndpoint(endPoint);
        } catch (IllegalArgumentException e) {
            String msg = "Incorrect endpoint: " + e.getMessage();
            log.error(msg);
            throw new IllegalArgumentException(msg, e);
        }
    }
    return s3Client;
}
@Inject
KinesisClientManager(KinesisConnectorConfig kinesisConnectorConfig)
{
    log.info("Creating new client for Consumer");
    if (nonEmpty(kinesisConnectorConfig.getAccessKey()) && nonEmpty(kinesisConnectorConfig.getSecretKey())) {
        this.kinesisAwsCredentials = new KinesisAwsCredentials(kinesisConnectorConfig.getAccessKey(), kinesisConnectorConfig.getSecretKey());
        this.client = new AmazonKinesisClient(this.kinesisAwsCredentials);
        this.amazonS3Client = new AmazonS3Client(this.kinesisAwsCredentials);
        this.dynamoDBClient = new AmazonDynamoDBClient(this.kinesisAwsCredentials);
    }
    else {
        this.kinesisAwsCredentials = null;
        DefaultAWSCredentialsProviderChain defaultChain = new DefaultAWSCredentialsProviderChain();
        this.client = new AmazonKinesisClient(defaultChain);
        this.amazonS3Client = new AmazonS3Client(defaultChain);
        this.dynamoDBClient = new AmazonDynamoDBClient(defaultChain);
    }
    this.client.setEndpoint("kinesis." + kinesisConnectorConfig.getAwsRegion() + ".amazonaws.com");
    this.dynamoDBClient.setEndpoint("dynamodb." + kinesisConnectorConfig.getAwsRegion() + ".amazonaws.com");
}
@Test
public void shouldFailIfNoFilesToUploadBasedOnSource() {
    AmazonS3Client mockClient = mockClient();
    Config config = new Config(Maps.builder()
            .with(Constants.SOURCEDESTINATIONS, Maps.builder().with("value", "[{\"source\": \"target/*\", \"destination\": \"\"}]").build())
            .with(Constants.DESTINATION_PREFIX, Maps.builder().with("value", "").build())
            .with(Constants.ARTIFACTS_BUCKET, Maps.builder().with("value", "").build())
            .build());
    TaskExecutionResult result = executeMockPublish(
            mockClient,
            config,
            new String[]{}
    );
    assertFalse(result.isSuccessful());
    assertThat(result.message(), containsString("Source target/* didn't yield any files to upload"));
}
@Override
public S3DataTransferer createDataTransfer() {
    String bucketName = System.getProperty(BUCKET_NAME_PROPERTY);
    String region = System.getProperty(REGION_PROPERTY);
    String accessKey = System.getProperty(ACCESS_KEY_PROPERTY);
    String secretKey = System.getProperty(SECRET_KEY_PROPERTY);
    boolean encrypt = Boolean.getBoolean(ENCRYPT_PROPERTY);
    AmazonS3Client s3 = (AmazonS3Client) AmazonS3ClientBuilder.standard()
            .withRegion(region)
            .withCredentials(
                    new AWSStaticCredentialsProvider(
                            new BasicAWSCredentials(accessKey, secretKey)))
            .build();
    return new S3DataTransferer(s3, bucketName, encrypt);
}
@Test
public void shouldNotThrowIfAWSUseIAMRoleIsTrueAndAWS_SECRET_ACCESS_KEYNotPresent() {
    Maps.MapBuilder<String, String> mockVariables = mockEnvironmentVariables
            .with(AWS_USE_IAM_ROLE, "True")
            .with(AWS_ACCESS_KEY_ID, "")
            .with(AWS_SECRET_ACCESS_KEY, "");
    AmazonS3Client mockClient = mockClient();
    Config config = new Config(Maps.builder()
            .with(Constants.SOURCEDESTINATIONS, Maps.builder().with("value", "[{\"source\": \"target/*\", \"destination\": \"\"}]").build())
            .with(Constants.DESTINATION_PREFIX, Maps.builder().with("value", "").build())
            .with(Constants.ARTIFACTS_BUCKET, Maps.builder().with("value", "").build())
            .build());
    TaskExecutionResult result = executeMockPublish(
            mockClient,
            config,
            new String[]{"README.md"},
            mockVariables
    );
    assertTrue(result.isSuccessful());
}
@Test
public void runAppAndBasicTest() {
    AmazonS3Client s3client = server.getSpringContext()
            .getBean(AmazonS3Client.class);
    assertThat(s3client != null, is(true));
    S3Configuration s3Configuration = server.getSpringContext()
            .getBean(S3Configuration.class);
    assertThat(s3Configuration.getAccessKey(), is(""));
    assertThat(s3Configuration.getSecretKey(), is(""));
    assertThat(s3Configuration.getSessionToken() == null, is(true));
    assertThat(s3Configuration.getRegion() == null, is(true));
    assertThat(s3Configuration.getUploadThreads(), is(5));
    assertThat(s3Configuration.getUploadThreadNamePrefix(), is("s3-transfer-manager-worker-"));
    S3Utils s3Utils = server.getSpringContext()
            .getBean(S3Utils.class);
    assertThat(s3Utils != null, is(true));
    TransferManager tm = server.getSpringContext()
            .getBean(TransferManager.class);
    assertThat(tm != null, is(true));
}
@Test
public void shouldBeFailureIfFetchConfigNotValid() {
    Map<String, String> mockVariables = mockEnvironmentVariables.build();
    config = new Config(Maps.builder()
            .with(Constants.REPO, Maps.builder().with("value", "Wrong").build())
            .with(Constants.PACKAGE, Maps.builder().with("value", "TESTPUBLISHS3ARTIFACTS").build())
            .with(Constants.DESTINATION, Maps.builder().with("value", "artifacts").build())
            .build());
    AmazonS3Client mockClient = mockClient();
    S3ArtifactStore store = new S3ArtifactStore(mockClient, bucket);
    doReturn(store).when(fetchExecutor).getS3ArtifactStore(any(GoEnvironment.class), eq(bucket));
    TaskExecutionResult result = fetchExecutor.execute(config, mockContext(mockVariables));
    assertFalse(result.isSuccessful());
    assertThat(result.message(), is("Failure while downloading artifacts - Please check Repository name or Package name configuration. Also, ensure that the appropriate S3 material is configured for the pipeline."));
}
@Override
public void setConf(AbstractConfig config) {
    this.config = (GeoIpOperationConfig) config;
    AmazonS3Client client = this.s3Factory.newInstance();
    AmazonS3URI uri = new AmazonS3URI(this.config.getGeoLiteDb());
    GetObjectRequest req = new GetObjectRequest(uri.getBucket(), uri.getKey());
    S3Object obj = client.getObject(req);
    try {
        this.databaseReader =
                new DatabaseReader.Builder(obj.getObjectContent()).withCache(new CHMCache()).build();
    } catch (IOException e) {
        throw new ConfigurationException("Unable to read " + this.config.getGeoLiteDb(), e);
    }
}
String getEndpoint(BackupRestoreContext ctx) throws URISyntaxException {
    URI uri = new URI(ctx.getExternalLocation());
    String scheme = uri.getScheme();
    if (scheme.equals(AmazonS3Client.S3_SERVICE_NAME)) {
        return Constants.S3_HOSTNAME;
    } else {
        String endpoint = scheme + "://" + uri.getHost();
        int port = uri.getPort();
        if (port != -1) {
            endpoint += ":" + Integer.toString(port);
        }
        return endpoint;
    }
}
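For reference, a hedged sketch of what this method yields for two illustrative locations, assuming AmazonS3Client.S3_SERVICE_NAME is "s3" and ctxWith(...) is a hypothetical helper that builds a BackupRestoreContext for a given external location:

// "s3://my-bucket/backups" resolves to the default S3 endpoint constant.
assert getEndpoint(ctxWith("s3://my-bucket/backups")).equals(Constants.S3_HOSTNAME);
// Any other scheme keeps the scheme, host, and explicit port of the URI.
assert getEndpoint(ctxWith("https://minio.local:9000/data")).equals("https://minio.local:9000");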
@Before
public void setUp()
        throws Exception
{
    assumeThat(TEST_S3_ENDPOINT, not(isEmptyOrNullString()));
    projectDir = folder.getRoot().toPath().resolve("foobar");
    config = folder.newFile().toPath();
    client = DigdagClient.builder()
            .host(server.host())
            .port(server.port())
            .build();
    AWSCredentials credentials = new BasicAWSCredentials(TEST_S3_ACCESS_KEY_ID, TEST_S3_SECRET_ACCESS_KEY);
    s3 = new AmazonS3Client(credentials);
    s3.setEndpoint(TEST_S3_ENDPOINT);
    s3.createBucket(archiveBucket);
    s3.createBucket(logStorageBucket);
}
@Bean
AmazonS3 s3(
        @Value("${s3.endpoint.url}") String s3EndpointUrl,
        @Value("${s3.endpoint.signingRegion}") String signingRegion) {
    return AmazonS3Client
            .builder()
            .withCredentials(new DefaultAWSCredentialsProviderChain())
            .withEndpointConfiguration(new EndpointConfiguration(s3EndpointUrl, signingRegion))
            .build();
}
@Primary
@Bean
AmazonS3 testS3(@Value("${s3.port}") int port) {
    return AmazonS3Client
            .builder()
            .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
            .withEndpointConfiguration(new EndpointConfiguration("http://127.0.0.1:" + port, "us-west-2"))
            .build();
}
@Before
public void setUp() {
    mockS3Client = Mockito.mock(AmazonS3Client.class);
    mockTagS3Object = new TagS3Object() {
        protected AmazonS3Client getClient() {
            // Record the client the processor would normally use, then substitute the mock.
            actualS3Client = client;
            return mockS3Client;
        }
    };
    runner = TestRunners.newTestRunner(mockTagS3Object);
}
@Test
public void shouldUploadALocalFileToS3WithSlashDestinationPrefix() {
    AmazonS3Client mockClient = mockClient();
    Config config = new Config(Maps.builder()
            .with(Constants.SOURCEDESTINATIONS, Maps.builder().with("value", "[{\"source\": \"target/*\", \"destination\": \"\"}]").build())
            .with(Constants.DESTINATION_PREFIX, Maps.builder().with("value", "/").build())
            .with(Constants.ARTIFACTS_BUCKET, Maps.builder().with("value", "").build())
            .build());
    TaskExecutionResult result = executeMockPublish(
            mockClient,
            config,
            new String[]{"README.md", "s3publish-0.1.31.jar"}
    );
    assertTrue(result.isSuccessful());
    final List<PutObjectRequest> allPutObjectRequests = getPutObjectRequests(mockClient, 2);
    PutObjectRequest readmePutRequest = allPutObjectRequests.get(0);
    assertThat(readmePutRequest.getBucketName(), is("testS3Bucket"));
    assertThat(readmePutRequest.getKey(), is("README.md"));
    assertNull(readmePutRequest.getMetadata());
    PutObjectRequest jarPutRequest = allPutObjectRequests.get(1);
    assertThat(jarPutRequest.getBucketName(), is("testS3Bucket"));
    assertThat(jarPutRequest.getKey(), is("s3publish-0.1.31.jar"));
    assertNull(jarPutRequest.getMetadata());
}
/**
 * Create client using AWSCredentials
 *
 * @deprecated use {@link #createClient(ProcessContext, AWSCredentialsProvider, ClientConfiguration)} instead
 */
@Override
protected AmazonS3Client createClient(final ProcessContext context, final AWSCredentials credentials, final ClientConfiguration config) {
    getLogger().info("Creating client with AWS credentials");
    return new AmazonS3Client(credentials, config);
}
@Override
public PutObjectResult putObject(PutObjectRequest request) throws Exception
{
    RefCountedClient holder = client.get();
    AmazonS3Client amazonS3Client = holder.useClient();
    try
    {
        return amazonS3Client.putObject(request);
    }
    finally
    {
        holder.release();
    }
}
/**
 * Reads the file from S3 and returns an InputStream for the contents of the file. This has been tested to also work
 * with SSE-KMS encrypted files.
 *
 * @return an open InputStream for the contents of the file in S3.
 */
@Override
@Nonnull
public S3BufferedInputStream get() {
    AmazonS3 s3Client = (amazonS3 == null) ? AmazonS3Client.builder().build() : amazonS3;
    return new S3BufferedInputStream(s3Bucket, s3Key, s3Client, DEFAULT_CACHING_FUNCTION);
}
@Override
public ObjectListing listNextBatchOfObjects(ObjectListing previousObjectListing) throws Exception
{
    RefCountedClient holder = client.get();
    AmazonS3Client amazonS3Client = holder.useClient();
    try
    {
        return amazonS3Client.listNextBatchOfObjects(previousObjectListing);
    }
    finally
    {
        holder.release();
    }
}
/**
 * Revokes all public ACL permissions.
 *
 * @param awsS3Client the AWS S3 client
 * @param s3BucketName the S3 bucket name
 */
private void revokeACLPublicPermission(AmazonS3Client awsS3Client, String s3BucketName) {
    AccessControlList bucketAcl;
    try {
        bucketAcl = awsS3Client.getBucketAcl(s3BucketName);
        List<Grant> grants = bucketAcl.getGrantsAsList();
        if (!CollectionUtils.isNullOrEmpty(grants)) {
            for (Grant grant : grants) {
                String granteeId = grant.getGrantee().getIdentifier();
                String permission = grant.getPermission().toString();
                // Revoke grants that give any authenticated user, or all users, open access.
                boolean isPublicGrantee = PacmanSdkConstants.ANY_S3_AUTHENTICATED_USER_URI.equalsIgnoreCase(granteeId)
                        || PacmanSdkConstants.ALL_S3_USER_URI.equalsIgnoreCase(granteeId);
                boolean isOpenPermission = permission.equalsIgnoreCase(PacmanSdkConstants.READ_ACCESS)
                        || permission.equalsIgnoreCase(PacmanSdkConstants.WRITE_ACCESS)
                        || permission.equalsIgnoreCase(PacmanSdkConstants.READ_ACP_ACCESS)
                        || permission.equalsIgnoreCase(PacmanSdkConstants.WRITE_ACP_ACCESS)
                        || permission.equalsIgnoreCase(PacmanSdkConstants.FULL_CONTROL);
                if (isPublicGrantee && isOpenPermission) {
                    bucketAcl.revokeAllPermissions(grant.getGrantee());
                }
            }
            awsS3Client.setBucketAcl(s3BucketName, bucketAcl);
        }
    } catch (AmazonS3Exception s3Exception) {
        LOGGER.error(String.format("AmazonS3Exception in revokeACLPublicPermission: %s", s3Exception.getMessage()));
        throw new RuleEngineRunTimeException(s3Exception);
    }
}
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request) throws Exception
{
    RefCountedClient holder = client.get();
    AmazonS3Client amazonS3Client = holder.useClient();
    try
    {
        return amazonS3Client.initiateMultipartUpload(request);
    }
    finally
    {
        holder.release();
    }
}
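The wrappers above cover the initiate step but not the completion step named in this page's title. A minimal sketch of a matching completeMultipartUpload override, assuming the same RefCountedClient holder pattern (this method is not part of the original source):

@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) throws Exception
{
    RefCountedClient holder = client.get();
    AmazonS3Client amazonS3Client = holder.useClient();
    try
    {
        // Delegate to the SDK; the request carries the uploadId and the collected PartETags.
        return amazonS3Client.completeMultipartUpload(request);
    }
    finally
    {
        holder.release();
    }
}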
@Override
public void execute(Configuration conf, String[] args) throws Exception {
    CmdLineParser parser = new CmdLineParser(this);
    String keyId = conf.get("fs.s3n.awsAccessKeyId");
    String keySecret = conf.get("fs.s3n.awsSecretAccessKey");
    s3 = new AmazonS3Client(new BasicAWSCredentials(keyId, keySecret));
    try {
        parser.parseArgument(args);
        ExecutorService executor = Executors.newFixedThreadPool(threads);
        List<Future> futures = new ArrayList<Future>();
        BufferedReader fin = new BufferedReader(new InputStreamReader(new FileInputStream(file), "UTF8"));
        try {
            for (String line = fin.readLine(); line != null; line = fin.readLine()) {
                futures.add(executor.submit(new FileCheckTask(new Path(line.trim()))));
            }
        } finally {
            fin.close();
        }
        for (Future f : futures) {
            f.get();
        }
        executor.shutdown();
    } catch (CmdLineException e) {
        System.err.println(e.getMessage());
        System.err.println("s3mper fs verify [options]");
        // print the list of available options
        parser.printUsage(System.err);
        System.err.println();
        System.err.println(" Example: s3mper fs verify " + parser.printExample(OptionHandlerFilter.ALL));
    }
}
@Override
public F.Promise<Void> delete(String key, String name) {
    Promise<Void> promise = Futures.promise();
    AmazonS3 amazonS3 = new AmazonS3Client(credentials);
    DeleteObjectRequest request = new DeleteObjectRequest(bucketName, key);
    request.withGeneralProgressListener(progressEvent -> {
        if (progressEvent.getEventType().isTransferEvent()) {
            if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_COMPLETED_EVENT)) {
                promise.success(null);
            } else if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_FAILED_EVENT)) {
                logger.error(progressEvent.toString());
                promise.failure(new Exception(progressEvent.toString()));
            }
        }
    });
    try {
        amazonS3.deleteObject(request);
    } catch (AmazonServiceException ase) {
        logAmazonServiceException(ase);
    } catch (AmazonClientException ace) {
        logAmazonClientException(ace);
    }
    return F.Promise.wrap(promise.future());
}