The following examples show how to use the com.amazonaws.services.s3.model.CannedAccessControlList API class, including typical instantiation and usage patterns; you can also follow the links to view the full source code on GitHub.
@Test
public void testDefaultAcl()
throws Exception
{
    // With no ACL type configured, uploads must fall back to the default: Private.
    Configuration config = new Configuration(false);
    try (PrestoS3FileSystem fileSystem = new PrestoS3FileSystem()) {
        MockAmazonS3 mockS3 = new MockAmazonS3();
        String bucketName = "test-bucket";
        fileSystem.initialize(new URI("s3n://" + bucketName + "/"), config);
        fileSystem.setS3Client(mockS3);
        // Creating and immediately closing a stream kicks off an upload against the mock.
        try (FSDataOutputStream stream = fileSystem.create(new Path("s3n://" + bucketName + "/test"))) {
            // initiate an upload by creating a stream & closing it immediately
        }
        assertEquals(CannedAccessControlList.Private, mockS3.getAcl());
    }
}
@Test
public void testFullBucketOwnerControlAcl()
throws Exception
{
    // Configuring S3_ACL_TYPE must select the matching canned ACL for uploads.
    Configuration config = new Configuration(false);
    config.set(S3_ACL_TYPE, "BUCKET_OWNER_FULL_CONTROL");
    try (PrestoS3FileSystem fileSystem = new PrestoS3FileSystem()) {
        MockAmazonS3 mockS3 = new MockAmazonS3();
        String bucketName = "test-bucket";
        fileSystem.initialize(new URI("s3n://" + bucketName + "/"), config);
        fileSystem.setS3Client(mockS3);
        // Creating and immediately closing a stream kicks off an upload against the mock.
        try (FSDataOutputStream stream = fileSystem.create(new Path("s3n://" + bucketName + "/test"))) {
            // initiate an upload by creating a stream & closing it immediately
        }
        assertEquals(CannedAccessControlList.BucketOwnerFullControl, mockS3.getAcl());
    }
}
/**
 * Uploads a multipart file to S3 under the given key with a public-read canned ACL.
 *
 * @param amazonS3     client used for the upload
 * @param fileToUpload incoming multipart payload
 * @param s3BucketName destination bucket name
 * @param key          destination object key
 * @return true on success; false when an I/O error occurred (logged)
 */
public boolean uploadFile(final AmazonS3 amazonS3, MultipartFile fileToUpload, String s3BucketName, String key) {
    File file = null;
    try {
        // Materialize the multipart payload as a local file (presumably a temp copy — TODO confirm AdminUtils.convert).
        file = AdminUtils.convert(fileToUpload);
        long size = fileToUpload.getSize();
        String contentType = fileToUpload.getContentType();
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType(contentType);
        metadata.setContentLength(size);
        // FIX: the metadata was previously built but never attached to the request, so the
        // caller-supplied content type was silently dropped.
        PutObjectRequest putObjectRequest = new PutObjectRequest(s3BucketName, key, file)
                .withCannedAcl(CannedAccessControlList.PublicRead)
                .withMetadata(metadata);
        amazonS3.putObject(putObjectRequest);
        return true;
    } catch (IOException exception) {
        log.error(UNEXPECTED_ERROR_OCCURRED, exception);
    } finally {
        // FIX: best-effort cleanup of the local copy, which previously leaked on every call.
        if (file != null) {
            file.delete();
        }
    }
    return false;
}
/**
 * Resolves the {@link CannedAccessControlList} matching a given {@code x-amz-acl} header value.
 *
 * @param cannedAcl S3 x-amz-acl value (case-insensitive); may be null
 * @return the corresponding CannedAccessControlList, or null when the input is null
 * @throws IllegalArgumentException when no canned ACL matches the given value
 */
public static CannedAccessControlList toCannedAccessControlList(String cannedAcl) {
    if (cannedAcl == null) {
        return null;
    }
    // The enum's toString() yields the lowercase wire name (e.g. "public-read"),
    // so a locale-independent lowercase of the input makes the comparison case-insensitive.
    final String normalized = cannedAcl.toLowerCase(Locale.ROOT);
    for (CannedAccessControlList candidate : CannedAccessControlList.values()) {
        if (candidate.toString().equals(normalized)) {
            return candidate;
        }
    }
    throw new IllegalArgumentException("CannedAccessControlList does not contain " + normalized);
}
/**
 * Uploads the given stream to the configured S3 bucket under {@code fileKey}.
 *
 * @param is       content to upload; always closed before this method returns from the try block
 * @param fileKey  full S3 object key to store under
 * @param fileName base name used only to build the returned display name
 * @param suffix   file extension, validated by validateFile
 * @param isPublic null defaults to public; selects PublicRead vs AuthenticatedRead canned ACL
 * @return {@code fileName + '.' + suffix} on success
 * @throws Exception BusinessException on invalid parameters or S3/IO failure
 */
public String upload(InputStream is, String fileKey, String fileName, String suffix, Boolean isPublic) throws Exception {
validateFile(is, suffix);
if (isPublic == null) {
// Unspecified visibility defaults to public.
isPublic = Boolean.TRUE;
}
if ((is != null) && (fileKey != null)) {
try {
// Buffer the whole stream up front so object metadata (e.g. length) can be derived from the bytes.
byte[] bytes = IOUtils.toByteArray(is);
s3Client.putObject(
new PutObjectRequest(
s3BucketConfig.getName(),
fileKey,
new ByteArrayInputStream(bytes),
S3ObjectMetadata.getObjectMetadata(bytes)
).withCannedAcl(isPublic ? CannedAccessControlList.PublicRead : CannedAccessControlList.AuthenticatedRead)
);
return fileName + '.' + suffix;
} catch (AmazonServiceException | IOException e) {
// NOTE(review): the original cause is dropped here; BusinessException carries only an error code.
throw new BusinessException(Validations.INVALID_S3_BUCKET_CREDENTIALS.getCode());
} finally {
// Always release the caller's stream once the upload attempt completes.
is.close();
}
} else {
throw new BusinessException(Validations.INVALID_PARAMETERS.getCode());
}
}
/**
 * Captures every upload parameter for a remote (agent-side) S3 upload task.
 * All arguments are stored as-is; no validation or copying is performed here.
 */
RemoteUploader(S3ClientOptions amazonS3ClientOptions, EnvVars envVars, TaskListener taskListener, String bucket, String path, Map<String, String> metadatas, Map<String, String> tags, CannedAccessControlList acl, String cacheControl, String contentEncoding, String contentType, String kmsId, String sseAlgorithm, String redirectLocation) {
    // Client / environment wiring.
    this.amazonS3ClientOptions = amazonS3ClientOptions;
    this.envVars = envVars;
    this.taskListener = taskListener;
    // Destination.
    this.bucket = bucket;
    this.path = path;
    // Per-object attributes.
    this.metadatas = metadatas;
    this.tags = tags;
    this.acl = acl;
    this.cacheControl = cacheControl;
    this.contentEncoding = contentEncoding;
    this.contentType = contentType;
    // Encryption and redirect settings.
    this.kmsId = kmsId;
    this.sseAlgorithm = sseAlgorithm;
    this.redirectLocation = redirectLocation;
}
/**
 * Captures every upload parameter for a remote S3 upload of a list of files.
 * All arguments are stored as-is; no validation or copying is performed here.
 */
RemoteListUploader(S3ClientOptions amazonS3ClientOptions, EnvVars envVars, TaskListener taskListener, List<File> fileList, String bucket, String path, Map<String, String> metadatas, Map<String, String> tags, CannedAccessControlList acl, final String cacheControl, final String contentEncoding, final String contentType, String kmsId, String sseAlgorithm) {
    // Client / environment wiring.
    this.amazonS3ClientOptions = amazonS3ClientOptions;
    this.envVars = envVars;
    this.taskListener = taskListener;
    // Sources and destination.
    this.fileList = fileList;
    this.bucket = bucket;
    this.path = path;
    // Per-object attributes.
    this.metadatas = metadatas;
    this.tags = tags;
    this.acl = acl;
    this.cacheControl = cacheControl;
    this.contentEncoding = contentEncoding;
    this.contentType = contentType;
    // Encryption settings.
    this.kmsId = kmsId;
    this.sseAlgorithm = sseAlgorithm;
}
@Test
public void gettersWorkAsExpectedForFileCase() throws Exception {
    // Configure every supported option on a copy step, then read each one back.
    S3CopyStep copyStep = new S3CopyStep("my-bucket", "my-path", "other-bucket", "other-path", false, false);
    copyStep.setKmsId("alias/foo");
    copyStep.setMetadatas(metas);
    copyStep.setAcl(CannedAccessControlList.PublicRead);
    copyStep.setCacheControl("my-cachecontrol");
    copyStep.setContentType("text/plain");
    copyStep.setSseAlgorithm("AES256");

    // Constructor arguments round-trip through the getters...
    Assert.assertEquals("my-bucket", copyStep.getFromBucket());
    Assert.assertEquals("my-path", copyStep.getFromPath());
    Assert.assertEquals("other-bucket", copyStep.getToBucket());
    Assert.assertEquals("other-path", copyStep.getToPath());
    // ...and so do the setter-configured options.
    Assert.assertEquals("alias/foo", copyStep.getKmsId());
    Assert.assertArrayEquals(metas, copyStep.getMetadatas());
    Assert.assertEquals(CannedAccessControlList.PublicRead, copyStep.getAcl());
    Assert.assertEquals("my-cachecontrol", copyStep.getCacheControl());
    Assert.assertEquals("text/plain", copyStep.getContentType());
    Assert.assertEquals("AES256", copyStep.getSseAlgorithm());
}
@Test
public void gettersWorkAsExpectedForFileCase() throws Exception {
    // Configure every supported option on an upload step, then read each one back.
    S3UploadStep uploadStep = new S3UploadStep("my-bucket", false, false);
    uploadStep.setFile("my-file");
    uploadStep.setText("my content text");
    uploadStep.setKmsId("alias/foo");
    uploadStep.setAcl(CannedAccessControlList.PublicRead);
    uploadStep.setCacheControl("my-cachecontrol");
    uploadStep.setSseAlgorithm("AES256");
    uploadStep.setRedirectLocation("/redirect");

    Assert.assertEquals("my-file", uploadStep.getFile());
    Assert.assertEquals("my content text", uploadStep.getText());
    Assert.assertEquals("my-bucket", uploadStep.getBucket());
    Assert.assertEquals(CannedAccessControlList.PublicRead, uploadStep.getAcl());
    Assert.assertEquals("my-cachecontrol", uploadStep.getCacheControl());
    Assert.assertEquals("AES256", uploadStep.getSseAlgorithm());
    Assert.assertEquals("alias/foo", uploadStep.getKmsId());
    Assert.assertEquals("/redirect", uploadStep.getRedirectLocation());
}
/**
 * CFML tag entry point: applies a canned ACL to a single S3 object.
 * Reports S3 failures through throwException rather than propagating them.
 */
public cfData execute( cfSession _session, cfArgStructData argStruct ) throws cfmRunTimeException{
    // Resolve credentials and a client for this request.
    AmazonKey amazonKey = getAmazonKey(_session, argStruct);
    AmazonS3 client = getAmazonS3(amazonKey);

    String bucket = getNamedStringParam(argStruct, "bucket", null);
    String key = getNamedStringParam(argStruct, "key", null);
    // S3 object keys never begin with a slash; strip a single leading one if present.
    if (key != null && key.charAt(0) == '/') {
        key = key.substring(1);
    }

    CannedAccessControlList acl = amazonKey.getAmazonCannedAcl(getNamedStringParam(argStruct, "acl", null));
    try {
        client.setObjectAcl(bucket, key, acl);
    }
    catch (Exception e) {
        throwException(_session, "AmazonS3: " + e.getMessage());
    }
    return cfBooleanData.TRUE;
}
/**
 * CFML tag entry point: applies a canned ACL to an entire S3 bucket.
 * Reports S3 failures through throwException rather than propagating them.
 */
public cfData execute( cfSession _session, cfArgStructData argStruct ) throws cfmRunTimeException{
    // Resolve credentials and a client for this request.
    AmazonKey amazonKey = getAmazonKey(_session, argStruct);
    AmazonS3 client = getAmazonS3(amazonKey);

    String bucket = getNamedStringParam(argStruct, "bucket", null);
    CannedAccessControlList acl = amazonKey.getAmazonCannedAcl(getNamedStringParam(argStruct, "acl", null));
    try {
        client.setBucketAcl(bucket, acl);
    }
    catch (Exception e) {
        throwException(_session, "AmazonS3: " + e.getMessage());
    }
    return cfBooleanData.TRUE;
}
/**
 * Maps an ACL name to its {@link CannedAccessControlList} value. Accepts both the hyphenated
 * wire form ("public-read") and the collapsed form ("publicread"), case-insensitively:
 * private | public-read | public-read-write | authenticated-read | bucket-owner-read |
 * bucket-owner-full-control | log-delivery-write
 *
 * @param acl ACL name; may be null (e.g. when the caller's "acl" parameter was omitted)
 * @return the matching canned ACL; {@link CannedAccessControlList#Private} when {@code acl}
 *         is null or unrecognized
 */
public CannedAccessControlList getAmazonCannedAcl(String acl) {
    // FIX: callers pass getNamedStringParam(..., "acl", null), which can yield null and
    // previously caused an NPE here. Null now falls back to the default, Private.
    if (acl == null)
        return CannedAccessControlList.Private;
    if (acl.equalsIgnoreCase("private"))
        return CannedAccessControlList.Private;
    else if (acl.equalsIgnoreCase("public-read") || acl.equalsIgnoreCase("publicread"))
        return CannedAccessControlList.PublicRead;
    else if (acl.equalsIgnoreCase("public-read-write") || acl.equalsIgnoreCase("publicreadwrite"))
        return CannedAccessControlList.PublicReadWrite;
    else if (acl.equalsIgnoreCase("authenticated-read") || acl.equalsIgnoreCase("authenticatedread"))
        return CannedAccessControlList.AuthenticatedRead;
    else if (acl.equalsIgnoreCase("bucket-owner-read") || acl.equalsIgnoreCase("bucketownerread"))
        return CannedAccessControlList.BucketOwnerRead;
    else if (acl.equalsIgnoreCase("bucket-owner-full-control") || acl.equalsIgnoreCase("bucketownerfullcontrol"))
        return CannedAccessControlList.BucketOwnerFullControl;
    else if (acl.equalsIgnoreCase("log-delivery-write") || acl.equalsIgnoreCase("logdeliverywrite"))
        return CannedAccessControlList.LogDeliveryWrite;
    else
        // Unrecognized names default to the most restrictive option.
        return CannedAccessControlList.Private;
}
/**
 * Uploads a blob, makes it publicly readable, then fetches it anonymously over plain HTTP
 * through the proxy and checks the bytes round-trip unchanged.
 */
@Test
public void testHttpClient() throws Exception {
String blobName = "blob-name";
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentLength(BYTE_SOURCE.size());
client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
metadata);
// Some backends cannot set per-object ACLs; fall back to opening the whole bucket there.
if (Quirks.NO_BLOB_ACCESS_CONTROL.contains(blobStoreType)) {
client.setBucketAcl(containerName,
CannedAccessControlList.PublicRead);
} else {
client.setObjectAcl(containerName, blobName,
CannedAccessControlList.PublicRead);
}
HttpClient httpClient = context.utils().http();
// Build an unauthenticated URL pointing at the proxy's secure port for this blob.
URI uri = new URI(s3Endpoint.getScheme(), s3Endpoint.getUserInfo(),
s3Endpoint.getHost(), s3Proxy.getSecurePort(),
servicePath + "/" + containerName + "/" + blobName,
/*query=*/ null, /*fragment=*/ null);
// Anonymous fetch must succeed and return exactly the uploaded content.
try (InputStream actual = httpClient.get(uri);
InputStream expected = BYTE_SOURCE.openStream()) {
assertThat(actual).hasContentEqualTo(expected);
}
}
/**
 * Uploads a file and then restricts its ACL so it is not publicly readable.
 * Reuses the common upload path and tightens the ACL afterwards.
 */
@Override
public void uploadPrivateFile(Path filePath, InputStream content) {
    uploadPublicFile(filePath, content);
    String destFilePathString = filePath.toString();
    // BUG FIX: this previously set CannedAccessControlList.PublicRead, leaving "private"
    // uploads world-readable. Private grants access to the object owner only.
    s3.setObjectAcl(bucketName, destFilePathString, CannedAccessControlList.Private);
}
/**
 * Uploads a byte payload to {@code basePath + path} with a public-read ACL,
 * setting content length and a content type derived from the path.
 */
private void upload(String path, byte[] content) {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(content.length);
    objectMetadata.setContentType(getContentType(path));
    PutObjectRequest putRequest = new PutObjectRequest(bucketName, basePath + path, new ByteArrayInputStream(content),
            objectMetadata).withCannedAcl(CannedAccessControlList.PublicRead);
    s3Client.putObject(putRequest);
}
/**
 * Create CannedAccessControlList if {@link #CANNED_ACL} property specified.
 *
 * @param context ProcessContext
 * @param flowFile FlowFile
 * @return CannedAccessControlList or null if not specified
 */
protected final CannedAccessControlList createCannedACL(final ProcessContext context, final FlowFile flowFile) {
    // Resolve the property with flow-file attribute expressions applied.
    final String aclName = context.getProperty(CANNED_ACL).evaluateAttributeExpressions(flowFile).getValue();
    if (StringUtils.isEmpty(aclName)) {
        return null;
    }
    // valueOf throws IllegalArgumentException for names that are not enum constants.
    return CannedAccessControlList.valueOf(aclName);
}
/**
 * Resolves an ACL setting string to its {@link CannedAccessControlList} constant by
 * matching against each constant's wire name (its {@code toString()} value).
 *
 * @param aclStr ACL wire name, e.g. "public-read"
 * @return the matching canned ACL
 * @throws IllegalArgumentException when no constant matches
 */
public CannedAccessControlList calculateAcls(String aclStr) {
    for (CannedAccessControlList candidate : CannedAccessControlList.values()) {
        if (candidate.toString().equals(aclStr)) {
            return candidate;
        }
    }
    throw new IllegalArgumentException(String.format("'%s' is not a valid ACL setting", aclStr));
}
/**
 * Uploads the configured stream to S3 as a public-read object, attaching content length
 * and (when known) content type metadata.
 */
@Override
public void run() {
    ObjectMetadata meta_data = new ObjectMetadata();
    // Content type is optional; length is required for stream-based uploads.
    if (p_content_type != null) {
        meta_data.setContentType(p_content_type);
    }
    meta_data.setContentLength(p_size);
    PutObjectRequest putObjectRequest = new PutObjectRequest(p_bucket_name, p_s3_key, p_file_stream, meta_data);
    putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
    // FIX: the PutObjectResult was previously captured into an unused local; discard it.
    s3Client.putObject(putObjectRequest);
}
/**
 * Verifies that every copier option supplied via {@code copierOptions} overrides its default
 * and is propagated into the S3MapReduceCpOptions handed to the executor.
 */
@Test
public void overwriteAllCopierOptions() throws Exception {
// Override every supported option with a non-default value.
copierOptions.put(CREDENTIAL_PROVIDER, "jceks://hdfs/foo/bar.jceks");
copierOptions.put(MULTIPART_UPLOAD_CHUNK_SIZE, "1234");
copierOptions.put(S3_SERVER_SIDE_ENCRYPTION, "true");
copierOptions.put(STORAGE_CLASS, "reduced_redundancy");
copierOptions.put(TASK_BANDWIDTH, "567");
copierOptions.put(NUMBER_OF_WORKERS_PER_MAP, "89");
copierOptions.put(MULTIPART_UPLOAD_THRESHOLD, "123456");
copierOptions.put(MAX_MAPS, "78");
copierOptions.put(COPY_STRATEGY, "the-strategy");
copierOptions.put(LOG_PATH, "hdfs://path/to/logs/");
copierOptions.put(REGION, "us-east-1");
copierOptions.put(IGNORE_FAILURES, "true");
copierOptions.put(CANNED_ACL, CannedAccessControlList.BucketOwnerFullControl.toString());
S3MapReduceCpCopier copier = new S3MapReduceCpCopier(conf, sourceDataBaseLocation, Collections.<Path>emptyList(),
replicaDataLocation, copierOptions, executor, metricRegistry);
Metrics metrics = copier.copy();
assertThat(metrics, not(nullValue()));
// Capture the options actually passed to the (mocked) executor...
verify(executor).exec(confCaptor.capture(), optionsCaptor.capture());
S3MapReduceCpOptions options = optionsCaptor.getValue();
// ...and check that each override round-tripped, with string values parsed into typed fields.
assertThat(options.getSources(), is(Arrays.asList(sourceDataBaseLocation)));
assertThat(options.getTarget(), is(replicaDataLocation.toUri()));
assertThat(options.getCredentialsProvider(), is(URI.create("jceks://hdfs/foo/bar.jceks")));
assertThat(options.getMultipartUploadPartSize(), is(1234L));
assertThat(options.isS3ServerSideEncryption(), is(true));
assertThat(options.getStorageClass(), is(StorageClass.ReducedRedundancy.toString()));
assertThat(options.getMaxBandwidth(), is(567L));
assertThat(options.getNumberOfUploadWorkers(), is(89));
assertThat(options.getMultipartUploadThreshold(), is(123456L));
assertThat(options.getMaxMaps(), is(78));
assertThat(options.getCopyStrategy(), is("the-strategy"));
assertThat(options.getLogPath(), is(new Path("hdfs://path/to/logs/")));
assertThat(options.getRegion(), is(Regions.US_EAST_1.getName()));
assertThat(options.isIgnoreFailures(), is(true));
assertThat(options.getCannedAcl(), is(CannedAccessControlList.BucketOwnerFullControl.toString()));
}
/**
 * Returns the canned ACL configured in the copier options, or null when none was set.
 * The option value is the ACL's x-amz-acl wire name.
 */
public CannedAccessControlList getCannedAcl() {
    final String aclName = MapUtils.getString(copierOptions, Keys.CANNED_ACL.keyName(), null);
    return (aclName == null) ? null : CannedAclUtils.toCannedAccessControlList(aclName);
}
/**
 * Verifies that a CANNED_ACL copier option is applied to the CopyObjectRequest
 * submitted to the TransferManager during an S3-to-S3 copy.
 */
@Test
public void copyCannedAcl() throws Exception {
client.putObject("source", "data", inputData);
Path sourceBaseLocation = new Path("s3://source/");
Path replicaLocation = new Path("s3://target/");
List<Path> sourceSubLocations = new ArrayList<>();
// Configure the copier with a bucket-owner-full-control canned ACL.
Map<String, Object> copierOptions = new HashMap<>();
copierOptions
.put(S3S3CopierOptions.Keys.CANNED_ACL.keyName(), CannedAccessControlList.BucketOwnerFullControl.toString());
S3S3CopierOptions customOptions = new S3S3CopierOptions(copierOptions);
// Mock the transfer layer so the request can be captured instead of executed.
TransferManagerFactory mockedTransferManagerFactory = Mockito.mock(TransferManagerFactory.class);
TransferManager mockedTransferManager = Mockito.mock(TransferManager.class);
when(mockedTransferManagerFactory.newInstance(any(AmazonS3.class), eq(customOptions)))
.thenReturn(mockedTransferManager);
Copy copy = Mockito.mock(Copy.class);
when(mockedTransferManager
.copy(any(CopyObjectRequest.class), any(AmazonS3.class), any(TransferStateChangeListener.class)))
.thenReturn(copy);
TransferProgress transferProgress = new TransferProgress();
when(copy.getProgress()).thenReturn(transferProgress);
S3S3Copier s3s3Copier = new S3S3Copier(sourceBaseLocation, sourceSubLocations, replicaLocation, s3ClientFactory,
mockedTransferManagerFactory, listObjectsRequestFactory, registry, customOptions);
s3s3Copier.copy();
// The captured CopyObjectRequest must carry the configured canned ACL.
ArgumentCaptor<CopyObjectRequest> argument = ArgumentCaptor.forClass(CopyObjectRequest.class);
verify(mockedTransferManager).copy(argument.capture(), any(AmazonS3.class), any(TransferStateChangeListener.class));
CopyObjectRequest copyObjectRequest = argument.getValue();
assertThat(copyObjectRequest.getCannedAccessControlList(), is(CannedAccessControlList.BucketOwnerFullControl));
}
/**
 * Verifies that setting only the canned ACL on the builder produces options where the ACL
 * is applied and every other field retains its ConfigurationVariable default.
 */
@Test
public void builderWithCannedAcl() {
S3MapReduceCpOptions options = S3MapReduceCpOptions
.builder(SOURCES, TARGET)
.cannedAcl(CannedAccessControlList.BucketOwnerFullControl.toString())
.build();
// All non-overridden options must keep their documented defaults.
assertThat(options.isHelp(), is(false));
assertThat(options.isBlocking(), is(true));
assertThat(options.getSources(), is(SOURCES));
assertThat(options.getTarget(), is(TARGET));
assertThat(options.getCredentialsProvider(), is(ConfigurationVariable.CREDENTIAL_PROVIDER.defaultURIValue()));
assertThat(options.getMultipartUploadPartSize(),
is(ConfigurationVariable.MINIMUM_UPLOAD_PART_SIZE.defaultLongValue()));
assertThat(options.isS3ServerSideEncryption(),
is(ConfigurationVariable.S3_SERVER_SIDE_ENCRYPTION.defaultBooleanValue()));
assertThat(options.getStorageClass(), is(ConfigurationVariable.STORAGE_CLASS.defaultValue()));
assertThat(options.getMaxBandwidth(), is(ConfigurationVariable.MAX_BANDWIDTH.defaultLongValue()));
assertThat(options.getNumberOfUploadWorkers(),
is(ConfigurationVariable.NUMBER_OF_UPLOAD_WORKERS.defaultIntValue()));
assertThat(options.getMultipartUploadThreshold(),
is(ConfigurationVariable.MULTIPART_UPLOAD_THRESHOLD.defaultLongValue()));
assertThat(options.getMaxMaps(), is(ConfigurationVariable.MAX_MAPS.defaultIntValue()));
assertThat(options.getCopyStrategy(), is(ConfigurationVariable.COPY_STRATEGY.defaultValue()));
assertThat(options.getLogPath(), is(nullValue()));
assertThat(options.getRegion(), is(ConfigurationVariable.REGION.defaultValue()));
assertThat(options.isIgnoreFailures(), is(ConfigurationVariable.IGNORE_FAILURES.defaultBooleanValue()));
assertThat(options.getS3EndpointUri(), is(ConfigurationVariable.S3_ENDPOINT_URI.defaultURIValue()));
assertThat(options.getUploadRetryCount(), is(ConfigurationVariable.UPLOAD_RETRY_COUNT.defaultIntValue()));
assertThat(options.getUploadRetryDelayMs(), is(ConfigurationVariable.UPLOAD_RETRY_DELAY_MS.defaultLongValue()));
assertThat(options.getUploadBufferSize(), is(ConfigurationVariable.UPLOAD_BUFFER_SIZE.defaultIntValue()));
// The single overridden option must be reflected.
assertThat(options.getCannedAcl(), is(CannedAccessControlList.BucketOwnerFullControl.toString()));
assertThat(options.getAssumeRole(), is(ConfigurationVariable.ASSUME_ROLE.defaultValue()));
}
/**
 * Uploads the given stream to the configured CDN bucket, keyed by {@code fileName.suffix}.
 *
 * @param is       content to upload; always closed before this method returns from the try block
 * @param fileName base name; the object key and return value are {@code fileName + '.' + suffix}
 * @param suffix   file extension, validated by validateFile
 * @param isPublic null defaults to public; selects PublicRead vs AuthenticatedRead canned ACL
 * @return {@code fileName + '.' + suffix} on success
 * @throws Exception BusinessException on invalid parameters or S3/IO failure
 */
public String upload(InputStream is, String fileName, String suffix, Boolean isPublic) throws Exception {
validateFile(is, suffix);
if (isPublic == null) {
// Unspecified visibility defaults to public.
isPublic = Boolean.TRUE;
}
if ((is != null) && (fileName != null)) {
try {
// Buffer the whole stream up front so object metadata (e.g. length) can be derived from the bytes.
byte[] bytes = IOUtils.toByteArray(is);
s3Client.putObject(
new PutObjectRequest(
cdnConfig.getName(),
fileName + '.' + suffix,
new ByteArrayInputStream(bytes),
S3ObjectMetadata.getObjectMetadata(bytes)
).withCannedAcl(isPublic ? CannedAccessControlList.PublicRead : CannedAccessControlList.AuthenticatedRead)
);
return fileName + '.' + suffix;
} catch (AmazonServiceException | IOException e) {
// NOTE(review): the original cause is dropped here; BusinessException carries only an error code.
throw new BusinessException(Validations.INVALID_S3_BUCKET_CREDENTIALS.getCode());
} finally {
// Always release the caller's stream once the upload attempt completes.
is.close();
}
} else {
throw new BusinessException(Validations.INVALID_PARAMETERS.getCode());
}
}
/**
 * Uploads the given (temporary) mp3 file to S3 with a public-read ACL and then removes
 * the local file.
 *
 * @param file    local temporary audio file; deleted after the upload attempt
 * @param bucket  destination bucket
 * @param path    destination object key
 * @param region  AWS region used to build the client
 * @param roleArn role assumed by the client
 */
public void uploadFileToS3(final File file, final String bucket, final String path, final String region, final String roleArn) {
    try {
        // upload mp3 to S3 bucket
        final PutObjectRequest s3Put = new PutObjectRequest(bucket, path, file).withCannedAcl(CannedAccessControlList.PublicRead);
        getS3Client(region, roleArn).putObject(s3Put);
    } finally {
        // FIX: previously the temp file leaked whenever putObject threw; delete it unconditionally.
        if (!file.delete()) {
            logger.warning("Could not delete mp3 temporary audio file.");
        }
    }
}
@Test
public void aclCanBeCustomized() {
    // Boot a minimal Spring context with externally supplied s3.* properties.
    AnnotationConfigApplicationContext applicationContext = new AnnotationConfigApplicationContext();
    EnvironmentTestUtils.addEnvironment(applicationContext, "s3.bucket:foo", "s3.acl:AuthenticatedRead");
    applicationContext.register(Conf.class);
    applicationContext.refresh();
    // The "AuthenticatedRead" property value must bind to the matching enum constant.
    AmazonS3SinkProperties properties = applicationContext.getBean(AmazonS3SinkProperties.class);
    assertThat(properties.getAcl(), equalTo(CannedAccessControlList.AuthenticatedRead));
    applicationContext.close();
}
/**
 * Starts an S3Proxy instance backed by a blob store, creates a public test container with
 * one blob, and prepares both a presigned and a plain public GET URL for the tests.
 */
@Before
public void setUp() throws Exception {
// Launch the proxy with CORS configuration and capture its credentials/endpoints.
TestUtils.S3ProxyLaunchInfo info = TestUtils.startS3Proxy(
"s3proxy-cors.conf");
awsCreds = new BasicAWSCredentials(info.getS3Identity(),
info.getS3Credential());
context = info.getBlobStore().getContext();
s3Proxy = info.getS3Proxy();
s3Endpoint = info.getSecureEndpoint();
servicePath = info.getServicePath();
s3EndpointConfig = new EndpointConfiguration(
s3Endpoint.toString() + servicePath, "us-east-1");
// Real AWS SDK client pointed at the local proxy endpoint.
s3Client = AmazonS3ClientBuilder.standard()
.withCredentials(new AWSStaticCredentialsProvider(awsCreds))
.withEndpointConfiguration(s3EndpointConfig)
.build();
httpClient = getHttpClient();
containerName = createRandomContainerName();
info.getBlobStore().createContainerInLocation(null, containerName);
// Make the whole container publicly readable so unauthenticated GETs work.
s3Client.setBucketAcl(containerName,
CannedAccessControlList.PublicRead);
String blobName = "test";
ByteSource payload = ByteSource.wrap("blob-content".getBytes(
StandardCharsets.UTF_8));
Blob blob = info.getBlobStore().blobBuilder(blobName)
.payload(payload).contentLength(payload.size()).build();
info.getBlobStore().putBlob(containerName, blob);
// One-hour presigned URL plus the plain public URL for the same blob.
Date expiration = new Date(System.currentTimeMillis() +
TimeUnit.HOURS.toMillis(1));
presignedGET = s3Client.generatePresignedUrl(containerName, blobName,
expiration, HttpMethod.GET).toURI();
publicGET = s3Client.getUrl(containerName, blobName).toURI();
}
/**
 * Builds a public-read PutObjectRequest for the given file, attaches a progress listener that
 * reports throughput on completion, lets registered interceptors adjust the request, and
 * enqueues it for upload.
 *
 * @param bucket     destination bucket
 * @param key        destination object key
 * @param file       local file to upload
 * @param lastUpload marker flag stored alongside the request in the queue
 */
public void queueUpload(final String bucket, final String key, final File file, boolean lastUpload) {
if (VERBOSE) Log.i(TAG, "Queueing upload " + key);
final PutObjectRequest por = new PutObjectRequest(bucket, key, file);
// Progress listener: measures wall-clock upload time and notifies the broadcaster on completion.
por.setGeneralProgressListener(new ProgressListener() {
final String url = "https://" + bucket + ".s3.amazonaws.com/" + key;
private long uploadStartTime;
@Override
public void progressChanged(com.amazonaws.event.ProgressEvent progressEvent) {
try {
if (progressEvent.getEventCode() == ProgressEvent.STARTED_EVENT_CODE) {
uploadStartTime = System.currentTimeMillis();
} else if (progressEvent.getEventCode() == com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE) {
// Derive average throughput from elapsed time and file size.
long uploadDurationMillis = System.currentTimeMillis() - uploadStartTime;
int bytesPerSecond = (int) (file.length() / (uploadDurationMillis / 1000.0));
if (VERBOSE)
Log.i(TAG, "Uploaded " + file.length() / 1000.0 + " KB in " + (uploadDurationMillis) + "ms (" + bytesPerSecond / 1000.0 + " KBps)");
mBroadcaster.onS3UploadComplete(new S3UploadEvent(file, url, bytesPerSecond));
} else if (progressEvent.getEventCode() == ProgressEvent.FAILED_EVENT_CODE) {
Log.w(TAG, "Upload failed for " + url);
}
} catch (Exception excp) {
// Listener errors must not break the upload pipeline; log and continue.
Log.e(TAG, "ProgressListener error");
excp.printStackTrace();
}
}
});
por.setCannedAcl(CannedAccessControlList.PublicRead);
// Give still-alive interceptors a chance to modify the request before it is queued.
for (WeakReference<S3RequestInterceptor> ref : mInterceptors) {
S3RequestInterceptor interceptor = ref.get();
if (interceptor != null) {
interceptor.interceptRequest(por);
}
}
mQueue.add(new Pair<>(por, lastUpload));
}
/**
 * Stores a public-read object under the given key with a max-age cache hint,
 * using the Standard-Infrequent-Access storage class.
 */
private void saveFile(String key, ByteString data, int cache_seconds)
{
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setCacheControl("max-age=" + cache_seconds);
    metadata.setContentLength(data.size());
    PutObjectRequest request = new PutObjectRequest(bucket, key, data.newInput(), metadata);
    request.setCannedAcl(CannedAccessControlList.PublicRead);
    request.setStorageClass(com.amazonaws.services.s3.model.StorageClass.StandardInfrequentAccess.toString());
    s3.putObject(request);
}
/**
 * Create CannedAccessControlList if {@link #CANNED_ACL} property specified.
 *
 * @param context ProcessContext
 * @param flowFile FlowFile
 * @return CannedAccessControlList or null if not specified
 */
protected final CannedAccessControlList createCannedACL(final ProcessContext context, final FlowFile flowFile) {
    // Empty/blank property means "no canned ACL"; otherwise the value must be an enum constant name.
    final String value = context.getProperty(CANNED_ACL).evaluateAttributeExpressions(flowFile).getValue();
    return StringUtils.isEmpty(value) ? null : CannedAccessControlList.valueOf(value);
}
/**
 * Configures an S3 bucket as a publicly readable static website (index.html / error.html).
 * For AWS endpoints the regional endpoint is resolved from the bucket's location; for any
 * other endpoint path-style access is enabled. Failures are reported to the UI text area.
 *
 * @param object     unused here; kept for signature compatibility with callers
 * @param access_key AWS access key id
 * @param secret_key AWS secret key
 * @param endpoint   service endpoint (AWS or S3-compatible)
 * @param bucket     bucket to expose as a website
 */
void setBUCKETwebsite(String object, String access_key, String secret_key, String endpoint, String bucket) {
    try {
        AWSCredentials credentials = new BasicAWSCredentials(access_key, secret_key);
        AmazonS3 s3Client = new AmazonS3Client(credentials,
                new ClientConfiguration());
        if (endpoint.contains("amazonaws.com")) {
            // Resolve the regional endpoint from the bucket's reported location.
            String aws_endpoint = s3Client.getBucketLocation(new GetBucketLocationRequest(bucket));
            if (aws_endpoint.contains("US")) {
                s3Client.setEndpoint("https://s3.amazonaws.com");
            } else if (aws_endpoint.contains("us-west") || aws_endpoint.contains("eu-west")
                    || aws_endpoint.contains("ap-") || aws_endpoint.contains("sa-east-1")) {
                // FIX: these four identical branches were collapsed; older regions use the
                // legacy "s3-<region>" hostname form.
                s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
            } else {
                s3Client.setEndpoint("https://s3." + aws_endpoint + ".amazonaws.com");
            }
        } else {
            // Non-AWS endpoint: S3-compatible stores generally require path-style addressing.
            s3Client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
            s3Client.setEndpoint(endpoint);
        }
        // FIX: result was previously stored in an unused local; the call is kept in case
        // callers rely on it failing fast for unreachable buckets — TODO confirm it is needed.
        s3Client.getBucketWebsiteConfiguration(bucket);
        s3Client.setBucketAcl(bucket, CannedAccessControlList.PublicRead);
        s3Client.setBucketWebsiteConfiguration(bucket, new BucketWebsiteConfiguration("index.html", "error.html"));
    } catch (Exception setACLpublic) {
        // FIX: surface the actual failure instead of a bare generic message.
        mainFrame.jTextArea1.append("\nException occurred in ACL: " + setACLpublic.getMessage());
    }
}