com.amazonaws.services.s3.model.PutObjectRequest#setCannedAcl() Code Examples

The following examples show how to use com.amazonaws.services.s3.model.PutObjectRequest#setCannedAcl(). Each one is taken from an open-source project, and you can follow the project link to view the full source on GitHub.
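Before the project examples, here is a minimal, self-contained sketch of the basic call. The bucket name, object key, and file name are placeholder values, and the client is assumed to pick up credentials and region from the SDK's default provider chain:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class SetCannedAclDemo {
    public static void main(String[] args) {
        // Assumes credentials and region come from the default provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // "my-bucket" and "docs/report.pdf" are placeholder values.
        PutObjectRequest request =
                new PutObjectRequest("my-bucket", "docs/report.pdf", new File("report.pdf"));

        // Attach a predefined (canned) S3 ACL to the upload request;
        // PublicRead makes the uploaded object readable by anyone.
        request.setCannedAcl(CannedAccessControlList.PublicRead);

        s3.putObject(request);
    }
}

Every example below follows this same pattern: build a PutObjectRequest, call setCannedAcl with a CannedAccessControlList value (either hard-coded or derived from configuration), and hand the request to the client.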

Example 1  Project: hadoop   File: S3AFileSystem.java
private void createEmptyObject(final String bucketName, final String objectName)
    throws AmazonClientException, AmazonServiceException {
  final InputStream im = new InputStream() {
    @Override
    public int read() throws IOException {
      // An empty stream: always signal end-of-file immediately.
      return -1;
    }
  };

  final ObjectMetadata om = new ObjectMetadata();
  om.setContentLength(0L);
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, im, om);
  putObjectRequest.setCannedAcl(cannedACL);
  s3.putObject(putObjectRequest);
  statistics.incrementWriteOps(1);
}
 
Example 2  Project: big-c   File: S3AFileSystem.java

The big-c copy of createEmptyObject is byte-for-byte identical to the hadoop version in Example 1, so it is not repeated here.
 
Example 3  Project: aws-photosharing-example   File: UploadThread.java
@Override
public void run() {
    ObjectMetadata meta_data = new ObjectMetadata();
    if (p_content_type != null)
        meta_data.setContentType(p_content_type);

    meta_data.setContentLength(p_size);

    PutObjectRequest putObjectRequest = new PutObjectRequest(p_bucket_name, p_s3_key, p_file_stream, meta_data);
    putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
    s3Client.putObject(putObjectRequest);
}
 
Example 4  Project: hadoop   File: S3AFastOutputStream.java
private void putObject() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket,
        key);
  }
  final ObjectMetadata om = createDefaultMetadata();
  om.setContentLength(buffer.size());
  final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
      new ByteArrayInputStream(buffer.toByteArray()), om);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setGeneralProgressListener(progressListener);
  ListenableFuture<PutObjectResult> putObjectResult =
      executorService.submit(new Callable<PutObjectResult>() {
        @Override
        public PutObjectResult call() throws Exception {
          return client.putObject(putObjectRequest);
        }
      });
  //wait for completion
  try {
    putObjectResult.get();
  } catch (InterruptedException ie) {
    LOG.warn("Interrupted object upload:" + ie, ie);
    Thread.currentThread().interrupt();
  } catch (ExecutionException ee) {
    throw new IOException("Regular upload failed", ee.getCause());
  }
}
 
Example 5  Project: big-c   File: S3AFastOutputStream.java

The big-c copy of putObject is identical to the hadoop version in Example 4, so it is not repeated here.
 
Example 6  Project: kickflip-android-sdk   File: S3BroadcastManager.java
public void queueUpload(final String bucket, final String key, final File file, boolean lastUpload) {
    if (VERBOSE) Log.i(TAG, "Queueing upload " + key);

    final PutObjectRequest por = new PutObjectRequest(bucket, key, file);
    por.setGeneralProgressListener(new ProgressListener() {
        final String url = "https://" + bucket + ".s3.amazonaws.com/" + key;
        private long uploadStartTime;

        @Override
        public void progressChanged(com.amazonaws.event.ProgressEvent progressEvent) {
            try {
                if (progressEvent.getEventCode() == ProgressEvent.STARTED_EVENT_CODE) {
                    uploadStartTime = System.currentTimeMillis();
                } else if (progressEvent.getEventCode() == com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE) {
                    long uploadDurationMillis = System.currentTimeMillis() - uploadStartTime;
                    int bytesPerSecond = (int) (file.length() / (uploadDurationMillis / 1000.0));
                    if (VERBOSE)
                        Log.i(TAG, "Uploaded " + file.length() / 1000.0 + " KB in " + (uploadDurationMillis) + "ms (" + bytesPerSecond / 1000.0 + " KBps)");
                    mBroadcaster.onS3UploadComplete(new S3UploadEvent(file, url, bytesPerSecond));
                } else if (progressEvent.getEventCode() == ProgressEvent.FAILED_EVENT_CODE) {
                    Log.w(TAG, "Upload failed for " + url);
                }
            } catch (Exception excp) {
                Log.e(TAG, "ProgressListener error");
                excp.printStackTrace();
            }
        }
    });
    por.setCannedAcl(CannedAccessControlList.PublicRead);
    for (WeakReference<S3RequestInterceptor> ref : mInterceptors) {
        S3RequestInterceptor interceptor = ref.get();
        if (interceptor != null) {
            interceptor.interceptRequest(por);
        }
    }
    mQueue.add(new Pair<>(por, lastUpload));
}
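A note on Example 6: it compares integer event codes such as STARTED_EVENT_CODE, which recent AWS SDK for Java 1.x releases deprecate in favor of ProgressEvent#getEventType(). A sketch of the equivalent check, assuming that newer API is available:

import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressEventType;
import com.amazonaws.event.ProgressListener;

ProgressListener listener = new ProgressListener() {
    @Override
    public void progressChanged(ProgressEvent progressEvent) {
        // Newer 1.x API: compare typed events instead of integer codes.
        if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
            // handle upload completion
        }
    }
};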
 
Example 7  Project: jelectrum   File: BlockRepoSaver.java
private void saveFile(String key, ByteString data, int cache_seconds)
{
  ObjectMetadata omd = new ObjectMetadata();
  omd.setCacheControl("max-age=" + cache_seconds);
  omd.setContentLength(data.size());


  PutObjectRequest put = new PutObjectRequest(bucket, key, data.newInput(), omd);
  put.setCannedAcl(CannedAccessControlList.PublicRead);
  put.setStorageClass(com.amazonaws.services.s3.model.StorageClass.StandardInfrequentAccess.toString());

  s3.putObject(put);

}
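A note on Example 7: the toString() conversion is not strictly necessary, since PutObjectRequest#setStorageClass is overloaded to accept either a String or the StorageClass enum. The same line can be written against the enum directly:

// Equivalent to the string form above; setStorageClass also accepts the enum value.
put.setStorageClass(com.amazonaws.services.s3.model.StorageClass.StandardInfrequentAccess);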
 
Example 8  Project: crate   File: S3BlobContainer.java
/**
 * Uploads a blob using a single upload request
 */
void executeSingleUpload(final S3BlobStore blobStore,
                         final String blobName,
                         final InputStream input,
                         final long blobSize) throws IOException {

    // Extra safety checks
    if (blobSize > MAX_FILE_SIZE.getBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE);
    }
    if (blobSize > blobStore.bufferSizeInBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
    }

    final ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(blobSize);
    if (blobStore.serverSideEncryption()) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md);
    putRequest.setStorageClass(blobStore.getStorageClass());
    putRequest.setCannedAcl(blobStore.getCannedACL());

    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        clientReference.client().putObject(putRequest);
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
    }
}
 
Example 9  Project: hadoop   File: S3AOutputStream.java
@Override
public synchronized void close() throws IOException {
  if (closed) {
    return;
  }

  backupStream.close();
  if (LOG.isDebugEnabled()) {
    LOG.debug("OutputStream for key '" + key + "' closed. Now beginning upload");
    LOG.debug("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);
  }


  try {
    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
      om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    Upload upload = transfers.upload(putObjectRequest);

    ProgressableProgressListener listener = 
      new ProgressableProgressListener(upload, progress, statistics);
    upload.addProgressListener(listener);

    upload.waitForUploadResult();

    long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
    if (statistics != null && delta != 0) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
      }
      statistics.incrementBytesWritten(delta);
    }

    // This will delete unnecessary fake parent directories
    fs.finishedWrite(key);
  } catch (InterruptedException e) {
    throw new IOException(e);
  } finally {
    if (!backupFile.delete()) {
      LOG.warn("Could not delete temporary s3a file: {}", backupFile);
    }
    super.close();
    closed = true;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("OutputStream for key '" + key + "' upload complete");
  }
}
 
Example 10  Project: hadoop   File: S3AFileSystem.java
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
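Examples 9 and 10 both call transfers.upload(putObjectRequest) against a TransferManager field that the snippets do not show being created. A minimal sketch of how such an instance is typically built (TransferManagerBuilder is the SDK 1.11+ entry point; earlier releases constructed TransferManager directly):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;

AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
// Reuses the configured client; uploads large enough to benefit are split
// into multipart uploads automatically.
TransferManager transfers = TransferManagerBuilder.standard()
        .withS3Client(s3)
        .build();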
 
Example 11  Project: big-c   File: S3AOutputStream.java

The big-c copy of close is identical to the hadoop version in Example 9, so it is not repeated here.
 
Example 12  Project: big-c   File: S3AFileSystem.java

The big-c copy of copyFromLocalFile is identical to the hadoop version in Example 10, so it is not repeated here.
 
Example 13  Project: openbd-core   File: Write.java
private void writeData( AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata, StorageClass storage, String mimetype, cfData data, int retry, int retryseconds, String acl, String aes256key, Map<String, String> customheaders ) throws Exception {
	if ( mimetype == null ) {
		if ( data.getDataType() == cfData.CFBINARYDATA )
			mimetype = "application/unknown";
		else if ( cfData.isSimpleValue( data ) )
			mimetype = "text/plain";
		else
			mimetype = "application/json";

		// Check to see if the mime type is in the metadata
		if ( metadata != null && metadata.containsKey( "Content-Type" ) )
			mimetype = metadata.get( "Content-Type" );
	}


	InputStream ios = null;
	long size = 0;
	if ( data.getDataType() == cfData.CFSTRINGDATA ) {
		byte[] strBytes = data.getString().getBytes();
		ios = new java.io.ByteArrayInputStream( strBytes );
		size = strBytes.length; // byte count, not character count, for Content-Length
	} else if ( data.getDataType() == cfData.CFBINARYDATA ) {
		ios = new java.io.ByteArrayInputStream( ( (cfBinaryData) data ).getByteArray() );
		size = ( (cfBinaryData) data ).getLength();
	} else {
		serializejson json = new serializejson();
		StringBuilder out = new StringBuilder();
		json.encodeJSON( out, data, false, CaseType.MAINTAIN, DateType.LONG );
		byte[] jsonBytes = out.toString().getBytes();
		size = jsonBytes.length; // byte count for Content-Length
		mimetype = "application/json";
		ios = new java.io.ByteArrayInputStream( jsonBytes );
	}


	// Setup the object data
	ObjectMetadata omd = new ObjectMetadata();
	if ( metadata != null )
		omd.setUserMetadata( metadata );

	omd.setContentType( mimetype );
	omd.setContentLength( size );

	AmazonS3 s3Client = getAmazonS3( amazonKey );

	// Let us run around the number of attempts
	int attempts = 0;
	while ( attempts < retry ) {
		try {

			PutObjectRequest por = new PutObjectRequest( bucket, key, ios, omd );
			por.setStorageClass( storage );

			if ( aes256key != null && !aes256key.isEmpty() )
				por.setSSECustomerKey( new SSECustomerKey( aes256key ) );

			if ( acl != null && !acl.isEmpty() )
				por.setCannedAcl( amazonKey.getAmazonCannedAcl( acl ) );

			if ( customheaders != null && !customheaders.isEmpty() ) {
				Iterator<String> it = customheaders.keySet().iterator();
				while ( it.hasNext() ) {
					String k = it.next();
					por.putCustomRequestHeader( k, customheaders.get( k ) );
				}
			}

			s3Client.putObject( por );
			break;

		} catch ( Exception e ) {
			cfEngine.log( "Failed: AmazonS3Write(bucket=" + bucket + "; key=" + key + "; attempt=" + ( attempts + 1 ) + "; exception=" + e.getMessage() + ")" );
			attempts++;

			if ( attempts == retry )
				throw e;
			else
				Thread.sleep( retryseconds * 1000 );
		}
	}
}
 
Example 14  Project: openbd-core   File: Write.java
private void writeFile( AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata, StorageClass storage, String localpath, int retry, int retryseconds, boolean deletefile, boolean background, String callback, String callbackdata, String appname, String acl, String aes256key, Map<String, String> customheaders ) throws Exception {
	File localFile = new File( localpath );
	if ( !localFile.isFile() )
		throw new Exception( "The file specified does not exist: " + localpath );

	// Push this to the background loader to handle and return immediately
	if ( background ) {
		BackgroundUploader.acceptFile( amazonKey, bucket, key, metadata, storage, localpath, retry, retryseconds, deletefile, callback, callbackdata, appname, acl, aes256key, customheaders );
		return;
	}


	// Setup the object data
	ObjectMetadata omd = new ObjectMetadata();
	if ( metadata != null )
		omd.setUserMetadata( metadata );

	AmazonS3 s3Client = getAmazonS3( amazonKey );

	// Let us run around the number of attempts
	int attempts = 0;
	while ( attempts < retry ) {
		try {

			PutObjectRequest por = new PutObjectRequest( bucket, key, localFile );
			por.setMetadata( omd );
			por.setStorageClass( storage );

			if ( acl != null && !acl.isEmpty() )
				por.setCannedAcl( amazonKey.getAmazonCannedAcl( acl ) );

			if ( aes256key != null && !aes256key.isEmpty() )
				por.setSSECustomerKey( new SSECustomerKey( aes256key ) );

			if ( customheaders != null && !customheaders.isEmpty() ) {
				Iterator<String> it = customheaders.keySet().iterator();
				while ( it.hasNext() ) {
					String k = it.next();
					por.putCustomRequestHeader( k, customheaders.get( k ) );
				}
			}

			s3Client.putObject( por );
			break;

		} catch ( Exception e ) {
			cfEngine.log( "Failed: AmazonS3Write(bucket=" + bucket + "; key=" + key + "; file=" + localFile + "; attempt=" + ( attempts + 1 ) + "; exception=" + e.getMessage() + ")" );
			attempts++;

			if ( attempts == retry )
				throw e;
			else
				Thread.sleep( retryseconds * 1000 );
		}
	}


	// delete the file now that it is a success
	if ( deletefile )
		localFile.delete();
}
 
Example 15  Project: nfscan   File: S3Upload.java
/**
 * Uploads a new object to the specified Amazon S3 bucket.
 *
 * @param bucketName   bucket name
 * @param key          object key
 * @param file         file you want to upload
 * @param willBePublic whether or not this file should be accessible publicly
 * @throws AmazonClientException  If any errors are encountered in the client while making the
 *                                request or handling the response.
 * @throws AmazonServiceException If any errors occurred in Amazon S3 while processing the
 *                                request.
 */
protected void startUpload(String bucketName, String key, File file, boolean willBePublic) throws AmazonClientException, AmazonServiceException {
    PutObjectRequest putObj = new PutObjectRequest(bucketName, key, file);
    if (willBePublic) {
        putObj.setCannedAcl(CannedAccessControlList.PublicRead);
    }
    amazonS3.putObject(putObj);
}
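A canned ACL is only shorthand for a predefined set of grants. When none of the CannedAccessControlList values fits, PutObjectRequest also offers setAccessControlList, which takes a fully custom AccessControlList. A minimal sketch, reusing Example 15's bucketName, key, and file parameters (the grantee e-mail address is a placeholder):

import com.amazonaws.services.s3.model.AccessControlList;
import com.amazonaws.services.s3.model.EmailAddressGrantee;
import com.amazonaws.services.s3.model.Permission;

AccessControlList acl = new AccessControlList();
// Placeholder grantee; grants READ permission on the uploaded object.
acl.grantPermission(new EmailAddressGrantee("user@example.com"), Permission.Read);

PutObjectRequest putObj = new PutObjectRequest(bucketName, key, file);
putObj.setAccessControlList(acl);
amazonS3.putObject(putObj);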