Source code examples for com.amazonaws.services.s3.model.SSEAwsKeyManagementParams and com.amazonaws.services.s3.transfer.TransferManagerBuilder

Listed below are example usages of com.amazonaws.services.s3.transfer.TransferManagerBuilder (several of which also use com.amazonaws.services.s3.model.SSEAwsKeyManagementParams), collected from open-source projects; the full source of each file can be viewed on GitHub.
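
As a quick orientation before the project-specific listings, here is a minimal, self-contained sketch of the pattern most of the examples follow: build a TransferManager via TransferManagerBuilder.standard() with an AmazonS3 client, optionally attach SSE-KMS settings to the request through SSEAwsKeyManagementParams, block on waitForCompletion(), and shut the manager down in a finally block. The bucket name, object key, local file, and KMS key id below are placeholders for illustration, not values taken from any of the projects listed.

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class TransferManagerSketch {

    public static void main(String[] args) throws InterruptedException {
        // Placeholder values for illustration only; replace with your own bucket, key, file and KMS key id.
        String bucket = "my-bucket";
        String key = "path/to/object";
        File file = new File("local-file.bin");
        String kmsKeyId = "alias/my-kms-key";

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        TransferManager tm = TransferManagerBuilder.standard()
                .withS3Client(s3)
                .withMultipartUploadThreshold(16L * 1024 * 1024) // use multipart upload above 16 MB
                .build();
        try {
            // SSEAwsKeyManagementParams asks S3 to encrypt the object server-side with the given KMS key.
            PutObjectRequest request = new PutObjectRequest(bucket, key, file)
                    .withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(kmsKeyId));

            Upload upload = tm.upload(request);
            upload.waitForCompletion(); // block until the transfer finishes
        } finally {
            tm.shutdownNow(false); // false: keep the caller-supplied AmazonS3 client open
        }
    }
}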

Example 1   Project: Flink-CEPplus   File: S3UtilProgram.java
private static void downloadByFullPathAndFileNamePrefix(ParameterTool params) {
	final String bucket = params.getRequired("bucket");
	final String s3prefix = params.getRequired("s3prefix");
	final String localFolder = params.getRequired("localFolder");
	final String s3filePrefix = params.get("s3filePrefix", "");
	TransferManager tx = TransferManagerBuilder.defaultTransferManager();
	Predicate<String> keyPredicate = getKeyFilterByFileNamePrefix(s3filePrefix);
	KeyFilter keyFilter = s3filePrefix.isEmpty() ? KeyFilter.INCLUDE_ALL :
		objectSummary -> keyPredicate.test(objectSummary.getKey());
	try {
		tx.downloadDirectory(bucket, s3prefix, new File(localFolder), keyFilter).waitForCompletion();
	} catch (InterruptedException e) {
		System.out.println("Transfer interrupted");
	} finally {
		tx.shutdownNow();
	}
}
 
Example 2   Project: flink   File: S3UtilProgram.java
private static void downloadByFullPathAndFileNamePrefix(ParameterTool params) {
	final String bucket = params.getRequired("bucket");
	final String s3prefix = params.getRequired("s3prefix");
	final String localFolder = params.getRequired("localFolder");
	final String s3filePrefix = params.get("s3filePrefix", "");
	TransferManager tx = TransferManagerBuilder.defaultTransferManager();
	Predicate<String> keyPredicate = getKeyFilterByFileNamePrefix(s3filePrefix);
	KeyFilter keyFilter = s3filePrefix.isEmpty() ? KeyFilter.INCLUDE_ALL :
		objectSummary -> keyPredicate.test(objectSummary.getKey());
	try {
		tx.downloadDirectory(bucket, s3prefix, new File(localFolder), keyFilter).waitForCompletion();
	} catch (InterruptedException e) {
		System.out.println("Transfer interrupted");
	} finally {
		tx.shutdownNow();
	}
}
 
Example 3   Project: circus-train   File: CopyMapper.java
/**
 * Implementation of the Mapper::setup() method. This extracts the S3MapReduceCp options specified in the Job's
 * configuration, to set up the Job.
 *
 * @param context Mapper's context.
 * @throws IOException On IO failure.
 * @throws InterruptedException If the job is interrupted.
 */
@Override
public void setup(Context context) throws IOException, InterruptedException {
  conf = new S3MapReduceCpConfiguration(context.getConfiguration());

  ignoreFailures = conf.getBoolean(ConfigurationVariable.IGNORE_FAILURES);

  targetFinalPath = new Path(conf.get(S3MapReduceCpConstants.CONF_LABEL_TARGET_FINAL_PATH));

  AwsS3ClientFactory awsS3ClientFactory = new AwsS3ClientFactory();
  transferManager = TransferManagerBuilder
      .standard()
      .withMinimumUploadPartSize(conf.getLong(ConfigurationVariable.MINIMUM_UPLOAD_PART_SIZE))
      .withMultipartUploadThreshold(conf.getLong(ConfigurationVariable.MULTIPART_UPLOAD_THRESHOLD))
      .withS3Client(awsS3ClientFactory.newInstance(conf))
      .withShutDownThreadPools(true)
      .withExecutorFactory(new ExecutorFactory() {
        @Override
        public ExecutorService newExecutor() {
          return Executors.newFixedThreadPool(conf.getInt(ConfigurationVariable.NUMBER_OF_UPLOAD_WORKERS));
        }
      })
      .build();
}
 
Example 4   Project: aws-doc-sdk-examples   File: XferMgrDownload.java
public static void downloadDir(String bucket_name, String key_prefix,
                               String dir_path, boolean pause) {
    System.out.println("downloading to directory: " + dir_path +
            (pause ? " (pause)" : ""));

    // snippet-start:[s3.java1.s3_xfer_mgr_download.directory]
    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();

    try {
        MultipleFileDownload xfer = xfer_mgr.downloadDirectory(
                bucket_name, key_prefix, new File(dir_path));
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_download.directory]
}
 
Example 5   Project: aws-doc-sdk-examples   File: XferMgrDownload.java
public static void downloadFile(String bucket_name, String key_name,
                                String file_path, boolean pause) {
    System.out.println("Downloading to file: " + file_path +
            (pause ? " (pause)" : ""));

    // snippet-start:[s3.java1.s3_xfer_mgr_download.single]
    File f = new File(file_path);
    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
    try {
        Download xfer = xfer_mgr.download(bucket_name, key_name, f);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_download.single]
}
 
Example 6   Project: aws-doc-sdk-examples   File: XferMgrCopy.java
public static void copyObjectSimple(String from_bucket, String from_key,
                                    String to_bucket, String to_key) {
    // snippet-start:[s3.java1.s3_xfer_mgr_copy.copy_object]
    System.out.println("Copying s3 object: " + from_key);
    System.out.println("      from bucket: " + from_bucket);
    System.out.println("     to s3 object: " + to_key);
    System.out.println("        in bucket: " + to_bucket);

    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
    try {
        Copy xfer = xfer_mgr.copy(from_bucket, from_key, to_bucket, to_key);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_copy.copy_object]
}
 
Example 7   Project: aws-doc-sdk-examples   File: XferMgrUpload.java
public static void uploadDir(String dir_path, String bucket_name,
                             String key_prefix, boolean recursive, boolean pause) {
    System.out.println("directory: " + dir_path + (recursive ?
            " (recursive)" : "") + (pause ? " (pause)" : ""));

    // snippet-start:[s3.java1.s3_xfer_mgr_upload.directory]
    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
    try {
        MultipleFileUpload xfer = xfer_mgr.uploadDirectory(bucket_name,
                key_prefix, new File(dir_path), recursive);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_upload.directory]
}
 
Example 8   Project: presto   File: PrestoS3FileSystem.java
public PrestoS3OutputStream(
        AmazonS3 s3,
        String bucket,
        String key,
        File tempFile,
        boolean sseEnabled,
        PrestoS3SseType sseType,
        String sseKmsKeyId,
        long multiPartUploadMinFileSize,
        long multiPartUploadMinPartSize,
        PrestoS3AclType aclType,
        boolean requesterPaysEnabled,
        PrestoS3StorageClass s3StorageClass)
        throws IOException
{
    super(new BufferedOutputStream(new FileOutputStream(requireNonNull(tempFile, "tempFile is null"))));

    transferManager = TransferManagerBuilder.standard()
            .withS3Client(requireNonNull(s3, "s3 is null"))
            .withMinimumUploadPartSize(multiPartUploadMinPartSize)
            .withMultipartUploadThreshold(multiPartUploadMinFileSize).build();

    requireNonNull(aclType, "aclType is null");
    requireNonNull(s3StorageClass, "s3StorageClass is null");
    this.aclType = aclType.getCannedACL();
    this.bucket = requireNonNull(bucket, "bucket is null");
    this.key = requireNonNull(key, "key is null");
    this.tempFile = tempFile;
    this.sseEnabled = sseEnabled;
    this.sseType = requireNonNull(sseType, "sseType is null");
    this.sseKmsKeyId = sseKmsKeyId;
    this.requesterPaysEnabled = requesterPaysEnabled;
    this.s3StorageClass = s3StorageClass.getS3StorageClass();

    log.debug("OutputStream for key '%s' using file: %s", key, tempFile);
}
 
Example 9   Project: Flink-CEPplus   File: S3UtilProgram.java
private static void downloadFile(ParameterTool params) {
	final String bucket = params.getRequired("bucket");
	final String s3file = params.getRequired("s3file");
	final String localFile = params.getRequired("localFile");
	TransferManager tx = TransferManagerBuilder.defaultTransferManager();
	try {
		tx.download(bucket, s3file, new File(localFile)).waitForCompletion();
	} catch (InterruptedException e) {
		System.out.println("Transfer interrupted");
	} finally {
		tx.shutdownNow();
	}
}
 
Example 10   Project: flink   File: S3UtilProgram.java
private static void downloadFile(ParameterTool params) {
	final String bucket = params.getRequired("bucket");
	final String s3file = params.getRequired("s3file");
	final String localFile = params.getRequired("localFile");
	TransferManager tx = TransferManagerBuilder.defaultTransferManager();
	try {
		tx.download(bucket, s3file, new File(localFile)).waitForCompletion();
	} catch (InterruptedException e) {
		System.out.println("Transfer interrupted");
	} finally {
		tx.shutdownNow();
	}
}
 
Example 11   Project: circus-train   File: TransferManagerFactory.java
public TransferManager newInstance(AmazonS3 targetS3Client, S3S3CopierOptions s3s3CopierOptions) {
  LOG
      .debug("Initializing transfer manager with {} threads.", s3s3CopierOptions.getMaxThreadPoolSize());

  return TransferManagerBuilder.standard()
      .withMultipartCopyThreshold(s3s3CopierOptions.getMultipartCopyThreshold())
      .withMultipartCopyPartSize(s3s3CopierOptions.getMultipartCopyPartSize())
      .withExecutorFactory(() -> Executors.newFixedThreadPool(s3s3CopierOptions.getMaxThreadPoolSize()))
      .withS3Client(targetS3Client)
      .build();
}
 
Example 12   Project: spring-cloud-aws-sample   File: TablonController.java
@RequestMapping("/anuncio/nuevo")
public String nuevoAnuncio(Model model, 
		@RequestParam String nombre,
		@RequestParam String asunto,
		@RequestParam String comentario,
		@RequestParam String filename,
		@RequestParam MultipartFile file) {

       if (!file.isEmpty()) {
           try {
               ObjectMetadata objectMetadata = new ObjectMetadata();
               objectMetadata.setContentType(file.getContentType());

               TransferManager transferManager = TransferManagerBuilder.defaultTransferManager();
               transferManager.upload(bucket, filename, file.getInputStream(), objectMetadata);
               
           } catch (Exception e) {
           	model.addAttribute("message", "You failed to upload " + filename + " => " + e.getMessage());
               return "error";
           }
       } else {
       	model.addAttribute("message", "You failed to upload " + filename + " because the file was empty.");
           return "error";
       }

       Anuncio anuncio = new Anuncio(nombre, asunto, comentario);
       anuncio.setFoto(s3.getUrl(bucket, filename));

	repository.save(anuncio);

       return "anuncio_guardado";

}
 
Example 13   Project: pipeline-aws-plugin   File: S3DownloadStep.java
@Override
public Void invoke(File localFile, VirtualChannel channel) throws IOException, InterruptedException {
	TransferManager mgr = TransferManagerBuilder.standard()
			.withS3Client(AWSClientFactory.create(this.amazonS3ClientOptions.createAmazonS3ClientBuilder(), this.envVars))
			.build();

	if (this.path == null || this.path.isEmpty() || this.path.endsWith("/")) {
		try {
			final MultipleFileDownload fileDownload = mgr.downloadDirectory(this.bucket, this.path, localFile);
			fileDownload.waitForCompletion();
			RemoteDownloader.this.taskListener.getLogger().println("Finished: " + fileDownload.getDescription());
		}
		finally {
			mgr.shutdownNow();
		}
		return null;
	} else {
		try {
			final Download download = mgr.download(this.bucket, this.path, localFile);
			download.addProgressListener((ProgressListener) progressEvent -> {
				if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
					RemoteDownloader.this.taskListener.getLogger().println("Finished: " + download.getDescription());
				}
			});
			download.waitForCompletion();
		}
		finally {
			mgr.shutdownNow();
		}
		return null;
	}
}
 
Example 14
@Before
public void setupSdk() throws Exception {
	PowerMockito.mockStatic(AWSClientFactory.class);
	AmazonS3 amazonS3 = PowerMockito.mock(AmazonS3.class);
	PowerMockito.when(AWSClientFactory.create(Mockito.any(AwsSyncClientBuilder.class), Mockito.any(StepContext.class)))
			.thenReturn(amazonS3);

	PowerMockito.mockStatic(TransferManagerBuilder.class);
	transferManager = Mockito.mock(TransferManager.class);
	TransferManagerBuilder transferManagerBuilder = PowerMockito.mock(TransferManagerBuilder.class);
	PowerMockito.when(TransferManagerBuilder.standard()).thenReturn(transferManagerBuilder);
	PowerMockito.when(transferManagerBuilder.withS3Client(Mockito.any(AmazonS3.class))).thenReturn(transferManagerBuilder);
	PowerMockito.when(transferManagerBuilder.build()).thenReturn(transferManager);
}
 
Example 15   Project: aws-doc-sdk-examples   File: XferMgrUpload.java
public static void uploadFileList(String[] file_paths, String bucket_name,
                                  String key_prefix, boolean pause) {
    System.out.println("file list: " + Arrays.toString(file_paths) +
            (pause ? " (pause)" : ""));
    // convert the file paths to a list of File objects (required by the
    // uploadFileList method)
    // snippet-start:[s3.java1.s3_xfer_mgr_upload.list_of_files]
    ArrayList<File> files = new ArrayList<File>();
    for (String path : file_paths) {
        files.add(new File(path));
    }

    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
    try {
        MultipleFileUpload xfer = xfer_mgr.uploadFileList(bucket_name,
                key_prefix, new File("."), files);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_upload.list_of_files]
}
 
Example 16   Project: aws-doc-sdk-examples   File: XferMgrUpload.java
public static void uploadFile(String file_path, String bucket_name,
                              String key_prefix, boolean pause) {
    System.out.println("file: " + file_path +
            (pause ? " (pause)" : ""));

    String key_name = null;
    if (key_prefix != null) {
        key_name = key_prefix + '/' + file_path;
    } else {
        key_name = file_path;
    }

    // snippet-start:[s3.java1.s3_xfer_mgr_upload.single]
    File f = new File(file_path);
    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
    try {
        Upload xfer = xfer_mgr.upload(bucket_name, key_name, f);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        //  or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_upload.single]
}
 
Example 17   Project: nexus-public   File: TransferManagerUploader.java
public void upload(final AmazonS3 s3, final String bucket, final String key, final InputStream contents) {
  try {
    TransferManager transferManager = TransferManagerBuilder.standard().withS3Client(s3).build();
    transferManager.upload(bucket, key, contents, new ObjectMetadata())
        .waitForCompletion();
  } catch (InterruptedException e) {
    throw new BlobStoreException("error uploading blob", e, null);
  }
}
 
Example 18   Project: entrada   File: S3FileManagerImpl.java
public S3FileManagerImpl(AmazonS3 amazonS3, @Value("${aws.upload.parallelism}") int parallelism,
    @Value("${aws.upload.multipart.mb.size:5}") int multipartSize) {
  this.amazonS3 = amazonS3;

  this.transferManager = TransferManagerBuilder
      .standard()
      .withS3Client(amazonS3)
      .withMultipartUploadThreshold(multipartSize * 1024L * 1024L)
      .withExecutorFactory(() -> Executors.newFixedThreadPool(parallelism))
      .build();
}
 
Example 19   Project: datacollector   File: S3Manager.java
S3Manager(EmrClusterConfig pipelineEmrConfigs, String pipelineId, String uniquePrefix, List<File> filesToUpload) {
  this.pipelineEmrConfigs = pipelineEmrConfigs;
  this.pipelineId = pipelineId;
  this.uniquePrefix = uniquePrefix;
  this.filesToUpload = filesToUpload;
  s3Client = getS3Client();
  int uploadThreads = Integer.parseInt(System.getProperty("sdc.emr.s3.uploadThreads", "5"));
  s3TransferManager = TransferManagerBuilder.standard()
      .withS3Client(s3Client)
      .withMultipartUploadThreshold(10L*1024*1024)
      .withMinimumUploadPartSize(10L*1024*1024)
      .withExecutorFactory(() -> Executors.newFixedThreadPool(uploadThreads))
      .withShutDownThreadPools(true)
      .build();
}
 
Example 20   Project: james-project   File: AwsS3ObjectStorage.java
private static TransferManager getTransferManager(AmazonS3 s3Client, ExecutorService executorService) {
    return TransferManagerBuilder
            .standard()
            .withS3Client(s3Client)
            .withMultipartUploadThreshold(MULTIPART_UPLOAD_THRESHOLD.getValue())
            .withExecutorFactory(() -> executorService)
            .withShutDownThreadPools(DO_NOT_SHUTDOWN_THREAD_POOL)
            .build();
}
 
Example 21   Project: tutorials   File: MultipartUpload.java
public static void main(String[] args) throws Exception {
    String existingBucketName = "baeldung-bucket";
    String keyName = "my-picture.jpg";
    String filePath = "documents/my-picture.jpg";

    AmazonS3 amazonS3 = AmazonS3ClientBuilder
            .standard()
            .withCredentials(new DefaultAWSCredentialsProviderChain())
            .withRegion(Regions.DEFAULT_REGION)
            .build();

    int maxUploadThreads = 5;

    TransferManager tm = TransferManagerBuilder
            .standard()
            .withS3Client(amazonS3)
            .withMultipartUploadThreshold((long) (5 * 1024 * 1024))
            .withExecutorFactory(() -> Executors.newFixedThreadPool(maxUploadThreads))
            .build();

    ProgressListener progressListener =
            progressEvent -> System.out.println("Transferred bytes: " + progressEvent.getBytesTransferred());

    PutObjectRequest request = new PutObjectRequest(existingBucketName, keyName, new File(filePath));

    request.setGeneralProgressListener(progressListener);

    Upload upload = tm.upload(request);

    try {
        upload.waitForCompletion();
        System.out.println("Upload complete.");
    } catch (AmazonClientException e) {
        System.out.println("Error occurred while uploading file");
        e.printStackTrace();
    }
}
 
Example 22   Project: tutorials   File: MultipartUploadLiveTest.java
@Before
public void setup() {
    amazonS3 = mock(AmazonS3.class);
    tm = TransferManagerBuilder
            .standard()
            .withS3Client(amazonS3)
            .withMultipartUploadThreshold((long) (5 * 1024 * 1025))
            .withExecutorFactory(() -> Executors.newFixedThreadPool(5))
            .build();
    progressListener =
            progressEvent -> System.out.println("Transferred bytes: " + progressEvent.getBytesTransferred());
}
 
Example 23   Project: data-highway   File: S3SchemaUriResolver.java
public S3SchemaUriResolver(AmazonS3 s3Client, String bucket, String keyPrefix, boolean enableServerSideEncryption) {
  this(TransferManagerBuilder.standard().withS3Client(s3Client).build(), S3_URI_FORMAT, bucket, keyPrefix,
      enableServerSideEncryption);
}
 
Example 24   Project: cassandra-backup   File: S3Module.java
public TransferManager build(final AbstractOperationRequest operationRequest) {
    final AmazonS3 amazonS3 = provideAmazonS3(coreV1ApiProvider, operationRequest);
    return TransferManagerBuilder.standard().withS3Client(amazonS3).build();
}
 
Example 25   Project: pipeline-aws-plugin   File: S3CopyStep.java
@Override
public String run() throws Exception {
	final String fromBucket = this.step.getFromBucket();
	final String toBucket = this.step.getToBucket();
	final String fromPath = this.step.getFromPath();
	final String toPath = this.step.getToPath();
	final String kmsId = this.step.getKmsId();
	final Map<String, String> metadatas = new HashMap<>();
	final CannedAccessControlList acl = this.step.getAcl();
	final String cacheControl = this.step.getCacheControl();
	final String contentType = this.step.getContentType();
	final String sseAlgorithm = this.step.getSseAlgorithm();
	final S3ClientOptions s3ClientOptions = this.step.createS3ClientOptions();
	final EnvVars envVars = this.getContext().get(EnvVars.class);

	if (this.step.getMetadatas() != null && this.step.getMetadatas().length != 0) {
		for (String metadata : this.step.getMetadatas()) {
			if (metadata.split(":").length == 2) {
				metadatas.put(metadata.split(":")[0], metadata.split(":")[1]);
			}
		}
	}

	Preconditions.checkArgument(fromBucket != null && !fromBucket.isEmpty(), "From bucket must not be null or empty");
	Preconditions.checkArgument(fromPath != null && !fromPath.isEmpty(), "From path must not be null or empty");
	Preconditions.checkArgument(toBucket != null && !toBucket.isEmpty(), "To bucket must not be null or empty");
	Preconditions.checkArgument(toPath != null && !toPath.isEmpty(), "To path must not be null or empty");

	TaskListener listener = Execution.this.getContext().get(TaskListener.class);
	listener.getLogger().format("Copying s3://%s/%s to s3://%s/%s%n", fromBucket, fromPath, toBucket, toPath);

	CopyObjectRequest request = new CopyObjectRequest(fromBucket, fromPath, toBucket, toPath);

	// Add metadata
	if (metadatas.size() > 0 || (cacheControl != null && !cacheControl.isEmpty()) || (contentType != null && !contentType.isEmpty()) || (sseAlgorithm != null && !sseAlgorithm.isEmpty())) {
		ObjectMetadata metas = new ObjectMetadata();
		if (metadatas.size() > 0) {
			metas.setUserMetadata(metadatas);
		}
		if (cacheControl != null && !cacheControl.isEmpty()) {
			metas.setCacheControl(cacheControl);
		}
		if (contentType != null && !contentType.isEmpty()) {
			metas.setContentType(contentType);
		}
		if (sseAlgorithm != null && !sseAlgorithm.isEmpty()) {
			metas.setSSEAlgorithm(sseAlgorithm);
		}
		request.withNewObjectMetadata(metas);
	}

	// Add acl
	if (acl != null) {
		request.withCannedAccessControlList(acl);
	}

	// Add kms
	if (kmsId != null && !kmsId.isEmpty()) {
		listener.getLogger().format("Using KMS: %s%n", kmsId);
		request.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(kmsId));
	}

	TransferManager mgr = TransferManagerBuilder.standard()
			.withS3Client(AWSClientFactory.create(s3ClientOptions.createAmazonS3ClientBuilder(), envVars))
			.build();
	try {
		final Copy copy = mgr.copy(request);
		copy.addProgressListener((ProgressListener) progressEvent -> {
			if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
				listener.getLogger().println("Finished: " + copy.getDescription());
			}
		});
		copy.waitForCompletion();
	}
	finally{
		mgr.shutdownNow();
	}

	listener.getLogger().println("Copy complete");
	return String.format("s3://%s/%s", toBucket, toPath);
}
 
Example 26   Project: pipeline-aws-plugin   File: S3UploadStep.java
@Override
public Void invoke(File localFile, VirtualChannel channel) throws IOException, InterruptedException {
	TransferManager mgr = TransferManagerBuilder.standard()
			.withS3Client(AWSClientFactory.create(this.amazonS3ClientOptions.createAmazonS3ClientBuilder(), this.envVars))
			.build();
	final MultipleFileUpload fileUpload;
	ObjectMetadataProvider metadatasProvider = (file, meta) -> {
		if (meta != null) {
			if (RemoteListUploader.this.metadatas != null && RemoteListUploader.this.metadatas.size() > 0) {
				meta.setUserMetadata(RemoteListUploader.this.metadatas);
			}
			if (RemoteListUploader.this.acl != null) {
				meta.setHeader(Headers.S3_CANNED_ACL, RemoteListUploader.this.acl);
			}
			if (RemoteListUploader.this.cacheControl != null && !RemoteListUploader.this.cacheControl.isEmpty()) {
				meta.setCacheControl(RemoteListUploader.this.cacheControl);
			}
			if (RemoteListUploader.this.contentEncoding != null && !RemoteListUploader.this.contentEncoding.isEmpty()) {
				meta.setContentEncoding(RemoteListUploader.this.contentEncoding);
			}
			if (RemoteListUploader.this.contentType != null && !RemoteListUploader.this.contentType.isEmpty()) {
				meta.setContentType(RemoteListUploader.this.contentType);
			}
			if (RemoteListUploader.this.sseAlgorithm != null && !RemoteListUploader.this.sseAlgorithm.isEmpty()) {
				meta.setSSEAlgorithm(RemoteListUploader.this.sseAlgorithm);
			}
			if (RemoteListUploader.this.kmsId != null && !RemoteListUploader.this.kmsId.isEmpty()) {
				final SSEAwsKeyManagementParams sseAwsKeyManagementParams = new SSEAwsKeyManagementParams(RemoteListUploader.this.kmsId);
				meta.setSSEAlgorithm(sseAwsKeyManagementParams.getAwsKmsKeyId());
				meta.setHeader(
						Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID,
						sseAwsKeyManagementParams.getAwsKmsKeyId()
				);
			}

		}
	};

	ObjectTaggingProvider objectTaggingProvider =(uploadContext) -> {
		List<Tag> tagList = new ArrayList<Tag>();

		//add tags
		if(tags != null){
			for (Map.Entry<String, String> entry : tags.entrySet()) {
				Tag tag = new Tag(entry.getKey(), entry.getValue());
				tagList.add(tag);
			}
		}
		return new ObjectTagging(tagList);
	};

	try {
		fileUpload = mgr.uploadFileList(this.bucket, this.path, localFile, this.fileList, metadatasProvider, objectTaggingProvider);
		for (final Upload upload : fileUpload.getSubTransfers()) {
			upload.addProgressListener((ProgressListener) progressEvent -> {
				if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
					RemoteListUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());
				}
			});
		}
		fileUpload.waitForCompletion();
	}
	finally {
		mgr.shutdownNow();
	}
	return null;
}
 
Example 27
public S3Downloader(AmazonS3Client s3Client) {
    this.s3Client = s3Client;
    transferManager = TransferManagerBuilder.standard().withS3Client(s3Client).build();
}
 
Example 28   Project: datacollector   File: S3Accessor.java
TransferManagerBuilder createTransferManagerBuilder() {
  return TransferManagerBuilder.standard();
}
 
Example 29   Project: datacollector   File: AmazonS3Target.java
@Override
public List<ConfigIssue> init() {
  List<ConfigIssue> issues = s3TargetConfigBean.init(getContext(), super.init(), isErrorStage);

  elVars = getContext().createELVars();
  bucketEval = getContext().createELEval(BUCKET_TEMPLATE);
  partitionEval = getContext().createELEval(PARTITION_TEMPLATE);
  timeDriverEval = getContext().createELEval(TIME_DRIVER_TEMPLATE);
  calendar = Calendar.getInstance(TimeZone.getTimeZone(ZoneId.of(s3TargetConfigBean.timeZoneID)));

  transferManager = TransferManagerBuilder
      .standard()
      .withS3Client(s3TargetConfigBean.s3Config.getS3Client())
      .withExecutorFactory(() -> Executors.newFixedThreadPool(s3TargetConfigBean.tmConfig.threadPoolSize))
      .withMinimumUploadPartSize(s3TargetConfigBean.tmConfig.minimumUploadPartSize)
      .withMultipartUploadThreshold(s3TargetConfigBean.tmConfig.multipartUploadThreshold)
      .build();

  TimeEL.setCalendarInContext(elVars, calendar);
  TimeNowEL.setTimeNowInContext(elVars, new Date());

  if (partitionTemplate.contains(EL_PREFIX)) {
    ELUtils.validateExpression(partitionTemplate,
        getContext(),
        Groups.S3.getLabel(),
        S3TargetConfigBean.S3_TARGET_CONFIG_BEAN_PREFIX + PARTITION_TEMPLATE,
        Errors.S3_03, issues
    );
  }

  if (timeDriverTemplate.contains(EL_PREFIX)) {
    ELUtils.validateExpression(timeDriverTemplate,
        getContext(),
        Groups.S3.getLabel(),
        S3TargetConfigBean.S3_TARGET_CONFIG_BEAN_PREFIX + TIME_DRIVER_TEMPLATE,
        Errors.S3_04, issues
    );
  }
  if (!isErrorStage && getContext().getService(DataFormatGeneratorService.class).isWholeFileFormat()) {
    fileHelper = new WholeFileHelper(getContext(), s3TargetConfigBean, transferManager, issues);
  } else {
    fileHelper = new DefaultFileHelper(getContext(), s3TargetConfigBean, transferManager, isErrorStage);
  }

  this.errorRecordHandler = new DefaultErrorRecordHandler(getContext());

  return issues;
}
 
Example 30   Project: ecs-sync   File: AwsS3Storage.java
@Override
void putObject(SyncObject obj, String targetKey) {
    ObjectMetadata om;
    if (options.isSyncMetadata()) om = s3MetaFromSyncMeta(obj.getMetadata());
    else om = new ObjectMetadata();

    if (obj.getMetadata().isDirectory()) om.setContentType(TYPE_DIRECTORY);

    PutObjectRequest req;
    File file = (File) obj.getProperty(AbstractFilesystemStorage.PROP_FILE);
    S3ProgressListener progressListener = null;
    if (obj.getMetadata().isDirectory()) {
        req = new PutObjectRequest(config.getBucketName(), targetKey, new ByteArrayInputStream(new byte[0]), om);
    } else if (file != null) {
        req = new PutObjectRequest(config.getBucketName(), targetKey, file).withMetadata(om);
        progressListener = new ByteTransferListener(obj);
    } else {
        InputStream stream = obj.getDataStream();
        if (options.isMonitorPerformance())
            stream = new ProgressInputStream(stream, new PerformanceListener(getWriteWindow()));
        req = new PutObjectRequest(config.getBucketName(), targetKey, stream, om);
    }

    if (options.isSyncAcl())
        req.setAccessControlList(s3AclFromSyncAcl(obj.getAcl(), options.isIgnoreInvalidAcls()));

    TransferManager xferManager = null;
    try {
        // xfer manager will figure out if MPU is needed (based on threshold), do the MPU if necessary,
        // and abort if it fails
        xferManager = TransferManagerBuilder.standard()
                .withS3Client(s3)
                .withExecutorFactory(() -> Executors.newFixedThreadPool(config.getMpuThreadCount()))
                .withMultipartUploadThreshold((long) config.getMpuThresholdMb() * 1024 * 1024)
                .withMinimumUploadPartSize((long) config.getMpuPartSizeMb() * 1024 * 1024)
                .withShutDownThreadPools(true)
                .build();

        // directly update

        final Upload upload = xferManager.upload(req, progressListener);
        try {
            String eTag = time((Callable<String>) () -> upload.waitForUploadResult().getETag(), OPERATION_MPU);
            log.debug("Wrote {}, etag: {}", targetKey, eTag);
        } catch (Exception e) {
            log.error("upload exception", e);
            if (e instanceof RuntimeException) throw (RuntimeException) e;
            throw new RuntimeException("upload thread was interrupted", e);
        }
    } finally {
        // NOTE: apparently if we do not reference xferManager again after the upload() call (as in this finally
        // block), the JVM will for some crazy reason determine it is eligible for GC and call finalize(), which
        // shuts down the thread pool, fails the upload, and gives absolutely no indication of what's going on...
        if (xferManager != null) xferManager.shutdownNow(false);
    }
}