下面列出了 com.amazonaws.services.s3.model.AbortMultipartUploadRequest 类的 API 实例代码及用法,也可以点击链接前往 GitHub 查看源代码。
@Test
public void testAtomicMpuAbort() throws Exception {
    final String key = "testAtomicMpuAbort";

    // Seed the container with a plain (non-multipart) object under the key.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(BYTE_SOURCE.size());
    client.putObject(containerName, key, BYTE_SOURCE.openStream(), metadata);

    // Start a multipart upload against the same key, then abort it immediately.
    InitiateMultipartUploadResult initResponse = client.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(containerName, key));
    client.abortMultipartUpload(new AbortMultipartUploadRequest(
            containerName, key, initResponse.getUploadId()));

    // The abort must not disturb the previously stored object.
    S3Object object = client.getObject(containerName, key);
    assertThat(object.getObjectMetadata().getContentLength())
            .isEqualTo(BYTE_SOURCE.size());
    try (InputStream actual = object.getObjectContent();
            InputStream expected = BYTE_SOURCE.openStream()) {
        assertThat(actual).hasContentEqualTo(expected);
    }
}
@Test
public void abort() {
    // Capture the request handed to the mocked client so its fields can be checked.
    final ArgumentCaptor<AbortMultipartUploadRequest> captor =
            ArgumentCaptor.forClass(AbortMultipartUploadRequest.class);
    doNothing().when(s3).abortMultipartUpload(captor.capture());

    underTest.abort(UPLOAD_ID);

    final AbortMultipartUploadRequest sent = captor.getValue();
    assertThat(sent.getBucketName(), is(BUCKET));
    assertThat(sent.getKey(), is(KEY));
    assertThat(sent.getUploadId(), is(UPLOAD_ID));
}
/**
 * Aborts all multipart uploads under this request's cluster/datacenter prefix that
 * were initiated more than one day ago, paging through the listing until it is no
 * longer truncated. Individual abort failures are logged and skipped so cleanup
 * continues with the remaining uploads.
 */
private void cleanupMultipartUploads() {
    final AmazonS3 s3Client = transferManager.getAmazonS3Client();
    final Instant yesterdayInstant = ZonedDateTime.now().minusDays(1).toInstant();
    logger.info("Cleaning up multipart uploads older than {}.", yesterdayInstant);
    final ListMultipartUploadsRequest listMultipartUploadsRequest =
            new ListMultipartUploadsRequest(request.storageLocation.bucket)
                    .withPrefix(request.storageLocation.clusterId + "/" + request.storageLocation.datacenterId);
    while (true) {
        final MultipartUploadListing multipartUploadListing =
                s3Client.listMultipartUploads(listMultipartUploadsRequest);
        multipartUploadListing.getMultipartUploads().stream()
                .filter(u -> u.getInitiated().toInstant().isBefore(yesterdayInstant))
                .forEach(u -> {
                    logger.info("Aborting multi-part upload for key \"{}\" initiated on {}",
                            u.getKey(), u.getInitiated().toInstant());
                    try {
                        s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(
                                request.storageLocation.bucket, u.getKey(), u.getUploadId()));
                    } catch (final AmazonClientException e) {
                        logger.error("Failed to abort multipart upload for key \"{}\".", u.getKey(), e);
                    }
                });
        if (!multipartUploadListing.isTruncated()) {
            break;
        }
        // BUG FIX: advance to the *next* page. getKeyMarker()/getUploadIdMarker()
        // echo the markers of the request just issued, so the previous code would
        // re-list the same page forever whenever the listing was truncated.
        listMultipartUploadsRequest
                .withKeyMarker(multipartUploadListing.getNextKeyMarker())
                .withUploadIdMarker(multipartUploadListing.getNextUploadIdMarker());
    }
}
protected void abortS3MultipartUpload(final AmazonS3Client s3, final String bucket, final MultipartUpload upload) {
    // Abort a single stale multipart upload; logs success or failure, never throws.
    final String uploadKey = upload.getKey();
    final String uploadId = upload.getUploadId();
    try {
        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, uploadKey, uploadId));
        getLogger().info("Aborting out of date multipart upload, bucket {} key {} ID {}, initiated {}",
                new Object[]{bucket, uploadKey, uploadId, logFormat.format(upload.getInitiated())});
    } catch (AmazonClientException ace) {
        getLogger().info("Error trying to abort multipart upload from bucket {} with key {} and ID {}: {}",
                new Object[]{bucket, uploadKey, uploadId, ace.getMessage()});
    }
}
/**
 * Best-effort abort of the in-progress multipart upload. Failures are logged and
 * swallowed: the caller may need to purge already-uploaded parts manually.
 */
public void abort() {
    LOG.warn("Aborting multi-part upload with id '{}'", uploadId);
    try {
        client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket,
                key, uploadId));
    } catch (Exception e2) {
        // FIX: pass the exception only as the throwable argument. The original
        // concatenated e2 into the message AND passed it, logging it twice and
        // defeating the logger's parameterized formatting.
        LOG.warn("Unable to abort multipart upload, you may need to purge uploaded parts", e2);
    }
}
/**
 * Collects the upload ids referenced by the given abort requests.
 */
private static Set<String> getAbortedIds(
        List<AbortMultipartUploadRequest> aborts) {
    final Set<String> ids = Sets.newHashSet();
    aborts.forEach(abortRequest -> ids.add(abortRequest.getUploadId()));
    return ids;
}
/**
 * Best-effort abort of the multipart upload; any failure is logged and swallowed.
 */
public void abort() {
    log.warn("Aborting multi-part upload with id '{}'", uploadId);
    try {
        final AbortMultipartUploadRequest abortRequest =
                new AbortMultipartUploadRequest(bucket, key, uploadId);
        s3.abortMultipartUpload(abortRequest);
    } catch (Exception e) {
        // ignoring failure on abort.
        log.warn("Unable to abort multipart upload, you may need to purge uploaded parts: ", e);
    }
}
/**
 * Best-effort abort of the in-progress multipart upload. Failures are logged and
 * swallowed: the caller may need to purge already-uploaded parts manually.
 */
public void abort() {
    LOG.warn("Aborting multi-part upload with id '{}'", uploadId);
    try {
        client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket,
                key, uploadId));
    } catch (Exception e2) {
        // FIX: pass the exception only as the throwable argument. The original
        // concatenated e2 into the message AND passed it, logging it twice and
        // defeating the logger's parameterized formatting.
        LOG.warn("Unable to abort multipart upload, you may need to purge uploaded parts", e2);
    }
}
/**
 * Initiates a multipart upload and runs {@code parallelism} copies of the supplied
 * part-upload operation concurrently, then completes the upload with every
 * returned part ETag. The upload is aborted if any task is interrupted, cancelled,
 * or fails.
 *
 * @param s3 the S3 client to use
 * @param bucket destination bucket
 * @param key destination key
 * @param operations supplies a function that, given the upload id, uploads some
 *        parts and returns their ETags
 */
protected void parallelRequests(final AmazonS3 s3,
final String bucket,
final String key,
final Supplier<IOFunction<String, List<PartETag>>> operations)
{
InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, key);
String uploadId = s3.initiateMultipartUpload(initiateRequest).getUploadId();
CompletionService<List<PartETag>> completionService = new ExecutorCompletionService<>(executorService);
try {
// Submit one task per degree of parallelism.
for (int i = 0; i < parallelism; i++) {
completionService.submit(() -> operations.get().apply(uploadId));
}
// Gather the ETags from every task before completing the upload.
List<PartETag> partETags = new ArrayList<>();
for (int i = 0; i < parallelism; i++) {
partETags.addAll(completionService.take().get());
}
s3.completeMultipartUpload(new CompleteMultipartUploadRequest()
.withBucketName(bucket)
.withKey(key)
.withUploadId(uploadId)
.withPartETags(partETags));
}
catch (InterruptedException interrupted) {
// Abort server-side, then restore the interrupt flag for callers.
s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
Thread.currentThread().interrupt();
}
catch (CancellationException | ExecutionException ex) {
// Any task failure: abort so no orphaned parts remain, then surface the error.
s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
throw new BlobStoreException(
format("Error executing parallel requests for bucket:%s key:%s with uploadId:%s", bucket, key, uploadId), ex,
null);
}
}
/**
 * Aborts the outstanding multipart upload, if any. A no-op for plain uploads.
 */
private void abortMultiPartUpload() {
    if (!isMultiPartUpload()) {
        return;
    }
    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(
            this.multiPartUploadResult.getBucketName(),
            this.multiPartUploadResult.getKey(),
            this.multiPartUploadResult.getUploadId());
    SimpleStorageResource.this.amazonS3.abortMultipartUpload(abortRequest);
}
/**
 * Aborts one out-of-date multipart upload, logging either the abort or the failure;
 * never throws.
 */
protected void abortS3MultipartUpload(final AmazonS3Client s3, final String bucket, final MultipartUpload upload) {
    final String key = upload.getKey();
    final String id = upload.getUploadId();
    // No call to an encryption service is necessary for an AbortMultipartUploadRequest.
    final AbortMultipartUploadRequest abort = new AbortMultipartUploadRequest(bucket, key, id);
    try {
        s3.abortMultipartUpload(abort);
        getLogger().info("Aborting out of date multipart upload, bucket {} key {} ID {}, initiated {}",
                new Object[]{bucket, key, id, logFormat.format(upload.getInitiated())});
    } catch (AmazonClientException ace) {
        getLogger().info("Error trying to abort multipart upload from bucket {} with key {} and ID {}: {}",
                new Object[]{bucket, key, id, ace.getMessage()});
    }
}
/**
 * Best-effort abort of the current upload. Only multipart uploads carry an upload
 * id; plain puts need no abort, and service errors are deliberately ignored.
 */
private void abortUpload() {
    if (this.uploadId == null) {
        // This is not a multipart upload, nothing to do here
        return;
    }
    try {
        this.s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(this.bucket, this.object, this.uploadId));
    } catch (AmazonServiceException e) {
        // Ignore exception
    }
}
/**
 * Aborts the identified multipart upload and logs the destination it targeted.
 * Propagates any client exception to the caller.
 */
void abort(String uploadId) {
    s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
    log.warn("Aborted upload to s3://{}/{}.", bucket, key);
}
/**
 * Uploads {@code localFile} to s3://bucket/key with the multipart API and returns
 * a {@link PendingUpload} describing the not-yet-committed upload. If anything
 * throws before the PendingUpload is built, the multipart upload is aborted
 * (best-effort) so no orphaned parts are left behind.
 *
 * @param client the S3 client
 * @param localFile file to upload; must be non-empty
 * @param partition partition label recorded on the returned PendingUpload
 * @param bucket destination bucket
 * @param key destination key
 * @param uploadPartSize size of every part except possibly the last
 * @return a PendingUpload holding the upload id and collected part ETags
 */
public static PendingUpload multipartUpload(
AmazonS3 client, File localFile, String partition,
String bucket, String key, long uploadPartSize) {
InitiateMultipartUploadResult initiate = client.initiateMultipartUpload(
new InitiateMultipartUploadRequest(bucket, key));
String uploadId = initiate.getUploadId();
boolean threw = true;
try {
Map<Integer, String> etags = Maps.newLinkedHashMap();
long offset = 0;
// Full parts, plus one extra for a partial trailing part.
long numParts = (localFile.length() / uploadPartSize +
((localFile.length() % uploadPartSize) > 0 ? 1 : 0));
Preconditions.checkArgument(numParts > 0,
"Cannot upload 0 byte file: " + localFile);
for (int partNumber = 1; partNumber <= numParts; partNumber += 1) {
// The last part may be shorter than uploadPartSize.
long size = Math.min(localFile.length() - offset, uploadPartSize);
UploadPartRequest part = new UploadPartRequest()
.withBucketName(bucket)
.withKey(key)
.withPartNumber(partNumber)
.withUploadId(uploadId)
.withFile(localFile)
.withFileOffset(offset)
.withPartSize(size)
.withLastPart(partNumber == numParts);
UploadPartResult partResult = client.uploadPart(part);
PartETag etag = partResult.getPartETag();
etags.put(etag.getPartNumber(), etag.getETag());
offset += uploadPartSize;
}
PendingUpload pending = new PendingUpload(
partition, bucket, key, uploadId, etags);
// Success: disarm the abort in the finally block.
threw = false;
return pending;
} finally {
if (threw) {
try {
client.abortMultipartUpload(
new AbortMultipartUploadRequest(bucket, key, uploadId));
} catch (AmazonClientException e) {
LOG.error("Failed to abort multi-part upload", e);
}
}
}
}
/**
 * Builds an abort request for this object's bucket, key, and upload id.
 */
public AbortMultipartUploadRequest newAbortRequest() {
    final AbortMultipartUploadRequest abortRequest =
            new AbortMultipartUploadRequest(bucket, key, uploadId);
    return abortRequest;
}
/**
 * Returns the abort requests recorded so far.
 * NOTE(review): exposes the internal list directly — callers can mutate it; confirm
 * whether an unmodifiable view was intended.
 */
public List<AbortMultipartUploadRequest> getAborts() {
return aborts;
}
/**
 * Builds an abort request targeting this upload's bucket, key, and upload id.
 */
public AbortMultipartUploadRequest getAbortMultipartUploadRequest() {
    final AbortMultipartUploadRequest abortRequest =
            new AbortMultipartUploadRequest(this.bucketName, this.key, this.uploadId);
    return abortRequest;
}
/**
 * Abort a multipart upload operation.
 *
 * @param uploadId multipart operation Id
 * @throws AmazonClientException on problems
 */
void abortMultipartUpload(String uploadId) throws AmazonClientException {
    LOG.debug("Aborting multipart upload {}", uploadId);
    final AbortMultipartUploadRequest abortRequest =
            new AbortMultipartUploadRequest(mBucket, key, uploadId);
    mClient.abortMultipartUpload(abortRequest);
}
/**
 * Uploads content to s3://bucket/key with the multipart API. The first part is
 * {@code firstChunk}; subsequent parts are obtained from {@code restOfContents}
 * via {@code readChunk} until an empty chunk is returned. If the upload is not
 * completed (any throw before the upload id is cleared), it is aborted.
 *
 * NOTE(review): relies on InputStream.available() to report each chunk's full
 * size — valid for fully-buffered in-memory streams; confirm readChunk guarantees
 * that.
 */
private void uploadMultiPart(final AmazonS3 s3,
final String bucket,
final String key,
final InputStream firstChunk,
final InputStream restOfContents)
throws IOException {
checkState(firstChunk.available() > 0);
String uploadId = null;
try {
InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, key);
uploadId = s3.initiateMultipartUpload(initiateRequest).getUploadId();
log.debug("Starting multipart upload {} to key {} in bucket {}", uploadId, key, bucket);
List<UploadPartResult> results = new ArrayList<>();
for (int partNumber = 1; ; partNumber++) {
// An empty chunk signals end of input.
InputStream chunk = partNumber == 1 ? firstChunk : readChunk(restOfContents);
if (chunk.available() == 0) {
break;
}
else {
log.debug("Uploading chunk {} for {} of {} bytes", partNumber, uploadId, chunk.available());
UploadPartRequest part = new UploadPartRequest()
.withBucketName(bucket)
.withKey(key)
.withUploadId(uploadId)
.withPartNumber(partNumber)
.withInputStream(chunk)
.withPartSize(chunk.available());
results.add(s3.uploadPart(part));
}
}
CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
.withBucketName(bucket)
.withKey(key)
.withUploadId(uploadId)
.withPartETags(results);
s3.completeMultipartUpload(compRequest);
log.debug("Upload {} complete", uploadId);
// Null the id so the finally block knows the upload succeeded.
uploadId = null;
}
finally {
if (uploadId != null) {
try {
s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
}
catch(Exception e) {
// Stack trace attached only at debug level to keep error logs terse.
log.error("Error aborting S3 multipart upload to bucket {} with key {}", bucket, key,
log.isDebugEnabled() ? e : null);
}
}
}
}
/**
 * Server-side copies s3://bucket/sourcePath to s3://bucket/destinationPath using
 * the multipart copy API, in chunks of at most {@code chunkSize} bytes. On any SDK
 * failure the multipart upload is aborted (best-effort) and the exception
 * rethrown.
 *
 * @param length total number of bytes to copy; must be positive
 */
private void copyMultiPart(final AmazonS3 s3,
final String bucket,
final String sourcePath,
final String destinationPath,
final long length) {
checkState(length > 0);
String uploadId = null;
try {
long remaining = length;
long offset = 0;
InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, destinationPath);
uploadId = s3.initiateMultipartUpload(initiateRequest).getUploadId();
log.debug("Starting multipart copy {} to key {} from key {}", uploadId, destinationPath, sourcePath);
List<CopyPartResult> results = new ArrayList<>();
for (int partNumber = 1; ; partNumber++) {
if (remaining <= 0) {
break;
}
else {
// Copy the inclusive byte range [offset, offset + partSize - 1].
long partSize = min(remaining, chunkSize);
log.trace("Copying chunk {} for {} from byte {} to {}, size {}", partNumber, uploadId, offset,
offset + partSize - 1, partSize);
CopyPartRequest part = new CopyPartRequest()
.withSourceBucketName(bucket)
.withSourceKey(sourcePath)
.withDestinationBucketName(bucket)
.withDestinationKey(destinationPath)
.withUploadId(uploadId)
.withPartNumber(partNumber)
.withFirstByte(offset)
.withLastByte(offset + partSize - 1);
results.add(s3.copyPart(part));
offset += partSize;
remaining -= partSize;
}
}
CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
.withBucketName(bucket)
.withKey(destinationPath)
.withUploadId(uploadId)
.withPartETags(results.stream().map(r -> new PartETag(r.getPartNumber(), r.getETag())).collect(toList()));
s3.completeMultipartUpload(compRequest);
log.debug("Copy {} complete", uploadId);
}
catch(SdkClientException e) {
if (uploadId != null) {
try {
s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, destinationPath, uploadId));
}
catch(Exception inner) {
// Stack trace attached only at debug level to keep error logs terse.
log.error("Error aborting S3 multipart copy to bucket {} with key {}", bucket, destinationPath,
log.isDebugEnabled() ? inner : null);
}
}
throw e;
}
}
/**
 * {@inheritDoc}
 * <p/>
 * Delegates directly to the supplied {@link AmazonS3} client.
 */
@Override
public void abortMultipartUpload(AbortMultipartUploadRequest abortMultipartUploadRequest, AmazonS3 s3Client)
{
s3Client.abortMultipartUpload(abortMultipartUploadRequest);
}
/**
 * Aborts all multipart uploads in the bucket named by {@code params} that were
 * initiated before {@code thresholdDate}, paging through the listing with the
 * next-key and next-upload-id markers. The S3 client is always shut down before
 * returning.
 *
 * @return the number of multipart uploads aborted
 */
@Override
public int abortMultipartUploads(S3FileTransferRequestParamsDto params, Date thresholdDate)
{
// Create an Amazon S3 client.
AmazonS3Client s3Client = getAmazonS3(params);
int abortedMultipartUploadsCount = 0;
try
{
// List upload markers. Null implies initial list request.
String uploadIdMarker = null;
String keyMarker = null;
boolean truncated;
do
{
// Create the list multipart request, optionally using the last markers.
ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(params.getS3BucketName());
request.setUploadIdMarker(uploadIdMarker);
request.setKeyMarker(keyMarker);
// Request the multipart upload listing.
MultipartUploadListing uploadListing = s3Operations.listMultipartUploads(TransferManager.appendSingleObjectUserAgent(request), s3Client);
for (MultipartUpload upload : uploadListing.getMultipartUploads())
{
if (upload.getInitiated().compareTo(thresholdDate) < 0)
{
// Abort the upload.
s3Operations.abortMultipartUpload(TransferManager
.appendSingleObjectUserAgent(new AbortMultipartUploadRequest(params.getS3BucketName(), upload.getKey(), upload.getUploadId())),
s3Client);
// Log the information about the aborted multipart upload.
LOGGER.info("Aborted S3 multipart upload. s3Key=\"{}\" s3BucketName=\"{}\" s3MultipartUploadInitiatedDate=\"{}\"", upload.getKey(),
params.getS3BucketName(), upload.getInitiated());
// Increment the counter.
abortedMultipartUploadsCount++;
}
}
// Determine whether there are more uploads to list.
truncated = uploadListing.isTruncated();
if (truncated)
{
// Record the list markers.
uploadIdMarker = uploadListing.getNextUploadIdMarker();
keyMarker = uploadListing.getNextKeyMarker();
}
}
while (truncated);
}
finally
{
// Shutdown the Amazon S3 client instance to release resources.
s3Client.shutdown();
}
return abortedMultipartUploadsCount;
}
/**
 * {@inheritDoc}
 * <p/>
 * This implementation simulates abort multipart upload operation.
 */
@Override
public void abortMultipartUpload(AbortMultipartUploadRequest abortMultipartUploadRequest, AmazonS3 s3Client)
{
// This method does nothing.
// Intentional no-op: the simulated operation has no observable effect.
}
/** Unsupported Operation. */
@Override public void abortMultipartUpload(
AbortMultipartUploadRequest req) throws SdkClientException {
// This client cannot abort multipart uploads; fail loudly rather than no-op.
throw new UnsupportedOperationException("Operation not supported");
}
/**
 * Verifies that aborting a multipart upload removes the pending upload (where the
 * provider supports explicit abort) and leaves no object behind.
 */
@Test
public void testMultipartUploadAbort() throws Exception {
String blobName = "multipart-upload-abort";
ByteSource byteSource = TestUtils.randomByteSource().slice(
0, MINIMUM_MULTIPART_SIZE);
InitiateMultipartUploadResult result = client.initiateMultipartUpload(
new InitiateMultipartUploadRequest(containerName, blobName));
// TODO: google-cloud-storage and openstack-swift cannot list multipart
// uploads
MultipartUploadListing multipartListing = client.listMultipartUploads(
new ListMultipartUploadsRequest(containerName));
if (blobStoreType.equals("azureblob")) {
// Azure does not create a manifest during initiate multi-part
// upload. Instead the first part creates this.
assertThat(multipartListing.getMultipartUploads()).isEmpty();
} else {
assertThat(multipartListing.getMultipartUploads()).hasSize(1);
}
// No parts have been uploaded yet.
PartListing partListing = client.listParts(new ListPartsRequest(
containerName, blobName, result.getUploadId()));
assertThat(partListing.getParts()).isEmpty();
client.uploadPart(new UploadPartRequest()
.withBucketName(containerName)
.withKey(blobName)
.withUploadId(result.getUploadId())
.withPartNumber(1)
.withPartSize(byteSource.size())
.withInputStream(byteSource.openStream()));
// Exactly one in-progress upload with one part should now be visible.
multipartListing = client.listMultipartUploads(
new ListMultipartUploadsRequest(containerName));
assertThat(multipartListing.getMultipartUploads()).hasSize(1);
partListing = client.listParts(new ListPartsRequest(
containerName, blobName, result.getUploadId()));
assertThat(partListing.getParts()).hasSize(1);
client.abortMultipartUpload(new AbortMultipartUploadRequest(
containerName, blobName, result.getUploadId()));
multipartListing = client.listMultipartUploads(
new ListMultipartUploadsRequest(containerName));
if (blobStoreType.equals("azureblob")) {
// Azure does not support explicit abort. It automatically
// removes incomplete multi-part uploads after 7 days.
assertThat(multipartListing.getMultipartUploads()).hasSize(1);
} else {
assertThat(multipartListing.getMultipartUploads()).isEmpty();
}
// The aborted upload must not have materialized an object.
ObjectListing listing = client.listObjects(containerName);
assertThat(listing.getObjectSummaries()).isEmpty();
}
/**
 * Uploads {@code file} to {@code key} with the multipart API, applying the given
 * metadata and, when present, the requested storage class. On any failure the
 * upload is aborted and the cause rethrown wrapped in a RuntimeException.
 *
 * @param key destination object key
 * @param file local file to upload
 * @param objectMetadata metadata attached at initiate time
 * @param maybeStorageClass optional storage class for the object
 */
private void multipartUpload(
String key,
File file,
ObjectMetadata objectMetadata,
Optional<StorageClass> maybeStorageClass
)
throws Exception {
List<PartETag> partETags = new ArrayList<>();
InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
bucketName,
key,
objectMetadata
);
if (maybeStorageClass.isPresent()) {
initRequest.setStorageClass(maybeStorageClass.get());
}
InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(
initRequest
);
long contentLength = file.length();
long partSize = configuration.getUploadPartSize();
try {
long filePosition = 0;
for (int i = 1; filePosition < contentLength; i++) {
// The final part may be smaller than the configured part size.
partSize = Math.min(partSize, (contentLength - filePosition));
UploadPartRequest uploadRequest = new UploadPartRequest()
.withBucketName(bucketName)
.withKey(key)
.withUploadId(initResponse.getUploadId())
.withPartNumber(i)
.withFileOffset(filePosition)
.withFile(file)
.withPartSize(partSize);
partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
filePosition += partSize;
}
CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
bucketName,
key,
initResponse.getUploadId(),
partETags
);
s3Client.completeMultipartUpload(completeRequest);
} catch (Exception e) {
// Abort so no orphaned parts accrue storage charges, then surface the failure.
s3Client.abortMultipartUpload(
new AbortMultipartUploadRequest(bucketName, key, initResponse.getUploadId())
);
throw new RuntimeException(e);
}
}
@Override
public void abortMultipartUpload(AbortMultipartUploadRequest request) throws AmazonClientException,
AmazonServiceException {
// TODO Auto-generated method stub
// NOTE(review): stub — abort requests are silently ignored; confirm this is intended.
}
/**
 * Uploads the file at {@code filePath} to s3://bucketName/keyName with the
 * multipart API: initiate, upload parts, complete. On any failure the multipart
 * upload is aborted so no orphaned parts remain, and the failure is propagated.
 *
 * @throws IOException if the upload fails for any reason
 */
public void multipartUpload(String bucketName, String keyName, String filePath) throws IOException {
    //Create a list of UploadPartResponse objects. You get one of these for each part upload.
    List<PartETag> partETags = new ArrayList<PartETag>();
    // Step 1: Initialize.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    File file = new File(filePath);
    long contentLength = file.length();
    long partSize = 5 * 1024 * 1024; // 5 MB — the S3 minimum for non-final parts. (Old comment wrongly said 1kb.)
    try {
        // Step 2: Upload parts.
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++) {
            // Last part can be less than 5 MB. Adjust part size.
            partSize = Math.min(partSize, (contentLength - filePosition));
            // Create request to upload a part.
            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(bucketName).withKey(keyName)
                    .withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withFileOffset(filePosition)
                    .withFile(file)
                    .withPartSize(partSize);
            // Upload part and add response to our list.
            partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
            filePosition += partSize;
        }
        // Step 3: Complete.
        CompleteMultipartUploadRequest compRequest =
                new CompleteMultipartUploadRequest(bucketName, keyName, initResponse.getUploadId(), partETags);
        s3Client.completeMultipartUpload(compRequest);
    } catch (Exception e) {
        s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, keyName, initResponse.getUploadId()));
        // BUG FIX: the original printed the stack trace and returned normally, so
        // callers could not tell the upload failed. Propagate as the declared IOException.
        throw new IOException("Multipart upload of " + filePath + " to s3://" + bucketName + "/" + keyName + " failed", e);
    }
}
/**
 * Aborts a multipart upload.
 *
 * @param abortMultipartUploadRequest the request object containing all the parameters for the operation
 * @param s3Client the {@link AmazonS3} implementation to use when performing the abort
 */
public void abortMultipartUpload(AbortMultipartUploadRequest abortMultipartUploadRequest, AmazonS3 s3Client);