Listed below are code examples of org.apache.hadoop.fs.s3.S3Exception / org.jets3t.service.ServiceException usage, taken from open-source projects. Follow the links to view the full source code on GitHub.
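Nearly every example below follows the same pattern: a JetS3t call is wrapped in a try/catch for ServiceException, and the exception is translated into the caller's own error type (a handleException helper, an S3ExceptionMappingService, or a plain RuntimeException). The following minimal sketch of that pattern assumes a JetS3t RestS3Service instance; the class name, field, and error message are illustrative and not taken from any of the projects below.
import java.io.IOException;
import java.io.InputStream;
import org.jets3t.service.ServiceException;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
public class ServiceExceptionHandlingSketch {
    private final RestS3Service s3Service;
    public ServiceExceptionHandlingSketch(final RestS3Service s3Service) {
        this.s3Service = s3Service;
    }
    // Fetch an object and translate any ServiceException into an IOException,
    // mirroring the handleException/S3ExceptionMappingService calls in the examples below.
    public InputStream open(final String bucket, final String key) throws IOException {
        try {
            final S3Object object = s3Service.getObject(bucket, key);
            return object.getDataInputStream();
        }
        catch(ServiceException e) {
            throw new IOException(String.format("Failed to read s3://%s/%s", bucket, key), e);
        }
    }
}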
@Override
public HttpResponseOutputStream<VersionId> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final S3Object object = new S3WriteFeature(session, new S3DisabledMultipartService())
.getDetails(file, status);
// ID for the initiated multipart upload.
final MultipartUpload multipart;
try {
multipart = session.getClient().multipartStartUpload(
containerService.getContainer(file).getName(), object);
if(log.isDebugEnabled()) {
log.debug(String.format("Multipart upload started for %s with ID %s",
multipart.getObjectKey(), multipart.getUploadId()));
}
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
}
final MultipartOutputStream proxy = new MultipartOutputStream(multipart, file, status);
return new HttpResponseOutputStream<VersionId>(new MemorySegementingOutputStream(proxy,
preferences.getInteger("s3.upload.multipart.size"))) {
@Override
public VersionId getStatus() {
return proxy.getVersionId();
}
};
}
@Override
public boolean retryRequest(final IOException exception, final int executionCount, final HttpContext context) {
if(super.retryRequest(exception, executionCount, context)) {
final Object attribute = context.getAttribute(HttpCoreContext.HTTP_REQUEST);
if(attribute instanceof HttpUriRequest) {
final HttpUriRequest method = (HttpUriRequest) attribute;
log.warn(String.format("Retrying request %s", method));
try {
// Build the authorization string for the method.
authorizer.authorizeHttpRequest(method, context, null);
return true;
}
catch(ServiceException e) {
log.warn("Unable to generate updated authorization string for retried request", e);
}
}
}
return false;
}
@Override
public void setConfiguration(final Path file, final LoggingConfiguration configuration) throws BackgroundException {
// Logging target bucket
final Path bucket = containerService.getContainer(file);
try {
final S3BucketLoggingStatus status = new S3BucketLoggingStatus(
StringUtils.isNotBlank(configuration.getLoggingTarget()) ? configuration.getLoggingTarget() : bucket.getName(), null);
if(configuration.isEnabled()) {
status.setLogfilePrefix(PreferencesFactory.get().getProperty("s3.logging.prefix"));
}
session.getClient().setBucketLoggingStatus(bucket.getName(), status, true);
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Failure to write attributes of {0}", e, file);
}
}
@Override
public void setConfiguration(final Path file, final LifecycleConfiguration configuration) throws BackgroundException {
final Path container = containerService.getContainer(file);
try {
if(configuration.getTransition() != null || configuration.getExpiration() != null) {
final LifecycleConfig config = new LifecycleConfig();
// Unique identifier for the rule. The value cannot be longer than 255 characters. When you specify an empty prefix, the rule applies to all objects in the bucket
final LifecycleConfig.Rule rule = config.newRule(
String.format("%s-%s", PreferencesFactory.get().getProperty("application.name"), new AlphanumericRandomStringService().random()), StringUtils.EMPTY, true);
if(configuration.getTransition() != null) {
rule.newTransition().setDays(configuration.getTransition());
}
if(configuration.getExpiration() != null) {
rule.newExpiration().setDays(configuration.getExpiration());
}
session.getClient().setLifecycleConfig(container.getName(), config);
}
else {
session.getClient().deleteLifecycleConfig(container.getName());
}
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Failure to write attributes of {0}", e, container);
}
}
@Override
public FileMetadata retrieveMetadata(String key) throws IOException {
StorageObject object = null;
try {
LOG.debug("Getting metadata for key: {} from bucket: {}",
key, bucket.getName());
object = s3Service.getObjectDetails(bucket.getName(), key);
return new FileMetadata(key, object.getContentLength(),
object.getLastModifiedDate().getTime());
} catch (ServiceException e) {
try {
// process
handleException(e, key);
return null;
} catch (FileNotFoundException fnfe) {
// and downgrade missing files
return null;
}
} finally {
if (object != null) {
object.closeDataInputStream();
}
}
}
/**
 * @param key the object name being retrieved from the S3 bucket
 * @return an input stream over the object data, or null if the key is not found
 * @throws IOException on any reported failure
 */
@Override
public InputStream retrieve(String key, long byteRangeStart)
throws IOException {
try {
LOG.debug("Getting key: {} from bucket: {} with byteRangeStart: {}",
key, bucket.getName(), byteRangeStart);
S3Object object = s3Service.getObject(bucket, key, null, null, null,
null, byteRangeStart, null);
return object.getDataInputStream();
} catch (ServiceException e) {
handleException(e, key);
return null;
}
}
/**
* list objects
* @param prefix prefix
* @param delimiter delimiter
* @param maxListingLength max no. of entries
* @param priorLastKey last key in any previous search
* @return a list of matches
* @throws IOException on any reported failure
*/
private PartialListing list(String prefix, String delimiter,
int maxListingLength, String priorLastKey) throws IOException {
try {
if (!prefix.isEmpty() && !prefix.endsWith(PATH_DELIMITER)) {
prefix += PATH_DELIMITER;
}
StorageObjectsChunk chunk = s3Service.listObjectsChunked(bucket.getName(),
prefix, delimiter, maxListingLength, priorLastKey);
FileMetadata[] fileMetadata =
new FileMetadata[chunk.getObjects().length];
for (int i = 0; i < fileMetadata.length; i++) {
StorageObject object = chunk.getObjects()[i];
fileMetadata[i] = new FileMetadata(object.getKey(),
object.getContentLength(), object.getLastModifiedDate().getTime());
}
return new PartialListing(chunk.getPriorLastKey(), fileMetadata,
chunk.getCommonPrefixes());
} catch (ServiceException e) {
handleException(e, prefix);
return null; // never returned - keep compiler happy
}
}
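The priorLastKey parameter above is a continuation token: each PartialListing carries the key to resume from, and a caller keeps invoking list() until no token remains. The sketch below is a hypothetical caller of this private helper; the method name, the page size of 1000, and the PartialListing/FileMetadata accessors are assumptions based on how Hadoop's own callers consume such listings, not part of the original class.
// Hypothetical caller of the private list() helper above: page through every
// key under a prefix by passing the previous chunk's priorLastKey back in,
// assuming (as Hadoop's callers do) that getPriorLastKey() is null once the
// listing is complete.
private void listAllPages(String prefix) throws IOException {
    String priorLastKey = null;
    do {
        PartialListing listing = list(prefix, PATH_DELIMITER, 1000, priorLastKey);
        for (FileMetadata file : listing.getFiles()) {
            LOG.debug("Found key: {}", file.getKey());
        }
        priorLastKey = listing.getPriorLastKey();
    } while (priorLastKey != null);
}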
@Override
public void copy(String srcKey, String dstKey) throws IOException {
try {
if(LOG.isDebugEnabled()) {
LOG.debug("Copying srcKey: " + srcKey + "to dstKey: " + dstKey + "in bucket: " + bucket.getName());
}
if (multipartEnabled) {
S3Object object = s3Service.getObjectDetails(bucket, srcKey, null,
null, null, null);
if (multipartCopyBlockSize > 0 &&
object.getContentLength() > multipartCopyBlockSize) {
copyLargeFile(object, dstKey);
return;
}
}
S3Object dstObject = new S3Object(dstKey);
dstObject.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(),
dstObject, false);
} catch (ServiceException e) {
handleException(e, srcKey);
}
}
public List<ListItem> listAll(String path) {
m_logger.debug("Start list all: " + path);
try {
List<ListItem> result = new ArrayList<>();
String priorLastKey = null;
while(true) {
StorageObjectsChunk chunk = m_s3service.listObjectsChunked(BUCKET, path, "/", CHUNK_SIZE, priorLastKey);
m_logger.trace("ListObjects: {}", path);
inc();
StorageObject[] objects = chunk.getObjects();
for(int i = 0; i < objects.length; i++) {
String key = objects[i].getKey();
if(key.endsWith("/")) key = key.substring(0, key.length() - 1);
key = key.substring(path.length(), key.length());
ListItem item = new ListItem(key, objects[i].getContentLength() != 0);
result.add(item);
}
if(chunk.isListingComplete()) break;
priorLastKey = chunk.getPriorLastKey();
}
return result;
} catch (ServiceException e) {
throw new RuntimeException(e);
}
}
public void deleteAll(String path) {
try {
String priorLastKey = null;
while(true) {
StorageObjectsChunk chunk = m_s3service.listObjectsChunked(BUCKET, path, "?", CHUNK_SIZE, priorLastKey);
m_logger.trace("ListObjects to delete: {}", path);
inc();
StorageObject[] objects = chunk.getObjects();
if(objects.length == 0) break;
String[] names = new String[objects.length];
for(int i = 0; i < objects.length; i++) {
names[i] = objects[i].getKey();
}
m_s3service.deleteMultipleObjects(BUCKET, names);
m_logger.trace("DeleteObjects: {}", objects.length);
// do not inc() because delete requests are not counted
if(chunk.isListingComplete()) break;
priorLastKey = chunk.getPriorLastKey();
}
} catch (ServiceException e) {
throw new RuntimeException(e);
}
}
public boolean grantAcl(S3Object object) throws ServiceException, InterruptedException {
if(Strings.isNullOrEmpty(s3Acl)){
return true;
}
for (int i = 0; i < s3AclRetries; ++i) {
try {
AccessControlList acl = s3Service.getObjectAcl(object.getBucketName(), object.getKey());
for (String id : s3Acl.split(",")) {
acl.grantPermission(new CanonicalGrantee(id), Permission.PERMISSION_READ);
}
s3Service.putObjectAcl(object.getBucketName(), object.getKey(), acl);
return true;
} catch (Exception e) {
log.error("Exception while granting ACL: " + e.getMessage(), e);
Thread.sleep(1000 * (i + 1));
}
}
return false;
}
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
// Make sure file is available in cache
final List<TransferStatus> chunks = bulk.query(Transfer.Type.download, file, status);
// Sort chunks by offset
chunks.sort(Comparator.comparingLong(TransferStatus::getOffset));
final List<LazyInputStream> streams = new ArrayList<>();
for(TransferStatus chunk : chunks) {
final LazyInputStream in = new LazyInputStream(new LazyInputStream.OpenCallback() {
@Override
public InputStream open() throws IOException {
try {
return session.getClient().getObjectImpl(
false,
containerService.getContainer(file).getName(),
containerService.getKey(file),
null,
null,
null,
null,
null,
null,
file.attributes().getVersionId(),
new HashMap<String, Object>(),
chunk.getParameters())
.getDataInputStream();
}
catch(ServiceException e) {
throw new IOException(e.getMessage(), e);
}
}
});
streams.add(in);
}
// Concatenate streams
return new SequenceInputStream(Collections.enumeration(streams));
}
@Override
public VersioningConfiguration getConfiguration(final Path file) throws BackgroundException {
final Path container = containerService.getContainer(file);
if(container.isRoot()) {
return VersioningConfiguration.empty();
}
if(cache.contains(container)) {
return cache.get(container);
}
try {
final S3BucketVersioningStatus status
= session.getClient().getBucketVersioningStatus(container.getName());
final VersioningConfiguration configuration = new VersioningConfiguration(status.isVersioningEnabled(),
status.isMultiFactorAuthDeleteRequired());
cache.put(container, configuration);
return configuration;
}
catch(ServiceException e) {
try {
throw new S3ExceptionMappingService().map("Cannot read container configuration", e);
}
catch(AccessDeniedException l) {
log.warn(String.format("Missing permission to read versioning configuration for %s %s", container, e.getMessage()));
return VersioningConfiguration.empty();
}
catch(InteroperabilityException i) {
log.warn(String.format("Not supported to read versioning configuration for %s %s", container, e.getMessage()));
return VersioningConfiguration.empty();
}
}
}
protected void create(final Path bucket, final AccessControlList acl, final String region) throws BackgroundException {
try {
if(StringUtils.isNotBlank(region)) {
if(S3Session.isAwsHostname(session.getHost().getHostname())) {
session.getClient().getConfiguration().setProperty("s3service.s3-endpoint", String.format("s3.dualstack.%s.amazonaws.com", region));
}
}
// Create bucket
session.getClient().createBucket(URIEncoder.encode(containerService.getContainer(bucket).getName()),
"us-east-1".equals(region) ? "US" : region, acl);
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Cannot create folder {0}", e, bucket);
}
}
@Override
public Acl getPermission(final Path file) throws BackgroundException {
try {
if(file.getType().contains(Path.Type.upload)) {
return Acl.EMPTY;
}
if(containerService.isContainer(file)) {
// This method can be performed by anonymous services, but can only succeed if the
// bucket's existing ACL already allows write access by the anonymous user.
// In general, you can only access the ACL of a bucket if the ACL already in place
// for that bucket (in S3) allows you to do so.
return this.convert(session.getClient().getBucketAcl(containerService.getContainer(file).getName()));
}
else if(file.isFile() || file.isPlaceholder()) {
return this.convert(session.getClient().getVersionedObjectAcl(file.attributes().getVersionId(),
containerService.getContainer(file).getName(), containerService.getKey(file)));
}
return Acl.EMPTY;
}
catch(ServiceException e) {
final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
if(file.isPlaceholder()) {
if(failure instanceof NotfoundException) {
// No placeholder file may exist but we just have a common prefix
return Acl.EMPTY;
}
}
if(failure instanceof InteroperabilityException) {
// The specified method is not allowed against this resource. The case for delete markers in versioned buckets.
return Acl.EMPTY;
}
throw failure;
}
}
@Override
public void setPermission(final Path file, final Acl acl) throws BackgroundException {
try {
final Path container = containerService.getContainer(file);
if(null == acl.getOwner()) {
// Read owner from cache
acl.setOwner(file.attributes().getAcl().getOwner());
}
if(null == acl.getOwner()) {
// Read owner from bucket
final Acl permission = this.getPermission(container);
acl.setOwner(permission.getOwner());
}
if(containerService.isContainer(file)) {
session.getClient().putBucketAcl(container.getName(), this.convert(acl));
}
else {
if(file.isFile() || file.isPlaceholder()) {
session.getClient().putObjectAcl(container.getName(), containerService.getKey(file), this.convert(acl));
}
}
}
catch(ServiceException e) {
final BackgroundException failure = new S3ExceptionMappingService().map("Cannot change permissions of {0}", e, file);
if(file.isPlaceholder()) {
if(failure instanceof NotfoundException) {
// No placeholder file may exist but we just have a common prefix
return;
}
}
throw failure;
}
}
/**
* @param list ACL from server
* @return Editable ACL
*/
protected Acl convert(final AccessControlList list) {
if(log.isDebugEnabled()) {
try {
log.debug(list.toXml());
}
catch(ServiceException e) {
log.error(e.getMessage());
}
}
Acl acl = new Acl();
acl.setOwner(new Acl.CanonicalUser(list.getOwner().getId(), list.getOwner().getDisplayName()));
for(GrantAndPermission grant : list.getGrantAndPermissions()) {
Acl.Role role = new Acl.Role(grant.getPermission().toString());
if(grant.getGrantee() instanceof CanonicalGrantee) {
acl.addAll(new Acl.CanonicalUser(grant.getGrantee().getIdentifier(),
((CanonicalGrantee) grant.getGrantee()).getDisplayName(), false), role);
}
else if(grant.getGrantee() instanceof EmailAddressGrantee) {
acl.addAll(new Acl.EmailUser(grant.getGrantee().getIdentifier()), role);
}
else if(grant.getGrantee() instanceof GroupGrantee) {
acl.addAll(new Acl.GroupUser(grant.getGrantee().getIdentifier()), role);
}
}
return acl;
}
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
if(file.getType().contains(Path.Type.upload)) {
return new NullInputStream(0L);
}
final HttpRange range = HttpRange.withStatus(status);
final RequestEntityRestStorageService client = session.getClient();
final S3Object object = client.getVersionedObject(
file.attributes().getVersionId(),
containerService.getContainer(file).getName(),
containerService.getKey(file),
null, // ifModifiedSince
null, // ifUnmodifiedSince
null, // ifMatch
null, // ifNoneMatch
status.isAppend() ? range.getStart() : null,
status.isAppend() ? (range.getEnd() == -1 ? null : range.getEnd()) : null);
if(log.isDebugEnabled()) {
log.debug(String.format("Reading stream with content length %d", object.getContentLength()));
}
return object.getDataInputStream();
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Download {0} failed", e, file);
}
}
protected String copy(final Path source, final S3Object destination, final TransferStatus status) throws BackgroundException {
try {
// Copying object applying the metadata of the original
final Map<String, Object> stringObjectMap = session.getClient().copyVersionedObject(source.attributes().getVersionId(), containerService.getContainer(source).getName(),
containerService.getKey(source),
destination.getBucketName(), destination, false);
final Map complete = (Map) stringObjectMap.get(Constants.KEY_FOR_COMPLETE_METADATA);
return (String) complete.get(Constants.AMZ_VERSION_ID);
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Cannot copy {0}", e, source);
}
}
@Override
public HttpResponseOutputStream<StorageObject> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final S3Object object = this.getDetails(file, status);
final DelayedHttpEntityCallable<StorageObject> command = new DelayedHttpEntityCallable<StorageObject>() {
@Override
public StorageObject call(final AbstractHttpEntity entity) throws BackgroundException {
try {
final RequestEntityRestStorageService client = session.getClient();
client.putObjectWithRequestEntityImpl(
containerService.getContainer(file).getName(), object, entity, status.getParameters());
if(log.isDebugEnabled()) {
log.debug(String.format("Saved object %s with checksum %s", file, object.getETag()));
}
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
}
return object;
}
@Override
public long getContentLength() {
return status.getLength();
}
};
return this.write(file, status, command);
}
@Override
protected void logout() throws BackgroundException {
try {
client.shutdown();
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map(e);
}
}
@Override
public StorageObject getObjectImpl(boolean headOnly, String bucketName, String objectKey,
Calendar ifModifiedSince, Calendar ifUnmodifiedSince,
String[] ifMatchTags, String[] ifNoneMatchTags,
Long byteRangeStart, Long byteRangeEnd, String versionId,
Map<String, Object> requestHeaders,
Map<String, String> requestParameters) throws ServiceException {
return super.getObjectImpl(headOnly, bucketName, objectKey, ifModifiedSince, ifUnmodifiedSince, ifMatchTags, ifNoneMatchTags, byteRangeStart, byteRangeEnd,
versionId, requestHeaders, requestParameters);
}
@Override
public void verifyExpectedAndActualETagValues(String expectedETag, StorageObject uploadedObject) throws ServiceException {
if(StringUtils.isBlank(uploadedObject.getETag())) {
log.warn("No ETag to verify");
return;
}
super.verifyExpectedAndActualETagValues(expectedETag, uploadedObject);
}
@Override
public void authorizeHttpRequest(final HttpUriRequest httpMethod, final HttpContext context,
final String forceRequestSignatureVersion) throws ServiceException {
if(forceRequestSignatureVersion != null) {
final S3Protocol.AuthenticationHeaderSignatureVersion authenticationHeaderSignatureVersion
= S3Protocol.AuthenticationHeaderSignatureVersion.valueOf(StringUtils.remove(forceRequestSignatureVersion, "-"));
log.warn(String.format("Switched authentication signature version to %s", forceRequestSignatureVersion));
session.setSignatureVersion(authenticationHeaderSignatureVersion);
}
super.authorizeHttpRequest(httpMethod, context, forceRequestSignatureVersion);
}
@Override
public LifecycleConfiguration getConfiguration(final Path file) throws BackgroundException {
final Path container = containerService.getContainer(file);
if(container.isRoot()) {
return LifecycleConfiguration.empty();
}
if(file.getType().contains(Path.Type.upload)) {
return LifecycleConfiguration.empty();
}
try {
final LifecycleConfig status = session.getClient().getLifecycleConfig(container.getName());
if(null != status) {
Integer transition = null;
Integer expiration = null;
String storageClass = null;
for(LifecycleConfig.Rule rule : status.getRules()) {
if(rule.getTransition() != null) {
storageClass = rule.getTransition().getStorageClass();
transition = rule.getTransition().getDays();
}
if(rule.getExpiration() != null) {
expiration = rule.getExpiration().getDays();
}
}
return new LifecycleConfiguration(transition, storageClass, expiration);
}
return LifecycleConfiguration.empty();
}
catch(ServiceException e) {
try {
throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, container);
}
catch(AccessDeniedException | InteroperabilityException l) {
log.warn(String.format("Missing permission to read lifecycle configuration for %s %s", container, e.getMessage()));
return LifecycleConfiguration.empty();
}
}
}
private Future<Path> submit(final ThreadPool pool, final Path bucket, final Path directory, final String common) {
return pool.execute(new BackgroundExceptionCallable<Path>() {
@Override
public Path call() throws BackgroundException {
final PathAttributes attributes = new PathAttributes();
attributes.setRegion(bucket.attributes().getRegion());
final Path prefix = new Path(directory, PathNormalizer.name(common),
EnumSet.of(Path.Type.directory, Path.Type.placeholder), attributes);
try {
final VersionOrDeleteMarkersChunk versions = session.getClient().listVersionedObjectsChunked(
bucket.getName(), common, null, 1,
null, null, false);
if(versions.getItems().length == 1) {
final BaseVersionOrDeleteMarker version = versions.getItems()[0];
if(version.getKey().equals(common)) {
attributes.setVersionId(version.getVersionId());
if(version.isDeleteMarker()) {
attributes.setCustom(ImmutableMap.of(KEY_DELETE_MARKER, Boolean.TRUE.toString()));
attributes.setDuplicate(true);
}
}
else {
// no placeholder but objects inside - need to check if all of them are deleted
final StorageObjectsChunk unversioned = session.getClient().listObjectsChunked(bucket.getName(), common,
null, 1, null, false);
if(unversioned.getObjects().length == 0) {
attributes.setDuplicate(true);
}
}
}
return prefix;
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Listing directory {0} failed", e, prefix);
}
}
});
}