The following are examples of how com.amazonaws.services.s3.model.ObjectMetadata#getContentLength() is used in open-source projects.
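As a quick orientation before the project snippets, here is a minimal sketch of the basic call pattern; the client setup, bucket name, and key below are placeholders, not taken from any of the projects:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class ContentLengthExample {
    public static void main(String[] args) {
        // Placeholder client; real code would configure region/credentials as needed.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // Issues a HEAD request: metadata is fetched without downloading the object body.
        ObjectMetadata metadata = s3.getObjectMetadata("my-bucket", "path/to/key");
        // Content-Length of the stored object, in bytes.
        long sizeInBytes = metadata.getContentLength();
        System.out.println("Object size: " + sizeInBytes + " bytes");
    }
}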
@Override
public AbstractDbArtifact getArtifactBySha1(final String tenant, final String sha1Hash) {
    final String key = objectKey(tenant, sha1Hash);
    LOG.info("Retrieving S3 object from bucket {} and key {}", s3Properties.getBucketName(), key);
    try (final S3Object s3Object = amazonS3.getObject(s3Properties.getBucketName(), key)) {
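        // try-with-resources closes the S3Object and its underlying HTTP connection when done.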
        if (s3Object == null) {
            return null;
        }
        final ObjectMetadata s3ObjectMetadata = s3Object.getObjectMetadata();
        // the MD5Content is stored in the ETag
        return new S3Artifact(amazonS3, s3Properties, key, sha1Hash,
                new DbArtifactHash(sha1Hash,
                        BaseEncoding.base16().lowerCase()
                                .encode(BaseEncoding.base64().decode(s3ObjectMetadata.getETag())),
                        null),
                s3ObjectMetadata.getContentLength(), s3ObjectMetadata.getContentType());
    } catch (final IOException e) {
        LOG.error("Could not verify S3Object", e);
        return null;
    }
}
private ObjectMetadata getConfigMetadata() throws Exception
{
    try
    {
        ObjectMetadata metadata = s3Client.getObjectMetadata(arguments.getBucket(), arguments.getKey());
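        // A zero-length object is treated the same as a missing one: fall through to return null.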
        if ( metadata.getContentLength() > 0 )
        {
            return metadata;
        }
    }
    catch ( AmazonS3Exception e )
    {
        if ( !isNotFoundError(e) )
        {
            throw e;
        }
    }
    return null;
}
@Override
public void copy(final AmazonS3 s3, final String bucket, final String sourcePath, final String destinationPath) {
    ObjectMetadata metadataResult = s3.getObjectMetadata(bucket, sourcePath);
    long length = metadataResult.getContentLength();
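    // Objects below the configured chunk size are copied in one request; larger
    // objects use multipart copy (a single S3 copy request is capped at 5 GB).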
    try {
        if (length < chunkSize) {
            copySinglePart(s3, bucket, sourcePath, destinationPath);
        }
        else {
            copyMultiPart(s3, bucket, sourcePath, destinationPath, length);
        }
    }
    catch (SdkClientException e) {
        throw new BlobStoreException("Error copying blob", e, null);
    }
}
/**
 * Update object's information
 */
protected boolean updateInfo(S3Object s3object) {
    // Read metadata
    ObjectMetadata om = s3object.getObjectMetadata();
    // Update data
    size = om.getContentLength();
    canRead = true;
    canWrite = true;
    lastModified = om.getLastModified();
    exists = true;
    latestUpdate = new Timer(CACHE_TIMEOUT);
    // Show information
    if (debug) Timer.showStdErr("Updated information for '" + this + "'" //
            + "\n\tcanRead : " + canRead //
            + "\n\texists : " + exists //
            + "\n\tlast modified: " + lastModified //
            + "\n\tsize : " + size //
    );
    return true;
}
/**
 * Returns the size of the dataset, in bytes. Will be zero if this dataset is a collection or non-existent.
 *
 * @return the size of the dataset
 */
@Override
public long length() {
    // If the summary is already in the cache, return it.
    // It'll have been added by a listDatasets() call on the parent directory.
    S3ObjectSummary objectSummary = objectSummaryCache.getIfPresent(s3uri);
    if (objectSummary != null) {
        return objectSummary.getSize();
    }
    /*
     * Get the metadata directly from S3. This will be expensive.
     * We get punished hard if length() and/or lastModified() is called on a bunch of datasets without
     * listDatasets() first being called on their parent directory.
     *
     * So, is the right thing to do here "getParentDataset().listDatasets()" and then query the cache again?
     * Perhaps, but listDatasets() throws an IOException, and length() and lastModified() do not.
     * We would have to change their signatures and the upstream client code to make it work.
     */
    ObjectMetadata metadata = threddsS3Client.getObjectMetadata(s3uri);
    if (metadata != null) {
        return metadata.getContentLength();
    } else {
        // "this" may be a collection or non-existent. In both cases, we return 0.
        return 0;
    }
}
private static long getObjectSize(Path path, ObjectMetadata metadata)
        throws IOException
{
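    // With client-side encryption, getContentLength() reports the padded ciphertext
    // size, so the plaintext length is read from the UNENCRYPTED_CONTENT_LENGTH
    // user-metadata entry instead.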
    Map<String, String> userMetadata = metadata.getUserMetadata();
    String length = userMetadata.get(UNENCRYPTED_CONTENT_LENGTH);
    if (userMetadata.containsKey(SERVER_SIDE_ENCRYPTION) && length == null) {
        throw new IOException(format("%s header is not set on an encrypted object: %s", UNENCRYPTED_CONTENT_LENGTH, path));
    }
    return (length != null) ? Long.parseLong(length) : metadata.getContentLength();
}
@Override
public void commitAfterRecovery() throws IOException {
    if (totalLength > 0L) {
        LOG.info("Trying to commit after recovery {} with MPU ID {}", objectName, uploadId);
        try {
            s3AccessHelper.commitMultiPartUpload(objectName, uploadId, parts, totalLength, new AtomicInteger());
        } catch (IOException e) {
            LOG.info("Failed to commit after recovery {} with MPU ID {}. " +
                    "Checking if file was committed before...", objectName, uploadId);
            LOG.trace("Exception when committing:", e);
            try {
                ObjectMetadata metadata = s3AccessHelper.getObjectMetadata(objectName);
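                // An object of the expected length means an earlier attempt already
                // committed the upload, so the failure can be treated as success.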
                if (totalLength != metadata.getContentLength()) {
                    String message = String.format("Inconsistent result for object %s: conflicting lengths. " +
                                    "Recovered committer for upload %s indicates %s bytes, present object is %s bytes",
                            objectName, uploadId, totalLength, metadata.getContentLength());
                    LOG.warn(message);
                    throw new IOException(message, e);
                }
            } catch (FileNotFoundException fnf) {
                LOG.warn("Object {} not existing after failed recovery commit with MPU ID {}", objectName, uploadId);
                throw new IOException(String.format("Recovering commit failed for object %s. " +
                        "Object does not exist and MultiPart Upload %s is not valid.", objectName, uploadId), e);
            }
        }
    } else {
        LOG.debug("No data to commit for file: {}", objectName);
    }
}
@VisibleForTesting
void copy(S3ResourceId sourcePath, S3ResourceId destinationPath) throws IOException {
    try {
        ObjectMetadata sourceObjectMetadata = getObjectMetadata(sourcePath);
        if (sourceObjectMetadata.getContentLength() < MAX_COPY_OBJECT_SIZE_BYTES) {
            atomicCopy(sourcePath, destinationPath, sourceObjectMetadata);
        } else {
            multipartCopy(sourcePath, destinationPath, sourceObjectMetadata);
        }
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
}
private boolean isNewResourceAvailable(String filename) {
    File file = new File(downloadDirectory, filename);
    // TODO: partial downloaded file
    if (!file.exists()) {
        return true;
    }
    try {
        ObjectMetadata metadata = manager.getAmazonS3Client()
                .getObjectMetadata(AWSClientManager.S3_BUCKET_NAME,
                        filename);
        long remoteLastModified = metadata.getLastModified().getTime();
        filesize = metadata.getContentLength();
        if (file.lastModified() < remoteLastModified) {
            return true;
        } else {
            return false;
        }
    } catch (Exception e) {
        Log.e("isNewResourceAvailable", e.getMessage());
        e.printStackTrace();
    }
    return true;
}
@Override
public FileStatus[] listStatus(final Path f) throws IOException {
    final S3BucketObjectPair bop = this.directoryStructure.toBucketObjectPair(f);
    try {
        if (!bop.hasBucket()) {
            final List<Bucket> list = this.s3Client.listBuckets();
            final S3FileStatus[] array = new S3FileStatus[list.size()];
            final Iterator<Bucket> it = list.iterator();
            int i = 0;
            while (it.hasNext()) {
                final Bucket bucket = it.next();
                final long creationDate = dateToLong(bucket.getCreationDate());
                // S3 does not track access times, so this implementation always sets it to 0
                final S3FileStatus status = new S3FileStatus(extendPath(f, bucket.getName()
                        + S3_DIRECTORY_SEPARATOR), 0, true, creationDate, 0L);
                array[i++] = status;
            }
            return array;
        }
        if (bop.hasBucket() && !bop.hasObject()) {
            // Check if the bucket really exists
            if (!this.s3Client.doesBucketExist(bop.getBucket())) {
                throw new FileNotFoundException("Cannot find " + f.toUri());
            }
            return listBucketContent(f, bop);
        } else {
            final ObjectMetadata omd = this.s3Client.getObjectMetadata(bop.getBucket(), bop.getObject());
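            // S3 has no real directories; zero-length objects whose key ends with the
            // separator conventionally serve as directory markers.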
            if (objectRepresentsDirectory(bop.getObject(), omd.getContentLength())) {
                return listBucketContent(f, bop);
            } else {
                final S3FileStatus fileStatus = new S3FileStatus(f, omd.getContentLength(), false,
                        dateToLong(omd.getLastModified()), 0L);
                return new FileStatus[] { fileStatus };
            }
        }
    } catch (AmazonClientException e) {
        throw new IOException(StringUtils.stringifyException(e));
    }
}
public TocPathOpResult validateOnS3(TOCPayload payload) {
    if (s3Client == null || s3BucketName == null) {
        throw new RuntimeException("Cannot validateOnS3(), TOCPayloadValidator is not configured w/ s3Client or bucket name");
    }
    try {
        String keyToCheck = toc2Key(payload.tocInfo.getPath(), payload.tocInfo.isDirectory);
        logger.debug("validateOnS3() " + keyToCheck);
        ObjectMetadata md = s3Client.getObjectMetadata(getS3BucketName(), keyToCheck);
        // size does not match!
        if (payload.tocInfo.size != md.getContentLength()) {
            logger.error("validateOnS3() S3 object length does not match! " +
                    keyToCheck + " expected:" + payload.tocInfo.size + " actual:" + md.getContentLength());
            return new TocPathOpResult(payload.mode, false, payload.tocInfo.getPath(),
                    "s3.check.content.length", "expected:" + payload.tocInfo.size + " actual:" + md.getContentLength());
        }
        // SUCCESS (no 404, so the key exists and the sizes match)
        return new TocPathOpResult(payload.mode, true, payload.tocInfo.getPath(), "s3.check", "ok");
    } catch (AmazonS3Exception e) {
        // 404
        if (e.getStatusCode() == 404) {
            logger.error("validateOnS3() " + payload.tocInfo.getPath() + " s3check returned 404");
            return new TocPathOpResult(payload.mode, false, payload.tocInfo.getPath(),
                    "s3.check.404", "key not found 404 at " + this.getS3BucketName());
        // other error
        } else {
            logger.error("validateOnS3() " + payload.tocInfo.getPath() + " unexpected error: " + e.getMessage(), e);
            return new TocPathOpResult(payload.mode, false, payload.tocInfo.getPath(),
                    "s3.check.error", "error getting object metadata: " + e.getMessage());
        }
    }
}