com.amazonaws.services.s3.AmazonS3URI Source Code Examples

The examples below show how com.amazonaws.services.s3.AmazonS3URI is used in various open-source projects; each entry names the project and file the snippet was taken from, so you can follow it back to the original source on GitHub.
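As a primer for the examples: AmazonS3URI parses an S3 URI into its bucket, key, version, and (for endpoint-style HTTPS URLs) region components. A minimal, self-contained sketch of that API, with illustrative bucket and key names:

import com.amazonaws.services.s3.AmazonS3URI;

public class AmazonS3URIDemo
{
    public static void main(String[] args)
    {
        // s3:// form: bucket + key, but no region information.
        AmazonS3URI uri = new AmazonS3URI("s3://my-bucket/path/to/object.txt");
        System.out.println(uri.getBucket()); // my-bucket
        System.out.println(uri.getKey());    // path/to/object.txt
        System.out.println(uri.getRegion()); // null: s3:// URIs carry no region

        // Endpoint-style form: the region is parsed from the hostname.
        AmazonS3URI endpoint = new AmazonS3URI("https://s3-eu-west-1.amazonaws.com/my-bucket/key");
        System.out.println(endpoint.getRegion()); // eu-west-1
    }
}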

Example 1  Project: presto  File: TestS3TableConfigClient.java
@Test
public void testS3URIValues()
{
    // Verify that S3URI values will work:
    AmazonS3URI uri1 = new AmazonS3URI("s3://our.data.warehouse/prod/client_actions");
    assertNotNull(uri1.getKey());
    assertNotNull(uri1.getBucket());

    assertEquals(uri1.toString(), "s3://our.data.warehouse/prod/client_actions");
    assertEquals(uri1.getBucket(), "our.data.warehouse");
    assertEquals(uri1.getKey(), "prod/client_actions");
    assertTrue(uri1.getRegion() == null);

    // show info:
    log.info("Tested out URI1 : " + uri1.toString());

    AmazonS3URI uri2 = new AmazonS3URI("s3://some.big.bucket/long/complex/path");
    assertNotNull(uri2.getKey());
    assertNotNull(uri2.getBucket());

    assertEquals(uri2.toString(), "s3://some.big.bucket/long/complex/path");
    assertEquals(uri2.getBucket(), "some.big.bucket");
    assertEquals(uri2.getKey(), "long/complex/path");
    assertTrue(uri2.getRegion() == null);

    // info:
    log.info("Tested out URI2 : " + uri2.toString());

    AmazonS3URI uri3 = new AmazonS3URI("s3://presto.kinesis.config/unit-test/presto-kinesis");
    assertNotNull(uri3.getKey());
    assertNotNull(uri3.getBucket());

    assertEquals(uri3.toString(), "s3://presto.kinesis.config/unit-test/presto-kinesis");
    assertEquals(uri3.getBucket(), "presto.kinesis.config");
    assertEquals(uri3.getKey(), "unit-test/presto-kinesis");
}
 
Example 2  Project: xyz-hub  File: RelocationClient.java
/**
 * Relocates a request or response.
 *
 * @param streamId The streamId of the original request or response
 * @param bytes the bytes of the feature collection to be returned.
 * @return the serialized RelocatedEvent as bytes
 * @throws Exception if any error occurred.
 */
public byte[] relocate(String streamId, byte[] bytes) throws Exception {
  ByteArrayInputStream is = new ByteArrayInputStream(bytes);
  if (!Payload.isCompressed(is)) {
    bytes = Payload.compress(bytes);
  }

  String name = UUID.randomUUID().toString();
  RelocatedEvent event = new RelocatedEvent();
  event.setStreamId(streamId);

  // Keep backward compatibility.
  event.setLocation(name);
  event.setURI("s3://" + bucket + "/" + S3_PATH + name);

  logger.info("{} - Relocating data to: {}", streamId, event.getURI());
  uploadToS3(new AmazonS3URI(event.getURI()), bytes);

  return event.toString().getBytes();
}
 
Example 3  Project: xyz-hub  File: RelocationClient.java
/**
 * Returns the input stream of the relocated event.
 *
 * @param event the relocation event.
 * @return the input stream of the real event
 * @throws ErrorResponseException when any error occurred
 */
public InputStream processRelocatedEvent(RelocatedEvent event) throws ErrorResponseException {
  try {
    if (event.getURI() == null && event.getLocation() != null) {
      event.setURI("s3://" + bucket + "/" + S3_PATH + event.getLocation());
    }
    logger.info("{}, Found relocation event, loading final event from '{}'", event.getStreamId(), event.getURI());

    if (event.getURI().startsWith("s3://")) {
      return downloadFromS3(new AmazonS3URI(event.getURI()));
    } else {
      throw new ErrorResponseException(event.getStreamId(), XyzError.ILLEGAL_ARGUMENT, "Unsupported URI type");
    }

  } catch (IOException e) {
    throw new ErrorResponseException(event.getStreamId(), XyzError.BAD_GATEWAY, "Unable to download the relocated event from S3.");
  }
}
 
Example 4  Project: circus-train  File: JceksAmazonS3ClientFactory.java
private AmazonS3 newS3Client(
    AmazonS3URI uri,
    S3S3CopierOptions s3s3CopierOptions,
    HadoopAWSCredentialProviderChain credentialProviderChain) {
  LOG.debug("trying to get a client for uri '{}'", uri);
  AmazonS3 globalClient = newGlobalInstance(s3s3CopierOptions, credentialProviderChain);
  try {

    /*
     * When using roles it can take a while for the credentials to be retrieved from the
     * AssumeRoleCredentialsProvider. This can mean that the rest of the code completes before the credentials are
     * retrieved, resulting in errors. A temporary fix for this situation is to put the thread to sleep for 10s to
     * allow for retrieval before the code continues. Thread.sleep(10000);
     **/

    String bucketRegion = regionForUri(globalClient, uri);
    LOG.debug("Bucket region: {}", bucketRegion);
    return newInstance(bucketRegion, s3s3CopierOptions, credentialProviderChain);
  } catch (IllegalArgumentException e) {
    LOG.warn("Using global (non region specific) client", e);
    return globalClient;
  }
}
 
Example 5  Project: circus-train  File: JceksAmazonS3ClientFactory.java
private String regionForUri(AmazonS3 client, AmazonS3URI uri) {
  String bucketRegion = client.getBucketLocation(uri.getBucket());
  Region region = Region.fromValue(bucketRegion);

  // S3 doesn't have a US East 1 region, US East 1 is really the region
  // US Standard. US Standard places the data in either an east coast
  // or west coast data center geographically closest to you.
  // SigV4 requires you to mention a region while signing a request
  // and for the S3's US standard endpoints the value to be used is "us-east-1"
  // US West 1 has an endpoint and so is treated as a stand alone region,
  // US East 1 doesn't and so is bundled into US Standard
  if (region.equals(Region.US_Standard)) {
    bucketRegion = "us-east-1";
  } else {
    bucketRegion = region.toString();
  }
  return bucketRegion;
}
 
Example 6  Project: circus-train  File: S3S3Copier.java
private void initialiseAllCopyRequests() {
  LOG.info("Initialising all copy jobs");

  AmazonS3URI sourceBase = toAmazonS3URI(sourceBaseLocation.toUri());
  AmazonS3URI targetBase = toAmazonS3URI(replicaLocation.toUri());
  srcClient = s3ClientFactory.newInstance(sourceBase, s3s3CopierOptions);

  if (sourceSubLocations.isEmpty()) {
    initialiseCopyJobs(sourceBase, targetBase);
  } else {
    for (Path path : sourceSubLocations) {
      AmazonS3URI subLocation = toAmazonS3URI(path.toUri());
      String partitionKey = StringUtils.removeStart(subLocation.getKey(), sourceBase.getKey());
      partitionKey = StringUtils.removeStart(partitionKey, "/");
      AmazonS3URI targetS3Uri = toAmazonS3URI(new Path(replicaLocation, partitionKey).toUri());
      initialiseCopyJobs(subLocation, targetS3Uri);
    }
  }

  int totalCopyJobs = copyJobRequests.size();
  LOG.info("Finished initialising {} copy job(s)", totalCopyJobs);
  s3s3CopierOptions
      .setMaxThreadPoolSize(determineThreadPoolSize(totalCopyJobs, s3s3CopierOptions.getMaxThreadPoolSize()));
  targetClient = s3ClientFactory.newInstance(targetBase, s3s3CopierOptions);
  transferManager = transferManagerFactory.newInstance(targetClient, s3s3CopierOptions);
}
 
Example 7  Project: bender  File: GeoIpOperationFactory.java
@Override
public void setConf(AbstractConfig config) {
  this.config = (GeoIpOperationConfig) config;
  AmazonS3Client client = this.s3Factory.newInstance();

  AmazonS3URI uri = new AmazonS3URI(this.config.getGeoLiteDb());
  GetObjectRequest req = new GetObjectRequest(uri.getBucket(), uri.getKey());
  S3Object obj = client.getObject(req);

  try {
    this.databaseReader =
        new DatabaseReader.Builder(obj.getObjectContent()).withCache(new CHMCache()).build();
  } catch (IOException e) {
    throw new ConfigurationException("Unable to read " + this.config.getGeoLiteDb(), e);
  }
}
 
Example 8  Project: bender  File: BenderConfig.java
public static BenderConfig load(AmazonS3ClientFactory s3ClientFactory, AmazonS3URI s3Uri) {
  AmazonS3Client s3 = s3ClientFactory.newInstance();
  S3Object s3object = s3.getObject(s3Uri.getBucket(), s3Uri.getKey());

  StringWriter writer = new StringWriter();

  try {
    IOUtils.copy(s3object.getObjectContent(), writer, "UTF-8");
  } catch (IOException e) {
    throw new ConfigurationException("Unable to read file from s3", e);
  }
  BenderConfig config = load(s3Uri.getKey().toString(), writer.toString());
  config.setConfigFile(s3Uri.getURI().toString());

  return config;
}
 
Example 9  Project: digdag  File: EmrIT.java
protected void validateTdSparkQueryOutput()
{
    AmazonS3URI resultUri = new AmazonS3URI(tmpS3FolderUri.toString() + "/result/");
    ObjectListing resultListing = s3.listObjects(new ListObjectsRequest().withBucketName(resultUri.getBucket()).withPrefix(resultUri.getKey()));
    List<String> resultLines = resultListing.getObjectSummaries().stream().flatMap(o -> {
        try (S3Object object = s3.getObject(o.getBucketName(), o.getKey())) {
            return CharStreams.readLines(new InputStreamReader(object.getObjectContent())).stream();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }
    }).collect(toList());
    // FIXME
    // we need to specify the version of td-spark that we can use for acceptance tests.
    // In the meantime the assertion is commented out.
    //assertThat(resultLines, Matchers.hasItem(",164.54.104.106,/item/games/4663,/category/electronics,404,Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0),121,GET,1412383598"));
}
 
Example 10  Project: engine  File: S3ContentStoreAdapter.java
/**
 * {@inheritDoc}
 */
@Override
public Context createContext(final String id, final String rootFolderPath, final boolean mergingOn,
                             final boolean cacheOn, final int maxAllowedItemsInCache,
                             final boolean ignoreHiddenFiles)
    throws RootFolderNotFoundException, StoreException, AuthenticationException {

    AmazonS3URI uri = new AmazonS3URI(StringUtils.removeEnd(rootFolderPath, DELIMITER));

    ListObjectsV2Request request = new ListObjectsV2Request()
                                        .withBucketName(uri.getBucket())
                                        .withPrefix(uri.getKey())
                                        .withDelimiter(DELIMITER);
    ListObjectsV2Result result = client.listObjectsV2(request);

    if(isResultEmpty(result)) {
        throw new RootFolderNotFoundException("Root folder " + rootFolderPath + " not found");
    }

    return new S3Context(id, this, rootFolderPath, mergingOn, cacheOn, maxAllowedItemsInCache,
                         ignoreHiddenFiles, uri);
}
 
Example 11  Project: presto  File: S3TableConfigClient.java
/**
 * Call S3 to get the most recent object list.
 * <p>
 * This is an object list request to AWS in the given "directory".
 */
private List<S3ObjectSummary> getObjectSummaries()
{
    AmazonS3Client s3client = clientManager.getS3Client();
    AmazonS3URI directoryURI = new AmazonS3URI(bucketUrl.get());

    List<S3ObjectSummary> result = new ArrayList<>();
    try {
        log.info("Getting the listing of objects in the S3 table config directory: bucket %s prefix %s :", directoryURI.getBucket(), directoryURI.getKey());
        ListObjectsRequest request = new ListObjectsRequest()
                .withBucketName(directoryURI.getBucket())
                .withPrefix(directoryURI.getKey() + "/")
                .withDelimiter("/")
                .withMaxKeys(25);
        ObjectListing response;

        do {
            response = s3client.listObjects(request);

            result.addAll(response.getObjectSummaries());
            request.setMarker(response.getNextMarker());
        }
        while (response.isTruncated());

        log.info("Completed getting S3 object listing.");
    }
    catch (AmazonClientException e) {
        log.error("Skipping update as faced error fetching table descriptions from S3 " + e.toString());
    }
    return result;
}
 
Example 12  Project: circus-train  File: S3S3Copier.java
private BytesTransferStateChangeListener(
    S3ObjectSummary s3ObjectSummary,
    AmazonS3URI targetS3Uri,
    String targetKey) {
  this.s3ObjectSummary = s3ObjectSummary;
  this.targetS3Uri = targetS3Uri;
  this.targetKey = targetKey;
}
 
Example 13  Project: circus-train  File: S3S3Copier.java
private void initialiseCopyJobs(AmazonS3URI source, AmazonS3URI target) {
  ListObjectsRequest request = listObjectsRequestFactory
      .newInstance()
      .withBucketName(source.getBucket())
      .withPrefix(source.getKey());
  ObjectListing listing = srcClient.listObjects(request);
  initialiseCopyJobsFromListing(source, target, request, listing);
  while (listing.isTruncated()) {
    listing = srcClient.listNextBatchOfObjects(listing);
    initialiseCopyJobsFromListing(source, target, request, listing);
  }
}
 
Example 14  Project: circus-train  File: S3S3Copier.java
private void initialiseCopyJobsFromListing(
    AmazonS3URI sourceS3Uri,
    final AmazonS3URI targetS3Uri,
    ListObjectsRequest request,
    ObjectListing listing) {
  LOG
      .debug("Found objects to copy {}, for request {}/{}", listing.getObjectSummaries(), request.getBucketName(),
          request.getPrefix());
  List<S3ObjectSummary> objectSummaries = listing.getObjectSummaries();
  for (final S3ObjectSummary s3ObjectSummary : objectSummaries) {
    totalBytesToReplicate += s3ObjectSummary.getSize();
    String fileName = StringUtils.removeStart(s3ObjectSummary.getKey(), sourceS3Uri.getKey());
    final String targetKey = Strings.nullToEmpty(targetS3Uri.getKey()) + fileName;
    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(s3ObjectSummary.getBucketName(),
        s3ObjectSummary.getKey(), targetS3Uri.getBucket(), targetKey);

    if (s3s3CopierOptions.getCannedAcl() != null) {
      copyObjectRequest.withCannedAccessControlList(s3s3CopierOptions.getCannedAcl());
    }

    applyObjectMetadata(copyObjectRequest);

    TransferStateChangeListener stateChangeListener = new BytesTransferStateChangeListener(s3ObjectSummary,
        targetS3Uri, targetKey);
    copyJobRequests.add(new CopyJobRequest(copyObjectRequest, stateChangeListener));
  }
}
 
Example 15  Project: circus-train  File: S3S3CopierTest.java
@Before
public void setUp() throws Exception {
  inputData = temp.newFile("data");
  Files.write("bar foo", inputData, Charsets.UTF_8);

  client = newClient();
  client.createBucket("source");
  client.createBucket("target");

  when(s3ClientFactory.newInstance(any(AmazonS3URI.class), any(S3S3CopierOptions.class))).thenReturn(newClient());
}
 
Example 16  Project: circus-train  File: AmazonS3URIs.java
public static AmazonS3URI toAmazonS3URI(URI uri) {
  if (FS_PROTOCOL_S3.equalsIgnoreCase(uri.getScheme())) {
    return new AmazonS3URI(uri);
  } else if (S3Schemes.isS3Scheme(uri.getScheme())) {
    try {
      return new AmazonS3URI(new URI(FS_PROTOCOL_S3, uri.getUserInfo(), uri.getHost(), uri.getPort(), uri.getPath(),
          uri.getQuery(), uri.getFragment()));
    } catch (URISyntaxException e) {
      // ignore it, it will fail on the return
    }
  }
  // Build it anyway; the AmazonS3URI constructor will throw for unsupported URIs.
  return new AmazonS3URI(uri);
}
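A short usage sketch for the helper above (assuming, as the surrounding code suggests, that FS_PROTOCOL_S3 is "s3" and S3Schemes.isS3Scheme accepts Hadoop schemes such as s3a and s3n; the bucket and key are illustrative):

AmazonS3URI s3Uri = AmazonS3URIs.toAmazonS3URI(URI.create("s3a://my-bucket/warehouse/table"));
// The s3a scheme is rewritten to s3 so the AWS SDK can parse the URI:
// s3Uri.getBucket() -> "my-bucket", s3Uri.getKey() -> "warehouse/table"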
 
Example 17  Project: circus-train  File: S3DataManipulator.java
@Override
public boolean delete(String path) {
  log.info("Deleting all data at location: {}", path);
  AmazonS3URI uri = toAmazonS3URI(URI.create(path));
  String bucket = uri.getBucket();

  List<KeyVersion> keysToDelete = getKeysToDelete(bucket, uri.getKey());
  log.debug("Deleting keys: {}", keysToDelete.stream().map(k -> k.getKey()).collect(Collectors.toList()));

  DeleteObjectsResult result = s3Client.deleteObjects(new DeleteObjectsRequest(bucket).withKeys(keysToDelete));
  return successfulDeletion(result, keysToDelete.size());
}
 
Example 18  Project: circus-train
private AmazonS3 newS3Client(String tableUri) {
  AmazonS3URI base = toAmazonS3URI(URI.create(tableUri));
  S3S3CopierOptions s3s3CopierOptions = new S3S3CopierOptions(ImmutableMap
      .<String, Object>builder()
      .put(S3S3CopierOptions.Keys.S3_ENDPOINT_URI.keyName(), s3Proxy.getUri().toString())
      .build());
  return s3ClientFactory.newInstance(base, s3s3CopierOptions);
}
 
Example 21  Project: s3-inventory-usage-examples  File: BucketKey.java
public BucketKey(String inputFilePath, String outputFilePath){
    AmazonS3URI srcURI = new AmazonS3URI(inputFilePath);
    AmazonS3URI destURI = new AmazonS3URI(outputFilePath);
    this.srcBucket = srcURI.getBucket();
    this.srcKey = srcURI.getKey();
    this.destBucket = destURI.getBucket();
    this.destPrefix = destURI.getKey();
}
 
Example 22  Project: digdag  File: EmrOperatorFactory.java
Filer(AmazonS3Client s3, Optional<AmazonS3URI> staging, Workspace workspace, TemplateEngine templateEngine, Config params)
{
    this.s3 = s3;
    this.staging = staging;
    this.workspace = workspace;
    this.templateEngine = templateEngine;
    this.params = params;
}
 
Example 23  Project: digdag  File: EmrOperatorFactory.java
RemoteFile prepareRemoteFile(String tag, String section, String path, FileReference reference, boolean template, String localDir)
{
    String id = randomTag(s -> !ids.add(s));

    String prefix = tag + "/" + section + "/" + path + "/" + id;

    if (localDir == null) {
        localDir = LOCAL_STAGING_DIR + "/" + prefix;
    }

    ImmutableRemoteFile.Builder builder =
            ImmutableRemoteFile.builder()
                    .reference(reference)
                    .localPath(localDir + "/" + reference.filename());

    if (reference.local()) {
        // Local file? Then we need to upload it to S3.
        if (!staging.isPresent()) {
            throw new ConfigException("Please configure a S3 'staging' directory");
        }
        String baseKey = staging.get().getKey();
        String key = (baseKey != null ? baseKey : "") + prefix + "/" + reference.filename();
        builder.s3Uri(new AmazonS3URI("s3://" + staging.get().getBucket() + "/" + key));
    }
    else {
        builder.s3Uri(new AmazonS3URI(reference.reference().get()));
    }

    RemoteFile remoteFile = builder.build();

    if (reference.local()) {
        files.add(StagingFile.of(template, remoteFile));
    }

    return remoteFile;
}
 
Example 24  Project: presto-kinesis  File: TestS3TableConfigClient.java
@Test
public void testS3URIValues()
{
    // Verify that S3URI values will work:
    AmazonS3URI uri1 = new AmazonS3URI("s3://our.data.warehouse/prod/client_actions");
    assertNotNull(uri1.getKey());
    assertNotNull(uri1.getBucket());

    assertEquals(uri1.toString(), "s3://our.data.warehouse/prod/client_actions");
    assertEquals(uri1.getBucket(), "our.data.warehouse");
    assertEquals(uri1.getKey(), "prod/client_actions");
    assertTrue(uri1.getRegion() == null);

    // show info:
    log.info("Tested out URI1 : " + uri1.toString());

    AmazonS3URI uri2 = new AmazonS3URI("s3://some.big.bucket/long/complex/path");
    assertNotNull(uri2.getKey());
    assertNotNull(uri2.getBucket());

    assertEquals(uri2.toString(), "s3://some.big.bucket/long/complex/path");
    assertEquals(uri2.getBucket(), "some.big.bucket");
    assertEquals(uri2.getKey(), "long/complex/path");
    assertTrue(uri2.getRegion() == null);

    // info:
    log.info("Tested out URI2 : " + uri2.toString());

    AmazonS3URI uri3 = new AmazonS3URI("s3://presto.kinesis.config/unit-test/presto-kinesis");
    assertNotNull(uri3.getKey());
    assertNotNull(uri3.getBucket());

    assertEquals(uri3.toString(), "s3://presto.kinesis.config/unit-test/presto-kinesis");
    assertEquals(uri3.getBucket(), "presto.kinesis.config");
    assertEquals(uri3.getKey(), "unit-test/presto-kinesis");
}
 
Example 25  Project: spring-cloud-aws  File: AmazonS3ClientFactory.java
private static String getRegion(String endpointUrl) {
	Assert.notNull(endpointUrl, "Endpoint Url must not be null");
	try {
		URI uri = new URI(endpointUrl);
		if ("s3.amazonaws.com".equals(uri.getHost())) {
			return Regions.DEFAULT_REGION.getName();
		}
		else {
			return new AmazonS3URI(endpointUrl).getRegion();
		}
	}
	catch (URISyntaxException e) {
		throw new RuntimeException("Malformed URL received for endpoint", e);
	}
}
 
Example 26  Project: genie  File: ArgumentValidators.java
/**
 * {@inheritDoc}
 */
@Override
public void validate(final String name, final String value) throws ParameterException {
    try {
        //Check if a valid S3 uri can be created
        new AmazonS3URI(value);
    } catch (Exception e) {
        throw new ParameterException(name + " is not a valid S3 uri");
    }
}
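The validator works because the AmazonS3URI constructor rejects anything that is not an S3 URI. An illustrative check of that behaviour (the input strings here are made up):

new AmazonS3URI("s3://my-bucket/my-key");   // parses fine, validation passes
try {
    new AmazonS3URI("not-an-s3-uri");       // no scheme or host
} catch (IllegalArgumentException expected) {
    // this is the Exception the validator above turns into a ParameterException
}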
 
Example 27  Project: genie  File: S3FileTransferImpl.java
/**
 * {@inheritDoc}
 */
@Override
public void getFile(
    @NotBlank(message = "Source file path cannot be empty.") final String srcRemotePath,
    @NotBlank(message = "Destination local path cannot be empty") final String dstLocalPath
) throws GenieException {
    final long start = System.nanoTime();
    final Set<Tag> tags = Sets.newHashSet();
    try {
        log.debug("Called with src path {} and destination path {}", srcRemotePath, dstLocalPath);

        final AmazonS3URI s3Uri = getS3Uri(srcRemotePath);
        try {
            this.s3ClientFactory
                .getClient(s3Uri)
                .getObject(
                    new GetObjectRequest(s3Uri.getBucket(), s3Uri.getKey()),
                    new File(dstLocalPath)
                );
        } catch (final AmazonS3Exception ase) {
            log.error("Error fetching file {} from s3 due to exception {}", srcRemotePath, ase.toString());
            throw new GenieServerException("Error downloading file from s3. Filename: " + srcRemotePath, ase);
        }
        MetricsUtils.addSuccessTags(tags);
    } catch (Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.registry.timer(DOWNLOAD_TIMER_NAME, tags).record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
}
 
Example 28  Project: genie  File: S3FileTransferImpl.java
/**
 * {@inheritDoc}
 */
@Override
public void putFile(
    @NotBlank(message = "Source local path cannot be empty.") final String srcLocalPath,
    @NotBlank(message = "Destination remote path cannot be empty") final String dstRemotePath
) throws GenieException {
    final long start = System.nanoTime();
    final Set<Tag> tags = Sets.newHashSet();
    try {
        log.debug("Called with src path {} and destination path {}", srcLocalPath, dstRemotePath);

        final AmazonS3URI s3Uri = getS3Uri(dstRemotePath);
        try {
            this.s3ClientFactory
                .getClient(s3Uri)
                .putObject(s3Uri.getBucket(), s3Uri.getKey(), new File(srcLocalPath));
        } catch (final AmazonS3Exception ase) {
            log.error("Error posting file {} to s3 due to exception {}", dstRemotePath, ase.toString());
            throw new GenieServerException("Error uploading file to s3. Filename: " + dstRemotePath, ase);
        }
        MetricsUtils.addSuccessTags(tags);
    } catch (Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.registry.timer(UPLOAD_TIMER_NAME, tags).record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
}
 
Example 29  Project: genie  File: S3FileTransferImpl.java
/**
 * {@inheritDoc}
 */
@Override
public long getLastModifiedTime(final String path) throws GenieException {
    final long start = System.nanoTime();
    final long lastModTime;
    final Set<Tag> tags = Sets.newHashSet();
    try {
        final AmazonS3URI s3Uri = this.getS3Uri(path);
        try {
            final ObjectMetadata o = this.s3ClientFactory
                .getClient(s3Uri)
                .getObjectMetadata(s3Uri.getBucket(), s3Uri.getKey());
            lastModTime = o.getLastModified().getTime();
        } catch (final Exception ase) {
            final String message = String.format("Failed getting the metadata of the s3 file %s", path);
            log.error(message);
            throw new GenieServerException(message, ase);
        }
        MetricsUtils.addSuccessTags(tags);
    } catch (Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.registry.timer(GET_METADATA_TIMER_NAME, tags).record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
    return lastModTime;
}
 
Example 30  Project: genie  File: S3FileTransferImplTest.java
/**
 * Setup the tests.
 */
@Before
public void setup() {
    MockitoAnnotations.initMocks(this);
    this.registry = Mockito.mock(MeterRegistry.class);
    this.downloadTimer = Mockito.mock(Timer.class);
    this.uploadTimer = Mockito.mock(Timer.class);
    this.urlFailingStrictValidationCounter = Mockito.mock(Counter.class);
    final S3ClientFactory s3ClientFactory = Mockito.mock(S3ClientFactory.class);
    this.s3Client = Mockito.mock(AmazonS3Client.class);
    Mockito.when(s3ClientFactory.getClient(Mockito.any(AmazonS3URI.class))).thenReturn(this.s3Client);
    Mockito.
        when(registry.timer(Mockito.eq(S3FileTransferImpl.DOWNLOAD_TIMER_NAME), Mockito.anySet()))
        .thenReturn(this.downloadTimer);
    Mockito.
        when(registry.timer(Mockito.eq(S3FileTransferImpl.UPLOAD_TIMER_NAME), Mockito.anySet()))
        .thenReturn(this.uploadTimer);
    Mockito
        .when(registry.counter(S3FileTransferImpl.STRICT_VALIDATION_COUNTER_NAME))
        .thenReturn(this.urlFailingStrictValidationCounter);
    this.s3FileTransferProperties = Mockito.mock(S3FileTransferProperties.class);
    this.s3FileTransfer = new S3FileTransferImpl(
        s3ClientFactory,
        this.registry,
        this.s3FileTransferProperties
    );
}