Source-code examples for the class com.amazonaws.services.s3.model.PutObjectResult

The following examples show how the com.amazonaws.services.s3.model.PutObjectResult API is used in real projects; you can also follow each project's link to view the original source on GitHub.
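
Before the project excerpts, a minimal sketch of the call that produces a PutObjectResult may help orient the reader. It assumes the AWS SDK for Java v1 (aws-java-sdk-s3) with credentials and region resolved from the default provider chain; the bucket name, key, and content are placeholders, not values taken from any of the projects below.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectResult;

public class PutObjectResultDemo {
    public static void main(String[] args) {
        // Credentials and region come from the default provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // putObject uploads the content and returns a PutObjectResult
        // describing the stored object.
        PutObjectResult result = s3.putObject("example-bucket", "example-key", "hello");

        // The accessors exercised throughout the examples on this page:
        System.out.println("ETag:       " + result.getETag());
        System.out.println("Version ID: " + result.getVersionId()); // null unless bucket versioning is enabled
        System.out.println("MD5:        " + result.getContentMd5());
    }
}

On success, putObject returns normally; on failure the SDK throws an AmazonClientException (or its subclass AmazonServiceException) rather than returning null, which is why the test-oriented examples below simply stub the returned PutObjectResult.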

Example 1, Project: singleton, File: S3OneComponentDaoImpl.java
/**
 * Update the component bundle file on the remote S3 server.
 */
@Override
public boolean update(String productName, String version, String component, String locale,
      Map<String, String> messages) throws DataException {
   if (StringUtils.isEmpty(component)) {
      component = ConstantsFile.DEFAULT_COMPONENT;
   }
   String filePath = S3Utils.genProductVersionS3Path(productName, version) + component
         + ConstantsChar.BACKSLASH + ResourceFilePathGetter.getLocalizedJSONFileName(locale);
   Map<String, Object> json = new HashMap<String, Object>();
   json.put(ConstantsKeys.COMPONENT, component);
   json.put(ConstantsKeys.lOCALE, locale);
   json.put(ConstantsKeys.MESSAGES, messages);
   String content;
   try {
      content = new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(json);
   } catch (JsonProcessingException e) {
      throw new DataException(
            ConstantsKeys.FATA_ERROR + "Failed to convert content to file: " + filePath + ".",
            e);
   }
   PutObjectResult putResult =
         s3Client.getS3Client().putObject(config.getBucketName(), filePath, content);
   return (putResult != null);
}
 
Example 2
@Test
public void testUploadLocalSourceWithNoSSEAlgorithm() throws Exception {
    File file = new File(mockWorkspaceDir + "/source-file");
    FileUtils.write(file, "contents");

    PutObjectResult mockedResponse = new PutObjectResult();
    mockedResponse.setVersionId("some-version-id");
    when(s3Client.putObject(any(PutObjectRequest.class))).thenReturn(mockedResponse);
    S3DataManager d = new S3DataManager(s3Client, s3InputBucketName, s3InputKeyName, "", file.getPath(), "");

    ArgumentCaptor<PutObjectRequest> savedPutObjectRequest = ArgumentCaptor.forClass(PutObjectRequest.class);
    UploadToS3Output result = d.uploadSourceToS3(listener, testWorkSpace);
    assertEquals(result.getSourceLocation(), s3InputBucketName + "/" + s3InputKeyName);

    verify(s3Client).putObject(savedPutObjectRequest.capture());
    assertEquals(savedPutObjectRequest.getValue().getBucketName(), s3InputBucketName);
    assertEquals(savedPutObjectRequest.getValue().getKey(), s3InputKeyName);
    assertEquals(savedPutObjectRequest.getValue().getMetadata().getContentMD5(), S3DataManager.getZipMD5(file));
    assertEquals(savedPutObjectRequest.getValue().getMetadata().getContentLength(), file.length());
    assertNull(savedPutObjectRequest.getValue().getMetadata().getSSEAlgorithm());
}
 
Example 3
private static void uploadAndDeleteObjectsWithVersions() {
    System.out.println("Uploading and deleting objects with versions specified.");

    // Upload three sample objects.
    ArrayList<KeyVersion> keys = new ArrayList<KeyVersion>();
    for (int i = 0; i < 3; i++) {
        String keyName = "delete object without version ID example " + i;
        PutObjectResult putResult = S3_CLIENT.putObject(VERSIONED_BUCKET_NAME, keyName,
                "Object number " + i + " to be deleted.");
        // Gather the new object keys with version IDs.
        keys.add(new KeyVersion(keyName, putResult.getVersionId()));
    }

    // Delete the specified versions of the sample objects.
    DeleteObjectsRequest multiObjectDeleteRequest = new DeleteObjectsRequest(VERSIONED_BUCKET_NAME)
            .withKeys(keys)
            .withQuiet(false);

    // Verify that the object versions were successfully deleted.
    DeleteObjectsResult delObjRes = S3_CLIENT.deleteObjects(multiObjectDeleteRequest);
    int successfulDeletes = delObjRes.getDeletedObjects().size();
    System.out.println(successfulDeletes + " objects successfully deleted");
}
 
Example 4, Project: exhibitor, File: S3Utils.java
public static ObjectMetadata simpleUploadFile(S3Client client, byte[] bytes, String bucket, String key) throws Exception
{
    byte[]                          md5 = md5(bytes, bytes.length);

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(bytes.length);
    metadata.setLastModified(new Date());
    metadata.setContentMD5(S3Utils.toBase64(md5));
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, new ByteArrayInputStream(bytes), metadata);
    PutObjectResult putObjectResult = client.putObject(putObjectRequest);

    if ( !putObjectResult.getETag().equals(S3Utils.toHex(md5)) )
    {
        throw new Exception("Unable to match MD5 for config");
    }

    return metadata;
}
 
Example 5, Project: attic-apex-malhar, File: S3ReconcilerTest.java
@Override
protected void starting(Description description)
{
  super.starting(description);
  outputPath = new File(
      "target" + Path.SEPARATOR + description.getClassName() + Path.SEPARATOR + description.getMethodName())
          .getPath();

  Attribute.AttributeMap attributes = new Attribute.AttributeMap.DefaultAttributeMap();
  attributes.put(DAG.DAGContext.APPLICATION_ID, description.getClassName());
  attributes.put(DAG.DAGContext.APPLICATION_PATH, outputPath);
  context = mockOperatorContext(1, attributes);

  underTest = new S3Reconciler();
  underTest.setAccessKey("");
  underTest.setSecretKey("");

  underTest.setup(context);

  MockitoAnnotations.initMocks(this);
  PutObjectResult result = new PutObjectResult();
  result.setETag(outputPath);
  when(s3clientMock.putObject((PutObjectRequest)any())).thenReturn(result);
  underTest.setS3client(s3clientMock);
}
 
Example 6, Project: s3proxy, File: AwsSdkTest.java
@Test
public void testConditionalGet() throws Exception {
    assumeTrue(!blobStoreType.equals("b2"));

    String blobName = "blob-name";
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(BYTE_SOURCE.size());
    PutObjectResult result = client.putObject(containerName, blobName,
            BYTE_SOURCE.openStream(), metadata);

    S3Object object = client.getObject(
            new GetObjectRequest(containerName, blobName)
                    .withMatchingETagConstraint(result.getETag()));
    try (InputStream is = object.getObjectContent()) {
        assertThat(is).isNotNull();
        ByteStreams.copy(is, ByteStreams.nullOutputStream());
    }

    object = client.getObject(
            new GetObjectRequest(containerName, blobName)
                    .withNonmatchingETagConstraint(result.getETag()));
    assertThat(object).isNull();
}
 
Example 7, Project: nifi, File: TestPutS3Object.java
private void prepareTest(String filename) {
    runner.setProperty(PutS3Object.REGION, "ap-northeast-1");
    runner.setProperty(PutS3Object.BUCKET, "test-bucket");
    runner.assertValid();

    Map<String, String> ffAttributes = new HashMap<>();
    ffAttributes.put("filename", filename);
    ffAttributes.put("tagS3PII", "true");
    runner.enqueue("Test Content", ffAttributes);

    PutObjectResult putObjectResult = new PutObjectResult();
    putObjectResult.setExpirationTime(new Date());
    putObjectResult.setMetadata(new ObjectMetadata());
    putObjectResult.setVersionId("test-version");
    putObjectResult.setETag("test-etag");

    Mockito.when(mockS3Client.putObject(Mockito.any(PutObjectRequest.class))).thenReturn(putObjectResult);

    MultipartUploadListing uploadListing = new MultipartUploadListing();
    Mockito.when(mockS3Client.listMultipartUploads(Mockito.any(ListMultipartUploadsRequest.class))).thenReturn(uploadListing);
}
 
Example 8, Project: tutorials, File: MultipartUploadLiveTest.java
@Test
public void whenUploadingFileWithTransferManager_thenVerifyUploadRequested() {
    File file = mock(File.class);
    PutObjectResult s3Result = mock(PutObjectResult.class);

    when(amazonS3.putObject(anyString(), anyString(), (File) any())).thenReturn(s3Result);
    when(file.getName()).thenReturn(KEY_NAME);

    PutObjectRequest request = new PutObjectRequest(BUCKET_NAME, KEY_NAME, file);
    request.setGeneralProgressListener(progressListener);

    Upload upload = tm.upload(request);

    assertThat(upload).isNotNull();
    verify(amazonS3).putObject(request);
}
 
Example 9, Project: usergrid, File: WarehouseExport.java
private void copyToS3( String fileName ) {

        String bucketName = ( String ) properties.get( BUCKET_PROPNAME );
        String accessId = ( String ) properties.get( ACCESS_ID_PROPNAME );
        String secretKey = ( String ) properties.get( SECRET_KEY_PROPNAME );

        Properties overrides = new Properties();
        overrides.setProperty( "s3" + ".identity", accessId );
        overrides.setProperty( "s3" + ".credential", secretKey );

        final Iterable<? extends Module> MODULES = ImmutableSet
                .of( new JavaUrlHttpCommandExecutorServiceModule(), new Log4JLoggingModule(),
                        new NettyPayloadModule() );

        AWSCredentials credentials = new BasicAWSCredentials(accessId, secretKey);
        ClientConfiguration clientConfig = new ClientConfiguration();
        clientConfig.setProtocol( Protocol.HTTP);

        AmazonS3Client s3Client = new AmazonS3Client(credentials, clientConfig);

        s3Client.createBucket( bucketName );
        File uploadFile = new File( fileName );
        PutObjectResult putObjectResult = s3Client.putObject( bucketName, uploadFile.getName(), uploadFile );
        logger.info("Uploaded file etag={}", putObjectResult.getETag());
    }
 
Example 10, Project: Scribengin, File: AmazonS3Mock.java
@Override
public PutObjectResult putObject(String bucketName, String key, File file) throws AmazonClientException,
    AmazonServiceException {
  throwException(putObjectException);
  
  List<String> keys = files.get(bucketName);
  if (keys == null) {
    throw new AmazonClientException("Bucket does not exist");
  }
  keys.add(key);
  files.put(bucketName, keys);
  PutObjectResult result = new PutObjectResult();
  try {
    result.setContentMd5(new String(Md5Utils.md5AsBase64(file)));
  } catch (IOException e) {
    // TODO Auto-generated catch block
    e.printStackTrace();
  }
  return result;
}
 
Example 11, Project: genie, File: S3FileTransferImplTest.java
/**
 * Test the putFile method for valid s3 path.
 *
 * @throws GenieException If there is any problem
 */
@Test
public void testPutFileMethodValidS3Path() throws GenieException {
    final PutObjectResult putObjectResult = Mockito.mock(PutObjectResult.class);
    Mockito.when(this.s3Client.putObject(Mockito.any(), Mockito.any(), Mockito.any(File.class)))
        .thenReturn(putObjectResult);
    final ArgumentCaptor<String> bucketArgument = ArgumentCaptor.forClass(String.class);
    final ArgumentCaptor<String> keyArgument = ArgumentCaptor.forClass(String.class);

    s3FileTransfer.putFile(LOCAL_PATH, S3_PATH);
    Mockito
        .verify(this.s3Client)
        .putObject(bucketArgument.capture(), keyArgument.capture(), Mockito.any(File.class));
    Assert.assertEquals(S3_BUCKET, bucketArgument.getValue());
    Assert.assertEquals(S3_KEY, keyArgument.getValue());
    Mockito
        .verify(this.uploadTimer, Mockito.times(1))
        .record(Mockito.anyLong(), Mockito.eq(TimeUnit.NANOSECONDS));
    Mockito
        .verify(this.registry, Mockito.times(1))
        .timer(Mockito.eq(S3FileTransferImpl.UPLOAD_TIMER_NAME), this.tagsCaptor.capture());
    Assert.assertEquals(SUCCESS_TAGS, this.tagsCaptor.getValue());
}
 
Example 12, Project: crate, File: MockAmazonS3.java
@Override
public PutObjectResult putObject(final PutObjectRequest request) throws AmazonClientException {
    assertThat(request.getBucketName(), equalTo(bucket));
    assertThat(request.getMetadata().getSSEAlgorithm(), serverSideEncryption ? equalTo("AES256") : nullValue());
    assertThat(request.getCannedAcl(), notNullValue());
    assertThat(request.getCannedAcl().toString(), cannedACL != null ? equalTo(cannedACL) : equalTo("private"));
    assertThat(request.getStorageClass(), storageClass != null ? equalTo(storageClass) : equalTo("STANDARD"));


    final String blobName = request.getKey();
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    try {
        Streams.copy(request.getInputStream(), out);
        blobs.put(blobName, out.toByteArray());
    } catch (IOException e) {
        throw new AmazonClientException(e);
    }
    return new PutObjectResult();
}
 
Example 13, Project: data-highway, File: S3ConnectivityCheckTest.java
@Test
public void checkS3() {
  Mockito.when(
      s3.putObject(
          Mockito.any(PutObjectRequest.class)))
      .thenReturn(new PutObjectResult());

  underTest.checkS3Put(s3, "bucket", "key");
}
 
Example 14, Project: pocket-etl, File: S3FastLoaderTest.java
@Before
public void initializeS3StreamCapture() {
    when(s3Client.putObject(any(PutObjectRequest.class))).thenAnswer(invocation -> {
        PutObjectRequest putObjectRequest = (PutObjectRequest) invocation.getArguments()[0];
        InputStream inputStream = putObjectRequest.getInputStream();
        s3StreamCapture.add(byteArrayToList(toByteArray(inputStream)));
        return new PutObjectResult();
    });
}
 
Example 15, Project: pocket-etl, File: MockedS3FunctionalTest.java
@Before
public void prepareMockS3() {
    when(mockAmazonS3.putObject(any(PutObjectRequest.class))).thenAnswer((Answer<PutObjectResult>) invocation -> {
        PutObjectRequest request = (PutObjectRequest)invocation.getArguments()[0];
        InputStream inputStream = request.getInputStream();
        outputStrings.add(IOUtils.toString(inputStream));
        return null;
    });
}
 
Example 16, Project: aws-photosharing-example, File: UploadThread.java
@Override
public void run() {
    ObjectMetadata meta_data = new ObjectMetadata();
    if (p_content_type != null)
        meta_data.setContentType(p_content_type);

    meta_data.setContentLength(p_size);

    PutObjectRequest putObjectRequest = new PutObjectRequest(p_bucket_name, p_s3_key, p_file_stream, meta_data);
    putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
    PutObjectResult res = s3Client.putObject(putObjectRequest);       
}
 
Example 17, Project: hadoop, File: S3AFastOutputStream.java
private void putObject() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket,
        key);
  }
  final ObjectMetadata om = createDefaultMetadata();
  om.setContentLength(buffer.size());
  final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
      new ByteArrayInputStream(buffer.toByteArray()), om);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setGeneralProgressListener(progressListener);
  ListenableFuture<PutObjectResult> putObjectResult =
      executorService.submit(new Callable<PutObjectResult>() {
        @Override
        public PutObjectResult call() throws Exception {
          return client.putObject(putObjectRequest);
        }
      });
  //wait for completion
  try {
    putObjectResult.get();
  } catch (InterruptedException ie) {
    LOG.warn("Interrupted object upload:" + ie, ie);
    Thread.currentThread().interrupt();
  } catch (ExecutionException ee) {
    throw new IOException("Regular upload failed", ee.getCause());
  }
}
 
Example 18, Project: big-c, File: S3AFastOutputStream.java
private void putObject() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket,
        key);
  }
  final ObjectMetadata om = createDefaultMetadata();
  om.setContentLength(buffer.size());
  final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
      new ByteArrayInputStream(buffer.toByteArray()), om);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setGeneralProgressListener(progressListener);
  ListenableFuture<PutObjectResult> putObjectResult =
      executorService.submit(new Callable<PutObjectResult>() {
        @Override
        public PutObjectResult call() throws Exception {
          return client.putObject(putObjectRequest);
        }
      });
  //wait for completion
  try {
    putObjectResult.get();
  } catch (InterruptedException ie) {
    LOG.warn("Interrupted object upload:" + ie, ie);
    Thread.currentThread().interrupt();
  } catch (ExecutionException ee) {
    throw new IOException("Regular upload failed", ee.getCause());
  }
}
 
Example 19
private S3DataManager createDefaultSource(String localSourcePath, String workspaceSubdir) {
    this.s3ARNs.put("main", "ARN1/bucket/thing.zip"); //put one item in s3ARNs so exception doesn't happen.

    PutObjectResult mockedResponse = new PutObjectResult();
    mockedResponse.setVersionId("some-version-id");
    when(s3Client.putObject(any(PutObjectRequest.class))).thenReturn(mockedResponse);
    return new S3DataManager(s3Client, s3InputBucketName, s3InputKeyName, sseAlgorithm, localSourcePath, workspaceSubdir);
}
 
Example 20, Project: java-slack-sdk, File: AmazonS3OAuthStateService.java
@Override
public void addNewStateToDatastore(String state) throws Exception {
    AmazonS3 s3 = this.createS3Client();
    String value = "" + (System.currentTimeMillis() + getExpirationInSeconds() * 1000);
    PutObjectResult putObjectResult = s3.putObject(bucketName, getKey(state), value);
    if (log.isDebugEnabled()) {
        log.debug("AWS S3 putObject result of state data - {}", JsonOps.toJsonString(putObjectResult));
    }
}
 
Example 21, Project: emodb, File: S3ScanWriter.java
/**
 * Starts an asynchronous upload and returns a ListenableFuture for handling the result.
 */
synchronized ListenableFuture<String> upload() {
    // Reset values from possible prior attempt
    _attempts += 1;
    _bytesTransferred = 0;

    // Separate the future returned to the caller from the future generated by submitting the
    // putObject request.  If the writer is closed then uploadFuture may be canceled before it executes,
    // in which case it may not trigger any callbacks.  To ensure there is always a callback resultFuture is
    // tracked independently and, in the event that the upload is aborted, gets set on abort().
    _resultFuture = SettableFuture.create();

    _uploadFuture = _uploadService.submit(new Runnable() {
        @Override
        public void run() {
            try {
                ProgressListener progressListener = new ProgressListener() {
                    @Override
                    public void progressChanged(ProgressEvent progressEvent) {
                        // getBytesTransferred() returns zero for all events not pertaining to the file transfer
                        _bytesTransferred += progressEvent.getBytesTransferred();
                    }
                };

                PutObjectRequest putObjectRequest = new PutObjectRequest(_bucket, _key, _file);
                putObjectRequest.setGeneralProgressListener(progressListener);
                PutObjectResult result = _amazonS3.putObject(putObjectRequest);
                _resultFuture.set(result.getETag());
            } catch (Throwable t) {
                _resultFuture.setException(t);
            }
        }
    });

    return _resultFuture;
}
 
Example 22, Project: emodb, File: S3ScanWriterTest.java
@Test
public void testWriteWithCancel()
        throws Exception {
    URI baseUri = URI.create("s3://test-bucket/scan");
    ScheduledExecutorService uploadService = Executors.newScheduledThreadPool(2);

    try {
        PutObjectResult putObjectResult = new PutObjectResult();
        putObjectResult.setETag("dummy-etag");

        AmazonS3 amazonS3 = mock(AmazonS3.class);
        when(amazonS3.putObject(argThat(putsIntoBucket("test-bucket"))))
                .thenReturn(putObjectResult);

        AmazonS3Provider amazonS3Provider = mock(AmazonS3Provider.class);
        when(amazonS3Provider.getS3ClientForBucket("test-bucket")).thenReturn(amazonS3);

        S3ScanWriter scanWriter = new S3ScanWriter(1, baseUri, Optional.of(2), new MetricRegistry(), amazonS3Provider, uploadService, new ObjectMapper());

        ScanDestinationWriter scanDestinationWriters[] = new ScanDestinationWriter[2];

        for (int i = 0; i < 2; i++) {
            scanDestinationWriters[i] = scanWriter.writeShardRows("table" + i, "p0", 0, i);
            scanDestinationWriters[i].writeDocument(ImmutableMap.of("type", "review", "rating", i));
        }

        // Simulate canceling shardWriter[0] in response to a failure.
        scanDestinationWriters[0].closeAndCancel();
        // Close shardWriter[1] normally
        scanDestinationWriters[1].closeAndTransferAsync(Optional.of(1));

        verifyAllTransfersComplete(scanWriter, uploadService);
    } finally {
        uploadService.shutdownNow();
    }
}
 
Example 23, Project: emodb, File: S3ScanWriterTest.java
@Test
public void testWriteWithClose()
        throws Exception {

    URI baseUri = URI.create("s3://test-bucket/scan");
    ScheduledExecutorService uploadService = Executors.newScheduledThreadPool(2);

    try {
        PutObjectResult putObjectResult = new PutObjectResult();
        putObjectResult.setETag("dummy-etag");

        AmazonS3 amazonS3 = mock(AmazonS3.class);
        when(amazonS3.putObject(argThat(putsIntoBucket("test-bucket"))))
                .thenReturn(putObjectResult);

        AmazonS3Provider amazonS3Provider = mock(AmazonS3Provider.class);
        when(amazonS3Provider.getS3ClientForBucket("test-bucket")).thenReturn(amazonS3);

        S3ScanWriter scanWriter = new S3ScanWriter(1, baseUri, Optional.of(2), new MetricRegistry(), amazonS3Provider, uploadService, new ObjectMapper());

        ScanDestinationWriter scanDestinationWriters[] = new ScanDestinationWriter[2];

        for (int i=0; i < 2; i++) {
            scanDestinationWriters[i] = scanWriter.writeShardRows("table" + i, "p0", 0, i);
            scanDestinationWriters[i].writeDocument(ImmutableMap.of("type", "review", "rating", i));
        }

        // Simulate closing shardWriter[0] but not shardWriter[1]
        scanDestinationWriters[0].closeAndTransferAsync(Optional.of(1));

        scanWriter.close();

        verifyAllTransfersComplete(scanWriter, uploadService);
    } finally {
        uploadService.shutdownNow();
    }
}
 
Example 24, Project: emodb, File: ScanUploaderTest.java
private PutObjectResult mockUploadS3File(String bucket, String key, byte[] contents, HashBasedTable<String, String, ByteBuffer> s3FileTable) {
    // Place the contents in the s3 file table keyed by the file's parent directory and file name
    int idx = key.lastIndexOf('/');
    String parentDir = key.substring(0, idx);
    String fileName = key.substring(idx + 1);
    // HashBasedTable is not thread-safe if multiple threads try to write to the same directory concurrently
    synchronized (s3FileTable) {
        s3FileTable.put(format("%s/%s", bucket, parentDir), fileName, ByteBuffer.wrap(contents));
    }

    PutObjectResult result = new PutObjectResult();
    result.setETag("etag");
    return result;
}
 
Example 25, Project: emodb, File: AthenaAuditWriter.java
/**
 * This method takes all log files prepared by {@link #prepareClosedLogFilesForTransfer()} and initiates their
 * transfer to S3.  The transfer itself is performed asynchronously.
 */
private void transferLogFilesToS3() {
    if (_fileTransfersEnabled) {
        // Find all closed log files in the staging directory and move them to S3
        for (final File logFile : _stagingDir.listFiles((dir, name) -> name.startsWith(_logFilePrefix) && name.endsWith(COMPRESSED_FILE_SUFFIX))) {
            // Extract the date portion of the file name and use it to partition the file in S3
            String auditDate = logFile.getName().substring(_logFilePrefix.length() + 1, _logFilePrefix.length() + 9);
            String dest = String.format("%s/date=%s/%s", _s3AuditRoot, auditDate, logFile.getName());

            _fileTransferService.submit(() -> {
                // Since file transfers are done in a single thread, there shouldn't be any concurrency issues,
                // but verify the same file wasn't submitted previously and is already transferred.
                if (logFile.exists()) {
                    try {
                        PutObjectResult result = _s3.putObject(_s3Bucket, dest, logFile);
                        _log.debug("Audit log copied: {}, ETag={}", logFile, result.getETag());

                        if (!logFile.delete()) {
                            _log.warn("Failed to delete file after copying to s3: {}", logFile);
                        }
                    } catch (Exception e) {

                        // Log the error, try again on the next iteration
                        _rateLimitedLog.error(e, "Failed to copy log file {}", logFile);
                    }
                }
            });
        }
    }

}
 
Example 26, Project: emodb, File: AthenaAuditWriterTest.java
@BeforeMethod
public void setUp() {
    _s3 = mock(AmazonS3.class);
    when(_s3.putObject(eq(BUCKET), anyString(), any(File.class))).then(invocationOnMock -> {
        // The file will be deleted after the put object returns successfully, so capture the contents now
        File file = (File) invocationOnMock.getArguments()[2];
        try (FileInputStream fileIn = new FileInputStream(file);
             GzipCompressorInputStream in = new GzipCompressorInputStream(fileIn);
             BufferedReader reader = new BufferedReader(new InputStreamReader(in, Charsets.UTF_8))) {

            String line;
            while ((line = reader.readLine()) != null) {
                Map<String, Object> auditJson = JsonHelper.fromJson(line, new TypeReference<Map<String, Object>>() {});
                _uploadedAudits.put((String) invocationOnMock.getArguments()[1], auditJson);
            }
        }

        PutObjectResult result = new PutObjectResult();
        result.setETag(file.getName());
        return result;
    });

    _tempStagingDir = Files.createTempDir();

    // Start with some default time; individual tests can override as necessary
    _now = Instant.from(ZonedDateTime.of(2018, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC));

    _clock = mock(Clock.class);
    when(_clock.millis()).then(ignore -> _now.toEpochMilli());
    when(_clock.instant()).then(ignore -> _now);
}
 
Example 27, Project: hawkbit-extensions, File: S3Repository.java
@Override
protected AbstractDbArtifact store(final String tenant, final DbArtifactHash base16Hashes, final String contentType,
        final String tempFile) throws IOException {
    final File file = new File(tempFile);

    final S3Artifact s3Artifact = createS3Artifact(tenant, base16Hashes, contentType, file);
    final String key = objectKey(tenant, base16Hashes.getSha1());

    LOG.info("Storing file {} with length {} to AWS S3 bucket {} with key {}", file.getName(), file.length(),
            s3Properties.getBucketName(), key);

    if (existsByTenantAndSha1(tenant, base16Hashes.getSha1())) {
        LOG.debug("Artifact {} already exists on S3 bucket {}, don't need to upload twice", key,
                s3Properties.getBucketName());
        return s3Artifact;
    }

    try (final InputStream inputStream = new BufferedInputStream(new FileInputStream(file),
            RequestClientOptions.DEFAULT_STREAM_BUFFER_SIZE)) {
        final ObjectMetadata objectMetadata = createObjectMetadata(base16Hashes.getMd5(), contentType, file);
        final PutObjectResult result = amazonS3.putObject(s3Properties.getBucketName(), key, inputStream,
                objectMetadata);

        LOG.debug("Artifact {} stored on S3 bucket {} with server side Etag {} and MD5 hash {}", key,
                s3Properties.getBucketName(), result.getETag(), result.getContentMd5());

        return s3Artifact;
    } catch (final AmazonClientException e) {
        throw new ArtifactStoreException("Failed to store artifact into S3 ", e);
    }
}
 
Example 28, Project: stocator, File: COSAPIClient.java
/**
 * PUT an object directly (i.e. not via the transfer manager).
 * @param putObjectRequest the request
 * @return the upload initiated
 * @throws IOException on problems
 */
PutObjectResult putObject(PutObjectRequest putObjectRequest)
    throws IOException {
  try {
    PutObjectResult result = mClient.putObject(putObjectRequest);
    return result;
  } catch (AmazonClientException e) {
    throw translateException("put", putObjectRequest.getKey(), e);
  }
}
 
Example 29, Project: herd, File: S3DaoTest.java
@Test
public void testCreateDirectoryAssertAddTrailingSlashOnlyIfNotPresent()
{
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);

    try
    {
        String s3BucketName = "s3BucketName";
        String s3KeyPrefix = "s3KeyPrefix/";

        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(s3BucketName);
        s3FileTransferRequestParamsDto.setS3KeyPrefix(s3KeyPrefix);

        when(mockS3Operations.putObject(any(), any())).then(new Answer<PutObjectResult>()
        {
            @Override
            public PutObjectResult answer(InvocationOnMock invocation) throws Throwable
            {
                PutObjectRequest putObjectRequest = invocation.getArgument(0);
                assertEquals(s3BucketName, putObjectRequest.getBucketName());
                assertEquals(s3KeyPrefix, putObjectRequest.getKey());

                PutObjectResult putObjectResult = new PutObjectResult();
                return putObjectResult;
            }
        });

        s3Dao.createDirectory(s3FileTransferRequestParamsDto);
    }
    finally
    {
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
    }
}
 
Example 30, Project: herd, File: S3DaoTest.java
@Test
public void testCreateDirectoryAssertCallsPutObject()
{
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);

    try
    {
        String s3BucketName = "s3BucketName";
        String s3KeyPrefix = "s3KeyPrefix";
        String expectedS3KeyPrefix = s3KeyPrefix + "/";

        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(s3BucketName);
        s3FileTransferRequestParamsDto.setS3KeyPrefix(s3KeyPrefix);

        when(mockS3Operations.putObject(any(), any())).then(new Answer<PutObjectResult>()
        {
            @Override
            public PutObjectResult answer(InvocationOnMock invocation) throws Throwable
            {
                PutObjectRequest putObjectRequest = invocation.getArgument(0);
                assertEquals(s3BucketName, putObjectRequest.getBucketName());
                assertEquals(expectedS3KeyPrefix, putObjectRequest.getKey());

                PutObjectResult putObjectResult = new PutObjectResult();
                return putObjectResult;
            }
        });

        s3Dao.createDirectory(s3FileTransferRequestParamsDto);
    }
    finally
    {
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
    }
}
 