com.amazonaws.services.s3.model.ObjectMetadata#setContentType() code examples

Listed below are example usages of com.amazonaws.services.s3.model.ObjectMetadata#setContentType() collected from open-source projects; the full source of each file can be found in the corresponding project on GitHub.
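Before the per-project examples, here is a minimal, self-contained sketch of the pattern most of them share: build an ObjectMetadata, call setContentType() (and setContentLength() when uploading a raw InputStream, so the SDK does not have to buffer the whole stream to determine its length), and pass the metadata along with the PutObjectRequest. The bucket name, key, and payload below are placeholders, not taken from any of the listed projects, and the sketch assumes default credential and region configuration via AmazonS3ClientBuilder.defaultClient().

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class SetContentTypeSketch {

    public static void main(String[] args) {
        // Placeholder bucket and key.
        String bucket = "example-bucket";
        String key = "docs/hello.txt";
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

        // The content type becomes the Content-Type header S3 returns on GET;
        // the content length lets the SDK stream the payload without buffering it.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType("text/plain");
        metadata.setContentLength(payload.length);

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        s3.putObject(new PutObjectRequest(bucket, key, new ByteArrayInputStream(payload), metadata));
    }
}

If no content type is set explicitly, the object is generally stored and served with a generic octet-stream type, which is why several of the examples below derive the type from the file name or via content detection before uploading.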

Example 1  Project: hop  File: S3CommonFileObject.java
@Override
protected void doCreateFolder() throws Exception {
  if ( !isRootBucket() ) {
    // create meta-data for your folder and set content-length to 0
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength( 0 );
    metadata.setContentType( "binary/octet-stream" );

    // create empty content
    InputStream emptyContent = new ByteArrayInputStream( new byte[ 0 ] );

    // create a PutObjectRequest passing the folder name suffixed by /
    PutObjectRequest putObjectRequest = createPutObjectRequest( bucketName, key + DELIMITER, emptyContent, metadata );

    // send request to S3 to create folder
    try {
      fileSystem.getS3Client().putObject( putObjectRequest );
    } catch ( AmazonS3Exception e ) {
      throw new FileSystemException( "vfs.provider.local/create-folder.error", this, e );
    }
  } else {
    throw new FileSystemException( "vfs.provider/create-folder-not-supported.error" );
  }
}
 
Example 2  Project: judgels  File: AwsFileSystem.java
@Override
public void uploadPublicFile(Path filePath, InputStream content) {
    String destFilePathString = filePath.toString();

    ObjectMetadata objectMetadata = new ObjectMetadata();

    String contentType = URLConnection.guessContentTypeFromName(destFilePathString);
    if (contentType != null) {
        objectMetadata.setContentType(contentType);
        if (contentType.startsWith("image/")) {
            objectMetadata.setCacheControl("no-transform,public,max-age=300,s-maxage=900");
        }
    }

    s3.putObject(bucketName, destFilePathString, content, objectMetadata);
}
 
Example 3  Project: pacbot  File: AwsS3BucketService.java
public boolean uploadFile(final AmazonS3 amazonS3, MultipartFile fileToUpload, String s3BucketName, String key) {
	try {
		File file = AdminUtils.convert(fileToUpload);
		long size = fileToUpload.getSize();
		String contentType = fileToUpload.getContentType();
		ObjectMetadata metadata = new ObjectMetadata();
		metadata.setContentType(contentType);
		metadata.setContentLength(size);
		// Note: the metadata built above is never attached to this request, so the
		// content type set via setContentType() has no effect here; chaining
		// .withMetadata(metadata) onto the request would be required to apply it.
		PutObjectRequest putObjectRequest = new PutObjectRequest(s3BucketName, key, file).withCannedAcl(CannedAccessControlList.PublicRead);
		amazonS3.putObject(putObjectRequest);
		return Boolean.TRUE;
	} catch (IOException exception) {
		log.error(UNEXPECTED_ERROR_OCCURRED, exception);
	} 
	return Boolean.FALSE;
}
 
Example 4  Project: fredbet  File: AmazonS3ClientWrapper.java
public void uploadFile(String key, byte[] fileContent, String contentType) {
	try (ByteArrayInputStream byteIn = new ByteArrayInputStream(fileContent)) {
		ObjectMetadata meta = new ObjectMetadata();
		meta.setContentLength(fileContent.length);
		meta.setContentType(contentType);
		this.amazonS3.putObject(bucketName, key, byteIn, meta);
	} catch (IOException e) {
		throw new AwsS3AccessException(e.getMessage(), e);
	}
}
 
Example 5  Project: presto  File: TestPrestoS3FileSystem.java
@Test
public void testEmptyDirectory()
        throws Exception
{
    try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
        MockAmazonS3 s3 = new MockAmazonS3()
        {
            @Override
            public ObjectMetadata getObjectMetadata(GetObjectMetadataRequest getObjectMetadataRequest)
            {
                if (getObjectMetadataRequest.getKey().equals("empty-dir/")) {
                    ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentType(S3_DIRECTORY_OBJECT_CONTENT_TYPE);
                    return objectMetadata;
                }
                return super.getObjectMetadata(getObjectMetadataRequest);
            }
        };
        fs.initialize(new URI("s3n://test-bucket/"), new Configuration(false));
        fs.setS3Client(s3);

        FileStatus fileStatus = fs.getFileStatus(new Path("s3n://test-bucket/empty-dir/"));
        assertTrue(fileStatus.isDirectory());

        fileStatus = fs.getFileStatus(new Path("s3n://test-bucket/empty-dir"));
        assertTrue(fileStatus.isDirectory());
    }
}
 
Example 6  Project: Scribengin  File: S3ClientIntegrationTest.java
@Test
public void testUpdateObjectMetadata() throws IOException, InterruptedException {
  String KEY = "test-update-objec-metadata" ;
  ObjectMetadata metadata = new ObjectMetadata() ;
  metadata.setContentType("text/plain");
  s3Client.createObject(BUCKET_NAME, KEY, new byte[0], metadata);
  metadata = s3Client.getObjectMetadata(BUCKET_NAME, KEY) ;
  metadata.addUserMetadata("transaction", "buffering");
  s3Client.updateObjectMetadata(BUCKET_NAME, KEY, metadata);
  metadata = s3Client.getObjectMetadata(BUCKET_NAME, KEY) ;
  Assert.assertEquals("buffering", metadata.getUserMetaDataOf("transaction"));
}
 
Example 7  Project: java-almanac  File: S3MultiReportOutput.java
private void upload(String path, byte[] content) {
	ObjectMetadata metadata = new ObjectMetadata();
	metadata.setContentLength(content.length);
	metadata.setContentType(getContentType(path));
	PutObjectRequest request = new PutObjectRequest(bucketName, basePath + path, new ByteArrayInputStream(content),
			metadata).withCannedAcl(CannedAccessControlList.PublicRead);
	s3Client.putObject(request);
}
 
Example 8  Project: pizzeria  File: AwsS3FileStorageService.java
@Override
protected void saveFileContent(InputStream inputStream, String name, String contentType) throws IOException {
    byte[] bytes = IOUtils.toByteArray(inputStream);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentType(contentType);
    objectMetadata.setContentLength(bytes.length);
    getAmazonS3().putObject(bucketName, name, new ByteArrayInputStream(bytes), objectMetadata);
}
 
Example 9  Project: aws-photosharing-example  File: UploadThread.java
@Override
public void run() {
    ObjectMetadata meta_data = new ObjectMetadata();
    if (p_content_type != null)
        meta_data.setContentType(p_content_type);

    meta_data.setContentLength(p_size);

    PutObjectRequest putObjectRequest = new PutObjectRequest(p_bucket_name, p_s3_key, p_file_stream, meta_data);
    putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
    PutObjectResult res = s3Client.putObject(putObjectRequest);       
}
 
Example 10  Project: Scribengin  File: S3SinkStreamWriter.java
public S3SinkStreamWriter(S3Folder streamS3Folder) throws IOException {
  this.streamS3Folder = streamS3Folder;
  segmentName = "segment-" + UUID.randomUUID().toString();
  ObjectMetadata metadata = new ObjectMetadata();
  metadata.setContentType("application/binary");
  metadata.addUserMetadata("transaction", "prepare");
  writer = streamS3Folder.createObjectWriter(segmentName, metadata) ;
}
 
Example 11  Project: github-bucket  File: RepositoryS3.java
private boolean walk(Iterator<S3ObjectSummary> iter, ObjectId file, String path) throws IOException {
    byte[] content;
    byte[] newHash;
    LOG.debug("Start processing file: {}", path);
    try (DigestInputStream is = new DigestInputStream(repository.open(file).openStream(), DigestUtils.getMd5Digest())) {
        // Get content
        content = IOUtils.toByteArray(is);
        // Get hash
        newHash = is.getMessageDigest().digest();
    }
    if (isUploadFile(iter, path, Hex.encodeHexString(newHash))) {
        LOG.info("Uploading file: {}", path);
        ObjectMetadata bucketMetadata = new ObjectMetadata();
        bucketMetadata.setContentMD5(Base64.encodeAsString(newHash));
        bucketMetadata.setContentLength(content.length);
        // Give Tika a few hints for the content detection
        Metadata tikaMetadata = new Metadata();
        tikaMetadata.set(Metadata.RESOURCE_NAME_KEY, FilenameUtils.getName(FilenameUtils.normalize(path)));
        // Fire!
        try (InputStream bis = TikaInputStream.get(content, tikaMetadata)) {
            bucketMetadata.setContentType(TIKA_DETECTOR.detect(bis, tikaMetadata).toString());
            s3.putObject(bucket.getName(), path, bis, bucketMetadata);
            return true;
        }
    }
    LOG.info("Skipping file (same checksum): {}", path);
    return false;
}
 
Example 12  Project: spring-cloud-aws-sample  File: TablonController.java
@RequestMapping("/anuncio/nuevo")
public String nuevoAnuncio(Model model, 
		@RequestParam String nombre,
		@RequestParam String asunto,
		@RequestParam String comentario,
		@RequestParam String filename,
		@RequestParam MultipartFile file) {

       if (!file.isEmpty()) {
           try {
               ObjectMetadata objectMetadata = new ObjectMetadata();
               objectMetadata.setContentType(file.getContentType());

               TransferManager transferManager = TransferManagerBuilder.defaultTransferManager();
               transferManager.upload(bucket, filename, file.getInputStream(), objectMetadata);
               
           } catch (Exception e) {
           	model.addAttribute("message", "You failed to upload " + filename + " => " + e.getMessage());
               return "error";
           }
       } else {
       	model.addAttribute("message", "You failed to upload " + filename + " because the file was empty.");
           return "error";
       }

       Anuncio anuncio = new Anuncio(nombre, asunto, comentario);
       anuncio.setFoto(s3.getUrl(bucket, filename));

	repository.save(anuncio);

       return "anuncio_guardado";

}
 
Example 13  Project: hawkbit-extensions  File: S3Repository.java
private ObjectMetadata createObjectMetadata(final String mdMD5Hash16, final String contentType, final File file) {
    final ObjectMetadata objectMetadata = new ObjectMetadata();
    final String mdMD5Hash64 = BaseEncoding.base64().encode(BaseEncoding.base16().lowerCase().decode(mdMD5Hash16));
    objectMetadata.setContentMD5(mdMD5Hash64);
    objectMetadata.setContentType(contentType);
    objectMetadata.setContentLength(file.length());
    if (s3Properties.isServerSideEncryption()) {
        objectMetadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION, s3Properties.getServerSideEncryptionAlgorithm());
    }
    return objectMetadata;
}
 
Example 14  Project: digdag  File: S3WaitOperatorFactoryTest.java
@Test
public void testExponentialBackoff()
        throws Exception
{
    Config config = newConfig();
    config.set("_command", BUCKET + "/" + KEY);

    when(taskRequest.getConfig()).thenReturn(config);

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentType(CONTENT_TYPE);
    objectMetadata.setContentLength(CONTENT_LENGTH);
    objectMetadata.setUserMetadata(USER_METADATA);

    when(s3Client.getObjectMetadata(any(GetObjectMetadataRequest.class))).thenThrow(NOT_FOUND_EXCEPTION);

    Operator operator = factory.newOperator(newContext(projectPath, taskRequest));

    List<Integer> retryIntervals = new ArrayList<>();

    for (int i = 0; i < 10; i++) {
        try {
            operator.run();
            fail();
        }
        catch (TaskExecutionException e) {
            assertThat(e.isError(), is(false));
            assertThat(e.getRetryInterval().isPresent(), is(true));
            retryIntervals.add(e.getRetryInterval().get());
            Config lastStateParams = e.getStateParams(configFactory).get();
            when(taskRequest.getLastStateParams()).thenReturn(lastStateParams);
        }
    }

    for (int i = 1; i < retryIntervals.size(); i++) {
        int prevInterval = retryIntervals.get(i - 1);
        int interval = retryIntervals.get(i);
        assertThat(interval, is(Math.min(MAX_POLL_INTERVAL, prevInterval * 2)));
    }

    assertThat(retryIntervals.get(retryIntervals.size() - 1), is(MAX_POLL_INTERVAL));
}
 
Example 15  Project: openbd-core  File: Write.java
private void writeData( AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata, StorageClass storage, String mimetype, cfData data, int retry, int retryseconds, String acl, String aes256key, Map<String, String> customheaders ) throws Exception {
	if ( mimetype == null ) {
		if ( data.getDataType() == cfData.CFBINARYDATA )
			mimetype = "application/unknown";
		else if ( cfData.isSimpleValue( data ) )
			mimetype = "text/plain";
		else
			mimetype = "application/json";

		// Check to see if the mime type is in the metadata
		if ( metadata != null && metadata.containsKey( "Content-Type" ) )
			mimetype = metadata.get( "Content-Type" );
	}


	InputStream ios = null;
	long size = 0;
	if ( data.getDataType() == cfData.CFSTRINGDATA ) {
		ios = new java.io.ByteArrayInputStream( data.getString().getBytes() );
		size = data.getString().length();
	} else if ( data.getDataType() == cfData.CFBINARYDATA ) {
		ios = new java.io.ByteArrayInputStream( ( (cfBinaryData) data ).getByteArray() );
		size = ( (cfBinaryData) data ).getLength();
	} else {
		serializejson json = new serializejson();
		StringBuilder out = new StringBuilder();
		json.encodeJSON( out, data, false, CaseType.MAINTAIN, DateType.LONG );
		size = out.length();
		mimetype = "application/json";
		ios = new java.io.ByteArrayInputStream( out.toString().getBytes() );
	}


	// Setup the object data
	ObjectMetadata omd = new ObjectMetadata();
	if ( metadata != null )
		omd.setUserMetadata( metadata );

	omd.setContentType( mimetype );
	omd.setContentLength( size );

	AmazonS3 s3Client = getAmazonS3( amazonKey );

	// Retry the upload up to the configured number of attempts
	int attempts = 0;
	while ( attempts < retry ) {
		try {

			PutObjectRequest por = new PutObjectRequest( bucket, key, ios, omd );
			por.setStorageClass( storage );

			if ( aes256key != null && !aes256key.isEmpty() )
				por.setSSECustomerKey( new SSECustomerKey( aes256key ) );

			if ( acl != null && !acl.isEmpty() )
				por.setCannedAcl( amazonKey.getAmazonCannedAcl( acl ) );

			if ( customheaders != null && !customheaders.isEmpty() ) {
				Iterator<String> it = customheaders.keySet().iterator();
				while ( it.hasNext() ) {
					String k = it.next();
					por.putCustomRequestHeader( k, customheaders.get( k ) );
				}
			}

			s3Client.putObject( por );
			break;

		} catch ( Exception e ) {
			cfEngine.log( "Failed: AmazonS3Write(bucket=" + bucket + "; key=" + key + "; attempt=" + ( attempts + 1 ) + "; exception=" + e.getMessage() + ")" );
			attempts++;

			if ( attempts == retry )
				throw e;
			else
				Thread.sleep( retryseconds * 1000 );
		}
	}
}
 
Example 16  Project: aws-lambda-unzip  File: S3EventProcessorUnzip.java
@Override
public String handleRequest(S3Event s3Event, Context context) {
    byte[] buffer = new byte[1024];
    try {
        for (S3EventNotificationRecord record: s3Event.getRecords()) {
            String srcBucket = record.getS3().getBucket().getName();

            // Object key may have spaces or unicode non-ASCII characters.
            String srcKey = record.getS3().getObject().getKey()
                    .replace('+', ' ');
            srcKey = URLDecoder.decode(srcKey, "UTF-8");

            // Detect file type
            Matcher matcher = Pattern.compile(".*\\.([^\\.]*)").matcher(srcKey);
            if (!matcher.matches()) {
                System.out.println("Unable to detect file type for key " + srcKey);
                return "";
            }
            String extension = matcher.group(1).toLowerCase();
            if (!"zip".equals(extension)) {
                System.out.println("Skipping non-zip file " + srcKey + " with extension " + extension);
                return "";
            }
            System.out.println("Extracting zip file " + srcBucket + "/" + srcKey);
            
            // Download the zip from S3 into a stream
            AmazonS3 s3Client = new AmazonS3Client();
            S3Object s3Object = s3Client.getObject(new GetObjectRequest(srcBucket, srcKey));
            ZipInputStream zis = new ZipInputStream(s3Object.getObjectContent());
            ZipEntry entry = zis.getNextEntry();

            while(entry != null) {
                String fileName = entry.getName();
                String mimeType = FileMimeType.fromExtension(FilenameUtils.getExtension(fileName)).mimeType();
                System.out.println("Extracting " + fileName + ", compressed: " + entry.getCompressedSize() + " bytes, extracted: " + entry.getSize() + " bytes, mimetype: " + mimeType);
                ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
                int len;
                while ((len = zis.read(buffer)) > 0) {
                    outputStream.write(buffer, 0, len);
                }
                InputStream is = new ByteArrayInputStream(outputStream.toByteArray());
                ObjectMetadata meta = new ObjectMetadata();
                meta.setContentLength(outputStream.size());
                meta.setContentType(mimeType);
                s3Client.putObject(srcBucket, FilenameUtils.getFullPath(srcKey) + fileName, is, meta);
                is.close();
                outputStream.close();
                entry = zis.getNextEntry();
            }
            zis.closeEntry();
            zis.close();
            
            //delete zip file when done
            System.out.println("Deleting zip file " + srcBucket + "/" + srcKey + "...");
            s3Client.deleteObject(new DeleteObjectRequest(srcBucket, srcKey));
            System.out.println("Done deleting");
        }
        return "Ok";
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
 
Example 17  Project: nifi  File: TestFetchS3Object.java
@Test
public void testGetObject() throws IOException {
    runner.setProperty(FetchS3Object.REGION, "us-east-1");
    runner.setProperty(FetchS3Object.BUCKET, "request-bucket");
    final Map<String, String> attrs = new HashMap<>();
    attrs.put("filename", "request-key");
    runner.enqueue(new byte[0], attrs);

    S3Object s3ObjectResponse = new S3Object();
    s3ObjectResponse.setBucketName("response-bucket-name");
    s3ObjectResponse.setKey("response-key");
    s3ObjectResponse.setObjectContent(new StringInputStream("Some Content"));
    ObjectMetadata metadata = Mockito.spy(ObjectMetadata.class);
    metadata.setContentDisposition("key/path/to/file.txt");
    metadata.setContentType("text/plain");
    metadata.setContentMD5("testMD5hash");
    Date expiration = new Date();
    metadata.setExpirationTime(expiration);
    metadata.setExpirationTimeRuleId("testExpirationRuleId");
    Map<String, String> userMetadata = new HashMap<>();
    userMetadata.put("userKey1", "userValue1");
    userMetadata.put("userKey2", "userValue2");
    metadata.setUserMetadata(userMetadata);
    metadata.setSSEAlgorithm("testAlgorithm");
    Mockito.when(metadata.getETag()).thenReturn("test-etag");
    s3ObjectResponse.setObjectMetadata(metadata);
    Mockito.when(mockS3Client.getObject(Mockito.any())).thenReturn(s3ObjectResponse);

    runner.run(1);

    ArgumentCaptor<GetObjectRequest> captureRequest = ArgumentCaptor.forClass(GetObjectRequest.class);
    Mockito.verify(mockS3Client, Mockito.times(1)).getObject(captureRequest.capture());
    GetObjectRequest request = captureRequest.getValue();
    assertEquals("request-bucket", request.getBucketName());
    assertEquals("request-key", request.getKey());
    assertFalse(request.isRequesterPays());
    assertNull(request.getVersionId());

    runner.assertAllFlowFilesTransferred(FetchS3Object.REL_SUCCESS, 1);
    final List<MockFlowFile> ffs = runner.getFlowFilesForRelationship(FetchS3Object.REL_SUCCESS);
    MockFlowFile ff = ffs.get(0);
    ff.assertAttributeEquals("s3.bucket", "response-bucket-name");
    ff.assertAttributeEquals(CoreAttributes.FILENAME.key(), "file.txt");
    ff.assertAttributeEquals(CoreAttributes.PATH.key(), "key/path/to");
    ff.assertAttributeEquals(CoreAttributes.ABSOLUTE_PATH.key(), "key/path/to/file.txt");
    ff.assertAttributeEquals(CoreAttributes.MIME_TYPE.key(), "text/plain");
    ff.assertAttributeEquals("hash.value", "testMD5hash");
    ff.assertAttributeEquals("hash.algorithm", "MD5");
    ff.assertAttributeEquals("s3.etag", "test-etag");
    ff.assertAttributeEquals("s3.expirationTime", String.valueOf(expiration.getTime()));
    ff.assertAttributeEquals("s3.expirationTimeRuleId", "testExpirationRuleId");
    ff.assertAttributeEquals("userKey1", "userValue1");
    ff.assertAttributeEquals("userKey2", "userValue2");
    ff.assertAttributeEquals("s3.sseAlgorithm", "testAlgorithm");
    ff.assertContentEquals("Some Content");
}
 
Example 18  Project: nifi  File: TestFetchS3Object.java
@Test
public void testGetObjectWithRequesterPays() throws IOException {
    runner.setProperty(FetchS3Object.REGION, "us-east-1");
    runner.setProperty(FetchS3Object.BUCKET, "request-bucket");
    runner.setProperty(FetchS3Object.REQUESTER_PAYS, "true");
    final Map<String, String> attrs = new HashMap<>();
    attrs.put("filename", "request-key");
    runner.enqueue(new byte[0], attrs);

    S3Object s3ObjectResponse = new S3Object();
    s3ObjectResponse.setBucketName("response-bucket-name");
    s3ObjectResponse.setKey("response-key");
    s3ObjectResponse.setObjectContent(new StringInputStream("Some Content"));
    ObjectMetadata metadata = Mockito.spy(ObjectMetadata.class);
    metadata.setContentDisposition("key/path/to/file.txt");
    metadata.setContentType("text/plain");
    metadata.setContentMD5("testMD5hash");
    Date expiration = new Date();
    metadata.setExpirationTime(expiration);
    metadata.setExpirationTimeRuleId("testExpirationRuleId");
    Map<String, String> userMetadata = new HashMap<>();
    userMetadata.put("userKey1", "userValue1");
    userMetadata.put("userKey2", "userValue2");
    metadata.setUserMetadata(userMetadata);
    metadata.setSSEAlgorithm("testAlgorithm");
    Mockito.when(metadata.getETag()).thenReturn("test-etag");
    s3ObjectResponse.setObjectMetadata(metadata);
    Mockito.when(mockS3Client.getObject(Mockito.any())).thenReturn(s3ObjectResponse);

    runner.run(1);

    ArgumentCaptor<GetObjectRequest> captureRequest = ArgumentCaptor.forClass(GetObjectRequest.class);
    Mockito.verify(mockS3Client, Mockito.times(1)).getObject(captureRequest.capture());
    GetObjectRequest request = captureRequest.getValue();
    assertEquals("request-bucket", request.getBucketName());
    assertEquals("request-key", request.getKey());
    assertTrue(request.isRequesterPays());
    assertNull(request.getVersionId());

    runner.assertAllFlowFilesTransferred(FetchS3Object.REL_SUCCESS, 1);
    final List<MockFlowFile> ffs = runner.getFlowFilesForRelationship(FetchS3Object.REL_SUCCESS);
    MockFlowFile ff = ffs.get(0);
    ff.assertAttributeEquals("s3.bucket", "response-bucket-name");
    ff.assertAttributeEquals(CoreAttributes.FILENAME.key(), "file.txt");
    ff.assertAttributeEquals(CoreAttributes.PATH.key(), "key/path/to");
    ff.assertAttributeEquals(CoreAttributes.ABSOLUTE_PATH.key(), "key/path/to/file.txt");
    ff.assertAttributeEquals(CoreAttributes.MIME_TYPE.key(), "text/plain");
    ff.assertAttributeEquals("hash.value", "testMD5hash");
    ff.assertAttributeEquals("hash.algorithm", "MD5");
    ff.assertAttributeEquals("s3.etag", "test-etag");
    ff.assertAttributeEquals("s3.expirationTime", String.valueOf(expiration.getTime()));
    ff.assertAttributeEquals("s3.expirationTimeRuleId", "testExpirationRuleId");
    ff.assertAttributeEquals("userKey1", "userValue1");
    ff.assertAttributeEquals("userKey2", "userValue2");
    ff.assertAttributeEquals("s3.sseAlgorithm", "testAlgorithm");
    ff.assertContentEquals("Some Content");
}
 
Example 19  Project: Scribengin  File: S3Folder.java
public void create(String name, InputStream is, String mimeType) {
  ObjectMetadata metadata = new ObjectMetadata();
  metadata.setContentType(mimeType);
  create(name, is, metadata);
}