The following are example usages of com.google.common.io.ByteSource#openStream(), collected from open-source projects; follow the links to view the full source on GitHub.
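Before the project examples, a minimal sketch of the method's contract: each call to openStream() returns a fresh InputStream over the source's bytes, and the caller owns the stream and must close it, typically with try-with-resources. The class name and literal below are illustrative only.
import com.google.common.io.ByteSource;
import java.io.IOException;
import java.io.InputStream;
import static java.nio.charset.StandardCharsets.UTF_8;

class OpenStreamSketch {
    public static void main(String[] args) throws IOException {
        // Every call to openStream() yields a new, independent stream.
        ByteSource source = ByteSource.wrap("hello".getBytes(UTF_8));
        try (InputStream in = source.openStream()) {
            byte[] buffer = new byte[16];
            int n = in.read(buffer); // bytes read, or -1 at end of stream
            System.out.println(new String(buffer, 0, n, UTF_8));
        }
    }
}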
public PrometheusRecordCursor(List<PrometheusColumnHandle> columnHandles, ByteSource byteSource)
{
this.columnHandles = columnHandles;
fieldToColumnIndex = new int[columnHandles.size()];
for (int i = 0; i < columnHandles.size(); i++) {
PrometheusColumnHandle columnHandle = columnHandles.get(i);
fieldToColumnIndex[i] = columnHandle.getOrdinalPosition();
}
try (CountingInputStream input = new CountingInputStream(byteSource.openStream())) {
metricsItr = prometheusResultsInStandardizedForm(new PrometheusQueryResponseParse(input).getResults()).iterator();
totalBytes = input.getCount();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public ExampleRecordCursor(List<ExampleColumnHandle> columnHandles, ByteSource byteSource)
{
this.columnHandles = columnHandles;
fieldToColumnIndex = new int[columnHandles.size()];
for (int i = 0; i < columnHandles.size(); i++) {
ExampleColumnHandle columnHandle = columnHandles.get(i);
fieldToColumnIndex[i] = columnHandle.getOrdinalPosition();
}
try (CountingInputStream input = new CountingInputStream(byteSource.openStream())) {
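// Note: asCharSource(UTF_8).readLines() opens the ByteSource again internally,
// so nothing is read through `input` and getCount() below reports 0 bytes.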
lines = byteSource.asCharSource(UTF_8).readLines().iterator();
totalBytes = input.getCount();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
/**
 * Given a list of {@link ByteSource}s, computes the GZIP size increment attributed to each stream.
 */
public ImmutableList<Long> calculateGZipSizeForEntries(List<ByteSource> byteSources)
throws IOException {
ImmutableList.Builder<Long> gzipSizeIncrements = ImmutableList.builder();
try (ApkGzipDeflater deflater = deflaterSupplier.get()) {
// matches the {@code ByteStreams} buffer size
byte[] inputBuffer = new byte[INPUT_BUFFER_SIZE];
for (ByteSource byteSource : byteSources) {
try (InputStream is = byteSource.openStream()) {
while (true) {
int r = is.read(inputBuffer);
if (r == -1) {
gzipSizeIncrements.add(
Math.max(0, deflater.entryComplete() - DEFLATER_SYNC_OVERHEAD_BYTES));
break;
}
deflater.handleInput(inputBuffer, r);
}
}
}
}
return gzipSizeIncrements.build();
}
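A hedged usage sketch for the method above; `calculator` stands in for an instance of the (unshown) enclosing class that supplies the deflater, and the inputs are illustrative:
// Hypothetical usage: one size increment is returned per input, in order.
List<ByteSource> entries = ImmutableList.of(
        ByteSource.wrap("first entry".getBytes(UTF_8)),
        ByteSource.wrap(new byte[4096]));
ImmutableList<Long> increments = calculator.calculateGZipSizeForEntries(entries);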
private static String computeHash(ByteSource source, HashFunction hashFunction)
throws IOException {
try (InputStream inputStream = source.openStream();
HashingOutputStream outputStream =
new HashingOutputStream(
hashFunction,
new OutputStream() {
@Override
public void write(int b) {
// Do nothing.
}
})) {
ByteStreams.copy(inputStream, outputStream);
return outputStream.hash().toString();
}
}
public static EtcdClusterConfig fromProperties(ByteSource source) throws IOException {
Properties props = new Properties();
try (InputStream in = source.openStream()) {
props.load(in);
}
String epString = props.getProperty("endpoints");
if (epString == null) {
throw new IOException("etcd config must contain endpoints property");
}
EtcdClusterConfig config = new EtcdClusterConfig();
config.endpoints = Sets.newHashSet(epString.split(","));
config.user = bs(props.getProperty("username"));
config.password = bs(props.getProperty("password"));
config.composeDeployment = props.getProperty("compose_deployment");
config.rootPrefix = bs(props.getProperty("root_prefix")); // a.k.a. namespace
String tlsMode = props.getProperty("tls_mode");
if (tlsMode != null) {
config.tlsMode = TlsMode.valueOf(tlsMode);
}
String certPath = props.getProperty("certificate_file");
if (certPath != null) {
File certFile = new File(certPath);
if (!certFile.exists()) {
throw new IOException("cant find certificate file: " + certPath);
}
config.certificate = Files.asByteSource(certFile);
}
config.overrideAuthority = props.getProperty("override_authority");
return config;
}
private static Supplier<InputStream> getStream(ByteSource byteSource) {
return () -> {
try {
return byteSource.openStream();
} catch (IOException e) {
throw new RuntimeException("Could not open stream", e);
}
};
}
static PGPPublicKey loadPublicKey(ByteSource pgpPublicKeyFile) {
try (InputStream input = pgpPublicKeyFile.openStream();
InputStream decoder = PGPUtil.getDecoderStream(input)) {
return new BcPGPPublicKeyRing(decoder).getPublicKey();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
void configureLogging() throws IOException {
ByteSource baseConfig = (configFile != null)
? Files.asByteSource(configFile.toFile())
: DEFAULT_LOG_CONFIG;
if (logLevel != null) {
configLines.add(".level = " + logLevel);
}
// Add an extra leading newline in case base properties file does not end in a newline.
String customProperties = "\n" + Joiner.on('\n').join(configLines);
ByteSource logConfig =
ByteSource.concat(baseConfig, ByteSource.wrap(customProperties.getBytes(UTF_8)));
try (InputStream input = logConfig.openStream()) {
LogManager.getLogManager().readConfiguration(input);
}
}
private void filterResource(final ByteSource source, final File target) throws IOException {
try (final InputStreamReader input = new InputStreamReader(source.openStream())) {
try (final Writer output = new OutputStreamWriter(createFile(target))) {
TEMPLATER.execute(input, output, target.getAbsolutePath(), parameters);
}
}
}
/** Calculates the GZIP-compressed size in bytes of the target {@code byteSource}. */
public static long calculateGzipCompressedSize(ByteSource byteSource) throws IOException {
try (InputStream is = byteSource.openStream()) {
return calculateGzipCompressedSize(is);
}
}
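A one-line usage sketch, assuming a static import of the method; the 8 KiB buffer is illustrative:
// Sketch: GZIP-compressed size of an in-memory source, in bytes.
long compressed = calculateGzipCompressedSize(ByteSource.wrap(new byte[8192]));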
public static void main(String[] args) throws Exception {
Options options = new Options();
CmdLineParser parser = new CmdLineParser(options);
try {
parser.parseArgument(args);
} catch (CmdLineException cle) {
usage(parser);
}
if (options.version) {
System.err.println(
Main.class.getPackage().getImplementationVersion());
System.exit(0);
}
ByteSource byteSource;
if (options.propertiesFile == null) {
byteSource = Resources.asByteSource(Resources.getResource(
"chaos-http-proxy.conf"));
} else {
byteSource = Files.asByteSource(options.propertiesFile);
}
ChaosConfig config;
try (InputStream is = byteSource.openStream()) {
config = ChaosConfig.loadFromPropertyStream(is);
} catch (IOException ioe) {
System.err.println(ioe.getMessage());
System.exit(1);
return;
}
URI proxyEndpoint = new URI("http", null, options.address,
options.port, null, null, null);
ChaosHttpProxy proxy = new ChaosHttpProxy(proxyEndpoint, config);
try {
proxy.start();
} catch (Exception e) {
System.err.println(e.getMessage());
System.exit(1);
}
}
@Test
public void testCreateMultipartBlobGetBlob() throws Exception {
String blobName = "multipart-upload";
BlobMetadata blobMetadata = makeBlob(nullBlobStore, blobName)
.getMetadata();
MultipartUpload mpu = nullBlobStore.initiateMultipartUpload(
containerName, blobMetadata, new PutOptions());
ByteSource byteSource = TestUtils.randomByteSource().slice(
0, nullBlobStore.getMinimumMultipartPartSize() + 1);
ByteSource byteSource1 = byteSource.slice(
0, nullBlobStore.getMinimumMultipartPartSize());
ByteSource byteSource2 = byteSource.slice(
nullBlobStore.getMinimumMultipartPartSize(), 1);
Payload payload1 = Payloads.newByteSourcePayload(byteSource1);
Payload payload2 = Payloads.newByteSourcePayload(byteSource2);
payload1.getContentMetadata().setContentLength(byteSource1.size());
payload2.getContentMetadata().setContentLength(byteSource2.size());
MultipartPart part1 = nullBlobStore.uploadMultipartPart(mpu, 1,
payload1);
MultipartPart part2 = nullBlobStore.uploadMultipartPart(mpu, 2,
payload2);
List<MultipartPart> parts = nullBlobStore.listMultipartUpload(mpu);
assertThat(parts.get(0).partNumber()).isEqualTo(1);
assertThat(parts.get(0).partSize()).isEqualTo(byteSource1.size());
assertThat(parts.get(0).partETag()).isEqualTo(part1.partETag());
assertThat(parts.get(1).partNumber()).isEqualTo(2);
assertThat(parts.get(1).partSize()).isEqualTo(byteSource2.size());
assertThat(parts.get(1).partETag()).isEqualTo(part2.partETag());
assertThat(nullBlobStore.listMultipartUpload(mpu)).hasSize(2);
nullBlobStore.completeMultipartUpload(mpu, parts);
Blob newBlob = nullBlobStore.getBlob(containerName, blobName);
validateBlobMetadata(newBlob.getMetadata());
// content differs, only compare length
try (InputStream actual = newBlob.getPayload().openStream();
InputStream expected = byteSource.openStream()) {
long actualLength = ByteStreams.copy(actual,
ByteStreams.nullOutputStream());
long expectedLength = ByteStreams.copy(expected,
ByteStreams.nullOutputStream());
assertThat(actualLength).isEqualTo(expectedLength);
}
nullBlobStore.removeBlob(containerName, blobName);
assertThat(nullBlobStore.list(containerName)).isEmpty();
}
@Test
public void testBigMultipartUpload() throws Exception {
String key = "multipart-upload";
long partSize = MINIMUM_MULTIPART_SIZE;
long size = partSize + 1;
ByteSource byteSource = TestUtils.randomByteSource().slice(0, size);
InitiateMultipartUploadRequest initRequest =
new InitiateMultipartUploadRequest(containerName, key);
InitiateMultipartUploadResult initResponse =
client.initiateMultipartUpload(initRequest);
String uploadId = initResponse.getUploadId();
ByteSource byteSource1 = byteSource.slice(0, partSize);
UploadPartRequest uploadRequest1 = new UploadPartRequest()
.withBucketName(containerName)
.withKey(key)
.withUploadId(uploadId)
.withPartNumber(1)
.withInputStream(byteSource1.openStream())
.withPartSize(byteSource1.size());
uploadRequest1.getRequestClientOptions().setReadLimit(
(int) byteSource1.size());
UploadPartResult uploadPartResult1 = client.uploadPart(uploadRequest1);
ByteSource byteSource2 = byteSource.slice(partSize, size - partSize);
UploadPartRequest uploadRequest2 = new UploadPartRequest()
.withBucketName(containerName)
.withKey(key)
.withUploadId(uploadId)
.withPartNumber(2)
.withInputStream(byteSource2.openStream())
.withPartSize(byteSource2.size());
uploadRequest2.getRequestClientOptions().setReadLimit(
(int) byteSource2.size());
UploadPartResult uploadPartResult2 = client.uploadPart(uploadRequest2);
CompleteMultipartUploadRequest completeRequest =
new CompleteMultipartUploadRequest(
containerName, key, uploadId,
ImmutableList.of(
uploadPartResult1.getPartETag(),
uploadPartResult2.getPartETag()));
client.completeMultipartUpload(completeRequest);
S3Object object = client.getObject(containerName, key);
assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
size);
try (InputStream actual = object.getObjectContent();
InputStream expected = byteSource.openStream()) {
assertThat(actual).hasContentEqualTo(expected);
}
}
@Test
public void testMultipartUpload() throws Exception {
String blobName = "multipart-upload";
String cacheControl = "max-age=3600";
String contentDisposition = "attachment; filename=new.jpg";
String contentEncoding = "gzip";
String contentLanguage = "fr";
String contentType = "audio/mp4";
Map<String, String> userMetadata = ImmutableMap.of(
"key1", "value1",
"key2", "value2");
ObjectMetadata metadata = new ObjectMetadata();
if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {
metadata.setCacheControl(cacheControl);
}
if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {
metadata.setContentDisposition(contentDisposition);
}
if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {
metadata.setContentEncoding(contentEncoding);
}
if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {
metadata.setContentLanguage(contentLanguage);
}
metadata.setContentType(contentType);
// TODO: expires
metadata.setUserMetadata(userMetadata);
InitiateMultipartUploadResult result = client.initiateMultipartUpload(
new InitiateMultipartUploadRequest(containerName, blobName,
metadata));
ByteSource byteSource = TestUtils.randomByteSource().slice(
0, MINIMUM_MULTIPART_SIZE + 1);
ByteSource byteSource1 = byteSource.slice(0, MINIMUM_MULTIPART_SIZE);
ByteSource byteSource2 = byteSource.slice(MINIMUM_MULTIPART_SIZE, 1);
UploadPartResult part1 = client.uploadPart(new UploadPartRequest()
.withBucketName(containerName)
.withKey(blobName)
.withUploadId(result.getUploadId())
.withPartNumber(1)
.withPartSize(byteSource1.size())
.withInputStream(byteSource1.openStream()));
UploadPartResult part2 = client.uploadPart(new UploadPartRequest()
.withBucketName(containerName)
.withKey(blobName)
.withUploadId(result.getUploadId())
.withPartNumber(2)
.withPartSize(byteSource2.size())
.withInputStream(byteSource2.openStream()));
client.completeMultipartUpload(new CompleteMultipartUploadRequest(
containerName, blobName, result.getUploadId(),
ImmutableList.of(part1.getPartETag(), part2.getPartETag())));
ObjectListing listing = client.listObjects(containerName);
assertThat(listing.getObjectSummaries()).hasSize(1);
S3Object object = client.getObject(containerName, blobName);
try (InputStream actual = object.getObjectContent();
InputStream expected = byteSource.openStream()) {
assertThat(actual).hasContentEqualTo(expected);
}
ObjectMetadata newContentMetadata = object.getObjectMetadata();
if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {
assertThat(newContentMetadata.getCacheControl()).isEqualTo(
cacheControl);
}
if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {
assertThat(newContentMetadata.getContentDisposition()).isEqualTo(
contentDisposition);
}
if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {
assertThat(newContentMetadata.getContentEncoding()).isEqualTo(
contentEncoding);
}
if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {
assertThat(newContentMetadata.getContentLanguage()).isEqualTo(
contentLanguage);
}
assertThat(newContentMetadata.getContentType()).isEqualTo(
contentType);
// TODO: expires
assertThat(newContentMetadata.getUserMetadata()).isEqualTo(
userMetadata);
}
/**
 * Provides the CA certificate to use for the TLS connection.
 *
 * @param certSource the source of the CA certificate
 * @throws IOException if there is an error reading from the provided {@link ByteSource}
 * @throws SSLException if the SSL context cannot be built from the certificate
 */
public Builder withCaCert(ByteSource certSource) throws IOException, SSLException {
try (InputStream cert = certSource.openStream()) {
sslContext = sslBuilder().trustManager(cert).build();
}
return this;
}
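A usage sketch under stated assumptions: `builder` is an already-constructed instance of the Builder above, and the certificate path points at a PEM file on disk.
// Hypothetical usage: supply the CA certificate as a file-backed ByteSource.
builder.withCaCert(Files.asByteSource(new File("/etc/ssl/certs/ca.pem")));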
/**
 * Use a {@link ByteSource} as input for the servlet. Be sure to call {@link #close} after
 * your servlet runs so the resource opened via {@code bytes} gets closed.
 *
 * @throws IOException if a stream cannot be opened from {@code bytes}
 */
public FakeServletInputStream(ByteSource bytes) throws IOException {
this.input = bytes.openStream();
}
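A usage sketch of the close contract the Javadoc asks for; the servlet interaction is elided and the body literal is illustrative:
// Sketch: close the FakeServletInputStream so the stream opened from the
// ByteSource is released once the servlet has run.
FakeServletInputStream input =
        new FakeServletInputStream(ByteSource.wrap("body".getBytes(UTF_8)));
try {
    // ... pass `input` to the servlet under test ...
} finally {
    input.close();
}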