The following are usage examples of com.google.common.io.Closeables collected from real projects. Each snippet shows a common pattern for closing streams and other resources.
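The snippets below revolve around two Guava helpers: Closeables.close(closeable, swallowIOException), which either rethrows or logs an IOException raised by close(), and Closeables.closeQuietly(...), which always swallows it. A minimal sketch of the first helper (the file path and helper method are hypothetical):

import com.google.common.io.Closeables;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class CloseablesBasics {
    // Hypothetical helper: read the first byte of a file, closing the stream in finally.
    static int readFirstByte(String path) throws IOException {
        InputStream in = new FileInputStream(path);
        try {
            return in.read();
        } finally {
            // true: an IOException thrown by close() is logged by Guava, not propagated.
            Closeables.close(in, true);
        }
    }
}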
private static String getStringValue(@NonNull IAbstractFile file, @NonNull String xPath)
throws StreamException, XPathExpressionException {
XPath xpath = AndroidXPathFactory.newXPath();
InputStream is = null;
try {
is = file.getContents();
return xpath.evaluate(xPath, new InputSource(is));
} finally {
try {
Closeables.close(is, true /* swallowIOException */);
} catch (IOException e) {
// cannot happen: swallowIOException is true, so close() will not rethrow
}
}
}
public static String readFromResource(String resource) throws IOException {
InputStream in = null;
try {
in = Thread.currentThread().getContextClassLoader().getResourceAsStream(resource);
if (in == null) {
in = Utils.class.getResourceAsStream(resource);
}
if (in == null) {
return null;
}
String text = Utils.read(in);
return text;
} finally {
Closeables.closeQuietly(in);
}
}
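Closeables.closeQuietly has overloads for InputStream and Reader, the cases where a failure in close() tells the caller nothing useful; in recent Guava versions the generic Closeable overload was removed. A short sketch of the same read-and-close-quietly pattern (the helper name is made up):

import com.google.common.base.Charsets;
import com.google.common.io.CharStreams;
import com.google.common.io.Closeables;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;

public class QuietClose {
    // Hypothetical helper: drain an InputStream into a String, then close quietly.
    static String readAll(InputStream in) throws IOException {
        Reader reader = new InputStreamReader(in, Charsets.UTF_8);
        try {
            return CharStreams.toString(reader);
        } finally {
            Closeables.closeQuietly(reader); // any IOException from close() is swallowed
        }
    }
}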
public static boolean createNewStore(String storeType, File storeFile, char[] storePassword, DN dn) {
if (storeType == null) {
storeType = "jks";
}
try {
KeyStore ks = KeyStore.getInstance(storeType);
ks.load(null, null);
Pair<PrivateKey, X509Certificate> generated = generateKeyAndCertificate("RSA", "SHA1withRSA", dn.validityYears, encodeDN(dn));
ks.setKeyEntry(dn.alias, generated.getFirst(), dn.password, new Certificate[]{generated.getSecond()});
FileOutputStream fos = new FileOutputStream(storeFile);
boolean threw = true;
try {
ks.store(fos, storePassword);
threw = false;
} finally {
Closeables.close(fos, threw);
}
} catch (KeyStoreException | IOException | NoSuchAlgorithmException | CertificateException | OperatorCreationException e) {
return false;
}
return true;
}
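createNewStore uses the threw flag so a close() error is swallowed only when the body already failed: an exception from ks.store(...) is never masked by a secondary failure in close(), while a failing close() after a successful store is still reported. A minimal sketch of the idiom with a hypothetical write step:

import com.google.common.io.Closeables;

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class ThrewFlagIdiom {
    // Hypothetical save helper illustrating the threw/Closeables.close pairing.
    static void save(String path, byte[] data) throws IOException {
        OutputStream out = new FileOutputStream(path);
        boolean threw = true;
        try {
            out.write(data);
            threw = false; // body completed normally
        } finally {
            // threw == true: swallow close() errors so they don't mask the original failure.
            // threw == false: a failing close() (e.g. lost buffered bytes) is propagated.
            Closeables.close(out, threw);
        }
    }
}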
public static boolean addNewKey(KeyStore ks, File storeFile, char[] storePassword, DN dn) {
try {
Pair<PrivateKey, X509Certificate> generated = generateKeyAndCertificate("RSA", "SHA1withRSA", dn.validityYears, encodeDN(dn));
ks.setKeyEntry(dn.alias, generated.getFirst(), dn.password, new Certificate[]{generated.getSecond()});
FileOutputStream fos = new FileOutputStream(storeFile);
boolean threw = true;
try {
ks.store(fos, storePassword);
threw = false;
} finally {
Closeables.close(fos, threw);
}
} catch (KeyStoreException | IOException | NoSuchAlgorithmException | CertificateException | OperatorCreationException e) {
return false;
}
return true;
}
static List<PendingUpload> readPendingCommits(FileSystem fs,
Path pendingCommitsFile)
throws IOException {
List<PendingUpload> commits = Lists.newArrayList();
ObjectInputStream in = new ObjectInputStream(fs.open(pendingCommitsFile));
boolean threw = true;
try {
for (PendingUpload commit : new ObjectIterator<PendingUpload>(in)) {
commits.add(commit);
}
threw = false;
} finally {
Closeables.close(in, threw);
}
return commits;
}
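readPendingCommits applies the same threw idiom to an ObjectInputStream. On Java 7+ the equivalent guarantee is usually expressed with try-with-resources, which attaches a failing close() as a suppressed exception instead of letting it mask the body's exception. A reduced sketch (the file name is hypothetical):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;

public class TryWithResourcesEquivalent {
    // Hypothetical reader: the stream is closed automatically, even on failure.
    static Object readFirstObject(String path) throws IOException, ClassNotFoundException {
        try (ObjectInputStream in = new ObjectInputStream(new FileInputStream(path))) {
            return in.readObject();
        }
    }
}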
/**
* @see org.alfasoftware.morf.metadata.Schema#getTable(java.lang.String)
*/
@Override
public Table getTable(String name) {
// Read the meta data for the specified table
InputStream inputStream = xmlStreamProvider.openInputStreamForTable(name);
try {
XMLStreamReader xmlStreamReader = openPullParser(inputStream);
XmlPullProcessor.readTag(xmlStreamReader, XmlDataSetNode.TABLE_NODE);
String version = xmlStreamReader.getAttributeValue(XmlDataSetNode.URI, XmlDataSetNode.VERSION_ATTRIBUTE);
if (StringUtils.isNotEmpty(version)) {
return new PullProcessorTableMetaData(xmlStreamReader, Integer.parseInt(version));
} else {
return new PullProcessorTableMetaData(xmlStreamReader, 1);
}
} finally {
// abandon any remaining content
Closeables.closeQuietly(inputStream);
}
}
/**
* Stops the metrics reporters and closes their underlying resources.
*/
public static synchronized void stop() {
if(!isStart()){
return;
}
try {
if (METRICS_REPORTER_INSTANCE._jmxReporter != null) {
Closeables.close(METRICS_REPORTER_INSTANCE._jmxReporter, true);
}
if (METRICS_REPORTER_INSTANCE._graphiteReporter != null) {
Closeables.close(METRICS_REPORTER_INSTANCE._graphiteReporter, true);
}
} catch (Exception e) {
LOGGER.error("Error while closing Jmx and Graphite reporters.", e);
}
DID_INIT = false;
METRICS_REPORTER_INSTANCE = null;
}
public SecondaryIndexTable(TransactionServiceClient transactionServiceClient, Table table,
byte[] secondaryIndex) throws IOException {
secondaryIndexTableName = TableName.valueOf(table.getName().getNameAsString() + ".idx");
this.connection = ConnectionFactory.createConnection(table.getConfiguration());
Table secondaryIndexHTable = null;
try (Admin hBaseAdmin = this.connection.getAdmin()) {
if (!hBaseAdmin.tableExists(secondaryIndexTableName)) {
hBaseAdmin.createTable(TableDescriptorBuilder.newBuilder(secondaryIndexTableName).build());
}
secondaryIndexHTable = this.connection.getTable(secondaryIndexTableName);
} catch (Exception e) {
Closeables.closeQuietly(connection);
Throwables.propagate(e);
}
this.secondaryIndex = secondaryIndex;
this.transactionAwareHTable = new TransactionAwareHTable(table);
this.secondaryIndexTable = new TransactionAwareHTable(secondaryIndexHTable);
this.transactionContext = new TransactionContext(transactionServiceClient, transactionAwareHTable,
secondaryIndexTable);
}
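The SecondaryIndexTable constructor closes the partially acquired connection before rethrowing when table setup fails, so a failed construction does not leak it. A stripped-down sketch of that guard (the resource type and setup step are hypothetical):

import com.google.common.base.Throwables;
import com.google.common.io.Closeables;

import java.io.Closeable;
import java.io.IOException;

public class GuardedConstruction {
    private final Closeable resource;

    // Hypothetical wrapper: if setup fails, release what was already acquired.
    GuardedConstruction(Closeable resource) {
        try {
            initialize(resource); // hypothetical setup step that may throw
        } catch (Exception e) {
            try {
                Closeables.close(resource, true); // best-effort cleanup before rethrowing
            } catch (IOException ignored) {
                // cannot happen: swallowIOException is true
            }
            throw Throwables.propagate(e);
        }
        this.resource = resource;
    }

    private void initialize(Closeable resource) throws IOException {
        // hypothetical initialization work
    }
}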
@Override
public void writeSnapshot(TransactionSnapshot snapshot) throws IOException {
// save the snapshot to a temporary file
File snapshotTmpFile = new File(snapshotDir, TMP_SNAPSHOT_FILE_PREFIX + snapshot.getTimestamp());
LOG.debug("Writing snapshot to temporary file {}", snapshotTmpFile);
OutputStream out = Files.newOutputStreamSupplier(snapshotTmpFile).getOutput();
boolean threw = true;
try {
codecProvider.encode(out, snapshot);
threw = false;
} finally {
Closeables.close(out, threw);
}
// move the temporary file into place with the correct filename
File finalFile = new File(snapshotDir, SNAPSHOT_FILE_PREFIX + snapshot.getTimestamp());
if (!snapshotTmpFile.renameTo(finalFile)) {
throw new IOException("Failed renaming temporary snapshot file " + snapshotTmpFile.getName() + " to " +
finalFile.getName());
}
LOG.debug("Completed snapshot to file {}", finalFile);
}
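writeSnapshot pairs the threw idiom with a write-to-temp-then-rename step, so a reader never observes a half-written snapshot file. A reduced sketch (directory, name, and payload are hypothetical):

import com.google.common.io.Closeables;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class AtomicSnapshotWrite {
    // Hypothetical writer: write to a temp file first, then rename into place.
    static void write(File dir, String name, byte[] encoded) throws IOException {
        File tmp = new File(dir, name + ".tmp");
        OutputStream out = new FileOutputStream(tmp);
        boolean threw = true;
        try {
            out.write(encoded);
            threw = false;
        } finally {
            Closeables.close(out, threw);
        }
        File finalFile = new File(dir, name);
        if (!tmp.renameTo(finalFile)) {
            throw new IOException("Failed renaming " + tmp + " to " + finalFile);
        }
    }
}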
/**
* Converts this chunk into an array of bytes representation. Normally you will not need to
* override this method unless your header changes based on the contents / size of the payload.
*/
@Override
public final byte[] toByteArray(int options) throws IOException {
ByteBuffer header = ByteBuffer.allocate(getHeaderSize()).order(ByteOrder.LITTLE_ENDIAN);
writeHeader(header, 0); // The chunk size isn't known yet. This will be filled in later.
ByteArrayOutputStream baos = new ByteArrayOutputStream();
LittleEndianDataOutputStream payload = new LittleEndianDataOutputStream(baos);
try {
writePayload(payload, header, options);
} finally {
Closeables.close(payload, true);
}
byte[] payloadBytes = baos.toByteArray();
int chunkSize = getHeaderSize() + payloadBytes.length;
header.putInt(CHUNK_SIZE_OFFSET, chunkSize);
// Combine results
ByteBuffer result = ByteBuffer.allocate(chunkSize).order(ByteOrder.LITTLE_ENDIAN);
result.put(header.array());
result.put(payloadBytes);
return result.array();
}
@Override
protected void writePayload(DataOutput output, ByteBuffer header, int options)
throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
int stringOffset = 0;
ByteBuffer offsets = ByteBuffer.allocate(getOffsetSize());
offsets.order(ByteOrder.LITTLE_ENDIAN);
// Write to a temporary payload so we can rearrange this and put the offsets first
LittleEndianDataOutputStream payload = new LittleEndianDataOutputStream(baos);
try {
stringOffset = writeStrings(payload, offsets, options);
writeStyles(payload, offsets, options);
} finally {
Closeables.close(payload, true);
}
output.write(offsets.array());
output.write(baos.toByteArray());
if (!styles.isEmpty()) {
header.putInt(STYLE_START_OFFSET, getHeaderSize() + getOffsetSize() + stringOffset);
}
}
@Override
public byte[] toByteArray(int options) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
LittleEndianDataOutputStream payload = new LittleEndianDataOutputStream(baos);
try {
for (StringPoolSpan span : spans()) {
byte[] encodedSpan = span.toByteArray(options);
if (encodedSpan.length != StringPoolSpan.SPAN_LENGTH) {
throw new IllegalStateException("Encountered a span of invalid length.");
}
payload.write(encodedSpan);
}
payload.writeInt(RES_STRING_POOL_SPAN_END);
} finally {
Closeables.close(payload, true);
}
return baos.toByteArray();
}
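The chunk writers above buffer their payload through Guava's LittleEndianDataOutputStream on top of a ByteArrayOutputStream; closing with swallowIOException set to true is safe here because ByteArrayOutputStream.close() is a no-op and cannot fail. A minimal sketch of that buffering pattern (the encoded value is arbitrary):

import com.google.common.io.Closeables;
import com.google.common.io.LittleEndianDataOutputStream;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class LittleEndianBuffer {
    // Hypothetical encoder: serialize one int in little-endian byte order.
    static byte[] encode(int value) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        LittleEndianDataOutputStream out = new LittleEndianDataOutputStream(baos);
        try {
            out.writeInt(value); // written least-significant byte first
        } finally {
            Closeables.close(out, true);
        }
        return baos.toByteArray();
    }
}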
@Test
@UseDataProvider("merge")
public void testMerge(InputStream local, InputStream remote, InputStream base, InputStream expected)
throws IOException {
MergeClient mergeClient = new ScmPomVersionsMergeClient();
ByteArrayOutputStream os = new ByteArrayOutputStream();
try {
mergeClient.merge(local, remote, base, os);
String resultPOM = os.toString(StandardCharsets.UTF_8.name());
String expectedPOM = new String(ByteStreams.toByteArray(expected), StandardCharsets.UTF_8);
// whitespace is stripped on both sides to avoid line-ending differences between platforms
Assert.assertEquals(expectedPOM.replaceAll("\\s+", ""), resultPOM.replaceAll("\\s+", ""));
} finally {
Closeables.closeQuietly(local);
Closeables.closeQuietly(remote);
Closeables.closeQuietly(base);
Closeables.close(os, true);
}
}
@Override
protected void run(Bootstrap<EmoConfiguration> bootstrap, Namespace namespace, EmoConfiguration emoConfiguration)
throws Exception {
_outputOnly = namespace.getBoolean("output_only");
DdlConfiguration ddlConfiguration = parseDdlConfiguration(toFile(namespace.getString("config-ddl")));
CuratorFramework curator = null;
if (!_outputOnly) {
curator = emoConfiguration.getZooKeeperConfiguration().newCurator();
curator.start();
}
try {
createKeyspacesIfNecessary(emoConfiguration, ddlConfiguration, curator, bootstrap.getMetricRegistry());
} finally {
Closeables.close(curator, true);
}
}
public String toCqlScript() {
try {
final InputStream cqlStream = CreateKeyspacesCommand.class.getResourceAsStream(_templateResourceName);
if (cqlStream == null) {
throw new IllegalStateException("couldn't find " + _templateResourceName + " in classpath");
}
String cql;
try {
cql = CharStreams.toString(new InputStreamReader(cqlStream, "UTF-8"));
} finally {
Closeables.close(cqlStream, true);
}
// replace bindings
for (Map.Entry<String, String> binding : _bindings.entrySet()) {
cql = cql.replace("${" + binding.getKey() + "}", binding.getValue());
}
return cql;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* Read the next row from the split, storing the coordinate in "key" and the content in "value". If there are no
* more rows then false is returned and "key" and "value" are not modified.
* @return true if a row was read, false if there were no more rows
*/
public boolean setNextKeyValue(Text key, Row value)
throws IOException {
if (!_rows.hasNext()) {
Closeables.close(this, true);
return false;
}
try {
Map<String, Object> content = _rows.next();
key.set(Coordinate.fromJson(content).toString());
value.set(content);
_rowsRead += 1;
} catch (Exception e) {
for (Throwable cause : Throwables.getCausalChain(e)) {
Throwables.propagateIfInstanceOf(cause, IOException.class);
}
throw new IOException("Failed to read next row", e);
}
return true;
}
/**
* In order to fail fast create the initial iterator on instantiation. This way exceptions such as
* TableNotStashedException will be thrown immediately and not deferred until the first iteration.
*/
public StashRowIterable() {
_initialIterator = createStashRowIterator();
// Force the first record to be evaluated by calling hasNext()
try {
_initialIterator.hasNext();
_openIterators.add(_initialIterator);
} catch (Exception e) {
try {
Closeables.close(_initialIterator, true);
} catch (IOException e2) {
// Already caught and logged
}
throw Throwables.propagate(e);
}
}
StashSplitIterator(AmazonS3 s3, String bucket, String key) {
InputStream rawIn = new RestartingS3InputStream(s3, bucket, key);
try {
// File is gzipped
// Note:
// Because the content may be concatenated gzip files we cannot use the default GZIPInputStream.
// GzipCompressorInputStream supports concatenated gzip files.
GzipCompressorInputStream gzipIn = new GzipCompressorInputStream(rawIn, true);
_in = new BufferedReader(new InputStreamReader(gzipIn, Charsets.UTF_8));
// Create a line reader
_reader = new LineReader(_in);
} catch (Exception e) {
try {
Closeables.close(rawIn, true);
} catch (IOException ignore) {
// Won't happen, already caught and logged
}
throw Throwables.propagate(e);
}
}
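StashSplitIterator wraps the raw S3 stream in commons-compress's GzipCompressorInputStream with decompressConcatenated set to true, because java.util.zip.GZIPInputStream would stop after the first gzip member of a concatenated file. A minimal sketch of that wrapping (the caller still owns and closes the returned reader):

import com.google.common.base.Charsets;
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

public class ConcatenatedGzipReader {
    // Hypothetical helper: decode all concatenated gzip members as UTF-8 text.
    static BufferedReader open(InputStream rawIn) throws IOException {
        GzipCompressorInputStream gzipIn = new GzipCompressorInputStream(rawIn, true);
        return new BufferedReader(new InputStreamReader(gzipIn, Charsets.UTF_8));
    }
}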
private StreamSupplier streamSupplier(final BlobRequest request, final BlobResponse response) {
return new StreamSupplier() {
@Override
public void writeTo(OutputStream out) throws IOException {
InputStream in = response.getInputStream();
if (in == null) {
// The original stream has already been consumed. Re-open a new stream from the server.
in = get(request).getInputStream();
}
try {
ByteStreams.copy(in, out);
} finally {
Closeables.close(in, true);
}
}
};
}
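The StreamSupplier copies a stream it owns into a stream the caller owns, so only the input side is closed here. A small sketch of that copy-and-close-input idiom (the helper name is made up):

import com.google.common.io.ByteStreams;
import com.google.common.io.Closeables;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class CopyIdiom {
    // Hypothetical helper: copy everything from in to out, closing only in.
    static long copyAndCloseInput(InputStream in, OutputStream out) throws IOException {
        try {
            return ByteStreams.copy(in, out);
        } finally {
            Closeables.close(in, true); // the caller remains responsible for out
        }
    }
}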
public void testNormal() { // 12.526 seconds
Jedis jedis = new Jedis("120.25.241.144", 6379);
jedis.auth("b840fc02d52404542994");
long start = System.currentTimeMillis();
for (int i = 0; i < 1000; i++) {
jedis.set("n" + i, "n" + i);
System.out.println(i);
}
long end = System.currentTimeMillis();
System.out.println("共花费:" + (end - start) / 1000.0 + "秒");
jedis.disconnect();
try {
Closeables.close(jedis, true);
} catch (IOException e) {
e.printStackTrace();
}
}
public void testTrans() { // 0.304 seconds
Jedis jedis = new Jedis("120.25.241.144", 6379);
jedis.auth("b840fc02d52404542994");
long start = System.currentTimeMillis();
Transaction tx = jedis.multi();
for (int i = 0; i < 1000; i++) {
tx.set("n" + i, "n" + i);
System.out.println(i);
}
tx.exec();
long end = System.currentTimeMillis();
System.out.println("共花费:" + (end - start) / 1000.0 + "秒");
jedis.disconnect();
try {
Closeables.close(jedis, true);
} catch (IOException e) {
e.printStackTrace();
}
}
public void testPipelined() { // 0.076 seconds
Jedis jedis = new Jedis("120.25.241.144", 6379);
jedis.auth("b840fc02d52404542994");
long start = System.currentTimeMillis();
Pipeline pipeline = jedis.pipelined();
for (int i = 0; i < 1000; i++) {
pipeline.set("n" + i, "n" + i);
System.out.println(i);
}
pipeline.syncAndReturnAll();
long end = System.currentTimeMillis();
System.out.println("共花费:" + (end - start) / 1000.0 + "秒");
jedis.disconnect();
try {
Closeables.close(jedis, true);
} catch (IOException e) {
e.printStackTrace();
}
}
public void testCombPipelineTrans() { // 0.099 seconds
Jedis jedis = new Jedis("120.25.241.144", 6379);
jedis.auth("b840fc02d52404542994");
long start = System.currentTimeMillis();
Pipeline pipeline = jedis.pipelined();
pipeline.multi();
for (int i = 0; i < 1000; i++) {
pipeline.set("n" + i, "n" + i);
System.out.println(i);
}
pipeline.exec();
pipeline.syncAndReturnAll();
long end = System.currentTimeMillis();
System.out.println("共花费:" + (end - start) / 1000.0 + "秒");
jedis.disconnect();
try {
Closeables.close(jedis, true);
} catch (IOException e) {
e.printStackTrace();
}
}
public void testShardNormal() { // 13.619 seconds
JedisShardInfo jedis = new JedisShardInfo("120.25.241.144", 6379);
jedis.setPassword("b840fc02d52404542994");
List<JedisShardInfo> shards = Arrays.asList(jedis);
ShardedJedis sharding = new ShardedJedis(shards);
long start = System.currentTimeMillis();
for (int i = 0; i < 1000; i++) {
sharding.set("n" + i, "n" + i);
System.out.println(i);
}
long end = System.currentTimeMillis();
System.out.println("共花费:" + (end - start) / 1000.0 + "秒");
sharding.disconnect();
try {
Closeables.close(sharding, true);
} catch (IOException e) {
e.printStackTrace();
}
}
public void testShardPipelined() { // 0.127 seconds
JedisShardInfo jedis = new JedisShardInfo("120.25.241.144", 6379);
jedis.setPassword("b840fc02d52404542994");
List<JedisShardInfo> shards = Arrays.asList(jedis);
ShardedJedis sharding = new ShardedJedis(shards);
ShardedJedisPipeline pipeline = sharding.pipelined();
long start = System.currentTimeMillis();
for (int i = 0; i < 1000; i++) {
pipeline.set("n" + i, "n" + i);
System.out.println(i);
}
pipeline.syncAndReturnAll();
long end = System.currentTimeMillis();
System.out.println("共花费:" + (end - start) / 1000.0 + "秒");
sharding.disconnect();
try {
Closeables.close(sharding, true);
} catch (IOException e) {
e.printStackTrace();
}
}
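The benchmarks above call disconnect() and then Closeables.close(jedis, true); in Jedis versions where Jedis implements java.io.Closeable, try-with-resources covers both steps. A minimal sketch of the pipelined variant (host and port are placeholders, and authentication is omitted):

import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;

public class PipelinedWrite {
    // Hypothetical batch write: the connection is released even if a command fails.
    static void writeBatch(String host, int port) {
        try (Jedis jedis = new Jedis(host, port)) {
            Pipeline pipeline = jedis.pipelined();
            for (int i = 0; i < 1000; i++) {
                pipeline.set("n" + i, "n" + i);
            }
            pipeline.syncAndReturnAll(); // flush all queued commands at once
        }
    }
}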
protected Thread startOutputProcessor(final KitLogger logger, final InputStream inputStream, final boolean error, final AtomicBoolean outputEnabled) throws IOException {
Thread printer = new Thread() {
@Override
public void run() {
BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream));
try {
String line;
while ((line = reader.readLine()) != null) {
if (outputEnabled.get()) {
if (error) {
logger.error("%s", line);
} else {
logger.info("%s", line);
}
}
}
} catch (Exception e) {
if (outputEnabled.get()) {
logger.error("Failed to process " + (error ? "stderr" : "stdout") + " from spring-remote process: " + e);
}
} finally {
Closeables.closeQuietly(reader);
}
}
};
printer.start();
return printer;
}
/**
* If set, the file is expected to contain a checkpoints file calculated with BuildCheckpoints. It makes initial
* block sync faster for new users - please refer to the documentation on the bitcoinj website
* (https://bitcoinj.github.io/speeding-up-chain-sync) for further details.
*/
public WalletAppKit setCheckpoints(InputStream checkpoints) {
if (this.checkpoints != null)
Closeables.closeQuietly(checkpoints);
this.checkpoints = checkNotNull(checkpoints);
return this;
}
public static <T> T deserializeJAXB(Resource resource, Class<T> clazz) {
InputStream is = null;
try{
is = resource.open();
return deserializeJAXB(is, clazz);
}catch (Exception e){
throw new RuntimeException("Error parsing XML document", e);
}finally{
Closeables.closeQuietly(is);
}
}
public static Document parseDocumentDOM(String xmlResource) {
InputStream is = null;
try{
Resource resource = Resources.getResource(xmlResource);
is = resource.open();
return parseDocumentDOM(is);
}
catch(Exception ioe){
throw new RuntimeException(ioe);
}
finally{
Closeables.closeQuietly(is);
}
}