The following are examples of how com.google.common.io.Closer#close() is used in real projects; the full source of each snippet can be found on GitHub.
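Most of the snippets below follow the usage pattern recommended in Guava's Closer documentation: create a Closer, register each resource as it is opened, route any failure through closer.rethrow(...), and call closer.close() in a finally block. The following minimal sketch illustrates that pattern on its own; the copyFile method and the file paths are illustrative only and do not come from any of the projects quoted below.

import com.google.common.io.ByteStreams;
import com.google.common.io.Closer;

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class CloserExample {

  // Copies one file to another, closing both streams through a single Closer.
  public static void copyFile(String from, String to) throws IOException {
    Closer closer = Closer.create();
    try {
      InputStream in = closer.register(new FileInputStream(from));
      OutputStream out = closer.register(new FileOutputStream(to));
      ByteStreams.copy(in, out);
    } catch (Throwable t) {
      // Records t as the primary exception; exceptions thrown later by close()
      // are suppressed (or logged on older JDKs) instead of masking it.
      throw closer.rethrow(t);
    } finally {
      // Closes all registered resources in reverse registration order.
      closer.close();
    }
  }

  public static void main(String[] args) throws IOException {
    copyFile(args[0], args[1]);
  }
}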
/**
 * Reads an existing index. Returns the index of the index, i.e. the list of chunks defined by the Cassandra
 * Index.db file along with the configured split size.
 *
 * @param fileSystem Hadoop file system.
 * @param sstablePath Path to the SSTable Index.db file.
 * @return Index of chunks.
 * @throws IOException if the index file cannot be opened or read.
 */
public static SSTableIndexIndex readIndex(final FileSystem fileSystem, final Path sstablePath) throws IOException {
  final Closer closer = Closer.create();
  final Path indexPath = sstablePath.suffix(SSTABLE_INDEX_SUFFIX);

  // Detonate if we don't have an index.
  final FSDataInputStream inputStream = closer.register(fileSystem.open(indexPath));

  final SSTableIndexIndex indexIndex = new SSTableIndexIndex();
  try {
    while (inputStream.available() != 0) {
      indexIndex.add(inputStream.readLong(), inputStream.readLong());
    }
  } finally {
    closer.close();
  }

  return indexIndex;
}
@Override
@NonNull
public Properties loadProperties(@NonNull File file) {
  Properties props = new Properties();
  Closer closer = Closer.create();
  try {
    FileInputStream fis = closer.register(new FileInputStream(file));
    props.load(fis);
  } catch (IOException ignore) {
    // Ignore read errors and return whatever was loaded (possibly an empty Properties).
  } finally {
    try {
      closer.close();
    } catch (IOException e) {
      // Ignore errors thrown while closing as well.
    }
  }
  return props;
}
@Test
public void testGetWorkUnitsAndExtractor() throws IOException, DataRecordException {
  OldApiHadoopFileInputSource<String, Text, LongWritable, Text> fileInputSource = new TestHadoopFileInputSource();

  List<WorkUnit> workUnitList = fileInputSource.getWorkunits(this.sourceState);
  Assert.assertEquals(workUnitList.size(), 1);

  WorkUnitState workUnitState = new WorkUnitState(workUnitList.get(0));

  Closer closer = Closer.create();
  try {
    OldApiHadoopFileInputExtractor<String, Text, LongWritable, Text> extractor =
        (OldApiHadoopFileInputExtractor<String, Text, LongWritable, Text>) fileInputSource.getExtractor(
            workUnitState);
    Text text = extractor.readRecord(null);
    Assert.assertEquals(text.toString(), TEXT);
    Assert.assertNull(extractor.readRecord(null));
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
private static void zipFiles(List<File> files, String basepath, OutputStream os) throws IOException {
  Closer closer = Closer.create();
  try {
    ZipOutputStream zos = new ZipOutputStream(os);
    closer.register(zos);
    for (File file : files) {
      if (file.isFile()) {
        FileInputStream inputStream = new FileInputStream(file);
        closer.register(inputStream);
        writeZipEntry(zipEntry(basepath, file), inputStream, zos);
      } else {
        ZipEntry ze = zipEntry(basepath, file);
        logger.trace("Adding directory zip entry: " + ze.toString());
        zos.putNextEntry(ze);
      }
    }
  } catch (IOException ioe) {
    throw closer.rethrow(ioe);
  } finally {
    closer.close();
  }
}
public static void zip(FileObject root, List<FileObject> files, OutputStream out) throws IOException {
  String basePath = root.getName().getPath();
  Closer closer = Closer.create();
  try {
    ZipOutputStream zos = new ZipOutputStream(out);
    closer.register(zos);
    for (FileObject fileToCopy : files) {
      ZipEntry zipEntry = zipEntry(basePath, fileToCopy);
      zos.putNextEntry(zipEntry);
      copyFileContents(fileToCopy, zos);
      zos.flush();
      zos.closeEntry();
    }
  } catch (IOException e) {
    throw closer.rethrow(e);
  } finally {
    closer.close();
  }
}
@Test(dependsOnMethods = {"testSetAndGet"})
public void testSerDe() throws IOException {
  Closer closer = Closer.create();
  try {
    ByteArrayOutputStream baos = closer.register(new ByteArrayOutputStream());
    DataOutputStream dos = closer.register(new DataOutputStream(baos));
    this.jobState.write(dos);

    ByteArrayInputStream bais = closer.register(new ByteArrayInputStream(baos.toByteArray()));
    DataInputStream dis = closer.register(new DataInputStream(bais));
    JobState newJobState = new JobState();
    newJobState.readFields(dis);
    doAsserts(newJobState, true, false);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
/**
* Update the log4j configuration.
*
* @param targetClass the target class used to get the original log4j configuration file as a resource
* @param log4jFileName the custom log4j configuration properties file name
* @throws IOException if there's something wrong with updating the log4j configuration
*/
public static void updateLog4jConfiguration(Class<?> targetClass, String log4jFileName) throws IOException {
  final Closer closer = Closer.create();
  try {
    final InputStream inputStream = closer.register(targetClass.getResourceAsStream("/" + log4jFileName));
    final Properties originalProperties = new Properties();
    originalProperties.load(inputStream);

    LogManager.resetConfiguration();
    PropertyConfigurator.configure(originalProperties);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
@Override
public void saveProperties(
    @NonNull File file,
    @NonNull Properties props,
    @NonNull String comments) throws IOException {
  Closer closer = Closer.create();
  try {
    OutputStream fos = closer.register(newFileOutputStream(file));
    props.store(fos, comments);
  } catch (Throwable e) {
    throw closer.rethrow(e);
  } finally {
    closer.close();
  }
}
private static void writeConfigSyncedFile(File file, String agentId) throws IOException {
  Closer closer = Closer.create();
  try {
    PrintWriter out = closer.register(new PrintWriter(file, UTF_8.name()));
    out.println("# this file is created after the agent has pushed its local configuration"
        + " to the central collector");
    out.println("#");
    out.println("# when this file is present (and the agent.id below matches the running"
        + " agent's agent.id), the agent");
    out.println("# will overwrite its local configuration with the agent configuration it"
        + " retrieves from the central");
    out.println("# collector on JVM startup");
    out.println("#");
    out.println("# when this file is not present (or the agent.id below does not match the"
        + " running agent's agent.id),");
    out.println("# the agent will push its local configuration to the central collector on"
        + " JVM startup (overwriting");
    out.println("# any existing remote configuration), after which the agent will"
        + " (re-)create this file using the");
    out.println("# running agent's agent.id");
    out.println("");
    out.println("agent.id=" + agentId);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
@Test
@SuppressWarnings("deprecation")
public void testSerializeToSequenceFile() throws IOException {
  Closer closer = Closer.create();
  Configuration conf = new Configuration();
  WritableShimSerialization.addToHadoopConfiguration(conf);
  try {
    SequenceFile.Writer writer1 = closer.register(SequenceFile.createWriter(this.fs, conf,
        new Path(this.outputPath, "seq1"), Text.class, WorkUnitState.class));

    Text key = new Text();
    WorkUnitState workUnitState = new WorkUnitState();
    TestWatermark watermark = new TestWatermark();
    watermark.setLongWatermark(10L);
    workUnitState.setActualHighWatermark(watermark);
    writer1.append(key, workUnitState);

    SequenceFile.Writer writer2 = closer.register(SequenceFile.createWriter(this.fs, conf,
        new Path(this.outputPath, "seq2"), Text.class, WorkUnitState.class));

    watermark.setLongWatermark(100L);
    workUnitState.setActualHighWatermark(watermark);
    writer2.append(key, workUnitState);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
/**
* Generates the BuildConfig class.
*/
public void generate() throws IOException {
  File pkgFolder = getFolderPath();
  if (!pkgFolder.isDirectory()) {
    if (!pkgFolder.mkdirs()) {
      throw new RuntimeException("Failed to create " + pkgFolder.getAbsolutePath());
    }
  }

  File buildConfigJava = new File(pkgFolder, BUILD_CONFIG_NAME);

  Closer closer = Closer.create();
  try {
    FileOutputStream fos = closer.register(new FileOutputStream(buildConfigJava));
    OutputStreamWriter out = closer.register(new OutputStreamWriter(fos, Charsets.UTF_8));
    JavaWriter writer = closer.register(new JavaWriter(out));

    writer.emitJavadoc("Automatically generated file. DO NOT MODIFY")
        .emitPackage(mBuildConfigPackageName)
        .beginType("BuildConfig", "class", PUBLIC_FINAL);

    for (ClassField field : mFields) {
      emitClassField(writer, field);
    }

    for (Object item : mItems) {
      if (item instanceof ClassField) {
        emitClassField(writer, (ClassField) item);
      } else if (item instanceof String) {
        writer.emitSingleLineComment((String) item);
      }
    }

    writer.endType();
  } catch (Throwable e) {
    throw closer.rethrow(e);
  } finally {
    closer.close();
  }
}
/**
 * Initializes a JobContext by initializing a JobLauncher. This is mostly used for
 * testing job template resolution.
 */
private JobContext dummyJobContextInitHelper(Properties jobProps) throws Exception {
  Closer closer = Closer.create();
  try {
    JobLauncher jobLauncher = closer.register(JobLauncherFactory.newJobLauncher(this.launcherProps, jobProps));
    return ((AbstractJobLauncher) jobLauncher).getJobContext();
  } finally {
    closer.close();
  }
}
@Test
public void testGetFileBasedJobLock() throws JobLockException, IOException {
  Closer closer = Closer.create();
  try {
    Properties properties = new Properties();
    properties.setProperty(ConfigurationKeys.FS_URI_KEY, "file:///");
    properties.setProperty(FileBasedJobLock.JOB_LOCK_DIR, "JobLockFactoryTest");
    properties.setProperty(ConfigurationKeys.JOB_NAME_KEY, "JobLockFactoryTest-" + System.currentTimeMillis());
    properties.setProperty(ConfigurationKeys.JOB_LOCK_TYPE, FileBasedJobLock.class.getName());
    JobLock jobLock = closer.register(LegacyJobLockFactoryManager.getJobLock(properties, new JobLockEventListener()));
    MatcherAssert.assertThat(jobLock, Matchers.instanceOf(FileBasedJobLock.class));
  } finally {
    closer.close();
  }
}
@Override
public void close() throws IOException {
  if (!_openIterators.isEmpty()) {
    try {
      // Use a closer to cleanly close all iterators even if one throws an exception on close
      Closer closer = Closer.create();
      for (StashRowIterator iterator : _openIterators) {
        closer.register(iterator);
      }
      closer.close();
    } finally {
      _openIterators.clear();
    }
  }
}
private static void loadClassNamesFromDirectoryInsideJarFile(File jarFile,
    String directoryInsideJarFile, Location location,
    Multimap<String, Location> newClassNameLocations) throws IOException {
  Closer closer = Closer.create();
  try {
    InputStream in = closer.register(new FileInputStream(jarFile));
    JarInputStream jarIn = closer.register(new JarInputStream(in));
    loadClassNamesFromJarInputStream(jarIn, directoryInsideJarFile, location,
        newClassNameLocations);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
private Schema parseSchema(String schemaFile) throws IOException {
  Closer closer = Closer.create();
  try {
    InputStream in = closer.register(getClass().getResourceAsStream(schemaFile));
    return new Schema.Parser().parse(in);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
/**
* Parses a {@link org.apache.gobblin.metrics.MetricReport} from a byte array representing a json input.
* @param reuse MetricReport to reuse.
* @param bytes Input bytes.
* @return MetricReport.
* @throws java.io.IOException
*/
public static synchronized MetricReport deserializeReportFromJson(MetricReport reuse, byte[] bytes)
    throws IOException {
  if (!READER.isPresent()) {
    READER = Optional.of(new SpecificDatumReader<>(MetricReport.class));
  }

  Closer closer = Closer.create();
  try {
    DataInputStream inputStream = closer.register(new DataInputStream(new ByteArrayInputStream(bytes)));

    // Check the schema version (stored as an int header).
    int versionNumber = inputStream.readInt();
    if (versionNumber != SCHEMA_VERSION) {
      throw new IOException(String
          .format("MetricReport schema version not recognized. Found version %d, expected %d.", versionNumber,
              SCHEMA_VERSION));
    }

    // Decode the rest of the payload as Avro JSON.
    Decoder decoder = DecoderFactory.get().jsonDecoder(MetricReport.SCHEMA$, inputStream);
    return READER.get().read(reuse, decoder);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
@Test
public void testServerInjectError() throws IOException {
  Closer closer = Closer.create();
  try {
    File tmpFile = new File("./target/tmp/StreamServerTest");
    tmpFile.mkdirs();
    IndexServer indexServer = new TestIndexServer();
    StreamProcessor streamProcessor = new StreamProcessor(indexServer, tmpFile);
    int timeout = 3000000;
    String classLoaderId = UUID.randomUUID().toString();

    StreamServer server = closer.register(new StreamServer(0, 100, streamProcessor));
    server.start();
    int port = server.getPort();

    StreamClient client = closer.register(new StreamClient("localhost", port, timeout));
    assertFalse(client.isClassLoaderAvailable(classLoaderId));
    client.loadJars(classLoaderId, getTestJar());

    String table = "test";
    String shard = "shard";
    String user = "test";
    Map<String, String> userAttributes = new HashMap<String, String>();
    StreamSplit split = new StreamSplit(table, shard, classLoaderId, user, userAttributes);
    try {
      Iterable<String> it = client.executeStream(split, new StreamFunction<String>() {
        @Override
        public void call(IndexContext indexContext, StreamWriter<String> writer) throws Exception {
          Class.forName("errorclass");
        }
      });
      Iterator<String> iterator = it.iterator();
      if (iterator.hasNext()) {
        iterator.next();
      }
      fail();
    } catch (StreamException e) {
      Throwable cause = e.getCause();
      cause.printStackTrace();
    }
  } finally {
    closer.close();
  }
}
/**
 * This test uses the Avro SerDe to deserialize data from Avro files, and the ORC SerDe
 * to serialize the records into ORC files.
 */
@Test(groups = { "gobblin.serde" })
public void testAvroOrcSerDes()
    throws IOException, DataRecordException, DataConversionException, URISyntaxException {
  Properties properties = new Properties();
  properties.load(HiveSerDeTest.class.getClassLoader().getResourceAsStream("serde/serde.properties"));
  SourceState sourceState = new SourceState(new State(properties), ImmutableList.<WorkUnitState> of());
  File schemaFile = new File(HiveSerDeTest.class.getClassLoader().getResource("serde/serde.avsc").toURI());
  sourceState.setProp("avro.schema.url", schemaFile.getAbsolutePath());

  OldApiWritableFileSource source = new OldApiWritableFileSource();
  List<WorkUnit> workUnits = source.getWorkunits(sourceState);
  Assert.assertEquals(workUnits.size(), 1);

  WorkUnitState wus = new WorkUnitState(workUnits.get(0));
  wus.addAll(sourceState);

  Closer closer = Closer.create();
  HiveWritableHdfsDataWriter writer = null;
  try {
    OldApiWritableFileExtractor extractor = closer.register((OldApiWritableFileExtractor) source.getExtractor(wus));
    HiveSerDeConverter converter = closer.register(new HiveSerDeConverter());
    writer =
        closer.register((HiveWritableHdfsDataWriter) new HiveWritableHdfsDataWriterBuilder<>().withBranches(1)
            .withWriterId("0").writeTo(Destination.of(DestinationType.HDFS, sourceState))
            .withAttemptId("0-0")
            .writeInFormat(WriterOutputFormat.ORC).build());

    Assert.assertTrue(writer.isSpeculativeAttemptSafe());
    converter.init(wus);

    Writable record;
    while ((record = extractor.readRecord(null)) != null) {
      Iterable<Writable> convertedRecordIterable = converter.convertRecordImpl(null, record, wus);
      Assert.assertEquals(Iterators.size(convertedRecordIterable.iterator()), 1);
      writer.write(convertedRecordIterable.iterator().next());
    }
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
    if (writer != null) {
      writer.commit();
    }
    Assert.assertTrue(this.fs.exists(new Path(sourceState.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
        sourceState.getProp(ConfigurationKeys.WRITER_FILE_NAME))));
    HadoopUtils.deletePath(this.fs, new Path(sourceState.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR)), true);
  }
}