Below are example usages of com.google.protobuf.ByteString#readFrom(); follow the link to view the source on GitHub, or leave a comment on the right.
/**
 * Classifies the JPEG image read from {@code stream} via the Inception gRPC stub.
 *
 * <p>The stream is fully consumed but not closed; closing is the caller's responsibility.
 *
 * @param stream JPEG-encoded image data
 * @return class-name/confidence pairs, in the order returned by the service
 * @throws Exception if reading the stream or the remote call fails
 */
public List<Map.Entry<String, Double>> recognise(InputStream stream) throws Exception {
  ByteString jpegData = ByteString.readFrom(stream);
  InceptionRequest request =
      InceptionRequest.newBuilder().setJpegEncoded(jpegData).build();
  long start = System.currentTimeMillis();
  InceptionResponse response = stub.classify(request);
  LOG.debug("Time taken : {}ms", System.currentTimeMillis() - start);
  // Pair up classes and scores positionally; stop at the shorter list, matching the
  // original dual-iterator walk.
  List<String> classNames = response.getClassesList();
  List<Float> confidences = response.getScoresList();
  int count = Math.min(classNames.size(), confidences.size());
  List<Map.Entry<String, Double>> results = new ArrayList<>(count);
  for (int i = 0; i < count; i++) {
    results.add(
        new AbstractMap.SimpleEntry<>(classNames.get(i), confidences.get(i).doubleValue()));
  }
  return results;
}
/**
 * Creates a buffer with data read from a file on the classpath.
 *
 * @param fileClassPath classpath location of the file to read
 * @return the file's contents as a {@link ByteString}
 * @throws IllegalArgumentException if the file cannot be located on the classpath
 * @throws RuntimeException if reading the stream fails
 */
private ByteString createBufferFromFile(String fileClassPath) {
  final InputStream inputStream = getClass().getResourceAsStream(fileClassPath);
  if (inputStream == null) {
    throw new IllegalArgumentException("Unable to locate file on classpath: " + fileClassPath);
  }
  // try-with-resources closes the stream on every path, replacing the manual
  // try/finally (and the "Finally" warning suppression it required). A failure
  // while closing is suppressed onto the primary exception instead of being lost.
  try (InputStream in = inputStream) {
    return ByteString.readFrom(in);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Detects dominant-color image properties of a local image and prints them to stdout.
 *
 * @param filePath path of the local image file to analyze
 * @throws IOException on errors reading the file or creating the client
 */
public static void detectProperties(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();
  // Read the image inside try-with-resources: ByteString.readFrom does not close the
  // stream, and the original code leaked the FileInputStream.
  ByteString imgBytes;
  try (InputStream in = new FileInputStream(filePath)) {
    imgBytes = ByteString.readFrom(in);
  }
  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.IMAGE_PROPERTIES).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    for (AnnotateImageResponse res : response.getResponsesList()) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }
      // For full list of available annotations, see http://g.co/cloud/vision/docs
      DominantColorsAnnotation colors = res.getImagePropertiesAnnotation().getDominantColors();
      for (ColorInfo color : colors.getColorsList()) {
        System.out.format(
            "fraction: %f%nr: %f, g: %f, b: %f%n",
            color.getPixelFraction(),
            color.getColor().getRed(),
            color.getColor().getGreen(),
            color.getColor().getBlue());
      }
    }
  }
}
/**
 * Analyze an image and extract the features of the image specified by
 * {@code featureTypes}.
 * <p>A feature describes the kind of Cloud Vision analysis one wishes to perform on an
 * image, such as text detection, image labelling, facial detection, etc. A full list of
 * feature types can be found in {@link Feature.Type}.
 * @param imageResource the image one wishes to analyze. The Cloud Vision APIs support
 *     image formats described here: https://cloud.google.com/vision/docs/supported-files
 * @param imageContext the image context used to customize the Vision API request
 * @param featureTypes the types of image analysis to perform on the image
 * @return the results of image analyses
 * @throws CloudVisionException if the image could not be read or if a malformed response
 *     is received from the Cloud Vision APIs
 */
public AnnotateImageResponse analyzeImage(
    Resource imageResource, ImageContext imageContext, Feature.Type... featureTypes) {
  ByteString imgBytes;
  // Close the resource's stream once consumed; ByteString.readFrom does not close it,
  // and the original code leaked it.
  try (InputStream in = imageResource.getInputStream()) {
    imgBytes = ByteString.readFrom(in);
  }
  catch (IOException ex) {
    throw new CloudVisionException("Failed to read image bytes from provided resource.", ex);
  }
  Image image = Image.newBuilder().setContent(imgBytes).build();
  List<Feature> featureList = Arrays.stream(featureTypes)
      .map((featureType) -> Feature.newBuilder().setType(featureType).build())
      .collect(Collectors.toList());
  BatchAnnotateImagesRequest request = BatchAnnotateImagesRequest.newBuilder()
      .addRequests(
          AnnotateImageRequest.newBuilder()
              .addAllFeatures(featureList)
              .setImageContext(imageContext)
              .setImage(image))
      .build();
  BatchAnnotateImagesResponse batchResponse =
      this.imageAnnotatorClient.batchAnnotateImages(request);
  List<AnnotateImageResponse> annotateImageResponses = batchResponse.getResponsesList();
  if (!annotateImageResponses.isEmpty()) {
    return annotateImageResponses.get(0);
  }
  else {
    throw new CloudVisionException(
        "Failed to receive valid response Vision APIs; empty response received.");
  }
}
/**
 * Fetches the blob stored under {@code digest}.
 *
 * <p>Returns {@code null} (after expiring the entry) when the blob cannot be read or its
 * size does not match the digest's declared size.
 */
@Override
public Blob get(Digest digest) {
  try (InputStream in = newStreamInput(getBlobName(digest), /* offset=*/ 0)) {
    ByteString data = ByteString.readFrom(in);
    if (data.size() == digest.getSizeBytes()) {
      return new Blob(data, digest);
    }
    // Treat a size mismatch like any other read failure: fall into the catch below.
    throw new IOException(
        String.format(
            "size/data mismatch: was %d, expected %d", data.size(), digest.getSizeBytes()));
  } catch (IOException ex) {
    expire(digest);
    return null;
  }
}
/**
 * Runs safe-search detection on a local image and prints the likelihood ratings.
 *
 * @param filePath path of the local image file to analyze
 * @throws IOException on errors reading the file or creating the client
 */
public static void detectSafeSearch(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();
  // Read the image inside try-with-resources: ByteString.readFrom does not close the
  // stream, and the original code leaked the FileInputStream.
  ByteString imgBytes;
  try (InputStream in = new FileInputStream(filePath)) {
    imgBytes = ByteString.readFrom(in);
  }
  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.SAFE_SEARCH_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    for (AnnotateImageResponse res : response.getResponsesList()) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }
      // For full list of available annotations, see http://g.co/cloud/vision/docs
      SafeSearchAnnotation annotation = res.getSafeSearchAnnotation();
      System.out.format(
          "adult: %s%nmedical: %s%nspoofed: %s%nviolence: %s%nracy: %s%n",
          annotation.getAdult(),
          annotation.getMedical(),
          annotation.getSpoof(),
          annotation.getViolence(),
          annotation.getRacy());
    }
  }
}
/**
 * Detects logos in a local image and prints each logo's description.
 *
 * @param filePath path of the local image file to analyze
 * @throws IOException on errors reading the file or creating the client
 */
public static void detectLogos(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();
  // Read the image inside try-with-resources: ByteString.readFrom does not close the
  // stream, and the original code leaked the FileInputStream.
  ByteString imgBytes;
  try (InputStream in = new FileInputStream(filePath)) {
    imgBytes = ByteString.readFrom(in);
  }
  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.LOGO_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    for (AnnotateImageResponse res : response.getResponsesList()) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }
      // For full list of available annotations, see http://g.co/cloud/vision/docs
      for (EntityAnnotation annotation : res.getLogoAnnotationsList()) {
        System.out.println(annotation.getDescription());
      }
    }
  }
}
/**
 * Detects labels in a local image and prints every field of each label annotation.
 *
 * @param filePath path of the local image file to analyze
 * @throws IOException on errors reading the file or creating the client
 */
public static void detectLabels(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();
  // Read the image inside try-with-resources: ByteString.readFrom does not close the
  // stream, and the original code leaked the FileInputStream.
  ByteString imgBytes;
  try (InputStream in = new FileInputStream(filePath)) {
    imgBytes = ByteString.readFrom(in);
  }
  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.LABEL_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    for (AnnotateImageResponse res : response.getResponsesList()) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }
      // For full list of available annotations, see http://g.co/cloud/vision/docs
      for (EntityAnnotation annotation : res.getLabelAnnotationsList()) {
        annotation
            .getAllFields()
            .forEach((k, v) -> System.out.format("%s : %s%n", k, v.toString()));
      }
    }
  }
}
/**
 * Decodes a {@link ByteString} from the stream.
 *
 * <p>In whole-stream context the remainder of the stream is consumed; otherwise a varint
 * length prefix delimits the payload.
 */
@Override
public ByteString decode(InputStream inStream, Context context) throws IOException {
  if (!context.isWholeStream) {
    int size = VarInt.decodeInt(inStream);
    // ByteString reads to the end of the input stream, so hand it a stream capped at
    // exactly `size` bytes. Passing the size as the chunk size makes the resulting
    // ByteString contain exactly one chunk.
    return ByteString.readFrom(ByteStreams.limit(inStream, size), size);
  }
  return ByteString.readFrom(inStream);
}
/**
 * Detects localized objects in the specified local image.
 *
 * @param filePath The path to the file to perform localized object detection on.
 * @param out A {@link PrintStream} to write detected objects to.
 * @throws IOException on Input/Output errors or errors creating the client.
 */
public static void detectLocalizedObjects(String filePath, PrintStream out)
    throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();
  // Read the image inside try-with-resources: ByteString.readFrom does not close the
  // stream, and the original code leaked the FileInputStream.
  ByteString imgBytes;
  try (InputStream in = new FileInputStream(filePath)) {
    imgBytes = ByteString.readFrom(in);
  }
  Image img = Image.newBuilder().setContent(imgBytes).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder()
          .addFeatures(Feature.newBuilder().setType(Type.OBJECT_LOCALIZATION))
          .setImage(img)
          .build();
  requests.add(request);
  // Perform the request
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    // Display the results
    for (AnnotateImageResponse res : response.getResponsesList()) {
      for (LocalizedObjectAnnotation entity : res.getLocalizedObjectAnnotationsList()) {
        out.format("Object name: %s\n", entity.getName());
        out.format("Confidence: %s\n", entity.getScore());
        out.format("Normalized Vertices:\n");
        entity
            .getBoundingPoly()
            .getNormalizedVerticesList()
            .forEach(vertex -> out.format("- (%s, %s)\n", vertex.getX(), vertex.getY()));
      }
    }
  }
}
/**
 * Inspects a local JPEG image with Cloud DLP using the default (all) info types, printing
 * each finding's quote, info type, and likelihood.
 *
 * @param projectId Google Cloud project to bill and scope the request to
 * @param inputPath path of the local image file to inspect
 * @throws IOException on errors reading the file or creating the client
 */
static void inspectImageFileAllInfoTypes(String projectId, String inputPath)
    throws IOException {
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (DlpServiceClient dlp = DlpServiceClient.create()) {
    // Specify the content to be inspected. Read via try-with-resources:
    // ByteString.readFrom does not close the stream, and the original leaked it.
    ByteString fileBytes;
    try (InputStream in = new FileInputStream(inputPath)) {
      fileBytes = ByteString.readFrom(in);
    }
    ByteContentItem byteItem =
        ByteContentItem.newBuilder().setType(BytesType.IMAGE_JPEG).setData(fileBytes).build();
    // Construct the Inspect request to be sent by the client.
    // Do not specify the type of info to inspect.
    InspectContentRequest request =
        InspectContentRequest.newBuilder()
            .setParent(LocationName.of(projectId, "global").toString())
            .setItem(ContentItem.newBuilder().setByteItem(byteItem).build())
            .build();
    // Use the client to send the API request.
    InspectContentResponse response = dlp.inspectContent(request);
    // Parse the response and process results.
    System.out.println("Findings: " + response.getResult().getFindingsCount());
    for (Finding f : response.getResult().getFindingsList()) {
      System.out.println("\tQuote: " + f.getQuote());
      System.out.println("\tInfo type: " + f.getInfoType().getName());
      System.out.println("\tLikelihood: " + f.getLikelihood());
    }
  }
}
/**
 * Detects localized objects in the specified local image.
 *
 * @param filePath The path to the file to perform localized object detection on.
 * @throws IOException on Input/Output errors or errors creating the client.
 */
public static void detectLocalizedObjects(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();
  // Read the image inside try-with-resources: ByteString.readFrom does not close the
  // stream, and the original code leaked the FileInputStream.
  ByteString imgBytes;
  try (InputStream in = new FileInputStream(filePath)) {
    imgBytes = ByteString.readFrom(in);
  }
  Image img = Image.newBuilder().setContent(imgBytes).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder()
          .addFeatures(Feature.newBuilder().setType(Type.OBJECT_LOCALIZATION))
          .setImage(img)
          .build();
  requests.add(request);
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    // Perform the request
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    // Display the results
    for (AnnotateImageResponse res : response.getResponsesList()) {
      for (LocalizedObjectAnnotation entity : res.getLocalizedObjectAnnotationsList()) {
        System.out.format("Object name: %s%n", entity.getName());
        System.out.format("Confidence: %s%n", entity.getScore());
        System.out.format("Normalized Vertices:%n");
        entity
            .getBoundingPoly()
            .getNormalizedVerticesList()
            .forEach(vertex -> System.out.format("- (%s, %s)%n", vertex.getX(), vertex.getY()));
      }
    }
  }
}
/**
 * Redacts all text found in a local JPEG image via Cloud DLP and writes the redacted
 * image to {@code outputPath}.
 *
 * @param projectId Google Cloud project to bill and scope the request to
 * @param inputPath path of the local image file to redact
 * @param outputPath path the redacted image is written to
 * @throws IOException on errors reading/writing files or creating the client
 */
static void redactImageFileAllText(String projectId, String inputPath, String outputPath)
    throws IOException {
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (DlpServiceClient dlp = DlpServiceClient.create()) {
    // Specify the content to be redacted. Read via try-with-resources:
    // ByteString.readFrom does not close the stream, and the original leaked it.
    ByteString fileBytes;
    try (InputStream in = new FileInputStream(inputPath)) {
      fileBytes = ByteString.readFrom(in);
    }
    ByteContentItem byteItem =
        ByteContentItem.newBuilder().setType(BytesType.IMAGE_JPEG).setData(fileBytes).build();
    // Enable redaction of all text.
    ImageRedactionConfig imageRedactionConfig =
        ImageRedactionConfig.newBuilder().setRedactAllText(true).build();
    // Construct the Redact request to be sent by the client.
    // Do not specify the type of info to redact.
    RedactImageRequest request =
        RedactImageRequest.newBuilder()
            .setParent(LocationName.of(projectId, "global").toString())
            .setByteItem(byteItem)
            .addImageRedactionConfigs(imageRedactionConfig)
            .build();
    // Use the client to send the API request.
    RedactImageResponse response = dlp.redactImage(request);
    // Parse the response and process results. try-with-resources closes the output
    // stream even when write() throws (the original leaked it in that case).
    try (FileOutputStream redacted = new FileOutputStream(outputPath)) {
      redacted.write(response.getRedactedImage().toByteArray());
    }
    System.out.println("Redacted image written to " + outputPath);
  }
}
/**
 * Runs document text detection over selected pages of a local PDF file and prints the
 * per-symbol, per-word, and per-paragraph text with confidences, followed by the full
 * annotation. Errors are caught and reported to stdout rather than propagated.
 *
 * @param filePath path of the local PDF file to annotate
 */
public static void detectBatchAnnotateFiles(String filePath) {
  // String filePath = "path/to/your_file";
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    // Annotate the first two pages and the last one (max 5 pages)
    // First page starts at 1, and not 0. Last page is -1.
    List<Integer> pages = Arrays.asList(1, 2, -1);
    // Read the file inside try-with-resources: ByteString.readFrom does not close the
    // stream, and the original code leaked the FileInputStream.
    ByteString pdfBytes;
    try (InputStream in = new FileInputStream(filePath)) {
      pdfBytes = ByteString.readFrom(in);
    }
    Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
    // Other supported mime types : 'image/tiff' or 'image/gif'
    InputConfig inputConfig =
        InputConfig.newBuilder().setMimeType("application/pdf").setContent(pdfBytes).build();
    AnnotateFileRequest request =
        AnnotateFileRequest.newBuilder()
            .addFeatures(feat)
            .setInputConfig(inputConfig)
            .addAllPages(pages)
            .build();
    List<AnnotateFileRequest> requests = new ArrayList<>();
    requests.add(request);
    BatchAnnotateFilesRequest batchAnnotateFilesRequest =
        BatchAnnotateFilesRequest.newBuilder().addAllRequests(requests).build();
    ApiFuture<BatchAnnotateFilesResponse> future =
        client.batchAnnotateFilesCallable().futureCall(batchAnnotateFilesRequest);
    BatchAnnotateFilesResponse response = future.get();
    // Getting the first response
    AnnotateFileResponse annotateFileResponse = response.getResponses(0);
    // For full list of available annotations, see http://g.co/cloud/vision/docs
    TextAnnotation textAnnotation = annotateFileResponse.getResponses(0).getFullTextAnnotation();
    for (Page page : textAnnotation.getPagesList()) {
      String pageText = "";
      for (Block block : page.getBlocksList()) {
        String blockText = "";
        for (Paragraph para : block.getParagraphsList()) {
          String paraText = "";
          for (Word word : para.getWordsList()) {
            String wordText = "";
            for (Symbol symbol : word.getSymbolsList()) {
              wordText = wordText + symbol.getText();
              System.out.format(
                  "Symbol text: %s (Confidence: %f)\n", symbol.getText(), symbol.getConfidence());
            }
            System.out.format(
                "Word text: %s (Confidence: %f)\n\n", wordText, word.getConfidence());
            paraText = String.format("%s %s", paraText, wordText);
          }
          // Output Example using Paragraph:
          System.out.println("\nParagraph: \n" + paraText);
          System.out.format("Paragraph Confidence: %f\n", para.getConfidence());
          blockText = blockText + paraText;
        }
        pageText = pageText + blockText;
      }
    }
    System.out.println("\nComplete annotation:");
    System.out.println(textAnnotation.getText());
  } catch (Exception e) {
    System.out.println("Error during detectPdfText: \n" + e.toString());
  }
}
/**
 * Redacts likely phone numbers, email addresses, and credit card numbers from a local
 * image via Cloud DLP and writes the redacted image to {@code outputPath}.
 *
 * @param projectId Google Cloud project to bill and scope the request to
 * @param inputPath path of the local image file to redact
 * @param outputPath path the redacted image is written to
 * @throws IOException on errors reading/writing files or creating the client
 */
static void redactImageFile(String projectId, String inputPath, String outputPath)
    throws IOException {
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (DlpServiceClient dlp = DlpServiceClient.create()) {
    // Specify the content to be inspected. Read via try-with-resources:
    // ByteString.readFrom does not close the stream, and the original leaked it.
    ByteString fileBytes;
    try (InputStream in = new FileInputStream(inputPath)) {
      fileBytes = ByteString.readFrom(in);
    }
    ByteContentItem byteItem =
        ByteContentItem.newBuilder().setType(BytesType.IMAGE).setData(fileBytes).build();
    // Specify the type of info and likelihood necessary to redact.
    List<InfoType> infoTypes = new ArrayList<>();
    // See https://cloud.google.com/dlp/docs/infotypes-reference for complete list of info types
    for (String typeName : new String[] {"PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD_NUMBER"}) {
      infoTypes.add(InfoType.newBuilder().setName(typeName).build());
    }
    InspectConfig config =
        InspectConfig.newBuilder()
            .addAllInfoTypes(infoTypes)
            .setMinLikelihood(Likelihood.LIKELY)
            .build();
    // Construct the Redact request to be sent by the client.
    RedactImageRequest request =
        RedactImageRequest.newBuilder()
            .setParent(LocationName.of(projectId, "global").toString())
            .setByteItem(byteItem)
            .setInspectConfig(config)
            .build();
    // Use the client to send the API request.
    RedactImageResponse response = dlp.redactImage(request);
    // Parse the response and process results. try-with-resources closes the output
    // stream even when write() throws (the original leaked it in that case).
    try (FileOutputStream redacted = new FileOutputStream(outputPath)) {
      redacted.write(response.getRedactedImage().toByteArray());
    }
    System.out.println("Redacted image written to " + outputPath);
  }
}
/**
 * Inspects a local JPEG image with Cloud DLP for a fixed list of info types (SSN, email,
 * phone number), printing each finding's quote, info type, and likelihood.
 *
 * @param projectId Google Cloud project to bill and scope the request to
 * @param inputPath path of the local image file to inspect
 * @throws IOException on errors reading the file or creating the client
 */
static void inspectImageFileListedInfoTypes(String projectId, String inputPath)
    throws IOException {
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (DlpServiceClient dlp = DlpServiceClient.create()) {
    // Specify the content to be inspected. Read via try-with-resources:
    // ByteString.readFrom does not close the stream, and the original leaked it.
    ByteString fileBytes;
    try (InputStream in = new FileInputStream(inputPath)) {
      fileBytes = ByteString.readFrom(in);
    }
    ByteContentItem byteItem =
        ByteContentItem.newBuilder().setType(BytesType.IMAGE_JPEG).setData(fileBytes).build();
    // Specify the type of info the inspection will look for.
    List<InfoType> infoTypes = new ArrayList<>();
    // See https://cloud.google.com/dlp/docs/infotypes-reference for complete list of info types
    for (String typeName :
        new String[] {"US_SOCIAL_SECURITY_NUMBER", "EMAIL_ADDRESS", "PHONE_NUMBER"}) {
      infoTypes.add(InfoType.newBuilder().setName(typeName).build());
    }
    // Construct the configuration for the Inspect request.
    InspectConfig inspectConfig =
        InspectConfig.newBuilder()
            .addAllInfoTypes(infoTypes)
            .build();
    // Construct the Inspect request to be sent by the client.
    InspectContentRequest request =
        InspectContentRequest.newBuilder()
            .setParent(LocationName.of(projectId, "global").toString())
            .setItem(ContentItem.newBuilder().setByteItem(byteItem).build())
            .setInspectConfig(inspectConfig)
            .build();
    // Use the client to send the API request.
    InspectContentResponse response = dlp.inspectContent(request);
    // Parse the response and process results.
    System.out.println("Findings: " + response.getResult().getFindingsCount());
    for (Finding f : response.getResult().getFindingsList()) {
      System.out.println("\tQuote: " + f.getQuote());
      System.out.println("\tInfo type: " + f.getInfoType().getName());
      System.out.println("\tLikelihood: " + f.getLikelihood());
    }
  }
}
/**
 * Inspects a local image with Cloud DLP for phone numbers, email addresses, and credit
 * card numbers, printing each finding's quote, info type, and likelihood.
 *
 * @param projectId Google Cloud project to bill and scope the request to
 * @param filePath path of the local image file to inspect
 * @throws IOException on errors reading the file or creating the client
 */
public static void inspectImageFile(String projectId, String filePath) throws IOException {
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (DlpServiceClient dlp = DlpServiceClient.create()) {
    // Specify the type and content to be inspected. Read via try-with-resources:
    // ByteString.readFrom does not close the stream, and the original leaked it.
    ByteString fileBytes;
    try (InputStream in = new FileInputStream(filePath)) {
      fileBytes = ByteString.readFrom(in);
    }
    ByteContentItem byteItem =
        ByteContentItem.newBuilder().setType(BytesType.IMAGE).setData(fileBytes).build();
    ContentItem item = ContentItem.newBuilder().setByteItem(byteItem).build();
    // Specify the type of info the inspection will look for.
    List<InfoType> infoTypes = new ArrayList<>();
    // See https://cloud.google.com/dlp/docs/infotypes-reference for complete list of info types
    for (String typeName : new String[]{"PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD_NUMBER"}) {
      infoTypes.add(InfoType.newBuilder().setName(typeName).build());
    }
    // Construct the configuration for the Inspect request.
    InspectConfig config =
        InspectConfig.newBuilder()
            .addAllInfoTypes(infoTypes)
            .setIncludeQuote(true)
            .build();
    // Construct the Inspect request to be sent by the client.
    InspectContentRequest request =
        InspectContentRequest.newBuilder()
            .setParent(LocationName.of(projectId, "global").toString())
            .setItem(item)
            .setInspectConfig(config)
            .build();
    // Use the client to send the API request.
    InspectContentResponse response = dlp.inspectContent(request);
    // Parse the response and process results.
    System.out.println("Findings: " + response.getResult().getFindingsCount());
    for (Finding f : response.getResult().getFindingsList()) {
      System.out.println("\tQuote: " + f.getQuote());
      System.out.println("\tInfo type: " + f.getInfoType().getName());
      System.out.println("\tLikelihood: " + f.getLikelihood());
    }
  }
}
/**
 * Redacts SSNs, email addresses, and phone numbers from a local JPEG image via Cloud DLP,
 * painting each info type with a distinct color, and writes the result to
 * {@code outputPath}.
 *
 * @param projectId Google Cloud project to bill and scope the request to
 * @param inputPath path of the local image file to redact
 * @param outputPath path the redacted image is written to
 * @throws IOException on errors reading/writing files or creating the client
 */
static void redactImageFileColoredInfoTypes(String projectId, String inputPath, String outputPath)
    throws IOException {
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (DlpServiceClient dlp = DlpServiceClient.create()) {
    // Specify the content to be redacted. Read via try-with-resources:
    // ByteString.readFrom does not close the stream, and the original leaked it.
    ByteString fileBytes;
    try (InputStream in = new FileInputStream(inputPath)) {
      fileBytes = ByteString.readFrom(in);
    }
    ByteContentItem byteItem =
        ByteContentItem.newBuilder().setType(BytesType.IMAGE_JPEG).setData(fileBytes).build();
    // Define types of info to redact associate each one with a different color.
    // See https://cloud.google.com/dlp/docs/infotypes-reference for complete list of info types
    ImageRedactionConfig ssnRedactionConfig = ImageRedactionConfig.newBuilder()
        .setInfoType(InfoType.newBuilder().setName("US_SOCIAL_SECURITY_NUMBER").build())
        .setRedactionColor(Color.newBuilder().setRed(.3f).setGreen(.1f).setBlue(.6f).build())
        .build();
    ImageRedactionConfig emailRedactionConfig = ImageRedactionConfig.newBuilder()
        .setInfoType(InfoType.newBuilder().setName("EMAIL_ADDRESS").build())
        .setRedactionColor(Color.newBuilder().setRed(.5f).setGreen(.5f).setBlue(1).build())
        .build();
    ImageRedactionConfig phoneRedactionConfig = ImageRedactionConfig.newBuilder()
        .setInfoType(InfoType.newBuilder().setName("PHONE_NUMBER").build())
        .setRedactionColor(Color.newBuilder().setRed(1).setGreen(0).setBlue(.6f).build())
        .build();
    // Create collection of all redact configurations.
    List<ImageRedactionConfig> imageRedactionConfigs =
        Arrays.asList(ssnRedactionConfig, emailRedactionConfig, phoneRedactionConfig);
    // List types of info to search for.
    InspectConfig config =
        InspectConfig.newBuilder()
            .addAllInfoTypes(imageRedactionConfigs.stream()
                .map(ImageRedactionConfig::getInfoType)
                .collect(Collectors.toList()))
            .build();
    // Construct the Redact request to be sent by the client.
    RedactImageRequest request =
        RedactImageRequest.newBuilder()
            .setParent(LocationName.of(projectId, "global").toString())
            .setByteItem(byteItem)
            .addAllImageRedactionConfigs(imageRedactionConfigs)
            .setInspectConfig(config)
            .build();
    // Use the client to send the API request.
    RedactImageResponse response = dlp.redactImage(request);
    // Parse the response and process results. try-with-resources closes the output
    // stream even when write() throws (the original leaked it in that case).
    try (FileOutputStream redacted = new FileOutputStream(outputPath)) {
      redacted.write(response.getRedactedImage().toByteArray());
    }
    System.out.println("Redacted image written to " + outputPath);
  }
}
/**
 * Redacts SSNs, email addresses, and phone numbers from a local JPEG image via Cloud DLP
 * and writes the redacted image to {@code outputPath}.
 *
 * @param projectId Google Cloud project to bill and scope the request to
 * @param inputPath path of the local image file to redact
 * @param outputPath path the redacted image is written to
 * @throws IOException on errors reading/writing files or creating the client
 */
static void redactImageFileListedInfoTypes(String projectId, String inputPath, String outputPath)
    throws IOException {
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (DlpServiceClient dlp = DlpServiceClient.create()) {
    // Specify the content to be redacted. Read via try-with-resources:
    // ByteString.readFrom does not close the stream, and the original leaked it.
    ByteString fileBytes;
    try (InputStream in = new FileInputStream(inputPath)) {
      fileBytes = ByteString.readFrom(in);
    }
    ByteContentItem byteItem =
        ByteContentItem.newBuilder().setType(BytesType.IMAGE_JPEG).setData(fileBytes).build();
    // Specify the types of info necessary to redact.
    List<InfoType> infoTypes = new ArrayList<>();
    // See https://cloud.google.com/dlp/docs/infotypes-reference for complete list of info types
    for (String typeName :
        new String[] {"US_SOCIAL_SECURITY_NUMBER", "EMAIL_ADDRESS", "PHONE_NUMBER"}) {
      infoTypes.add(InfoType.newBuilder().setName(typeName).build());
    }
    InspectConfig inspectConfig =
        InspectConfig.newBuilder()
            .addAllInfoTypes(infoTypes)
            .build();
    // Prepare redaction configs.
    List<ImageRedactionConfig> imageRedactionConfigs = infoTypes.stream()
        .map(infoType -> ImageRedactionConfig.newBuilder().setInfoType(infoType).build())
        .collect(Collectors.toList());
    // Construct the Redact request to be sent by the client.
    RedactImageRequest request =
        RedactImageRequest.newBuilder()
            .setParent(LocationName.of(projectId, "global").toString())
            .setByteItem(byteItem)
            .addAllImageRedactionConfigs(imageRedactionConfigs)
            .setInspectConfig(inspectConfig)
            .build();
    // Use the client to send the API request.
    RedactImageResponse response = dlp.redactImage(request);
    // Parse the response and process results. try-with-resources closes the output
    // stream even when write() throws (the original leaked it in that case).
    try (FileOutputStream redacted = new FileOutputStream(outputPath)) {
      redacted.write(response.getRedactedImage().toByteArray());
    }
    System.out.println("Redacted image written to " + outputPath);
  }
}
/**
 * Inspects a local UTF-8 text file with Cloud DLP for phone numbers, email addresses, and
 * credit card numbers, printing each finding's quote, info type, and likelihood.
 *
 * @param projectId Google Cloud project to bill and scope the request to
 * @param filePath path of the local text file to inspect
 * @throws IOException on errors reading the file or creating the client
 */
public static void inspectTextFile(String projectId, String filePath) throws IOException {
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (DlpServiceClient dlp = DlpServiceClient.create()) {
    // Specify the type and content to be inspected. Read via try-with-resources:
    // ByteString.readFrom does not close the stream, and the original leaked it.
    ByteString fileBytes;
    try (InputStream in = new FileInputStream(filePath)) {
      fileBytes = ByteString.readFrom(in);
    }
    ByteContentItem byteItem =
        ByteContentItem.newBuilder().setType(BytesType.TEXT_UTF8).setData(fileBytes).build();
    ContentItem item = ContentItem.newBuilder().setByteItem(byteItem).build();
    // Specify the type of info the inspection will look for.
    List<InfoType> infoTypes = new ArrayList<>();
    // See https://cloud.google.com/dlp/docs/infotypes-reference for complete list of info types
    for (String typeName : new String[]{"PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD_NUMBER"}) {
      infoTypes.add(InfoType.newBuilder().setName(typeName).build());
    }
    // Construct the configuration for the Inspect request.
    InspectConfig config =
        InspectConfig.newBuilder()
            .addAllInfoTypes(infoTypes)
            .setIncludeQuote(true)
            .build();
    // Construct the Inspect request to be sent by the client.
    InspectContentRequest request =
        InspectContentRequest.newBuilder()
            .setParent(LocationName.of(projectId, "global").toString())
            .setItem(item)
            .setInspectConfig(config)
            .build();
    // Use the client to send the API request.
    InspectContentResponse response = dlp.inspectContent(request);
    // Parse the response and process results
    System.out.println("Findings: " + response.getResult().getFindingsCount());
    for (Finding f : response.getResult().getFindingsList()) {
      System.out.println("\tQuote: " + f.getQuote());
      System.out.println("\tInfo type: " + f.getInfoType().getName());
      System.out.println("\tLikelihood: " + f.getLikelihood());
    }
  }
}