Below are example usages of android.graphics.ImageFormat#JPEG, collected from open-source projects; follow the GitHub links to view the full source.
@CalledByNative
public int getColorspace() {
switch (mImageFormat){
case ImageFormat.YV12:
return AndroidImageFormatList.ANDROID_IMAGEFORMAT_YV12;
case ImageFormat.NV21:
return AndroidImageFormatList.ANDROID_IMAGEFORMAT_NV21;
case ImageFormat.YUY2:
return AndroidImageFormatList.ANDROID_IMAGEFORMAT_YUY2;
case ImageFormat.NV16:
return AndroidImageFormatList.ANDROID_IMAGEFORMAT_NV16;
case ImageFormat.JPEG:
return AndroidImageFormatList.ANDROID_IMAGEFORMAT_JPEG;
case ImageFormat.RGB_565:
return AndroidImageFormatList.ANDROID_IMAGEFORMAT_RGB_565;
case ImageFormat.UNKNOWN:
default:
return AndroidImageFormatList.ANDROID_IMAGEFORMAT_UNKNOWN;
}
}
private boolean setAvailableFormats(int[] value) {
    if (value == null) {
        // Let setBase() handle the null value case.
        return false;
    }
    // Replace the public JPEG constant with the native HAL value before storing.
    int[] newValues = new int[value.length];
    for (int i = 0; i < value.length; i++) {
        newValues[i] = value[i];
        if (value[i] == ImageFormat.JPEG) {
            newValues[i] = NATIVE_JPEG_FORMAT;
        }
    }
    setBase(CameraCharacteristics.SCALER_AVAILABLE_FORMATS, newValues);
    return true;
}
private int pixelFormatForCameraFormat(String format) {
if (format == null)
return ImageFormat.UNKNOWN;
if (format.equals(PIXEL_FORMAT_YUV422SP))
return ImageFormat.NV16;
if (format.equals(PIXEL_FORMAT_YUV420SP))
return ImageFormat.NV21;
if (format.equals(PIXEL_FORMAT_YUV422I))
return ImageFormat.YUY2;
if (format.equals(PIXEL_FORMAT_YUV420P))
return ImageFormat.YV12;
if (format.equals(PIXEL_FORMAT_RGB565))
return ImageFormat.RGB_565;
if (format.equals(PIXEL_FORMAT_JPEG))
return ImageFormat.JPEG;
return ImageFormat.UNKNOWN;
}
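The PIXEL_FORMAT_* strings above are not defined in the snippet; as a minimal sketch, they can be assumed to mirror the legacy camera HAL's format names (an assumption, matching the hidden android.hardware.Camera.Parameters constants), with a hypothetical call site below.
private static final String PIXEL_FORMAT_YUV422SP = "yuv422sp";
private static final String PIXEL_FORMAT_YUV420SP = "yuv420sp";
private static final String PIXEL_FORMAT_YUV422I = "yuv422i";
private static final String PIXEL_FORMAT_YUV420P = "yuv420p";
private static final String PIXEL_FORMAT_RGB565 = "rgb565";
private static final String PIXEL_FORMAT_JPEG = "jpeg";
// Hypothetical call site: translate the legacy preview-format string into an ImageFormat int.
int format = pixelFormatForCameraFormat(camera.getParameters().get("preview-format"));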
private static String imageFormatToString(int imageFormat)
{
switch (imageFormat)
{
case ImageFormat.JPEG:
return "JPEG";
case ImageFormat.NV16:
return "NV16";
case ImageFormat.NV21:
return "NV21";
case ImageFormat.RAW10:
return "RAW10";
case ImageFormat.RAW_SENSOR:
return "RAW_SENSOR";
case ImageFormat.RGB_565:
return "RGB_565";
case ImageFormat.UNKNOWN:
return "UNKNOWN";
case ImageFormat.YUV_420_888:
return "YUV_420_888";
case ImageFormat.YUY2:
return "YUY2";
case ImageFormat.YV12:
return "YV12";
}
return Integer.toString(imageFormat);
}
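A minimal usage sketch (the open Camera instance and TAG are assumptions): log a human-readable name for every preview format the legacy API reports.
// Hypothetical usage: print the supported preview formats of an open camera.
for (Integer fmt : camera.getParameters().getSupportedPreviewFormats()) {
    Log.d(TAG, "supported preview format: " + imageFormatToString(fmt));
}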
/**
 * Sets the preview image format.
 * <p>
 * The NV21 format is not supported.
 */
public void setPreviewImageFormat(int imageFormat) {
if (imageFormat == ImageFormat.JPEG) {
Log.e(TAG, "JPEG 格式占用缓存过多,会导致预览卡顿,不建议使用该编码进行预览,最好使用其他编码格式");
}
Collection<CameraConfig> configs = cameraConfig.values();
for (CameraConfig config : configs) {
config.setImageFormat(imageFormat);
}
}
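A hedged usage sketch (the enclosing manager instance is an assumption): switch every camera config to a YUV preview format, as the warning above recommends.
// Hypothetical call site; YUV_420_888 avoids the JPEG preview stutter warned about above.
manager.setPreviewImageFormat(ImageFormat.YUV_420_888);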
/**
* Given an image reader, this extracts the final image. If the image in the
* reader is JPEG, we extract and return it as is. If the image is YUV, we
* convert it to JPEG and return the result.
*
* @param image the image we got from the image reader.
* @return A valid JPEG image.
*/
private static byte[] acquireJpegBytesAndClose(Image image)
{
ByteBuffer buffer;
if (image.getFormat() == ImageFormat.JPEG)
{
Image.Plane plane0 = image.getPlanes()[0];
buffer = plane0.getBuffer();
} else if (image.getFormat() == ImageFormat.YUV_420_888)
{
buffer = ByteBuffer.allocateDirect(image.getWidth() * image.getHeight() * 3);
Log.v(TAG, "Compressing JPEG with software encoder.");
int numBytes = JpegUtilNative.compressJpegFromYUV420Image(new AndroidImageProxy(image), buffer,
JPEG_QUALITY);
if (numBytes < 0)
{
throw new RuntimeException("Error compressing jpeg.");
}
buffer.limit(numBytes);
} else
{
throw new RuntimeException("Unsupported image format.");
}
byte[] imageBytes = new byte[buffer.remaining()];
buffer.get(imageBytes);
buffer.rewind();
image.close();
return imageBytes;
}
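A minimal sketch of how this might be driven from an ImageReader (the reader and background handler setup are assumptions):
// Hypothetical listener: pull the next frame and convert it; the helper closes the image.
imageReader.setOnImageAvailableListener(reader -> {
    Image image = reader.acquireNextImage();
    if (image != null) {
        byte[] jpegBytes = acquireJpegBytesAndClose(image); // closes 'image' internally
        // ... hand jpegBytes off for saving ...
    }
}, backgroundHandler);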
private static double getFormatCost(int format) {
switch (format) {
case ImageFormat.UNKNOWN:
return 1.0;
case ImageFormat.NV21:
return 0.8;
case ImageFormat.NV16:
// This format has never been seen in the wild, but is compatible as we only care
// about the Y channel, so allow it.
return 0.8;
case ImageFormat.YV12:
case ImageFormat.YUY2:
case ImageFormat.YUV_420_888:
return 0.5; // pure guesswork - but it IS faster than JPEG
case ImageFormat.YUV_422_888:
case ImageFormat.YUV_444_888:
    // only vary from yuv_420_888 in chroma-subsampling, which I'm guessing
    // doesn't affect the luminance much
    // (see https://en.wikipedia.org/wiki/Chroma_subsampling)
    return 0.5;
case ImageFormat.FLEX_RGB_888:
case ImageFormat.FLEX_RGBA_8888:
case ImageFormat.RGB_565:
return 0.8; // pure guesswork
case ImageFormat.JPEG:
return 1.0; // duh...?
case ImageFormat.RAW_SENSOR:
case ImageFormat.RAW10:
case ImageFormat.RAW12:
return 0.4; // pure guesswork - but any RAW format must be optimal (wrt capture speed)?
case ImageFormat.DEPTH16:
case ImageFormat.DEPTH_POINT_CLOUD:
return 1.5; // sounds terribly complicated - but I'm just guessing...
//ImageFormat.Y8:
//ImageFormat.Y16:
}
return 1.0;
}
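A sketch of how these costs might be used (the StreamConfigurationMap lookup is an assumption): scan the formats a camera can output and keep the cheapest one.
// Hypothetical selection loop over a camera's output formats.
int bestFormat = ImageFormat.UNKNOWN;
double bestCost = Double.MAX_VALUE;
for (int fmt : streamConfigurationMap.getOutputFormats()) {
    double cost = getFormatCost(fmt);
    if (cost < bestCost) {
        bestCost = cost;
        bestFormat = fmt;
    }
}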
public CameraConfig(String cameraId, StreamConfigurationMap map, @Nullable View view, OnImageAvailableListener listener, Handler handler) {
if (view != null) {
this.view = view;
if (view instanceof TextureView) {
type = TYPE_TEXTURE_VIEW;
} else if (view instanceof SurfaceView) {
type = TYPE_SURFACE_VIEW;
} else {
throw new IllegalArgumentException("Unsupported view type");
}
}
this.streamConfigurationMap = map;
this.cameraId = cameraId;
this.imageAvailableListener = listener;
this.handler = handler;
int format = ImageFormat.JPEG;
if (map.isOutputSupportedFor(ImageFormat.YUV_420_888)) {
format = ImageFormat.YUV_420_888;
Log.i(TAG, "support YUV_420_888");
} else if (map.isOutputSupportedFor(ImageFormat.YV12)) {
format = ImageFormat.YV12;
}
Log.e(TAG, "current ImageFormat = " + format);
largest = calculationSize(map);
Log.d(TAG, "width = " + largest.getWidth() + " height = " + largest.getHeight());
//Three-plane YUV only: YV12 or YUV_420_888; NV21 is not supported here.
imageReader = ImageReader.newInstance(largest.getWidth(), largest.getHeight(), format, 1);
imageReader.setOnImageAvailableListener(imageAvailableListener, handler);
this.cameraStateCallback = new CameraDevice.StateCallback() {
@Override
public void onOpened(@NonNull CameraDevice camera) {
cameraDevice = camera;
createCameraSession();
}
@Override
public void onDisconnected(@NonNull CameraDevice camera) {
camera.close();
cameraDevice = null;
}
@Override
public void onError(@NonNull CameraDevice camera, int error) {
camera.close();
cameraDevice = null;
Log.e(TAG, _error[error]);
}
};
}
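A hedged construction sketch (the CameraManager lookup, TextureView, and background handler are assumptions, not part of the snippet):
// Hypothetical call site; getCameraCharacteristics() throws CameraAccessException.
try {
    StreamConfigurationMap map = cameraManager.getCameraCharacteristics(cameraId)
            .get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
    CameraConfig config = new CameraConfig(cameraId, map, textureView,
            reader -> {
                Image image = reader.acquireLatestImage();
                if (image != null) {
                    image.close(); // a real listener would process the frame first
                }
            },
            backgroundHandler);
} catch (CameraAccessException e) {
    Log.e(TAG, "Failed to read camera characteristics", e);
}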
public static byte[] ToJpeg(byte[] imageData, int imageFormat, int width, int height)
{
if (imageData == null)
return null;
switch (imageFormat)
{
case ImageFormat.NV21:
case ImageFormat.YUY2:
YuvImage img = new YuvImage(imageData, imageFormat, width, height, null);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
int quality = 20; // JPEG quality (0-100); lower values shrink the output at the cost of fidelity
img.compressToJpeg(new Rect(0, 0, width, height), quality, baos); // lossy compression happens here
return baos.toByteArray();
case ImageFormat.YUV_420_888:
return JpegFromYuv420888(imageData, imageFormat, width, height);
case ImageFormat.UNKNOWN:
return null;
case ImageFormat.NV16:
// Source: http://www.programcreek.com/java-api-examples/index.php?source_dir=Roid-Library-master/src/com/rincliu/library/common/persistence/zxing/camera/CameraManager.java
// This format has never been seen in the wild, but is compatible as we only care
// about the Y channel, so allow it.
case ImageFormat.YV12:
// source: https://github.com/evopark/tiqr-android/blob/master/src/main/java/de/evopark/tiqr/android/processing/ZxingQrScanner.java
case ImageFormat.YUV_422_888:
case ImageFormat.YUV_444_888:
    // only vary from yuv_420_888 in chroma-subsampling, which I'm guessing
    // doesn't affect the luminance much
    // (see https://en.wikipedia.org/wiki/Chroma_subsampling)
    return null;//new PlanarYUVLuminanceSource(data, width, height, 0, 0, width, height, false);
case ImageFormat.FLEX_RGB_888:
case ImageFormat.FLEX_RGBA_8888:
return null;//new RGBLuminanceSource(width, height, uncompress(data, width, height));// PlanarYUVLuminanceSource(bytes, width, height, 0, 0, width, height, false);
case ImageFormat.JPEG:
// Tried and tested myself
return null;//new RGBLuminanceSource(width, height, uncompress(data, width, height));// PlanarYUVLuminanceSource(bytes, width, height, 0, 0, width, height, false);
case ImageFormat.RGB_565:
return null;//new RGB565(width, height, uncompress(data, width, height));// PlanarYUVLuminanceSource(bytes, width, height, 0, 0, width, height, false);
case ImageFormat.RAW_SENSOR:
case ImageFormat.RAW10:
case ImageFormat.RAW12:
case ImageFormat.DEPTH16:
case ImageFormat.DEPTH_POINT_CLOUD:
//ImageFormat.Y8:
//ImageFormat.Y16:
return null;
default:
throw new IllegalArgumentException("No support for image format " + imageFormat);
}
}
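A minimal usage sketch from the legacy preview callback, where frames arrive as NV21 by default (the open Camera instance is an assumption):
// Hypothetical preview callback: compress each NV21 frame to JPEG.
camera.setPreviewCallback((data, cam) -> {
    Camera.Size size = cam.getParameters().getPreviewSize();
    byte[] jpeg = ToJpeg(data, ImageFormat.NV21, size.width, size.height);
    // ... persist or transmit 'jpeg' ...
});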
private void initCamera(boolean front) {
CameraInfo info = null;
ArrayList<CameraInfo> cameraInfos = CameraController.getInstance().getCameras();
if (cameraInfos == null) {
return;
}
for (int a = 0; a < cameraInfos.size(); a++) {
CameraInfo cameraInfo = cameraInfos.get(a);
if (isFrontface && cameraInfo.frontCamera != 0 || !isFrontface && cameraInfo.frontCamera == 0) {
info = cameraInfo;
break;
}
}
if (info == null) {
return;
}
float size4to3 = 4.0f / 3.0f;
float size16to9 = 16.0f / 9.0f;
float screenSize = (float) Math.max(AndroidUtilities.displaySize.x, AndroidUtilities.displaySize.y) / Math.min(AndroidUtilities.displaySize.x, AndroidUtilities.displaySize.y);
Size aspectRatio;
int wantedWidth;
int wantedHeight;
if (Math.abs(screenSize - size4to3) < 0.1f) {
aspectRatio = new Size(4, 3);
wantedWidth = 1280;
wantedHeight = 960;
} else {
aspectRatio = new Size(16, 9);
wantedWidth = 1280;
wantedHeight = 720;
}
if (textureView.getWidth() > 0 && textureView.getHeight() > 0) {
int width = Math.min(AndroidUtilities.displaySize.x, AndroidUtilities.displaySize.y);
int height = width * aspectRatio.getHeight() / aspectRatio.getWidth();
previewSize = CameraController.chooseOptimalSize(info.getPreviewSizes(), width, height, aspectRatio);
}
Size pictureSize = CameraController.chooseOptimalSize(info.getPictureSizes(), wantedWidth, wantedHeight, aspectRatio);
if (pictureSize.getWidth() >= 1280 && pictureSize.getHeight() >= 1280) {
if (Math.abs(screenSize - size4to3) < 0.1f) {
aspectRatio = new Size(3, 4);
} else {
aspectRatio = new Size(9, 16);
}
Size pictureSize2 = CameraController.chooseOptimalSize(info.getPictureSizes(), wantedHeight, wantedWidth, aspectRatio);
if (pictureSize2.getWidth() < 1280 || pictureSize2.getHeight() < 1280) {
pictureSize = pictureSize2;
}
}
SurfaceTexture surfaceTexture = textureView.getSurfaceTexture();
if (previewSize != null && surfaceTexture != null) {
surfaceTexture.setDefaultBufferSize(previewSize.getWidth(), previewSize.getHeight());
cameraSession = new CameraSession(info, previewSize, pictureSize, ImageFormat.JPEG);
CameraController.getInstance().open(cameraSession, surfaceTexture, new Runnable() {
@Override
public void run() {
if (cameraSession != null) {
cameraSession.setInitied();
}
checkPreviewMatrix();
}
}, new Runnable() {
@Override
public void run() {
if (delegate != null) {
delegate.onCameraCreated(cameraSession.cameraInfo.camera);
}
}
});
}
}
private String formatToString(int format) {
switch (format) {
case ImageFormat.YV12:
return "YV12";
case ImageFormat.YUV_420_888:
return "YUV_420_888";
case ImageFormat.NV21:
return "NV21";
case ImageFormat.NV16:
return "NV16";
case PixelFormat.RGB_565:
return "RGB_565";
case PixelFormat.RGBA_8888:
return "RGBA_8888";
case PixelFormat.RGBX_8888:
return "RGBX_8888";
case PixelFormat.RGB_888:
return "RGB_888";
case ImageFormat.JPEG:
return "JPEG";
case ImageFormat.YUY2:
return "YUY2";
case ImageFormat.Y8:
return "Y8";
case ImageFormat.Y16:
return "Y16";
case ImageFormat.RAW_SENSOR:
return "RAW_SENSOR";
case ImageFormat.RAW_PRIVATE:
return "RAW_PRIVATE";
case ImageFormat.RAW10:
return "RAW10";
case ImageFormat.DEPTH16:
return "DEPTH16";
case ImageFormat.DEPTH_POINT_CLOUD:
return "DEPTH_POINT_CLOUD";
case ImageFormat.RAW_DEPTH:
return "RAW_DEPTH";
case ImageFormat.PRIVATE:
return "PRIVATE";
default:
return "UNKNOWN";
}
}
private void processStill(final Frame frame, String basename) {
File captureDir = new File(context.getExternalFilesDir(null), basename);
if (!captureDir.exists() && !captureDir.mkdirs()) {
throw new IllegalStateException("Could not create dir " + captureDir);
}
// Timestamp in the local domain, i.e. time since boot in nanoseconds.
long localSensorTimestampNs = frame.result.get(CaptureResult.SENSOR_TIMESTAMP);
// Timestamp in the leader domain, i.e. synchronized time on the leader device in nanoseconds.
long syncedSensorTimestampNs =
timeDomainConverter.leaderTimeForLocalTimeNs(localSensorTimestampNs);
// Use syncedSensorTimestamp in milliseconds for filenames.
long syncedSensorTimestampMs = (long) TimeUtils.nanosToMillis(syncedSensorTimestampNs);
String filenameTimeString = getTimeStr(syncedSensorTimestampMs);
// Save timing metadata.
{
String metaFilename = "sync_metadata_" + filenameTimeString + ".txt";
File metaFile = new File(captureDir, metaFilename);
saveTimingMetadata(syncedSensorTimestampNs, localSensorTimestampNs, metaFile);
}
for (int i = 0; i < frame.output.images.size(); ++i) {
Image image = frame.output.images.get(i);
int format = image.getFormat();
if (format == ImageFormat.RAW_SENSOR) {
// Note: while using DngCreator works, streaming RAW_SENSOR is too slow.
Log.e(TAG, "RAW_SENSOR saving not implemented!");
} else if (format == ImageFormat.JPEG) {
Log.e(TAG, "JPEG saving not implemented!");
} else if (format == ImageFormat.RAW10) {
Log.e(TAG, "RAW10 saving not implemented!");
} else if (format == ImageFormat.YUV_420_888) {
// TODO(jiawen): We know that on Pixel devices, the YUV format is NV21, consisting of a luma
// plane and separate interleaved chroma planes.
// <--w-->
// ^ YYYYYYYZZZ
// | YYYYYYYZZZ
// h ...
// | ...
// v YYYYYYYZZZ
//
// <--w-->
// ^ VUVUVUVZZZZZ
// | VUVUVUVZZZZZ
// h/2 ...
// | ...
// v VUVUVUVZZZZZ
//
// where Z is padding bytes.
//
// TODO(jiawen): To determine if it's NV12 vs NV21, we need JNI to compare the buffer start
// addresses.
context.notifyCapturing("img_" + filenameTimeString);
// Save NV21 raw + metadata.
{
File nv21File = new File(captureDir, "img_" + filenameTimeString + ".nv21");
File nv21MetadataFile =
new File(captureDir, "nv21_metadata_" + filenameTimeString + ".txt");
saveNv21(image, nv21File, nv21MetadataFile);
context.notifyCaptured(nv21File.getName());
}
// TODO(samansari): Make save JPEG a checkbox in the UI.
if (saveJpgFromNv21) {
YuvImage yuvImage = yuvImageFromNv21Image(image);
File jpgFile = new File(captureDir, "img_" + filenameTimeString + ".jpg");
// Push saving JPEG onto queue to let the frame close faster, necessary for some devices.
handler.post(() -> saveJpg(yuvImage, jpgFile));
}
} else {
Log.e(TAG, String.format("Cannot save unsupported image format: %d", image.getFormat()));
}
}
frame.close();
}
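For the NV21 layout sketched in the comments above, the tightly packed buffer size (ignoring the per-row padding Z bytes) can be computed as below; this helper is a sketch, not part of the original saver.
// Packed NV21: a full-resolution Y plane plus one interleaved VU plane
// at half resolution in each dimension.
static int packedNv21Size(int width, int height) {
    int ySize = width * height;
    int vuSize = 2 * ((width + 1) / 2) * ((height + 1) / 2);
    return ySize + vuSize;
}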
public void initCamera() {
CameraInfo info = null;
ArrayList<CameraInfo> cameraInfos = CameraController.getInstance().getCameras();
if (cameraInfos == null) {
return;
}
for (int a = 0; a < cameraInfos.size(); a++) {
CameraInfo cameraInfo = cameraInfos.get(a);
if (isFrontface && cameraInfo.frontCamera != 0 || !isFrontface && cameraInfo.frontCamera == 0) {
info = cameraInfo;
break;
}
}
if (info == null) {
return;
}
float size4to3 = 4.0f / 3.0f;
float size16to9 = 16.0f / 9.0f;
float screenSize = (float) Math.max(AndroidUtilities.displaySize.x, AndroidUtilities.displaySize.y) / Math.min(AndroidUtilities.displaySize.x, AndroidUtilities.displaySize.y);
org.telegram.messenger.camera.Size aspectRatio;
int wantedWidth;
int wantedHeight;
if (initialFrontface) {
aspectRatio = new Size(16, 9);
wantedWidth = 480;
wantedHeight = 270;
} else {
if (Math.abs(screenSize - size4to3) < 0.1f) {
aspectRatio = new Size(4, 3);
wantedWidth = 1280;
wantedHeight = 960;
} else {
aspectRatio = new Size(16, 9);
wantedWidth = 1280;
wantedHeight = 720;
}
}
if (textureView.getWidth() > 0 && textureView.getHeight() > 0) {
int width;
if (useMaxPreview) {
width = Math.max(AndroidUtilities.displaySize.x, AndroidUtilities.displaySize.y);
} else {
width = Math.min(AndroidUtilities.displaySize.x, AndroidUtilities.displaySize.y);
}
int height = width * aspectRatio.getHeight() / aspectRatio.getWidth();
previewSize = CameraController.chooseOptimalSize(info.getPreviewSizes(), width, height, aspectRatio);
}
org.telegram.messenger.camera.Size pictureSize = CameraController.chooseOptimalSize(info.getPictureSizes(), wantedWidth, wantedHeight, aspectRatio);
if (pictureSize.getWidth() >= 1280 && pictureSize.getHeight() >= 1280) {
if (Math.abs(screenSize - size4to3) < 0.1f) {
aspectRatio = new Size(3, 4);
} else {
aspectRatio = new Size(9, 16);
}
org.telegram.messenger.camera.Size pictureSize2 = CameraController.chooseOptimalSize(info.getPictureSizes(), wantedHeight, wantedWidth, aspectRatio);
if (pictureSize2.getWidth() < 1280 || pictureSize2.getHeight() < 1280) {
pictureSize = pictureSize2;
}
}
SurfaceTexture surfaceTexture = textureView.getSurfaceTexture();
if (previewSize != null && surfaceTexture != null) {
surfaceTexture.setDefaultBufferSize(previewSize.getWidth(), previewSize.getHeight());
cameraSession = new CameraSession(info, previewSize, pictureSize, ImageFormat.JPEG);
if (optimizeForBarcode) {
cameraSession.setOptimizeForBarcode(optimizeForBarcode);
}
CameraController.getInstance().open(cameraSession, surfaceTexture, () -> {
if (cameraSession != null) {
cameraSession.setInitied();
}
checkPreviewMatrix();
}, () -> {
if (delegate != null) {
delegate.onCameraCreated(cameraSession.cameraInfo.camera);
}
});
}
}
private static String getFormat(int format) {
switch (format) {
case ImageFormat.DEPTH16:
return "DEPTH16";
case ImageFormat.DEPTH_POINT_CLOUD:
return "DEPTH_POINT_CLOUD";
case ImageFormat.FLEX_RGBA_8888:
return "FLEX_RGBA_8888";
case ImageFormat.FLEX_RGB_888:
return "FLEX_RGB_888";
case ImageFormat.JPEG:
return "JPEG";
case ImageFormat.NV16:
return "NV16";
case ImageFormat.NV21:
return "NV21";
case ImageFormat.PRIVATE:
return "PRIVATE";
case ImageFormat.RAW10:
return "RAW10";
case ImageFormat.RAW12:
return "RAW12";
case ImageFormat.RAW_PRIVATE:
return "RAW_PRIVATE";
case ImageFormat.RAW_SENSOR:
return "RAW_SENSOR";
case ImageFormat.RGB_565:
return "RGB_565";
case ImageFormat.YUV_420_888:
return "YUV_420_888";
case ImageFormat.YUV_422_888:
return "YUV_422_888";
case ImageFormat.YUV_444_888:
return "YUV_444_888";
case ImageFormat.YUY2:
return "YUY2";
case ImageFormat.YV12:
return "YV12";
default:
return UNKNOWN + "-" + format;
}
}
@Override
public void run()
{
ImageToProcess img = mImage;
Rect safeCrop = guaranteedSafeCrop(img.proxy, img.crop);
final List<ImageProxy.Plane> planeList = img.proxy.getPlanes();
final TaskImage inputImage = new TaskImage(mImage.rotation, img.proxy.getWidth(),
img.proxy.getHeight(), img.proxy.getFormat(), safeCrop);
final TaskImage resultImage = new TaskImage(mImage.rotation, img.proxy.getWidth(),
img.proxy.getHeight(), ImageFormat.JPEG, safeCrop);
byte[] dataCopy;
int[] strides = new int[3];
try
{
onStart(mId, inputImage, resultImage, TaskInfo.Destination.FINAL_IMAGE);
// Do the byte copy
strides[0] = planeList.get(0).getRowStride()
/ planeList.get(0).getPixelStride();
strides[1] = planeList.get(1).getRowStride()
/ planeList.get(1).getPixelStride();
strides[2] = 2 * planeList.get(2).getRowStride()
/ planeList.get(2).getPixelStride();
// TODO: For performance, use a cache subsystem for buffer reuse.
dataCopy = convertYUV420ImageToPackedNV21(img.proxy);
} finally
{
// Release the image now that you have a usable copy
mImageTaskManager.releaseSemaphoreReference(img, mExecutor);
}
final byte[] chainedDataCopy = dataCopy;
final int[] chainedStrides = strides;
// This task drops the image reference.
TaskImageContainer chainedTask = new TaskJpegEncode(this, ProcessingPriority.SLOW)
{
@Override
public void run()
{
// Image is closed by now. Do NOT reference image directly.
byte[] compressedData = convertNv21toJpeg(chainedDataCopy,
resultImage.height, resultImage.width, chainedStrides);
onJpegEncodeDone(mId, inputImage, resultImage, compressedData,
TaskInfo.Destination.FINAL_IMAGE);
logWrapper("Finished off a chained task now that image is released.");
}
};
// Passed null, since the image has already been released.
mImageTaskManager.appendTasks(null, chainedTask);
logWrapper("Kicking off a chained task now that image is released.");
}
/**
* Request a single image.
*
* @return true if successful, false if there was an error submitting the
* capture request.
*/
private boolean sendSingleRequest(OneCamera.PhotoCaptureParameters params)
{
Log.v(TAG, "sendSingleRequest()");
try
{
CaptureRequest.Builder builder;
builder = mDevice.createCaptureRequest(CameraDevice.TEMPLATE_STILL_CAPTURE);
builder.addTarget(mPreviewSurface);
// Always add this surface for single image capture requests.
builder.addTarget(mCaptureImageReader.getSurface());
builder.set(CaptureRequest.CONTROL_MODE, CaptureRequest.CONTROL_MODE_AUTO);
Flash flashMode = Flash.OFF;
addFlashToCaptureRequestBuilder(builder, flashMode);
addRegionsToCaptureRequestBuilder(builder);
builder.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_AUTO);
builder.set(CaptureRequest.CONTROL_AF_TRIGGER, CaptureRequest.CONTROL_AF_TRIGGER_IDLE);
// Tag this as a special request which should be saved.
builder.setTag(RequestTag.EXPLICIT_CAPTURE);
if (sCaptureImageFormat == ImageFormat.JPEG)
{
builder.set(CaptureRequest.JPEG_QUALITY, (byte) (JPEG_QUALITY));
builder.set(CaptureRequest.JPEG_ORIENTATION,
CameraUtil.getJpegRotation(params.orientation, mCharacteristics));
}
mCaptureSession.capture(builder.build(), mCaptureManager, mCameraHandler);
return true;
} catch (CameraAccessException e)
{
Log.v(TAG, "Could not execute single still capture request.", e);
return false;
}
}
public static OneCamera create(
CameraDevice device,
CameraCharacteristics characteristics,
OneCameraFeatureConfig featureConfig,
OneCameraCaptureSetting captureSetting,
DisplayMetrics displayMetrics,
Context context,
MainThread mainThread,
ImageRotationCalculator imageRotationCalculator,
BurstFacade burstController,
SoundPlayer soundPlayer,
FatalErrorHandler fatalErrorHandler) throws OneCameraAccessException
{
// TODO: Might want to switch current camera to vendor HDR.
CaptureSupportLevel captureSupportLevel = featureConfig
.getCaptureSupportLevel(characteristics);
Log.i(TAG, "Camera support level: " + captureSupportLevel.name());
OneCameraCharacteristics oneCharacteristics =
new OneCameraCharacteristicsImpl(characteristics);
PictureSizeCalculator pictureSizeCalculator =
new PictureSizeCalculator(oneCharacteristics);
PictureSizeCalculator.Configuration configuration = null;
OneCameraFactory cameraFactory = null;
ImageSaver.Builder imageSaverBuilder = null;
ImageBackend imageBackend = ProcessingServiceManager.instance().getImageBackend();
// Depending on the support level of the camera, choose the right
// configuration.
switch (captureSupportLevel)
{
case LIMITED_JPEG:
case LEGACY_JPEG:
// LIMITED and LEGACY have different picture takers, which are selected by
// the support level passed into #createOneCamera below - otherwise they
// use the same OneCamera and image backend.
cameraFactory = new SimpleOneCameraFactory(ImageFormat.JPEG,
featureConfig.getMaxAllowedImageReaderCount(),
imageRotationCalculator);
configuration = pictureSizeCalculator.computeConfiguration(
captureSetting.getCaptureSize(),
ImageFormat.JPEG);
imageSaverBuilder = new JpegImageBackendImageSaver(imageRotationCalculator,
imageBackend, configuration.getPostCaptureCrop());
break;
case LIMITED_YUV:
// Same as above, but we're using YUV images.
cameraFactory = new SimpleOneCameraFactory(ImageFormat.YUV_420_888,
featureConfig.getMaxAllowedImageReaderCount(),
imageRotationCalculator);
configuration = pictureSizeCalculator.computeConfiguration(
captureSetting.getCaptureSize(),
ImageFormat.YUV_420_888);
imageSaverBuilder = new YuvImageBackendImageSaver(imageRotationCalculator,
imageBackend,
configuration.getPostCaptureCrop());
break;
case ZSL:
// ZSL has its own OneCamera and produces YUV images.
cameraFactory = new ZslOneCameraFactory(ImageFormat.YUV_420_888,
featureConfig.getMaxAllowedImageReaderCount());
configuration = pictureSizeCalculator.computeConfiguration(
captureSetting.getCaptureSize(),
ImageFormat.YUV_420_888);
imageSaverBuilder = new YuvImageBackendImageSaver(imageRotationCalculator,
imageBackend, configuration.getPostCaptureCrop());
break;
}
Log.i(TAG, "Picture Size Configuration: " + configuration);
return cameraFactory.createOneCamera(new AndroidCameraDeviceProxy(device),
new OneCameraCharacteristicsImpl(characteristics),
captureSupportLevel,
mainThread,
configuration.getNativeOutputSize(),
imageSaverBuilder,
captureSetting.getFlashSetting(),
captureSetting.getExposureSetting(),
captureSetting.getHdrSceneSetting(),
burstController,
fatalErrorHandler);
}
private void initCamera() {
CameraInfo info = null;
ArrayList<CameraInfo> cameraInfos = CameraController.getInstance().getCameras();
if (cameraInfos == null) {
return;
}
for (int a = 0; a < cameraInfos.size(); a++) {
CameraInfo cameraInfo = cameraInfos.get(a);
if (isFrontface && cameraInfo.frontCamera != 0 || !isFrontface && cameraInfo.frontCamera == 0) {
info = cameraInfo;
break;
}
}
if (info == null) {
return;
}
float size4to3 = 4.0f / 3.0f;
float size16to9 = 16.0f / 9.0f;
float screenSize = (float) Math.max(AndroidUtilities.displaySize.x, AndroidUtilities.displaySize.y) / Math.min(AndroidUtilities.displaySize.x, AndroidUtilities.displaySize.y);
org.telegram.messenger.camera.Size aspectRatio;
int wantedWidth;
int wantedHeight;
if (initialFrontface) {
aspectRatio = new Size(16, 9);
wantedWidth = 480;
wantedHeight = 270;
} else {
if (Math.abs(screenSize - size4to3) < 0.1f) {
aspectRatio = new Size(4, 3);
wantedWidth = 1280;
wantedHeight = 960;
} else {
aspectRatio = new Size(16, 9);
wantedWidth = 1280;
wantedHeight = 720;
}
}
if (textureView.getWidth() > 0 && textureView.getHeight() > 0) {
int width = Math.min(AndroidUtilities.displaySize.x, AndroidUtilities.displaySize.y);
int height = width * aspectRatio.getHeight() / aspectRatio.getWidth();
previewSize = CameraController.chooseOptimalSize(info.getPreviewSizes(), width, height, aspectRatio);
}
org.telegram.messenger.camera.Size pictureSize = CameraController.chooseOptimalSize(info.getPictureSizes(), wantedWidth, wantedHeight, aspectRatio);
if (pictureSize.getWidth() >= 1280 && pictureSize.getHeight() >= 1280) {
if (Math.abs(screenSize - size4to3) < 0.1f) {
aspectRatio = new Size(3, 4);
} else {
aspectRatio = new Size(9, 16);
}
org.telegram.messenger.camera.Size pictureSize2 = CameraController.chooseOptimalSize(info.getPictureSizes(), wantedHeight, wantedWidth, aspectRatio);
if (pictureSize2.getWidth() < 1280 || pictureSize2.getHeight() < 1280) {
pictureSize = pictureSize2;
}
}
SurfaceTexture surfaceTexture = textureView.getSurfaceTexture();
if (previewSize != null && surfaceTexture != null) {
surfaceTexture.setDefaultBufferSize(previewSize.getWidth(), previewSize.getHeight());
cameraSession = new CameraSession(info, previewSize, pictureSize, ImageFormat.JPEG);
CameraController.getInstance().open(cameraSession, surfaceTexture, new Runnable() {
@Override
public void run() {
if (cameraSession != null) {
cameraSession.setInitied();
}
checkPreviewMatrix();
}
}, new Runnable() {
@Override
public void run() {
if (delegate != null) {
delegate.onCameraCreated(cameraSession.cameraInfo.camera);
}
}
});
}
}
/**
* Convert a public format compatible with {@code ImageFormat} to an internal format
* from {@code graphics.h}.
*
* <p>In particular these formats are converted:
* <ul>
* <li>ImageFormat.JPEG => HAL_PIXEL_FORMAT_BLOB
* <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_PIXEL_FORMAT_BLOB
* <li>ImageFormat.DEPTH16 => HAL_PIXEL_FORMAT_Y16
* </ul>
* </p>
*
* <p>Passing in an internal format which has a different public format equivalent will fail.
* See {@link #checkArgumentFormat} for more details about a legal public format.</p>
*
* <p>All other formats are returned as-is, no invalid check is performed.</p>
*
* <p>This function is the dual of {@link #imageFormatToPublic}.</p>
*
* @param format public image format from {@link ImageFormat} or {@link PixelFormat}
* @return the converted image format
*
* @see ImageFormat
* @see PixelFormat
*
* @throws IllegalArgumentException
* if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED}
*/
static int imageFormatToInternal(int format) {
switch (format) {
case ImageFormat.JPEG:
case ImageFormat.DEPTH_POINT_CLOUD:
return HAL_PIXEL_FORMAT_BLOB;
case ImageFormat.DEPTH16:
return HAL_PIXEL_FORMAT_Y16;
case ImageFormat.RAW_DEPTH:
return HAL_PIXEL_FORMAT_RAW16;
default:
return format;
}
}
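The HAL_PIXEL_FORMAT_* constants referenced above come from the platform's graphics.h; as a sketch outside the framework tree, their AOSP values are reproduced below (treat them as assumptions if the headers are not on hand), together with a hypothetical round-trip check.
// Values mirroring AOSP's graphics.h definitions (assumed here).
static final int HAL_PIXEL_FORMAT_RAW16 = 0x20;
static final int HAL_PIXEL_FORMAT_BLOB = 0x21;
static final int HAL_PIXEL_FORMAT_Y16 = 0x20363159; // fourcc 'Y16 '
// Hypothetical check: JPEG travels through the HAL as an opaque BLOB.
assert imageFormatToInternal(ImageFormat.JPEG) == HAL_PIXEL_FORMAT_BLOB;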