Below are example snippets using android.hardware.Camera.Area and android.hardware.camera2.params.Face.
/**
 * Decorate the collector when the CaptureResult becomes available, which happens some time
 * after the picture is taken. In the current implementation, we query this structure for
 * two fields: 1) CaptureResult.STATISTICS_FACES and 2) CaptureResult.LENS_FOCUS_DISTANCE
 *
 * @param captureResult CaptureResult to be queried for capture event information
 */
public void decorateAtTimeOfCaptureRequestAvailable(CaptureResultProxy captureResult)
{
    Face[] facesCaptured = captureResult.get(CaptureResult.STATISTICS_FACES);
    if (facesCaptured == null)
    {
        mFaceProxies = null;
    } else
    {
        mFaceProxies = new ArrayList<>(facesCaptured.length);
        for (Face face : facesCaptured)
        {
            mFaceProxies.add(Camera2FaceProxy.from(face));
        }
    }
    mLensFocusDistance = captureResult.get(CaptureResult.LENS_FOCUS_DISTANCE);
}
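A minimal sketch of how such a collector might be fed from a camera2 capture callback. CaptureSessionStatsCollector and CaptureResultProxy are internal Camera app types, so the AndroidCaptureResultProxy adapter and the mStatsCollector field below are assumptions, not confirmed API.

// Hypothetical glue code: forward each completed capture result to the collector.
// AndroidCaptureResultProxy is assumed to wrap a TotalCaptureResult behind the
// CaptureResultProxy interface used above.
CameraCaptureSession.CaptureCallback statsCallback =
        new CameraCaptureSession.CaptureCallback() {
    @Override
    public void onCaptureCompleted(@NonNull CameraCaptureSession session,
            @NonNull CaptureRequest request, @NonNull TotalCaptureResult result) {
        mStatsCollector.decorateAtTimeOfCaptureRequestAvailable(
                new AndroidCaptureResultProxy(result)); // hypothetical wrapper
    }
};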
/**
 * Convert an api1 face into an active-array based api2 face.
 *
 * <p>Out-of-range scores and ids will be clipped to be within range (with a warning).</p>
 *
 * @param face a non-{@code null} api1 face
 * @param activeArray active array size of the sensor (e.g. max jpeg size)
 * @param zoomData the calculated zoom data corresponding to this request
 *
 * @return a non-{@code null} api2 face
 *
 * @throws NullPointerException if {@code face} was {@code null}
 */
public static Face convertFaceFromLegacy(Camera.Face face, Rect activeArray,
        ZoomData zoomData) {
    checkNotNull(face, "face must not be null");

    Face api2Face;

    Camera.Area fakeArea = new Camera.Area(face.rect, /*weight*/1);

    WeightedRectangle faceRect =
            convertCameraAreaToActiveArrayRectangle(activeArray, zoomData, fakeArea);

    Point leftEye = face.leftEye, rightEye = face.rightEye, mouth = face.mouth;
    // A coordinate of -2000 lies outside the valid api1 range of [-1000, 1000] and
    // marks an unsupported landmark; only convert when all landmarks are valid.
    if (leftEye != null && rightEye != null && mouth != null && leftEye.x != -2000 &&
            leftEye.y != -2000 && rightEye.x != -2000 && rightEye.y != -2000 &&
            mouth.x != -2000 && mouth.y != -2000) {
        leftEye = convertCameraPointToActiveArrayPoint(activeArray, zoomData,
                leftEye, /*usePreviewCrop*/true);
        rightEye = convertCameraPointToActiveArrayPoint(activeArray, zoomData,
                rightEye, /*usePreviewCrop*/true);
        mouth = convertCameraPointToActiveArrayPoint(activeArray, zoomData,
                mouth, /*usePreviewCrop*/true);

        api2Face = faceRect.toFace(face.id, leftEye, rightEye, mouth);
    } else {
        api2Face = faceRect.toFace();
    }

    return api2Face;
}
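A hedged usage sketch: converting faces from a camera1 FaceDetectionListener into api2 faces. It assumes the caller already holds an open Camera instance plus activeArray and zoomData prepared as in mapResultFaces() below.

// Sketch only: activeArray and zoomData are assumed to be prepared as in
// mapResultFaces() further down.
camera.setFaceDetectionListener(new Camera.FaceDetectionListener() {
    @Override
    public void onFaceDetection(Camera.Face[] faces, Camera camera) {
        List<Face> api2Faces = new ArrayList<>(faces.length);
        for (Camera.Face face : faces) {
            api2Faces.add(ParameterUtils.convertFaceFromLegacy(face, activeArray, zoomData));
        }
        // ... publish api2Faces to the capture result / UI layer
    }
});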
@Override
public void onCaptureCompleted(@NonNull CameraCaptureSession session,
        @NonNull CaptureRequest request, @NonNull TotalCaptureResult result) {
    Face[] faces = result.get(CaptureResult.STATISTICS_FACES);
    if (faceDetectorCallback != null) {
        faceDetectorCallback.onGetFaces(faces);
    }
}
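STATISTICS_FACES is only populated when face detection is enabled on the request. A minimal sketch of enabling it; cameraDevice, previewSurface, session, captureCallback, and handler are assumed to come from the usual camera2 setup elsewhere.

// Check that the device advertises a usable face detect mode before enabling it.
int[] modes = characteristics.get(
        CameraCharacteristics.STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES);
// ... verify modes contains STATISTICS_FACE_DETECT_MODE_SIMPLE ...

CaptureRequest.Builder builder =
        cameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
builder.addTarget(previewSurface);
builder.set(CaptureRequest.STATISTICS_FACE_DETECT_MODE,
        CameraMetadata.STATISTICS_FACE_DETECT_MODE_SIMPLE);
session.setRepeatingRequest(builder.build(), captureCallback, handler);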
@RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
public FaceParsed camera2Parse(Face face, View view, PointF scale, int rotation,
        boolean isFrontCamera) {
    // Parse the face: api2 bounds are in sensor active-array coordinates.
    RectF rect = new RectF(face.getBounds());
    Matrix matrix = new Matrix();
    matrix.setScale(isFrontCamera ? -1 : 1, 1); // mirror horizontally for the front camera
    matrix.postRotate(rotation);
    matrix.postScale(1f, 1f); // identity scale, kept as-is from the original source
    matrix.postTranslate(view.getWidth(), view.getHeight());
    matrix.mapRect(rect);
    return getFace(rect, scale, view);
}
public FaceParsed camera1Parse(Camera.Face face, View view, PointF scale, int rotation,
        boolean isFrontCamera) {
    // Parse the face: api1 rects live in the [-1000, 1000] driver coordinate space.
    RectF rect = new RectF(face.rect);
    Matrix matrix = new Matrix();
    matrix.setScale(isFrontCamera ? -1 : 1, 1); // mirror horizontally for the front camera
    matrix.postRotate(rotation);
    // Map [-1000, 1000] onto [0, viewWidth] x [0, viewHeight]:
    matrix.postScale(view.getWidth() / 2000f, view.getHeight() / 2000f);
    matrix.postTranslate(view.getWidth() / 2f, view.getHeight() / 2f);
    matrix.mapRect(rect);
    return getFace(rect, scale, view);
}
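A worked example of the camera1 mapping above, using assumed values: a 1080 x 1920 view, no rotation, rear camera. The driver-space rect (-1000, -1000, 0, 0) lands in the view's top-left quadrant.

// Worked example of the camera1 matrix chain (1080x1920 view, rotation 0, rear camera).
RectF r = new RectF(-1000, -1000, 0, 0);
Matrix m = new Matrix();
m.postScale(1080 / 2000f, 1920 / 2000f); // scale driver units to pixels
m.postTranslate(1080 / 2f, 1920 / 2f);   // shift origin from center to top-left
m.mapRect(r);                            // r is now (0, 0, 540, 960)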
/**
 * Update the {@code result} camera metadata map with the new values for
 * {@code statistics.faces} and {@code statistics.faceDetectMode}.
 *
 * <p>Face detect callbacks are processed in the background, and each call to
 * {@link #mapResultFaces} will have the latest faces as reflected by the camera1 callbacks.</p>
 *
 * <p>If the scene mode was set to {@code FACE_PRIORITY} but face detection is disabled,
 * the camera will still run face detection in the background, but no faces will be reported
 * in the capture result.</p>
 *
 * @param result a non-{@code null} result
 * @param legacyRequest a non-{@code null} request (read-only)
 */
public void mapResultFaces(CameraMetadataNative result, LegacyRequest legacyRequest) {
    checkNotNull(result, "result must not be null");
    checkNotNull(legacyRequest, "legacyRequest must not be null");

    Camera.Face[] faces, previousFaces;
    int fdMode;
    boolean fdScenePriority;
    synchronized (mLock) {
        fdMode = mFaceDetectReporting ?
                STATISTICS_FACE_DETECT_MODE_SIMPLE : STATISTICS_FACE_DETECT_MODE_OFF;

        if (mFaceDetectReporting) {
            faces = mFaces;
        } else {
            faces = null;
        }

        fdScenePriority = mFaceDetectScenePriority;

        previousFaces = mFacesPrev;
        mFacesPrev = faces;
    }

    CameraCharacteristics characteristics = legacyRequest.characteristics;
    CaptureRequest request = legacyRequest.captureRequest;
    Size previewSize = legacyRequest.previewSize;
    Camera.Parameters params = legacyRequest.parameters;

    Rect activeArray = characteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);
    ZoomData zoomData = ParameterUtils.convertScalerCropRegion(activeArray,
            request.get(CaptureRequest.SCALER_CROP_REGION), previewSize, params);

    List<Face> convertedFaces = new ArrayList<>();
    if (faces != null) {
        for (Camera.Face face : faces) {
            if (face != null) {
                convertedFaces.add(
                        ParameterUtils.convertFaceFromLegacy(face, activeArray, zoomData));
            } else {
                Log.w(TAG, "mapResultFaces - read NULL face from camera1 device");
            }
        }
    }

    if (DEBUG && previousFaces != faces) { // Log only in verbose and IF the faces changed
        Log.v(TAG, "mapResultFaces - changed to " + ListUtils.listToString(convertedFaces));
    }

    result.set(CaptureResult.STATISTICS_FACES, convertedFaces.toArray(new Face[0]));
    result.set(CaptureResult.STATISTICS_FACE_DETECT_MODE, fdMode);

    // Override scene mode with FACE_PRIORITY if the request was using FACE_PRIORITY
    if (fdScenePriority) {
        result.set(CaptureResult.CONTROL_SCENE_MODE, CONTROL_SCENE_MODE_FACE_PRIORITY);
    }
}
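On the app side, the javadoc note above means a FACE_PRIORITY result can legitimately carry zero faces. A defensive consumer sketch, assuming captureResult is the TotalCaptureResult from onCaptureCompleted:

// Defensive consumer: FACE_PRIORITY does not guarantee a non-empty face list.
Integer sceneMode = captureResult.get(CaptureResult.CONTROL_SCENE_MODE);
Face[] faces = captureResult.get(CaptureResult.STATISTICS_FACES);
boolean facePriority = sceneMode != null
        && sceneMode == CameraMetadata.CONTROL_SCENE_MODE_FACE_PRIORITY;
if (facePriority && (faces == null || faces.length == 0)) {
    // Face detection may be disabled even though the scene mode is FACE_PRIORITY.
}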
private Face[] getFaces() {
    Integer faceDetectMode = get(CaptureResult.STATISTICS_FACE_DETECT_MODE);
    byte[] faceScores = get(CaptureResult.STATISTICS_FACE_SCORES);
    Rect[] faceRectangles = get(CaptureResult.STATISTICS_FACE_RECTANGLES);
    int[] faceIds = get(CaptureResult.STATISTICS_FACE_IDS);
    int[] faceLandmarks = get(CaptureResult.STATISTICS_FACE_LANDMARKS);

    if (areValuesAllNull(faceDetectMode, faceScores, faceRectangles, faceIds, faceLandmarks)) {
        return null;
    }

    if (faceDetectMode == null) {
        Log.w(TAG, "Face detect mode metadata is null, assuming the mode is SIMPLE");
        faceDetectMode = CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE;
    } else if (faceDetectMode > CaptureResult.STATISTICS_FACE_DETECT_MODE_FULL) {
        // Face detect mode is larger than FULL, assuming the mode is FULL
        faceDetectMode = CaptureResult.STATISTICS_FACE_DETECT_MODE_FULL;
    } else {
        if (faceDetectMode == CaptureResult.STATISTICS_FACE_DETECT_MODE_OFF) {
            return new Face[0];
        }
        if (faceDetectMode != CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE &&
                faceDetectMode != CaptureResult.STATISTICS_FACE_DETECT_MODE_FULL) {
            Log.w(TAG, "Unknown face detect mode: " + faceDetectMode);
            return new Face[0];
        }
    }

    // Face scores and rectangles are required by SIMPLE and FULL mode.
    if (faceScores == null || faceRectangles == null) {
        Log.w(TAG, "Expect face scores and rectangles to be non-null");
        return new Face[0];
    } else if (faceScores.length != faceRectangles.length) {
        Log.w(TAG, String.format("Face score size(%d) doesn't match face rectangle size(%d)!",
                faceScores.length, faceRectangles.length));
    }

    // To be safe, make the number of faces the minimum of all face metadata lengths.
    int numFaces = Math.min(faceScores.length, faceRectangles.length);

    // Face ids and landmarks are only required by FULL mode.
    if (faceDetectMode == CaptureResult.STATISTICS_FACE_DETECT_MODE_FULL) {
        if (faceIds == null || faceLandmarks == null) {
            Log.w(TAG, "Expect face ids and landmarks to be non-null for FULL mode, " +
                    "falling back to SIMPLE mode");
            faceDetectMode = CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE;
        } else {
            if (faceIds.length != numFaces ||
                    faceLandmarks.length != numFaces * FACE_LANDMARK_SIZE) {
                Log.w(TAG, String.format("Face id size(%d), or face landmark size(%d) don't " +
                        "match face number(%d)!",
                        faceIds.length, faceLandmarks.length, numFaces));
            }
            // To be safe, make the number of faces the minimum of all face metadata lengths.
            numFaces = Math.min(numFaces, faceIds.length);
            numFaces = Math.min(numFaces, faceLandmarks.length / FACE_LANDMARK_SIZE);
        }
    }

    ArrayList<Face> faceList = new ArrayList<Face>();
    if (faceDetectMode == CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE) {
        for (int i = 0; i < numFaces; i++) {
            if (faceScores[i] <= Face.SCORE_MAX &&
                    faceScores[i] >= Face.SCORE_MIN) {
                faceList.add(new Face(faceRectangles[i], faceScores[i]));
            }
        }
    } else {
        // CaptureResult.STATISTICS_FACE_DETECT_MODE_FULL
        for (int i = 0; i < numFaces; i++) {
            if (faceScores[i] <= Face.SCORE_MAX &&
                    faceScores[i] >= Face.SCORE_MIN &&
                    faceIds[i] >= 0) {
                Point leftEye = new Point(faceLandmarks[i*FACE_LANDMARK_SIZE],
                        faceLandmarks[i*FACE_LANDMARK_SIZE+1]);
                Point rightEye = new Point(faceLandmarks[i*FACE_LANDMARK_SIZE+2],
                        faceLandmarks[i*FACE_LANDMARK_SIZE+3]);
                Point mouth = new Point(faceLandmarks[i*FACE_LANDMARK_SIZE+4],
                        faceLandmarks[i*FACE_LANDMARK_SIZE+5]);
                Face face = new Face(faceRectangles[i], faceScores[i], faceIds[i],
                        leftEye, rightEye, mouth);
                faceList.add(face);
            }
        }
    }
    Face[] faces = new Face[faceList.size()];
    faceList.toArray(faces);
    return faces;
}
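The indexing above shows the landmark array is packed six ints per face, in (leftEyeX, leftEyeY, rightEyeX, rightEyeY, mouthX, mouthY) order, which is why FACE_LANDMARK_SIZE is 6. A standalone unpacking sketch of that same layout:

// Unpacking sketch matching the indexing above: six ints per face,
// (leftEyeX, leftEyeY, rightEyeX, rightEyeY, mouthX, mouthY).
static final int FACE_LANDMARK_SIZE = 6;

static Point[] unpackLandmarks(int[] faceLandmarks, int faceIndex) {
    int base = faceIndex * FACE_LANDMARK_SIZE;
    return new Point[] {
            new Point(faceLandmarks[base], faceLandmarks[base + 1]),     // left eye
            new Point(faceLandmarks[base + 2], faceLandmarks[base + 3]), // right eye
            new Point(faceLandmarks[base + 4], faceLandmarks[base + 5])  // mouth
    };
}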
@Override
public <T> void setValue(CameraMetadataNative metadata, T value) {
    metadata.setFaces((Face[]) value);
}
public static Camera2FaceProxy from(Face face)
{
    Camera2FaceProxy convertedFace = new Camera2FaceProxy(face.getBounds(), face.getScore());
    return convertedFace;
}
/**
 * Convert to a face; the rect is considered to be the bounds, and the weight
 * is considered to be the score.
 *
 * <p>If the score is out of the range [{@value Face#SCORE_MIN}, {@value Face#SCORE_MAX}],
 * the score is clipped first and a warning is printed to logcat.</p>
 *
 * <p>If the id is negative, the id is changed to 0 and a warning is printed to
 * logcat.</p>
 *
 * <p>All other parameters are passed through as-is.</p>
 *
 * @return a new face with the optional features set
 */
public Face toFace(
        int id, Point leftEyePosition, Point rightEyePosition, Point mouthPosition) {
    int idSafe = clipLower(id, /*lo*/0, rect, "id");
    int score = clip(weight,
            Face.SCORE_MIN,
            Face.SCORE_MAX,
            rect,
            "score");

    return new Face(rect, score, idSafe, leftEyePosition, rightEyePosition, mouthPosition);
}
/**
 * Convert to a face; the rect is considered to be the bounds, and the weight
 * is considered to be the score.
 *
 * <p>If the score is out of the range [{@value Face#SCORE_MIN}, {@value Face#SCORE_MAX}],
 * the score is clipped first and a warning is printed to logcat.</p>
 *
 * <p>All other parameters are passed through as-is.</p>
 *
 * @return a new face without the optional features
 */
public Face toFace() {
    int score = clip(weight,
            Face.SCORE_MIN,
            Face.SCORE_MAX,
            rect,
            "score");

    return new Face(rect, score);
}
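A brief note on the clipping: an api1 Camera.Area weight may be as high as 1000, while Face.SCORE_MIN is 1 and Face.SCORE_MAX is 100, so an out-of-range weight is clamped. A hedged example, assuming the WeightedRectangle(Rect, int) constructor from ParameterUtils:

// Hypothetical values: a WeightedRectangle built from a Camera.Area with weight 1000.
// toFace() clips 1000 down to Face.SCORE_MAX (100) and logs a warning first.
WeightedRectangle weighted = new WeightedRectangle(new Rect(0, 0, 100, 100), /*weight*/1000);
Face face = weighted.toFace();
// face.getScore() == 100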
void onGetFaces(Face[] faces);
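This is the single method of the face callback interface invoked from onCaptureCompleted above; the interface name FaceDetectorCallback is assumed from the field name used there. A minimal implementation sketch:

// Minimal sketch: the interface name FaceDetectorCallback is assumed from the
// faceDetectorCallback field used in onCaptureCompleted() above.
faceDetectorCallback = new FaceDetectorCallback() {
    @Override
    public void onGetFaces(Face[] faces) {
        if (faces != null && faces.length > 0) {
            // e.g. map each face.getBounds() into view space (see camera2Parse above)
            // and redraw the overlay.
        }
    }
};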