android.media.FaceDetector.Face Source Code Examples

The following examples show how to use the android.media.FaceDetector.Face API; you can also follow the links to view the full source code on GitHub.
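Before the project samples, here is a minimal self-contained sketch of the basic workflow. It is only a sketch: detectFaces(), bitmap565 and maxFaces are illustrative names, not taken from any project on this page. findFaces() requires a bitmap in Bitmap.Config.RGB_565 whose width is even, and each detected Face exposes just a confidence value, the distance between the eyes, the midpoint between them, and a pose estimate.

import android.graphics.Bitmap;
import android.graphics.PointF;
import android.media.FaceDetector;
import android.media.FaceDetector.Face;

public final class FaceDetectorSketch {

    // Runs the detector on an RGB_565 bitmap and prints the per-face data.
    public static void detectFaces(Bitmap bitmap565, int maxFaces) {
        // The detector is sized to the exact bitmap dimensions; findFaces()
        // rejects bitmaps that are not RGB_565 or whose width is odd.
        FaceDetector detector = new FaceDetector(
                bitmap565.getWidth(), bitmap565.getHeight(), maxFaces);

        Face[] faces = new Face[maxFaces];
        int found = detector.findFaces(bitmap565, faces);

        for (int i = 0; i < found; i++) {
            Face face = faces[i];
            PointF midPoint = new PointF();
            face.getMidPoint(midPoint); // midpoint between the eyes

            // Faces below Face.CONFIDENCE_THRESHOLD (0.4) are usually skipped.
            System.out.println("face " + i
                    + ": confidence=" + face.confidence()
                    + ", eyesDistance=" + face.eyesDistance()
                    + ", midPoint=(" + midPoint.x + ", " + midPoint.y + ")");
        }
    }
}

The project examples follow.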

Example 1

private void updateMeasurement(final FaceDetector.Face currentFace) {
	if (currentFace == null) {
		// _facesFoundInMeasurement--;
		return;
	}

	_foundFace = _currentFaceDetectionThread.getCurrentFace();

	// Pair the measured eye distance (px) with a distance estimate
	// derived from the calibration sample.
	_points.add(new Point(_foundFace.eyesDistance(),
			CALIBRATION_DISTANCE_A4_MM
					* (_distanceAtCalibrationPoint / _foundFace
							.eyesDistance())));

	// Drop the oldest samples, keeping a sliding window of at most
	// _threashold measurements.
	while (_points.size() > _threashold) {
		_points.remove(0);
	}

	float sum = 0;
	for (Point p : _points) {
		sum += p.getEyeDistance();
	}

	_currentAvgEyeDistance = sum / _points.size();

	_currentDistanceToFace = CALIBRATION_DISTANCE_A4_MM
			* (_distanceAtCalibrationPoint / _currentAvgEyeDistance);

	_currentDistanceToFace = Util.MM_TO_CM(_currentDistanceToFace);

	MeasurementStepMessage message = new MeasurementStepMessage();
	message.setConfidence(currentFace.confidence());
	message.setCurrentAvgEyeDistance(_currentAvgEyeDistance);
	message.setDistToFace(_currentDistanceToFace);
	message.setEyesDistance(currentFace.eyesDistance());
	message.setMeasurementsLeft(_calibrationsLeft);
	message.setProcessTimeForLastFrame(_processTimeForLastFrame);

	MessageHUB.get().sendMessage(MessageHUB.MEASUREMENT_STEP, message);
}
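This sample estimates the distance from the camera to the face from the apparent eye distance. Under the usual pinhole-camera model, apparent size scales inversely with distance, so distanceToFace ≈ CALIBRATION_DISTANCE_A4_MM * (_distanceAtCalibrationPoint / eyesDistance), where _distanceAtCalibrationPoint holds the eye distance in pixels recorded while the face was held at the known calibration distance. As a purely illustrative calculation (the actual constants are not shown in this excerpt): with a 297 mm calibration distance and 120 px between the eyes at calibration, a current reading of 60 px would put the face at roughly 594 mm, which Util.MM_TO_CM() then converts to centimeters.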
 
Example 2   Project: 365browser   File: FaceDetectionImpl.java
@Override
public void detect(SharedBufferHandle frameData, final int width, final int height,
        final DetectResponse callback) {
    final long numPixels = (long) width * height;
    // TODO(xianglu): https://crbug.com/670028 homogenize overflow checking.
    if (!frameData.isValid() || width <= 0 || height <= 0 || numPixels > (Long.MAX_VALUE / 4)) {
        Log.d(TAG, "Invalid argument(s).");
        callback.call(new FaceDetectionResult[0]);
        return;
    }

    ByteBuffer imageBuffer = frameData.map(0, numPixels * 4, MapFlags.none());
    if (imageBuffer.capacity() <= 0) {
        Log.d(TAG, "Failed to map from SharedBufferHandle.");
        callback.call(new FaceDetectionResult[0]);
        return;
    }

    Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);

    // An int array is needed to construct a Bitmap. However the Bytebuffer
    // we get from |sharedBufferHandle| is directly allocated and does not
    // have a supporting array. Therefore we need to copy from |imageBuffer|
    // to create this intermediate Bitmap.
    // TODO(xianglu): Consider worker pool as appropriate threads.
    // http://crbug.com/655814
    bitmap.copyPixelsFromBuffer(imageBuffer);

    // A Bitmap must be in 565 format for findFaces() to work. See
    // http://androidxref.com/7.0.0_r1/xref/frameworks/base/media/java/android/media/FaceDetector.java#124
    //
    // It turns out that FaceDetector is not able to detect correctly if
    // simply using pixmap.setConfig(). The reason might be that findFaces()
    // needs non-premultiplied ARGB arrangement, while the alpha type in the
    // original image is premultiplied. We can use getPixels() which does
    // the unmultiplication while copying to a new array. See
    // http://androidxref.com/7.0.0_r1/xref/frameworks/base/graphics/java/android/graphics/Bitmap.java#538
    int[] pixels = new int[width * height];
    bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
    final Bitmap unPremultipliedBitmap =
            Bitmap.createBitmap(pixels, width, height, Bitmap.Config.RGB_565);

    // FaceDetector creation and findFaces() might take a long time and trigger a
    // "StrictMode policy violation": they should happen in a background thread.
    AsyncTask.THREAD_POOL_EXECUTOR.execute(new Runnable() {
        @Override
        public void run() {
            final FaceDetector detector = new FaceDetector(width, height, mMaxFaces);
            Face[] detectedFaces = new Face[mMaxFaces];
            // findFaces() will stop at |mMaxFaces|.
            final int numberOfFaces = detector.findFaces(unPremultipliedBitmap, detectedFaces);

            FaceDetectionResult[] faceArray = new FaceDetectionResult[numberOfFaces];

            for (int i = 0; i < numberOfFaces; i++) {
                faceArray[i] = new FaceDetectionResult();

                final Face face = detectedFaces[i];
                final PointF midPoint = new PointF();
                face.getMidPoint(midPoint);
                final float eyesDistance = face.eyesDistance();

                faceArray[i].boundingBox = new RectF();
                faceArray[i].boundingBox.x = midPoint.x - eyesDistance;
                faceArray[i].boundingBox.y = midPoint.y - eyesDistance;
                faceArray[i].boundingBox.width = 2 * eyesDistance;
                faceArray[i].boundingBox.height = 2 * eyesDistance;
                // TODO(xianglu): Consider adding Face.confidence and Face.pose.

                faceArray[i].landmarks = new Landmark[0];
            }

            callback.call(faceArray);
        }
    });
}
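Note how the bounding box is synthesized: FaceDetector.Face exposes no face rectangle, only the midpoint between the eyes, the eye distance, a confidence value and a pose estimate, so the code approximates each face as a square with sides of 2 * eyesDistance centered on the midpoint.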
 
Example 3

public Face getCurrentFace() {
	return _currentFace;
}
 
Example 4

/**
 * Decodes the current NV21 camera preview frame, rotates and mirrors it
 * for portrait display, and runs FaceDetector on the result.
 */
@Override
public void run() {

	long t = System.currentTimeMillis();

	YuvImage yuvimage = new YuvImage(_data, ImageFormat.NV21,
			_previewSize.width, _previewSize.height, null);

	ByteArrayOutputStream baos = new ByteArrayOutputStream();

	if (!yuvimage.compressToJpeg(new Rect(0, 0, _previewSize.width,
			_previewSize.height), 100, baos)) {

		Log.e("Camera", "compressToJpeg failed");

	}

	Log.i("Timing", "Compression finished: "
			+ (System.currentTimeMillis() - t));
	t = System.currentTimeMillis();

	BitmapFactory.Options bfo = new BitmapFactory.Options();
	bfo.inPreferredConfig = Bitmap.Config.RGB_565;

	_currentFrame = BitmapFactory.decodeStream(new ByteArrayInputStream(
			baos.toByteArray()), null, bfo);

	Log.i("Timing", "Decode Finished: " + (System.currentTimeMillis() - t));
	t = System.currentTimeMillis();

	// Bail out before the rotation below ever touches a null Bitmap.
	if (_currentFrame == null) {
		Log.e(FACEDETECTIONTHREAD_TAG, "Could not decode Image");
		return;
	}

	// Rotate and mirror the frame so it suits our portrait mode
	Matrix matrix = new Matrix();
	matrix.postRotate(90);
	matrix.preScale(-1, 1);
	// We rotate the same Bitmap
	_currentFrame = Bitmap.createBitmap(_currentFrame, 0, 0,
			_previewSize.width, _previewSize.height, matrix, false);

	Log.i("Timing",
			"Rotate, Create finished: " + (System.currentTimeMillis() - t));
	t = System.currentTimeMillis();

	FaceDetector d = new FaceDetector(_currentFrame.getWidth(),
			_currentFrame.getHeight(), 1);

	Face[] faces = new Face[1];
	int found = d.findFaces(_currentFrame, faces);

	Log.i("Timing",
			"FaceDetection finished: " + (System.currentTimeMillis() - t));

	_currentFace = faces[0];
	Log.d(FACEDETECTIONTHREAD_TAG, "Found: " + found + " face(s)");
}
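The NV21-to-JPEG-to-Bitmap round trip is used because BitmapFactory cannot decode raw NV21 preview frames directly; YuvImage.compressToJpeg() is the standard bridge for the old Camera API. The Log.i("Timing", ...) calls bracket each stage, so the relative cost of compression, decoding, rotation and detection can be read off logcat.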
 
Example 5   Project: Masaccio   File: MasaccioImageView.java
public void setFaces(final Face[] faces) {

    if ((faces != null) && (faces.length > 0)) {

        mDetectedFaces = faces;

    } else {

        mDetectedFaces = null;
    }

    applyCrop();
}
 
Example 6   Project: Masaccio   File: MasaccioImageView.java
private void getFaceOffsets(final Face[] faces, final float[] offsets, final float scaleFactor,
        final float width, final float height, final float maxOffsetX, final float maxOffsetY) {

    try {

        Face bestFace = null;

        float maxConfidence = 0;

        for (final Face face : faces) {

            final float faceConfidence = face.confidence();

            if (faceConfidence > maxConfidence) {

                maxConfidence = faceConfidence;
                bestFace = face;
            }
        }

        if (bestFace == null) {

            getDefaultOffsets(offsets, maxOffsetX, maxOffsetY);

            return;
        }

        final PointF midPoint = new PointF();

        bestFace.getMidPoint(midPoint);

        final float scaledOffsetX =
                (midPoint.x * scaleFactor) - ((width - maxOffsetX) * FACE_POSITION_RATIO_X);

        final float scaledOffsetY =
                (midPoint.y * scaleFactor) - ((height - maxOffsetY) * FACE_POSITION_RATIO_Y);

        if (Math.round(maxOffsetX) >= 0) {

            offsets[0] = Math.min(Math.max(0, scaledOffsetX), maxOffsetX);

        } else {

            offsets[0] = scaledOffsetX;
        }

        if (Math.round(maxOffsetY) >= 0) {

            offsets[1] = Math.min(Math.max(0, scaledOffsetY), maxOffsetY);

        } else {

            offsets[1] = scaledOffsetY;
        }

    } catch (final Exception e) {

        getDefaultOffsets(offsets, maxOffsetX, maxOffsetY);
    }
}
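The Math.round(maxOffset) >= 0 guards appear to distinguish the axes on which the scaled image actually overflows the view: when a non-negative maximum exists, the face-centered offset is clamped into the [0, maxOffset] range; otherwise the raw scaled offset is passed through unclamped. The blanket catch falls back to getDefaultOffsets() whenever anything goes wrong, for example when the faces array is null.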
 
Example 7   Project: Masaccio   File: MasaccioImageView.java
@Override
public Face[] process(final Bitmap bitmap) {

    // Detection results are cached per source Bitmap; the NO_FACES sentinel
    // marks bitmaps that were already scanned and contained no faces.
    final Map<Bitmap, Face[]> facesMap = mFacesMap;

    final Face[] preProcessed = facesMap.get(bitmap);

    if (preProcessed != null) {

        if (preProcessed == NO_FACES) {

            return null;
        }

        return preProcessed;
    }

    final Face[] faces = new Face[MAX_FACES];

    final Bitmap bitmap565 = convertTo565(bitmap);

    if (bitmap565 != null) {

        final FaceDetector faceDetector =
                new FaceDetector(bitmap565.getWidth(), bitmap565.getHeight(), MAX_FACES);

        final int faceCount = faceDetector.findFaces(bitmap565, faces);

        if (faceCount > 0) {

            final Face[] detected = new Face[faceCount];

            System.arraycopy(faces, 0, detected, 0, faceCount);

            facesMap.put(bitmap, detected);

            return detected;
        }
    }

    facesMap.put(bitmap, NO_FACES);

    return null;
}
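convertTo565() itself is not included in this excerpt. A minimal sketch, assuming it re-encodes the pixels the same way the 365browser sample above does (getPixels() un-premultiplies the alpha into an int[] that is then wrapped in an RGB_565 bitmap), might look like the following; the real Masaccio implementation may differ:

// Hypothetical reconstruction, not the actual Masaccio code.
// Requires android.graphics.Bitmap.
private static Bitmap convertTo565(final Bitmap source) {
    if (source == null) {
        return null;
    }
    final int width = source.getWidth();
    final int height = source.getHeight();
    // getPixels() copies into a non-premultiplied ARGB int array, which
    // createBitmap() then packs into the RGB_565 config required by
    // FaceDetector.findFaces().
    final int[] pixels = new int[width * height];
    source.getPixels(pixels, 0, width, 0, 0, width, height);
    return Bitmap.createBitmap(pixels, width, height, Bitmap.Config.RGB_565);
}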
 
Example 8   Project: Masaccio   File: MasaccioImageView.java
private Face[] getFaces(final Bitmap bitmap) {

    return mFacesMap.get(bitmap);
}
 
Example 9   Project: Masaccio   File: MasaccioImageView.java
public Face[] process(Bitmap bitmap); 