//
// MLKitFaceDetector.swift
// VitalSignEngineCore
//
// Created by Eddie Lau on 24/8/2022.
//

import Foundation
import MLKitFaceDetection
import MLKitVision
import UIKit
import VitalSignEngineCore

public class MLKitFaceDetector: VitalSignFaceDetector {
    public typealias Face = MLKitFace

    public let supportedFeatures: Set<VitalSignEngineCore.FaceDetectorFeature> =
        [.eyesOpenProbability, .eularAngle]

    public init() {
    }

    /// Frames taller than this are downscaled before detection.
    public let maxDetectionHeight: CGFloat = 240

    let fps = Fps(name: "MLKitFaceDetector")

    lazy var faceDetector: FaceDetector = {
        let option = FaceDetectorOptions()
        option.contourMode = .all
        option.classificationMode = .all
        option.landmarkMode = .none
        option.performanceMode = .fast
        return FaceDetector.faceDetector(options: option)
    }()

    var face: Face? {
        didSet {
            fps.tick()
        }
    }

    public var frameRate: Double? { fps.lastReportedAverage }

    /// Maximum number of detection jobs allowed in flight at once.
    public var maxConcurrency = 2

    /// Number of detection jobs currently in flight.
    var jobCount = 0
    public func detect(from frame: UIImage) -> Face? {
        // Throttle: if the maximum number of jobs is already in flight,
        // skip this frame and return the most recent result.
        guard jobCount < maxConcurrency else {
            return face
        }
        jobCount += 1
        DispatchQueue.global().async {
            // Downscale off the main thread to bound the per-frame work.
            let scaledFrame = self.scaled(videoFrame: frame)
            let frameSize = scaledFrame.size
            let image = VisionImage(image: scaledFrame)
            image.orientation = scaledFrame.imageOrientation
            DispatchQueue.main.async {
                self.faceDetector.process(image) { faces, error in
                    guard let face = faces?.first else {
                        self.face = nil
                        self.jobCount -= 1
                        return
                    }
                    self.face = MLKitFace(face: face, frameSize: frameSize)
                    self.jobCount -= 1
                }
            }
        }
        // Detection completes asynchronously; return the latest result for now.
        return face
    }

    func scaled(videoFrame frame: UIImage) -> UIImage {
        var scale = CGFloat(1.0)
        if frame.size.height > maxDetectionHeight {
            scale = maxDetectionHeight / frame.size.height
        }
        let scaledSize = CGSize(width: frame.size.width * scale, height: frame.size.height * scale)
        return frame.resizedImage(size: scaledSize)!
    }
}
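
// A minimal usage sketch (assumptions: `cameraFrame` is a UIImage supplied by
// the app's capture pipeline, and calls happen on the main thread, since
// `jobCount` and `face` are not otherwise synchronized):
//
//     let detector = MLKitFaceDetector()
//     if let face = detector.detect(from: cameraFrame) {
//         print("normalized face box:", face.faceBox)
//     }
//     print("average detection rate:", detector.frameRate ?? 0)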

public struct MLKitFaceLandmarks: FaceLandmarks {
    public typealias ContourType = FaceContour

    let mlkitFace: MLKitFaceDetection.Face
    public let videoFrameSize: CGSize

    init(mlkitFace: MLKitFaceDetection.Face, videoFrameSize: CGSize) {
        self.mlkitFace = mlkitFace
        self.videoFrameSize = videoFrameSize
    }

    public var faceOval: FaceContour? { mlkitFace.contour(ofType: .face) }

    public func contour(of type: VitalSignEngineCore.FaceLandmarkType) -> FaceContour? {
        guard let contourType = type.asContourType else { return nil }
        return mlkitFace.contour(ofType: contourType)
    }
}

public class MLKitFace: FaceProtocol {
    public typealias ContourType = FaceContour

    let mlkitFace: MLKitFaceDetection.Face
    public let faceBox: NormalizedFaceBox
    public let faceLandmarks: MLKitFaceLandmarks
    public let eulerAngle: (x: CGFloat, y: CGFloat, z: CGFloat)?
    public let eyesOpenProbability: (leftEye: CGFloat, rightEye: CGFloat)?

    init?(face: MLKitFaceDetection.Face, frameSize: CGSize) {
        self.mlkitFace = face
        self.faceLandmarks = MLKitFaceLandmarks(mlkitFace: face, videoFrameSize: frameSize)
        // Without a face-oval contour there is nothing to derive a box from.
        guard let faceContour = face.contour(ofType: FaceContourType.face) else {
            return nil
        }
        let eyesOpenProbability: (leftEye: CGFloat, rightEye: CGFloat)?
        if face.hasLeftEyeOpenProbability && face.hasRightEyeOpenProbability {
            eyesOpenProbability = (face.leftEyeOpenProbability, face.rightEyeOpenProbability)
        } else {
            eyesOpenProbability = nil
        }
        self.eyesOpenProbability = eyesOpenProbability
        let eulerAngle: (x: CGFloat, y: CGFloat, z: CGFloat)?
        if face.hasHeadEulerAngleX && face.hasHeadEulerAngleY && face.hasHeadEulerAngleZ {
            eulerAngle = (face.headEulerAngleX, face.headEulerAngleY, face.headEulerAngleZ)
        } else {
            eulerAngle = nil
        }
        self.eulerAngle = eulerAngle
        self.faceBox = faceContour.toFacebox(frameSize: frameSize)
    }
}
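
// Sketch of consuming the optional classification results (the 0.3 blink
// threshold is a hypothetical value, not from this codebase; MLKit only
// reports these probabilities when classificationMode is .all, as configured
// above):
//
//     if let eyes = face.eyesOpenProbability,
//        min(eyes.leftEye, eyes.rightEye) < 0.3 {
//         // Likely a blink.
//     }
//     if let angle = face.eulerAngle {
//         // angle.y is the head's left/right rotation in degrees.
//     }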

fileprivate extension MLKitFaceDetection.FaceContour {
    /// Computes the axis-aligned bounding box of the contour points,
    /// normalized to the video frame size (all fields in 0...1).
    func toFacebox(frameSize: CGSize) -> NormalizedFaceBox {
        var minX = CGFloat.greatestFiniteMagnitude
        var minY = CGFloat.greatestFiniteMagnitude
        var maxX = -CGFloat.greatestFiniteMagnitude
        var maxY = -CGFloat.greatestFiniteMagnitude
        points.forEach { point in
            minX = min(minX, point.x)
            minY = min(minY, point.y)
            maxX = max(maxX, point.x)
            maxY = max(maxY, point.y)
        }
        return NormalizedFaceBox(
            xCenter: ((maxX - minX) / 2 + minX) / frameSize.width,
            yCenter: ((maxY - minY) / 2 + minY) / frameSize.height,
            width: (maxX - minX) / frameSize.width,
            height: (maxY - minY) / frameSize.height
        )
    }
}
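
// Worked example with hypothetical numbers: contour points spanning
// (minX, minY) = (120, 60) to (maxX, maxY) = (280, 220) in a 320x240 frame
// give a center of ((280-120)/2 + 120, (220-60)/2 + 60) = (200, 140), so
// xCenter = 200/320 = 0.625, yCenter = 140/240 ≈ 0.583,
// width = 160/320 = 0.5, height = 160/240 ≈ 0.667.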

fileprivate extension UIImage {
    func resizedImage(size: CGSize) -> UIImage? {
        UIGraphicsBeginImageContextWithOptions(size, false, scale)
        defer { UIGraphicsEndImageContext() }
        draw(in: CGRect(origin: .zero, size: size))
        let image = UIGraphicsGetImageFromCurrentImageContext()
        // Round-trip through encoded data, as in MLKit's sample code; it is
        // unclear why this is needed.
        return image?.data.flatMap(UIImage.init)
    }

    private var data: Data? {
        pngData() ?? jpegData(compressionQuality: Constant.jpegCompressionQuality)
    }

    private enum Constant {
        static let jpegCompressionQuality: CGFloat = 0.8
    }
}

extension VisionPoint: FaceContourPoint {
}

extension FaceContour: Contour {
}

extension VitalSignEngineCore.FaceLandmarkType {
    var asContourType: MLKitFaceDetection.FaceContourType? {
        switch self {
        case .faceOval: return .face
        case .leftEyebrowTop: return .leftEyebrowTop
        case .leftEyebrowBottom: return .leftEyebrowBottom
        case .rightEyebrowTop: return .rightEyebrowTop
        case .rightEyebrowBottom: return .rightEyebrowBottom
        case .leftEye: return .leftEye
        case .rightEye: return .rightEye
        case .upperLipTop: return .upperLipTop
        case .upperLipBottom: return .upperLipBottom
        case .lowerLipTop: return .lowerLipTop
        case .lowerLipBottom: return .lowerLipBottom
        case .noseBridge: return .noseBridge
        case .noseBottom: return .noseBottom
        case .leftCheek: return .leftCheek
        case .rightCheek: return .rightCheek
        @unknown default: return nil
        }
    }
}