I’m attempting to integrate OpenCV with a native iOS app. The issue I’m facing is that my code is not able to detect faces correctly and draw a bounding box around them. Here is the full source code; if you decide to clone the project, please drag and drop the OpenCV iOS framework into it yourself, since it was too large for GitHub and I had to remove it.
So here is my Objective-C++ code:
#import <opencv2/opencv.hpp>
#import <opencv2/imgcodecs/ios.h>
#import "OpenCVWrapper.h"
/*
 * Add a method convertToMat to the UIImage class
 */
@interface UIImage (OpenCVWrapper)
- (void)convertToMat:(cv::Mat *)pMat :(bool)alphaExists;
@end
@implementation UIImage (OpenCVWrapper)
- (void)convertToMat:(cv::Mat *)pMat :(bool)alphaExists {
    UIImageOrientation orientation = self.imageOrientation;
    cv::Mat mat;
    UIImageToMat(self, mat, alphaExists);
    switch (orientation) {
        case UIImageOrientationRight:
            cv::rotate(mat, *pMat, cv::ROTATE_90_CLOCKWISE);
            break;
        case UIImageOrientationLeft:
            cv::rotate(mat, *pMat, cv::ROTATE_90_COUNTERCLOCKWISE);
            break;
        case UIImageOrientationDown:
            cv::rotate(mat, *pMat, cv::ROTATE_180);
            break;
        case UIImageOrientationUp:
        default:
            *pMat = mat;
            break;
    }
}
@end
@implementation OpenCVWrapper
+ (NSArray<NSValue *> *)detectFaceRectsInUIImage:(UIImage *)image {
    // Convert UIImage to cv::Mat
    cv::Mat mat;
    [image convertToMat:&mat :false];

    // Load the face detection model
    NSString *faceCascadePath = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_default" ofType:@"xml"];
    cv::CascadeClassifier faceCascade;
    if (!faceCascade.load([faceCascadePath UTF8String])) {
        NSLog(@"Error loading face detection model");
        return @[];
    }

    // Convert the image to grayscale
    cv::Mat gray;
    cv::cvtColor(mat, gray, cv::COLOR_BGR2GRAY);
    cv::equalizeHist(gray, gray);

    // Detect faces
    std::vector<cv::Rect> faces;
    faceCascade.detectMultiScale(gray, faces, 1.1, 2, 0 | cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));

    // Convert cv::Rect to CGRect and wrap in NSValue
    NSMutableArray<NSValue *> *faceRects = [NSMutableArray arrayWithCapacity:faces.size()];
    for (const auto &face : faces) {
        CGRect faceRect = CGRectMake(face.x, face.y, face.width, face.height);
        [faceRects addObject:[NSValue valueWithCGRect:faceRect]];
    }
    return [faceRects copy];
}
@end
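(The header is not shown here; for the Swift call OpenCVWrapper.detectFaceRects(in:) below to resolve, OpenCVWrapper.h is assumed to declare the class method roughly as + (NSArray<NSValue *> *)detectFaceRectsInUIImage:(UIImage *)image; which the Swift importer surfaces as detectFaceRects(in:).)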
The following is my Swift code:
import UIKit
import AVFoundation
import VideoToolbox
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    var previewView: UIView!
    var boxView: UIView!
    // Camera capture required properties
    var videoDataOutput: AVCaptureVideoDataOutput!
    var videoDataOutputQueue: DispatchQueue!
    var previewLayer: AVCaptureVideoPreviewLayer!
    var captureDevice: AVCaptureDevice!
    let session = AVCaptureSession()
    private var faceOverlayView: FaceOverlayView!

    override func viewDidLoad() {
        super.viewDidLoad()
        previewView = UIView(frame: CGRect(x: 0,
                                           y: 0,
                                           width: UIScreen.main.bounds.size.width,
                                           height: UIScreen.main.bounds.size.height))
        previewView.contentMode = UIView.ContentMode.scaleAspectFit
        view.addSubview(previewView)

        boxView = UIView(frame: self.view.frame)
        view.addSubview(boxView)

        // Initialize face overlay view
        faceOverlayView = FaceOverlayView(frame: view.bounds)
        view.addSubview(faceOverlayView)

        setupAVCapture()
    }
    override var shouldAutorotate: Bool {
        if (UIDevice.current.orientation == UIDeviceOrientation.landscapeLeft ||
            UIDevice.current.orientation == UIDeviceOrientation.landscapeRight ||
            UIDevice.current.orientation == UIDeviceOrientation.unknown) {
            return false
        } else {
            return true
        }
    }
    func setupAVCapture() {
        session.sessionPreset = AVCaptureSession.Preset.vga640x480
        guard let device = AVCaptureDevice
            .default(AVCaptureDevice.DeviceType.builtInWideAngleCamera,
                     for: .video,
                     position: AVCaptureDevice.Position.back) else {
            return
        }
        captureDevice = device
        beginSession()
    }
    func beginSession() {
        var deviceInput: AVCaptureDeviceInput!
        do {
            deviceInput = try AVCaptureDeviceInput(device: captureDevice)
            guard deviceInput != nil else {
                print("error: can't get deviceInput")
                return
            }
            if self.session.canAddInput(deviceInput) {
                self.session.addInput(deviceInput)
            }

            videoDataOutput = AVCaptureVideoDataOutput()
            videoDataOutput.alwaysDiscardsLateVideoFrames = true
            videoDataOutputQueue = DispatchQueue(label: "VideoDataOutputQueue")
            videoDataOutput.setSampleBufferDelegate(self, queue: self.videoDataOutputQueue)
            if session.canAddOutput(self.videoDataOutput) {
                session.addOutput(self.videoDataOutput)
            }
            videoDataOutput.connection(with: .video)?.isEnabled = true

            previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
            previewLayer.videoGravity = AVLayerVideoGravity.resizeAspect
            let rootLayer: CALayer = self.previewView.layer
            rootLayer.masksToBounds = true
            previewLayer.frame = rootLayer.bounds
            rootLayer.addSublayer(self.previewLayer)

            DispatchQueue.global(qos: .userInitiated).async {
                self.session.startRunning()
            }
        } catch let error as NSError {
            deviceInput = nil
            print("error: \(error.localizedDescription)")
        }
    }
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        guard let image = UIImage(pixelBuffer: imageBuffer) else {
            return
        }
        detectFaces(in: image)
        //stopCamera()
    }

    func stopCamera() {
        session.stopRunning()
    }
    private func detectFaces(in image: UIImage) {
        guard let faceRects = OpenCVWrapper.detectFaceRects(in: image) else { return }
        DispatchQueue.main.async {
            let viewWidth = self.faceOverlayView.bounds.width
            let viewHeight = self.faceOverlayView.bounds.height
            let imageWidth = image.size.width
            let imageHeight = image.size.height

            // Map rects from image coordinates into the aspect-fit view coordinates
            let scaleX = viewWidth / imageWidth
            let scaleY = viewHeight / imageHeight
            let scaleFactor = min(scaleX, scaleY)
            let offsetX = (viewWidth - imageWidth * scaleFactor) / 2
            let offsetY = (viewHeight - imageHeight * scaleFactor) / 2

            let transformedRects = faceRects.map { $0.cgRectValue }.map { face in
                return CGRect(
                    x: face.origin.x * scaleFactor + offsetX,
                    y: face.origin.y * scaleFactor + offsetY,
                    width: face.size.width * scaleFactor,
                    height: face.size.height * scaleFactor
                )
            }
            self.faceOverlayView.setFaces(transformedRects)
        }
    }
}
extension UIImage {
    public convenience init?(pixelBuffer: CVPixelBuffer) {
        var cgImage: CGImage?
        VTCreateCGImageFromCVPixelBuffer(pixelBuffer, options: nil, imageOut: &cgImage)
        guard let cgImage = cgImage else {
            return nil
        }
        self.init(cgImage: cgImage)
    }
}
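FaceOverlayView is not included above; assume it is just a transparent view that strokes each rect handed to setFaces(_:). A minimal sketch of such a view (the drawing details are placeholders, not necessarily the exact implementation):

import UIKit

// Minimal sketch of FaceOverlayView: a transparent overlay that
// strokes each face rect handed to setFaces(_:).
class FaceOverlayView: UIView {
    private var faces: [CGRect] = []

    override init(frame: CGRect) {
        super.init(frame: frame)
        backgroundColor = .clear           // let the camera preview show through
        isUserInteractionEnabled = false   // don't swallow touches
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    func setFaces(_ rects: [CGRect]) {
        faces = rects
        setNeedsDisplay()                  // redraw with the new rects
    }

    override func draw(_ rect: CGRect) {
        guard let ctx = UIGraphicsGetCurrentContext() else { return }
        ctx.setStrokeColor(UIColor.red.cgColor)
        ctx.setLineWidth(2)
        for face in faces {
            ctx.stroke(face)
        }
    }
}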
I’m able to build the project fine; it’s just that the face detection part is a bit off, and I’m not sure what else needs to be added.
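One variable worth noting when reproducing this: AVCaptureVideoDataOutput delivers sample buffers in landscape orientation by default, so the UIImage built from each frame may not match the portrait preview. A sketch of pinning the connection to portrait (an assumption to experiment with, not something the project above already does), placed right after the output is added to the session:

// Hypothetical addition after session.addOutput(videoDataOutput):
// force the video connection to portrait so the frames handed to
// OpenCV match the orientation of the on-screen preview.
if let connection = videoDataOutput.connection(with: .video),
   connection.isVideoOrientationSupported {
    connection.videoOrientation = .portrait
}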