Sunday, July 7, 2024
Home › iOS Development › ios – Transform AVMetadataObject coordinates to captured image

ios – Transform AVMetadataObject coordinates to captured image


I am attempting to detect a QR code and take a cropped picture of it using Swift. However, I cannot transform the coordinates of the detected AVMetadataObject to the correct image-space coordinates. This is my prototyping code:

import SwiftUI
import AVFoundation

/// A view controller that scans for QR codes with `AVCaptureMetadataOutput`,
/// captures a still photo when one is found, and shows the photo (with the
/// detected corners outlined) in an image view at the bottom of the screen.
class CodeScannerViewController: UIViewController {

    var captureSession: AVCaptureSession!
    var previewLayer: AVCaptureVideoPreviewLayer!
    var photoOutput: AVCapturePhotoOutput!
    var supportedCodeTypes: [AVMetadataObject.ObjectType] = [.qr]
    var imageView: UIImageView!
    // Debounce flag so a single detection triggers a single photo capture.
    var isCapturing = false
    // Corners of the last detected code, in photo-output coordinates.
    var codeCorners = [CGPoint]()

    override func viewDidLoad() {
        super.viewDidLoad()

        captureSession = AVCaptureSession()

        guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else {
            print("Failed to get the camera device")
            return
        }

        do {
            let input = try AVCaptureDeviceInput(device: videoCaptureDevice)
            if captureSession.canAddInput(input) {
                captureSession.addInput(input)
            } else {
                print("Failed to add input to capture session")
                return
            }
        } catch {
            print("Failed to create input from video capture device")
            return
        }

        // Check before adding, for symmetry with the metadata output below.
        photoOutput = AVCapturePhotoOutput()
        if captureSession.canAddOutput(photoOutput) {
            captureSession.addOutput(photoOutput)
        } else {
            print("Failed to add photo output to capture session")
            return
        }

        let metadataOutput = AVCaptureMetadataOutput()
        if captureSession.canAddOutput(metadataOutput) {
            captureSession.addOutput(metadataOutput)
            metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
            metadataOutput.metadataObjectTypes = supportedCodeTypes
        } else {
            print("Failed to add metadata output to capture session")
            return
        }

        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = view.layer.bounds
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.addSublayer(previewLayer)

        // NOTE(review): Apple recommends calling startRunning() off the main
        // thread because it blocks; fine for a prototype.
        captureSession.startRunning()

        imageView = UIImageView()
        imageView.translatesAutoresizingMaskIntoConstraints = false
        imageView.contentMode = .scaleAspectFit
        view.addSubview(imageView)

        NSLayoutConstraint.activate([
            imageView.heightAnchor.constraint(equalToConstant: 300),
            imageView.widthAnchor.constraint(equalToConstant: 300),
            imageView.centerXAnchor.constraint(equalTo: view.centerXAnchor),
            imageView.bottomAnchor.constraint(equalTo: view.safeAreaLayoutGuide.bottomAnchor)
        ])
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        if captureSession.isRunning {
            captureSession.stopRunning()
        }
    }

    /// Returns a copy of `image` with a red outline drawn through `corners`.
    /// - Parameters:
    ///   - image: The captured photo to annotate.
    ///   - corners: Polygon vertices in the image's coordinate space.
    /// - Returns: The annotated image, or the original if `corners` is empty.
    func drawRectangleOn(image: UIImage, corners: [CGPoint]) -> UIImage {
        // Guard against an empty corner list — corners[0] below would crash.
        guard !corners.isEmpty else { return image }

        let imageSize = image.size
        let scale: CGFloat = 0  // 0 = use the device's main screen scale
        UIGraphicsBeginImageContextWithOptions(imageSize, false, scale)
        defer { UIGraphicsEndImageContext() }
        let context = UIGraphicsGetCurrentContext()!

        image.draw(at: CGPoint.zero)

        // NOTE(review): the original also called previewLayer.draw(in:) here,
        // which renders the (differently sized) preview layer into the photo
        // context and does not belong in this composition — removed.

        let path = CGMutablePath()
        path.addLines(between: corners)
        path.addLine(to: corners[0])  // close the polygon
        context.addPath(path)
        context.setLineWidth(5)
        context.setStrokeColor(UIColor.red.cgColor)
        context.drawPath(using: .stroke)

        let newImage = UIGraphicsGetImageFromCurrentImageContext()!
        return newImage
    }
}

// MARK: - AVCaptureMetadataOutputObjectsDelegate
extension CodeScannerViewController: AVCaptureMetadataOutputObjectsDelegate {
    /// Called on the main queue when a supported code is detected.
    /// Transforms the metadata object into the photo output's coordinate
    /// space, remembers its corners, and triggers a single photo capture.
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        guard let metadataObject = metadataObjects.first else {
            return
        }

        // Avoid force-unwrapping the video connection; bail out gracefully
        // if it (or the transformed object) is unavailable.
        guard let photoConnection = photoOutput.connection(with: .video),
              let photoCodeObject = photoOutput.transformedMetadataObject(for: metadataObject, connection: photoConnection) as? AVMetadataMachineReadableCodeObject else {
            return
        }

        if photoCodeObject.stringValue != nil && !isCapturing {
            isCapturing = true
            codeCorners = photoCodeObject.corners
            let photoSettings = AVCapturePhotoSettings()
            photoSettings.photoQualityPrioritization = .speed
            photoOutput.capturePhoto(with: photoSettings, delegate: self)
        }
    }
}

// MARK: - AVCapturePhotoCaptureDelegate
extension CodeScannerViewController: AVCapturePhotoCaptureDelegate {
    /// Receives the captured photo, annotates it with the corners recorded
    /// at detection time, and displays it. Re-arms capture after one second.
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        // The original silently ignored `error`; report it and re-arm.
        if let error = error {
            print("Photo capture failed: \(error)")
            isCapturing = false
            return
        }
        guard let imageData = photo.fileDataRepresentation() else {
            print("Error while producing image from photo capture data.")
            return
        }
        guard let codeImage = UIImage(data: imageData) else {
            print("Unable to generate UIImage from image data.")
            return
        }
        imageView.image = drawRectangleOn(image: codeImage, corners: codeCorners)
        // Simple debounce: allow the next capture after one second.
        DispatchQueue.main.asyncAfter(deadline: .now() + 1) {
            self.isCapturing = false
        }
    }
}
        
/// SwiftUI wrapper that hosts `CodeScannerViewController` in a SwiftUI view
/// hierarchy via `UIViewControllerRepresentable`.
struct CodeScannerView: UIViewControllerRepresentable {
    func makeUIViewController(context: Context) -> CodeScannerViewController {
        return CodeScannerViewController()
    }

    func updateUIViewController(_ uiViewController: CodeScannerViewController, context: Context) {
        // Update the view controller if needed (nothing to do for now).
    }
}

For testing purposes I draw a rectangle around the detected corners in drawRectangleOn(image:). Using photoOutput.transformedMetadataObject does not seem to give the correct result — the rectangle is not anywhere near the QR code on the image.

I also tried to use previewLayer.transformedMetadataObject(for: metadataObject) and converting the coordinates using this function:

    /// Scales points from the preview layer's coordinate space into the
    /// captured image's pixel space using a plain per-axis ratio.
    /// - Parameters:
    ///   - points: Points in `layer`'s bounds coordinate space.
    ///   - image: The captured image whose size defines the target space.
    ///   - layer: The preview layer the points were measured in.
    /// - Returns: The points scaled into `image`'s coordinate space.
    /// - Note: As the author observes, this ignores the layer's
    ///   `videoGravity` (e.g. `.resizeAspectFill` crops/offsets the video),
    ///   so the mapping is only correct when layer and image aspect ratios
    ///   match exactly.
    func convertToImageCoordinates(points: [CGPoint], image: UIImage, layer: CALayer) -> [CGPoint] {
        let scaleFactorX = image.size.width / layer.bounds.size.width
        let scaleFactorY = image.size.height / layer.bounds.size.height
        return points.map { CGPoint(x: $0.x * scaleFactorX, y: $0.y * scaleFactorY) }
    }

Which almost works, but I think this fails to take the video gravity into account.

I am also not entirely happy that the photo output is separate from the detection, which could lead to inconsistencies, but I was also unable to get the image data from the previewLayer.



Source link

RELATED ARTICLES

LEAVE A REPLY

Please enter your comment!
Please enter your name here

- Advertisment -
Google search engine

Most Popular

Recent Comments