I’ve made use of Apple’s ARKit documentation to create a simple ARKit utility which uses SceneKit & RealityKit.
I’m currently faced with the task of adding guidelines (mesh: to insert .showSceneUnderstanding).
Is there any particular method or something that I’m missing to access this data for displaying the mesh depth?
I’ve attached the code below (★ indicates the line I’m currently facing an error at).
import UIKit
import SceneKit
import ARKit
import RealityKit
class ViewController: UIViewController, ARSessionDelegate {
var trackingStateOK: Bool = false
let sphereNode = SCNNode(geometry: SCNSphere(radius: 0.01))
var tappedPointNodeOrigin: SCNNode?
var tappedPointNodeDest: SCNNode?
var lineNode = SCNNode()
var objectNode: SCNNode!
var distanceLabel = UILabel()
let coachingOverlayView = UIView()
//separating data acquisition from display
//var arProvider: ARProvider = ARProvider()
@IBOutlet var sceneView: ARSCNView!
//var sceneView:ARSCNView!
override func viewDidLoad() {
super.viewDidLoad()
//sceneView = ARSCNView(frame: view.bounds)
view.addSubview(sceneView)
sceneView.scene.rootNode.addChildNode(lineNode)
distanceLabel.text = ""
distanceLabel.frame = CGRect(x: 0, y: view.bounds.maxY - 200, width: view.bounds.width, height: 200)
view.addSubview(distanceLabel)
distanceLabel.textColor = .red
distanceLabel.textAlignment = .center
distanceLabel.numberOfLines = 3
distanceLabel.font = .systemFont(ofSize: 40, weight: .bold)
view.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(tap(recognizer:))))
//setupCoachingOverlay()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
let worldtracking = ARWorldTrackingConfiguration()
//varun added 28.12.2023
//adding the guidelines
worldtracking.sceneReconstruction = .meshWithClassification
worldtracking.environmentTexturing = .automatic
worldtracking.planeDetection = [.horizontal, .vertical]
//varun added code. requesting scene depth information in the configuration.
worldtracking.frameSemantics = [.sceneDepth, .smoothedSceneDepth]
//sceneView.session.run(worldtracking, options: [.removeExistingAnchors])
sceneView.session.run(worldtracking)
 ★//sceneView.debugOptions.insert(.showSceneUnderstanding) ★
sceneView.debugOptions = [.showPhysicsFields, .showCameras, .showFeaturePoints, .showSkeletons, .showWireframe, .showWorldOrigin]
sceneView.session.delegate = self
}
//func session(_ session: ARSession, cameraDidChangeTrackingState camera: ARCamera) {
func session(_ session: ARSession, didUpdate frame: ARFrame) {
trackingStateOK = true
if (frame.sceneDepth != nil) && (frame.smoothedSceneDepth != nil) {
//arData:
guard let sceneDepth = frame.smoothedSceneDepth ?? frame.sceneDepth else {
print("failed to acquire scene depth")
return
}
var pixelBuffer: CVPixelBuffer!
pixelBuffer = sceneDepth.depthMap
print(pixelBuffer)
}
}
}
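For context, this is the direction I’ve been looking at for the mesh overlay itself. As far as I can tell, .showSceneUnderstanding is a case of ARView.DebugOptions in RealityKit rather than of SCNDebugOptions, which would explain the error on the starred line. Below is only a minimal sketch of what I’m considering, assuming an ARView is acceptable in place of the ARSCNView (arView is just a placeholder name):

import ARKit
import RealityKit

// rough sketch: ARView (RealityKit) exposes .showSceneUnderstanding, ARSCNView does not
let arView = ARView(frame: view.bounds)
let configuration = ARWorldTrackingConfiguration()
// mesh reconstruction is only available on LiDAR devices, so check support first
if ARWorldTrackingConfiguration.supportsSceneReconstruction(.meshWithClassification) {
    configuration.sceneReconstruction = .meshWithClassification
}
configuration.frameSemantics = [.sceneDepth, .smoothedSceneDepth]
// occlude virtual content with the reconstructed mesh and draw the mesh as a debug overlay
arView.environment.sceneUnderstanding.options.insert(.occlusion)
arView.debugOptions.insert(.showSceneUnderstanding)
arView.session.run(configuration)
view.addSubview(arView)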
I would be grateful for any inputs or ideas pointing me in the right direction.
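In case it helps, this is roughly how I plan to read actual depth values out of the depth map once I have it, instead of just printing the buffer. It is only a sketch; depthAt(_:_:in:) is a hypothetical helper name, and I’m assuming the buffer uses the 32-bit float format documented for sceneDepth.depthMap:

import ARKit
import CoreVideo

// hypothetical helper: depth (in metres) at pixel (x, y) of a kCVPixelFormatType_DepthFloat32 buffer
func depthAt(_ x: Int, _ y: Int, in depthMap: CVPixelBuffer) -> Float {
    CVPixelBufferLockBaseAddress(depthMap, .readOnly)
    defer { CVPixelBufferUnlockBaseAddress(depthMap, .readOnly) }
    guard let base = CVPixelBufferGetBaseAddress(depthMap) else { return 0 }
    let rowBytes = CVPixelBufferGetBytesPerRow(depthMap)
    // one Float32 per pixel: step y rows down, then index x floats across
    let row = base.advanced(by: y * rowBytes).assumingMemoryBound(to: Float32.self)
    return row[x]
}

With that, inside session(_:didUpdate:) I could call something like depthAt(width / 2, height / 2, in: sceneDepth.depthMap), with width/height taken from CVPixelBufferGetWidth/CVPixelBufferGetHeight, to sample the centre of the frame.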
Thanks in advance!