Note: Code works fine on Windows but crashes on Android
I converted the Kivy app to an APK using Buildozer in Google Colab. The build completes fine, but when I install the APK the app crashes.
I have main.py and NewAction3.h5 in the /content folder of Google Colab, and I used Buildozer to generate buildozer.spec.
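For reference, the build itself follows the standard Buildozer-in-Colab workflow, roughly this cell (the exact system package list is from memory and may differ):

```
# Colab build cell (sketch of the usual workflow; package names are approximate)
!pip install buildozer cython
!sudo apt-get update
!sudo apt-get install -y git zip unzip openjdk-17-jdk autoconf libtool pkg-config zlib1g-dev libffi-dev libssl-dev
!buildozer init                # generates buildozer.spec
!buildozer -v android debug    # writes the APK to ./bin/
```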
I'm using tensorflow, mediapipe, opencv-python, numpy, kivy, and kivymd.
I'm also using the camera to capture video, store it, and then use it on mobile.
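One thing I'm unsure about: my buildozer.spec below has no `android.permissions` entry, and as far as I know camera and storage access on Android need `android.permissions = CAMERA, WRITE_EXTERNAL_STORAGE` in the spec plus a runtime request on Android 6+. A minimal sketch of that runtime request (python-for-android's `android.permissions` module; I haven't confirmed whether this is related to the crash):

```
from kivy.utils import platform

if platform == 'android':
    # Runtime permission prompt, needed on Android 6+ in addition to the
    # android.permissions entry in buildozer.spec.
    from android.permissions import request_permissions, Permission
    request_permissions([Permission.CAMERA, Permission.WRITE_EXTERNAL_STORAGE])
```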
**Below is my buildozer.spec:**
```
[app]
title = My Application
package.name = myapp
package.domain = org.test
source.dir = .
source.include_exts = py,png,jpg,kv,atlas,h5
version = 0.1
requirements = python3,kivy,kivymd,mediapipe,tensorflow,numpy,opencv-python
orientation = portrait
osx.python_version = 3
osx.kivy_version = 1.9.1
fullscreen = 0
android.archs = arm64-v8a, armeabi-v7a
android.allow_backup = True
ios.kivy_ios_url = https://github.com/kivy/kivy-ios
ios.kivy_ios_branch = master
ios.ios_deploy_url = https://github.com/phonegap/ios-deploy
ios.ios_deploy_branch = 1.10.0
ios.codesign.allowed = false
[buildozer]
log_level = 2
warn_on_root = 1
```
Below is my main.py code:
```
from kivymd.app import MDApp
from kivymd.uix.boxlayout import MDBoxLayout
from kivymd.uix.button import MDRaisedButton
from kivy.uix.image import Image
from kivy.graphics.texture import Texture
import cv2
from kivy.clock import Clock
import numpy as np
import mediapipe
from tensorflow.keras.models import load_model
from kivymd.uix.label import MDLabel
class MainApp(MDApp):
    def build(self):
        layout = MDBoxLayout(orientation='vertical')
        self.image = Image()
        layout.add_widget(self.image)
        self.save_img_button = MDRaisedButton(text="Start Recording",
                                              pos_hint={'center_x': .5, 'center_y': .5},
                                              size_hint=(None, None))
        self.save_img_button.bind(on_press=self.take_pic)
        layout.add_widget(self.save_img_button)
        self.prediction_label = MDLabel(text="", halign='center')
        layout.add_widget(self.prediction_label)
        self.capture = cv2.VideoCapture(0)
        # poll the camera at ~30 fps
        Clock.schedule_interval(self.load_video, 1.0 / 30.0)
        self.recording = False
        return layout
    def load_video(self, *args):
        if self.recording:
            ret, frame = self.capture.read()
            self.out.write(frame)
        else:
            ret, frame = self.capture.read()
            self.image_frame = frame
            # OpenCV frames have a top-left origin, so flip vertically for Kivy's texture
            buffer = cv2.flip(frame, 0).tobytes()
            texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt="bgr")
            texture.blit_buffer(buffer, colorfmt="bgr", bufferfmt="ubyte")
            self.image.texture = texture
    def take_pic(self, *args):
        if not self.recording:
            self.recording = True
            # record at 8 fps into output.avi
            self.out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'MJPG'), 8, (640, 480))
            self.save_img_button.text = "Stop Recording"
            # stop automatically after 6 seconds
            Clock.schedule_once(self.stop_recording, 6)
        else:
            self.recording = False
            self.out.release()
            self.convert_video()
            self.save_img_button.text = "Start Recording"
    def stop_recording(self, *args):
        self.recording = False
        self.out.release()
        self.convert_video()
        lstm_model = load_model('NewAction3.h5')
        self.prediction_label.text = self.prediction(r'converted_output.avi', lstm_model)
        self.save_img_button.text = "Start Recording"
    def convert_video(self):
        # re-encode the recording at the capture's reported fps and frame size
        video_capture = cv2.VideoCapture('output.avi')
        frame_count = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(video_capture.get(cv2.CAP_PROP_FPS))
        width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        out = cv2.VideoWriter('converted_output.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps, (width, height))
        for i in range(frame_count):
            ret, frame = video_capture.read()
            out.write(frame)
        video_capture.release()
        out.release()
    def on_stop(self):
        self.capture.release()
    def prediction(self, video_path, lstm_model):
        actions = ['banana', 'bar', 'basement', 'basketball', 'bath', 'bathroom', 'bear', 'beard', 'bed', 'bedroom']
        sequence = []
        cap = cv2.VideoCapture(video_path)  # video path
        mp_holistic = mediapipe.solutions.holistic
        with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            # skip frames so roughly 50 are sampled evenly across the clip
            toskip = int(frame_count // 50)
            if toskip == 0:
                toskip = 1
            frame_num = 0
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:  # end of stream; frame would be None
                    break
                cap.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
                frame_num = frame_num + toskip
                # rotate video right way up
                (h, w) = frame.shape[:2]
                rotpoint = (w // 2, h // 2)
                rotmat = cv2.getRotationMatrix2D(rotpoint, 180, 1.0)
                dim = (w, h)
                intermediateFrame = cv2.warpAffine(frame, rotmat, dim)
                # cropping
                size = intermediateFrame.shape
                finalFrame = intermediateFrame[80:(size[0] - 200), 30:(size[1] - 30)]
                # keypoint prediction
                image = cv2.cvtColor(finalFrame, cv2.COLOR_BGR2RGB)  # COLOR CONVERSION BGR 2 RGB
                image.flags.writeable = False                        # image is no longer writeable
                results = holistic.process(image)                    # make prediction
                image.flags.writeable = True                         # image is now writeable
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)       # COLOR CONVERSION RGB 2 BGR
                # extract and append keypoints
                pose = np.array([[res.x, res.y, res.z, res.visibility] for res in
                                 results.pose_landmarks.landmark]).flatten() if results.pose_landmarks else np.zeros(33 * 4)
                lh = np.array([[res.x, res.y, res.z] for res in
                               results.left_hand_landmarks.landmark]).flatten() if results.left_hand_landmarks else np.zeros(21 * 3)
                rh = np.array([[res.x, res.y, res.z] for res in
                               results.right_hand_landmarks.landmark]).flatten() if results.right_hand_landmarks else np.zeros(21 * 3)
                keypoints = np.concatenate([pose, lh, rh])
                sequence.append(keypoints)
                if len(sequence) == 50:
                    cap.release()
                    break
        cap.release()
        cv2.destroyAllWindows()
        sequence = np.expand_dims(sequence, axis=0)[0]  # list of 50 vectors -> (50, 258) array
        res = lstm_model.predict(np.expand_dims(sequence, axis=0))  # batch shape (1, 50, 258)
        print(actions[np.argmax(res)])
        return actions[np.argmax(res)]
if __name__ == '__main__':
    MainApp().run()
```
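For completeness, the feature layout the model sees: each sampled frame contributes 33×4 pose + 21×3 left-hand + 21×3 right-hand values = 258 features, and `lstm_model.predict` receives a `(1, 50, 258)` batch. A standalone sanity check of those shapes (my reading of the code above; I'm assuming `NewAction3.h5` was trained on exactly this input):

```
import numpy as np

# Per-frame keypoint vector: pose (33 landmarks x 4 values) + two hands (21 x 3 each)
pose = np.zeros(33 * 4)
lh = np.zeros(21 * 3)
rh = np.zeros(21 * 3)
frame_vec = np.concatenate([pose, lh, rh])
assert frame_vec.shape == (258,)   # 132 + 63 + 63

# 50 sampled frames, batched the way prediction() feeds lstm_model.predict()
batch = np.expand_dims(np.stack([frame_vec] * 50), axis=0)
assert batch.shape == (1, 50, 258)
```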