I'm doing a motion detection program where it snaps an image when it detects movement and snaps an image of the person's face if in view while this is all recorded and sends it all to Dropbox.
It's moving very slowly and lagging like crazy, showing 1 frame in like a minute. Is there a way to optimize it?
I'm using a Raspberry Pi to code all this, and a webcam.
import sys
sys.path.append('/usr/local/lib/python3.4/site-packages')
import numpy as np
import cv2
import imutils
from imutils import contours
import datetime
import time
import dropbox
#Function fo Drawing rect and changing text to REC
def draw_rect_movement(c):
#Draw Rectangle around found contour object
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), 2)
text = "REC"
return c
def saveNupload(roi_color):
#writing image of face as png in the file
timestring = time.strftime("%Y_%m_%d_%H_%M_%S")
face_timestr = 'face_' + timestring + '.png'
cv2.imwrite(face_timestr, roi_color)
#Opening for [r]eading as [b]inary
FaceFile = open(face_timestr, mode = "rb")
#Reads the number of bytes of the video
data = FaceFile.read()
#Setting the save location with file name
SavetoLocation = '/FYP_Face_Save/'+ face_timestr
SaveToLocation = str(SavetoLocation)
dbx.files_upload(data, SaveToLocation)
#Close for reading and binary
FaceFile.close()
dbx = dropbox.Dropbox('Access Token')
dbx.users_get_current_account()
#cap = cv2.VideoCapture("/home/pi/Desktop/Proj/VideoTestSample.mp4")
cap = cv2.VideoCapture(1)
#Creating froeground and removing Background
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
#Set format
fourcc = cv2.VideoWriter_fourcc(*'XVID')
#Get Datetime
timestr = time.strftime("%Y_%m_%d_%H_%M_%S")
#Creating name of folder
timestr = timestr + '.avi'
#Setting Name, Format, FPS, FrameSize
out = cv2.VideoWriter(timestr,fourcc, 10.0, (640, 480))
#setting casacade for use
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#setting criteria for termination
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
#As long as the VideoCapture is open loop to show the frames
while (cap.isOpened()):
#capture frame-by-frame
(grabbed, frame) = cap.read()
text = " "
if not grabbed:
break
#Convert frame to Black white and gray
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#placing Cascade detection
faces = face_cascade.detectMultiScale(gray, 1.2,)
#Drawing around the detected "face"
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x -20,y-20), (x + w + 20, y + h + 20), (255,0,0), 2)
roi_color = frame[y-20:y + h + 20, x -20:x + w + 20]
saveNupload(roi_color = roi_color)
#Apply the Background SubtractionMOG2
fgmask = fgbg.apply(gray)
#Erode away the boundaries of the foreground object
thresh = cv2.erode(fgmask, None, iterations=2)
#Set detect as none
detect = None
#FindContours returns a list of the outlines of the white shapes in the mask (and a heirarchy that we shall ignore)
(_,cnts,hierarchy) = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#Draw the DateTime on the bottom left hand corner
cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,(0,0,255), 1)
#detect is object found or not found
detect = (_,cnts,hierarchy)
#if object found is detected run these codes
if detect == (_,cnts,hierarchy):
#if area of object is lower than 300 ignore it
for (i,c) in enumerate(cnts):
if cv2.contourArea(c) < 1100:
print("ignore small contours", cv2.contourArea(c))
continue
#Uncomment this function call to display motion detected
###draw_rect_movement(c = c)
#Temporary code
text = "Movement Detected ... Snapping"
#Capture image
timestring = time.strftime("%Y_%m_%d_%H_%M_%S")
image_timestr = 'image_' + timestring + '.png'
cv2.imwrite(image_timestr, frame)
#Opening for [r]eading as [b]inary
ImageFile = open(image_timestr, mode = "rb")
#Reads the number of bytes of the video
data = ImageFile.read()
#Setting the save location with file name
SavetoLocation = '/FYP_Image_Save/'+ image_timestr
SaveToLocation = str(SavetoLocation)
dbx.files_upload(data, SaveToLocation)
#Close for reading and binary
ImageFile.close()
detect= None
if detect != (_,cnts,hierarchy):
continue
elif detect != (_,cnts,hierarchy):
print("Not Snaping")
else:
continue
#Draw the text at top right hand corner
cv2.putText(frame, "{}". format(text), (10,20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
#Write which window into video in this case Frame
out.write(frame)
#Display the following windows
cv2.imshow('frame',frame)
cv2.imshow('gray', gray)
cv2.imshow('fgmask', fgmask)
#if q is pressed break loop
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#Stop recording
out.release()
#Kill all windows
cap.release()
cv2.destroyAllWindows()
#Opening for [r]eading as [b]inary
VideoFile = open(timestr, mode = "rb")
#Reads the number of bytes of the video
data = VideoFile.read()
#Setting the save location with file name
SavetoLocation = '/FYP_Video_Save/'+timestr
SaveToLocation = str(SavetoLocation)
#Upload the file
print("Sending to Dropbox")
dbx.files_upload(data, SaveToLocation)
#Close for reading and binary
VideoFile.close()
-
\$\begingroup\$ how often is it detecting movement? Make it less often. What is the criteria for "movement" being made? make it looser. How much data is saved in the picture? reduce the size of the picture. secondly why are you converting each picture? that takes a lot of computational power. why not just stick with the original colour? just some suggestions to get you started. \$\endgroup\$BenKoshy– BenKoshy2017年05月16日 14:14:16 +00:00Commented May 16, 2017 at 14:14
-
\$\begingroup\$ @BKSpurgeon The "movement" detection is non-stop, i have already limited the amount of movement by alot so only if a person were to enter the frame or the door opening (The camera is only 1-1.5 meters away from the door) it will start to activate the snapping. I dont understand what you mean by What is the criteria for "movement" being made? make it looser. I am converting the pictures to read binary because if i don't it will not be able to upload the pictures to dropbox \$\endgroup\$Marciano Ng– Marciano Ng2017年05月17日 00:21:57 +00:00Commented May 17, 2017 at 0:21
-
\$\begingroup\$ @MarcianNg what i mean is: if one pixel changes will that register a movement? or if many pixels change will that register a movement? unfortunately, i couldn't understand your code: when and what are you uploading to drop box? \$\endgroup\$BenKoshy– BenKoshy2017年05月17日 00:32:52 +00:00Commented May 17, 2017 at 0:32
-
\$\begingroup\$ @BKSpurgeon It has to be a group of pixels together for it to detect as movement i have already limited it to an area of 1100 for it to recognize as a movement. It is immediately saved as a png file then converted to binary and is uploaded to dropbox. Same goes for the face as well. But the video is uploaded at the very end AFTER the program is breaked. \$\endgroup\$Marciano Ng– Marciano Ng2017年05月17日 03:50:16 +00:00Commented May 17, 2017 at 3:50
-
\$\begingroup\$ ok. i wish i could help more but i couldn't make out much from the code. anyways, good luck. \$\endgroup\$BenKoshy– BenKoshy2017年05月17日 03:54:49 +00:00Commented May 17, 2017 at 3:54
1 Answer 1
sys.path.append('/usr/local/lib/python3.4/site-packages')
Recommend you use a proper package manager to install numpy
and friends,
such as conda
, or pip
virtualenv.
(x, y, w, h) = cv2.boundingRect(c)
No need for (
extra parens )
on the tuple unpack.
Recommend you run $ flake8
, and heed its advice,
preferring identifiers like e.g. save_and_upload
or face_file
.
SaveToLocation = str(SavetoLocation)
You already had a str
, so the function call does nothing.
#Creating froeground
Typo.
while (cap.isOpened()):
No need for (
extra parens )
.
Same remark for the grabbed, frame
tuple unpack.
detect= None
if detect != (_,cnts,hierarchy):
continue
An unconditional continue
would suffice.
The while
loop in __main__
is far too long, and should be packaged up
in one or more helper functions.
You didn't post any profiling / timing data, but I assume you spend the bulk of elapsed time here:
faces = face_cascade.detectMultiScale(gray, 1.2)
Following the advice of BKSpurgeon and Aleksandar,
it would make sense to guard this with some cheap check for changed pixels,
perhaps using cv2.absdiff()
,
before requesting the full-blown face finder.
Histograms certainly are a good way of summarizing images and noticing gross differences.
Explore related questions
See similar questions with these tags.