1

I'm currently working on a project where I need to activate a Python script through my phone. I have Dataplicity installed and got their LED light example working (see the linked tutorial). I made my Python script executable and tried to set a path using the same format as the Dataplicity LED example, but I can't get it to work.

Because this project is for a class and I am short on time to finish it, I was wondering: could I loop a wire from the GPIO pin that turns on the LED (which works through the Dataplicity app) to another GPIO pin configured as an input, and then have my Python script activate when it receives that signal?

I understand that this is a pretty hacky idea, but as I said, I'm running low on time and I just need it to work.

#!/usr/bin/env python
# ---------------------------------------------------------------------------
# Import the necessary packages
# ---------------------------------------------------------------------------
import argparse
import time

import cv2
import imutils
import numpy as np
from imutils.video import FPS
from imutils.video import VideoStream

import I2C_LCD_driver
import motors
import RPi.GPIO as GPIO

# ---------------------------------------------------------------------------
# Set up the LCD screen
# ---------------------------------------------------------------------------
# Show a startup message while the model and camera initialise.
mylcd = I2C_LCD_driver.lcd()
mylcd.backlight(1)
mylcd.lcd_display_string("Initiating", 1, 0)
mylcd.lcd_display_string("Program", 2, 0)

# ---------------------------------------------------------------------------
# Set up object recognition
# ---------------------------------------------------------------------------
# Construct the argument parser.  The prototxt/model paths are hard-coded
# below, so only the detection-confidence threshold is configurable from
# the command line.
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--confidence", type=float, default=0.2,
                help="minimum probability to filter weak detections")
args = vars(ap.parse_args())

# Class labels MobileNet SSD was trained to detect, and one random colour
# per class (the colours are only used by drawing code that is currently
# disabled further down).
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# Load the serialized Caffe model from disk.
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(
    '/home/pi/google_home_starter/public/python/MobileNetSSD_deploy.prototxt.txt',
    '/home/pi/google_home_starter/public/python/MobileNetSSD_deploy.caffemodel')

# Initialize the video stream and allow the camera sensor to warm up.
print("[INFO] starting video stream...")
# vs = VideoStream(src=0).start()  # For USB camera
vs = VideoStream(usePiCamera=True).start()  # For Rpi camera
time.sleep(2.0)
mylcd.lcd_clear()
# ---------------------------------------------------------------------------
# Main detection loop
# ---------------------------------------------------------------------------
# `a` flips to 1 once a "person" is detected with more than 80% confidence,
# which ends the loop; pressing `q` also breaks out.
a = 0
while a != 1:
    # Grab the frame from the threaded video stream and resize it to have
    # a maximum width of 400 pixels.  A None frame (stream not ready yet)
    # is simply skipped.
    frame = vs.read()
    if frame is None:
        continue
    frame = imutils.resize(frame, width=400)

    # Convert the frame to a blob and pass it through the network to
    # obtain the detections.
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)
    net.setInput(blob)
    detections = net.forward()

    # Loop over the detections.
    for i in np.arange(0, detections.shape[2]):
        # Confidence (i.e. probability) associated with this prediction.
        confidence = detections[0, 0, i, 2]

        # Filter out weak detections below the command-line threshold.
        if confidence > args["confidence"]:
            # Index of the class label for this detection.
            idx = int(detections[0, 0, i, 1])

            if confidence * 100 > 75:
                # Show the detected class and its confidence on the LCD.
                mylcd.lcd_clear()
                mylcd.lcd_display_string(CLASSES[idx], 1, 0)
                confi = int(np.floor(confidence * 100))
                mylcd.lcd_display_string("{:d}".format(confi), 2, 1)
                mylcd.lcd_display_string("%", 2, 3)
                mylcd.lcd_display_string("confidence", 2, 5)
                time.sleep(1)
                # A confident "person" detection ends the main loop.
                if CLASSES[idx] == "person" and confidence * 100 > 80:
                    a = 1
            else:
                mylcd.lcd_clear()
                mylcd.lcd_display_string("I dont recognize", 1, 0)
                mylcd.lcd_display_string("anything!", 2, 0)

    # NOTE(review): cv2.imshow is disabled above, so there is no window for
    # waitKey to service; the `q`-to-quit key check is kept as in the
    # original script but may never fire without a window.
    key = cv2.waitKey(1) & 0xFF
    # If the `q` key was pressed, break from the loop.
    if key == ord("q"):
        break
# Loop has exited (person detected, or `q` pressed): trigger the motor.
# NOTE(review): motors is a project-local module; presumably stepper()
# drives a stepper motor once — confirm against motors.py.
motors.stepper()
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
GPIO.cleanup()
mylcd.lcd_clear()
mylcd.backlight(0)
asked Feb 22, 2018 at 17:46

1 Answer 1

1

What you describe should work. I am assuming both the GPIO are on the same Pi. If they are on different Pis you would also need to connect the grounds.

Why not post your script and see if that is easy to fix?

answered Feb 22, 2018 at 18:15
1
  • Ok, I got the script up Commented Feb 22, 2018 at 20:36

Your Answer

Draft saved
Draft discarded

Sign up or log in

Sign up using Google
Sign up using Email and Password

Post as a guest

Required, but never shown

Post as a guest

Required, but never shown

By clicking "Post Your Answer", you agree to our terms of service and acknowledge you have read our privacy policy.

Start asking to get answers

Find the answer to your question by asking.

Ask question

Explore related questions

See similar questions with these tags.