import speech_recognition as sr
import pyttsx3
import pywhatkit
import datetime
import pyjokes
import cv2
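# Third-party packages assumed by these imports (PyPI names, to the best of my
# knowledge): SpeechRecognition, pyttsx3, pywhatkit, pyjokes, opencv-python.
# sr.Microphone() additionally requires PyAudio to be installed.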


listener = sr.Recognizer()
engine = pyttsx3.init()
voices = engine.getProperty("voices")
# voices[1] is usually a female voice and voices[0] a male voice; the exact
# list and order depend on the voices installed on your system
engine.setProperty("voice", voices[1].id)

def talk(text):
    # Speak the given text aloud through the text-to-speech engine
    engine.say(text)
    engine.runAndWait()

def take_command():
    # Listen on the microphone and return the recognized text (lower-cased,
    # with the word "google" stripped). Returns an empty string on failure.
    command = ""
    try:
        with sr.Microphone() as source:
            print("Listening..... (speak now)")
            voice = listener.listen(source)
            command = listener.recognize_google(voice)
            command = command.lower()
            if "google" in command:
                command = command.replace("google", "")
            print(command)
    except Exception:
        print("Oops, something went wrong!")
    return command

def run_mini_google_assistant():
    command = take_command()
    print(command)

    if "play" in command:
        # Play the requested song on YouTube
        song = command.replace("play", "").strip()
        talk("Playing the song " + song)
        print(song)
        pywhatkit.playonyt(song)
| 44 | + elif "time" in command: |
| 45 | + time = datetime.datetime.now().strftime("%I:%M%p") |
| 46 | + print(time) |
| 47 | + talk("Current time is" + time) |
| 48 | + elif "joke" in command: |
| 49 | + talk("Here is the joke") |
| 50 | + talk(pyjokes.get_joke()) |
| 51 | + talk(" heeheehehe quite funny! ") |
    elif "date" in command:
        date = datetime.date.today()
        print(date)
        # Convert the date to a readable string before handing it to the TTS engine
        talk("Today is " + date.strftime("%B %d, %Y"))
    elif "how are you" in command:
        talk("I am good. Nice to see you here!")
| 59 | + elif "capture" or "camera" in command: |
| 60 | + talk("Ok I'll do it for you!") |
| 61 | + talk("Remenber, You can use s button to quit") |
| 62 | + vid = cv2.VideoCapture(0) |
| 63 | + |
        while True:
            # Capture the photo/video frame by frame
            ret, frame = vid.read()
            if not ret:
                print("Could not read a frame from the camera.")
                break

            # Display the resulting frame
            cv2.imshow("frame", frame)

            if "photo" in command:
                # For a photo, wait indefinitely on each frame; press 's' to quit
                if cv2.waitKey(0) & 0xFF == ord("s"):
                    break
            elif "video" in command:
                # For video, poll roughly every millisecond; press 's' to quit
                if cv2.waitKey(1) & 0xFF == ord("s"):
                    break
            else:
                # Neither "photo" nor "video" was said; still poll so the
                # window stays responsive and 's' can quit
                if cv2.waitKey(1) & 0xFF == ord("s"):
                    break

        # After the loop, release the capture object
        vid.release()
        # Destroy all the windows
        cv2.destroyAllWindows()
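        # Not in the original script: the "photo" branch above only displays
        # frames and never saves one. A minimal sketch for persisting the last
        # frame (the filename is an illustrative choice) would be:
        # if "photo" in command and ret:
        #     cv2.imwrite("captured_photo.jpg", frame)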

    else:
        talk("Sorry, I am not getting you! Can you please repeat?")


talk("Hello my friend, I am your personal mini Google assistant.")
talk("I can help you play a song, tell the time, tell the date, tell a joke, and I can also capture photos and videos for you")
talk("Now please tell me, how can I help you!")
while True:
    run_mini_google_assistant()
    # talk("Nice to see you here, I believe that you enjoyed it!")