-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathhand_gestures.py
More file actions
108 lines (95 loc) · 3.67 KB
/
hand_gestures.py
File metadata and controls
108 lines (95 loc) · 3.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
"""
Gesture Controlled Video Player using Raspberry Pi and MediaPipe - Play, Pause and Control Volume using Gestures
https://circuitdigest.com/microcontroller-projects/gesture-controlled-media-player-using-raspberry-pi-and-mediapipe
"""
import cv2 as cv
import mediapipe as mp
import pyautogui as gui
import sys
# Run against the live camera only when the script is invoked as
# `python hand_gestures.py WEBCAM`; otherwise a static test image is used.
USE_WEBCAM = (len(sys.argv) > 1) and (sys.argv[1] == "WEBCAM")
# MediaPipe helpers: landmark renderer and the Hands solution factory.
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
# MediaPipe landmark indices of the five fingertips
# (thumb, index, middle, ring, pinky).
tipIds = (4, 8, 12, 16, 20)
# Playback state machine: None until a hand is seen, then "Play"/"Pause".
state = None
# NOTE(review): Gesture is assigned here but never read again in this file.
Gesture = None
# Requested capture resolution (width, height) in pixels.
wCam, hCam = 720, 640
def finger_position(image, handNo=0, hand_results=None):
    """Return pixel coordinates for every landmark of one detected hand.

    Args:
        image: frame whose ``.shape`` is ``(h, w, c)``; only the shape is
            read, to scale MediaPipe's normalized [0, 1] coordinates
            into pixels.
        handNo: index of the hand to read from ``multi_hand_landmarks``
            (default 0, the first detected hand).
        hand_results: a MediaPipe Hands result object.  Defaults to the
            module-level ``results`` populated by the main loop, which
            preserves the original call signature while making the
            function testable with an injected result.

    Returns:
        A list of ``[landmark_id, cx, cy]`` entries (21 landmarks for a
        detected hand), or an empty list when no hand was detected.
    """
    if hand_results is None:
        # Backward-compatible fallback: the original implementation read
        # the global `results` set inside the main capture loop.
        hand_results = results
    lmList = []
    if hand_results.multi_hand_landmarks:
        myHand = hand_results.multi_hand_landmarks[handNo]
        # Hoisted out of the loop: the frame shape is invariant per call.
        h, w, _ = image.shape
        for lm_id, lm in enumerate(myHand.landmark):
            lmList.append([lm_id, int(lm.x * w), int(lm.y * h)])
    return lmList
if USE_WEBCAM:
    # For webcam input:
    cap = cv.VideoCapture(0)
    cap.set(cv.CAP_PROP_FRAME_WIDTH, wCam)
    # BUG FIX: the original set CAP_PROP_FRAME_WIDTH twice; the second
    # call must request the frame *height*.
    cap.set(cv.CAP_PROP_FRAME_HEIGHT, hCam)

with mp_hands.Hands(
        min_detection_confidence=0.8,
        min_tracking_confidence=0.5) as hands:
    while True if not USE_WEBCAM else cap.isOpened():
        success, image = (True, cv.imread("hand.png")) if not USE_WEBCAM else cap.read()
        if not success:
            print("Ignoring empty camera frame.")
            # If loading a video, use 'break' instead of 'continue'.
            continue
        # Flip the image horizontally for a selfie-view display, and
        # convert BGR to RGB as MediaPipe expects.
        image = cv.cvtColor(cv.flip(image, 1), cv.COLOR_BGR2RGB)
        # Mark read-only to allow MediaPipe to avoid an internal copy.
        image.flags.writeable = False
        results = hands.process(image)
        # Re-enable writes and go back to BGR so OpenCV can draw/show.
        image.flags.writeable = True
        image = cv.cvtColor(image, cv.COLOR_RGB2BGR)
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
        lmList = finger_position(image)
        if len(lmList) != 0:
            # A finger counts as "up" when its tip landmark is above
            # (smaller y than) the joint two landmarks below it.
            # The thumb (tipIds[0]) is intentionally skipped, as in the
            # original logic.
            fingers = []
            for i in range(1, 5):
                if lmList[tipIds[i]][2] < lmList[tipIds[i] - 2][2]:
                    fingers.append(1)
                else:
                    # if/else instead of two independent ifs: the two
                    # comparisons are mutually exclusive, and the else
                    # also records 0 for the tip-exactly-level case the
                    # original silently dropped.
                    fingers.append(0)
            totalFingers = fingers.count(1)
            print(totalFingers)
            # Four fingers up arms the player ("Play"); a closed fist
            # afterwards toggles pause via the space key.
            if totalFingers == 4:
                state = "Play"
            if totalFingers == 0 and state == "Play":
                state = "Pause"
                gui.press('space')
                print("Space")
            # One finger: seek left/right depending on index-tip x.
            if totalFingers == 1:
                if lmList[8][1] < 300:
                    print("left")
                    gui.press('left')
                if lmList[8][1] > 400:
                    print("Right")
                    # BUG FIX: pyautogui key names are lowercase;
                    # 'Right' is not a valid key and was a silent no-op.
                    gui.press('right')
            # Two fingers: volume up/down depending on palm-landmark y.
            if totalFingers == 2:
                if lmList[9][2] < 210:
                    print("Up")
                    # BUG FIX: 'Up' -> 'up' (valid pyautogui key name).
                    gui.press('up')
                if lmList[9][2] > 230:
                    print("Down")
                    # BUG FIX: 'Down' -> 'down' (valid pyautogui key name).
                    gui.press('down')
        cv.imshow("Media Controller", image)
        key = cv.waitKey(1) & 0xFF
        if key == 27:  # ESC quits the loop.
            break

if USE_WEBCAM:
    cap.release()
cv.destroyAllWindows()