I’m trying to run the cvzone HandGesturesImp.py with the cvzone package (version 1.5.6). The HandGestureImp.py script contains the following script:
import cv2
from cvzone.HandTrackingModule import HandDetector
from cvzone.FaceDetectionModule import FaceDetector
import cvzone
cap = cv2.VideoCapture(0)
detectorHand = HandDetector(maxHands=1, detectionCon=0.9)
detectorFace = FaceDetector()
gesture = ""
while True:
_, img = cap.read()
img = cv2.resize(img, (640, 480))
img = detectorHand.findHands(img)
print(img)
allHands, img = detectorHand.findHands(img, draw=True)
pdb.set_trace()
img, bboxs = detectorFace.findFaces(img, draw=True)
if bboxs:
x, y, w, h = bboxs[0]["bbox"]
bboxRegion = x - 175 - 25, y - 75, 175, h + 75
cvzone.cornerRect(img, bboxRegion, rt=0, t=10, colorC=(0, 0, 255))
if allHands["lmList"] and allHands["type"] == "Right":
handCenter = allHands["center"]
# x < cx < x+w
inside = bboxRegion[0] < handCenter[0] < bboxRegion[0] + bboxRegion[2] and
bboxRegion[1] < handCenter[1] < bboxRegion[1] + bboxRegion[3]
if inside:
cvzone.cornerRect(img, bboxRegion, rt=0, t=10, colorC=(0, 255, 0))
fingers = detectorHand.fingersUp()
# print(fingers)
if fingers == [1, 1, 1, 1, 1]:
gesture = " Stop"
elif fingers == [0, 1, 0, 0, 0]:
gesture = " UP"
me.move_up(20)
elif fingers == [1, 1, 0, 0, 1]:
gesture = "Flip"
me.flip_left()
elif fingers == [0, 1, 1, 0, 0]:
gesture = " Down"
me.move_down(20)
elif fingers == [0, 0, 0, 0, 1]:
gesture = " Left"
me.move_left(40)
elif fingers == [1, 0, 0, 0, 0]:
gesture = " Right"
me.move_right(40)
cv2.rectangle(img, (bboxRegion[0], bboxRegion[1] + bboxRegion[3] + 10),
(bboxRegion[0] + bboxRegion[2], bboxRegion[1] + bboxRegion[3] + 60),
(0, 255, 0), cv2.FILLED)
cv2.putText(img, f'{gesture}',
(bboxRegion[0] + 10, bboxRegion[1] + bboxRegion[3] + 50),
cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 0), 2)
cv2.imshow("Image", img)
if cv2.waitKey(5) & 0xFF == ord('q'):
cap.release()
img.stop()
break
cv2.destroyAllWindows()
Here is the code for the HandTrackModule.py
import cv2
import mediapipe as mp
import math
class HandDetector:
"""
Finds Hands using the mediapipe library. Exports the landmarks
in pixel format. Adds extra functionalities like finding how
many fingers are up or the distance between two fingers. Also
provides bounding box info of the hand found.
"""
def __init__(self, mode=False, maxHands=2, detectionCon=0.5, minTrackCon=0.5):
"""
:param mode: In static mode, detection is done on each image: slower
:param maxHands: Maximum number of hands to detect
:param detectionCon: Minimum Detection Confidence Threshold
:param minTrackCon: Minimum Tracking Confidence Threshold
"""
self.mode = mode
self.maxHands = maxHands
self.detectionCon = detectionCon
self.minTrackCon = minTrackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(static_image_mode=self.mode, max_num_hands=self.maxHands,
min_detection_confidence=self.detectionCon,
min_tracking_confidence=self.minTrackCon)
self.mpDraw = mp.solutions.drawing_utils
self.tipIds = [4, 8, 12, 16, 20]
self.fingers = []
self.lmList = []
def findHands(self, img, draw=True, flipType=True):
"""
Finds hands in a BGR image.
:param img: Image to find the hands in.
:param draw: Flag to draw the output on the image.
:return: Image with or without drawings
"""
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
allHands = []
h, w, c = img.shape
if self.results.multi_hand_landmarks:
for handType, handLms in zip(self.results.multi_handedness, self.results.multi_hand_landmarks):
myHand = {}
## lmList
mylmList = []
xList = []
yList = []
for id, lm in enumerate(handLms.landmark):
px, py, pz = int(lm.x * w), int(lm.y * h), int(lm.z * w)
mylmList.append([px, py, pz])
xList.append(px)
yList.append(py)
## bbox
xmin, xmax = min(xList), max(xList)
ymin, ymax = min(yList), max(yList)
boxW, boxH = xmax - xmin, ymax - ymin
bbox = xmin, ymin, boxW, boxH
cx, cy = bbox[0] + (bbox[2] // 2),
bbox[1] + (bbox[3] // 2)
myHand["lmList"] = mylmList
myHand["bbox"] = bbox
myHand["center"] = (cx, cy)
if flipType:
if handType.classification[0].label == "Right":
myHand["type"] = "Left"
else:
myHand["type"] = "Right"
else:
myHand["type"] = handType.classification[0].label
allHands.append(myHand)
## draw
if draw:
self.mpDraw.draw_landmarks(img, handLms,
self.mpHands.HAND_CONNECTIONS)
cv2.rectangle(img, (bbox[0] - 20, bbox[1] - 20),
(bbox[0] + bbox[2] + 20, bbox[1] + bbox[3] + 20),
(255, 0, 255), 2)
cv2.putText(img, myHand["type"], (bbox[0] - 30, bbox[1] - 30), cv2.FONT_HERSHEY_PLAIN,
2, (255, 0, 255), 2)
if draw:
return allHands, img
else:
return allHands
I get the following error
Traceback (most recent call last):
File "Part3/HandGesturesImp.py", line 32, in <module>
allHands, img = detectorHand.findHands(img, draw=True)
File "./cvzone/HandTrackingModule.py", line 48, in findHands
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
TypeError: Expected Ptr<cv::UMat> for argument 'src'
My img array looks like this
([], array([[[0, 0, 1],
[0, 0, 0],
[0, 0, 0],
...,
[0, 1, 2],
[0, 3, 4],
[0, 1, 0]],
...,
[0, 0, 0],
[0, 1, 1],
[0, 0, 1]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
...,
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], dtype=uint8))
I don’t quite understand the error, is the image array not in the correct format. What changes need to be made?