Combining two scripts [ Mirroring the webcam ]


Dear friends at Stack Overflow, I am having trouble combining these two scripts in my virtual keyboard project. The project is based on Python 3 and OpenCV. Please help.

Library versions:

CVZone 1.4.1

Mediapipe 0.8.8

Script 1:

import cv2
from time import sleep
import mediapipe as mp
from cvzone.HandTrackingModule import HandDetector
import numpy as np
import cvzone



detector = HandDetector(detectionCon=0.8)
keys = [["Q", "W", "E", "R", "T", "Y", "U", "I", "O", "P"],
        ["A", "S", "D", "F", "G", "H", "J", "K", "L", ";"],
        ["Z", "X", "C", "V", "B", "N", "M", ",", ".", "/"]]

finalText = ""
# def drawALL(img, buttonList):
#
#     for button in buttonList:
#         x, y = button.pos
#         w, h = button.size
#         cv2.rectangle(img, button.pos, (x + w, y + h), (255, 0, 255), cv2.FILLED)
#         cv2.putText(img, button.text, (x + 20, y + 65),
#                      cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
#     return img


def drawAll(img, buttonList):
    imgNew = np.zeros_like(img, np.uint8)
    for button in buttonList:
        x, y = button.pos
        cvzone.cornerRect(imgNew, (button.pos[0], button.pos[1], button.size[0], button.size[1]),
                          20, rt=0)
        cv2.rectangle(imgNew, button.pos, (x + button.size[0], y + button.size[1]),
                      (255, 0, 255), cv2.FILLED)
        cv2.putText(imgNew, button.text, (x + 40, y + 60),
                    cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 3)

    out = img.copy()
    alpha = 0.5
    mask = imgNew.astype(bool)
    print(mask.shape)
    out[mask] = cv2.addWeighted(img, alpha, imgNew, 1 - alpha, 0)[mask]
    return out

class Button():
    def __init__(self,pos,text,size=[85,85]):
        self.pos = pos
        self.size = size
        self.text = text


buttonList =[]

for i in range(len(keys)):
    for j, key in enumerate(keys[i]):
        buttonList.append(Button([100 * j + 50, 100 * i + 50], key))

video = cv2.VideoCapture(0)
video.set(3, 1280)   # 3 = cv2.CAP_PROP_FRAME_WIDTH
video.set(4, 720)    # 4 = cv2.CAP_PROP_FRAME_HEIGHT

while True:

    success, img = video.read()
    img = detector.findHands(img)
    lmList, bboxInfo = detector.findPosition(img)
    img = drawAll(img, buttonList)

    if lmList:
        for button in buttonList:
            x,y = button.pos
            w,h = button.size

            if x < lmList[8][0] < x + w and y < lmList[8][1] < y + h:
                cv2.rectangle(img, button.pos, (x + w, y + h), (175, 0, 175), cv2.FILLED)
                cv2.putText(img, button.text, (x + 20, y + 65),
                            cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
                l, _, _ = detector.findDistance(8, 12, img, draw=False)
                print(l)
                if l < 30:
                    cv2.rectangle(img, button.pos, (x + w, y + h), (0, 255, 0), cv2.FILLED)
                    cv2.putText(img, button.text, (x + 20, y + 65),
                                cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
                    finalText += button.text
                    sleep(0.25)
    cv2.rectangle(img, (50,350), (700,450), (175, 0, 175), cv2.FILLED)
    cv2.putText(img, finalText, (60, 435),
                cv2.FONT_HERSHEY_PLAIN,5 , (255, 255, 255), 5)
    cv2.imshow("Image",img)
    cv2.waitKey(1)

For script 1, the program works, but the webcam output looks inverted: the image on screen moves in the opposite direction to my actual hand movement.

For script 2:

class Mirror:
    def __init__(self):
        self.__setupCamera()
        # If you have problems running this code on MacOS X you probably have to reinstall opencv
        # with qt backend because cocoa support seems to be broken:
        #   brew reinstall opencv --HEAD --with-qt
        self.__setupWindow()

    # Set camera resolution. The max resolution is webcam dependent
    # so change it to a resolution that is both supported by your camera
    # and compatible with your monitor
    def __setupCamera(self):
        # (camera setup body missing from the post)
        pass

    def __setupWindow(self):
        cv2.namedWindow('frame', cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

    def update(self):
        ret, frame = self.cam.read()
        cv2.imshow('frame', frame)

    def release(self):
        self.cam.release()


    def read(self):
        ret, frame = self.cam.read()
        return cv2.flip(frame, 1)

    def update(self):
        cv2.imshow('frame', self.read())


if __name__ == "__main__":
    mirror = Mirror()
    while True:
        mirror.update()
        if cv2.waitKey(1) == 27:
            break
    mirror.release()

Script 2 also works like a charm in a separate window. But when I try to combine them, I get an error, and I cannot get the mirrored output to replace the video output of script 1.

CodePudding user response:

I would suggest replacing the following lines in script 1:

while True:
   success, img = video.read()
   img = detector.findHands(img)

with:

while True:
   success, img = video.read()
   img = cv2.flip(img, 1) 
   img = detector.findHands(img)

If it's the wrong kind of flip, check the documentation for the other flip-code options, or use the transpose function instead.
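For reference, a minimal sketch of the flip codes (this assumes the same video capture object as in script 1; the variable names are just for illustration):

success, img = video.read()
mirrored = cv2.flip(img, 1)      # flipCode 1: horizontal flip (mirror), the usual choice for a webcam
upside_down = cv2.flip(img, 0)   # flipCode 0: vertical flip
both_axes = cv2.flip(img, -1)    # flipCode -1: flip around both axes
swapped = cv2.transpose(img)     # swaps rows and columns; combine with flip if you need a rotation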

CodePudding user response:

You may have omitted a part of script 2, so I merged script 1 and script 2 according to my understanding. I hope it is helpful.

First, you need to initialize the video capture:

def __setupCamera(self):
    self.cam = cv2.VideoCapture(0)
    self.cam.set(3, 1280)
    self.cam.set(4, 720)
    self.finalText = ""

As the main block shows:

if __name__ == "__main__":
    mirror = Mirror()
    while True:
        mirror.update()
        if cv2.waitKey(1) == 27:
            break
    mirror.release()

only update is called (you should keep only one update method), and read is called inside update, so add the hand detection to read:

def read(self):
    ret, img = self.cam.read()
    img = cv2.flip(img, 1)
    img = detector.findHands(img)
    lmList, bboxInfo = detector.findPosition(img)
    img = drawAll(img, buttonList)
    if lmList:
        ...
        ...
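For completeness, here is one way the elided part of read() could continue, lifted almost verbatim from script 1's main loop. Note that finalText becomes self.finalText, since __setupCamera above stores it on the Mirror instance, and that read must return the frame because update passes its result to cv2.imshow. This is only a sketch of the merge, not a tested implementation:

    if lmList:
        for button in buttonList:
            x, y = button.pos
            w, h = button.size
            # fingertip of the index finger (landmark 8) hovering over this key?
            if x < lmList[8][0] < x + w and y < lmList[8][1] < y + h:
                cv2.rectangle(img, button.pos, (x + w, y + h), (175, 0, 175), cv2.FILLED)
                cv2.putText(img, button.text, (x + 20, y + 65),
                            cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
                # distance between index (8) and middle (12) fingertips: a "click"
                l, _, _ = detector.findDistance(8, 12, img, draw=False)
                if l < 30:
                    cv2.rectangle(img, button.pos, (x + w, y + h), (0, 255, 0), cv2.FILLED)
                    cv2.putText(img, button.text, (x + 20, y + 65),
                                cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
                    self.finalText += button.text   # stored on the instance now
                    sleep(0.25)
    # typed-text bar, exactly as in script 1
    cv2.rectangle(img, (50, 350), (700, 450), (175, 0, 175), cv2.FILLED)
    cv2.putText(img, self.finalText, (60, 435),
                cv2.FONT_HERSHEY_PLAIN, 5, (255, 255, 255), 5)
    return img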