When I try to declare a global variable it throws the error Statement expected


Good evening! I need a variable assigned in one function to be available in another function. However, when I try to declare it as a global variable, I get the error "Statement expected, found Py:EQ" on the line global id, confidence = recognizer.predict(faceimage), specifically at the = sign (line 53 of my file). How do I fix this error?

# install opencv "pip install opencv-python"
import cv2

# distance from camera to object(face) measured
# centimeter
Known_distance = 76.2

# width of face in the real world or Object Plane
# centimeter
Known_width = 14.3

# Colors
GREEN = (0, 255, 0)
RED = (0, 0, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)

# defining the fonts
fonts = cv2.FONT_HERSHEY_COMPLEX

# face detector object
face_detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

# focal length finder function
def Focal_Length_Finder(measured_distance, real_width, width_in_rf_image):
     # finding the focal length
     focal_length = (width_in_rf_image * measured_distance) / real_width
     return focal_length


# distance estimation function
def Distance_finder(Focal_Length, real_face_width, face_width_in_frame):
     distance = (real_face_width * Focal_Length) / face_width_in_frame

     # return the distance
     return distance

def microFacialExpressions(recognizer, width, height):
     font = cv2.FONT_HERSHEY_COMPLEX_SMALL
     detectorFace = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
     camera = cv2.VideoCapture(0)
     recognizer = cv2.face.EigenFaceRecognizer_create()
     recognizer.read("classifierEigen.yml")
     width, height = 220, 220
     while(True):
         connected, image = camera.read()
         # Grayscale conversion
         grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
         facesDetected = detectorFace.detectMultiScale(grayimage, scaleFactor=1.5, minSize=(100, 100))
         for (x, y, l, a) in facesDetected:
             faceimage = cv2.resize(grayimage[y:y + a, x:x + l], (width, height))
             cv2.rectangle(image, (x, y), (x + l, y + a), (0,0,255), 2)
             global id, confidence = recognizer.predict(faceimage)
             #If ID is equal to 1, issue the message "Safe to exit" if not, issue the message "Hostile area"
             if id == 1:
                 warning="Safe to exit"
             else:
                 warning = "Hostile area"
         cv2.putText(image, warning, (x, y + (a + 30)), font, 2, (0,0,255))

         return warning


def face_data(image):
     face_width = 0 # making face width to zero

     # converting color image to gray scale image
     gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

     # detecting face in the image
     faces = face_detector.detectMultiScale(gray_image, 1.3, 5)

     # looping through the faces detect in the image
     # getting coordinates x, y , width and height
     for (x, y, h, w) in faces:
         # draw the rectangle on the face
         cv2.rectangle(image, (x, y), (x + w, y + h), GREEN, 2)

         # getting face width in the pixels
         face_width = w

     # return the face width in pixel
     return face_width


# reading reference_image from directory
ref_image = cv2.imread("Ref_image.jpg")

# find the face width(pixels) in the reference_image
ref_image_face_width = face_data(ref_image)

# get the focal by calling "Focal_Length_Finder"
# face width in reference(pixels),
# Known_distance(centimeters),
# known_width(centimeters)
Focal_length_found = Focal_Length_Finder(
     Known_distance, Known_width, ref_image_face_width)

print(Focal_length_found)

# show the reference image
cv2.imshow("ref_image", ref_image)

# initialize the camera object so that we
# can get frame from it
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

# looping through frame, incoming from
# camera/video
while True:

     # reading the frame from camera
     _, frame = cap.read()

     # calling face_data function to find
     # the width of face(pixels) in the frame
     face_width_in_frame = face_data(frame)

     # check if the face is zero then not
     # find the distance
     if face_width_in_frame != 0:
         # finding the distance by calling function
         # Distance finder function need
         # these arguments the Focal_Length,
         # known_width(centimeters),
         # and Known_distance(centimeters)
         Distance = Distance_finder(
             Focal_length_found, Known_width, face_width_in_frame)

         if Distance <= 50 and id:
             print("Level S Alert!")

         # draw line as background of text
         cv2.line(frame, (30, 30), (230, 30), RED, 32)
         cv2.line(frame, (30, 30), (230, 30), BLACK, 28)

         # Drawing Text on the screen
         cv2.putText(
             frame, f"Distance: {round(Distance, 2)} CM", (30, 35),
             fonts, 0.6, GREEN, 2)

     # show the frame on the screen
     cv2.imshow("frame", frame)

     # quit the program if you press 'q' on keyboard
     if cv2.waitKey(1) == ord("q"):
         break

# closing the camera
cap.release()

# closing the windows that are opened
cv2.destroyAllWindows()

CodePudding user response:

The global statement does not support assigning to a name; it only declares that the name refers to a global variable rather than a local one. While a global statement is legal almost anywhere in a function, it is strongly recommended to put such declarations at the top of the function.
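
For example, the failing pattern can be reproduced in isolation (a minimal sketch, unrelated to OpenCV): combining the declaration and the assignment on one line is rejected by the parser, while declaring the name first and assigning in a separate statement works.

counter = 0

def increment():
    # global counter = counter + 1    # rejected with "Statement expected, found Py:EQ" in PyCharm
    global counter                     # declare the name as global...
    counter = counter + 1              # ...then assign to it as an ordinary statement

Applied to the function from the question, that means moving the declaration to the top and leaving the prediction call as a normal assignment: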

def microFacialExpressions(recognizer, width, height):
     global id, confidence

     font = cv2.FONT_HERSHEY_COMPLEX_SMALL
     detectorFace = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
     camera = cv2.VideoCapture(0)
     recognizer = cv2.face.EigenFaceRecognizer_create()
     recognizer.read("classifierEigen.yml")
     width, height = 220, 220
     while(True):
         connected, image = camera.read()
         # Grayscale conversion
         grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
         facesDetected = detectorFace.detectMultiScale(grayimage, scaleFactor=1.5, minSize=(100, 100))
         for (x, y, l, a) in facesDetected:
             faceimage = cv2.resize(grayimage[y:y + a, x:x + l], (width, height))
             cv2.rectangle(image, (x, y), (x + l, y + a), (0,0,255), 2)
             id, confidence = recognizer.predict(faceimage)
             #If ID is equal to 1, issue the message "Safe to exit" if not, issue the message "Hostile area"
             if id == 1:
                 warning="Safe to exit"
             else:
                 warning = "Hostile area"
         cv2.putText(image, warning, (x, y + (a + 30)), font, 2, (0,0,255))

         return warning
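
With the declaration at the top, every assignment to id or confidence inside the function updates the module-level names, so code elsewhere can read them after the function has run. A minimal usage sketch (assuming a face is actually detected, otherwise the names are never assigned):

recognizer = cv2.face.EigenFaceRecognizer_create()   # needs opencv-contrib-python
warning = microFacialExpressions(recognizer, 220, 220)
print(warning)
print(id, confidence)   # module-level globals set inside the function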

Given that both variables are repeatedly reassigned in the loop, it's not clear why the last value of either is special enough to be needed in the global scope. I suspect neither variable needs to be declared global at all.
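
If the values are only needed by the caller, a simpler pattern is to return them and pass them on explicitly. A sketch of that approach (the reworked name and signature are hypothetical, not from the original code):

def micro_facial_expressions(camera, detector, recognizer, width=220, height=220):
    # Return (label, confidence, warning) for the last face found in one frame,
    # or None if the frame could not be read or no face was detected.
    connected, image = camera.read()
    if not connected:
        return None
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(gray, scaleFactor=1.5, minSize=(100, 100))
    result = None
    for (x, y, w, h) in faces:
        face = cv2.resize(gray[y:y + h, x:x + w], (width, height))
        label, confidence = recognizer.predict(face)
        warning = "Safe to exit" if label == 1 else "Hostile area"
        result = (label, confidence, warning)
    return result

The main loop can then unpack the returned tuple instead of relying on global state.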
