Gradual face_recognition encoding python - Numpy error?

I am using the "Extracting features from Face" code from https://www.mygreatlearning.com/blog/face-recognition/ to encode facial recognition data for an attendance marker. However, that code builds the encodings from scratch each time, and looping through every file on every run takes substantial time. For efficiency's sake, I want to encode only new images and append them to the existing encodings. I have modified the code so that the existing encodings, names and image paths are saved to text files, read back and appended on rerun, and paths that have already been encoded are skipped.

    import os
    import time
    import pickle
    import cv2
    import face_recognition
    from imutils import paths

    folder_count = 0  # type: int
    encnum = 1
    input_path = "Images"  # type: str
    for folders in os.listdir(input_path):
        folder_count += 1  # increment counter
    print("Encoding Start")
    start = time.time()
    #get paths of each file in folder named Images
    #Images here contains my data(folders of various persons)
    imagePaths = list(paths.list_images('Images'))
    knownImages = []
    knownEncodings = []
    knownNames = []
    with open('encodings.txt', 'r') as filehandle:
        for line in filehandle:
            # remove linebreak which is the last character of the string
            currentPlace = line[:-1]
            # add item to the list
            knownEncodings.append(currentPlace)
    with open('names.txt', 'r') as filehandle:
        for line in filehandle:
            # remove linebreak which is the last character of the string
            currentPlace = line[:-1]

            # add item to the list
            knownNames.append(currentPlace)
    with open('paths.txt', 'r') as filehandle:
        for line in filehandle:
            # remove linebreak which is the last character of the string
            currentPlace = line[:-1]

            # add item to the list
            knownImages.append(currentPlace)
    while '' in knownEncodings:
        knownEncodings.remove('')
    while '' in knownImages:
        knownImages.remove('')
    while '' in knownNames:
        knownNames.remove('')
    # loop over the image paths
    for (i, imagePath) in enumerate(imagePaths):
        if imagePath not in knownImages:
            # extract the person name from the image path
            name = imagePath.split(os.path.sep)[-2]
            # load the input image and convert it from BGR (OpenCV ordering)
            # to dlib ordering (RGB)
            image = cv2.imread(imagePath)
            rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            #Use Face_recognition to locate faces
            boxes = face_recognition.face_locations(rgb,model='hog')
            # compute the facial embedding for the face
            encodings = face_recognition.face_encodings(rgb, boxes)
            # loop over the encodings
            for encoding in encodings:
                print("Encoding...{}".format(encnum))
                encnum = encnum + 1
                knownEncodings.append(encoding)
                knownNames.append(name)
    #save encodings along with their names in dictionary data
    data = {"encodings": knownEncodings, "names": knownNames}
    #use pickle to save data into a file for later use
    with open('encodings.txt', 'w') as filehandle:
        for listitem in knownEncodings:
            filehandle.write('%s\n' % listitem)
    with open('names.txt', 'w') as filehandle:
        for listitem in knownNames:
            filehandle.write('%s\n' % listitem)
    with open('paths.txt', 'w') as filehandle:
        for listitem in imagePaths:
            filehandle.write('%s\n' % listitem)
    f = open("face_enc", "wb")
    f.write(pickle.dumps(data))
    f.close()
    end = time.time()
    total_time = end - start
    print(str(total_time), "seconds encoding for ", folder_count, " people")

This seems to work and produces a new face_enc file. However, when it is loaded and used with matches = face_recognition.compare_faces(data["encodings"], encoding) in the "Face Recognition in Live webcam Feed" code from the website:

#imports used by this script
import os
import pickle
import cv2
import face_recognition

#find path of xml file containing haarcascade file
cascPathface = os.path.dirname(
    cv2.__file__) + "/data/haarcascade_frontalface_alt2.xml"
# load the haarcascade in the cascade classifier
faceCascade = cv2.CascadeClassifier(cascPathface)
# load the known faces and embeddings saved in last file
data = pickle.loads(open('face_enc', "rb").read())
 
print("Streaming started")
video_capture = cv2.VideoCapture(0)
# loop over frames from the video file stream
while True:
    # grab the frame from the threaded video stream
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(60, 60),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
 
    # convert the input frame from BGR to RGB 
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # the facial embeddings for face in input
    encodings = face_recognition.face_encodings(rgb)
    names = []
    # loop over the facial embeddings in case
    # we have multiple embeddings for multiple faces
    for encoding in encodings:
        #Compare encodings with encodings in data["encodings"]
        #Matches contains an array of boolean values: True for the embeddings it matches closely
        #and False for the rest
        matches = face_recognition.compare_faces(data["encodings"], encoding)
        #set name = "Unknown" if no encoding matches
        name = "Unknown"
        # check to see if we have found a match
        if True in matches:
            #Find positions at which we get True and store them
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}
            # loop over the matched indexes and maintain a count for
            # each recognized face
            for i in matchedIdxs:
                #Check the names at respective indexes we stored in matchedIdxs
                name = data["names"][i]
                #increase count for the name we got
                counts[name] = counts.get(name, 0) + 1
            #set name which has highest count
            name = max(counts, key=counts.get)
 
 
        # update the list of names
        names.append(name)
        # loop over the recognized faces
        for ((x, y, w, h), name) in zip(faces, names):
            # rescale the face coordinates
            # draw the predicted face name on the image
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(frame, name, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
             0.75, (0, 255, 0), 2)
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()

I get this error after successfully creating face_enc and running the recognition file the first time, then adding new photos and running the recognition again:

C:\Users\ABE0001\AppData\Local\Programs\Python\Python310\lib\site-packages\face_recognition\api.py:75: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
  return np.linalg.norm(face_encodings - face_to_compare, axis=1)
Traceback (most recent call last):
  File "c:\Users\ABE0001\Documents\Coding\RollCall\tempCodeRunnerFile.python", line 39, in <module>
    matches = face_recognition.compare_faces(data["encodings"],
  File "C:\Users\ABE0001\AppData\Local\Programs\Python\Python310\lib\site-packages\face_recognition\api.py", line 226, 
in compare_faces
    return list(face_distance(known_face_encodings, face_encoding_to_check) <= tolerance)
  File "C:\Users\ABE0001\AppData\Local\Programs\Python\Python310\lib\site-packages\face_recognition\api.py", line 75, in face_distance
    return np.linalg.norm(face_encodings - face_to_compare, axis=1)
ValueError: operands could not be broadcast together with shapes (2572,) (128,)

I am assuming there is something wrong with how face_enc is created. My apologies if this question is a little too specific or badly structured; I'm new here.
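
A minimal sketch of what I suspect is happening (hypothetical values; it assumes each encoding is a 128-d numpy array that was written out with '%s\n' and read back in as plain text):

    import numpy as np

    # What face_encodings() returns for one face: a 128-d numpy array
    fresh = np.zeros(128)
    # What comes back from encodings.txt: the array's printed repr, i.e. a string
    reloaded = str(fresh)

    # compare_faces() -> face_distance() effectively does
    # np.asarray(known_encodings) - probe. With strings in the list the
    # result is a flat 1-D array of length N, not an (N, 128) matrix, so
    # broadcasting against a (128,) probe fails just like in the traceback.
    print(np.asarray([reloaded, reloaded]).shape)  # (2,)    -- what I am passing in
    print(np.asarray([fresh, fresh]).shape)        # (2, 128) -- what compare_faces expects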

CodePudding user response:

Fixed by switching the storage method to pickle. I assume writing the numpy arrays out as plain text was the source of the error.

Reading files with pickle:

    with open('encodings.txt', 'rb') as filehandle:
        try:
            knownEncodings = pickle.load(filehandle)
        except EOFError:
            knownEncodings = []
    with open('names.txt', 'rb') as filehandle:
        try:
            knownNames = pickle.load(filehandle)
        except EOFError:
            knownNames = []
    with open('paths.txt', 'rb') as filehandle:
        try:    
            knownImages = pickle.load(filehandle)
        except EOFError:
            knownImages = []

Writing files with pickle:

    with open('encodings.txt', 'wb') as filehandle:
        pickle.dump(knownEncodings, filehandle)
    with open('names.txt', 'wb') as filehandle:
        pickle.dump(knownNames, filehandle)
    with open('paths.txt', 'wb') as filehandle:
        pickle.dump(imagePaths, filehandle)
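
As a quick sanity check after the change (a minimal sketch assuming the face_enc file written by the script above), the stored encodings should now stack into an (N, 128) float array instead of a flat list of strings:

    import pickle
    import numpy as np

    # load the pickled dictionary written by the encoding script
    with open("face_enc", "rb") as f:
        data = pickle.load(f)

    encodings = np.asarray(data["encodings"])
    print(encodings.shape, encodings.dtype)  # expect (N, 128) float64
    assert encodings.ndim == 2 and encodings.shape[1] == 128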