Home > Back-end >  Apply mask to Webcam streaming in opencv
Apply mask to Webcam streaming in opencv

Time:11-18

I applied a mask to the video feed using OpenCV and want to display the live stream on the website, but the following code stops streaming once it starts. I've been racking my brain over it but couldn't figure out the solution. Any help would be greatly appreciated.

views.py

def gen(frame):
    """Yield a single multipart MJPEG chunk for *frame*, forever.

    NOTE(review): the same JPEG bytes are re-yielded on every iteration,
    so the browser shows a frozen image — this is the bug the question
    describes; the answer below moves the frame capture into the loop.
    """
    while True:
        # frame = camera.get_frame()
        # '+' operators restored — they were stripped when the code was pasted.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@api_view(['GET'])
def seasoncolor(request):
    """Stream the masked webcam feed as a multipart/x-mixed-replace response.

    NOTE(review): the original wrapped this return in ``while True:``,
    which is dead code — ``return`` exits on the first pass. Removed.
    """
    return StreamingHttpResponse(
        gen(color_detection.color_detection(0)),
        content_type='multipart/x-mixed-replace; boundary=frame')

color_detection.py

import numpy as np
import cv2
import sys
'''
ML object detection algo(haarcascade)used to identify objects. 
the XML file consists of trained Haar Cascade models.
'''

def color_detection(season):
    """Grab webcam frames, crop detected faces out with a circular mask and
    fill the background with a season-dependent colour; return JPEG bytes.

    season -- 0 spring, 1 summer, 2 fall, anything else winter.

    NOTE(review): this returns from inside the capture loop on the first
    detected face, so each call re-opens the camera and yields a single
    frame — the accepted answer below restructures this.
    """
    face_cascade = cv2.CascadeClassifier(
        'accounts/personal_color/self_detection/haarcascade_frontalface_default.xml')
    # initialize video from the webcam
    video = cv2.VideoCapture(1)
    # Spring/summer/fall/winter
    while True:
        # ret tells if the camera works properly. Frame is an actual frame from the video feed
        ret, frame = video.read()
        # make sure port is working and read the image
        if frame is not None and video.isOpened():
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            '''
            Detect the faces within the subregions of the image in scales
            scaleFactor indicates how much the image size is reduced at each image scale.
            minNeighbors: Higher value results in higher quality of the detected face.
            '''
            faces = face_cascade.detectMultiScale(
                gray, scaleFactor=1.1, minNeighbors=6)
            # Draw the circle around each face
            for (x, y, w, h) in faces:
                # Centre of the face; '+' operators restored (lost in the paste).
                center_coordinates = x + w // 2, y + h // 2
                radius = w // 2  # or can be h / 2 or anything based on your requirements
                # background color(black)
                mask = np.zeros(frame.shape[:2], dtype="uint8")
                # Draw the desired region to crop out in white
                cv2.circle(mask, center_coordinates, radius, (255, 255, 255), -1)
                masked = cv2.bitwise_and(frame, frame, mask=mask)
                if int(season) == 0:  # Spring
                    # Replace all (0,0,0) channels with Coral pink (BGR)
                    masked[np.where((masked == [0, 0, 0]).all(axis=2))] = [121, 131, 248]
                elif int(season) == 1:  # Summer
                    # Replace all (0,0,0) channels with Rose Red
                    masked[np.where((masked == [0, 0, 0]).all(axis=2))] = [86, 30, 194]
                elif int(season) == 2:  # Fall
                    # Replace all (0,0,0) channels with Red Brown / Cinnamon
                    masked[np.where((masked == [0, 0, 0]).all(axis=2))] = [30, 105, 210]
                else:  # Winter
                    # Replace all (0,0,0) channels with Burgundy Red
                    masked[np.where((masked == [0, 0, 0]).all(axis=2))] = [31, 2, 141]
                # cv2.imshow('mask applied', masked)
                ret, jpeg = cv2.imencode('.jpg', masked)
                return jpeg.tobytes()
            if cv2.waitKey(30) & 0xff == 27:
                break

    video.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    # CLI entry point: the season index is the first positional argument.
    season_arg = sys.argv[1]
    color_detection(season_arg)

Self_color_diagnosis.js

import React, { useState, useEffect } from 'react';
import ReactDOM from 'react-dom';
import CameraScreen from './CameraScreen'; 
import { StyleSheet, Text, View, Image } from 'react-native';
import { NavigationContainer } from '@react-navigation/native';
import { createStackNavigator } from '@react-navigation/stack';
import axios from 'axios';

// Screen that displays the live masked webcam stream.
// The <Image> points directly at the Django MJPEG endpoint; each
// multipart chunk replaces the previous frame in the image view.
function Self_color_diagnosis({navigation,route}) {
return (
    <View style={styles.title_container}>
    <Image style={styles.video} source={{
        uri: 'http://localhost:8000/seasoncolor/',}}/>
        </View>

);
}

// Fixed 500x500 viewport for the stream, vertically centered on screen.
const styles = StyleSheet.create({
    video: {
        width: 500,
        height: 500
    },
    title_container: {
        flex: 1,
        justifyContent: 'center'
      },
  });

export default Self_color_diagnosis;

The above code results in the picture below: the streaming stops and the image does not change at all. (screenshot of the frozen stream omitted)

CodePudding user response:

gen() runs a loop that keeps using the same frame the whole time.

You have to grab a new frame inside this loop.

def gen():
    """Yield multipart MJPEG chunks, grabbing a fresh frame on every pass."""
    while True:
        frame = color_detection.color_detection(0)
        if frame:
            # '+' operators restored — they were stripped when the code was pasted.
            yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n'
        #else:
        #    print('no frame')

But color_detection should run without a loop.
You should also create VideoCapture(1) only once.
And you should return a frame even if you didn't detect any face.

# Resolve the cascade file from OpenCV's bundled data directory and build
# the classifier once, at module level (not once per request).
path = os.path.join(cv2.data.haarcascades, 'haarcascade_frontalface_default.xml')
face_cascade = cv2.CascadeClassifier(path)
    
# Open the capture device once; color_detection() reuses this handle.
video = cv2.VideoCapture(1)

def color_detection(season):
    """Read ONE frame from the shared capture, tint the face region's
    background by season, and return it JPEG-encoded (or None when no
    frame could be read).

    season -- 0 spring, 1 summer, 2 fall, anything else winter.
    """
    # ret tells if the camera works properly. Frame is an actual frame from the video feed
    ret, frame = video.read()
    # make sure port is working and read the image
    if frame is not None and video.isOpened():
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        '''
        Detect the faces within the subregions of the image in scales
        scaleFactor indicates how much the image size is reduced at each image scale.
        minNeighbors: Higher value results in higher quality of the detected face.
        '''
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=6)

        # Draw circle around each face
        for (x, y, w, h) in faces:
            # Centre of the face; '+' operators restored (lost in the paste).
            center_coordinates = x + w // 2, y + h // 2
            radius = w // 2  # or can be h / 2 or can be anything based on your requirements
            # background color(black)
            mask = np.zeros(frame.shape[:2], dtype="uint8")
            # Draw the desired region to crop out in white
            cv2.circle(mask, center_coordinates, radius, (255, 255, 255), -1)
            masked = cv2.bitwise_and(frame, frame, mask=mask)
            if season == 0: # Spring
                # Replace all (0,0,0)channel with Coral pink
                masked[np.where((masked == [0, 0, 0]).all(axis=2))] = [121, 131, 248]
            elif season == 1: # Summer
                #Replace all (0,0,0)channel with Rose Red
                masked[np.where((masked==[0,0,0]).all(axis=2))] = [86,30,194]
            elif season == 2: # Fall
                #Replace all (0,0,0)channel with Red Brown /Cinnamon
                masked[np.where((masked==[0,0,0]).all(axis=2))] = [30,105,210]
            else: # Winter
                #Replace all (0,0,0)channel with Burgundy Red
                masked[np.where((masked==[0,0,0]).all(axis=2))] = [31,2,141]
            ret, jpeg = cv2.imencode('.jpg', masked)
            # BUGFIX(review): without this break the for/else `else` below runs
            # after EVERY completed loop, clobbering the masked JPEG with the
            # raw frame. `for/else` only skips the else when the loop breaks.
            break
        else: # it is `for/else` construction, not `if/else` — runs only when no face broke the loop
            ret, jpeg = cv2.imencode('.jpg', frame)
    
        return jpeg.tobytes()

    #return None

BTW:

I see another problem. When it detects many faces, it creates a new mask for every face and applies each mask to the original image — so every mask discards the previous one, meaning only the last face is shown and the other faces are hidden. You should first create one mask containing all the circles and then apply that single mask to the image.


EDIT:

I don't know what web framework you use so I used Flask to create minimal working example.

import os
from flask import Flask, Response
import cv2
import numpy as np

app = Flask(__name__)

# Debug aid: list the cascade files bundled with the OpenCV install.
print('\n'.join(sorted(os.listdir(cv2.data.haarcascades))))
    
# Build the face classifier once from OpenCV's bundled data directory.
path = os.path.join(cv2.data.haarcascades, 'haarcascade_frontalface_default.xml')
face_cascade = cv2.CascadeClassifier(path)
    
# Open the capture device once at import time; the index may need
# changing per machine (0 is usually the built-in webcam).
#video = cv2.VideoCapture(0)  # my webcam
video = cv2.VideoCapture(0)   # your webcam

def color_detection(season):
    """Capture one frame, tint the background of all detected faces with the
    season colour, and return the frame JPEG-encoded.

    season -- 0 spring, 1 summer, 2 fall, anything else winter.
    Returns None (implicitly) when no frame could be read.
    """
    ret, frame = video.read()

    if frame is not None and video.isOpened():
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=6)

        # test two circles on image 640x480
        #faces = [[100, 100, 250, 250], [640-100-250, 480-100-250, 250, 250]]

        # NOTE: it can NOT be `if faces:` — `faces` is a numpy array, which
        # needs a different method to check whether it is empty.
        if len(faces) > 0:
            # background color(black)
            mask = np.zeros(frame.shape[:2], dtype="uint8")
            
            # draw all circles on one mask, so every face is kept
            for (x, y, w, h) in faces:
                #print(x, y, w, h)
                
                # Centre of the face; '+' operators restored (lost in the paste).
                center_coordinates = x + w // 2, y + h // 2
                radius = max(w, h) // 2  # or can be h / 2 or can be anything based on your requirements
                
                # draw the desired region to crop out in white
                cv2.circle(mask, center_coordinates, radius, (255, 255, 255), -1)

            # use mask with all circles
            masked = cv2.bitwise_and(frame, frame, mask=mask)
            
            if season == 0: # Spring - Coral pink
                color = [121, 131, 248]
            elif season == 1: # Summer - Rose Red  
                color = [86,30,194]
            elif season == 2: # Fall - Red Brown /Cinnamon
                color = [30,105,210]
            else: # Winter - Burgundy Red
                color = [31,2,141]

            masked[np.where((masked == [0,0,0]).all(axis=2))] = color
        else:  # no faces: stream the unmodified frame
            masked = frame
                        
        ret, jpeg = cv2.imencode('.jpg', masked)
        return jpeg.tobytes()
    

def gen():
    """Yield multipart MJPEG chunks, grabbing a fresh frame on every pass."""
    while True:
        frame = color_detection(0)
        if frame:
            # '+' operators restored — they were stripped when the code was pasted.
            yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
        #else:
        #    print('no frame')
                    
                    
@app.route('/')
def index():
    """Serve a minimal page that embeds the live stream."""
    page = '<image src="/seasoncolor">'
    return page
                    
@app.route('/seasoncolor')
def seasoncolor():
    """Expose the webcam stream as a multipart MJPEG response."""
    stream = gen()
    return Response(stream, mimetype='multipart/x-mixed-replace; boundary=frame')
    
    
if __name__ == '__main__':
    # Start the Flask development server; uncomment for debug tracebacks.
    # app.debug = True
    app.run()

BTW:

To be clear: VideoCapture can only work with a camera local to the machine running the web server. It cannot access a remote camera on the user's computer — only the user's browser has access to that camera. To work with the camera on the user's computer you would have to use JavaScript to access the camera in the user's browser and send the frames to the server — as in my examples in my GitHub python-examples: web camera in browser - canvas - take image and upload to server

  • Related