
Commit 4b0ee9c

face_recognition_api_Added
1 parent d8c3a05 commit 4b0ee9c

File tree

1 file changed: +44 additions, -1 deletion

Flask_Apis/Image_recognition_from_File_format.py

Lines changed: 44 additions & 1 deletion
@@ -7,11 +7,13 @@
 
 app = Flask(__name__)
 
+face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
+
 @app.route('/')
 def home():
     return jsonify({'message':'Welcome to Flask Apis'})
 
-@app.route('/img',methods=['POST'])
+@app.route('/image_Compare',methods=['POST'])
 def predict():
     file1 = request.files['file1']
     file2 = request.files['file2']
@@ -37,6 +39,47 @@ def predict():
     # Return the similarity percentage in a JSON response
     return jsonify({'similarity_percentage': similarity_percentage})
 
+@app.route('/face_recognize',methods=['POST'])
+def predictface():
+    # Get the uploaded files from the request
+    file1 = request.files['file1']
+    file2 = request.files['file2']
+
+    # Read the images using OpenCV directly from the request files
+    img1 = cv2.imdecode(np.frombuffer(file1.read(), np.uint8), cv2.IMREAD_COLOR)
+    img2 = cv2.imdecode(np.frombuffer(file2.read(), np.uint8), cv2.IMREAD_COLOR)
+
+    # Convert the images to grayscale
+    gray_img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
+    gray_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
+
+    # Detect faces in the images
+    faces1 = face_cascade.detectMultiScale(gray_img1, scaleFactor=1.1, minNeighbors=5)
+    faces2 = face_cascade.detectMultiScale(gray_img2, scaleFactor=1.1, minNeighbors=5)
+
+    # Compare only the first detected face in each image
+    if len(faces1) > 0 and len(faces2) > 0:
+        x1, y1, w1, h1 = faces1[0]
+        x2, y2, w2, h2 = faces2[0]
+
+        # Extract the face regions from the images
+        face1 = gray_img1[y1:y1+h1, x1:x1+w1]
+        face2 = gray_img2[y2:y2+h2, x2:x2+w2]
+
+        # Resize the face regions to the same dimensions
+        resized_face1 = cv2.resize(face1, (face2.shape[1], face2.shape[0]))
+
+        # Calculate the structural similarity index between the face regions
+        score = ssim(resized_face1, face2, full=True)[0]
+
+        # Convert the similarity score to a percentage
+        similarity_percentage = score * 100
+
+        # Return the similarity percentage in a JSON response
+        return jsonify({'similarity_percentage': similarity_percentage})
+
+    else:
+        return jsonify({'similarity_percentage': 'Could not detect faces in both images.'})
 
 if __name__ == '__main__':
     app.run(debug=True)
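
For reference, a minimal client sketch for exercising the new /face_recognize endpoint once the app is running; the local URL (http://127.0.0.1:5000, Flask's default) and the sample image file names are assumptions for illustration, not part of this commit.

import requests  # hypothetical client script, not part of the committed file

# Post two images; the form field names must match request.files['file1'] / ['file2'] above.
with open("person_a.jpg", "rb") as f1, open("person_b.jpg", "rb") as f2:
    response = requests.post(
        "http://127.0.0.1:5000/face_recognize",
        files={"file1": f1, "file2": f2},
    )

# Prints either {'similarity_percentage': <number>} or the 'Could not detect faces...' message.
print(response.json())

The renamed /image_Compare route reads the same two form fields, so the same call shape applies there.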

0 commit comments
