Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit 1880799

Browse files
commit on 'master'
1 parent f9e6a6e commit 1880799

File tree

15 files changed

+2813
-0
lines changed

15 files changed

+2813
-0
lines changed
Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
# GENDER-AGE DETECTION
2+
3+
## DESCRIPTION
4+
5+
The objective of this project is to automatically identify the gender and approximate age of a person from an image.
6+
7+
## PROJECT REQUISITES
8+
9+
To build this project we will use
10+
11+
- Deep Learning to accurately identify the gender and age of a person from a single image of a face.
12+
- We will use this [dataset](https://www.kaggle.com/ttungl/adience-benchmark-gender-and-age-classification)
13+
- The predicted gender may be one of ‘Male’ and ‘Female’, and the predicted age may be one of the following 8 ranges- (0 – 2), (4 – 6), (8 – 12), (15 – 20), (25 – 32), (38 – 43), (48 – 53), (60 – 100).
14+
15+
## PROJECT STRUCTURE
16+
17+
These are the steps to build the Gender-Age Detection Python project:
18+
19+
- Detect faces
20+
- Classify into Male/Female
21+
- Classify into one of the 8 age ranges
22+
- Put the results on the image and display it
23+
24+
## AUTHOR NAME
25+
26+
[ANUSHKA CHITRANSHI](https://github.com/codebuzzer01)
Lines changed: 175 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,175 @@
1+
# Network definition in legacy Caffe "V1" prototxt syntax (uppercase type
# enums, `layers` instead of `layer`), loadable via cv2.dnn.readNet.
# NOTE(review): the final INNER_PRODUCT has num_output: 8, matching the
# project's 8 age buckets — presumably this is the age network; confirm
# against the deploy filename it is saved under.
name: "CaffeNet"
# Input: one 227x227 3-channel (BGR) image per forward pass.
input: "data"
input_dim: 1
input_dim: 3
input_dim: 227
input_dim: 227
# conv1: 96 filters, 7x7 kernel, stride 4.
layers {
  name: "conv1"
  type: CONVOLUTION
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 96
    kernel_size: 7
    stride: 4
  }
}
layers {
  name: "relu1"
  type: RELU
  bottom: "conv1"
  top: "conv1"
}
# pool1: 3x3 max pooling, stride 2.
layers {
  name: "pool1"
  type: POOLING
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
# norm1: local response normalization across 5 channels.
layers {
  name: "norm1"
  type: LRN
  bottom: "pool1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
# conv2: 256 filters, 5x5 kernel, pad 2 (same spatial size).
layers {
  name: "conv2"
  type: CONVOLUTION
  bottom: "norm1"
  top: "conv2"
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
  }
}
layers {
  name: "relu2"
  type: RELU
  bottom: "conv2"
  top: "conv2"
}
# pool2: 3x3 max pooling, stride 2.
layers {
  name: "pool2"
  type: POOLING
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layers {
  name: "norm2"
  type: LRN
  bottom: "pool2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
# conv3: 384 filters, 3x3 kernel, pad 1.
layers {
  name: "conv3"
  type: CONVOLUTION
  bottom: "norm2"
  top: "conv3"
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
  }
}
layers{
  name: "relu3"
  type: RELU
  bottom: "conv3"
  top: "conv3"
}
# Final spatial pooling before the fully-connected head.
layers {
  name: "pool5"
  type: POOLING
  bottom: "conv3"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
# fc6/fc7: two 512-wide fully-connected layers, each followed by ReLU
# and 50% dropout (dropout is a no-op at inference time).
layers {
  name: "fc6"
  type: INNER_PRODUCT
  bottom: "pool5"
  top: "fc6"
  inner_product_param {
    num_output: 512
  }
}
layers {
  name: "relu6"
  type: RELU
  bottom: "fc6"
  top: "fc6"
}
layers {
  name: "drop6"
  type: DROPOUT
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layers {
  name: "fc7"
  type: INNER_PRODUCT
  bottom: "fc6"
  top: "fc7"
  inner_product_param {
    num_output: 512
  }
}
layers {
  name: "relu7"
  type: RELU
  bottom: "fc7"
  top: "fc7"
}
layers {
  name: "drop7"
  type: DROPOUT
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}
# fc8: one logit per class (8 classes).
layers {
  name: "fc8"
  type: INNER_PRODUCT
  bottom: "fc7"
  top: "fc8"
  inner_product_param {
    num_output: 8
  }
}
# prob: softmax over the 8 logits — the network's final output.
layers {
  name: "prob"
  type: SOFTMAX
  bottom: "fc8"
  top: "prob"
}
43.5 MB
Binary file not shown.
Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,75 @@
1+
import cv2
2+
import math
3+
import argparse
4+
5+
def highlightFace(net, frame, conf_threshold=0.7):
    """Detect faces in *frame* with the DNN face detector *net*.

    Returns a tuple ``(annotated, boxes)``: ``annotated`` is a copy of
    *frame* with a green rectangle drawn around every detection whose
    confidence exceeds ``conf_threshold``; ``boxes`` is a list of
    ``[x1, y1, x2, y2]`` pixel coordinates for those detections.
    The input *frame* itself is never modified.
    """
    annotated = frame.copy()
    height = annotated.shape[0]
    width = annotated.shape[1]
    # 300x300 mean-subtracted blob, the input the SSD face model expects.
    blob = cv2.dnn.blobFromImage(annotated, 1.0, (300, 300), [104, 117, 123], True, False)

    net.setInput(blob)
    detections = net.forward()

    boxes = []
    for idx in range(detections.shape[2]):
        score = detections[0, 0, idx, 2]
        if not score > conf_threshold:
            continue
        # Detector outputs normalized [0, 1] corners; scale to pixels.
        x1 = int(detections[0, 0, idx, 3] * width)
        y1 = int(detections[0, 0, idx, 4] * height)
        x2 = int(detections[0, 0, idx, 5] * width)
        y2 = int(detections[0, 0, idx, 6] * height)
        boxes.append([x1, y1, x2, y2])
        # Rectangle thickness scales with the frame height.
        cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 255, 0), int(round(height / 150)), 8)
    return annotated, boxes
24+
25+
26+
# CLI: optional --image path; when omitted, the default webcam (device 0)
# is used as the video source.
parser = argparse.ArgumentParser()
parser.add_argument('--image')

args = parser.parse_args()

# Model definition + weight files for the three networks (face detector,
# age classifier, gender classifier), expected in the working directory.
faceProto = "opencv_face_detector.pbtxt"
faceModel = "opencv_face_detector_uint8.pb"
ageProto = "age_deploy.prototxt"
ageModel = "age_net.caffemodel"
genderProto = "gender_deploy.prototxt"
genderModel = "gender_net.caffemodel"

# Per-channel mean values the age/gender Caffe models were trained with
# (subtracted from the face crop before inference).
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
# Label lists; index order must match the networks' output neurons.
ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
genderList = ['Male', 'Female']

faceNet = cv2.dnn.readNet(faceModel, faceProto)
ageNet = cv2.dnn.readNet(ageModel, ageProto)
genderNet = cv2.dnn.readNet(genderModel, genderProto)

video = cv2.VideoCapture(args.image if args.image else 0)
padding = 20  # pixels of context added around each detected face box
# Loop until any key is pressed in the display window.
while cv2.waitKey(1) < 0:
    hasFrame, frame = video.read()
    if not hasFrame:
        # Source exhausted (end of file / camera gone): wait for a key
        # so the last frame stays visible, then exit the loop.
        cv2.waitKey()
        break

    resultImg, faceBoxes = highlightFace(faceNet, frame)
    if not faceBoxes:
        print("No face detected")

    for faceBox in faceBoxes:
        # Crop the face with padding, clamped to the frame bounds.
        # (Slice ends are exclusive, so clamping to shape[...] keeps the
        # last row/column; the original's shape-1 dropped one pixel.)
        face = frame[max(0, faceBox[1] - padding):min(faceBox[3] + padding, frame.shape[0]),
                     max(0, faceBox[0] - padding):min(faceBox[2] + padding, frame.shape[1])]
        # A detection lying (partly) outside the frame can produce an
        # empty crop; blobFromImage would raise on it, so skip the box.
        if face.size == 0:
            continue

        blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
        genderNet.setInput(blob)
        genderPreds = genderNet.forward()
        gender = genderList[genderPreds[0].argmax()]
        print(f'Gender: {gender}')

        ageNet.setInput(blob)
        agePreds = ageNet.forward()
        age = ageList[agePreds[0].argmax()]
        print(f'Age: {age[1:-1]} years')

        cv2.putText(resultImg, f'{gender}, {age}', (faceBox[0], faceBox[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2, cv2.LINE_AA)

    # Display once per frame, outside the face loop — the original only
    # called imshow inside it, so frames with zero detections (and the
    # whole feed before the first face) were never shown.
    cv2.imshow("Detecting age and gender", resultImg)

# Release the capture device / file handle and close the window.
video.release()
cv2.destroyAllWindows()

0 commit comments

Comments
(0)

AltStyle によって変換されたページ (->オリジナル) /