Commit cb27e5e

Renamed object_detection_with_webcam.ipynb to live_object_detection.ipynb, fixed the label map Google Colab bug, and added two more TensorFlow 2 examples
1 parent: e01d71f

File tree: 4 files changed, +247 −19 lines


.gitignore

Lines changed: 2 additions & 1 deletion
@@ -1 +1,2 @@
-.ipynb_checkpoints/
+.ipynb_checkpoints/
+.idea/

detect_from_webcam.py (new file)

Lines changed: 99 additions & 0 deletions
@@ -0,0 +1,99 @@
import numpy as np
import argparse
import tensorflow as tf
import cv2
import pathlib

from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

# Patch tf1 into `utils.ops`
utils_ops.tf = tf.compat.v1

# Patch the location of gfile
tf.gfile = tf.io.gfile


def load_model(model_name):
    base_url = 'http://download.tensorflow.org/models/object_detection/'
    model_file = model_name + '.tar.gz'
    model_dir = tf.keras.utils.get_file(
        fname=model_name,
        origin=base_url + model_file,
        untar=True)

    model_dir = pathlib.Path(model_dir) / "saved_model"

    model = tf.saved_model.load(str(model_dir))
    model = model.signatures['serving_default']

    return model


def run_inference_for_single_image(model, image):
    image = np.asarray(image)
    # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
    input_tensor = tf.convert_to_tensor(image)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis, ...]

    # Run inference
    output_dict = model(input_tensor)

    # All outputs are batched tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(output_dict.pop('num_detections'))
    output_dict = {key: value[0, :num_detections].numpy()
                   for key, value in output_dict.items()}
    output_dict['num_detections'] = num_detections

    # detection_classes should be ints.
    output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)

    # Handle models with masks:
    if 'detection_masks' in output_dict:
        # Reframe the bbox mask to the image size.
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            output_dict['detection_masks'], output_dict['detection_boxes'],
            image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5, tf.uint8)
        output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()

    return output_dict


def run_inference(model, category_index, cap):
    while True:
        ret, image_np = cap.read()
        # Actual detection.
        output_dict = run_inference_for_single_image(model, image_np)
        # Visualization of the results of a detection.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            category_index,
            instance_masks=output_dict.get('detection_masks_reframed', None),
            use_normalized_coordinates=True,
            line_thickness=8)
        cv2.imshow('object_detection', cv2.resize(image_np, (800, 600)))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cap.release()
            cv2.destroyAllWindows()
            break


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Detect objects inside webcam video stream')
    parser.add_argument('-m', '--model', type=str, required=True, help='Model Path')
    parser.add_argument('-l', '--labelmap', type=str, required=True, help='Path to Labelmap')
    args = parser.parse_args()

    detection_model = load_model(args.model)
    category_index = label_map_util.create_category_index_from_labelmap(args.labelmap, use_display_name=True)

    cap = cv2.VideoCapture(0)
    run_inference(detection_model, category_index, cap)
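Since load_model downloads and extracts a SavedModel by name from download.tensorflow.org, the pipeline can be sanity-checked without a webcam by pushing one synthetic frame through run_inference_for_single_image. A minimal sketch; the model name ssd_mobilenet_v1_coco_2017_11_17 is an assumed detection-zoo archive, not something pinned by this commit:

import numpy as np
from detect_from_webcam import load_model, run_inference_for_single_image

# Download an assumed zoo model and run one black 640x480 frame through it.
model = load_model('ssd_mobilenet_v1_coco_2017_11_17')
frame = np.zeros((480, 640, 3), dtype=np.uint8)
result = run_inference_for_single_image(model, frame)
print(result['num_detections'], result['detection_scores'][:5])

The script itself would then be started with something like python detect_from_webcam.py -m ssd_mobilenet_v1_coco_2017_11_17 -l models/research/object_detection/data/mscoco_label_map.pbtxt (label map path assumed).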

object_detection_with_webcam.ipynb renamed to live_object_detection.ipynb

Lines changed: 9 additions & 18 deletions
@@ -7,14 +7,14 @@
    "id": "V8-yl-s-WKMG"
   },
   "source": [
-   "# Object Detection on WebCam Video with Tensorflow 2.0\n",
+   "# Live Object Detection with Tensorflow 2.0\n",
    "\n",
    "<table align=\"left\"><td>\n",
-   "  <a target=\"_blank\" href=\"https://colab.sandbox.google.com/github/TannerGilbert/Tensorflow-Object-Detection-with-Tensorflow-2.0/object_detection_with_webcam.ipynb\">\n",
+   "  <a target=\"_blank\" href=\"https://colab.sandbox.google.com/github/TannerGilbert/Tensorflow-Object-Detection-with-Tensorflow-2.0/blob/master/live_object_detection.ipynb\">\n",
    "    <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab\n",
    "  </a>\n",
    "</td><td>\n",
-   "  <a target=\"_blank\" href=\"https://github.com/TannerGilbert/Tensorflow-Object-Detection-with-Tensorflow-2.0/object_detection_with_webcam.ipynb\">\n",
+   "  <a target=\"_blank\" href=\"https://github.com/TannerGilbert/Tensorflow-Object-Detection-with-Tensorflow-2.0/blob/master/live_object_detection.ipynb\">\n",
    "    <img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n",
    "</td></table>"
   ]
@@ -69,7 +69,8 @@
   },
   "outputs": [],
   "source": [
-   "!pip install -U --pre tensorflow==\"2.*\""
+   "!pip install -U --pre tensorflow==\"2.*\"\n",
+   "!pip install tf_slim"
   ]
  },
  {
@@ -166,16 +167,6 @@
    "pip install ."
   ]
  },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
-  "source": [
-   "%%bash\n",
-   "os.chdir('models/research/object_detection')"
-  ]
- },
  {
   "cell_type": "markdown",
   "metadata": {
@@ -345,7 +336,7 @@
   "outputs": [],
   "source": [
    "# List of the strings that is used to add correct label for each box.\n",
-   "PATH_TO_LABELS = 'data/mscoco_label_map.pbtxt'\n",
+   "PATH_TO_LABELS = 'models/research/object_detection/data/mscoco_label_map.pbtxt'\n",
    "category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)"
   ]
  },
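This hunk is the label map Google Colab fix named in the commit message. The removed cell in the previous hunk paired %%bash with a Python os.chdir call (which bash cannot execute, and which would not persist across cells anyway), so the notebook now stays in its starting working directory and must reference the label map by its full path under models/research/object_detection. A quick check that the corrected path loads, as a minimal sketch assuming the TF Object Detection API is installed and the models repo is cloned into the working directory:

from object_detection.utils import label_map_util

# Load the COCO label map from the corrected repo-root-relative path.
PATH_TO_LABELS = 'models/research/object_detection/data/mscoco_label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(
    PATH_TO_LABELS, use_display_name=True)
print(category_index[1])  # COCO class id 1 is 'person'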
@@ -506,10 +497,10 @@
   "outputs": [],
   "source": [
    "import cv2\n",
-   "cap = cv2.VideoCapture(0)\n",
+   "cap = cv2.VideoCapture(0)  # or cap = cv2.VideoCapture(\"<video-path>\")\n",
    "\n",
    "def run_inference(model, cap):\n",
-   "    while True:\n",
+   "    while cap.isOpened():\n",
    "        ret, image_np = cap.read()\n",
    "        # Actual detection.\n",
    "        output_dict = run_inference_for_single_image(model, image_np)\n",
The final hunk rewrites only the closing brace, which on GitHub typically reflects a change to the file's trailing newline:

@@ -664,4 +655,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 4
-}
+}
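The while cap.isOpened() change in the webcam cell above is a useful guard when reading from a video file instead of a camera, but cap.read() can still return ret=False and hand None to the model. A defensive variant, as a minimal sketch of my own rather than part of the commit:

import cv2

def frames(cap):
    """Yield frames until the stream ends, the device fails, or the capture closes."""
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:  # no frame delivered: end of stream or camera error
            break
        yield frame
    cap.release()

# Usage sketch: iterate webcam frames and run them through the detector.
# for image_np in frames(cv2.VideoCapture(0)):
#     output_dict = run_inference_for_single_image(model, image_np)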

person_detection.py (new file)

Lines changed: 137 additions & 0 deletions
@@ -0,0 +1,137 @@
import numpy as np
import argparse
import tensorflow as tf
import cv2
import pathlib
import os
import pandas as pd
from PIL import Image
import datetime

from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

# Patch tf1 into `utils.ops`
utils_ops.tf = tf.compat.v1

# Patch the location of gfile
tf.gfile = tf.io.gfile


def load_model(model_name):
    base_url = 'http://download.tensorflow.org/models/object_detection/'
    model_file = model_name + '.tar.gz'
    model_dir = tf.keras.utils.get_file(
        fname=model_name,
        origin=base_url + model_file,
        untar=True)

    model_dir = pathlib.Path(model_dir) / "saved_model"

    model = tf.saved_model.load(str(model_dir))
    model = model.signatures['serving_default']

    return model


def run_inference_for_single_image(model, image):
    image = np.asarray(image)
    # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
    input_tensor = tf.convert_to_tensor(image)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis, ...]

    # Run inference
    output_dict = model(input_tensor)

    # All outputs are batched tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(output_dict.pop('num_detections'))
    output_dict = {key: value[0, :num_detections].numpy()
                   for key, value in output_dict.items()}
    output_dict['num_detections'] = num_detections

    # detection_classes should be ints.
    output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)

    # Handle models with masks:
    if 'detection_masks' in output_dict:
        # Reframe the bbox mask to the image size.
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            output_dict['detection_masks'], output_dict['detection_boxes'],
            image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5, tf.uint8)
        output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()

    return output_dict


def run_inference(model, category_index, cap, show_video_steam, label_to_look_for, output_directory):
    os.makedirs(output_directory, exist_ok=True)
    os.makedirs(output_directory + '/images', exist_ok=True)

    if os.path.exists(output_directory + '/results.csv'):
        df = pd.read_csv(output_directory + '/results.csv')
    else:
        df = pd.DataFrame(columns=['timestamp', 'img_path'])

    while True:
        ret, image_np = cap.read()
        # Copy image for later
        image_show = np.copy(image_np)
        image_height, image_width, _ = image_np.shape
        # Actual detection.
        output_dict = run_inference_for_single_image(model, image_np)
        # Visualization of the results of a detection.
        if show_video_steam:
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                output_dict['detection_boxes'],
                output_dict['detection_classes'],
                output_dict['detection_scores'],
                category_index,
                instance_masks=output_dict.get('detection_masks_reframed', None),
                use_normalized_coordinates=True,
                line_thickness=8)
            cv2.imshow('object_detection', cv2.resize(image_np, (800, 600)))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cap.release()
                cv2.destroyAllWindows()
                break

        # Get data (label, xmin, ymin, xmax, ymax)
        output = []
        for index, score in enumerate(output_dict['detection_scores']):
            label = category_index[output_dict['detection_classes'][index]]['name']
            ymin, xmin, ymax, xmax = output_dict['detection_boxes'][index]
            output.append((label, int(xmin * image_width), int(ymin * image_height), int(xmax * image_width),
                           int(ymax * image_height)))

        # Save incident (could be extended to send an email or something)
        for l, x_min, y_min, x_max, y_max in output:
            if l == label_to_look_for:
                array = cv2.cvtColor(np.array(image_show), cv2.COLOR_RGB2BGR)
                image = Image.fromarray(array)
                cropped_img = image.crop((x_min, y_min, x_max, y_max))
                file_path = output_directory + '/images/' + str(len(df)) + '.jpg'
                cropped_img.save(file_path, "JPEG", icc_profile=cropped_img.info.get('icc_profile'))
                df.loc[len(df)] = [datetime.datetime.now(), file_path]
                df.to_csv(output_directory + '/results.csv', index=None)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Detect objects inside webcam video stream')
    parser.add_argument('-m', '--model', type=str, required=True, help='Model Path')
    parser.add_argument('-l', '--labelmap', type=str, required=True, help='Path to Labelmap')
    parser.add_argument('-s', '--show', default=True, action='store_true', help='Show window')
    parser.add_argument('-la', '--label', default='person', type=str, help='Label name to detect')
    parser.add_argument('-o', '--output_directory', default='results', type=str, help='Directory for the outputs')
    args = parser.parse_args()

    detection_model = load_model(args.model)
    category_index = label_map_util.create_category_index_from_labelmap(args.labelmap, use_display_name=True)

    cap = cv2.VideoCapture(0)
    run_inference(detection_model, category_index, cap, args.show, args.label, args.output_directory)
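One detail worth flagging in run_inference: the extraction loop enumerates output_dict['detection_scores'] but never uses the score, so every box whose label matches label_to_look_for is saved regardless of confidence. A score threshold is a natural refinement; the helper below is my own sketch (extract_boxes and min_score are assumptions, not part of the commit):

def extract_boxes(output_dict, category_index, image_width, image_height, min_score=0.5):
    """Return (label, xmin, ymin, xmax, ymax) tuples for detections above min_score."""
    boxes = []
    for index, score in enumerate(output_dict['detection_scores']):
        if score < min_score:  # drop low-confidence detections
            continue
        label = category_index[output_dict['detection_classes'][index]]['name']
        ymin, xmin, ymax, xmax = output_dict['detection_boxes'][index]  # normalized coords
        boxes.append((label,
                      int(xmin * image_width), int(ymin * image_height),
                      int(xmax * image_width), int(ymax * image_height)))
    return boxes

Invocation mirrors the other script, for example python person_detection.py -m ssd_mobilenet_v1_coco_2017_11_17 -l models/research/object_detection/data/mscoco_label_map.pbtxt -la person -o results (the model name is an assumed zoo archive).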
