/*
 * Copyright (c) 2020
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DNN OpenVINO backend implementation.
 */

#include "../internal.h"

#if HAVE_OPENVINO2
#include <openvino/c/openvino.h>
#else
#include <c_api/ie_c_api.h>
#endif

#if HAVE_OPENVINO2
#else
#endif

// one request for one call to openvino
#if HAVE_OPENVINO2
#else
#endif

#define APPEND_STRING(generated_string, iterate_string)                                            \
    generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
                                          av_asprintf("%s", iterate_string);
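/*
 * Illustrative usage sketch (not part of the original source): starting from a
 * NULL pointer, successive expansions build up a space-separated list, e.g.
 *
 *     char *names = NULL;
 *     APPEND_STRING(names, "CPU")   // names == "CPU"
 *     APPEND_STRING(names, "GPU")   // names == "CPU GPU"
 *
 * No trailing ';' is needed at the call site because the macro body already ends
 * with one. Note that the previous string is not freed when a new one is
 * allocated, so this is only suitable for short-lived diagnostic strings such as
 * the available-device list logged later in this file.
 */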

#define OFFSET(x) offsetof(OVContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
    { "scale", "Add scale preprocess operation. Divide each element of input by specified value.", OFFSET(options.scale), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
    { "mean",  "Add mean preprocess operation. Subtract specified value from each element of input.", OFFSET(options.mean),  AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
};
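/*
 * Worked example (illustrative, not from the original source): with the
 * preprocessing steps configured later in this file (convert to float,
 * subtract mean, then divide by scale), each input element x is mapped to
 * (x - mean) / scale. For instance, mean = 127.5 and scale = 127.5 map the
 * 8-bit range [0, 255] onto approximately [-1.0, 1.0].
 */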

#if HAVE_OPENVINO2
static const struct {
    { OK,                     0,                  "success"                },
    { NOT_IMPLEMENTED,        AVERROR(ENOSYS),    "not implemented"        },
    { PARAMETER_MISMATCH,     AVERROR(EINVAL),    "parameter mismatch"     },
    { OUT_OF_BOUNDS,          AVERROR(EOVERFLOW), "out of bounds"          },
    { REQUEST_BUSY,           AVERROR(EBUSY),     "request busy"           },
    { RESULT_NOT_READY,       AVERROR(EBUSY),     "result not ready"       },
    { NOT_ALLOCATED,          AVERROR(ENODATA),   "not allocated"          },
    { INFER_CANCELLED,        AVERROR(ECANCELED), "infer cancelled"        },
    { INVALID_C_PARAM,        AVERROR(EINVAL),    "invalid C parameter"    },
    { NOT_IMPLEMENT_C_METHOD, AVERROR(ENOSYS),    "not implement C method" },
};

{
        }
    }
        *desc = "unknown error";
}
#endif
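/*
 * Usage sketch (illustrative; the mapping helper's name and the local variables
 * used here are assumptions, since its signature is elided from this fragment).
 * OpenVINO C API calls return an ov_status_e, which the table above translates
 * into an FFmpeg AVERROR code plus a short description, falling back to
 * "unknown error" for statuses that are not listed:
 *
 *     status = ov_core_create(&core);
 *     if (status != OK) {
 *         ret = ov2_map_error(status, &desc);
 *         av_log(ctx, AV_LOG_ERROR, "Failed to create OpenVINO core: %s\n", desc);
 *         return ret;
 *     }
 */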

#if HAVE_OPENVINO2
#else
#endif
{
    switch (precision)
    {
#if HAVE_OPENVINO2
    case F32:
#else
    case FP32:
#endif
    case U8:
    default:
    }
}

{
    switch (dt)
    {
        return sizeof(float);
        return sizeof(uint8_t);
    default:
        return 1;
    }
}

{
#if HAVE_OPENVINO2
    ov_tensor_t* tensor = NULL;
    ov_shape_t input_shape = {0};
    ov_element_type_e precision;
    char *port_name;
#else
    dimensions_t dims;
    precision_e precision;
    ie_blob_buffer_t blob_buffer;
    ie_blob_t *input_blob = NULL;
#endif

#if HAVE_OPENVINO2
        ov_output_const_port_free(ov_model->input_port);
    }
    else
    }
    }
    ov_free(port_name);

    }
    dims = input_shape.dims;
        ov_shape_free(&input_shape);
    }
    for (int i = 0; i < input_shape.rank; i++)
#else
    }

    status |= ie_blob_get_dims(input_blob, &dims);
    status |= ie_blob_get_precision(input_blob, &precision);
        ie_blob_free(&input_blob);
    }

    status = ie_blob_get_buffer(input_blob, &blob_buffer);
        ie_blob_free(&input_blob);
    }
    for (int i = 0; i < input_shape.rank; i++)
    input.data = blob_buffer.buffer;
#endif
    // all models in openvino open model zoo use BGR as input,
    // change to be an option when necessary.
    // We use preprocess_steps to scale input data, so disable scale and mean here.

    for (int i = 0; i < ctx->options.batch_size; ++i) {
        if (!lltask) {
            break;
        }
#if HAVE_OPENVINO2
        if (tensor)
            ov_tensor_free(tensor);
        status = ov_tensor_create(precision, input_shape, &tensor);
        ov_shape_free(&input_shape);
        }
        }
        }
#endif
            } else {
            }
        }
            break;
            break;
            break;
        default:
            break;
        }
    }
#if HAVE_OPENVINO2
    ov_tensor_free(tensor);
#else
    ie_blob_free(&input_blob);
#endif

    return 0;
}

{
#if HAVE_OPENVINO2
    int64_t* dims;
    ov_tensor_t *output_tensor;
    ov_shape_t output_shape = {0};
    ov_element_type_e precision;

        return;
    }

                                                         &output_tensor);
               "Failed to get output tensor.");
        goto end;
    }

               "Failed to get output data.");
        goto end;
    }

    status = ov_tensor_get_shape(output_tensor, &output_shape);
        goto end;
    }
    dims = output_shape.dims;

        goto end;
    }
        outputs[i].dims[1] = output_shape.rank > 2 ? dims[output_shape.rank - 3] : 1;
        outputs[i].dims[2] = output_shape.rank > 1 ? dims[output_shape.rank - 2] : 1;
        outputs[i].dims[3] = output_shape.rank > 0 ? dims[output_shape.rank - 1] : 1;
        ov_shape_free(&output_shape);
        ov_tensor_free(output_tensor);
        output_tensor = NULL;
    }
#else
    dimensions_t dims;
    ie_blob_t *output_blob = NULL;
    ie_blob_buffer_t blob_buffer;
    precision_e precision;
               "output \"%s\" may not be correct, all output(s) are: \"%s\"\n",
        return;
    }

    status = ie_blob_get_buffer(output_blob, &blob_buffer);
        ie_blob_free(&output_blob);
        return;
    }

    status |= ie_blob_get_dims(output_blob, &dims);
    status |= ie_blob_get_precision(output_blob, &precision);
        ie_blob_free(&output_blob);
        return;
    }
    output.data = blob_buffer.buffer;
    for (int i = 0; i < 4; i++)
#endif

            } else {
            }
        } else {
        }
        break;
            goto end;
        }
        break;
            goto end;
        }
        for (int output_i = 0; output_i < ov_model->nb_outputs; output_i++)
        break;
    default:
        break;
    }

    }
end:
#if HAVE_OPENVINO2
    ov_shape_free(&output_shape);
    if (output_tensor)
        ov_tensor_free(output_tensor);
#else
    ie_blob_free(&output_blob);
#endif
#if HAVE_OPENVINO2
#else
#endif
        return;
    }
}

{

    if (!model || !*model)
        return;

    ov_model = (*model)->model;
#if HAVE_OPENVINO2
#else
#endif
    }
    }

    }

    }
#if HAVE_OPENVINO2
    ov_output_const_port_free(ov_model->input_port);
    ov_preprocess_prepostprocessor_free(ov_model->preprocess);
    ov_core_free(ov_model->core);
#else
    ie_network_free(&ov_model->network);
    ie_core_free(&ov_model->core);
#endif
}

{
#if HAVE_OPENVINO2
    ov_preprocess_input_tensor_info_t* input_tensor_info = NULL;
    ov_preprocess_output_tensor_info_t* output_tensor_info = NULL;
    ov_preprocess_input_model_info_t* input_model_info = NULL;
    ov_model_t *tmp_ov_model;
    ov_layout_t* NHWC_layout = NULL;
    ov_layout_t* NCHW_layout = NULL;
    const char* NHWC_desc = "NHWC";
    const char* NCHW_desc = "NCHW";
    const char* device = ctx->options.device_type;
#else
    ie_available_devices_t a_dev;
    char *all_dev_names = NULL;
#endif
    // We scale pixels by default when doing frame processing.
    // batch size
    if (ctx->options.batch_size <= 0) {
        ctx->options.batch_size = 1;
    }
#if HAVE_OPENVINO2
    if (ctx->options.batch_size > 1) {
               "change batch_size to 1.\n");
        ctx->options.batch_size = 1;
    }

        goto err;
    }

    if (input_name)
    else
        goto err;
    }

    status = ov_preprocess_input_info_get_tensor_info(ov_model->input_info, &input_tensor_info);
        goto err;
    }

    //set input layout
    status = ov_layout_create(NHWC_desc, &NHWC_layout);
    status |= ov_layout_create(NCHW_desc, &NCHW_layout);
        goto err;
    }

    status = ov_preprocess_input_tensor_info_set_layout(input_tensor_info, NHWC_layout);
        goto err;
    }

    status = ov_preprocess_input_info_get_model_info(ov_model->input_info, &input_model_info);
        goto err;
    }
        status = ov_preprocess_input_model_info_set_layout(input_model_info, NCHW_layout);
        status = ov_preprocess_input_model_info_set_layout(input_model_info, NHWC_layout);
        goto err;
    }

    status = ov_preprocess_input_tensor_info_set_element_type(input_tensor_info, U8);
        goto err;
    }

    if (!nb_outputs) {
        size_t output_size;
            goto err;
        }
        nb_outputs = output_size;
    }
    for (int i = 0; i < nb_outputs; i++) {
        if (output_names)
            status = ov_preprocess_prepostprocessor_get_output_info_by_name(
        else
            status = ov_preprocess_prepostprocessor_get_output_info_by_index(
            goto err;
        }
        status |= ov_preprocess_output_info_get_tensor_info(ov_model->output_info, &output_tensor_info);
            goto err;
        }
            status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
            status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
        else
            status |= ov_preprocess_output_set_element_type(output_tensor_info, U8);
            goto err;
        }
        ov_preprocess_output_tensor_info_free(output_tensor_info);
        output_tensor_info = NULL;
        ov_preprocess_output_info_free(ov_model->output_info);
    }
    // set preprocess steps.
        ov_preprocess_preprocess_steps_t* input_process_steps = NULL;
        status = ov_preprocess_input_info_get_preprocess_steps(ov_model->input_info, &input_process_steps);
            goto err;
        }
        status = ov_preprocess_preprocess_steps_convert_element_type(input_process_steps, F32);
        status |= ov_preprocess_preprocess_steps_mean(input_process_steps, ctx->options.mean);
        status |= ov_preprocess_preprocess_steps_scale(input_process_steps, ctx->options.scale);
            ov_preprocess_preprocess_steps_free(input_process_steps);
            input_process_steps = NULL;
            goto err;
        }
        ov_preprocess_preprocess_steps_free(input_process_steps);
        input_process_steps = NULL;
    }
    ov_preprocess_input_tensor_info_free(input_tensor_info);
    input_tensor_info = NULL;
    ov_preprocess_input_info_free(ov_model->input_info);

    //update model
        ov_model_free(tmp_ov_model);
        goto err;
    }
    ov_model_free(tmp_ov_model);

    //update output_port
        goto err;
    }
    } else
        for (int i = 0; i < nb_outputs; i++) {
        }

    for (int i = 0; i < nb_outputs; i++) {
        char *port_name;
        if (output_names)
            status = ov_model_const_output_by_name(ov_model->ov_model, output_names[i],
        else
            goto err;
        }
            goto err;
        }
        ov_free(port_name);
    }
    //compile network
        goto err;
    }
    ov_preprocess_input_model_info_free(input_model_info);
    input_model_info = NULL;
    ov_layout_free(NCHW_layout);
    ov_layout_free(NHWC_layout);
#else
    if (ctx->options.batch_size > 1) {
        input_shapes_t input_shapes;
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
            goto err;
        }
        for (int i = 0; i < input_shapes.shape_num; i++)
            input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
        status = ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
            goto err;
        }
    }

    // The order of dims in OpenVINO is fixed and is always NCHW for 4-D data,
    // while we pass NHWC data from FFmpeg to OpenVINO.
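    // Illustrative example: a packed 640x480 BGR frame handed over by FFmpeg is
    // NHWC data with N=1, H=480, W=640, C=3; declaring the input layout as NHWC
    // here lets the inference engine reorder it to its native NCHW internally
    // instead of requiring a manual transpose on our side.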
    status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
    if (status == NOT_FOUND) {
    } else {
    }
        goto err;
    }
    status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
    if (status == NOT_FOUND) {
    } else {
    }
        goto err;
    }

    // all models in openvino open model zoo use BGR with range [0.0f, 255.0f] as input,
    // we don't have an AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
    // ask openvino to do the conversion internally.
    // the current supported SR model (frame processing) is generated from a tensorflow model,
    // and its input is the Y channel as float with range [0.0f, 1.0f], so do not set it for this case.
    // TODO: we need to get a final clear & general solution with all backends/formats considered.
    status = ie_network_set_input_precision(ov_model->network, input_name, U8);
        goto err;
    }
    }

    status = ie_core_get_available_devices(ov_model->core, &a_dev);
        goto err;
    }
    for (int i = 0; i < a_dev.num_devices; i++) {
        APPEND_STRING(all_dev_names, a_dev.devices[i])
    }
           ctx->options.device_type, all_dev_names);
        goto err;
    }
#endif
    // create infer_requests for async execution
    if (ctx->options.nireq <= 0) {
        // the default value is a rough estimation
    }

        goto err;
    }

    for (int i = 0; i < ctx->options.nireq; i++) {
        if (!item) {
            goto err;
        }

#if HAVE_OPENVINO2
#else
#endif
            goto err;
        }

#if HAVE_OPENVINO2
            goto err;
        }
#else
            goto err;
        }
#endif

            goto err;
        }
    }

        goto err;
    }

        goto err;
    }

    return 0;

err:
#if HAVE_OPENVINO2
    if (output_tensor_info)
        ov_preprocess_output_tensor_info_free(output_tensor_info);
    ov_preprocess_output_info_free(ov_model->output_info);
    if (NCHW_layout)
        ov_layout_free(NCHW_layout);
    if (NHWC_layout)
        ov_layout_free(NHWC_layout);
    if (input_model_info)
        ov_preprocess_input_model_info_free(input_model_info);
#endif
}

{
#if HAVE_OPENVINO2
#else
#endif

#if HAVE_OPENVINO2
#else
#endif
        return 0;
    }

    task = lltask->task;
    ov_model = task->model;

        goto err;
    }

#if HAVE_OPENVINO2
            goto err;
        }

            goto err;
        }
        return 0;
    } else {
            goto err;
        }
    }
#else
            goto err;
        }
            goto err;
        }
        return 0;
    } else {
            goto err;
        }
    }
#endif
err:
#if HAVE_OPENVINO2
#else
#endif
    }
}

{
    int input_resizable = ctx->options.input_resizable;

#if HAVE_OPENVINO2
    ov_shape_t input_shape = {0};
    ov_element_type_e precision;
    if (input_name)
    else
    }
    }
    }
    for (int i = 0; i < 4; i++)
        input->dims[i] = input_shape.dims[i];
    if (input_resizable) {
    }

    if (input_shape.dims[1] <= 3) // NCHW
    else // NHWC

    ov_shape_free(&input_shape);
    return 0;
#else
    char *model_input_name = NULL;
    size_t model_input_count = 0;
    dimensions_t dims;
    precision_e precision;
    status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
    }
    for (size_t i = 0; i < model_input_count; i++) {
        status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
        }
        if (strcmp(model_input_name, input_name) == 0) {
            ie_network_name_free(&model_input_name);
            status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
            status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
            }

            for (int i = 0; i < 4; i++)
                input->dims[i] = dims.dims[i];
            if (input_resizable) {
            }

            if (dims.dims[1] <= 3) // NCHW
            else // NHWC

            return 0;
        }

        ie_network_name_free(&model_input_name);
    }

#endif
}

{

    if (!sd) { // this frame has nothing detected
        return 0;
    }

        return 0;
    }

        return 0;
    }

    for (uint32_t i = 0; i < header->nb_bboxes; i++) {
        if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
            return 0;
        }
        if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->height) {
            return 0;
        }

            return 0;
        }
    }

    return 1;
}

{
    switch (func_type) {
    {
        if (!lltask) {
        }
        lltask->task = task;
        }
        return 0;
    }
    {

            return 0;
        }

                continue;
            }
        }

            if (!lltask) {
            }
            lltask->task = task;
            }
        }
        return 0;
    }
    default:
    }
}

static int get_output_ov(void *model, const char *input_name, int input_width, int input_height,
                         const char *output_name, int *output_width, int *output_height)
{
#if HAVE_OPENVINO2
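    // Each ov_dimension_t below is a {min, max} range; using the same value for
    // min and max pins that dimension, so the partial shape built from these four
    // entries describes a fixed N=1, C=1, H=input_height, W=input_width input
    // when a resizable model is reshaped below.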
    ov_dimension_t dims[4] = {{1, 1}, {1, 1}, {input_height, input_height}, {input_width, input_width}};
    ov_shape_t input_shape = {0};
    ov_partial_shape_t partial_shape;
#else
    input_shapes_t input_shapes;
#endif
        .output_names = output_name ? &output_name : NULL,
        .nb_output = 1,
    };

    }

#if HAVE_OPENVINO2
    if (ctx->options.input_resizable) {
        status = ov_partial_shape_create(4, dims, &partial_shape);
        }
        }
        input_shape.dims[2] = input_height;
        input_shape.dims[3] = input_width;

        status = ov_shape_to_partial_shape(input_shape, &partial_shape);
        ov_shape_free(&input_shape);
        }

        status = ov_model_reshape_single_input(ov_model->ov_model, partial_shape);
        ov_partial_shape_free(&partial_shape);
        }
    }

#else
    if (ctx->options.input_resizable) {
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
        input_shapes.shapes->shape.dims[2] = input_height;
        input_shapes.shapes->shape.dims[3] = input_width;
        status |= ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
        }
    }
#endif
    }
    }

        goto err;
    }

        goto err;
    }

    if (!request) {
        goto err;
    }

err:
}

{
#if HAVE_OPENVINO2
    ov_core_t* core = NULL;
    ov_model_t* ovmodel = NULL;
#else
    size_t node_count = 0;
    char *node_name = NULL;
#endif

    if (!model) {
    }

    if (!ov_model) {
    }
    model->model = ov_model;
    ov_model->model = model;
    ov_model->ctx.class = &dnn_openvino_class;

    //parse options
        goto err;
    }

#if HAVE_OPENVINO2
    status = ov_core_create(&core);
        goto err;
    }
    ov_model->core = core;

    status = ov_core_read_model(core, model_filename, NULL, &ovmodel);
        ov_version_t ver;
        status = ov_get_openvino_version(&ver);
               "Please check if the model version matches the runtime OpenVINO Version:\n",
               model_filename);
        }
        ov_version_free(&ver);
        goto err;
    }
#else

    status = ie_core_create("", &ov_model->core);
        goto err;

        ie_version_t ver;
        ver = ie_c_api_version();
               "Please check if the model version matches the runtime OpenVINO %s\n",
               model_filename, ver.api_version);
        ie_version_free(&ver);
        goto err;
    }

    //get all the input and output names
    status = ie_network_get_inputs_number(ov_model->network, &node_count);
        goto err;
    }
    for (size_t i = 0; i < node_count; i++) {
        status = ie_network_get_input_name(ov_model->network, i, &node_name);
            goto err;
        }
        ie_network_name_free(&node_name);
    }
    status = ie_network_get_outputs_number(ov_model->network, &node_count);
        goto err;
    }
    for (size_t i = 0; i < node_count; i++) {
        status = ie_network_get_output_name(ov_model->network, i, &node_name);
            goto err;
        }
        ie_network_name_free(&node_name);
    }
#endif

    return model;

err:
}

{

    }

#if HAVE_OPENVINO2
#else
#endif
        }
    }

    if (!task) {
    }

    }

    }

    }

    if (ctx->options.async) {
        if (!request) {
        }

        }
    }

        return 0;
    }
    else {
        // The classification filter has not been completely
        // tested with the sync mode, so it is not supported for now.
        }

        if (ctx->options.batch_size > 1) {
        }

        if (!request) {
        }
    }
}

{
}

static int dnn_flush_ov(const DNNModel *model)
{
#if HAVE_OPENVINO2
#else
#endif

        // no pending task needs to be flushed
        return 0;
    }

    if (!request) {
    }

    }
#if HAVE_OPENVINO2
    }
#else
    }
    }
#endif

    return 0;
}

    .execute_model  = dnn_execute_model_ov,
    .get_result     = dnn_get_result_ov,
    .flush          = dnn_flush_ov,
};