1 /*
2 * Copyright (c) 2018 Sergey Lavrushkin
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * DNN TensorFlow backend implementation.
24 */
25
36 #include <tensorflow/c/c_api.h>
37
48
49 /**
50 * Stores execution parameters for a single
51 * call to the TensorFlow C API.
52 */
59
66
/* Helper macros for the backend's AVOption table: OFFSET() yields the byte
 * offset of a field inside TFOptions, FLAGS marks options as filtering
 * parameters. NOTE(review): the option entries themselves (original lines
 * 69-71) are missing from this dump; the stray "};" below closes that
 * elided options array. */
67 #define OFFSET(x) offsetof(TFOptions, x)
68 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
72 };
73
74
78
/* NOTE(review): function fragment -- the signature (original line 79) and
 * the body (line 81) are missing from this dump, so its behavior cannot be
 * documented from what is visible. In upstream dnn_backend_tf.c this
 * position holds free_buffer(); confirm against upstream before editing. */
80 {
82 }
83
84 /**
85 * Free the contents of TensorFlow inference request.
86 * It does not free the TFInferRequest instance.
87 *
88 * @param request pointer to TFInferRequest instance.
89 * NULL pointer is allowed.
90 */
/* Frees the contents of a TFInferRequest (per the doc comment above);
 * the TFInferRequest instance itself is NOT freed, and NULL is tolerated.
 * NOTE(review): interior lines (original 95-97, 99-102, 104-106, 109) are
 * missing from this dump -- the visible loop presumably deletes one TF
 * tensor per output; confirm against upstream dnn_backend_tf.c. */
92 {
93 if (!request)
94 return;
98 }
/* Iterate over the request's outputs. Where nb_output comes from is not
 * visible in this fragment -- presumably a field of *request; confirm. */
103 for (uint32_t
i = 0;
i < nb_output; ++
i) {
107 }
108 }
110 }
111 }
112
113 /**
114 * Create a TensorFlow inference request. All properties
115 * are initially unallocated and set as NULL.
116 *
117 * @return pointer to the allocated TFInferRequest instance.
118 */
/* Allocates a TFInferRequest with every member left unset/NULL (per the
 * doc comment above) and returns it. NOTE(review): the allocation call
 * (original line 121) and the NULL-failure path body (line 123) are
 * missing from this dump -- presumably returns NULL on allocation
 * failure; confirm against upstream. */
120 {
122 if (!infer_request) {
124 }
129 return infer_request;
130 }
131
132 /**
133 * Start synchronous inference for the TensorFlow model.
134 *
135 * @param request pointer to the TFRequestItem for inference
136 * @retval 0 if execution is successful
137 * @retval AVERROR(EINVAL) if request is NULL
138 * @retval DNN_GENERIC_ERROR if execution fails
139 */
/* Runs one synchronous inference for the request (per the doc comment
 * above): validates the argument, executes, then inspects the request's
 * TF_Status. NOTE(review): the TF_SessionRun call itself (original lines
 * 153-157) and the error-return/logging bodies (149-150, 159-160) are
 * missing from this dump -- per the doc comment they return
 * AVERROR(EINVAL) and DNN_GENERIC_ERROR respectively; confirm. */
141 {
147

148 if (!request) {
151 }
152

/* A non-TF_OK status from the run is mapped to an error return
 * (body elided in this dump). */
158 if (TF_GetCode(request->
status) != TF_OK) {
161 }
162 return 0;
163 }
164
165 /**
166 * Free the TFRequestItem completely.
167 *
168 * @param arg Address of the TFInferRequest instance.
169 */
/* NOTE(review): fragment of the TFRequestItem destructor described by the
 * doc comment above -- its signature and most of its body (original lines
 * 170-172, 175-178, 180-181) are missing from this dump. Visible: it
 * returns early (guard body elided) and deletes the item's TF_Status. */
173 return;
174 }
179 TF_DeleteStatus(request->
status);
182 }
183
/* NOTE(review): signature (original line 184) missing from this dump.
 * Visible: allocates an lltask, checks it for NULL (failure body elided),
 * and returns 0 on success. The setup/enqueue code between the visible
 * lines (186-188, 190-191, 193-199) is not visible; in upstream this is
 * extract_lltask_from_task() -- confirm. */
185 {
189 if (!lltask) {
192 }
200 }
201 return 0;
202 }
203
/* Reads a serialized TensorFlow graph into a newly allocated TF_Buffer:
 * allocates graph_data, fills it with avio_read(), then wraps it in a
 * TF_NewBuffer(). NOTE(review): the signature, the size/seek computation
 * (original lines 208, 211-212, 215, 217), and all error-path cleanup
 * bodies are missing from this dump. */
205 {
206 TF_Buffer *graph_buf;
207 unsigned char *graph_data =
NULL;
209 long size, bytes_read;
210

213 }
214

216

/* Allocation-failure path (body elided in this dump). */
218 if (!graph_data){
221 }
222 bytes_read =
avio_read(model_file_context, graph_data,
size);
/* A short read is treated as an error (cleanup body elided). */
224 if (bytes_read !=
size){
227 }
228

229 graph_buf = TF_NewBuffer();
/* The TF_Buffer takes ownership of graph_data; upstream also installs a
 * data_deallocator on the elided line 232 -- confirm against upstream. */
230 graph_buf->data = graph_data;
231 graph_buf->length =
size;
233

234 return graph_buf;
235 }
236
/* Allocates the TF input tensor for one inference. Visible: the batch
 * dimension is forced to 1, the dtype is chosen by an (elided) switch to
 * be TF_FLOAT or TF_UINT8, and the tensor is sized as
 * input_dims[1] * input_dims[2] * input_dims[3] * size bytes with rank 4.
 * NOTE(review): the switch heads (original lines 244-248, 250, 252, 254,
 * 257), the default-case body, and where input_dims/size come from are
 * missing from this dump. */
238 {
239 TF_DataType dt;
242

243 input_dims[0] = 1;
249 dt = TF_FLOAT;
251 break;
253 dt = TF_UINT8;
255 break;
256 default:
258 }
259

260 return TF_AllocateTensor(dt, input_dims, 4,
261 input_dims[1] * input_dims[2] * input_dims[3] *
size);
262 }
263
/* Describes the model's named input: looks the operation up by name in
 * the model's graph, reads its output dtype, and queries its tensor
 * shape (rank 4 expected; NHWC layout per the comment below).
 * NOTE(review): the signature, the error-path bodies, and the per-case
 * statements of the dtype switch (original lines 283, 286, 289-290) and
 * of the shape-copy loop body (306-307) are missing from this dump. */
265 {
269 TF_DataType dt;
271

272 TF_Output tf_output;
/* Resolve the input operation by name; failure body elided. */
273 tf_output.oper = TF_GraphOperationByName(tf_model->
graph, input_name);
274 if (!tf_output.oper) {
277 }
278

279 tf_output.index = 0;
280 dt = TF_OperationOutputType(tf_output);
/* Map the TF dtype to the backend's format (case bodies elided). */
281 switch (dt) {
282 case TF_FLOAT:
284 break;
285 case TF_UINT8:
287 break;
288 default:
291 }
293

/* Query the static shape of the input tensor (rank-4 dims buffer). */
295 TF_GraphGetTensorShape(tf_model->
graph, tf_output, dims, 4,
status);
296 if (TF_GetCode(
status) != TF_OK){
300 }
302

303 // currently only NHWC is supported
305 for (
int i = 0;
i < 4;
i++)
308

309 return 0;
310 }
311
/* Determines the width/height of the named model output, apparently by
 * executing the model once with a single-output exec-params struct and
 * reading the resulting dimensions; uses goto-err cleanup throughout.
 * NOTE(review): the first signature line, the struct initializer head
 * (original lines 315-321, 324-325), every guarded call before the
 * visible "goto err;" lines, and the cleanup after the err: label
 * (351-353) are missing from this dump. */
313 const char *output_name, int *output_width, int *output_height)
314 {
322 .output_names = &output_name,
323 .nb_output = 1,
326 };
327

330 goto err;
331 }
332

336 goto err;
337 }
338

/* An inference request is obtained; NULL is an error path. */
340 if (!request) {
343 goto err;
344 }
345

349

350 err:
354 }
355
356 #define SPACE_CHARS " \t\r\n"
/* Parses a hex string into raw bytes (used for the serialized session
 * config). The accumulator v starts at 1; once two nibbles have been
 * shifted in, bit 0x100 is set, the completed byte is emitted (body
 * elided), and v resets to 1. Digits '0'-'9' and uppercase 'A'-'F' are
 * accepted; any other character terminates the loop.
 * NOTE(review): the signature, the character-fetch/skip logic (original
 * lines 359, 361, 364-365, 367), the nibble-accumulate expressions
 * (369, 371), and the byte-store body (376-377) are missing from this
 * dump; confirm lowercase handling against upstream. */
358 {
360

362 v = 1;
363 for (;;) {
366 break;
368 if (
c >=
'0' &&
c <=
'9')
370 else if (
c >=
'A' &&
c <=
'F')
372 else
373 break;
/* v has accumulated a full byte when the marker bit 0x100 is set. */
375 if (v & 0x100) {
378 }
380 v = 1;
381 }
382 }
384 }
385
/* Loads a TensorFlow model: optionally decodes a hex-encoded session
 * config, reads the serialized graph, imports it into a fresh TF_Graph,
 * creates the session (applying the config if present), and finally runs
 * the graph's "init" operation if one exists.
 * NOTE(review): the signature, all error logging/cleanup bodies, the
 * hex_to_data/read_graph calls feeding sess_config and graph_def, and
 * the TF_NewSession/TF_SessionRun calls themselves are among the many
 * lines missing from this dump. */
387 {
389 TF_Buffer *graph_def;
390 TF_ImportGraphDefOptions *graph_opts;
391 TF_SessionOptions *sess_opts;
392 const TF_Operation *init_op;
393 uint8_t *sess_config =
NULL;
394 int sess_config_length = 0;
395

396 // prepare the sess config data
397 if (
ctx->tf_option.sess_config !=
NULL) {
399 /*
400 ctx->tf_option.sess_config is hex to present the serialized proto
401 required by TF_SetConfig below, so we need to first generate the serialized
402 proto in a python script, tools/python/tf_sess_config.py is a script example
403 to generate the configs of sess_config.
404 */
/* The config must be given as a "0x"-prefixed hex string. */
405 if (strncmp(
ctx->tf_option.sess_config,
"0x", 2) != 0) {
408 }
411

413 if (!sess_config) {
416 }
420 }
421 }
422

424 if (!graph_def){
428 }
/* Import the serialized graph into a newly created graph/status pair;
 * the import options and the serialized buffer are freed right after. */
429 tf_model->
graph = TF_NewGraph();
430 tf_model->
status = TF_NewStatus();
431 graph_opts = TF_NewImportGraphDefOptions();
432 TF_GraphImportGraphDef(tf_model->
graph, graph_def, graph_opts, tf_model->
status);
433 TF_DeleteImportGraphDefOptions(graph_opts);
434 TF_DeleteBuffer(graph_def);
435 if (TF_GetCode(tf_model->
status) != TF_OK){
439 }
440

/* "init" is optional; its presence triggers the initialization run
 * at the bottom of this function. */
441 init_op = TF_GraphOperationByName(tf_model->
graph,
"init");
442 sess_opts = TF_NewSessionOptions();
443

444 if (sess_config) {
445 TF_SetConfig(sess_opts, sess_config, sess_config_length,tf_model->
status);
447 if (TF_GetCode(tf_model->
status) != TF_OK) {
448 TF_DeleteSessionOptions(sess_opts);
450 ctx->tf_option.sess_config);
452 }
453 }
454

456 TF_DeleteSessionOptions(sess_opts);
457 if (TF_GetCode(tf_model->
status) != TF_OK)
458 {
462 }
463

464 // Run initialization operation with name "init" if it is present in graph
465 if (init_op){
470 if (TF_GetCode(tf_model->
status) != TF_OK)
471 {
475 }
476 }
477

478 return 0;
479 }
480
/* Frees a TF model and everything it owns. Visible: NULL / already-freed
 * guards, the cast from the generic model handle to TFModel, deletion of
 * the TF graph and the TF status. NOTE(review): the signature and the
 * teardown of queues/sessions/requests (original lines 489-507, 512-516,
 * 519-520) are missing from this dump -- presumably they drain request
 * queues and close the TF session; confirm against upstream. */
482 {
484

485 if (!model || !*model)
486 return;
487

488 tf_model = (
TFModel *)(*model);
492 }
494

498 }
500

506 }
508

509 if (tf_model->
graph){
510 TF_DeleteGraph(tf_model->
graph);
511 }
515 }
517 TF_DeleteStatus(tf_model->
status);
518 }
521 }
522
/* Creates and initializes a TF model instance. Visible: allocation of the
 * tf_model, defaulting of ctx->nireq when non-positive (body elided),
 * forcing async off when pthread cancellation is unavailable, and a loop
 * creating ctx->nireq request items, each given a fresh TF_Status.
 * Errors unwind through the err: label. NOTE(review): the signature, the
 * load_tf_model call, queue creation, and the err: cleanup body (original
 * lines 595-596) are missing from this dump. */
524 {
527

529 if (!tf_model)
531 model = &tf_model->
model;
533

536 goto err;
537 }
538

/* Non-positive nireq gets a default (assignment elided). */
539 if (
ctx->nireq <= 0) {
541 }
542

/* Async execution requires cancellable pthreads; fall back to sync. */
543 #if !HAVE_PTHREAD_CANCEL
544 if (
ctx->options.async) {
545 ctx->options.async = 0;
547 }
548 #endif
549

552 goto err;
553 }
554

/* Pre-allocate one request item per in-flight inference request. */
555 for (
int i = 0;
i <
ctx->nireq;
i++) {
557 if (!item) {
558 goto err;
559 }
565 goto err;
566 }
567 item->
status = TF_NewStatus();
571

574 goto err;
575 }
576 }
577

580 goto err;
581 }
582

585 goto err;
586 }
587

592

593 return model;
594 err:
597 }
598
/* NOTE(review): heavily gutted fragment -- the signature (original
 * ~line 599) and most statements are missing from this dump. Visible
 * skeleton: resolve the request's tf_input operation (NULL-checked),
 * branch over a (elided) switch with a default case, and unwind through
 * goto err on each failure. In upstream this is fill_model_input_tf(),
 * which copies the input frame into the TF input tensor -- confirm
 * against upstream before relying on any of this. */
606

611

614 goto err;
615 }
616

620

625 goto err;
626 }
627

/* The named input operation must exist in the graph. */
629 if (!infer_request->
tf_input->oper){
632 goto err;
633 }
635

640 goto err;
641 }
643

649 } else {
651 }
652 }
653 break;
656 break;
657 default:
659 break;
660 }
661

666 goto err;
667 }
668

673 goto err;
674 }
675

682 goto err;
683 }
685 }
686

687 return 0;
688 err:
691 }
692
/* NOTE(review): gutted fragment of the inference-completion callback --
 * signature and most statements missing from this dump. Visible
 * skeleton: a (elided) switch over the task's function type with a
 * frame-in/frame-out branch that supports only one output, an early
 * return path, a default case that jumps to err, and shared cleanup
 * after the err: label. Confirm details against upstream
 * infer_completion_callback(). */
701

705 goto err;
706 }
707

717 }
// it only supports 1 output if it's frame in & frame out
720 //it only support 1 output if it's frame in & frame out
724 } else {
726 }
727 } else {
732 }
733 break;
737 return;
738 }
740 break;
741 default:
743 goto err;
744 }
746 err:
749

753 }
754 }
755
/* NOTE(review): fragment -- signature and most statements missing from
 * this dump. Visible skeleton: early return 0 (condition elided), fetch
 * of the tf_model from the task, two guarded steps that goto err on
 * failure, a success return, an else-branch error path, and an err:
 * label. In upstream this is execute_model_tf(); the trailing lone "}"
 * (original line 800) likely closes a following small function whose
 * body is entirely elided -- confirm against upstream. */
757 {
763

766 return 0;
767 }
768

771 tf_model = task->
model;
773

776 goto err;
777 }
778

781 goto err;
782 }
783 return 0;
784 }
785 else {
788 goto err;
789 }
792 }
793 err:
797 }
798

800 }
801
/* NOTE(review): fragment -- signature and most statements missing from
 * this dump. Visible skeleton: several guarded setup steps (bodies
 * elided), a NULL check on a freshly created task, and a NULL check on
 * the request obtained before execution. In upstream this is
 * dnn_execute_model_tf(), the backend's public execute entry point --
 * confirm against upstream. */
803 {
809

813 }
814

816 if (!task) {
819 }
820

826 }
827

832 }
833

838 }
839

841 if (!request) {
844 }
846 }
847
/* NOTE(review): function fragment -- signature (original line 848) and
 * body (850-851) are missing from this dump; behavior cannot be
 * documented from what is visible. In upstream this position holds
 * dnn_get_result_tf() -- confirm. */
849 {
852 }
853
/* Flushes pending inference tasks. Visible: an early "return 0" when
 * there is nothing pending (guard condition elided), and a NULL check on
 * the request obtained for the flush. NOTE(review): the signature and the
 * actual dequeue/execute statements (original lines 856-859, 861, 866,
 * 868-869, 872-876, 878, 881) are missing from this dump. */
855 {
860

862 // no pending task need to flush
863 return 0;
864 }
865

867 if (!request) {
870 }
871

877 }
879 }
880

882 }
883
/* NOTE(review): closing brace of the backend's module/vtable definition
 * (in upstream, the DNNModule ff_dnn_backend_tf); all of its entries
 * (original lines 884-891) are missing from this dump. */
892 };