1 /*
2 * Copyright (c) 2011 Stefano Sabatini
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * Compute a look-up table for binding the input value to the output
24 * value, and apply it to input video.
25 */
26
27 #include "config_components.h"
28
41
43 "w", ///< width of the input video
44 "h", ///< height of the input video
45 "val", ///< input value for the pixel
46 "maxval", ///< max value for the pixel
47 "minval", ///< min value for the pixel
48 "negval", ///< negated value
49 "clipval",
51 };
52
62 };
63
66 uint16_t
lut[4][256 * 256];
///< lookup table for each component
76
84
/* AVOption plumbing: OFFSET() locates an option's field inside
 * LutContext; FLAGS marks each option as a video filtering parameter
 * that may also be changed at runtime (RUNTIME_PARAM → process_command). */
85 #define OFFSET(x) offsetof(LutContext, x)
86 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
87
101 };
102
/* NOTE(review): function header elided in this excerpt. The loop below
 * iterates over the 4 components — presumably releasing per-component
 * parsed expressions; confirm against the full file. */
104 {
107

108     for (
i = 0;
i < 4;
i++) {
112     }
113 }
114
/* Planar YUV pixel formats accepted by the filter: 8-bit (incl. JPEG
 * full-range and alpha variants) plus little-endian 9/10/12/14/16-bit. */
115 #define YUV_FORMATS \
116     AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV420P,    \
117     AV_PIX_FMT_YUV411P,  AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,    \
118     AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,   \
119     AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,   \
120     AV_PIX_FMT_YUVJ440P,                                             \
121     AV_PIX_FMT_YUV444P9LE, AV_PIX_FMT_YUV422P9LE, AV_PIX_FMT_YUV420P9LE, \
122     AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUV440P10LE, \
123     AV_PIX_FMT_YUV444P12LE, AV_PIX_FMT_YUV422P12LE, AV_PIX_FMT_YUV420P12LE, AV_PIX_FMT_YUV440P12LE, \
124     AV_PIX_FMT_YUV444P14LE, AV_PIX_FMT_YUV422P14LE, AV_PIX_FMT_YUV420P14LE, \
125     AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV420P16LE, \
126     AV_PIX_FMT_YUVA444P16LE, AV_PIX_FMT_YUVA422P16LE, AV_PIX_FMT_YUVA420P16LE
127
/* RGB pixel formats accepted by the filter: packed 8-bit (with/without
 * alpha), packed 16-bit LE, and planar GBR(A) at 8..16 bits. */
128 #define RGB_FORMATS \
129     AV_PIX_FMT_ARGB,         AV_PIX_FMT_RGBA,         \
130     AV_PIX_FMT_ABGR,         AV_PIX_FMT_BGRA,         \
131     AV_PIX_FMT_RGB24,        AV_PIX_FMT_BGR24,        \
132     AV_PIX_FMT_RGB48LE,      AV_PIX_FMT_RGBA64LE,     \
133     AV_PIX_FMT_GBRP,         AV_PIX_FMT_GBRAP,        \
134     AV_PIX_FMT_GBRP9LE,      AV_PIX_FMT_GBRP10LE,     \
135     AV_PIX_FMT_GBRAP10LE,                             \
136     AV_PIX_FMT_GBRP12LE,     AV_PIX_FMT_GBRP14LE,     \
137     AV_PIX_FMT_GBRP16LE,     AV_PIX_FMT_GBRAP12LE,    \
138     AV_PIX_FMT_GBRAP16LE
139
/* Grayscale pixel formats accepted by the filter (8..16-bit, LE). */
140 #define GRAY_FORMATS \
141     AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9LE, AV_PIX_FMT_GRAY10LE, \
142     AV_PIX_FMT_GRAY12LE, AV_PIX_FMT_GRAY14LE, AV_PIX_FMT_GRAY16LE
143
147
/* NOTE(review): format-negotiation callback; signature and body are
 * elided in this excerpt — presumably advertises the YUV/RGB/GRAY
 * format lists defined above. Confirm against the full file. */
151 {
153

158 }
159
160 /**
161 * Clip value val in the minval - maxval range.
 * NOTE(review): signature and body elided in this excerpt; cannot
 * confirm the clipping implementation from here.
162 */
164 {
168

170 }
171
172 /**
173 * Compute gamma correction for value val, assuming the minval-maxval
174 * range, val is clipped to a value contained in the same interval.
 * Visible body: normalize val to [0,1], raise to the power gamma,
 * then rescale back to [minval, maxval].
175 */
177 {
182

183     return pow((
val-minval)/(maxval-minval), gamma) * (maxval-minval)+minval;
184 }
185
186 /**
187 * Compute ITU Rec.709 gamma correction of value val.
 * The visible `1.099 * pow(level, 1.0/gamma) - 0.099` term is the
 * knee branch of the BT.709 OETF; the linear low-light branch
 * (4.5 * level for small level) is on a line elided from this excerpt.
188 */
190 {
195     double level = (
val - minval) / (maxval - minval);
197 : 1.099 * pow(
level, 1.0 / gamma) - 0.099;
198     return level * (maxval - minval) + minval;
199 }
200
206 };
207
209 "clip",
210 "gammaval",
211 "gammaval709",
213 };
214
/* NOTE(review): link-configuration callback; signature and several
 * statements elided in this excerpt. Derives per-plane parameters from
 * the pixel format descriptor, then parses each component expression
 * and fills the lookup tables. */
216 {
220     uint8_t rgba_map[4]; /* component index -> RGBA color index map */
223

/* Chroma subsampling shift factors from the format descriptor. */
224     s->hsub =
desc->log2_chroma_w;
225     s->vsub =
desc->log2_chroma_h;
226

/* More than 8 bits per component selects the 16-bit code paths. */
229     s->is_16bit =
desc->comp[0].depth > 8;
230

/* Limited (MPEG) range: luma 16..235, chroma 16..240, scaled up by
 * the extra bit depth beyond 8 bits. */
267         min[
Y] = 16 * (1 << (
desc->comp[0].depth - 8));
268         min[
U] = 16 * (1 << (
desc->comp[1].depth - 8));
269         min[
V] = 16 * (1 << (
desc->comp[2].depth - 8));
271         max[
Y] = 235 * (1 << (
desc->comp[0].depth - 8));
272         max[
U] = 240 * (1 << (
desc->comp[1].depth - 8));
273         max[
V] = 240 * (1 << (
desc->comp[2].depth - 8));
/* Alpha is always full range. */
274         max[
A] = (1 <<
desc->comp[0].depth) - 1;
275         break;
280         break;
281     default:
284     }
285

286     s->is_yuv =
s->is_rgb = 0;
290

/* For packed 16-bit formats, step is counted in 16-bit units,
 * hence the halving (presumably from a byte-based pixel step —
 * confirm against the full file). */
295         s->step =
s->step >> 1;
296     }
297 }
298

300             double res;
302

303             /* create the parsed expression */
310 "Error when parsing the expression '%s' for the component %d and color %d.\n",
313             }
314

315             /* compute the lut */
318

325

329 "Error when evaluating the expression '%s' for the value %d for the component %d.\n",
332                 }
335             }
336     }
337

338     return 0;
339 }
340
344
347 };
348
/* Shared prologue for the packed-pixel slice workers: unpack the
 * thread_data argument and compute this job's row range. `tab` views
 * the per-component LUTs; `step` is the per-pixel component stride. */
349 #define LOAD_PACKED_COMMON\
350     LutContext *s = ctx->priv;\
351     const struct thread_data *td = arg;\
352 \
353     int i, j;\
354     const int w = td->w;\
355     const int h = td->h;\
356     AVFrame *in = td->in;\
357     AVFrame *out = td->out;\
358     const uint16_t (*tab)[256*256] = (const uint16_t (*)[256*256])s->lut;\
359     const int step = s->step;\
360 \
361     const int slice_start = (h *  jobnr   ) / nb_jobs;\
362     const int slice_end   = (h * (jobnr+1)) / nb_jobs;\
363
364 /* packed, 16-bit: apply the per-component LUTs to 16-bit packed pixels.
 * NOTE(review): the function signature, the row loop header and the
 * switch(step) line are elided in this excerpt. */
366 {
368

369     uint16_t *inrow, *outrow, *inrow0, *outrow0;
/* linesize is in bytes; divide by 2 to index 16-bit elements. */
370     const int in_linesize  =  in->linesize[0] / 2;
371     const int out_linesize =
out->linesize[0] / 2;
372     inrow0  = (uint16_t *)in ->
data[0];
373     outrow0 = (uint16_t *)
out->data[0];
374

376         inrow  = inrow0 +
i * in_linesize;
377         outrow = outrow0 +
i * out_linesize;
378         for (j = 0; j <
w; j++) {
379

381 #if HAVE_BIGENDIAN
386 #else
/* Deliberate fall-through: step selects how many components exist;
 * each case maps one component through its LUT. */
387             case 4:  outrow[3] =
tab[3][inrow[3]];
// Fall-through
388             case 3:  outrow[2] =
tab[2][inrow[2]];
// Fall-through
389             case 2:  outrow[1] =
tab[1][inrow[1]];
// Fall-through
390             default: outrow[0] =
tab[0][inrow[0]];
391 #endif
392             }
395         }
396     }
397

398     return 0;
399 }
400
401 /* packed, 8-bit: same scheme as the 16-bit worker but on byte pixels.
 * NOTE(review): the function signature, the row loop header and the
 * switch(step) line are elided in this excerpt. */
403 {
405

406     uint8_t *inrow, *outrow, *inrow0, *outrow0;
407     const int in_linesize  =  in->linesize[0];
408     const int out_linesize =
out->linesize[0];
409     inrow0  = in ->data[0];
410     outrow0 =
out->data[0];
411

413         inrow  = inrow0 +
i * in_linesize;
414         outrow = outrow0 +
i * out_linesize;
415         for (j = 0; j <
w; j++) {
/* Deliberate fall-through over the pixel's components (see step). */
417             case 4:  outrow[3] =
tab[3][inrow[3]];
// Fall-through
418             case 3:  outrow[2] =
tab[2][inrow[2]];
// Fall-through
419             case 2:  outrow[1] =
tab[1][inrow[1]];
// Fall-through
420             default: outrow[0] =
tab[0][inrow[0]];
421             }
424         }
425     }
426

427     return 0;
428 }
429
/* Shared prologue for the planar slice workers: unpack thread_data. */
430 #define LOAD_PLANAR_COMMON\
431     LutContext *s = ctx->priv;\
432     const struct thread_data *td = arg;\
433     int i, j, plane;\
434     AVFrame *in = td->in;\
435     AVFrame *out = td->out;\
436
/* Per-plane setup: planes 1 and 2 are chroma, so their dimensions are
 * reduced by the subsampling shifts; each plane uses its own LUT and
 * the slice row range is recomputed for the (possibly subsampled) h. */
437 #define PLANAR_COMMON\
438     int vsub = plane == 1 || plane == 2 ? s->vsub : 0;\
439     int hsub = plane == 1 || plane == 2 ? s->hsub : 0;\
440     int h = AV_CEIL_RSHIFT(td->h, vsub);\
441     int w = AV_CEIL_RSHIFT(td->w, hsub);\
442     const uint16_t *tab = s->lut[plane];\
443 \
444     const int slice_start = (h *  jobnr   ) / nb_jobs;\
445     const int slice_end   = (h * (jobnr+1)) / nb_jobs;\
446
447 /* planar >8 bit depth: map each plane through its LUT, 16-bit samples.
 * NOTE(review): function signature, plane-common setup invocation and
 * the inrow initialization are elided in this excerpt. */
449 {
451

452     uint16_t *inrow, *outrow;
453

454     for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
456

/* linesize is in bytes; divide by 2 to index 16-bit samples. */
457         const int in_linesize  =  in->linesize[plane] / 2;
458         const int out_linesize =
out->linesize[plane] / 2;
459

461         outrow = (uint16_t *)
out->data[plane] +
slice_start * out_linesize;
462

464             for (j = 0; j <
w; j++) {
465 #if HAVE_BIGENDIAN
467 #else
468                 outrow[j] =
tab[inrow[j]];
469 #endif
470             }
471             inrow  += in_linesize;
472             outrow += out_linesize;
473         }
474     }
475

476     return 0;
477 }
478
479 /* planar 8bit depth: map each plane through its LUT, byte samples.
 * NOTE(review): function signature, plane-common setup invocation and
 * the outrow initialization are elided in this excerpt. */
481 {
483

484     uint8_t *inrow, *outrow;
485

486     for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
488

489         const int in_linesize  =  in->linesize[plane];
490         const int out_linesize =
out->linesize[plane];
491

492         inrow  = in ->data[plane] +
slice_start * in_linesize;
494

496             for (j = 0; j <
w; j++)
497                 outrow[j] =
tab[inrow[j]];
498             inrow  += in_linesize;
499             outrow += out_linesize;
500         }
501     }
502

503     return 0;
504 }
505
/* Build the slice-thread payload for packed workers; note h comes from
 * the frame (in->height) rather than the link. */
506 #define PACKED_THREAD_DATA\
507     struct thread_data td = {\
508         .in  = in,\
509         .out = out,\
510         .w   = inlink->w,\
511         .h   = in->height,\
512     };\
513
/* Build the slice-thread payload for planar workers; here h is taken
 * from the link dimensions. */
514 #define PLANAR_THREAD_DATA\
515     struct thread_data td = {\
516         .in  = in,\
517         .out = out,\
518         .w   = inlink->w,\
519         .h   = inlink->h,\
520     };\
521
521
/* NOTE(review): frame callback; signature and several statements are
 * elided in this excerpt. Visible logic: choose in-place ("direct")
 * processing when the frame is writable, otherwise allocate an output
 * frame; then dispatch to the packed/planar 8/16-bit slice worker
 * matching the negotiated format. */
523 {
528     int direct = 0;
529

/* Writable input → process in place. */
531         direct = 1;
533     } else {
538     }
540     }
541

544

545     if (
s->is_rgb &&
s->is_16bit && !
s->is_planar) {
546         /* packed, 16-bit */
550     }
else if (
s->is_rgb && !
s->is_planar) {
551         /* packed 8 bits */
555     }
else if (
s->is_16bit) {
556         /* planar >8 bit depth */
560     } else {
561         /* planar 8bit depth */
565     }
566

/* When not processing in place, the input frame is presumably freed
 * here before forwarding the output — confirm against the full file. */
567     if (!direct)
569

571 }
572
/* NOTE(review): runtime command handler (RUNTIME_PARAM options);
 * the leading part of the signature and the body are elided —
 * presumably re-applies the option and recomputes the LUT. */
574                                                    char *res,
int res_len,
int flags)
575 {
577

580

582 }
583
589 },
590 };
591
/* Stamp out one FFFilter definition per lut variant. All variants share
 * the uninit/query_formats/process_command callbacks, slice threading
 * and generic timeline support; only name, description, option class
 * and the name_##_init hook differ. */
592 #define DEFINE_LUT_FILTER(name_, description_, priv_class_)             \
593     const FFFilter ff_vf_##name_ = {                                    \
594         .p.name        = #name_,                                        \
595         .p.description = NULL_IF_CONFIG_SMALL(description_),            \
596         .p.priv_class  = &priv_class_ ## _class,                        \
597         .p.flags       = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC |       \
598                          AVFILTER_FLAG_SLICE_THREADS,                   \
599         .priv_size     = sizeof(LutContext),                            \
600         .init          = name_##_init,                                  \
601         .uninit        = uninit,                                        \
602         FILTER_INPUTS(inputs),                                          \
603         FILTER_OUTPUTS(ff_video_default_filterpad),                     \
604         FILTER_QUERY_FUNC2(query_formats),                              \
605         .process_command = process_command,                             \
606     }
607
609
610 #if CONFIG_LUT_FILTER

/* Generic "lut" filter: accepts both RGB and YUV input, so it needs no
 * custom init hook (lut_init is stubbed out as NULL). */
612 #define lut_init NULL
613 DEFINE_LUT_FILTER(lut,
"Compute and apply a lookup table to the RGB/YUV input video.",
614                   lut);
615 #undef lut_init
616 #endif
617
618 #if CONFIG_LUTYUV_FILTER

/* lutyuv variant init. NOTE(review): the signature and body lines are
 * elided — presumably forces YUV interpretation (s->is_yuv = 1);
 * confirm against the full file. */
621 {
623

625

626     return 0;
627 }
628

629 DEFINE_LUT_FILTER(lutyuv,
"Compute and apply a lookup table to the YUV input video.",
630                   lut);
631 #endif
632
633 #if CONFIG_LUTRGB_FILTER

/* lutrgb variant init. NOTE(review): the signature and body lines are
 * elided — presumably forces RGB interpretation (s->is_rgb = 1);
 * confirm against the full file. */
636 {
638

640

641     return 0;
642 }
643

644 DEFINE_LUT_FILTER(lutrgb,
"Compute and apply a lookup table to the RGB input video.",
645                   lut);
646 #endif