/*
 * Copyright (c) 2010 Nolan Lum <nol888@gmail.com>
 * Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * gradfun debanding filter, ported from MPlayer
 * libmpcodecs/vf_gradfun.c
 *
 * Apply a boxblur debanding algorithm (based on the gradfun2db
 * AviSynth filter by prunedtree).
 * For each pixel, if it is within the threshold of the blurred value, make it
 * closer. So now we have a smoothed and higher bitdepth version of all the
 * shallow gradients, while leaving detailed areas untouched.
 * Dither it back to 8bit.
 */
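/*
 * As a rough sketch (not a literal copy of the code), the per-pixel update
 * performed by ff_gradfun_filter_line_c() below is:
 *
 *     pix   = src[x] << 7                        // 7 extra fractional bits
 *     delta = blurred_dc - pix                   // distance to local average
 *     m     = FFMAX(0, 127 - |delta| * thresh >> 16)
 *     pix  += (m * m * delta >> 14) + dither
 *     dst[x] = clip(pix >> 7)                    // back to 8 bits
 *
 * i.e. pixels close to the blurred value are pulled toward it, and the
 * correction fades to zero as |delta| approaches the threshold, so detailed
 * areas are left untouched.
 */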
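/* 8x8 ordered dither matrix (essentially the standard Bayer pattern scaled
 * to the filter's 7 fractional bits, values 0x00..0x7E); one row is applied
 * per output line via dither[y & 7] when truncating back to 8 bits. */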
DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = {
    {0x00,0x60,0x18,0x78,0x06,0x66,0x1E,0x7E},
    {0x40,0x20,0x58,0x38,0x46,0x26,0x5E,0x3E},
    {0x10,0x70,0x08,0x68,0x16,0x76,0x0E,0x6E},
    {0x50,0x30,0x48,0x28,0x56,0x36,0x4E,0x2E},
    {0x04,0x64,0x1C,0x7C,0x02,0x62,0x1A,0x7A},
    {0x44,0x24,0x5C,0x3C,0x42,0x22,0x5A,0x3A},
    {0x14,0x74,0x0C,0x6C,0x12,0x72,0x0A,0x6A},
    {0x54,0x34,0x4C,0x2C,0x52,0x32,0x4A,0x2A},
};
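/* Filter one line of a plane: dc points at the box-blurred averages for this
 * row (one value per two pixels, hence the "dc += x & 1" stride), thresh is
 * the scaled reciprocal of the strength option and dithers is the dither row
 * to apply. This is the C reference used when no SIMD version is available. */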
void ff_gradfun_filter_line_c(uint8_t *dst, const uint8_t *src, const uint16_t *dc,
                              int width, int thresh, const uint16_t *dithers)
{
    int x;
    for (x = 0; x < width; dc += x & 1, x++) {
        int pix   = src[x] << 7;
        int delta = dc[0] - pix;
        int m     = abs(delta) * thresh >> 16;
        m = FFMAX(0, 127 - m);
        m = m * m * delta >> 14;
        pix += m + dithers[x & 7];
        dst[x] = av_clip_uint8(pix >> 7);
    }
}
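/* Accumulate one step of the box blur: src is read two input rows at a time
 * and 2x2-downsampled; buf/buf1 are the current and previous rows of a ring
 * buffer of running column sums, and dc receives the vertical sums that the
 * caller turns into a box average. C reference for the SIMD versions. */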
void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, const uint16_t *buf1,
                            const uint8_t *src, int src_linesize, int width)
{
    int x, v, old;
    for (x = 0; x < width; x++) {
        v = buf1[x] + src[2 * x] + src[2 * x + 1] +
            src[2 * x + src_linesize] + src[2 * x + 1 + src_linesize];
        old = buf[x];
        buf[x] = v;
        dc[x] = v - old;
    }
}
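/* Deband one plane: maintain a ring buffer of r rows of running column sums
 * over 2x2-downsampled input, turn them into a sliding r x r box average
 * (dc, at half resolution with 7 fractional bits), and run each output line
 * through filter_line with the matching dither row. */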
static void filter(GradFunContext *ctx, uint8_t *dst, const uint8_t *src,
                   int width, int height, int dst_linesize, int src_linesize, int r)
{
    int bstride = FFALIGN(width, 16) / 2;
    int y;
    uint32_t dc_factor = (1 << 21) / (r * r);
    uint16_t *dc = ctx->buf + 16;
    uint16_t *buf = ctx->buf + bstride + 32;
    int thresh = ctx->thresh;

    memset(dc, 0, (bstride + 16) * sizeof(*buf));
    for (y = 0; y < r; y++)
        ctx->blur_line(dc, buf + y * bstride, buf + (y - 1) * bstride,
                       src + 2 * y * src_linesize, src_linesize, width / 2);
    for (;;) {
        if (y < height - r) {
            int mod = ((y + r) / 2) % r;
            uint16_t *buf0 = buf + mod * bstride;
            uint16_t *buf1 = buf + (mod ? mod - 1 : r - 1) * bstride;
            int x, v;
            ctx->blur_line(dc, buf0, buf1, src + (y + r) * src_linesize,
                           src_linesize, width / 2);
            /* horizontal pass: keep a running sum of r half-resolution
             * columns and normalize it with dc_factor so that dc[] ends up
             * holding the box average with 7 fractional bits */
            for (x = v = 0; x < r; x++)
                v += dc[x];
            for (; x < width / 2; x++) {
                v += dc[x] - dc[x-r];
                dc[x-r] = v * dc_factor >> 16;
            }
            for (; x < (width + r + 1) / 2; x++)
                dc[x-r] = v * dc_factor >> 16;
            for (x = -r / 2; x < 0; x++)
                dc[x] = dc[0];
        }
        /* ... */
        for (y = 0; y < r; y++)
            /* ... */;
        }
    }
}
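/* Filter state setup: thresh = (1 << 15) / strength makes the per-pixel
 * correction (see filter_line above) fall off to zero once a pixel differs
 * from the local average by roughly 2 * strength 8-bit levels, and the
 * radius is clipped to an even value in [4, 32] since the blur works on
 * 2x2 blocks. */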
static av_cold int init(AVFilterContext *ctx)
{
    GradFunContext *s = ctx->priv;

    s->thresh = (1 << 15) / s->strength;
    s->radius = av_clip((s->radius + 1) & ~1, 4, 32);

    s->blur_line   = ff_gradfun_blur_line_c;
    s->filter_line = ff_gradfun_filter_line_c;

#if ARCH_X86
    ff_gradfun_init_x86(s);
#endif

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    GradFunContext *s = ctx->priv;
    av_freep(&s->buf);
}

static const enum AVPixelFormat pix_fmts[] = {
    /* ... */
    AV_PIX_FMT_NONE
};
static int config_input(AVFilterLink *inlink)
{
    GradFunContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    /* ... */

    /* scale the radius down for the subsampled chroma planes, keeping it
     * even and within [4, 32] like the luma radius */
    s->chroma_r = av_clip(((((s->radius >> hsub) + (s->radius >> vsub)) / 2) + 1) & ~1, 4, 32);

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    GradFunContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    int p, direct;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        direct = 0;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    /* ... per-plane filtering ... */

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

#define OFFSET(x) offsetof(GradFunContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption gradfun_options[] = {
    { "strength", "The maximum amount by which the filter will change any one pixel.", OFFSET(strength), AV_OPT_TYPE_FLOAT, { .dbl = 1.2 }, 0.51, 64, FLAGS },
    { "radius",   "The neighborhood to fit the gradient to.",                          OFFSET(radius),   AV_OPT_TYPE_INT,   { .i64 = 16 },  4,    32, FLAGS },
    { NULL }
};
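/* Typical usage on the ffmpeg command line, e.g.:
 *     ffmpeg -i in.mp4 -vf gradfun=strength=1.2:radius=16 out.mp4
 * (shown with the default values; the options may also be given
 * positionally as "gradfun=1.2:16"). */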
AVFILTER_DEFINE_CLASS(gradfun);

static const AVFilterPad gradfun_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
};
const FFFilter ff_vf_gradfun = {
    .p.name        = "gradfun",
    .p.description = NULL_IF_CONFIG_SMALL("Debands video quickly using gradients."),
    .p.priv_class  = &gradfun_class,
    /* ... */
};