/*
 * FFV1 decoder
 *
 * Copyright (c) 2003-2013 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * FF Video Codec 1 (a lossless codec) decoder
 */

#include "libavutil/avassert.h"
#include "libavutil/crc.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timer.h"
#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
#include "rangecoder.h"
#include "golomb.h"
#include "mathops.h"
#include "ffv1.h"

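/* Decode one range-coded symbol: a flag for zero, then a unary-coded
 * exponent (contexts 1..10), the mantissa bits (contexts 22..31) and, for
 * signed symbols, a sign bit (contexts 11..21), each bit using its own
 * adaptive binary context. */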
static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
                                               int is_signed)
{
    if (get_rac(c, state + 0))
        return 0;
    else {
        int i, e;
        unsigned a;
        e = 0;
        while (get_rac(c, state + 1 + FFMIN(e, 9))) { // 1..10
            e++;
            if (e > 31)
                return AVERROR_INVALIDDATA;
        }

        a = 1;
        for (i = e - 1; i >= 0; i--)
            a += a + get_rac(c, state + 22 + FFMIN(i, 9));  // 22..31

        e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10))); // 11..21
        return (a ^ e) - e;
    }
}

static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
{
    return get_symbol_inline(c, state, is_signed);
}
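/* Decode one Golomb-Rice coded sample difference.  The Rice parameter k is
 * derived from the running error_sum/count statistics of the context, the
 * sign is folded back in using the drift, and the context state is updated
 * afterwards. */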

static inline int get_vlc_symbol(GetBitContext *gb, VlcState *const state,
                                 int bits)
{
    int k, i, v, ret;

    i = state->count;
    k = 0;
    while (i < state->error_sum) { // FIXME: optimize
        k++;
        i += i;
    }

    v = get_sr_golomb(gb, k, 12, bits);
    ff_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
            v, state->bias, state->error_sum, state->drift, state->count, k);

    v ^= ((2 * state->drift + state->count) >> 31);

    ret = fold(v + state->bias, bits);

    update_vlc_state(state, v);

    return ret;
}

static int is_input_end(FFV1Context *s)
{
    if (s->ac != AC_GOLOMB_RICE) {
        RangeCoder *const c = &s->c;
        if (c->overread > MAX_OVERREAD)
            return AVERROR_INVALIDDATA;
    } else {
        if (get_bits_left(&s->gb) < 1)
            return AVERROR_INVALIDDATA;
    }
    return 0;
}

#define TYPE int16_t
#define RENAME(name) name
#include "ffv1dec_template.c"
#undef TYPE
#undef RENAME

#define TYPE int32_t
#define RENAME(name) name ## 32
#include "ffv1dec_template.c"

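/* Decode a single plane (or one component of a packed layout).  Two padded
 * rows of the sample buffer are swapped every line so that decode_line()
 * always sees the previous row as prediction context. */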
static int decode_plane(FFV1Context *s, uint8_t *src,
                        int w, int h, int stride, int plane_index,
                        int pixel_stride)
{
    int x, y;
    int16_t *sample[2];
    sample[0] = s->sample_buffer + 3;
    sample[1] = s->sample_buffer + w + 6 + 3;

    s->run_index = 0;

    memset(s->sample_buffer, 0, 2 * (w + 6) * sizeof(*s->sample_buffer));

    for (y = 0; y < h; y++) {
        int16_t *temp = sample[0]; // FIXME: try a normal buffer

        sample[0] = sample[1];
        sample[1] = temp;

        sample[1][-1] = sample[0][0];
        sample[0][w]  = sample[0][w - 1];

// { START_TIMER
        if (s->avctx->bits_per_raw_sample <= 8) {
            int ret = decode_line(s, w, sample, plane_index, 8);
            if (ret < 0)
                return ret;
            for (x = 0; x < w; x++)
                src[x*pixel_stride + stride * y] = sample[1][x];
        } else {
            int ret = decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
            if (ret < 0)
                return ret;
            if (s->packed_at_lsb) {
                for (x = 0; x < w; x++) {
                    ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x];
                }
            } else {
                for (x = 0; x < w; x++) {
                    ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
                }
            }
        }
// STOP_TIMER("decode-line") }
    }
    return 0;
}
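/* Read the per-slice header from the range coder: slice position and size in
 * the slice grid, per-plane quant table indices, the picture structure
 * (progressive / top field first / bottom field first) and the sample
 * aspect ratio. */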

static int decode_slice_header(FFV1Context *f, FFV1Context *fs)
{
    memset(state, 128, sizeof(state));

        return -1;
        return -1;

        return -1;
    }

    }
    }

    if (ps == 1) {
        f->cur->interlaced_frame = 1;
        f->cur->top_field_first  = 1;
    } else if (ps == 2) { /* bottom field first */
        f->cur->interlaced_frame = 1;
        f->cur->top_field_first  = 0;
    } else if (ps == 3) {
        f->cur->interlaced_frame = 0;
    }

    }

    }
    }
    }

    return 0;
}
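/* Decode one slice (run as a job through avctx->execute()).  The slice
 * header and contexts are read first; the planes are then decoded either as
 * planar YUV(A), as packed luma/alpha, or through the RGB path, and finally
 * the range-coder position is checked against the slice size so damaged
 * slices can be flagged and concealed. */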

static int decode_slice(AVCodecContext *c, void *arg)
{
    FFV1Context *fs   = *(void **)arg;
    FFV1Context *f    = fs->avctx->priv_data;
    int width, height, x, y, ret;
    const int ps      = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step;
    AVFrame * const p = f->cur;
    int i, si;

    for (si = 0; fs != f->slice_context[si]; si++)
        ;




    memcpy(pdst, psrc, sizeof(*pdst));

        } else {
        }
    }
    }


    }
    }
        return ret;


    }

    if (f->colorspace == 0 && (f->chroma_planes || !fs->transparency)) {
        const int chroma_width  = AV_CEIL_RSHIFT(width,  f->chroma_h_shift);
        const int chroma_height = AV_CEIL_RSHIFT(height, f->chroma_v_shift);
        const int cx            = x >> f->chroma_h_shift;
        const int cy            = y >> f->chroma_v_shift;
        decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0, 1);

        if (f->chroma_planes) {
            decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1, 1);
            decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1, 1);
        }
        if (fs->transparency)
            decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], (f->version >= 4 && !f->chroma_planes) ? 1 : 2, 1);
    } else if (f->colorspace == 0) {
        decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0]    , width, height, p->linesize[0], 0, 2);
        decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0] + 1, width, height, p->linesize[0], 1, 2);
    } else if (f->use32bit) {
        uint8_t *planes[4] = { p->data[0] + ps * x + y * p->linesize[0],
                               p->data[1] + ps * x + y * p->linesize[1],
                               p->data[2] + ps * x + y * p->linesize[2],
                               p->data[3] + ps * x + y * p->linesize[3] };
        decode_rgb_frame32(fs, planes, width, height, p->linesize);
    } else {
        uint8_t *planes[4] = { p->data[0] + ps * x + y * p->linesize[0],
                               p->data[1] + ps * x + y * p->linesize[1],
                               p->data[2] + ps * x + y * p->linesize[2],
                               p->data[3] + ps * x + y * p->linesize[3] };
        decode_rgb_frame(fs, planes, width, height, p->linesize);
    }
    if (fs->ac != AC_GOLOMB_RICE && f->version > 2) {
        int v;
        get_rac(&fs->c, (uint8_t[]) { 129 });
        v = fs->c.bytestream_end - fs->c.bytestream - 2 - 5*f->ec;
        if (v) {
            av_log(f->avctx, AV_LOG_ERROR, "bytestream end mismatching by %d\n", v);
            fs->slice_damaged = 1;
        }
    }

    emms_c();

    ff_thread_report_progress(&f->picture, si, 0);

    return 0;
}
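/* Read one quantization table: run lengths of repeated values are decoded
 * until 128 entries are filled, then the table is mirrored for negative
 * differences.  Returns the number of distinct quantized values (2*v - 1). */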

static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
{
    int v;
    int i = 0;
    uint8_t state[CONTEXT_SIZE];

    memset(state, 128, sizeof(state));

    for (v = 0; i < 128; v++) {
        unsigned len = get_symbol(c, state, 0) + 1U;

        if (len > 128 - i || !len)
            return AVERROR_INVALIDDATA;

        while (len--) {
            quant_table[i] = scale * v;
            i++;
        }
    }

    for (i = 1; i < 128; i++)
        quant_table[256 - i] = -quant_table[i];
    quant_table[128] = -quant_table[127];

    return 2 * v - 1;
}
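/* Read the five per-input quantization tables of one table set and return
 * the resulting number of contexts (rejecting sets above 32768 contexts). */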

static int read_quant_tables(RangeCoder *c,
                             int16_t quant_table[MAX_CONTEXT_INPUTS][256])
{
    int i;
    int context_count = 1;

    for (i = 0; i < 5; i++) {
        int ret = read_quant_table(c, quant_table[i], context_count);
        if (ret < 0)
            return ret;
        context_count *= ret;
        if (context_count > 32768U) {
            return AVERROR_INVALIDDATA;
        }
    }
    return (context_count + 1) / 2;
}
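/* Parse the global header stored in extradata (version >= 2): version and
 * micro_version, coder type (and custom range-coder state transition table),
 * colorspace, bit depth, chroma subsampling, transparency, the slice grid,
 * the quantization table sets, and the error-detection (CRC) level. */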

static int read_extra_header(FFV1Context *f)
{
    int i, j, k, ret;
    unsigned crc = 0;

    memset(state2, 128, sizeof(state2));
    memset(state, 128, sizeof(state));


    }
    }

    for (i = 1; i < 256; i++)
    }


    }

    ) {
    }

    }

    }
    }
        return ret;

    }
    }

    }

        unsigned v;
    }
    }

    if (f->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(f->avctx, AV_LOG_DEBUG,
               "global: ver:%d.%d, coder:%d, colorspace: %d bpr:%d chroma:%d(%d:%d), alpha:%d slices:%dx%d qtabs:%d ec:%d intra:%d CRC:0x%08X\n",
               f->version, f->micro_version, f->ac, f->colorspace,
               f->avctx->bits_per_raw_sample, f->chroma_planes,
               f->chroma_h_shift, f->chroma_v_shift, f->transparency,
               f->num_h_slices, f->num_v_slices, f->quant_table_count,
               f->ec, f->intra, crc);
    return 0;
}
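/* Parse the per-frame header: keyframe flag, for version < 2 the full set of
 * coding parameters, selection of the output pixel format from colorspace /
 * bit depth / subsampling, locating the slices via their size trailers, and
 * per-slice plane/context setup. */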

static int read_header(FFV1Context *f)
{
    memset(state, 128, sizeof(state));

    if (v >= 2) {
    }

    for (i = 1; i < 256; i++)
    }

        colorspace     = get_symbol(c, state, 0); //YUV cs type
        chroma_planes  = get_rac(c, state);
        transparency   = get_rac(c, state);
            transparency = 0;

        }
    }

    if (chroma_h_shift > 4U || chroma_v_shift > 4U) {
               chroma_h_shift, chroma_v_shift);
    }


    }

    } else
        else
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
               "chroma subsampling not supported in this colorspace\n");
    }
    }
    }
    } else {
    }
    }

    if (context_count < 0) {
    }
    } else {
            int trailer = 3 + 5*!!f->ec;
                break;
            p -= size + trailer;
        }
    }
    }




    }


                   "quant_table_index out of range\n");
        }
        } else {
        }

        }
    }
    }
    }
    return 0;
}
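/* Decoder init: set up the common FFV1 context, parse the global header from
 * extradata when present, and allocate the per-slice contexts. */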

static av_cold int decode_init(AVCodecContext *avctx)
{
    FFV1Context *f = avctx->priv_data;
    int ret;

    if ((ret = ff_ffv1_common_init(avctx)) < 0)
        return ret;

    if (f->avctx->extradata_size > 0 && (ret = read_extra_header(f)) < 0)
        return ret;

    if ((ret = ff_ffv1_init_slice_contexts(f)) < 0)
        return ret;

    avctx->internal->allocate_progress = 1;

    return 0;
}
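/* Top-level decode callback: read the frame header on keyframes, get an
 * output frame, locate the slices from the size/CRC trailers at the end of
 * the packet, run decode_slice() for all slices through avctx->execute(),
 * and conceal damaged slices by copying from the previous picture. */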

static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    uint8_t *buf  = avpkt->data;
    int buf_size  = avpkt->size;
    int i, ret;



    /* we have interlaced material flagged in container */
    }


            return ret;
    } else {
               "Cannot decode non-keyframe without valid keyframe\n");
        }
    }

        return ret;



    buf_p = buf + buf_size;
        int trailer = 3 + 5*!!f->ec;
        int v;

        }
        buf_p -= v;

        if (crc) {
        } else {
        }
        }
        }
    }

    if (i) {
    } else

    }

    avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL,
                   f->slice_count, sizeof(void*));

        int j;

        }
        }
        }
    }


        return ret;

    *got_frame = 1;

    return buf_size;
}
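/* Frame-threading: give each decoding thread its own picture buffers and
 * freshly initialized slice contexts. */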

#if HAVE_THREADS
static int init_thread_copy(AVCodecContext *avctx)
{
    FFV1Context *f = avctx->priv_data;
    int i, ret;

    }

        return ret;

    return 0;
}
#endif

static void copy_fields(FFV1Context *fsdst, FFV1Context *fssrc, FFV1Context *fsrc)
{
    fsdst->ac = fsrc->ac;

    fsdst->ec = fsrc->ec;

    }
}

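/* Frame-threading: copy the reference state (headers, quant tables, context
 * states and the previous picture) from the source thread context into this
 * one before it starts decoding. */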
#if HAVE_THREADS
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    FFV1Context *fsrc = src->priv_data;
    FFV1Context *fdst = dst->priv_data;
    int i, ret;

    if (dst == src)
        return 0;

    {

        memcpy(fdst, fsrc, sizeof(*fdst));
        }
    }



        return ret;
    }


    return 0;
}
#endif

};