FFmpeg: libavcodec/h264dec.c Source File
/*
 * H.26L/H.264/AVC/JVT/14496-10/... decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part 10 codec.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#define UNCHECKED_BITSTREAM_READER 1

#include "config_components.h"
/* ... the remaining #include directives of the original file are elided
 * in this listing ... */
#include "internal.h"
/* ... */

int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;

    return h && h->ps.sps ? h->ps.sps->num_reorder_frames : 0;
}

static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    H264Context *h = opaque;
    H264SliceContext *sl = &h->slice_ctx[0];

    /* ... */
    sl->mb_xy = mb_x + mb_y * h->mb_stride;
    /* FIXME: It is possible albeit uncommon that slice references
     * differ between slices. We take the easy approach and ignore
     * it for now. If this turns out to have any relevance in
     * practice then correct remapping should be added. */
    /* ... remainder of the error-concealment macroblock reconstruction
     * elided in this listing ... */
}

void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
                             int y, int height)
{
    AVCodecContext *avctx = h->avctx;
    const AVFrame *src = h->cur_pic.f;
    int vshift;
    const int field_pic = h->picture_structure != PICT_FRAME;

    /* ... early-out checks (no draw_horiz_band callback installed; field
     * slices not allowed via SLICE_FLAG_ALLOW_FIELD) elided ... */

    if (field_pic) {
        height <<= 1;
        y      <<= 1;
    }

    /* ... offset[] setup for the luma plane elided ... */
    vshift = desc->log2_chroma_h;

    offset[1] =
    offset[2] = (y >> vshift) * src->linesize[1];
    /* ... */

    avctx->draw_horiz_band(avctx, src, offset,
                           y, h->picture_structure, height);
}

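A hedged usage sketch (not part of h264dec.c): how a caller can hook the band-notification mechanism that ff_h264_draw_horiz_band() above drives. The callback name my_draw_band() is made up; draw_horiz_band, slice_flags and SLICE_FLAG_ALLOW_FIELD are the real public AVCodecContext API.

#include <libavcodec/avcodec.h>

static void my_draw_band(struct AVCodecContext *s, const AVFrame *src,
                         int offset[AV_NUM_DATA_POINTERS],
                         int y, int type, int height)
{
    /* `height` decoded lines starting at line `y` are ready; for each plane p
     * the band starts at src->data[p] + offset[p]. */
}

static void install_band_callback(AVCodecContext *avctx)
{
    avctx->draw_horiz_band = my_draw_band;
    avctx->slice_flags    |= SLICE_FLAG_ALLOW_FIELD; /* also receive field bands */
}
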
void ff_h264_free_tables(H264Context *h)
{
    int i;

    /* ... per-frame macroblock tables freed with av_freep() ... */
    h->slice_table = NULL;
    /* ... */

#if CONFIG_ERROR_RESILIENCE
    /* ... error-resilience buffers freed ... */
#endif

    for (i = 0; i < h->nb_slice_ctx; i++) {
        /* ... per-slice-context scratch buffers freed and their
         * *_allocated flags reset ... */
    }
}

int ff_h264_alloc_tables(H264Context *h)
{
    ERContext *const er = &h->er;
    const int big_mb_num = h->mb_stride * (h->mb_height + 1);
    const int row_mb_num = 2 * h->mb_stride * FFMAX(h->nb_slice_ctx, 1);
    const int st_size = big_mb_num + h->mb_stride;
    int x, y;

    /* ... FF_ALLOCZ_TYPED_ARRAY() allocations of the macroblock tables
     * elided; on failure they return AVERROR(ENOMEM) ... */

    h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;
    h->slice_ctx[0].mvd_table[0]       = h->mvd_table[0];
    h->slice_ctx[0].mvd_table[1]       = h->mvd_table[1];
    memset(h->slice_table_base, -1,
           st_size * sizeof(*h->slice_table_base));
    h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
    for (y = 0; y < h->mb_height; y++)
        for (x = 0; x < h->mb_width; x++) {
            const int mb_xy = x + y * h->mb_stride;
            const int b_xy  = 4 * x + 4 * y * h->b_stride;

            h->mb2b_xy[mb_xy]  = b_xy;
            h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
        }

    if (CONFIG_ERROR_RESILIENCE) {
        const int er_size = h->mb_height * h->mb_stride * (4 * sizeof(int) + 1);
        int mb_array_size = h->mb_height * h->mb_stride;
        int y_size  = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
        int yc_size = y_size + 2 * big_mb_num;

        /* init ER */
        /* ... ERContext callbacks, dimensions and table allocations elided;
         * the error resilience code looks cleaner with this. On allocation
         * failure: return AVERROR(ENOMEM);
         * // ff_h264_free_tables will clean up for us ... */

        for (y = 0; y < h->mb_height; y++)
            for (x = 0; x < h->mb_width; x++)
                er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;

        er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
                                                      h->mb_stride + h->mb_width;
        er->dc_val[0] = h->dc_val_base + h->mb_width * 2 + 2;
        er->dc_val[1] = h->dc_val_base + y_size + h->mb_stride + 1;
        /* ... */
        for (int i = 0; i < yc_size; i++)
            h->dc_val_base[i] = 1024;
    }

    return 0;
}

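To make the table construction above concrete, here is a small standalone sketch of the same index arithmetic with made-up toy dimensions (the real mb_stride/b_stride come from the SPS-derived frame geometry, not from these constants):

#include <stdio.h>

enum { MB_W = 4, MB_H = 3 };          /* toy picture: 4x3 macroblocks        */
enum { MB_STRIDE = MB_W + 1 };        /* illustrative padded macroblock stride */
enum { B_STRIDE  = MB_W * 4 + 1 };    /* illustrative 4x4-block stride         */

int main(void)
{
    for (int y = 0; y < MB_H; y++)
        for (int x = 0; x < MB_W; x++) {
            int mb_xy = x + y * MB_STRIDE;        /* macroblock index        */
            int b_xy  = 4 * x + 4 * y * B_STRIDE; /* its top-left 4x4 block  */
            printf("mb (%d,%d) -> mb_xy %2d, b_xy %3d\n", x, y, mb_xy, b_xy);
        }
    return 0;
}
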
/**
 * Init slice context
 */
void ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
{
    /* ... per-slice scratch pointers and tables wired up; elided ... */
}

static int h264_init_pic(H264Picture *pic)
{
    /* ... per-picture buffers and refstruct references allocated; elided ... */

    return 0;
}

static int h264_init_context(AVCodecContext *avctx, H264Context *h)
{
    int i;

    /* ... */
    h->cur_chroma_format_idc = -1;

    h->width_from_caller  = avctx->width;
    h->height_from_caller = avctx->height;

    /* ... */
    h->poc.prev_poc_msb   = 1 << 16;
    h->recovery_frame     = -1;
    h->frame_recovered    = 0;
    h->poc.prev_frame_num = -1;
    h->sei.common.frame_packing.arrangement_cancel_flag = -1;
    h->sei.common.unregistered.x264_build = -1;

    h->next_outputed_poc = INT_MIN;
    for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++)
        h->last_pocs[i] = INT_MIN;

    /* ... allocation of the decode_error_flags refstruct pool ... */
    if (!h->decode_error_flags_pool)
        return AVERROR(ENOMEM);

    /* ... */
    h->slice_ctx = av_calloc(h->nb_slice_ctx, sizeof(*h->slice_ctx));
    /* ... error handling and further state initialization elided ... */

    for (i = 0; i < h->nb_slice_ctx; i++)
        h->slice_ctx[i].h264 = h;

    return 0;
}


static void h264_free_pic(H264Context *h, H264Picture *pic)
{
    /* ... picture unreferenced and its refstruct data released ... */
}

static av_cold int h264_decode_end(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;

    /* ... reference lists, tables and DPB entries freed ... */
    memset(h->delayed_pic, 0, sizeof(h->delayed_pic));

    h->cur_pic_ptr = NULL;

    /* ... remaining pools, slice contexts, parameter sets and the NAL
     * packet buffer freed ... */

    return 0;
}

static AVOnce h264_vlc_init = AV_ONCE_INIT;

static av_cold int h264_decode_init(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    int ret;

    /* ... context init and one-time VLC table setup elided ... */

#if FF_API_TICKS_PER_FRAME
    /* ... deprecated ticks_per_frame handling elided ... */
#endif

    if (avctx->extradata_size > 0 && avctx->extradata) {
        ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
                                       &h->ps, &h->is_avc, &h->nal_length_size,
                                       avctx->err_recognition, avctx);
        if (ret < 0) {
            int explode = avctx->err_recognition & AV_EF_EXPLODE;
            av_log(avctx, explode ? AV_LOG_ERROR : AV_LOG_WARNING,
                   "Error decoding the extradata\n");
            if (explode) {
                return ret;
            }
            ret = 0;
        }
    }

    if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
        h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
        h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
    }

    /* ... */

    if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
        av_log(avctx, AV_LOG_WARNING,
               "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
               "Use it at your own risk\n");
    }

    return 0;
}
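
h264_decode_init() above runs when an application opens this decoder; the sketch below shows the caller side under the usual public API. The helper name open_h264() is hypothetical; the libavcodec/libavutil calls are real.

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

static AVCodecContext *open_h264(const uint8_t *extradata, int extradata_size)
{
    const AVCodec *codec  = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext *avctx = codec ? avcodec_alloc_context3(codec) : NULL;
    if (!avctx)
        return NULL;

    if (extradata_size > 0) {
        /* e.g. the avcC box from an MP4 demuxer; parsed during init */
        avctx->extradata = av_mallocz(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!avctx->extradata) {
            avcodec_free_context(&avctx);
            return NULL;
        }
        memcpy(avctx->extradata, extradata, extradata_size);
        avctx->extradata_size = extradata_size;
    }

    if (avcodec_open2(avctx, codec, NULL) < 0) {
        avcodec_free_context(&avctx);
        return NULL;
    }
    return avctx;
}
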

/**
 * instantaneous decoder refresh.
 */
static void idr(H264Context *h)
{
    int i;

    ff_h264_remove_all_refs(h);
    h->poc.prev_frame_num        =
    h->poc.prev_frame_num_offset = 0;
    h->poc.prev_poc_msb          = 1 << 16;
    h->poc.prev_poc_lsb          = -1;
    for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++)
        h->last_pocs[i] = INT_MIN;
}

/* forget old pics after a seek */
void ff_h264_flush_change(H264Context *h)
{
    int i, j;

    h->next_outputed_poc = INT_MIN;
    h->prev_interlaced_frame = 1;
    idr(h);

    h->poc.prev_frame_num = -1;
    if (h->cur_pic_ptr) {
        h->cur_pic_ptr->reference = 0;
        for (j = i = 0; h->delayed_pic[i]; i++)
            if (h->delayed_pic[i] != h->cur_pic_ptr)
                h->delayed_pic[j++] = h->delayed_pic[i];
        h->delayed_pic[j] = NULL;
    }
    /* ... */
    h->recovery_frame  = -1;
    h->frame_recovered = 0;
    h->current_slice   = 0;
    /* ... */
}

static void h264_decode_flush(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;

    memset(h->delayed_pic, 0, sizeof(h->delayed_pic));

    /* ... flush_change, SEI reset and unreferencing of DPB pictures ... */
    h->cur_pic_ptr = NULL;

    /* ... */
    h->context_initialized = 0;

    /* ... hwaccel flush callback, if any ... */
}

static int get_last_needed_nal(H264Context *h)
{
    int nals_needed = 0;
    int slice_type = 0;
    int picture_intra_only = 1;
    int first_slice = 0;
    int i;

    for (i = 0; i < h->pkt.nb_nals; i++) {
        H2645NAL *nal = &h->pkt.nals[i];
        GetBitContext gb;

        /* packets can sometimes contain multiple PPS/SPS,
         * e.g. two PAFF field pictures in one packet, or a demuxer
         * which splits NALs strangely; if so, when frame threading we
         * can't start the next thread until we've read all of them */
        switch (nal->type) {
        case H264_NAL_SPS:
        case H264_NAL_PPS:
            nals_needed = i;
            break;
        case H264_NAL_DPA:
        case H264_NAL_IDR_SLICE:
        case H264_NAL_SLICE:
            /* ... bitstream reader setup over the slice header elided ... */
            if (!get_ue_golomb_long(&gb) ||  // first_mb_in_slice
                !first_slice ||
                first_slice != nal->type)
                nals_needed = i;
            slice_type = get_ue_golomb_31(&gb);
            if (slice_type > 9)
                slice_type = 0;
            if (slice_type > 4)
                slice_type -= 5;

            slice_type = ff_h264_golomb_to_pict_type[slice_type];
            picture_intra_only &= (slice_type & 3) == AV_PICTURE_TYPE_I;
            if (!first_slice)
                first_slice = nal->type;
        }
    }

    h->picture_intra_only = picture_intra_only;

    return nals_needed;
}


static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
{
    /* ... the fields of the green-metadata SEI message are dumped with
     * av_log(..., AV_LOG_DEBUG, ...); body elided in this listing ... */
}

static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
{
    AVCodecContext *const avctx = h->avctx;
    int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
    int idr_cleared = 0;
    int i, ret = 0;

    /* ... */
    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
        h->current_slice = 0;
        if (!h->first_field) {
            h->cur_pic_ptr = NULL;
            /* ... */
        }
    }

    if (h->nal_length_size == 4) {
        if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
            h->is_avc = 0;
        } else if (buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
            h->is_avc = 1;
    }

    ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx,
                                h->is_avc ? h->nal_length_size : 0,
                                avctx->codec_id, 0);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Error splitting the input into NAL units.\n");
        return ret;
    }

    if (avctx->active_thread_type & FF_THREAD_FRAME)
        nals_needed = get_last_needed_nal(h);
    if (nals_needed < 0)
        return nals_needed;

    for (i = 0; i < h->pkt.nb_nals; i++) {
        H2645NAL *nal = &h->pkt.nals[i];
        int max_slice_ctx, err;

        if (avctx->skip_frame >= AVDISCARD_NONREF &&
            nal->ref_idc == 0 && nal->type != H264_NAL_SEI)
            continue;

        // FIXME these should stop being context-global variables
        h->nal_ref_idc   = nal->ref_idc;
        h->nal_unit_type = nal->type;

        err = 0;
        switch (nal->type) {
        case H264_NAL_IDR_SLICE:
            if ((nal->data[1] & 0xFC) == 0x98) {
                /* ... an IDR slice signalling inter prediction is invalid ... */
                h->next_outputed_poc = INT_MIN;
                goto end;
            }
            if (!idr_cleared) {
                idr(h); // FIXME ensure we don't lose some frames if there is reordering
            }
            idr_cleared = 1;
            h->has_recovery_point = 1;
        case H264_NAL_SLICE:
            h->has_slice = 1;

            if ((err = ff_h264_queue_decode_slice(h, nal))) {
                /* ... the failed slice context is reset ... */
                break;
            }

            if (h->current_slice == 1) {
                if ((avctx->active_thread_type & FF_THREAD_FRAME) &&
                    i >= nals_needed && !h->setup_finished && h->cur_pic_ptr) {
                    ff_thread_finish_setup(avctx);
                    h->setup_finished = 1;
                }

                if (h->avctx->hwaccel &&
                    (ret = FF_HW_CALL(avctx, start_frame, buf, buf_size)) < 0)
                    goto end;
            }

            max_slice_ctx = avctx->hwaccel ? 1 : h->nb_slice_ctx;
            if (h->nb_slice_ctx_queued == max_slice_ctx) {
                if (h->avctx->hwaccel) {
                    ret = FF_HW_CALL(avctx, decode_slice, nal->raw_data, nal->raw_size);
                    h->nb_slice_ctx_queued = 0;
                } else
                    ret = ff_h264_execute_decode_slices(h);
                if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                    goto end;
            }
            break;
        case H264_NAL_DPA:
        case H264_NAL_DPB:
        case H264_NAL_DPC:
            avpriv_request_sample(avctx, "data partitioning");
            break;
        case H264_NAL_SEI:
            if (h->setup_finished) {
                avpriv_request_sample(avctx, "Late SEI");
                break;
            }
            ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
            h->has_recovery_point = h->has_recovery_point ||
                                    h->sei.recovery_point.recovery_frame_cnt != -1;
            /* ... green-metadata SEI is dumped via debug_green_metadata()
             * when FF_DEBUG_GREEN_MD is set ... */
            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                goto end;
            break;
        case H264_NAL_SPS: {
            GetBitContext tmp_gb = nal->gb;
            if (FF_HW_HAS_CB(avctx, decode_params)) {
                ret = FF_HW_CALL(avctx, decode_params, nal->type,
                                 nal->raw_data, nal->raw_size);
                if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
                    goto end;
            }
            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
                break;
            av_log(h->avctx, AV_LOG_DEBUG,
                   "SPS decoding failure, trying again with the complete NAL\n");
            init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
                break;
            ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
            break;
        }
        case H264_NAL_PPS:
            if (FF_HW_HAS_CB(avctx, decode_params)) {
                ret = FF_HW_CALL(avctx, decode_params, nal->type,
                                 nal->raw_data, nal->raw_size);
                if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
                    goto end;
            }
            ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
                                                       nal->size_bits);
            if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
                goto end;
            break;
        case H264_NAL_AUD:
        case H264_NAL_END_SEQUENCE:
        case H264_NAL_END_STREAM:
        case H264_NAL_FILLER_DATA:
        case H264_NAL_SPS_EXT:
        case H264_NAL_AUXILIARY_SLICE:
            break;
        default:
            av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
                   nal->type, nal->size_bits);
        }

        if (err < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
        }
    }

    ret = ff_h264_execute_decode_slices(h);
    if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
        goto end;

    // set decode_error_flags to allow users to detect concealed decoding errors
    if ((ret < 0 || h->er.error_occurred) && h->cur_pic_ptr) {
        if (h->cur_pic_ptr->decode_error_flags) {
            /* Frame-threading in use */
            atomic_int *decode_error = h->cur_pic_ptr->decode_error_flags;
            /* Using atomics here is not supposed to provide synchronisation;
             * they are merely used to allow setting decode_error from both
             * decoding threads in case of coded slices. */
            atomic_fetch_or_explicit(decode_error, FF_DECODE_ERROR_DECODE_SLICES,
                                     memory_order_relaxed);
        } else
            h->cur_pic_ptr->f->decode_error_flags |= FF_DECODE_ERROR_DECODE_SLICES;
    }

end:

#if CONFIG_ERROR_RESILIENCE
    /*
     * FIXME: Error handling code does not seem to support interlaced video
     * when slices span multiple rows.
     * The ff_er_add_slice calls don't work right for bottom
     * fields; they cause massive erroneous error concealing.
     * Error marking covers both fields (top and bottom).
     * This causes a mismatched s->error_count
     * and a bad error table. Further, the error count goes to
     * INT_MAX when called for bottom field, because mb_y is
     * past end by one (caller's fault) and resync_mb_y != 0
     * causes problems for the first MB line, too.
     */
    if (/* ... error resilience applicable to this picture ... */ h->enable_er) {
        H264SliceContext *sl = h->slice_ctx;
        int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
        int decode_error_flags = 0;

        /* ... */
        if (use_last_pic) {
            /* ... the last picture is temporarily inserted into the reference
             * list so concealment has something to copy from ... */
        } else
            /* ... */;

        ff_er_frame_end(&h->er, &decode_error_flags);

        if (decode_error_flags) {
            if (h->cur_pic_ptr->decode_error_flags) {
                atomic_int *decode_error = h->cur_pic_ptr->decode_error_flags;
                atomic_fetch_or_explicit(decode_error, decode_error_flags,
                                         memory_order_relaxed);
            } else
                h->cur_pic_ptr->f->decode_error_flags |= decode_error_flags;
        }
        if (use_last_pic)
            /* ... the temporary reference is removed again ... */;
    }
#endif /* CONFIG_ERROR_RESILIENCE */
    /* clean up */
    if (h->cur_pic_ptr && !h->droppable && h->has_slice) {
        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                  h->picture_structure == PICT_BOTTOM_FIELD);
    }

    return (ret < 0) ? ret : buf_size;
}
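
The decode_error_flags handling in decode_nal_units() uses C11 atomics only so that two frame threads can OR bits into the same word without a data race, not for ordering. A minimal standalone sketch of that pattern (names here are illustrative, not FFmpeg API):

#include <stdatomic.h>

static atomic_int shared_error_flags;

static void flag_error(int flags)
{
    /* relaxed is enough: nothing else is published through this word */
    atomic_fetch_or_explicit(&shared_error_flags, flags, memory_order_relaxed);
}

static int read_error_flags(void)
{
    return atomic_load_explicit(&shared_error_flags, memory_order_relaxed);
}
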

/**
 * Return the number of bytes consumed for building the current frame.
 */
static int get_consumed_bytes(int pos, int buf_size)
{
    if (pos == 0)
        pos = 1;        // avoid infinite loops (I doubt that is needed but...)
    if (pos + 10 > buf_size)
        pos = buf_size; // oops ;)

    return pos;
}

static int h264_export_enc_params(AVFrame *f, const H264Picture *p)
{
    AVVideoEncParams *par;
    unsigned int nb_mb = p->mb_height * p->mb_width;
    unsigned int x, y;

    par = av_video_enc_params_create_side_data(f, AV_VIDEO_ENC_PARAMS_H264, nb_mb);
    if (!par)
        return AVERROR(ENOMEM);

    par->qp = p->pps->init_qp;
    /* ... per-plane delta_qp derived from the PPS chroma_qp_index_offset ... */

    for (y = 0; y < p->mb_height; y++)
        for (x = 0; x < p->mb_width; x++) {
            const unsigned int block_idx = y * p->mb_width + x;
            const unsigned int mb_xy     = y * p->mb_stride + x;
            AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);

            /* ... 16x16 block geometry and the per-MB delta_qp filled in
             * from the qscale table ... */
        }

    return 0;
}

static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
{
    int ret;

    ret = av_frame_ref(dst, srcp->f);
    if (ret < 0)
        return ret;

    /* ... */

    if (srcp->decode_error_flags) {
        atomic_int *decode_error = srcp->decode_error_flags;
        /* The following is not supposed to provide synchronisation at all:
         * given that srcp has already finished decoding, decode_error
         * has already been set to its final value. */
        dst->decode_error_flags |= atomic_load_explicit(decode_error,
                                                        memory_order_relaxed);
    }

    /* ... key-frame flag, encoding parameters and film-grain side data
     * exported as requested by the caller ... */

    return 0;
    /* ... failure path unrefs dst and returns the error ... */
}

static int is_avcc_extradata(const uint8_t *buf, int buf_size)
{
    int cnt = buf[5] & 0x1f;
    const uint8_t *p = buf + 6;

    if (!cnt)
        return 0;
    while (cnt--) {
        int nalsize = AV_RB16(p) + 2;
        if (nalsize > buf_size - (p - buf) || (p[2] & 0x9F) != 7)
            return 0;
        p += nalsize;
    }
    cnt = *(p++);
    if (!cnt)
        return 0;
    while (cnt--) {
        int nalsize = AV_RB16(p) + 2;
        if (nalsize > buf_size - (p - buf) || (p[2] & 0x9F) != 8)
            return 0;
        p += nalsize;
    }
    return 1;
}

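For reference, is_avcc_extradata() above is probing the AVCDecoderConfigurationRecord ("avcC") layout from ISO/IEC 14496-15. A hedged sketch of the header it walks, plus a hypothetical helper that extracts the NAL length-prefix size the same way the decoder stores it in nal_length_size:

/*
 * avcC header (byte offsets), as checked above:
 *   [0] configurationVersion            == 1
 *   [1] AVCProfileIndication
 *   [2] profile_compatibility
 *   [3] AVCLevelIndication
 *   [4] 111111xx : lengthSizeMinusOne in the low 2 bits
 *   [5] 111xxxxx : numOfSequenceParameterSets in the low 5 bits -> buf[5] & 0x1f
 *   then per SPS: 16-bit big-endian length + SPS NAL (nal_unit_type 7)
 *   then one byte numOfPictureParameterSets
 *   then per PPS: 16-bit big-endian length + PPS NAL (nal_unit_type 8)
 */
static int avcc_nal_length_size(const uint8_t *avcc, int size)
{
    if (size < 7 || avcc[0] != 1)   /* not a plausible avcC record */
        return -1;
    return (avcc[4] & 0x03) + 1;    /* 1, 2 or 4 bytes per NAL length prefix */
}
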
static int finalize_frame(H264Context *h, AVFrame *dst, H264Picture *out, int *got_frame)
{
    int ret;

    /* ... */

    if (h->skip_gray > 0 &&
        h->non_gray && out->gray /* && ... */)
        return 0;

    if (!h->avctx->hwaccel &&
        (out->field_poc[0] == INT_MAX ||
         out->field_poc[1] == INT_MAX)
       ) {
        int p;
        AVFrame *f = out->f;
        int field = out->field_poc[0] == INT_MAX;
        uint8_t *dst_data[4];
        int linesizes[4];
        const uint8_t *src_data[4];

        /* ... the missing field is synthesized by duplicating the lines of
         * the field that was decoded ... */
        for (p = 0; p < 4; p++) {
            dst_data[p]  = f->data[p] + (field ^ 1) * f->linesize[p];
            src_data[p]  = f->data[p] +  field      * f->linesize[p];
            linesizes[p] = 2 * f->linesize[p];
        }

        av_image_copy(dst_data, linesizes, src_data, linesizes,
                      f->format, f->width, f->height >> 1);
    }

    ret = output_frame(h, dst, out);
    if (ret < 0)
        return ret;

    *got_frame = 1;

    if (CONFIG_MPEGVIDEODEC) {
        ff_print_debug_info2(h->avctx, dst, out->mb_type, out->qscale_table,
                             out->motion_val,
                             out->mb_width, out->mb_height, out->mb_stride, 1);
    }

    return 0;
}


static int send_next_delayed_frame(H264Context *h, AVFrame *dst_frame,
                                   int *got_frame, int buf_index)
{
    int ret, i, out_idx;
    H264Picture *out;

    h->cur_pic_ptr = NULL;
    /* ... */

    while (h->delayed_pic[0]) {
        out = h->delayed_pic[0];
        out_idx = 0;
        for (i = 1;
             h->delayed_pic[i] &&
             /* ... */
             !h->delayed_pic[i]->mmco_reset;
             i++)
            if (h->delayed_pic[i]->poc < out->poc) {
                out     = h->delayed_pic[i];
                out_idx = i;
            }

        for (i = out_idx; h->delayed_pic[i]; i++)
            h->delayed_pic[i] = h->delayed_pic[i + 1];

        /* ... */
        h->frame_recovered |= out->recovered;

        ret = finalize_frame(h, dst_frame, out, got_frame);
        if (ret < 0)
            return ret;
        if (*got_frame)
            break;
    }

    return buf_index;
}


static int h264_decode_frame(AVCodecContext *avctx, AVFrame *pict,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    H264Context *h     = avctx->priv_data;
    int buf_index;
    int ret;

    h->setup_finished = 0;
    h->nb_slice_ctx_queued = 0;

    /* ... */

    /* end of stream, output what is still in the buffers */
    if (buf_size == 0)
        return send_next_delayed_frame(h, pict, got_frame, 0);

    if (av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
        size_t side_size;
        uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
                                                &side_size);
        ff_h264_decode_extradata(side, side_size,
                                 &h->ps, &h->is_avc, &h->nal_length_size,
                                 avctx->err_recognition, avctx);
    }
    if (h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC) {
        if (is_avcc_extradata(buf, buf_size))
            return ff_h264_decode_extradata(buf, buf_size,
                                            &h->ps, &h->is_avc, &h->nal_length_size,
                                            avctx->err_recognition, avctx);
    }

    buf_index = decode_nal_units(h, buf, buf_size);
    if (buf_index < 0)
        return AVERROR_INVALIDDATA;

    /* ... */

    if (!h->cur_pic_ptr /* ... no picture was started for this packet ... */) {
        if (avctx->skip_frame >= AVDISCARD_NONREF ||
            (buf_size >= 4 && !memcmp("Q264", buf, 4)))
            return buf_size;
        av_log(avctx, AV_LOG_ERROR, "no frame!\n");
        return AVERROR_INVALIDDATA;
    }

    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
        (h->mb_y >= h->mb_height && h->mb_height)) {
        if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
            return ret;

        /* Wait for second field. */
        if (h->next_output_pic) {
            ret = finalize_frame(h, pict, h->next_output_pic, got_frame);
            if (ret < 0)
                return ret;
        }
    }

    /* ... */

    return get_consumed_bytes(buf_index, buf_size);
}

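h264_decode_frame() above is the packet-level decode callback; from the application side it is driven through the ordinary send/receive API. A minimal hedged sketch (error handling abbreviated; the helper name decode_packet() is made up):

#include <libavcodec/avcodec.h>

static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);   /* pkt == NULL flushes the decoder */
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_frame(avctx, frame)) >= 0) {
        /* ... use the decoded frame ... */
        av_frame_unref(frame);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
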
#define OFFSET(x) offsetof(H264Context, x)
#define VD  AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
#define VDX VD | AV_OPT_FLAG_EXPORT

static const AVOption h264_options[] = {
    { "nal_length_size", "nal_length_size", OFFSET(nal_length_size),
      AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, VDX },
    { "enable_er", "Enable error resilience on damaged frames (unsafe)",
      OFFSET(enable_er), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
    { "x264_build", "Assume this x264 version if no x264 version found in any SEI",
      OFFSET(x264_build), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VD },
    /* ... skip_gray option entry elided in this listing ... */
    { "noref_gray", "Avoid using gray gap frames as references",
      OFFSET(noref_gray), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VD },
    { NULL },
};

static const AVClass h264_class = {
    .class_name = "H264 Decoder",
    .item_name  = av_default_item_name,
    .option     = h264_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
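
The AVOption table above is what AVDictionary-based configuration resolves against; a hedged sketch of setting two of these options when opening the decoder (the wrapper function is hypothetical, the option keys are the ones declared above):

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

static int open_with_h264_options(AVCodecContext *avctx, const AVCodec *codec)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "enable_er",  "0",   0);  /* force error resilience off       */
    av_dict_set(&opts, "x264_build", "150", 0);  /* fallback when no SEI says otherwise */

    ret = avcodec_open2(avctx, codec, &opts);
    av_dict_free(&opts);  /* any unconsumed entries are returned here */
    return ret;
}
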

const FFCodec ff_h264_decoder = {
    .p.name         = "h264",
    CODEC_LONG_NAME("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_H264,
    .priv_data_size = sizeof(H264Context),
    .init           = h264_decode_init,
    .close          = h264_decode_end,
    FF_CODEC_DECODE_CB(h264_decode_frame),
    .flush          = h264_decode_flush,
    /* ... capabilities, caps_internal, private class, profiles and the
     * update_thread_context callbacks elided in this listing ... */
    .hw_configs     = (const AVCodecHWConfigInternal *const []) {
#if CONFIG_H264_DXVA2_HWACCEL
                          HWACCEL_DXVA2(h264),
#endif
#if CONFIG_H264_D3D11VA_HWACCEL
                          HWACCEL_D3D11VA(h264),
#endif
#if CONFIG_H264_D3D11VA2_HWACCEL
                          HWACCEL_D3D11VA2(h264),
#endif
#if CONFIG_H264_D3D12VA_HWACCEL
                          HWACCEL_D3D12VA(h264),
#endif
#if CONFIG_H264_NVDEC_HWACCEL
                          HWACCEL_NVDEC(h264),
#endif
#if CONFIG_H264_VAAPI_HWACCEL
                          HWACCEL_VAAPI(h264),
#endif
#if CONFIG_H264_VDPAU_HWACCEL
                          HWACCEL_VDPAU(h264),
#endif
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
                          HWACCEL_VIDEOTOOLBOX(h264),
#endif
#if CONFIG_H264_VULKAN_HWACCEL
                          HWACCEL_VULKAN(h264),
#endif
                          NULL
                      },
    /* ... */
};