FFmpeg: libavutil/hwcontext.c Source File
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
/* ... (remaining #include lines elided in this listing) ... */

static const HWContextType * const hw_table[] = {
#if CONFIG_CUDA
    &ff_hwcontext_type_cuda,
#endif
#if CONFIG_D3D11VA
    &ff_hwcontext_type_d3d11va,
#endif
#if CONFIG_LIBDRM
    &ff_hwcontext_type_drm,
#endif
#if CONFIG_DXVA2
    &ff_hwcontext_type_dxva2,
#endif
#if CONFIG_OPENCL
    &ff_hwcontext_type_opencl,
#endif
#if CONFIG_QSV
    &ff_hwcontext_type_qsv,
#endif
#if CONFIG_VAAPI
    &ff_hwcontext_type_vaapi,
#endif
#if CONFIG_VDPAU
    &ff_hwcontext_type_vdpau,
#endif
#if CONFIG_VIDEOTOOLBOX
    &ff_hwcontext_type_videotoolbox,
#endif
#if CONFIG_MEDIACODEC
    &ff_hwcontext_type_mediacodec,
#endif
#if CONFIG_VULKAN
    &ff_hwcontext_type_vulkan,
#endif
    NULL,
};

static const char *const hw_type_names[] = {
    [AV_HWDEVICE_TYPE_CUDA]         = "cuda",
    [AV_HWDEVICE_TYPE_DRM]          = "drm",
    [AV_HWDEVICE_TYPE_DXVA2]        = "dxva2",
    [AV_HWDEVICE_TYPE_D3D11VA]      = "d3d11va",
    [AV_HWDEVICE_TYPE_OPENCL]       = "opencl",
    [AV_HWDEVICE_TYPE_QSV]          = "qsv",
    [AV_HWDEVICE_TYPE_VAAPI]        = "vaapi",
    [AV_HWDEVICE_TYPE_VDPAU]        = "vdpau",
    [AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = "videotoolbox",
    [AV_HWDEVICE_TYPE_MEDIACODEC]   = "mediacodec",
    [AV_HWDEVICE_TYPE_VULKAN]       = "vulkan",
};
enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name)
{
    int type;
    for (type = 0; type < FF_ARRAY_ELEMS(hw_type_names); type++) {
        if (hw_type_names[type] && !strcmp(hw_type_names[type], name))
            return type;
    }
    return AV_HWDEVICE_TYPE_NONE;
}

const char *av_hwdevice_get_type_name(enum AVHWDeviceType type)
{
    if (type > AV_HWDEVICE_TYPE_NONE && type < FF_ARRAY_ELEMS(hw_type_names))
        return hw_type_names[type];
    else
        return NULL;
}

enum AVHWDeviceType av_hwdevice_iterate_types(enum AVHWDeviceType prev)
{
    enum AVHWDeviceType next;
    int i, set = 0;
    for (i = 0; hw_table[i]; i++) {
        if (prev != AV_HWDEVICE_TYPE_NONE && hw_table[i]->type <= prev)
            continue;
        if (!set || hw_table[i]->type < next) {
            next = hw_table[i]->type;
            set  = 1;
        }
    }
    return set ? next : AV_HWDEVICE_TYPE_NONE;
}

static const AVClass hwdevice_ctx_class = {
    .class_name = "AVHWDeviceContext",
    .item_name  = av_default_item_name,
    .version    = LIBAVUTIL_VERSION_INT,
};
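/*
 * Usage sketch (not part of hwcontext.c): enumerating the device types
 * compiled into this build and looking one up by name, using the public
 * lookup/iteration API implemented above. A hedged example; the name
 * "vaapi" is only an illustration and may not be available in every build.
 */
#include <stdio.h>
#include <libavutil/hwcontext.h>

static void list_hw_device_types(void)
{
    enum AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;

    /* av_hwdevice_iterate_types() walks the compiled-in backends in order. */
    while ((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE)
        printf("available hw device type: %s\n",
               av_hwdevice_get_type_name(type));

    /* av_hwdevice_find_type_by_name() maps a string back to the enum. */
    type = av_hwdevice_find_type_by_name("vaapi");
    if (type == AV_HWDEVICE_TYPE_NONE)
        printf("vaapi is not supported in this build\n");
}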
static void hwdevice_ctx_free(void *opaque, uint8_t *data)
{
    AVHWDeviceContext *ctx = (AVHWDeviceContext*)data;

    /* uninit might still want access the hw context and the user
     * free() callback might destroy it, so uninit has to be called first */
    if (ctx->internal->hw_type->device_uninit)
        ctx->internal->hw_type->device_uninit(ctx);

    if (ctx->free)
        ctx->free(ctx);

    av_buffer_unref(&ctx->internal->source_device);

    /* ... (freeing of ctx->hwctx, ctx->internal and ctx itself elided) ... */
}

AVBufferRef *av_hwdevice_ctx_alloc(enum AVHWDeviceType type)
{
    AVHWDeviceContext *ctx;
    AVBufferRef *buf;
    const HWContextType *hw_type = NULL;
    int i;

    for (i = 0; hw_table[i]; i++) {
        if (hw_table[i]->type == type) {
            hw_type = hw_table[i];
            break;
        }
    }
    if (!hw_type)
        return NULL;

    ctx = av_mallocz(sizeof(*ctx));
    if (!ctx)
        return NULL;

    ctx->internal = av_mallocz(sizeof(*ctx->internal));
    if (!ctx->internal)
        goto fail;

    if (hw_type->device_priv_size) {
        ctx->internal->priv = av_mallocz(hw_type->device_priv_size);
        if (!ctx->internal->priv)
            goto fail;
    }

    if (hw_type->device_hwctx_size) {
        ctx->hwctx = av_mallocz(hw_type->device_hwctx_size);
        if (!ctx->hwctx)
            goto fail;
    }

    buf = av_buffer_create((uint8_t*)ctx, sizeof(*ctx),
                           hwdevice_ctx_free, NULL,
                           AV_BUFFER_FLAG_READONLY);
    if (!buf)
        goto fail;

    ctx->type     = type;
    ctx->av_class = &hwdevice_ctx_class;

    ctx->internal->hw_type = hw_type;

    return buf;

fail:
    /* ... (error cleanup elided) ... */
    return NULL;
}

int av_hwdevice_ctx_init(AVBufferRef *ref)
{
    AVHWDeviceContext *ctx = (AVHWDeviceContext*)ref->data;
    int ret = 0;

    if (ctx->internal->hw_type->device_init) {
        ret = ctx->internal->hw_type->device_init(ctx);
        if (ret < 0)
            goto fail;
    }

    return 0;
fail:
    if (ctx->internal->hw_type->device_uninit)
        ctx->internal->hw_type->device_uninit(ctx);
    return ret;
}
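/*
 * Usage sketch (not part of hwcontext.c): the manual two-step device setup
 * implemented above - av_hwdevice_ctx_alloc() followed by
 * av_hwdevice_ctx_init(). Most callers use av_hwdevice_ctx_create(),
 * defined further down; this only illustrates the split. Hedged example.
 */
#include <libavutil/hwcontext.h>

static AVBufferRef *alloc_and_init_device(enum AVHWDeviceType type)
{
    AVBufferRef *device_ref = av_hwdevice_ctx_alloc(type);
    if (!device_ref)
        return NULL;

    /* Backend-specific fields of ((AVHWDeviceContext*)device_ref->data)->hwctx
     * would be filled in here before initialisation. */
    if (av_hwdevice_ctx_init(device_ref) < 0) {
        av_buffer_unref(&device_ref);
        return NULL;
    }
    return device_ref;
}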
static const AVClass hwframe_ctx_class = {
    .class_name = "AVHWFramesContext",
    .item_name  = av_default_item_name,
    .version    = LIBAVUTIL_VERSION_INT,
};

static void hwframe_ctx_free(void *opaque, uint8_t *data)
{
    AVHWFramesContext *ctx = (AVHWFramesContext*)data;

    if (ctx->internal->pool_internal)
        av_buffer_pool_uninit(&ctx->internal->pool_internal);

    if (ctx->internal->hw_type->frames_uninit)
        ctx->internal->hw_type->frames_uninit(ctx);

    if (ctx->free)
        ctx->free(ctx);

    av_buffer_unref(&ctx->internal->source_frames);

    av_buffer_unref(&ctx->device_ref);

    /* ... (freeing of ctx->hwctx, ctx->internal and ctx itself elided) ... */
}

AVBufferRef *av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
{
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)device_ref_in->data;
    const HWContextType  *hw_type = device_ctx->internal->hw_type;
    AVHWFramesContext *ctx;
    AVBufferRef *buf, *device_ref = NULL;

    /* ... (allocation of ctx and ctx->internal elided) ... */

    if (hw_type->frames_priv_size) {
        ctx->internal->priv = av_mallocz(hw_type->frames_priv_size);
        if (!ctx->internal->priv)
            goto fail;
    }

    if (hw_type->frames_hwctx_size) {
        ctx->hwctx = av_mallocz(hw_type->frames_hwctx_size);
        if (!ctx->hwctx)
            goto fail;
    }

    device_ref = av_buffer_ref(device_ref_in);
    if (!device_ref)
        goto fail;

    buf = av_buffer_create((uint8_t*)ctx, sizeof(*ctx),
                           hwframe_ctx_free, NULL,
                           AV_BUFFER_FLAG_READONLY);
    if (!buf)
        goto fail;

    ctx->av_class   = &hwframe_ctx_class;
    ctx->device_ref = device_ref;
    ctx->device_ctx = device_ctx;

    ctx->internal->hw_type = hw_type;

    return buf;

fail:
    if (device_ref)
        av_buffer_unref(&device_ref);
    /* ... (remaining error cleanup elided) ... */
    return NULL;
}

static int hwframe_pool_prealloc(AVBufferRef *ref)
{
    AVHWFramesContext *ctx = (AVHWFramesContext*)ref->data;
    AVFrame **frames;
    int i, ret = 0;

    frames = av_calloc(ctx->initial_pool_size, sizeof(*frames));
    if (!frames)
        return AVERROR(ENOMEM);

    for (i = 0; i < ctx->initial_pool_size; i++) {
        frames[i] = av_frame_alloc();
        if (!frames[i])
            goto fail;

        ret = av_hwframe_get_buffer(ref, frames[i], 0);
        if (ret < 0)
            goto fail;
    }

fail:
    for (i = 0; i < ctx->initial_pool_size; i++)
        av_frame_free(&frames[i]);
    av_freep(&frames);

    return ret;
}

int av_hwframe_ctx_init(AVBufferRef *ref)
{
    AVHWFramesContext *ctx = (AVHWFramesContext*)ref->data;
    const enum AVPixelFormat *pix_fmt;
    int ret;

    if (ctx->internal->source_frames) {
        /* A derived frame context is already initialised. */
        return 0;
    }

    /* validate the pixel format */
    for (pix_fmt = ctx->internal->hw_type->pix_fmts; *pix_fmt != AV_PIX_FMT_NONE; pix_fmt++) {
        if (*pix_fmt == ctx->format)
            break;
    }
    if (*pix_fmt == AV_PIX_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR,
               "The hardware pixel format '%s' is not supported by the device type '%s'\n",
               av_get_pix_fmt_name(ctx->format), ctx->internal->hw_type->name);
        return AVERROR(ENOSYS);
    }

    /* validate the dimensions */
    ret = av_image_check_size(ctx->width, ctx->height, 0, ctx);
    if (ret < 0)
        return ret;

    /* format-specific init */
    if (ctx->internal->hw_type->frames_init) {
        ret = ctx->internal->hw_type->frames_init(ctx);
        if (ret < 0)
            goto fail;
    }

    if (ctx->internal->pool_internal && !ctx->pool)
        ctx->pool = ctx->internal->pool_internal;

    /* preallocate the frames in the pool, if requested */
    if (ctx->initial_pool_size > 0) {
        ret = hwframe_pool_prealloc(ref);
        if (ret < 0)
            goto fail;
    }

    return 0;
fail:
    if (ctx->internal->hw_type->frames_uninit)
        ctx->internal->hw_type->frames_uninit(ctx);
    return ret;
}
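/*
 * Usage sketch (not part of hwcontext.c): creating a pool of hardware frames
 * on an already-initialised device, the job of av_hwframe_ctx_alloc() and
 * av_hwframe_ctx_init() above. The VAAPI/NV12 formats and the pool size of 20
 * are illustrative assumptions, not requirements of the API.
 */
#include <libavutil/hwcontext.h>

static AVBufferRef *alloc_hw_frames(AVBufferRef *device_ref, int width, int height)
{
    AVBufferRef *frames_ref = av_hwframe_ctx_alloc(device_ref);
    AVHWFramesContext *frames_ctx;

    if (!frames_ref)
        return NULL;

    frames_ctx = (AVHWFramesContext*)frames_ref->data;
    frames_ctx->format    = AV_PIX_FMT_VAAPI; /* hw surface format */
    frames_ctx->sw_format = AV_PIX_FMT_NV12;  /* underlying data layout */
    frames_ctx->width     = width;
    frames_ctx->height    = height;
    frames_ctx->initial_pool_size = 20;       /* preallocated via hwframe_pool_prealloc() */

    if (av_hwframe_ctx_init(frames_ref) < 0)
        av_buffer_unref(&frames_ref);

    return frames_ref;
}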
int av_hwframe_transfer_get_formats(AVBufferRef *hwframe_ref,
                                    enum AVHWFrameTransferDirection dir,
                                    enum AVPixelFormat **formats, int flags)
{
    AVHWFramesContext *ctx = (AVHWFramesContext*)hwframe_ref->data;

    if (!ctx->internal->hw_type->transfer_get_formats)
        return AVERROR(ENOSYS);

    return ctx->internal->hw_type->transfer_get_formats(ctx, dir, formats);
}

static int transfer_data_alloc(AVFrame *dst, const AVFrame *src, int flags)
{
    AVHWFramesContext *ctx;
    AVFrame *frame_tmp;
    int ret = 0;

    if (!src->hw_frames_ctx)
        return AVERROR(EINVAL);
    ctx = (AVHWFramesContext*)src->hw_frames_ctx->data;

    frame_tmp = av_frame_alloc();
    if (!frame_tmp)
        return AVERROR(ENOMEM);

    /* if the format is set, use that
     * otherwise pick the first supported one */
    if (dst->format >= 0) {
        frame_tmp->format = dst->format;
    } else {
        enum AVPixelFormat *formats;

        ret = av_hwframe_transfer_get_formats(src->hw_frames_ctx,
                                              AV_HWFRAME_TRANSFER_DIRECTION_FROM,
                                              &formats, 0);
        if (ret < 0)
            goto fail;
        frame_tmp->format = formats[0];
        av_freep(&formats);
    }
    frame_tmp->width  = ctx->width;
    frame_tmp->height = ctx->height;

    ret = av_frame_get_buffer(frame_tmp, 0);
    if (ret < 0)
        goto fail;

    ret = av_hwframe_transfer_data(frame_tmp, src, flags);
    if (ret < 0)
        goto fail;

    frame_tmp->width  = src->width;
    frame_tmp->height = src->height;

    av_frame_move_ref(dst, frame_tmp);

fail:
    av_frame_free(&frame_tmp);
    return ret;
}

int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags)
{
    int ret;

    if (!dst->buf[0])
        return transfer_data_alloc(dst, src, flags);

    /*
     * Hardware -> Hardware Transfer.
     * Unlike Software -> Hardware or Hardware -> Software, the transfer
     * function could be provided by either the src or dst, depending on
     * the specific combination of hardware.
     */
    if (src->hw_frames_ctx && dst->hw_frames_ctx) {
        AVHWFramesContext *src_ctx = (AVHWFramesContext*)src->hw_frames_ctx->data;
        AVHWFramesContext *dst_ctx = (AVHWFramesContext*)dst->hw_frames_ctx->data;

        if (src_ctx->internal->source_frames) {
            av_log(src_ctx, AV_LOG_ERROR,
                   "A device with a derived frame context cannot be used as "
                   "the source of a HW -> HW transfer.");
            return AVERROR(ENOSYS);
        }

        if (dst_ctx->internal->source_frames) {
            av_log(src_ctx, AV_LOG_ERROR,
                   "A device with a derived frame context cannot be used as "
                   "the destination of a HW -> HW transfer.");
            return AVERROR(ENOSYS);
        }

        /* ... (dispatch between the two backends elided; the
         *      destination-side path is) ... */
        ret = dst_ctx->internal->hw_type->transfer_data_to(dst_ctx, dst, src);
        if (ret < 0)
            return ret;
    } else {
        if (src->hw_frames_ctx) {
            AVHWFramesContext *ctx = (AVHWFramesContext*)src->hw_frames_ctx->data;

            ret = ctx->internal->hw_type->transfer_data_from(ctx, dst, src);
            if (ret < 0)
                return ret;
        } else if (dst->hw_frames_ctx) {
            AVHWFramesContext *ctx = (AVHWFramesContext*)dst->hw_frames_ctx->data;

            ret = ctx->internal->hw_type->transfer_data_to(ctx, dst, src);
            if (ret < 0)
                return ret;
        } else {
            return AVERROR(ENOSYS);
        }
    }
    return 0;
}
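/*
 * Usage sketch (not part of hwcontext.c): downloading a decoded hardware
 * frame to system memory via av_hwframe_transfer_data(), which dispatches to
 * the backend transfer callbacks above. Leaving dst->format unset lets
 * transfer_data_alloc() pick the first format reported by
 * av_hwframe_transfer_get_formats(). Hedged example.
 */
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>

static AVFrame *download_hw_frame(const AVFrame *hw_frame)
{
    AVFrame *sw_frame = av_frame_alloc();
    if (!sw_frame)
        return NULL;

    /* sw_frame has no buffers and no format set, so a software frame is
     * allocated and a supported format chosen automatically. */
    if (av_hwframe_transfer_data(sw_frame, hw_frame, 0) < 0) {
        av_frame_free(&sw_frame);
        return NULL;
    }
    return sw_frame;
}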
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
{
    AVHWFramesContext *ctx = (AVHWFramesContext*)hwframe_ref->data;
    int ret;

    if (ctx->internal->source_frames) {
        // This is a derived frame context, so we allocate in the source
        // and map the frame immediately.
        AVFrame *src_frame;

        frame->format        = ctx->format;
        frame->hw_frames_ctx = av_buffer_ref(hwframe_ref);
        if (!frame->hw_frames_ctx)
            return AVERROR(ENOMEM);

        src_frame = av_frame_alloc();
        if (!src_frame)
            return AVERROR(ENOMEM);

        ret = av_hwframe_get_buffer(ctx->internal->source_frames,
                                    src_frame, 0);
        if (ret < 0) {
            av_frame_free(&src_frame);
            return ret;
        }

        ret = av_hwframe_map(frame, src_frame,
                             ctx->internal->source_allocation_map_flags);
        if (ret) {
            av_log(ctx, AV_LOG_ERROR, "Failed to map frame into derived "
                   "frame context: %d.\n", ret);
            av_frame_free(&src_frame);
            return ret;
        }

        // Free the source frame immediately - the mapped frame still
        // contains a reference to it.
        av_frame_free(&src_frame);

        return 0;
    }

    if (!ctx->internal->hw_type->frames_get_buffer)
        return AVERROR(ENOSYS);

    if (!ctx->pool)
        return AVERROR(EINVAL);

    frame->hw_frames_ctx = av_buffer_ref(hwframe_ref);
    if (!frame->hw_frames_ctx)
        return AVERROR(ENOMEM);

    ret = ctx->internal->hw_type->frames_get_buffer(ctx, frame);
    if (ret < 0) {
        av_buffer_unref(&frame->hw_frames_ctx);
        return ret;
    }

    frame->extended_data = frame->data;

    return 0;
}
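/*
 * Usage sketch (not part of hwcontext.c): pulling one surface out of the
 * frame pool with av_hwframe_get_buffer(), as implemented above. The frame
 * comes back with hw_frames_ctx, format, width and height already filled in.
 */
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>

static AVFrame *get_pool_frame(AVBufferRef *hwframe_ref)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;

    if (av_hwframe_get_buffer(hwframe_ref, frame, 0) < 0) {
        av_frame_free(&frame);
        return NULL;
    }
    return frame;
}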
void *av_hwdevice_hwconfig_alloc(AVBufferRef *ref)
{
    AVHWDeviceContext *ctx = (AVHWDeviceContext*)ref->data;
    const HWContextType *hw_type = ctx->internal->hw_type;

    if (hw_type->device_hwconfig_size == 0)
        return NULL;

    return av_mallocz(hw_type->device_hwconfig_size);
}

AVHWFramesConstraints *av_hwdevice_get_hwframe_constraints(AVBufferRef *ref,
                                                           const void *hwconfig)
{
    AVHWDeviceContext *ctx = (AVHWDeviceContext*)ref->data;
    const HWContextType *hw_type = ctx->internal->hw_type;
    AVHWFramesConstraints *constraints;

    if (!hw_type->frames_get_constraints)
        return NULL;

    constraints = av_mallocz(sizeof(*constraints));
    if (!constraints)
        return NULL;

    constraints->min_width = constraints->min_height = 0;
    constraints->max_width = constraints->max_height = INT_MAX;

    if (hw_type->frames_get_constraints(ctx, hwconfig, constraints) >= 0) {
        return constraints;
    } else {
        av_hwframe_constraints_free(&constraints);
        return NULL;
    }
}

void av_hwframe_constraints_free(AVHWFramesConstraints **constraints)
{
    if (*constraints) {
        av_freep(&(*constraints)->valid_hw_formats);
        av_freep(&(*constraints)->valid_sw_formats);
    }
    av_freep(constraints);
}
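/*
 * Usage sketch (not part of hwcontext.c): querying the frame constraints of
 * a device with av_hwdevice_get_hwframe_constraints() and releasing them
 * with av_hwframe_constraints_free(), both defined above. Passing NULL for
 * the hwconfig argument asks for configuration-independent limits.
 */
#include <stdio.h>
#include <libavutil/hwcontext.h>

static void print_frame_limits(AVBufferRef *device_ref)
{
    AVHWFramesConstraints *cst =
        av_hwdevice_get_hwframe_constraints(device_ref, NULL);
    if (!cst)
        return;

    printf("surface size: %dx%d .. %dx%d\n",
           cst->min_width, cst->min_height, cst->max_width, cst->max_height);

    av_hwframe_constraints_free(&cst);
}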
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type,
                           const char *device, AVDictionary *opts, int flags)
{
    AVBufferRef *device_ref = NULL;
    AVHWDeviceContext *device_ctx;
    int ret = 0;

    device_ref = av_hwdevice_ctx_alloc(type);
    if (!device_ref) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    device_ctx = (AVHWDeviceContext*)device_ref->data;

    /* ... (calls the backend's device_create() callback with the given
     *      device string and options, then av_hwdevice_ctx_init()) ... */

    *pdevice_ref = device_ref;
    return 0;
fail:
    av_buffer_unref(&device_ref);
    *pdevice_ref = NULL;
    return ret;
}
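/*
 * Usage sketch (not part of hwcontext.c): the usual one-call device setup via
 * av_hwdevice_ctx_create(), implemented above. The VAAPI type and the NULL
 * device string (let the backend pick a default device) are illustrative
 * assumptions.
 */
#include <libavutil/hwcontext.h>

static AVBufferRef *open_default_vaapi_device(void)
{
    AVBufferRef *device_ref = NULL;
    int ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_VAAPI,
                                     NULL, NULL, 0);
    return ret < 0 ? NULL : device_ref;
}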
int av_hwdevice_ctx_create_derived_opts(AVBufferRef **dst_ref_ptr,
                                        enum AVHWDeviceType type,
                                        AVBufferRef *src_ref,
                                        AVDictionary *options, int flags)
{
    AVBufferRef *dst_ref = NULL, *tmp_ref;
    AVHWDeviceContext *dst_ctx, *tmp_ctx;
    int ret = 0;

    /* if the source device, or one it was itself derived from, already has
     * the requested type, just return a new reference to it */
    tmp_ref = src_ref;
    while (tmp_ref) {
        tmp_ctx = (AVHWDeviceContext*)tmp_ref->data;
        if (tmp_ctx->type == type) {
            dst_ref = av_buffer_ref(tmp_ref);
            if (!dst_ref) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            goto done;
        }
        tmp_ref = tmp_ctx->internal->source_device;
    }

    dst_ref = av_hwdevice_ctx_alloc(type);
    if (!dst_ref) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    dst_ctx = (AVHWDeviceContext*)dst_ref->data;

    /* otherwise try to derive from each device in the source chain */
    tmp_ref = src_ref;
    while (tmp_ref) {
        tmp_ctx = (AVHWDeviceContext*)tmp_ref->data;
        if (dst_ctx->internal->hw_type->device_derive) {
            ret = dst_ctx->internal->hw_type->device_derive(dst_ctx,
                                                            tmp_ctx,
                                                            options, flags);
            if (ret == 0) {
                /* ... (record source_device, run av_hwdevice_ctx_init()) ... */
                goto done;
            }
            if (ret != AVERROR(ENOSYS))
                goto fail;
        }
        tmp_ref = tmp_ctx->internal->source_device;
    }

    ret = AVERROR(ENOSYS);
    goto fail;

done:
    *dst_ref_ptr = dst_ref;
    return 0;

fail:
    av_buffer_unref(&dst_ref);
    *dst_ref_ptr = NULL;
    return ret;
}

int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr,
                                   enum AVHWDeviceType type,
                                   AVBufferRef *src_ref, int flags)
{
    return av_hwdevice_ctx_create_derived_opts(dst_ref_ptr, type, src_ref,
                                               NULL, flags);
}
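/*
 * Usage sketch (not part of hwcontext.c): deriving one device from another
 * with av_hwdevice_ctx_create_derived(), the thin wrapper defined above.
 * Deriving an OpenCL device from an existing VAAPI device is only an
 * illustrative pairing; support depends on the backends in the build.
 */
#include <libavutil/hwcontext.h>

static AVBufferRef *derive_opencl_from(AVBufferRef *vaapi_device_ref)
{
    AVBufferRef *derived = NULL;
    if (av_hwdevice_ctx_create_derived(&derived, AV_HWDEVICE_TYPE_OPENCL,
                                       vaapi_device_ref, 0) < 0)
        return NULL;
    return derived;
}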
static void ff_hwframe_unmap(void *opaque, uint8_t *data)
{
    HWMapDescriptor *hwmap = (HWMapDescriptor*)data;
    AVHWFramesContext *ctx = opaque;

    if (hwmap->unmap)
        hwmap->unmap(ctx, hwmap);

    av_frame_free(&hwmap->source);

    av_buffer_unref(&hwmap->hw_frames_ctx);

    av_free(hwmap);
}

int ff_hwframe_map_create(AVBufferRef *hwframe_ref,
                          AVFrame *dst, const AVFrame *src,
                          void (*unmap)(AVHWFramesContext *ctx,
                                        HWMapDescriptor *hwmap),
                          void *priv)
{
    AVHWFramesContext *ctx = (AVHWFramesContext*)hwframe_ref->data;
    HWMapDescriptor *hwmap;
    int ret;

    hwmap = av_mallocz(sizeof(*hwmap));
    if (!hwmap) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    hwmap->source = av_frame_alloc();
    if (!hwmap->source) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    ret = av_frame_ref(hwmap->source, src);
    if (ret < 0)
        goto fail;

    hwmap->hw_frames_ctx = av_buffer_ref(hwframe_ref);
    if (!hwmap->hw_frames_ctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    hwmap->unmap = unmap;
    hwmap->priv  = priv;

    dst->buf[0] = av_buffer_create((uint8_t*)hwmap, sizeof(*hwmap),
                                   ff_hwframe_unmap, ctx, 0);
    if (!dst->buf[0]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    return 0;

fail:
    if (hwmap) {
        av_buffer_unref(&hwmap->hw_frames_ctx);
        av_frame_free(&hwmap->source);
    }
    av_free(hwmap);
    return ret;
}

int av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags)
{
    AVBufferRef       *orig_dst_frames = dst->hw_frames_ctx;
    enum AVPixelFormat orig_dst_fmt    = dst->format;
    AVHWFramesContext *src_frames, *dst_frames;
    HWMapDescriptor *hwmap;
    int ret;

    if (src->hw_frames_ctx && dst->hw_frames_ctx) {
        src_frames = (AVHWFramesContext*)src->hw_frames_ctx->data;
        dst_frames = (AVHWFramesContext*)dst->hw_frames_ctx->data;

        if ((src_frames == dst_frames &&
             src->format == dst_frames->sw_format &&
             dst->format == dst_frames->format)    ||
            (src_frames->internal->source_frames &&
             src_frames->internal->source_frames->data ==
                 (uint8_t*)dst_frames)) {
            // This is an unmap operation. We don't need to directly
            // do anything here other than fill in the original frame,
            // because the real unmap will be invoked when the last
            // reference to the mapped frame disappears.
            if (!src->buf[0]) {
                av_log(src_frames, AV_LOG_ERROR, "Invalid mapping "
                       "found when attempting unmap.\n");
                return AVERROR(EINVAL);
            }
            hwmap = (HWMapDescriptor*)src->buf[0]->data;
            av_frame_unref(dst);
            return av_frame_ref(dst, hwmap->source);
        }
    }

    if (src->hw_frames_ctx) {
        src_frames = (AVHWFramesContext*)src->hw_frames_ctx->data;

        if (src_frames->format == src->format &&
            src_frames->internal->hw_type->map_from) {
            ret = src_frames->internal->hw_type->map_from(src_frames,
                                                          dst, src, flags);
            if (ret >= 0)
                return ret;
            else if (ret != AVERROR(ENOSYS))
                goto fail;
        }
    }

    if (dst->hw_frames_ctx) {
        /* ... (mirror image of the block above, using the dst side's
         *      map_to() callback) ... */
    }

    ret = AVERROR(ENOSYS);

fail:
    // if the caller provided dst frames context, it should be preserved
    // by this function
    av_assert0(orig_dst_frames == NULL ||
               orig_dst_frames == dst->hw_frames_ctx);

    // preserve user-provided dst frame fields, but clean
    // anything we might have set
    dst->hw_frames_ctx = NULL;
    av_frame_unref(dst);
    dst->hw_frames_ctx = orig_dst_frames;
    dst->format        = orig_dst_fmt;

    return ret;
}
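/*
 * Usage sketch (not part of hwcontext.c): mapping a hardware frame for CPU
 * access with av_hwframe_map(), as implemented above. Whether a zero-copy
 * mapping is possible, and which software formats can be exposed, depends on
 * the hardware backend; the NV12 choice below is only an assumption.
 */
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>

static AVFrame *map_for_reading(const AVFrame *hw_frame)
{
    AVFrame *mapped = av_frame_alloc();
    if (!mapped)
        return NULL;

    /* request the software format we want the mapping exposed as
     * (assumed NV12 here; backend-specific) */
    mapped->format = AV_PIX_FMT_NV12;

    if (av_hwframe_map(mapped, hw_frame, AV_HWFRAME_MAP_READ) < 0) {
        av_frame_free(&mapped);
        return NULL;
    }
    return mapped;
}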
int av_hwframe_ctx_create_derived(AVBufferRef **derived_frame_ctx,
                                  enum AVPixelFormat format,
                                  AVBufferRef *derived_device_ctx,
                                  AVBufferRef *source_frame_ctx,
                                  int flags)
{
    AVBufferRef *dst_ref = NULL;
    AVHWFramesContext *dst = NULL;
    AVHWFramesContext *src = (AVHWFramesContext*)source_frame_ctx->data;
    int ret;

    if (src->internal->source_frames) {
        AVHWFramesContext *src_src =
            (AVHWFramesContext*)src->internal->source_frames->data;
        AVHWDeviceContext *dst_dev =
            (AVHWDeviceContext*)derived_device_ctx->data;

        if (src_src->device_ctx == dst_dev) {
            // This is actually an unmapping, so we just return a
            // reference to the source frame context.
            *derived_frame_ctx =
                av_buffer_ref(src->internal->source_frames);
            if (!*derived_frame_ctx) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            return 0;
        }
    }

    dst_ref = av_hwframe_ctx_alloc(derived_device_ctx);
    if (!dst_ref) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    dst = (AVHWFramesContext*)dst_ref->data;

    dst->format    = format;
    dst->sw_format = src->sw_format;
    dst->width     = src->width;
    dst->height    = src->height;

    dst->internal->source_frames = av_buffer_ref(source_frame_ctx);
    if (!dst->internal->source_frames) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    dst->internal->source_allocation_map_flags =
        flags & (AV_HWFRAME_MAP_READ      |
                 AV_HWFRAME_MAP_WRITE     |
                 AV_HWFRAME_MAP_OVERWRITE |
                 AV_HWFRAME_MAP_DIRECT);

    ret = 0;
    if (src->internal->hw_type->frames_derive_from)
        ret = src->internal->hw_type->frames_derive_from(dst, src, flags);
    /* ... (the dst side's frames_derive_to() callback may be consulted as
     *      well; ENOSYS is ignored, other errors go to fail:) ... */

    *derived_frame_ctx = dst_ref;
    return 0;

fail:
    if (dst)
        av_buffer_unref(&dst->internal->source_frames);
    av_buffer_unref(&dst_ref);
    return ret;
}
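/*
 * Usage sketch (not part of hwcontext.c): exposing an existing frames
 * context on a derived device with av_hwframe_ctx_create_derived(),
 * implemented above. The OpenCL pixel format and the AV_HWFRAME_MAP_READ
 * mapping flag are illustrative assumptions.
 */
#include <libavutil/hwcontext.h>

static AVBufferRef *derive_frames(AVBufferRef *derived_device_ref,
                                  AVBufferRef *source_frames_ref)
{
    AVBufferRef *derived_frames = NULL;
    if (av_hwframe_ctx_create_derived(&derived_frames, AV_PIX_FMT_OPENCL,
                                      derived_device_ref, source_frames_ref,
                                      AV_HWFRAME_MAP_READ) < 0)
        return NULL;
    return derived_frames;
}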
int ff_hwframe_map_replace(AVFrame *dst, const AVFrame *src)
{
    HWMapDescriptor *hwmap = (HWMapDescriptor*)dst->buf[0]->data;
    av_frame_unref(hwmap->source);
    return av_frame_ref(hwmap->source, src);
}
Symbols referenced in this file (Doxygen cross-reference summaries):

static void hwframe_ctx_free(void *opaque, uint8_t *data)
AVPixelFormat
Pixel format.
AVBufferRef * source_device
For a derived device, a reference to the original device context it was derived from.
@ AV_HWFRAME_TRANSFER_DIRECTION_FROM
Transfer the data from the queried hw frame.
AVFrame * source
A reference to the original source of the mapping.
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
static int transfer_data_alloc(AVFrame *dst, const AVFrame *src, int flags)
void * av_hwdevice_hwconfig_alloc(AVBufferRef *ref)
Allocate a HW-specific configuration structure for a given HW device.
uint8_t * data
The data buffer.
static void ff_hwframe_unmap(void *opaque, uint8_t *data)
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
static void hwdevice_ctx_free(void *opaque, uint8_t *data)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
This structure describes decoded (raw) audio or video data.
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
const HWContextType ff_hwcontext_type_qsv
int av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags)
Map a hardware frame.
const HWContextType ff_hwcontext_type_drm
@ AV_HWDEVICE_TYPE_MEDIACODEC
int(* map_to)(AVHWFramesContext *ctx, AVFrame *dst, const AVFrame *src, int flags)
const HWContextType ff_hwcontext_type_vdpau
enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name)
Look up an AVHWDeviceType by name.
const HWContextType ff_hwcontext_type_vaapi
int(* map_from)(AVHWFramesContext *ctx, AVFrame *dst, const AVFrame *src, int flags)
enum AVHWDeviceType av_hwdevice_iterate_types(enum AVHWDeviceType prev)
Iterate over supported device types.
AVHWDeviceInternal * internal
Private data used internally by libavutil.
@ AV_HWFRAME_MAP_DIRECT
The mapping must be direct.
AVHWFramesInternal * internal
Private data used internally by libavutil.
AVBufferRef * hw_frames_ctx
A reference to the hardware frames context in which this mapping was made.
int ff_hwframe_map_create(AVBufferRef *hwframe_ref, AVFrame *dst, const AVFrame *src, void(*unmap)(AVHWFramesContext *ctx, HWMapDescriptor *hwmap), void *priv)
@ AV_HWDEVICE_TYPE_VIDEOTOOLBOX
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
void * priv
Hardware-specific private data associated with the mapping.
int width
The allocated dimensions of the frames in this pool.
AVHWFramesConstraints * av_hwdevice_get_hwframe_constraints(AVBufferRef *ref, const void *hwconfig)
Get the constraints on HW frames given a device and the HW-specific configuration to be used with tha...
int av_hwdevice_ctx_init(AVBufferRef *ref)
Finalize the device context before use.
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
AVBufferRef * source_frames
For a derived context, a reference to the original frames context it was derived from.
@ AV_HWDEVICE_TYPE_VULKAN
This struct describes the constraints on hardware frames attached to a given device with a hardware-s...
const HWContextType ff_hwcontext_type_d3d11va
int(* device_derive)(AVHWDeviceContext *dst_ctx, AVHWDeviceContext *src_ctx, AVDictionary *opts, int flags)
@ AV_HWDEVICE_TYPE_D3D11VA
int av_hwdevice_ctx_create_derived_opts(AVBufferRef **dst_ref_ptr, enum AVHWDeviceType type, AVBufferRef *src_ref, AVDictionary *options, int flags)
Create a new device of the specified type from an existing device.
const HWContextType ff_hwcontext_type_mediacodec
int source_allocation_map_flags
Flags to apply to the mapping from the source to the derived frame context when trying to allocate in...
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static int hwframe_pool_prealloc(AVBufferRef *ref)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
const HWContextType ff_hwcontext_type_dxva2
#define FF_ARRAY_ELEMS(a)
static const char *const hw_type_names[]
AVBufferRef * av_hwdevice_ctx_alloc(enum AVHWDeviceType type)
Allocate an AVHWDeviceContext for a given hardware type.
static const HWContextType *const hw_table[]
void av_hwframe_constraints_free(AVHWFramesConstraints **constraints)
Free an AVHWFrameConstraints structure.
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int(* device_create)(AVHWDeviceContext *ctx, const char *device, AVDictionary *opts, int flags)
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
const HWContextType ff_hwcontext_type_videotoolbox
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
const char * av_default_item_name(void *ptr)
Return the context name.
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
int av_hwframe_ctx_create_derived(AVBufferRef **derived_frame_ctx, enum AVPixelFormat format, AVBufferRef *derived_device_ctx, AVBufferRef *source_frame_ctx, int flags)
Create and initialise an AVHWFramesContext as a mapping of another existing AVHWFramesContext on a di...
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
int(* frames_derive_to)(AVHWFramesContext *dst_ctx, AVHWFramesContext *src_ctx, int flags)
@ AV_HWDEVICE_TYPE_OPENCL
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
const HWContextType ff_hwcontext_type_cuda
const HWContextType * hw_type
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
static const AVClass hwframe_ctx_class
static const AVClass hwdevice_ctx_class
const HWContextType ff_hwcontext_type_vulkan
@ AV_HWFRAME_MAP_READ
The mapping must be readable.
int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr, enum AVHWDeviceType type, AVBufferRef *src_ref, int flags)
Create a new device of the specified type from an existing device.
size_t device_hwctx_size
size of the public hardware-specific context, i.e.
void(* unmap)(AVHWFramesContext *ctx, struct HWMapDescriptor *hwmap)
Unmap function.
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
const HWContextType * hw_type
@ AV_HWFRAME_MAP_WRITE
The mapping must be writeable.
int ff_hwframe_map_replace(AVFrame *dst, const AVFrame *src)
Replace the current hwmap of dst with the one from src, used for indirect mappings like VAAPI->(DRM)-...
void * av_calloc(size_t nmemb, size_t size)
AVHWFrameTransferDirection
This struct describes a set or pool of "hardware" frames (i.e.
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
AVHWDeviceContext * device_ctx
The parent AVHWDeviceContext.
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
const AVClass * av_class
A class for logging and AVOptions.
int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags)
Copy data to or from a hw surface.
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame.
@ AV_HWFRAME_MAP_OVERWRITE
The mapped frame will be overwritten completely in subsequent operations, so the current frame data n...
size_t device_priv_size
size of the private data, i.e.
int av_hwframe_transfer_get_formats(AVBufferRef *hwframe_ref, enum AVHWFrameTransferDirection dir, enum AVPixelFormat **formats, int flags)
Get a list of possible source or target formats usable in av_hwframe_transfer_data().
A reference to a data buffer.
int(* transfer_data_from)(AVHWFramesContext *ctx, AVFrame *dst, const AVFrame *src)
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
const HWContextType ff_hwcontext_type_opencl
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.