1 /*
2 * This file is part of FFmpeg.
3 *
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
8 *
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include "config.h"
20
21 #if HAVE_VAAPI_WIN32
22 # include <windows.h>
23 #define COBJMACROS
24 # include <initguid.h>
25 # include <dxgi1_2.h>
27 # include <va/va_win32.h>
29 #endif
30 #if HAVE_VAAPI_X11
31 # include <va/va_x11.h>
32 #endif
33 #if HAVE_VAAPI_DRM
34 # include <va/va_drm.h>
35 #endif
36
37 #if CONFIG_LIBDRM
38 # include <va/va_drmcommon.h>
39 # include <xf86drm.h>
40 # include <drm_fourcc.h>
41 # ifndef DRM_FORMAT_MOD_INVALID
42 # define DRM_FORMAT_MOD_INVALID ((1ULL << 56) - 1)
43 # endif
44 #endif
45
46 #include <fcntl.h>
47 #if HAVE_UNISTD_H
48 # include <unistd.h>
49 #endif
50
51
62
63
65 #if HAVE_VAAPI_X11
66 Display *x11_display;
67 #endif
68
71
76
78 /**
79 * The public AVVAAPIDeviceContext. See hwcontext_vaapi.h for it.
80 */
82
83 // Surface formats which can be used with this device.
87
89 /**
90 * The public AVVAAPIFramesContext. See hwcontext_vaapi.h for it.
91 */
93
94 // Surface attributes set at create time.
97 // RT format of the underlying surface (Intel driver ignores this anyway).
99 // Whether vaDeriveImage works.
101 // Caches whether VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2 is unsupported for
102 // surface imports.
105
107 // Handle to the derived or copied image which is mapped.
109 // The mapping flags actually used.
112
118 } VAAPIFormatDescriptor;
119
120 #define MAP(va, rt, av, swap_uv) { \
121 VA_FOURCC_ ## va, \
122 VA_RT_FORMAT_ ## rt, \
123 AV_PIX_FMT_ ## av, \
124 swap_uv, \
125 }
// The map fourcc <-> pix_fmt isn't bijective because of the annoying U/V
// plane swap cases. The frame handling below tries to hide these.
static const VAAPIFormatDescriptor vaapi_format_map[] = {
    MAP(NV12, YUV420,  NV12, 0),
#ifdef VA_FOURCC_I420
    MAP(I420, YUV420,  YUV420P, 0),
#endif
    MAP(YV12, YUV420,  YUV420P, 1),
    MAP(IYUV, YUV420,  YUV420P, 0),
    MAP(422H, YUV422,  YUV422P, 0),
#ifdef VA_FOURCC_YV16
    MAP(YV16, YUV422,  YUV422P, 1),
#endif
    MAP(UYVY, YUV422,  UYVY422, 0),
    MAP(YUY2, YUV422,  YUYV422, 0),
#ifdef VA_FOURCC_Y210
    MAP(Y210, YUV422_10,  Y210, 0),
#endif
#ifdef VA_FOURCC_Y212
    MAP(Y212, YUV422_12,  Y212, 0),
#endif
    MAP(411P, YUV411,  YUV411P, 0),
    MAP(422V, YUV422,  YUV440P, 0),
    MAP(444P, YUV444,  YUV444P, 0),
#ifdef VA_FOURCC_XYUV
    MAP(XYUV, YUV444,  VUYX, 0),
#endif
    MAP(Y800, YUV400,  GRAY8, 0),
#ifdef VA_FOURCC_P010
    MAP(P010, YUV420_10BPP, P010, 0),
#endif
#ifdef VA_FOURCC_P012
    MAP(P012, YUV420_12, P012, 0),
#endif
    MAP(BGRA, RGB32,   BGRA, 0),
    MAP(BGRX, RGB32,   BGR0, 0),
    MAP(RGBX, RGB32,   RGB0, 0),
#ifdef VA_FOURCC_ABGR
    MAP(ABGR, RGB32,   ABGR, 0),
    MAP(XBGR, RGB32,   0BGR, 0),
#endif
    MAP(ARGB, RGB32,   ARGB, 0),
    MAP(XRGB, RGB32,   0RGB, 0),
#ifdef VA_FOURCC_X2R10G10B10
    MAP(X2R10G10B10, RGB32_10, X2RGB10, 0),
#endif
#ifdef VA_FOURCC_Y410
    // libva doesn't include a fourcc for XV30 and the driver only declares
    // support for Y410, so we must fudge the mapping here.
    MAP(Y410, YUV444_10, XV30, 0),
#endif
#ifdef VA_FOURCC_Y412
    // libva doesn't include a fourcc for XV36 and the driver only declares
    // support for Y412, so we must fudge the mapping here.
    MAP(Y412, YUV444_12, XV36, 0),
#endif
};
#undef MAP
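
/*
 * Illustrative sketch (assumed usage, not part of the original file): the
 * lookup helpers that follow are only partially preserved, so this shows
 * how a table like vaapi_format_map is typically searched.  The helper
 * name is hypothetical; FF_ARRAY_ELEMS() comes from libavutil/macros.h.
 */
#if 0
static const VAAPIFormatDescriptor *sketch_format_from_fourcc(unsigned int fourcc)
{
    int i;
    // Linear search is sufficient: the table only has a few dozen entries.
    for (i = 0; i < FF_ARRAY_ELEMS(vaapi_format_map); i++) {
        if (vaapi_format_map[i].fourcc == fourcc)
            return &vaapi_format_map[i];
    }
    return NULL;
}
#endif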
185
186 static const VAAPIFormatDescriptor *
188 {
194 }
195
196 static const VAAPIFormatDescriptor *
198 {
204 }
205
207 {
    const VAAPIFormatDescriptor *desc;
211 return desc->pix_fmt;
212 else
214 }
215
218 VAImageFormat **image_format)
219 {

    for (i = 0; i < ctx->nb_formats; i++) {
        if (ctx->formats[i].pix_fmt == pix_fmt) {
            if (image_format)
                *image_format = &ctx->formats[i].image_format;
            return 0;
        }
    }
    return AVERROR(EINVAL);
}
232
234 const void *hwconfig,
236 {
    VASurfaceAttrib *attr_list = NULL;
    VAStatus vas;
    int err, i, j, attr_count, pix_fmt_count;
245
    attr_count = 0;
    vas = vaQuerySurfaceAttributes(hwctx->display, config->config_id,
                                   0, &attr_count);
251 if (vas != VA_STATUS_SUCCESS) {
253 "%d (%s).\n", vas, vaErrorStr(vas));
256 }
257
    attr_list = av_malloc(attr_count * sizeof(*attr_list));
259 if (!attr_list) {
262 }
263
    vas = vaQuerySurfaceAttributes(hwctx->display, config->config_id,
                                   attr_list, &attr_count);
266 if (vas != VA_STATUS_SUCCESS) {
268 "%d (%s).\n", vas, vaErrorStr(vas));
271 }
272
    pix_fmt_count = 0;
    for (i = 0; i < attr_count; i++) {
        switch (attr_list[i].type) {
        case VASurfaceAttribPixelFormat:
            fourcc = attr_list[i].value.value.i;
                ++pix_fmt_count;
            } else {
                // Something unsupported - ignore.
            }
            break;
        case VASurfaceAttribMinWidth:
            constraints->min_width  = attr_list[i].value.value.i;
            break;
        case VASurfaceAttribMinHeight:
            constraints->min_height = attr_list[i].value.value.i;
            break;
        case VASurfaceAttribMaxWidth:
            constraints->max_width  = attr_list[i].value.value.i;
            break;
        case VASurfaceAttribMaxHeight:
            constraints->max_height = attr_list[i].value.value.i;
            break;
        }
    }
299 if (pix_fmt_count == 0) {
300 // Nothing usable found. Presumably there exists something which
301 // works, so leave the set null to indicate unknown.
303 } else {
309 }
310
    for (i = j = 0; i < attr_count; i++) {
        int k;

        if (attr_list[i].type != VASurfaceAttribPixelFormat)
            continue;
        fourcc = attr_list[i].value.value.i;

            continue;

        for (k = 0; k < j; k++) {
                break;
        }

        if (k == j)
    }
331 }
332 } else {
333 // No configuration supplied.
334 // Return the full set of image formats known by the implementation.
340 }
        for (i = j = 0; i < ctx->nb_formats; i++) {
342 int k;
343
344 for (k = 0; k < j; k++) {
346 break;
347 }
348
349 if (k == j)
351 }
352
354 }
355
360 }
363
364 err = 0;
367 return err;
368 }
369
370 static const struct {
375 #if !VA_CHECK_VERSION(1, 0, 0)
376 // The i965 driver did not conform before version 2.0.
377 {
378 "Intel i965 (Quick Sync)",
379 "i965",
381 },
382 #endif
383 {
384 "Intel iHD",
385 "ubit",
387 },
388 {
389 "VDPAU wrapper",
390 "Splitted-Desktop Systems VDPAU backend for VA-API",
392 },
393 };
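
/*
 * Illustrative sketch (assumption, not the original code): a table like the
 * one above is normally consulted by substring-matching the string returned
 * by vaQueryVendorString() against each entry, then copying that entry's
 * quirk flags into AVVAAPIDeviceContext.driver_quirks.  The table and field
 * names used here (vaapi_driver_quirks_table, match_string, quirks) are
 * assumptions; only the vendor strings themselves are preserved above.
 */
#if 0
    const char *vendor = vaQueryVendorString(hwctx->display);
    for (i = 0; i < FF_ARRAY_ELEMS(vaapi_driver_quirks_table); i++) {
        if (vendor && strstr(vendor, vaapi_driver_quirks_table[i].match_string)) {
            hwctx->driver_quirks = vaapi_driver_quirks_table[i].quirks;
            break;
        }
    }
#endif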
394
396 {
    VAImageFormat *image_list = NULL;
    VAStatus vas;
    const char *vendor_string;
    int err, i, image_count;

    image_count = vaMaxNumImageFormats(hwctx->display);
    if (image_count <= 0) {
    }
    image_list = av_malloc(image_count * sizeof(*image_list));
    if (!image_list) {
    }
    vas = vaQueryImageFormats(hwctx->display, image_list, &image_count);
417 if (vas != VA_STATUS_SUCCESS) {
420 }
421
426 }
    for (i = 0; i < image_count; i++) {
        } else {
            ctx->formats[ctx->nb_formats].image_format = image_list[i];
        }
441 }
442
    vendor_string = vaQueryVendorString(hwctx->display);
444 if (vendor_string)
446
450 } else {
451 // Detect the driver in use and set quirk flags if necessary.
453 if (vendor_string) {
455 if (strstr(vendor_string,
458 "as known nonstandard driver \"%s\", setting "
459 "quirks (%#x).\n",
464 break;
465 }
466 }
469 "nonstandard list, using standard behaviour.\n");
470 }
471 } else {
473 "assuming standard behaviour.\n");
474 }
475 }
476
478 return 0;
482 return err;
483 }
484
486 {
488
490 }
491
493 {
496 VASurfaceID surface_id;
497 VAStatus vas;
498
    surface_id = (VASurfaceID)(uintptr_t)data;

    vas = vaDestroySurfaces(hwctx->display, &surface_id, 1);
502 if (vas != VA_STATUS_SUCCESS) {
504 "%d (%s).\n", surface_id, vas, vaErrorStr(vas));
505 }
506 }
507
509 {
514 VASurfaceID surface_id;
515 VAStatus vas;
517
521
    vas = vaCreateSurfaces(hwctx->display, ctx->rt_format,
                           &surface_id, 1,
                           ctx->attributes, ctx->nb_attributes);
526 if (vas != VA_STATUS_SUCCESS) {
528 "%d (%s).\n", vas, vaErrorStr(vas));
530 }
532
        vaDestroySurfaces(hwctx->display, &surface_id, 1);
539 }
540
542 // This is a fixed-size pool, so we must still be in the initial
543 // allocation sequence.
547 }
548
550 }
551
553 {
    const VAAPIFormatDescriptor *desc;
558 VAImageFormat *expected_format;
560 VASurfaceID test_surface_id;
561 VAImage test_image;
562 VAStatus vas;
564
570 }
571
575 int need_pixel_format = 1;
            if (avfc->attributes[i].type == VASurfaceAttribMemoryType)
                need_memory_type  = 0;
            if (avfc->attributes[i].type == VASurfaceAttribPixelFormat)
                need_pixel_format = 0;
581 }
584
                                   sizeof(*ctx->attributes));
        if (!ctx->attributes) {
590 }
591
        if (need_memory_type) {
            ctx->attributes[i++] = (VASurfaceAttrib) {
                .type          = VASurfaceAttribMemoryType,
                .flags         = VA_SURFACE_ATTRIB_SETTABLE,
                .value.type    = VAGenericValueTypeInteger,
                .value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_VA,
            };
        }
        if (need_pixel_format) {
            ctx->attributes[i++] = (VASurfaceAttrib) {
                .type          = VASurfaceAttribPixelFormat,
                .flags         = VA_SURFACE_ATTRIB_SETTABLE,
                .value.type    = VAGenericValueTypeInteger,
                .value.value.i = desc->fourcc,
            };
        }
611 } else {
613 ctx->nb_attributes = 0;
614 }
615
    ctx->rt_format = desc->rt_format;
617
619 // This pool will be usable as a render target, so we need to store
620 // all of the surface IDs somewhere that vaCreateContext() calls
621 // will be able to access them.
628 }
629 } else {
630 // This pool allows dynamic sizing, and will not be usable as a
631 // render target.
634 }
635
643 }
644 }
645
646 // Allocate a single surface to test whether vaDeriveImage() is going
647 // to work for the specific configuration.
650 if (!test_surface) {
652 "user-configured buffer pool.\n");
655 }
656 } else {
658 if (!test_surface) {
660 "internal buffer pool.\n");
663 }
664 }
    test_surface_id = (VASurfaceID)(uintptr_t)test_surface->data;
666
667 ctx->derive_works = 0;
668
671 if (err == 0) {
        vas = vaDeriveImage(hwctx->display, test_surface_id, &test_image);
673 if (vas == VA_STATUS_SUCCESS) {
674 if (expected_format->fourcc == test_image.format.fourcc) {
676 ctx->derive_works = 1;
677 } else {
679 "derived image format %08x does not match "
680 "expected format %08x.\n",
681 expected_format->fourcc, test_image.format.fourcc);
682 }
            vaDestroyImage(hwctx->display, test_image.image_id);
684 } else {
686 "deriving image does not work: "
687 "%d (%s).\n", vas, vaErrorStr(vas));
688 }
689 } else {
691 "image format is not supported.\n");
692 }
693
695 return 0;
696
701 return err;
702 }
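
/*
 * Illustrative sketch (assumed usage, not part of the original file): how a
 * VAAPI frames context that reaches the init function above is normally set
 * up through the public libavutil API.  The variable names and the
 * 1920x1080 / NV12 / 20-surface values are illustrative only.
 */
#if 0
    AVBufferRef *frames_ref = av_hwframe_ctx_alloc(device_ref);
    AVHWFramesContext *fc = (AVHWFramesContext *)frames_ref->data;
    fc->format    = AV_PIX_FMT_VAAPI;
    fc->sw_format = AV_PIX_FMT_NV12;
    fc->width     = 1920;
    fc->height    = 1080;
    fc->initial_pool_size = 20;  // nonzero: fixed pool, usable as a render target
    if (av_hwframe_ctx_init(frames_ref) < 0)
        av_buffer_unref(&frames_ref);
#endif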
703
705 {
708
711 }
712
714 {
718
723
724 return 0;
725 }
726
730 {
733 int i, k, sw_format_available;
734
735 sw_format_available = 0;
    for (i = 0; i < ctx->nb_formats; i++) {
738 sw_format_available = 1;
739 }
740
744
745 if (sw_format_available) {
747 k = 1;
748 } else {
749 k = 0;
750 }
    for (i = 0; i < ctx->nb_formats; i++) {
753 continue;
756 }
758
760 return 0;
761 }
762
765 {
768 VASurfaceID surface_id;
769 VAStatus vas;
770
    surface_id = (VASurfaceID)(uintptr_t)hwmap->source->data[3];

    vas = vaUnmapBuffer(hwctx->display, map->image.buf);
775 if (vas != VA_STATUS_SUCCESS) {
777 "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
778 }
779
        vas = vaPutImage(hwctx->display, surface_id, map->image.image_id,
785 if (vas != VA_STATUS_SUCCESS) {
787 "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
788 }
789 }
790
    vas = vaDestroyImage(hwctx->display, map->image.image_id);
792 if (vas != VA_STATUS_SUCCESS) {
794 "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
795 }
796
798 }
799
802 {
805 VASurfaceID surface_id;
    const VAAPIFormatDescriptor *desc;
    VAImageFormat *image_format;
    VAStatus vas;
    void *address = NULL;

    surface_id = (VASurfaceID)(uintptr_t)src->data[3];
815
817 // Requested direct mapping but it is not possible.
819 }
823 // Requested direct mapping but the formats do not match.
825 }
826
828 if (err < 0) {
829 // Requested format is not a valid output format.
830 return err;
831 }
832
837 map->image.image_id = VA_INVALID_ID;
838
    vas = vaSyncSurface(hwctx->display, surface_id);
840 if (vas != VA_STATUS_SUCCESS) {
842 "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
845 }
846
847 // The memory which we map using derive need not be connected to the CPU
848 // in a way conducive to fast access. On Gen7-Gen9 Intel graphics, the
849 // memory is mappable but not cached, so normal memcpy()-like access is
850 // very slow to read it (but writing is ok). It is possible to read much
851 // faster with a copy routine which is aware of the limitation, but we
852 // assume for now that the user is not aware of that and would therefore
853 // prefer not to be given direct-mapped memory if they request read access.
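    /*
     * Hedged note (assumption): the exact condition used below is not
     * preserved here, but per the comment above the effective choice is
     * "use vaDeriveImage() only when derive_works is set and the caller
     * either asked for AV_HWFRAME_MAP_DIRECT or did not ask for
     * AV_HWFRAME_MAP_READ"; otherwise a copy through vaCreateImage() and
     * vaGetImage() is used instead.
     */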
        vas = vaDeriveImage(hwctx->display, surface_id, &map->image);
857 if (vas != VA_STATUS_SUCCESS) {
859 "surface %#x: %d (%s).\n",
860 surface_id, vas, vaErrorStr(vas));
863 }
        if (map->image.format.fourcc != image_format->fourcc) {
866 "is in wrong format: expected %#08x, got %#08x.\n",
867 surface_id, image_format->fourcc,
map->image.format.fourcc);
870 }
872 } else {
873 vas = vaCreateImage(hwctx->
display, image_format,
875 if (vas != VA_STATUS_SUCCESS) {
877 "surface %#x: %d (%s).\n",
878 surface_id, vas, vaErrorStr(vas));
881 }
883 vas = vaGetImage(hwctx->
display, surface_id, 0, 0,
885 if (vas != VA_STATUS_SUCCESS) {
887 "surface %#x: %d (%s).\n",
888 surface_id, vas, vaErrorStr(vas));
891 }
892 }
893 }
894
    vas = vaMapBuffer(hwctx->display, map->image.buf, &address);
896 if (vas != VA_STATUS_SUCCESS) {
898 "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
901 }
902
905 if (err < 0)
907
910
    for (i = 0; i < map->image.num_planes; i++) {
        dst->data[i] = (uint8_t*)address + map->image.offsets[i];
        dst->linesize[i] = map->image.pitches[i];
    }
915
    if (desc && desc->chroma_planes_swapped) {
918 // Chroma planes are YVU rather than YUV, so swap them.
920 }
921
922 return 0;
923
    if (address)
        vaUnmapBuffer(hwctx->display, map->image.buf);
    if (map->image.image_id != VA_INVALID_ID)
        vaDestroyImage(hwctx->display, map->image.image_id);
931 }
932 return err;
933 }
934
937 {
939 int err;
940
943
948
950 if (err)
952
955
957 if (err)
959
960 err = 0;
963 return err;
964 }
965
968 {
970 int err;
971
974
978 map->format =
src->format;
979
981 if (err)
983
985 map->height =
src->height;
986
988 if (err)
990
991 err = 0;
994 return err;
995 }
996
999 {
1000 int err;
1001
1004 if (err < 0)
1005 return err;
1006 }
1007
1009 if (err)
1010 return err;
1011
1013 if (err)
1014 return err;
1015
1016 return 0;
1017 }
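
/*
 * Illustrative sketch (assumed usage, not part of the original file): the
 * CPU transfer path implemented above, driven through the public API.
 * Variable names and the NV12 choice are illustrative.
 */
#if 0
    AVFrame *sw_frame = av_frame_alloc();
    sw_frame->format = AV_PIX_FMT_NV12;
    if (av_hwframe_transfer_data(sw_frame, hw_frame, 0) < 0)
        av_log(NULL, AV_LOG_ERROR, "Failed to download VAAPI surface.\n");
    av_frame_free(&sw_frame);
#endif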
1018
1019 #if CONFIG_LIBDRM
1020
1021 #define DRM_MAP(va, layers, ...) { \
1022 VA_FOURCC_ ## va, \
1023 layers, \
1024 { __VA_ARGS__ } \
1025 }
1026 static const struct {
1027 uint32_t va_fourcc;
1028 int nb_layer_formats;
1030 } vaapi_drm_format_map[] = {
1031 #ifdef DRM_FORMAT_R8
1032 DRM_MAP(NV12, 2, DRM_FORMAT_R8, DRM_FORMAT_RG88),
1033 DRM_MAP(NV12, 2, DRM_FORMAT_R8, DRM_FORMAT_GR88),
1034 #endif
1035 DRM_MAP(NV12, 1, DRM_FORMAT_NV12),
1036 #if defined(VA_FOURCC_P010) && defined(DRM_FORMAT_R16)
1037 DRM_MAP(P010, 2, DRM_FORMAT_R16, DRM_FORMAT_RG1616),
1038 #endif
1039 #if defined(VA_FOURCC_P012) && defined(DRM_FORMAT_R16)
1040 DRM_MAP(P012, 2, DRM_FORMAT_R16, DRM_FORMAT_RG1616),
1041 #endif
1042 DRM_MAP(BGRA, 1, DRM_FORMAT_ARGB8888),
1043 DRM_MAP(BGRX, 1, DRM_FORMAT_XRGB8888),
    DRM_MAP(RGBA, 1, DRM_FORMAT_ABGR8888),
1045 DRM_MAP(RGBX, 1, DRM_FORMAT_XBGR8888),
1046 #ifdef VA_FOURCC_ABGR
1047 DRM_MAP(ABGR, 1, DRM_FORMAT_RGBA8888),
1048 DRM_MAP(XBGR, 1, DRM_FORMAT_RGBX8888),
1049 #endif
1050 DRM_MAP(ARGB, 1, DRM_FORMAT_BGRA8888),
1051 DRM_MAP(XRGB, 1, DRM_FORMAT_BGRX8888),
1052 #if defined(VA_FOURCC_XYUV) && defined(DRM_FORMAT_XYUV8888)
1053 DRM_MAP(XYUV, 1, DRM_FORMAT_XYUV8888),
1054 #endif
#if defined(VA_FOURCC_Y410) && defined(DRM_FORMAT_XVYU2101010)
    DRM_MAP(Y410, 1, DRM_FORMAT_XVYU2101010),
#endif
1058 #if defined(VA_FOURCC_Y412) && defined(DRM_FORMAT_XVYU12_16161616)
1059 DRM_MAP(Y412, 1, DRM_FORMAT_XVYU12_16161616),
1060 #endif
1061 #if defined(VA_FOURCC_X2R10G10B10) && defined(DRM_FORMAT_XRGB2101010)
1062 DRM_MAP(X2R10G10B10, 1, DRM_FORMAT_XRGB2101010),
1063 #endif
1064 };
1065 #undef DRM_MAP
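
// Entries above are matched against an incoming AVDRMFrameDescriptor by
// comparing the layer count and then each layer's DRM format in order (see
// the import loop below).  This is why NV12 appears three times: the
// two-plane R8+RG88 / R8+GR88 layouts and the single-plane NV12 layout all
// map to the same VAAPI fourcc.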
1066
1069 {
1071
    VASurfaceID surface_id = (VASurfaceID)(uintptr_t)hwmap->priv;
1073
1075
1076 vaDestroySurfaces(dst_dev->
display, &surface_id, 1);
1077 }
1078
1081 {
1082 #if VA_CHECK_VERSION(1, 1, 0)
1084 int use_prime2;
1085 #else
1086 int k;
1087 #endif
1092 const VAAPIFormatDescriptor *format_desc;
1093 VASurfaceID surface_id;
1094 VAStatus vas = VA_STATUS_SUCCESS;
1095 uint32_t va_fourcc;
1097
1098 #if !VA_CHECK_VERSION(1, 1, 0)
1099 unsigned long buffer_handle;
1100 VASurfaceAttribExternalBuffers buffer_desc;
1101 VASurfaceAttrib attrs[2] = {
1102 {
1103 .type = VASurfaceAttribMemoryType,
1104 .flags = VA_SURFACE_ATTRIB_SETTABLE,
1105 .value.type = VAGenericValueTypeInteger,
1106 .value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME,
1107 },
1108 {
1109 .type = VASurfaceAttribExternalBufferDescriptor,
1110 .flags = VA_SURFACE_ATTRIB_SETTABLE,
1111 .value.type = VAGenericValueTypePointer,
1112 .value.value.p = &buffer_desc,
1113 }
1114 };
1115 #endif
1116
1118
    if (desc->nb_objects != 1) {
1121 "made from a single DRM object.\n");
1123 }
1124
1125 va_fourcc = 0;
        if (desc->nb_layers != vaapi_drm_format_map[i].nb_layer_formats)
            continue;
        for (j = 0; j < desc->nb_layers; j++) {
            if (desc->layers[j].format !=
                vaapi_drm_format_map[i].layer_formats[j])
                break;
        }
        if (j != desc->nb_layers)
            continue;
        va_fourcc = vaapi_drm_format_map[i].va_fourcc;
        break;
    }
1139 if (!va_fourcc) {
1141 "by VAAPI.\n");
1143 }
1144
1146 "%08x.\n",
desc->objects[0].fd, va_fourcc);
1147
1150
1151 #if VA_CHECK_VERSION(1, 1, 0)
1154 if (use_prime2) {
1155 VADRMPRIMESurfaceDescriptor prime_desc;
1156 VASurfaceAttrib prime_attrs[2] = {
1157 {
1158 .type = VASurfaceAttribMemoryType,
1159 .flags = VA_SURFACE_ATTRIB_SETTABLE,
1160 .value.type = VAGenericValueTypeInteger,
1161 .value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
1162 },
1163 {
1164 .type = VASurfaceAttribExternalBufferDescriptor,
1165 .flags = VA_SURFACE_ATTRIB_SETTABLE,
1166 .value.type = VAGenericValueTypePointer,
1167 .value.value.p = &prime_desc,
1168 }
1169 };
        prime_desc.fourcc = va_fourcc;
        prime_desc.width = src_fc->width;
        prime_desc.height = src_fc->height;
        prime_desc.num_objects = desc->nb_objects;
        for (i = 0; i < desc->nb_objects; ++i) {
            prime_desc.objects[i].fd = desc->objects[i].fd;
            prime_desc.objects[i].size = desc->objects[i].size;
            prime_desc.objects[i].drm_format_modifier =
                desc->objects[i].format_modifier;
        }

        prime_desc.num_layers = desc->nb_layers;
        for (i = 0; i < desc->nb_layers; ++i) {
            prime_desc.layers[i].drm_format = desc->layers[i].format;
            prime_desc.layers[i].num_planes = desc->layers[i].nb_planes;
            for (j = 0; j < desc->layers[i].nb_planes; ++j) {
                prime_desc.layers[i].object_index[j] =
                    desc->layers[i].planes[j].object_index;
                prime_desc.layers[i].offset[j] = desc->layers[i].planes[j].offset;
                prime_desc.layers[i].pitch[j] = desc->layers[i].planes[j].pitch;
            }

            if (format_desc->chroma_planes_swapped &&
                desc->layers[i].nb_planes == 3) {
                FFSWAP(uint32_t, prime_desc.layers[i].pitch[1],
                       prime_desc.layers[i].pitch[2]);
                FFSWAP(uint32_t, prime_desc.layers[i].offset[1],
                       prime_desc.layers[i].offset[2]);
            }
        }
1200
        /*
         * We could query for PRIME_2 support with vaQuerySurfaceAttributes(),
         * but that needs the config_id, which we don't have here. Both Intel
         * and Gallium seem to do the correct error checks, so let's just try
         * the PRIME_2 import first.
         */
        vas = vaCreateSurfaces(dst_dev->display, format_desc->rt_format,
                               src->width, src->height, &surface_id, 1,
1210 if (vas != VA_STATUS_SUCCESS)
1212 }
1213
1214 if (!use_prime2 || vas != VA_STATUS_SUCCESS) {
1215 int k;
1216 uintptr_t buffer_handle;
1217 VASurfaceAttribExternalBuffers buffer_desc;
1218 VASurfaceAttrib buffer_attrs[2] = {
1219 {
1220 .type = VASurfaceAttribMemoryType,
1221 .flags = VA_SURFACE_ATTRIB_SETTABLE,
1222 .value.type = VAGenericValueTypeInteger,
1223 .value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME,
1224 },
1225 {
1226 .type = VASurfaceAttribExternalBufferDescriptor,
1227 .flags = VA_SURFACE_ATTRIB_SETTABLE,
1228 .value.type = VAGenericValueTypePointer,
1229 .value.value.p = &buffer_desc,
1230 }
1231 };
1232
        buffer_handle = desc->objects[0].fd;
        buffer_desc.pixel_format = va_fourcc;
        buffer_desc.width = src_fc->width;
        buffer_desc.height = src_fc->height;
        buffer_desc.data_size = desc->objects[0].size;
        buffer_desc.buffers = &buffer_handle;
        buffer_desc.num_buffers = 1;
        buffer_desc.flags = 0;

        k = 0;
        for (i = 0; i < desc->nb_layers; i++) {
            for (j = 0; j < desc->layers[i].nb_planes; j++) {
                buffer_desc.pitches[k] = desc->layers[i].planes[j].pitch;
                buffer_desc.offsets[k] = desc->layers[i].planes[j].offset;
                ++k;
            }
        }
1250 buffer_desc.num_planes = k;
1251
1252 if (format_desc->chroma_planes_swapped &&
1253 buffer_desc.num_planes == 3) {
1254 FFSWAP(uint32_t, buffer_desc.pitches[1], buffer_desc.pitches[2]);
1255 FFSWAP(uint32_t, buffer_desc.offsets[1], buffer_desc.offsets[2]);
1256 }
1257
1258 vas = vaCreateSurfaces(dst_dev->
display, format_desc->rt_format,
1260 &surface_id, 1,
1262 }
1263 #else
    buffer_handle = desc->objects[0].fd;
    buffer_desc.pixel_format = va_fourcc;
    buffer_desc.width = src_fc->width;
    buffer_desc.height = src_fc->height;
    buffer_desc.data_size = desc->objects[0].size;
    buffer_desc.buffers = &buffer_handle;
    buffer_desc.num_buffers = 1;
    buffer_desc.flags = 0;

    k = 0;
    for (i = 0; i < desc->nb_layers; i++) {
        for (j = 0; j < desc->layers[i].nb_planes; j++) {
            buffer_desc.pitches[k] = desc->layers[i].planes[j].pitch;
            buffer_desc.offsets[k] = desc->layers[i].planes[j].offset;
            ++k;
        }
    }
1281 buffer_desc.num_planes = k;
1282
1283 if (format_desc->chroma_planes_swapped &&
1284 buffer_desc.num_planes == 3) {
1285 FFSWAP(uint32_t, buffer_desc.pitches[1], buffer_desc.pitches[2]);
1286 FFSWAP(uint32_t, buffer_desc.offsets[1], buffer_desc.offsets[2]);
1287 }
1288
1289 vas = vaCreateSurfaces(dst_dev->
display, format_desc->rt_format,
1291 &surface_id, 1,
1293 #endif
1294 if (vas != VA_STATUS_SUCCESS) {
1296 "object: %d (%s).\n", vas, vaErrorStr(vas));
1298 }
1300
1302 &vaapi_unmap_from_drm,
1303 (void*)(uintptr_t)surface_id);
1304 if (err < 0)
1305 return err;
1306
    dst->data[3] = (uint8_t*)(uintptr_t)surface_id;
1310
1312 "surface %#x.\n",
desc->objects[0].fd, surface_id);
1313
1314 return 0;
1315 }
1316
1317 #if VA_CHECK_VERSION(1, 1, 0)
1320 {
1323
1326
1328 }
1329
1332 {
1334 VASurfaceID surface_id;
1335 VAStatus vas;
1336 VADRMPRIMESurfaceDescriptor va_desc;
1338 uint32_t export_flags;
1340
    surface_id = (VASurfaceID)(uintptr_t)src->data[3];
1342
1343 export_flags = VA_EXPORT_SURFACE_SEPARATE_LAYERS;
1345 export_flags |= VA_EXPORT_SURFACE_READ_ONLY;
1346
1347 vas = vaSyncSurface(hwctx->
display, surface_id);
1348 if (vas != VA_STATUS_SUCCESS) {
1350 "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
1352 }
1353 }
1354
1356 export_flags |= VA_EXPORT_SURFACE_WRITE_ONLY;
1357
1358 vas = vaExportSurfaceHandle(hwctx->
display, surface_id,
1359 VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
1360 export_flags, &va_desc);
1361 if (vas != VA_STATUS_SUCCESS) {
1362 if (vas == VA_STATUS_ERROR_UNIMPLEMENTED)
1365 "%d (%s).\n", surface_id, vas, vaErrorStr(vas));
1367 }
1368
1370 if (!drm_desc) {
1373 }
1374
1375 // By some bizarre coincidence, these structures are very similar...
    for (i = 0; i < va_desc.num_objects; i++) {
1381 va_desc.objects[
i].drm_format_modifier;
1382 }
    drm_desc->nb_layers = va_desc.num_layers;
    for (i = 0; i < va_desc.num_layers; i++) {
1387 for (j = 0; j < va_desc.layers[
i].num_planes; j++) {
1389 va_desc.layers[
i].object_index[j];
1391 va_desc.layers[
i].offset[j];
1393 va_desc.layers[
i].pitch[j];
1394 }
1395 }
1396
1398 &vaapi_unmap_to_drm_esh, drm_desc);
1399 if (err < 0)
1401
    dst->data[0] = (uint8_t*)drm_desc;
1405
1406 return 0;
1407
    for (i = 0; i < va_desc.num_objects; i++)
        close(va_desc.objects[i].fd);
1412 return err;
1413 }
1414 #endif
1415
1416 #if VA_CHECK_VERSION(0, 36, 0)
1417 typedef struct VAAPIDRMImageBufferMapping {
1418 VAImage image;
1419 VABufferInfo buffer_info;
1420
1422 } VAAPIDRMImageBufferMapping;
1423
1426 {
    VAAPIDRMImageBufferMapping *mapping = hwmap->priv;
1429 VASurfaceID surface_id;
1430 VAStatus vas;
1431
    surface_id = (VASurfaceID)(uintptr_t)hwmap->source->data[3];
1434 surface_id);
1435
1436 // DRM PRIME file descriptors are closed by vaReleaseBufferHandle(),
1437 // so we shouldn't close them separately.
1438
1439 vas = vaReleaseBufferHandle(hwctx->
display, mapping->image.buf);
1440 if (vas != VA_STATUS_SUCCESS) {
1442 "handle of image %#x (derived from surface %#x): "
1443 "%d (%s).\n", mapping->image.buf, surface_id,
1444 vas, vaErrorStr(vas));
1445 }
1446
1447 vas = vaDestroyImage(hwctx->
display, mapping->image.image_id);
1448 if (vas != VA_STATUS_SUCCESS) {
1450 "derived from surface %#x: %d (%s).\n",
1451 surface_id, vas, vaErrorStr(vas));
1452 }
1453
1455 }
1456
1459 {
    VAAPIDRMImageBufferMapping *mapping = NULL;
1462 VASurfaceID surface_id;
1463 VAStatus vas;
1465
    surface_id = (VASurfaceID)(uintptr_t)src->data[3];
1468 surface_id);
1469
1471 if (!mapping)
1473
1474 vas = vaDeriveImage(hwctx->
display, surface_id,
1475 &mapping->image);
1476 if (vas != VA_STATUS_SUCCESS) {
1478 "surface %#x: %d (%s).\n",
1479 surface_id, vas, vaErrorStr(vas));
1482 }
1483
        if (vaapi_drm_format_map[i].va_fourcc ==
            mapping->image.format.fourcc)
1487 break;
1488 }
1491 "VAAPI format %#x.\n", mapping->image.format.fourcc);
1493 goto fail_derived;
1494 }
1495
1496 mapping->buffer_info.mem_type =
1497 VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME;
1498
1499 mapping->drm_desc.nb_layers =
1500 vaapi_drm_format_map[
i].nb_layer_formats;
1501 if (mapping->drm_desc.nb_layers > 1) {
1502 if (mapping->drm_desc.nb_layers != mapping->image.num_planes) {
1504 "expected format: got %d planes, but expected %d.\n",
1505 mapping->image.num_planes, mapping->drm_desc.nb_layers);
1507 goto fail_derived;
1508 }
1509
1510 for(p = 0; p < mapping->drm_desc.nb_layers; p++) {
1512 .format = vaapi_drm_format_map[
i].layer_formats[p],
1513 .nb_planes = 1,
1514 .planes[0] = {
1515 .object_index = 0,
1516 .offset = mapping->image.offsets[p],
1517 .pitch = mapping->image.pitches[p],
1518 },
1519 };
1520 }
1521 } else {
1522 mapping->drm_desc.layers[0].format =
1523 vaapi_drm_format_map[
i].layer_formats[0];
1524 mapping->drm_desc.layers[0].nb_planes = mapping->image.num_planes;
1525 for (p = 0; p < mapping->image.num_planes; p++) {
1527 .object_index = 0,
1528 .offset = mapping->image.offsets[p],
1529 .pitch = mapping->image.pitches[p],
1530 };
1531 }
1532 }
1533
1534 vas = vaAcquireBufferHandle(hwctx->
display, mapping->image.buf,
1535 &mapping->buffer_info);
1536 if (vas != VA_STATUS_SUCCESS) {
1538 "handle from image %#x (derived from surface %#x): "
1539 "%d (%s).\n", mapping->image.buf, surface_id,
1540 vas, vaErrorStr(vas));
1542 goto fail_derived;
1543 }
1544
1546 mapping->buffer_info.handle);
1547
1548 mapping->drm_desc.nb_objects = 1;
1550 .fd = mapping->buffer_info.handle,
1551 .size = mapping->image.data_size,
1552 // There is no way to get the format modifier with this API.
1554 };
1555
1557 dst,
src, &vaapi_unmap_to_drm_abh,
1558 mapping);
1559 if (err < 0)
1560 goto fail_mapped;
1561
    dst->data[0] = (uint8_t*)&mapping->drm_desc;
1565
1566 return 0;
1567
1568 fail_mapped:
1569 vaReleaseBufferHandle(hwctx->
display, mapping->image.buf);
1570 fail_derived:
1571 vaDestroyImage(hwctx->
display, mapping->image.image_id);
1574 return err;
1575 }
1576 #endif
1577
1580 {
1581 #if VA_CHECK_VERSION(1, 1, 0)
1582 int err;
    err = vaapi_map_to_drm_esh(hwfc, dst, src, flags);
1585 return err;
1586 #endif
1587 #if VA_CHECK_VERSION(0, 36, 0)
    return vaapi_map_to_drm_abh(hwfc, dst, src, flags);
1589 #endif
1591 }
1592
1593 #endif /* CONFIG_LIBDRM */
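
/*
 * Illustrative sketch (assumed usage, not part of the original file): the
 * VAAPI -> DRM export path above, exercised through the public
 * av_hwframe_map() API.  Variable names are illustrative.
 */
#if 0
    AVFrame *drm_frame = av_frame_alloc();
    drm_frame->format = AV_PIX_FMT_DRM_PRIME;
    if (av_hwframe_map(drm_frame, vaapi_frame, AV_HWFRAME_MAP_READ) == 0) {
        AVDRMFrameDescriptor *d = (AVDRMFrameDescriptor *)drm_frame->data[0];
        // d->objects[] / d->layers[] now describe the exported buffers.
    }
    av_frame_free(&drm_frame);
#endif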
1594
1597 {
    switch (src->format) {
1599 #if CONFIG_LIBDRM
        return vaapi_map_from_drm(hwfc, dst, src, flags);
1602 #endif
1603 default:
1605 }
1606 }
1607
1610 {
1612 #if CONFIG_LIBDRM
        return vaapi_map_to_drm(hwfc, dst, src, flags);
1615 #endif
1616 default:
1618 }
1619 }
1620
1622 {
1625
1628
1629 #if HAVE_VAAPI_X11
1630 if (priv->x11_display)
1631 XCloseDisplay(priv->x11_display);
1632 #endif
1633
1636
1638 }
1639
1640 #if CONFIG_VAAPI_1
static void vaapi_device_log_error(void *context, const char *message)
1642 {
1644
1646 }
1647
static void vaapi_device_log_info(void *context, const char *message)
1649 {
1651
1653 }
1654 #endif
1655
1657 VADisplay display)
1658 {
1660 int major, minor;
1661 VAStatus vas;
1662
1663 #if CONFIG_VAAPI_1
    vaSetErrorCallback(display, &vaapi_device_log_error, ctx);
    vaSetInfoCallback (display, &vaapi_device_log_info,  ctx);
1666 #endif
1667
1669
1670 vas = vaInitialize(display, &major, &minor);
1671 if (vas != VA_STATUS_SUCCESS) {
1673 "connection: %d (%s).\n", vas, vaErrorStr(vas));
1675 }
1677 "version %d.%d\n", major, minor);
1678
1679 return 0;
1680 }
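
/*
 * Illustrative sketch (assumed usage, not part of the original file): how a
 * VAAPI device is opened through the public API, which lands in the
 * device-creation code below.  The render-node path is an example value.
 */
#if 0
    AVBufferRef *device_ref = NULL;
    int ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_VAAPI,
                                     "/dev/dri/renderD128", NULL, 0);
    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Failed to create VAAPI device.\n");
#endif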
1681
1684 {
    VADisplay display = NULL;
1688 int try_drm, try_x11, try_win32, try_all;
1689
1691 if (!priv)
1693
1695
1696 ctx->user_opaque = priv;
1698
1700 if (ent) {
1701 try_all = try_drm = try_x11 = try_win32 = 0;
        if (!strcmp(ent->value, "drm")) {
            try_drm = 1;
        } else if (!strcmp(ent->value, "x11")) {
            try_x11 = 1;
        } else if (!strcmp(ent->value, "win32")) {
            try_win32 = 1;
        } else {
1712 }
1713 } else {
1714 try_all = 1;
1715 try_drm = HAVE_VAAPI_DRM;
1716 try_x11 = HAVE_VAAPI_X11;
1717 try_win32 = HAVE_VAAPI_WIN32;
1718 }
1719
1720 #if HAVE_VAAPI_DRM
1721 while (!display && try_drm) {
1722 // If the device is specified, try to open it as a DRM device node.
1723 // If not, look for a usable render node, possibly restricted to those
1724 // using a specified kernel driver.
1726 if (device) {
            priv->drm_fd = open(device, O_RDWR);
                av_log(ctx, loglevel, "Failed to open %s as "
                       "DRM device node.\n", device);
1731 break;
1732 }
1733 } else {
1734 char path[64];
1735 int n, max_devices = 8;
1736 #if CONFIG_LIBDRM
1740 #endif
1741 for (n = 0; n < max_devices; n++) {
1743 "/dev/dri/renderD%d", 128 + n);
                priv->drm_fd = open(path, O_RDWR);
1746 if (errno == ENOENT) {
1747 if (n != max_devices - 1) {
1749 "No render device %s, try next device for "
1750 "DRM render node.\n", path);
1751 continue;
1752 }
1753
1755 "for DRM render node.\n");
1756 } else
1758 "DRM render node for device %d.\n", n);
1759 break;
1760 }
1761 #if CONFIG_LIBDRM
1765 "Failed to get DRM version for device %d.\n", n);
1768 continue;
1769 }
1770 if (kernel_driver) {
                    if (strcmp(kernel_driver->value, info->name)) {
1773 "with non-matching kernel driver (%s).\n",
1775 drmFreeVersion(
info);
1778 continue;
1779 }
1781 "DRM render node for device %d, "
1782 "with matching kernel driver (%s).\n",
1784 drmFreeVersion(
info);
1785 break;
1786 // drmGetVersion() ensures |info->name| is 0-terminated.
1787 }
                  else if (!strcmp(info->name, "vgem")) {
1789 "Skipping vgem node for device %d.\n", n);
1790 drmFreeVersion(
info);
1793 continue;
1794 }
1795 drmFreeVersion(
info);
1796 #endif
1798 "DRM render node for device %d.\n", n);
1799 break;
1800 }
1801 if (n >= max_devices)
1802 break;
1803 }
1804
        display = vaGetDisplayDRM(priv->drm_fd);
1806 if (!display) {
1808 "from DRM device %s.\n", device);
1810 }
1811 break;
1812 }
1813 #endif
1814
1815 #if HAVE_VAAPI_X11
1816 if (!display && try_x11) {
1817 // Try to open the device as an X11 display.
1818 priv->x11_display = XOpenDisplay(device);
1819 if (!priv->x11_display) {
1821 "%s.\n", XDisplayName(device));
1822 } else {
1823 display = vaGetDisplay(priv->x11_display);
1824 if (!display) {
1826 "from X11 display %s.\n", XDisplayName(device));
1828 }
1829
1831 "X11 display %s.\n", XDisplayName(device));
1832 }
1833 }
1834 #endif
1835
1836 #if HAVE_VAAPI_WIN32
1837 if (!display && try_win32) {
1838 // Try to create a display from the specified device, if any.
1839 if (!device) {
            display = vaGetDisplayWin32(NULL);
1841 } else {
            IDXGIFactory2 *pDXGIFactory = NULL;
            IDXGIAdapter *pAdapter = NULL;
1844 #if !HAVE_UWP
1845 HANDLE dxgi = dlopen("dxgi.dll", 0);
1846 if (!dxgi) {
1849 }
1852 if (!pfnCreateDXGIFactory) {
1854 dlclose(dxgi);
1856 }
1857 #else
1858 // In UWP (which lacks LoadLibrary), CreateDXGIFactory isn't
1859 // available, only CreateDXGIFactory1
1862 #endif
1863 if (SUCCEEDED(pfnCreateDXGIFactory(&IID_IDXGIFactory2,
1864 (void **)&pDXGIFactory))) {
1865 int adapter = atoi(device);
1866 if (SUCCEEDED(IDXGIFactory2_EnumAdapters(pDXGIFactory,
1867 adapter,
1868 &pAdapter))) {
                    DXGI_ADAPTER_DESC desc;
                    if (SUCCEEDED(IDXGIAdapter2_GetDesc(pAdapter, &desc))) {
                               "Using device %04x:%04x (%ls) - LUID %lu %ld.\n",
                               desc.AdapterLuid.LowPart,
                               desc.AdapterLuid.HighPart);
                        display = vaGetDisplayWin32(&desc.AdapterLuid);
1877 }
1878 IDXGIAdapter_Release(pAdapter);
1879 }
1880 IDXGIFactory2_Release(pDXGIFactory);
1881 }
1882 #if !HAVE_UWP
1883 dlclose(dxgi);
1884 #endif
1885 }
1886
1887 if (!display) {
1889 "from Win32 display.\n");
1891 }
1892
1894 "Win32 display.\n");
1895 }
1896 #endif
1897
1898 if (!display) {
1899 if (device)
1901 "device %s.\n", device);
1902 else
1904 "any default device.\n");
1906 }
1907
1909 if (ent) {
1910 #if VA_CHECK_VERSION(0, 38, 0)
1911 VAStatus vas;
        vas = vaSetDriverName(display, ent->value);
1913 if (vas != VA_STATUS_SUCCESS) {
1915 "%s: %d (%s).\n", ent->
value, vas, vaErrorStr(vas));
1916 vaTerminate(display);
1918 }
1919 #else
1921 "supported with this VAAPI version.\n");
1922 #endif
1923 }
1924
1926 }
1927
1931 {
1932 #if HAVE_VAAPI_DRM
1935 VADisplay *display;
1937 int fd;
1938
    if (src_hwctx->fd < 0) {
1941 "device to derive a VA display from.\n");
1943 }
1944
1945 #if CONFIG_LIBDRM
1946 {
        int node_type = drmGetNodeTypeFromFd(src_hwctx->fd);
1948 char *render_node;
1949 if (node_type < 0) {
1951 "to refer to a DRM device.\n");
1953 }
1954 if (node_type == DRM_NODE_RENDER) {
1956 } else {
            render_node = drmGetRenderDeviceNameFromFd(src_hwctx->fd);
1958 if (!render_node) {
1960 "because the device does not have an "
1961 "associated render node.\n");
1963 } else {
1964 fd = open(render_node, O_RDWR);
1965 if (fd < 0) {
1967 "because the associated render node "
1968 "could not be opened.\n");
1970 } else {
1972 "in place of non-render DRM device.\n",
1973 render_node);
1974 }
1975 free(render_node);
1976 }
1977 }
1978 }
1979 #else
1981 #endif
1982
1984 if (!priv) {
        if (fd != src_hwctx->fd) {
1986 // The fd was opened in this function.
1987 close(fd);
1988 }
1990 }
1991
    if (fd == src_hwctx->fd) {
1993 // The fd is inherited from the source context and we are holding
1994 // a reference to that, we don't want to close it from here.
1996 } else {
1998 }
1999
2000 ctx->user_opaque = priv;
2002
2003 display = vaGetDisplayDRM(fd);
2004 if (!display) {
2006 "DRM device.\n");
2008 }
2009
2011 }
2012 #endif
2014 }
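
/*
 * Illustrative sketch (assumed usage, not part of the original file): the
 * derivation path above, driven through the public API.  Given an existing
 * AV_HWDEVICE_TYPE_DRM device reference, a VAAPI device can be derived from
 * it.  Variable names are illustrative.
 */
#if 0
    AVBufferRef *vaapi_ref = NULL;
    int ret = av_hwdevice_ctx_create_derived(&vaapi_ref, AV_HWDEVICE_TYPE_VAAPI,
                                             drm_device_ref, 0);
#endif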
2015
2018 .name = "VAAPI",
2019
2023
2037
2041 },
2042 };