/*
 * Videotoolbox hardware acceleration
 *
 * copyright (c) 2012 Sebastien Zwickert
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "config_components.h"

#include <Availability.h>
#include <AvailabilityMacros.h>
#include <TargetConditionals.h>

#ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
#  define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
#endif
#ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
#  define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
#endif

#if !HAVE_KCMVIDEOCODECTYPE_HEVC
enum { kCMVideoCodecType_HEVC = 'hvc1' };
#endif

#if !HAVE_KCMVIDEOCODECTYPE_VP9
enum { kCMVideoCodecType_VP9 = 'vp09' };
#endif

#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12

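/* Buffer release callback for decoded frames: the per-frame wrapper holds a
 * retained CVPixelBufferRef, which is released here when the owning buffer is
 * freed. */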

{
    CVPixelBufferRelease(ref->pixbuf);

}

{


    return 0;
}
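
/* Post-processing applied to every output frame: crop offsets are reset and the
 * decoded CVPixelBufferRef is exported through AVFrame.data[3]; when a hardware
 * frames context is attached to the wrapper, the frame gets a reference to it. */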

{

    }

    frame->crop_right = 0;
    frame->crop_left = 0;
    frame->crop_top = 0;
    frame->crop_bottom = 0;


    frame->data[3] = (uint8_t*)ref->pixbuf;

    if (ref->hw_frames_ctx) {
        if (!frame->hw_frames_ctx)
            return AVERROR(ENOMEM);
    }

    return 0;
}

{

    if (!buf) {
    }


    return 0;
}

#define AV_W8(p, v) *(p) = (v)

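/* Copy an SPS/PPS payload while inserting emulation-prevention bytes: a 0x03
 * byte is inserted after every 0x00 0x00 pair that precedes a byte <= 0x03, as
 * required when parameter sets are embedded in avcC/hvcC extradata. Passing
 * dst == NULL only computes the escaped size (see COUNT_SIZE_PS below). */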
{

    for (i = 0; i < src_size; i++) {
        if (i + 2 < src_size &&
            src[i]     == 0x00 &&
            src[i + 1] == 0x00 &&
            src[i + 2] <= 0x03) {
            *p++ = 0x03;
        } else {
        }
    }


}

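/* Build the avcC (AVCDecoderConfigurationRecord) extradata blob handed to
 * VideoToolbox for H.264: a fixed header taken from the active SPS, followed by
 * the escaped SPS and PPS, each preceded by a count byte and a 16-bit length. */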
{
    uint8_t *p;
    int vt_extradata_size;
    uint8_t *vt_extradata;

    vt_extradata_size = 6 + 2 + sps_size + 3 + pps_size;
    vt_extradata = av_malloc(vt_extradata_size);

    if (!vt_extradata)
        return NULL;

    p = vt_extradata;

    AV_W8(p + 0, 1); /* version */
    AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
    AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
    AV_W8(p + 3, h->ps.sps->data[3]); /* level */
    AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
    AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
    AV_WB16(p + 6, sps_size);
    p += 8;
    p += escape_ps(p, h->ps.sps->data, h->ps.sps->data_size);
    AV_W8(p + 0, 1); /* number of pps */
    AV_WB16(p + 1, pps_size);
    p += 3;
    p += escape_ps(p, h->ps.pps->data, h->ps.pps->data_size);

    av_assert0(p - vt_extradata == vt_extradata_size);

    // save sps header (profile/level) used to create decoder session,
    // so we can detect changes and recreate it.
    if (vtctx)
        memcpy(vtctx->sps, h->ps.sps->data + 1, 3);

    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
    av_free(vt_extradata);
    return data;
}

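/* Build the hvcC (HEVCDecoderConfigurationRecord) extradata blob for HEVC: a
 * 23-byte header derived from the active parameter sets, followed by one array
 * per parameter-set type (VPS/SPS/PPS), each entry escaped and length-prefixed.
 * The field layout is spelled out in the comments below. */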
{
    int i, num_vps = 0, num_sps = 0, num_pps = 0;
    uint8_t parallelismType;
    uint8_t *p;
    int vt_extradata_size = 23 + 3 + 3 + 3;
    uint8_t *vt_extradata;

#define COUNT_SIZE_PS(T, t) \
    for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
        if (h->ps.t##ps_list[i]) { \
            const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
            vt_extradata_size += 2 + escape_ps(NULL, lps->data, lps->data_size); \
            num_##t##ps++; \
        } \
    }

    COUNT_SIZE_PS(V, v)
    COUNT_SIZE_PS(S, s)
    COUNT_SIZE_PS(P, p)

    vt_extradata = av_malloc(vt_extradata_size);
    if (!vt_extradata)
        return NULL;
    p = vt_extradata;

    /* unsigned int(8) configurationVersion = 1; */

    /*
     * unsigned int(2) general_profile_space;
     * unsigned int(1) general_tier_flag;
     * unsigned int(5) general_profile_idc;
     */

    /* unsigned int(32) general_profile_compatibility_flags; */
    for (i = 0; i < 4; i++) {
    }

    /* unsigned int(48) general_constraint_indicator_flags; */

    /* unsigned int(8) general_level_idc; */

    /*
     * bit(4) reserved = ‘1111’b;
     * unsigned int(12) min_spatial_segmentation_idc;
     */

    /*
     * bit(6) reserved = ‘111111’b;
     * unsigned int(2) parallelismType;
     */
    if (!pps->entropy_coding_sync_enabled_flag && !pps->tiles_enabled_flag)
        parallelismType = 0;
    else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
        parallelismType = 0;
    else if (pps->entropy_coding_sync_enabled_flag)
        parallelismType = 3;
    else if (pps->tiles_enabled_flag)
        parallelismType = 2;
    else
        parallelismType = 1;
    AV_W8(p + 15, 0xfc | parallelismType);

    /*
     * bit(6) reserved = ‘111111’b;
     * unsigned int(2) chromaFormat;
     */
    AV_W8(p + 16, sps->chroma_format_idc | 0xfc);

    /*
     * bit(5) reserved = ‘11111’b;
     * unsigned int(3) bitDepthLumaMinus8;
     */
    AV_W8(p + 17, (sps->bit_depth - 8) | 0xf8);

    /*
     * bit(5) reserved = ‘11111’b;
     * unsigned int(3) bitDepthChromaMinus8;
     */
    AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xf8);

    /* bit(16) avgFrameRate; */

    /*
     * bit(2) constantFrameRate;
     * bit(3) numTemporalLayers;
     * bit(1) temporalIdNested;
     * unsigned int(2) lengthSizeMinusOne;
     */
    AV_W8(p + 21, 0 << 6 |
                  sps->max_sub_layers << 3 |
                  sps->temporal_id_nesting << 2 |
                  3);

    /* unsigned int(8) numOfArrays; */

    p += 23;

#define APPEND_PS(T, t) \
    /* \
     * bit(1) array_completeness; \
     * unsigned int(1) reserved = 0; \
     * unsigned int(6) NAL_unit_type; \
     */ \
    AV_W8(p, 1 << 7 | \
             HEVC_NAL_##T##PS & 0x3f); \
    /* unsigned int(16) numNalus; */ \
    AV_WB16(p + 1, num_##t##ps); \
    p += 3; \
    for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
        if (h->ps.t##ps_list[i]) { \
            const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
            int size = escape_ps(p + 2, lps->data, lps->data_size); \
            /* unsigned int(16) nalUnitLength; */ \
            AV_WB16(p, size); \
            /* bit(8*nalUnitLength) nalUnit; */ \
            p += 2 + size; \
        } \
    }

    APPEND_PS(V, v)
    APPEND_PS(S, s)
    APPEND_PS(P, p)

    av_assert0(p - vt_extradata == vt_extradata_size);

    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
    av_free(vt_extradata);
    return data;
}

{

    if (h->is_avc == 1) {
    }

    return 0;
}

{

    // save sps header (profile/level) used to create decoder session
        memcpy(vtctx->sps, h->ps.sps->data + 1, 3);

        }
    }

    // pass-through SPS/PPS changes to the decoder
}

{


    return 0;
}

{

    return 0;

}

#if CONFIG_VIDEOTOOLBOX
// Return the AVVideotoolboxContext that matters currently. Where it comes from
// depends on the API used.
{
    // Somewhat tricky because the user can call av_videotoolbox_default_free()
    // at any time, even when the codec is closed.
    }
}

{
    if (!videotoolbox)
        return;

    }

    if (videotoolbox->session) {
        VTDecompressionSessionInvalidate(videotoolbox->session);
        CFRelease(videotoolbox->session);
    }
}

{
    if (!vtctx)
        return 0;

        CVPixelBufferRelease(vtctx->frame);

        videotoolbox_stop(avctx);


    return 0;
}

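/* Attach hardware-frames state to a decoded frame: the output CVPixelBuffer's
 * pixel format and dimensions are queried so the cached AVHWFramesContext can
 * be kept up to date, and a reference to it is stored in the frame wrapper;
 * the old API code path returns early without one. */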
{
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    int width = CVPixelBufferGetWidth(pixbuf);
    int height = CVPixelBufferGetHeight(pixbuf);

    }

    CVPixelBufferRelease(ref->pixbuf);

    // Old API code path.
        return 0;


    if (!hw_frames_ctx)

        hw_ctx = hw_frames->hwctx;

    }

    }

    if (!ref->hw_frames_ctx)

    return 0;
}

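/* MPEG-4 descriptor lengths are written below as four 7-bit groups, most
 * significant first, with the continuation bit (0x80) set on every byte but the
 * last; e.g. a length of 300 (0x12C) is emitted as 0x80 0x80 0x82 0x2C. */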
static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
{
    int i;
    uint8_t b;

    for (i = 3; i >= 0; i--) {
        b = (length >> (i * 7)) & 0x7F;
        if (i != 0)
            b |= 0x80;

        bytestream2_put_byteu(pb, b);
    }
}

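/* Build the esds extradata handed to VideoToolbox for MPEG-4 Part 2: an
 * ES_Descriptor wrapping a DecoderConfigDescriptor, the codec extradata as
 * DecoderSpecificInfo, and a minimal SLConfigDescriptor. */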
static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
{
    uint8_t *rw_extradata;
    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor


    bytestream2_put_byteu(&pb, 0);        // version

    // elementary stream descriptor
    bytestream2_put_byteu(&pb, 0x03);     // ES_DescrTag
    videotoolbox_write_mp4_descr_length(&pb, full_size);
    bytestream2_put_byteu(&pb, 0);        // stream priority (0-32)

    // decoder configuration descriptor
    bytestream2_put_byteu(&pb, 0x04);     // DecoderConfigDescrTag
    videotoolbox_write_mp4_descr_length(&pb, config_size);
    bytestream2_put_byteu(&pb, 32);       // object type indication. 32 = AV_CODEC_ID_MPEG4
    bytestream2_put_byteu(&pb, 0x11);     // stream type

    // decoder specific descriptor
    bytestream2_put_byteu(&pb, 0x05);     ///< DecSpecificInfoTag


    // SLConfigDescriptor
    bytestream2_put_byteu(&pb, 0x06);     // SLConfigDescrTag
    bytestream2_put_byteu(&pb, 0x01);     // length
    bytestream2_put_byteu(&pb, 0x02);     //


    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);

}

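/* Wrap one access unit in a CMSampleBuffer: the bitstream is referenced through
 * a CMBlockBuffer (kCFAllocatorNull, so no copy is made) and tagged with the
 * session's CMVideoFormatDescription for VTDecompressionSessionDecodeFrame. */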
static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
{
    CMBlockBufferRef  block_buf;
    CMSampleBufferRef sample_buf;


    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
                                                kCFAllocatorNull,   // blockAllocator
                                                NULL,               // customBlockSource
                                                0,                  // offsetToData
                                                0,                  // flags
                                                &block_buf);

    if (!status) {
        status = CMSampleBufferCreate(kCFAllocatorDefault,  // allocator
                                      block_buf,            // dataBuffer
                                      TRUE,                 // dataReady
                                      0,                    // makeDataReadyCallback
                                      0,                    // makeDataReadyRefcon
                                      fmt_desc,             // formatDescription
                                      1,                    // numSamples
                                      0,                    // numSampleTimingEntries
                                      NULL,                 // sampleTimingArray
                                      0,                    // numSampleSizeEntries
                                      NULL,                 // sampleSizeArray
                                      &sample_buf);
    }

    if (block_buf)
        CFRelease(block_buf);

    return sample_buf;
}

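/* Decompression output callback: any previously stored output buffer is
 * released, then the newly decoded CVImageBuffer (if any) is retained into the
 * hwaccel context so the caller can pick it up after the decode call returns. */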
static void videotoolbox_decoder_callback(void *opaque,
                                          void *sourceFrameRefCon,
                                          OSStatus status,
                                          VTDecodeInfoFlags flags,
                                          CVImageBufferRef image_buffer,
                                          CMTime pts,
                                          CMTime duration)
{

    if (vtctx->frame) {
        CVPixelBufferRelease(vtctx->frame);
    }

    if (!image_buffer) {
               "vt decoder cb: output image buffer is null: %i\n", status);
        return;
    }

    vtctx->frame = CVPixelBufferRetain(image_buffer);
}

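/* Submit the buffered access unit to the decompression session and wait for the
 * asynchronous frames, so the callback above has run by the time this returns. */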
static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
{
    CMSampleBufferRef sample_buf;

    sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,

    if (!sample_buf)
        return -1;

    status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
                                               sample_buf,
                                               0,       // decodeFlags
                                               NULL,    // sourceFrameRefCon
                                               0);      // infoFlagsOut

    status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);

    CFRelease(sample_buf);

    return status;
}

static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
                                                                   CFDictionaryRef decoder_spec,
{
    CMFormatDescriptionRef cm_fmt_desc;

    status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
                                            decoder_spec, // Dictionary of extension
                                            &cm_fmt_desc);


    return cm_fmt_desc;
}

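/* Destination image buffer attributes for the session: requested pixel format,
 * width and height, an (empty) IOSurface properties dictionary, and an OpenGL /
 * OpenGL ES compatibility flag depending on the target platform. */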
static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
{
    CFMutableDictionaryRef buffer_attributes;
    CFMutableDictionaryRef io_surface_properties;
    CFNumberRef cv_pix_fmt;

    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);

    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                  4,
                                                  &kCFTypeDictionaryKeyCallBacks,
                                                  &kCFTypeDictionaryValueCallBacks);
    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                      0,
                                                      &kCFTypeDictionaryKeyCallBacks,
                                                      &kCFTypeDictionaryValueCallBacks);

    CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
#if TARGET_OS_IPHONE
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
#else
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
#endif

    CFRelease(io_surface_properties);
    CFRelease(cv_pix_fmt);

    return buffer_attributes;
}

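/* Decoder specification passed to VideoToolbox: hardware-accelerated decoding
 * is enabled/required via the kVTVideoDecoderSpecification_* keys defined at
 * the top of this file, and the codec-specific extradata is attached as a
 * sample description extension atom (esds, avcC, hvcC or vpcC). */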
static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
{
    CFMutableDictionaryRef avc_info;

    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                                   0,
                                                                   &kCFTypeDictionaryKeyCallBacks,
                                                                   &kCFTypeDictionaryValueCallBacks);

    CFDictionarySetValue(config_info,
                         kCFBooleanTrue);

    avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                         1,
                                         &kCFTypeDictionaryKeyCallBacks,
                                         &kCFTypeDictionaryValueCallBacks);

    switch (codec_type) {
    case kCMVideoCodecType_MPEG4Video :
        data = videotoolbox_esds_extradata_create(avctx);
            CFDictionarySetValue(avc_info, CFSTR("esds"), data);
        break;
    case kCMVideoCodecType_H264 :
            CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
        break;
    case kCMVideoCodecType_HEVC :
            CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
        break;
#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
    case kCMVideoCodecType_VP9 :
            CFDictionarySetValue(avc_info, CFSTR("vpcC"), data);
        break;
#endif
    default:
        break;
    }

    CFDictionarySetValue(config_info,
                         kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
                         avc_info);


    CFRelease(avc_info);
    return config_info;
}

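/* Create the decompression session: map the codec (or, for ProRes, the codec
 * tag) to a CMVideoCodecType, register professional/supplemental decoders on
 * newer macOS releases where available, build the format description and the
 * destination buffer attributes, then map the VTDecompressionSessionCreate
 * status to a return value. */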
{
    VTDecompressionOutputCallbackRecord decoder_cb;
    CFDictionaryRef decoder_spec;
    CFDictionaryRef buf_attr;

    if (!videotoolbox) {
        return -1;
    }

        break;
        break;
        break;
        break;
        break;
        break;
        default:
        // fall-through
        case MKTAG('a','p','c','o'): // kCMVideoCodecType_AppleProRes422Proxy
        case MKTAG('a','p','c','s'): // kCMVideoCodecType_AppleProRes422LT
        case MKTAG('a','p','c','n'): // kCMVideoCodecType_AppleProRes422
        case MKTAG('a','p','c','h'): // kCMVideoCodecType_AppleProRes422HQ
        case MKTAG('a','p','4','h'): // kCMVideoCodecType_AppleProRes4444
        case MKTAG('a','p','4','x'): // kCMVideoCodecType_AppleProRes4444XQ
            videotoolbox->cm_codec_type = av_bswap32(avctx->codec_tag);
            break;
        }
        break;
        break;
    default :
        break;
    }

#if defined(MAC_OS_X_VERSION_10_9) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9) && AV_HAS_BUILTIN(__builtin_available)
        if (__builtin_available(macOS 10.9, *)) {
            VTRegisterProfessionalVideoWorkflowVideoDecoders();
        }
    }
#endif

#if defined(MAC_OS_VERSION_11_0) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_11_0) && AV_HAS_BUILTIN(__builtin_available)
    if (__builtin_available(macOS 11.0, *)) {
        VTRegisterSupplementalVideoDecoderIfAvailable(videotoolbox->cm_codec_type);
    }
#endif

    decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);

    if (!decoder_spec) {
        return -1;
    }

                                                                decoder_spec,
        if (decoder_spec)
            CFRelease(decoder_spec);

        return -1;
    }

    buf_attr = videotoolbox_buffer_attributes_create(avctx->width,

    decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;

    status = VTDecompressionSessionCreate(NULL,                       // allocator
                                          videotoolbox->cm_fmt_desc,  // videoFormatDescription
                                          decoder_spec,               // videoDecoderSpecification
                                          buf_attr,                   // destinationImageBufferAttributes
                                          &decoder_cb,                // outputCallback
                                          &videotoolbox->session);    // decompressionSessionOut

    if (decoder_spec)
        CFRelease(decoder_spec);
    if (buf_attr)
        CFRelease(buf_attr);

    switch (status) {
    case kVTVideoDecoderNotAvailableNowErr:
    case kVTVideoDecoderUnsupportedDataFormatErr:
    case kVTCouldNotFindVideoDecoderErr:
    case kVTVideoDecoderMalfunctionErr:
    case kVTVideoDecoderBadDataErr:
    case 0:
        return 0;
    default:
    }
}

static const char *videotoolbox_error_string(OSStatus status)
{
    switch (status) {
    case kVTVideoDecoderBadDataErr:
        return "bad data";
    case kVTVideoDecoderMalfunctionErr:
        return "decoder malfunction";
    case kVTInvalidSessionErr:
        return "invalid session";
    }
    return "unknown";
}

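/* End-of-frame handling: restart the session if a reconfiguration is pending,
 * submit the buffered frame, treat kVTVideoDecoderMalfunctionErr and
 * kVTInvalidSessionErr specially, and wrap the decoded pixel buffer for the
 * caller via videotoolbox_buffer_create(). */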
{

        videotoolbox_stop(avctx);
        if (videotoolbox_start(avctx) != 0) {
        }
    }


    status = videotoolbox_session_decode_frame(avctx);

        if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
    }

    if (!vtctx->frame) {
    }

    return videotoolbox_buffer_create(avctx, frame);
}

{
}

{

    frame->crop_right = 0;
    frame->crop_left = 0;
    frame->crop_top = 0;
    frame->crop_bottom = 0;

    return 0;
}

{
}


static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
{
}

{

}

{

}

{
    return 0;
}

{

}

static int videotoolbox_prores_start_frame(AVCodecContext *avctx,
{
    return 0;
}

static int videotoolbox_prores_decode_slice(AVCodecContext *avctx,
{

}

{

}

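/* Pick the CVPixelBuffer format VideoToolbox should output, based on the bit
 * depth and chroma subsampling of the source format; the 10/16-bit biplanar
 * formats are only considered when the corresponding
 * HAVE_KCVPIXELFORMATTYPE_* checks passed at configure time. */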
    int depth;
    if (!descriptor)


    depth = descriptor->comp[0].depth;

#if HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE
    if (depth > 10)
#endif

#if HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE
#if HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE
        if (depth <= 8)
#endif
    }
#endif
#if HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE
#if HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE
        if (depth <= 8)
#endif
    }
#endif
#if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
    if (depth > 8) {
    }
#endif

}

{

    if (cv_pix_fmt_type == 0) {
        cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    }
    ret->cv_pix_fmt_type = cv_pix_fmt_type;
    }

}

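/* Common hwaccel init: either hw_frames_ctx or hw_device_ctx must be set; the
 * frames context's sw_format defaults to videotoolbox_best_pixel_format(), and
 * the decompression session is started once the format mapping succeeds. */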
{
    int err;


        return videotoolbox_start(avctx);

               "Either hw_frames_ctx or hw_device_ctx must be set.\n");
    }

    }

    } else {
    }

        hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
        hw_ctx = hw_frames->hwctx;

        if (err < 0) {
        }
    }

    }

               "Failed to map underlying FFmpeg pixel format %s (%s range) to "
               "a VideoToolbox format!\n",
               attempted_format ? attempted_format->name : "<unknown>",
    }

    err = videotoolbox_start(avctx);
    if (err < 0)

    return 0;

    return err;
}

{

    frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);

    return 0;
}

    .p.name         = "h263_videotoolbox",
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
};

    .p.name         = "hevc_videotoolbox",
    .start_frame    = videotoolbox_hevc_start_frame,
    .decode_slice   = videotoolbox_hevc_decode_slice,
    .decode_params  = videotoolbox_hevc_decode_params,
    .end_frame      = videotoolbox_hevc_end_frame,
};

    .p.name         = "h264_videotoolbox",
    .end_frame      = videotoolbox_h264_end_frame,
};

    .p.name         = "mpeg1_videotoolbox",
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
};

    .p.name         = "mpeg2_videotoolbox",
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
};

    .p.name         = "mpeg4_videotoolbox",
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
};

    .p.name         = "prores_videotoolbox",
    .start_frame    = videotoolbox_prores_start_frame,
    .decode_slice   = videotoolbox_prores_decode_slice,
    .end_frame      = videotoolbox_prores_end_frame,
};

#endif /* CONFIG_VIDEOTOOLBOX */