repo/extra/firefox/patches/0001-Bug-1819374-Squashed-ffmpeg-6.0-update.patch

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: "Jan Alexander Steffens (heftig)" <heftig@archlinux.org>
Date: Mon, 6 Mar 2023 21:35:36 -0500
Subject: [PATCH] Bug 1819374 - Squashed ffmpeg 6.0 update

Contains the following changes:
- Update media/ffvpx/changes.patch so that it applies cleanly to current ffmpeg master. r=alwu
Differential Revision: https://phabricator.services.mozilla.com/D171244
- Update ffmpeg to current master, revert codec, bsf and parser list to ffvpx's subset, reapply in-tree patch. r=alwu
This is an rsync of the source tree, and an `hg revert`:
- `libavcodec/codec_list.c`
- `libavcodec/bsf_list.c`
- `libavcodec/parser_list.c`
Differential Revision: https://phabricator.services.mozilla.com/D171245
- Update symbol list file for ffmpeg 6.0. r=alwu
Differential Revision: https://phabricator.services.mozilla.com/D171246
- Vendor ffmpeg 6.0 headers, adjust the runtime linker bits. r=alwu
Differential Revision: https://phabricator.services.mozilla.com/D171248
- Improve documentation about ffmpeg vendoring, adding information about header vendoring. r=alwu
Differential Revision: https://phabricator.services.mozilla.com/D171249
---
.../platforms/ffmpeg/FFmpegLibWrapper.cpp | 80 +-
.../platforms/ffmpeg/FFmpegRuntimeLinker.cpp | 5 +
.../include/COPYING.LGPLv2.1 | 0
.../ffmpeg60/include/libavcodec/avcodec.h | 3230 +++++++++++++++++
.../ffmpeg60/include/libavcodec/avdct.h | 85 +
.../include/libavcodec/avfft.h | 0
.../ffmpeg/ffmpeg60/include/libavcodec/bsf.h | 335 ++
.../ffmpeg60/include}/libavcodec/codec.h | 300 +-
.../include/libavcodec/codec_desc.h | 0
.../ffmpeg60/include/libavcodec/codec_id.h | 669 ++++
.../ffmpeg60/include/libavcodec/codec_par.h | 247 ++
.../ffmpeg/ffmpeg60/include/libavcodec/defs.h | 203 ++
.../ffmpeg60/include/libavcodec/packet.h | 730 ++++
.../ffmpeg60/include/libavcodec/vdpau.h | 156 +
.../ffmpeg60/include}/libavcodec/version.h | 20 +-
.../include}/libavcodec/version_major.h | 29 +-
.../ffmpeg60/include/libavutil/attributes.h | 173 +
.../include/libavutil/avconfig.h | 0
.../ffmpeg60/include/libavutil/avutil.h | 371 ++
.../include/libavutil/buffer.h | 0
.../include/libavutil/channel_layout.h | 842 +++++
.../ffmpeg60/include/libavutil/common.h | 589 +++
.../ffmpeg/ffmpeg60/include/libavutil/cpu.h | 150 +
.../ffmpeg/ffmpeg60/include/libavutil/dict.h | 259 ++
.../include/libavutil/error.h | 0
.../ffmpeg/ffmpeg60/include/libavutil/frame.h | 960 +++++
.../ffmpeg60/include/libavutil/hwcontext.h | 606 ++++
.../include/libavutil/hwcontext_vaapi.h | 0
.../include/libavutil/intfloat.h | 0
.../ffmpeg/ffmpeg60/include/libavutil/log.h | 388 ++
.../include/libavutil/macros.h | 0
.../ffmpeg60/include/libavutil/mathematics.h | 249 ++
.../ffmpeg/ffmpeg60/include}/libavutil/mem.h | 158 +-
.../ffmpeg60/include/libavutil/pixfmt.h | 891 +++++
.../ffmpeg60/include/libavutil/rational.h | 222 ++
.../ffmpeg60/include/libavutil/samplefmt.h | 274 ++
.../ffmpeg60/include}/libavutil/version.h | 46 +-
.../ffmpeg/{ffmpeg59 => ffmpeg60}/moz.build | 0
dom/media/platforms/ffmpeg/moz.build | 1 +
media/ffvpx/README_MOZILLA | 8 +
media/ffvpx/changes.patch | 23 -
media/ffvpx/libavcodec/allcodecs.c | 16 +
media/ffvpx/libavcodec/av1_parser.c | 7 +-
media/ffvpx/libavcodec/av1dec.c | 4 +-
media/ffvpx/libavcodec/avcodec.c | 22 +-
media/ffvpx/libavcodec/avcodec.h | 235 +-
media/ffvpx/libavcodec/avcodec.symbols | 1 -
media/ffvpx/libavcodec/avpacket.c | 2 +-
media/ffvpx/libavcodec/bitstream_filters.c | 1 +
.../libavcodec/cbs_av1_syntax_template.c | 7 +-
media/ffvpx/libavcodec/codec.h | 26 +-
media/ffvpx/libavcodec/codec_desc.c | 47 +
media/ffvpx/libavcodec/codec_id.h | 15 +
media/ffvpx/libavcodec/codec_internal.h | 8 +
media/ffvpx/libavcodec/decode.c | 224 +-
media/ffvpx/libavcodec/decode.h | 6 +
media/ffvpx/libavcodec/encode.c | 71 +-
media/ffvpx/libavcodec/encode.h | 6 +
media/ffvpx/libavcodec/flac.c | 4 +-
media/ffvpx/libavcodec/flacdec.c | 218 +-
media/ffvpx/libavcodec/get_bits.h | 321 +-
media/ffvpx/libavcodec/internal.h | 1 -
media/ffvpx/libavcodec/libdav1d.c | 115 +-
media/ffvpx/libavcodec/mathops.h | 9 +
media/ffvpx/libavcodec/motion_est.h | 1 -
media/ffvpx/libavcodec/mpegpicture.h | 3 +
media/ffvpx/libavcodec/mpegvideo.h | 9 +-
media/ffvpx/libavcodec/options.c | 37 +-
media/ffvpx/libavcodec/options_table.h | 12 +-
media/ffvpx/libavcodec/profiles.c | 1 +
media/ffvpx/libavcodec/pthread.c | 3 -
media/ffvpx/libavcodec/pthread_frame.c | 258 +-
media/ffvpx/libavcodec/put_bits.h | 7 +
media/ffvpx/libavcodec/ratecontrol.h | 3 -
media/ffvpx/libavcodec/thread.h | 12 -
media/ffvpx/libavcodec/utils.c | 16 +-
media/ffvpx/libavcodec/vaapi_av1.c | 2 +-
media/ffvpx/libavcodec/vaapi_decode.c | 4 +-
media/ffvpx/libavcodec/vaapi_hevc.h | 2 +-
media/ffvpx/libavcodec/version.h | 4 +-
media/ffvpx/libavcodec/version_major.h | 29 +-
media/ffvpx/libavcodec/videodsp_template.c | 4 +-
media/ffvpx/libavcodec/vp8.c | 14 +-
media/ffvpx/libavcodec/x86/fft.asm | 8 +-
media/ffvpx/libavcodec/x86/flacdsp.asm | 8 +-
media/ffvpx/libavcodec/x86/h264_intrapred.asm | 24 +-
.../libavcodec/x86/h264_intrapred_10bit.asm | 16 +-
media/ffvpx/libavcodec/x86/videodsp.asm | 2 +-
media/ffvpx/libavcodec/x86/vp8dsp.asm | 28 +-
media/ffvpx/libavutil/avstring.c | 10 -
media/ffvpx/libavutil/avstring.h | 9 -
media/ffvpx/libavutil/avutil.symbols | 4 +-
media/ffvpx/libavutil/channel_layout.c | 1 +
media/ffvpx/libavutil/channel_layout.h | 2 +
media/ffvpx/libavutil/dict.c | 41 +-
media/ffvpx/libavutil/dict.h | 125 +-
media/ffvpx/libavutil/ffversion.h | 2 +-
media/ffvpx/libavutil/frame.c | 26 +-
media/ffvpx/libavutil/frame.h | 23 +-
media/ffvpx/libavutil/hwcontext.c | 6 +-
media/ffvpx/libavutil/hwcontext_vaapi.c | 40 +-
media/ffvpx/libavutil/mem.c | 10 -
media/ffvpx/libavutil/mem.h | 88 -
media/ffvpx/libavutil/mem_internal.h | 2 -
media/ffvpx/libavutil/opt.c | 14 +-
media/ffvpx/libavutil/pixdesc.c | 70 +-
media/ffvpx/libavutil/pixfmt.h | 32 +-
media/ffvpx/libavutil/version.h | 22 +-
media/ffvpx/libavutil/video_enc_params.c | 10 +-
media/ffvpx/libavutil/x86/float_dsp.asm | 18 +-
media/ffvpx/libavutil/x86/lls.asm | 4 +-
tools/rewriting/ThirdPartyPaths.txt | 1 +
112 files changed, 13117 insertions(+), 1615 deletions(-)
copy dom/media/platforms/ffmpeg/{ffmpeg57 => ffmpeg60}/include/COPYING.LGPLv2.1 (100%)
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avcodec.h
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avdct.h
copy dom/media/platforms/ffmpeg/{ffmpeg59 => ffmpeg60}/include/libavcodec/avfft.h (100%)
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/bsf.h
copy {media/ffvpx => dom/media/platforms/ffmpeg/ffmpeg60/include}/libavcodec/codec.h (51%)
copy dom/media/platforms/ffmpeg/{ffmpeg59 => ffmpeg60}/include/libavcodec/codec_desc.h (100%)
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_id.h
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_par.h
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/defs.h
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/packet.h
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/vdpau.h
copy {media/ffvpx => dom/media/platforms/ffmpeg/ffmpeg60/include}/libavcodec/version.h (60%)
copy {media/ffvpx => dom/media/platforms/ffmpeg/ffmpeg60/include}/libavcodec/version_major.h (55%)
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/attributes.h
copy dom/media/platforms/ffmpeg/{ffmpeg58 => ffmpeg60}/include/libavutil/avconfig.h (100%)
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/avutil.h
copy dom/media/platforms/ffmpeg/{ffmpeg59 => ffmpeg60}/include/libavutil/buffer.h (100%)
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/channel_layout.h
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/common.h
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/cpu.h
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/dict.h
copy dom/media/platforms/ffmpeg/{ffmpeg59 => ffmpeg60}/include/libavutil/error.h (100%)
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/frame.h
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/hwcontext.h
copy dom/media/platforms/ffmpeg/{ffmpeg59 => ffmpeg60}/include/libavutil/hwcontext_vaapi.h (100%)
copy dom/media/platforms/ffmpeg/{ffmpeg59 => ffmpeg60}/include/libavutil/intfloat.h (100%)
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/log.h
copy dom/media/platforms/ffmpeg/{ffmpeg59 => ffmpeg60}/include/libavutil/macros.h (100%)
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/mathematics.h
copy {media/ffvpx => dom/media/platforms/ffmpeg/ffmpeg60/include}/libavutil/mem.h (79%)
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/pixfmt.h
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/rational.h
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/samplefmt.h
copy {media/ffvpx => dom/media/platforms/ffmpeg/ffmpeg60/include}/libavutil/version.h (66%)
copy dom/media/platforms/ffmpeg/{ffmpeg59 => ffmpeg60}/moz.build (100%)
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
index 94b929f1a095..4730a0bcc70a 100644
--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
@@ -66,42 +66,47 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC_57 = 1 << 4,
AV_FUNC_58 = 1 << 5,
AV_FUNC_59 = 1 << 6,
+ AV_FUNC_60 = 1 << 7,
AV_FUNC_AVUTIL_53 = AV_FUNC_53 | AV_FUNC_AVUTIL_MASK,
AV_FUNC_AVUTIL_54 = AV_FUNC_54 | AV_FUNC_AVUTIL_MASK,
AV_FUNC_AVUTIL_55 = AV_FUNC_55 | AV_FUNC_AVUTIL_MASK,
AV_FUNC_AVUTIL_56 = AV_FUNC_56 | AV_FUNC_AVUTIL_MASK,
AV_FUNC_AVUTIL_57 = AV_FUNC_57 | AV_FUNC_AVUTIL_MASK,
AV_FUNC_AVUTIL_58 = AV_FUNC_58 | AV_FUNC_AVUTIL_MASK,
AV_FUNC_AVUTIL_59 = AV_FUNC_59 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_60 = AV_FUNC_60 | AV_FUNC_AVUTIL_MASK,
AV_FUNC_AVCODEC_ALL = AV_FUNC_53 | AV_FUNC_54 | AV_FUNC_55 | AV_FUNC_56 |
- AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59,
+ AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60,
AV_FUNC_AVUTIL_ALL = AV_FUNC_AVCODEC_ALL | AV_FUNC_AVUTIL_MASK
};
switch (macro) {
case 53:
version = AV_FUNC_53;
break;
case 54:
version = AV_FUNC_54;
break;
case 55:
version = AV_FUNC_55;
break;
case 56:
version = AV_FUNC_56;
break;
case 57:
version = AV_FUNC_57;
break;
case 58:
version = AV_FUNC_58;
break;
case 59:
version = AV_FUNC_59;
break;
+ case 60:
+ version = AV_FUNC_60;
+ break;
default:
- FFMPEG_LOG("Unknown avcodec version");
+ FFMPEG_LOG("Unknown avcodec version: %d", macro);
Unlink();
return isFFMpeg ? ((macro > 57) ? LinkResult::UnknownFutureFFMpegVersion
: LinkResult::UnknownOlderFFMpegVersion)
@@ -155,61 +160,74 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC(avcodec_alloc_frame, (AV_FUNC_53 | AV_FUNC_54))
AV_FUNC(avcodec_get_frame_defaults, (AV_FUNC_53 | AV_FUNC_54))
AV_FUNC(avcodec_free_frame, AV_FUNC_54)
- AV_FUNC(avcodec_send_packet, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC(avcodec_receive_frame, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC(avcodec_default_get_buffer2,
- (AV_FUNC_55 | AV_FUNC_56 | AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59))
+ AV_FUNC(avcodec_send_packet, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC(avcodec_receive_frame, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC(avcodec_default_get_buffer2, (AV_FUNC_55 | AV_FUNC_56 | AV_FUNC_57 |
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60))
AV_FUNC_OPTION(av_rdft_init, AV_FUNC_AVCODEC_ALL)
AV_FUNC_OPTION(av_rdft_calc, AV_FUNC_AVCODEC_ALL)
AV_FUNC_OPTION(av_rdft_end, AV_FUNC_AVCODEC_ALL)
AV_FUNC(av_log_set_level, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_malloc, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_freep, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_frame_alloc,
(AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59))
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
AV_FUNC(av_frame_free,
(AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59))
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
AV_FUNC(av_frame_unref,
(AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59))
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
AV_FUNC(av_image_check_size, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_image_get_buffer_size, AV_FUNC_AVUTIL_ALL)
- AV_FUNC_OPTION(av_buffer_get_opaque, (AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59))
+ AV_FUNC_OPTION(av_buffer_get_opaque,
+ (AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 | AV_FUNC_AVUTIL_58 |
+ AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
AV_FUNC(av_buffer_create,
(AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59))
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
AV_FUNC_OPTION(av_frame_get_colorspace,
AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
AV_FUNC_AVUTIL_58)
AV_FUNC_OPTION(av_frame_get_color_range,
AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
AV_FUNC_AVUTIL_58)
- AV_FUNC(av_strerror, AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59)
+ AV_FUNC(av_strerror,
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60)
#ifdef MOZ_WAYLAND
- AV_FUNC_OPTION_SILENT(avcodec_get_hw_config, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_codec_iterate, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_codec_is_decoder, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_hwdevice_ctx_init, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_hwdevice_ctx_alloc, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_hwdevice_hwconfig_alloc, AV_FUNC_58 | AV_FUNC_59)
+ AV_FUNC_OPTION_SILENT(avcodec_get_hw_config,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_codec_iterate, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_codec_is_decoder,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_hwdevice_ctx_init,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_hwdevice_ctx_alloc,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_hwdevice_hwconfig_alloc,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
AV_FUNC_OPTION_SILENT(av_hwdevice_get_hwframe_constraints,
- AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_hwframe_constraints_free, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_buffer_ref, AV_FUNC_AVUTIL_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_buffer_unref, AV_FUNC_AVUTIL_58 | AV_FUNC_59)
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_hwframe_constraints_free,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_buffer_ref,
+ AV_FUNC_AVUTIL_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_buffer_unref,
+ AV_FUNC_AVUTIL_58 | AV_FUNC_59 | AV_FUNC_60)
AV_FUNC_OPTION_SILENT(av_hwframe_transfer_get_formats,
- AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_hwdevice_ctx_create_derived, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_hwframe_ctx_alloc, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_dict_set, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_dict_free, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(avcodec_get_name, AV_FUNC_58 | AV_FUNC_59)
- AV_FUNC_OPTION_SILENT(av_get_pix_fmt_string,
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59)
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_hwdevice_ctx_create_derived,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_hwframe_ctx_alloc,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_dict_set, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_dict_free, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(avcodec_get_name, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_OPTION_SILENT(av_get_pix_fmt_string, AV_FUNC_AVUTIL_58 |
+ AV_FUNC_AVUTIL_59 |
+ AV_FUNC_AVUTIL_60)
#endif
#undef AV_FUNC
#undef AV_FUNC_OPTION
diff --git a/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp b/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
index 91bf38df8354..7716162e0707 100644
--- a/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
@@ -27,17 +27,19 @@ static FFmpegLibWrapper sLibAV;
static const char* sLibs[] = {
// clang-format off
#if defined(XP_DARWIN)
+ "libavcodec.60.dylib",
"libavcodec.59.dylib",
"libavcodec.58.dylib",
"libavcodec.57.dylib",
"libavcodec.56.dylib",
"libavcodec.55.dylib",
"libavcodec.54.dylib",
"libavcodec.53.dylib",
#elif defined(XP_OPENBSD)
"libavcodec.so", // OpenBSD hardly controls the major/minor library version
// of ffmpeg and update it regulary on ABI/API changes
#else
+ "libavcodec.so.60",
"libavcodec.so.59",
"libavcodec.so.58",
"libavcodec-ffmpeg.so.58",
@@ -160,6 +162,9 @@ already_AddRefed<PlatformDecoderModule> FFmpegRuntimeLinker::Create() {
case 59:
module = FFmpegDecoderModule<59>::Create(&sLibAV);
break;
+ case 60:
+ module = FFmpegDecoderModule<60>::Create(&sLibAV);
+ break;
default:
module = nullptr;
}
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/COPYING.LGPLv2.1 b/dom/media/platforms/ffmpeg/ffmpeg60/include/COPYING.LGPLv2.1
similarity index 100%
copy from dom/media/platforms/ffmpeg/ffmpeg57/include/COPYING.LGPLv2.1
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/COPYING.LGPLv2.1
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avcodec.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avcodec.h
new file mode 100644
index 000000000000..c7e6b609d540
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avcodec.h
@@ -0,0 +1,3230 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVCODEC_H
+#define AVCODEC_AVCODEC_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec external API header
+ */
+
+#include "libavutil/samplefmt.h"
+#include "libavutil/attributes.h"
+#include "libavutil/avutil.h"
+#include "libavutil/buffer.h"
+#include "libavutil/dict.h"
+#include "libavutil/frame.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+
+#include "codec.h"
+#include "codec_desc.h"
+#include "codec_par.h"
+#include "codec_id.h"
+#include "defs.h"
+#include "packet.h"
+#include "version_major.h"
+#ifndef HAVE_AV_CONFIG_H
+/* When included as part of the ffmpeg build, only include the major version
+ * to avoid unnecessary rebuilds. When included externally, keep including
+ * the full version information. */
+# include "version.h"
+#endif
+
+/**
+ * @defgroup libavc libavcodec
+ * Encoding/Decoding Library
+ *
+ * @{
+ *
+ * @defgroup lavc_decoding Decoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_encoding Encoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_codec Codecs
+ * @{
+ * @defgroup lavc_codec_native Native Codecs
+ * @{
+ * @}
+ * @defgroup lavc_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge
+ * @{
+ * @}
+ * @}
+ * @defgroup lavc_internal Internal
+ * @{
+ * @}
+ * @}
+ */
+
+/**
+ * @ingroup libavc
+ * @defgroup lavc_encdec send/receive encoding and decoding API overview
+ * @{
+ *
+ * The avcodec_send_packet()/avcodec_receive_frame()/avcodec_send_frame()/
+ * avcodec_receive_packet() functions provide an encode/decode API, which
+ * decouples input and output.
+ *
+ * The API is very similar for encoding/decoding and audio/video, and works as
+ * follows:
+ * - Set up and open the AVCodecContext as usual.
+ * - Send valid input:
+ * - For decoding, call avcodec_send_packet() to give the decoder raw
+ * compressed data in an AVPacket.
+ * - For encoding, call avcodec_send_frame() to give the encoder an AVFrame
+ * containing uncompressed audio or video.
+ *
+ * In both cases, it is recommended that AVPackets and AVFrames are
+ * refcounted, or libavcodec might have to copy the input data. (libavformat
+ * always returns refcounted AVPackets, and av_frame_get_buffer() allocates
+ * refcounted AVFrames.)
+ * - Receive output in a loop. Periodically call one of the avcodec_receive_*()
+ * functions and process their output:
+ * - For decoding, call avcodec_receive_frame(). On success, it will return
+ * an AVFrame containing uncompressed audio or video data.
+ * - For encoding, call avcodec_receive_packet(). On success, it will return
+ * an AVPacket with a compressed frame.
+ *
+ * Repeat this call until it returns AVERROR(EAGAIN) or an error. The
+ * AVERROR(EAGAIN) return value means that new input data is required to
+ * return new output. In this case, continue with sending input. For each
+ * input frame/packet, the codec will typically return 1 output frame/packet,
+ * but it can also be 0 or more than 1.
+ *
+ * At the beginning of decoding or encoding, the codec might accept multiple
+ * input frames/packets without returning a frame, until its internal buffers
+ * are filled. This situation is handled transparently if you follow the steps
+ * outlined above.
+ *
+ * In theory, sending input can result in EAGAIN - this should happen only if
+ * not all output was received. You can use this to structure alternative decode
+ * or encode loops other than the one suggested above. For example, you could
+ * try sending new input on each iteration, and try to receive output if that
+ * returns EAGAIN.
+ *
+ * End of stream situations. These require "flushing" (aka draining) the codec,
+ * as the codec might buffer multiple frames or packets internally for
+ * performance or out of necessity (consider B-frames).
+ * This is handled as follows:
+ * - Instead of valid input, send NULL to the avcodec_send_packet() (decoding)
+ * or avcodec_send_frame() (encoding) functions. This will enter draining
+ * mode.
+ * - Call avcodec_receive_frame() (decoding) or avcodec_receive_packet()
+ * (encoding) in a loop until AVERROR_EOF is returned. The functions will
+ * not return AVERROR(EAGAIN), unless you forgot to enter draining mode.
+ * - Before decoding can be resumed again, the codec has to be reset with
+ * avcodec_flush_buffers().
+ *
+ * Using the API as outlined above is highly recommended. But it is also
+ * possible to call functions outside of this rigid schema. For example, you can
+ * call avcodec_send_packet() repeatedly without calling
+ * avcodec_receive_frame(). In this case, avcodec_send_packet() will succeed
+ * until the codec's internal buffer has been filled up (which is typically of
+ * size 1 per output frame, after initial input), and then reject input with
+ * AVERROR(EAGAIN). Once it starts rejecting input, you have no choice but to
+ * read at least some output.
+ *
+ * Not all codecs will follow a rigid and predictable dataflow; the only
+ * guarantee is that an AVERROR(EAGAIN) return value on a send/receive call on
+ * one end implies that a receive/send call on the other end will succeed, or
+ * at least will not fail with AVERROR(EAGAIN). In general, no codec will
+ * permit unlimited buffering of input or output.
+ *
+ * A codec is not allowed to return AVERROR(EAGAIN) for both sending and
+ * receiving. This would be an invalid state, which could put the codec user
+ * into an endless loop. The API has no concept of time either: it cannot happen
+ * that trying to do avcodec_send_packet() results in AVERROR(EAGAIN), but a
+ * repeated call 1 second later accepts the packet (with no other receive/flush
+ * API calls involved). The API is a strict state machine, and the passage of
+ * time is not supposed to influence it. Some timing-dependent behavior might
+ * still be deemed acceptable in certain cases. But it must never result in both
+ * send/receive returning EAGAIN at the same time at any point. It must also
+ * absolutely be avoided that the current state is "unstable" and can
+ * "flip-flop" between the send/receive APIs allowing progress. For example,
+ * it's not allowed that the codec randomly decides that it actually wants to
+ * consume a packet now instead of returning a frame, after it just returned
+ * AVERROR(EAGAIN) on an avcodec_send_packet() call.
+ * @}
+ */
+
+/**
+ * @defgroup lavc_core Core functions/structures.
+ * @ingroup libavc
+ *
+ * Basic definitions, functions for querying libavcodec capabilities,
+ * allocating core structures, etc.
+ * @{
+ */
+
+/**
+ * @ingroup lavc_encoding
+ * minimum encoding buffer size
+ * Used to avoid some checks during header writing.
+ */
+#define AV_INPUT_BUFFER_MIN_SIZE 16384
+
+/**
+ * @ingroup lavc_encoding
+ */
+typedef struct RcOverride {
+ int start_frame;
+ int end_frame;
+ int qscale; // If this is 0 then quality_factor will be used instead.
+ float quality_factor;
+} RcOverride;
+
+/* encoding support
+ These flags can be passed in AVCodecContext.flags before initialization.
+ Note: Not everything is supported yet.
+*/
+
+/**
+ * Allow decoders to produce frames with data planes that are not aligned
+ * to CPU requirements (e.g. due to cropping).
+ */
+#define AV_CODEC_FLAG_UNALIGNED (1 << 0)
+/**
+ * Use fixed qscale.
+ */
+#define AV_CODEC_FLAG_QSCALE (1 << 1)
+/**
+ * 4 MV per MB allowed / advanced prediction for H.263.
+ */
+#define AV_CODEC_FLAG_4MV (1 << 2)
+/**
+ * Output even those frames that might be corrupted.
+ */
+#define AV_CODEC_FLAG_OUTPUT_CORRUPT (1 << 3)
+/**
+ * Use qpel MC.
+ */
+#define AV_CODEC_FLAG_QPEL (1 << 4)
+/**
+ * Don't output frames whose parameters differ from first
+ * decoded frame in stream.
+ */
+#define AV_CODEC_FLAG_DROPCHANGED (1 << 5)
+/**
+ * Request the encoder to output reconstructed frames, i.e.\ frames that would
+ * be produced by decoding the encoded bitstream. These frames may be retrieved
+ * by calling avcodec_receive_frame() immediately after a successful call to
+ * avcodec_receive_packet().
+ *
+ * Should only be used with encoders flagged with the
+ * @ref AV_CODEC_CAP_ENCODER_RECON_FRAME capability.
+ */
+#define AV_CODEC_FLAG_RECON_FRAME (1 << 6)
+/**
+ * @par decoding
+ * Request the decoder to propagate each packet's AVPacket.opaque and
+ * AVPacket.opaque_ref to its corresponding output AVFrame.
+ *
+ * @par encoding:
+ * Request the encoder to propagate each frame's AVFrame.opaque and
+ * AVFrame.opaque_ref values to its corresponding output AVPacket.
+ *
+ * @par
+ * May only be set on encoders that have the
+ * @ref AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability flag.
+ *
+ * @note
+ * While in typical cases one input frame produces exactly one output packet
+ * (perhaps after a delay), in general the mapping of frames to packets is
+ * M-to-N, so
+ * - Any number of input frames may be associated with any given output packet.
+ * This includes zero - e.g. some encoders may output packets that carry only
+ * metadata about the whole stream.
+ * - A given input frame may be associated with any number of output packets.
+ * Again this includes zero - e.g. some encoders may drop frames under certain
+ * conditions.
+ * .
+ * This implies that when using this flag, the caller must NOT assume that
+ * - a given input frame's opaques will necessarily appear on some output
+ * packet;
+ * - every output packet will have some non-NULL opaque value.
+ * .
+ * When an output packet contains multiple frames, the opaque values will be
+ * taken from the first of those.
+ *
+ * @note
+ * The converse holds for decoders, with frames and packets switched.
+ */
+#define AV_CODEC_FLAG_COPY_OPAQUE (1 << 7)
+/**
+ * Signal to the encoder that the values of AVFrame.duration are valid and
+ * should be used (typically for transferring them to output packets).
+ *
+ * If this flag is not set, frame durations are ignored.
+ */
+#define AV_CODEC_FLAG_FRAME_DURATION (1 << 8)
+/**
+ * Use internal 2pass ratecontrol in first pass mode.
+ */
+#define AV_CODEC_FLAG_PASS1 (1 << 9)
+/**
+ * Use internal 2pass ratecontrol in second pass mode.
+ */
+#define AV_CODEC_FLAG_PASS2 (1 << 10)
+/**
+ * loop filter.
+ */
+#define AV_CODEC_FLAG_LOOP_FILTER (1 << 11)
+/**
+ * Only decode/encode grayscale.
+ */
+#define AV_CODEC_FLAG_GRAY (1 << 13)
+/**
+ * error[?] variables will be set during encoding.
+ */
+#define AV_CODEC_FLAG_PSNR (1 << 15)
+/**
+ * Use interlaced DCT.
+ */
+#define AV_CODEC_FLAG_INTERLACED_DCT (1 << 18)
+/**
+ * Force low delay.
+ */
+#define AV_CODEC_FLAG_LOW_DELAY (1 << 19)
+/**
+ * Place global headers in extradata instead of every keyframe.
+ */
+#define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22)
+/**
+ * Use only bitexact stuff (except (I)DCT).
+ */
+#define AV_CODEC_FLAG_BITEXACT (1 << 23)
+/* Fx : Flag for H.263+ extra options */
+/**
+ * H.263 advanced intra coding / MPEG-4 AC prediction
+ */
+#define AV_CODEC_FLAG_AC_PRED (1 << 24)
+/**
+ * interlaced motion estimation
+ */
+#define AV_CODEC_FLAG_INTERLACED_ME (1 << 29)
+#define AV_CODEC_FLAG_CLOSED_GOP (1U << 31)
+
+/**
+ * Allow non spec compliant speedup tricks.
+ */
+#define AV_CODEC_FLAG2_FAST (1 << 0)
+/**
+ * Skip bitstream encoding.
+ */
+#define AV_CODEC_FLAG2_NO_OUTPUT (1 << 2)
+/**
+ * Place global headers at every keyframe instead of in extradata.
+ */
+#define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3)
+
+/**
+ * Input bitstream might be truncated at packet boundaries
+ * instead of only at frame boundaries.
+ */
+#define AV_CODEC_FLAG2_CHUNKS (1 << 15)
+/**
+ * Discard cropping information from SPS.
+ */
+#define AV_CODEC_FLAG2_IGNORE_CROP (1 << 16)
+
+/**
+ * Show all frames before the first keyframe
+ */
+#define AV_CODEC_FLAG2_SHOW_ALL (1 << 22)
+/**
+ * Export motion vectors through frame side data
+ */
+#define AV_CODEC_FLAG2_EXPORT_MVS (1 << 28)
+/**
+ * Do not skip samples and export skip information as frame side data
+ */
+#define AV_CODEC_FLAG2_SKIP_MANUAL (1 << 29)
+/**
+ * Do not reset ASS ReadOrder field on flush (subtitles decoding)
+ */
+#define AV_CODEC_FLAG2_RO_FLUSH_NOOP (1 << 30)
+/**
+ * Generate/parse ICC profiles on encode/decode, as appropriate for the type of
+ * file. No effect on codecs which cannot contain embedded ICC profiles, or
+ * when compiled without support for lcms2.
+ */
+#define AV_CODEC_FLAG2_ICC_PROFILES (1U << 31)
+
+/* Exported side data.
+ These flags can be passed in AVCodecContext.export_side_data before
+ initialization.
+*/
+/**
+ * Export motion vectors through frame side data
+ */
+#define AV_CODEC_EXPORT_DATA_MVS (1 << 0)
+/**
+ * Export encoder Producer Reference Time through packet side data
+ */
+#define AV_CODEC_EXPORT_DATA_PRFT (1 << 1)
+/**
+ * Decoding only.
+ * Export the AVVideoEncParams structure through frame side data.
+ */
+#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS (1 << 2)
+/**
+ * Decoding only.
+ * Do not apply film grain, export it instead.
+ */
+#define AV_CODEC_EXPORT_DATA_FILM_GRAIN (1 << 3)
+
+/**
+ * The decoder will keep a reference to the frame and may reuse it later.
+ */
+#define AV_GET_BUFFER_FLAG_REF (1 << 0)
+
+/**
+ * The encoder will keep a reference to the packet and may reuse it later.
+ */
+#define AV_GET_ENCODE_BUFFER_FLAG_REF (1 << 0)
+
+struct AVCodecInternal;
+
+/**
+ * main external API structure.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * You can use AVOptions (av_opt* / av_set/get*()) to access these fields from
+ * user applications. The name string for AVOptions options matches the
+ * associated command line parameter name and can be found in
+ * libavcodec/options_table.h The AVOption/command line parameter names differ
+ * in some cases from the C structure field names for historic reasons or
+ * brevity. sizeof(AVCodecContext) must not be used outside libav*.
+ */
+typedef struct AVCodecContext {
+ /**
+ * information on struct for av_log
+ * - set by avcodec_alloc_context3
+ */
+ const AVClass* av_class;
+ int log_level_offset;
+
+ enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */
+ const struct AVCodec* codec;
+ enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */
+
+ /**
+ * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * A demuxer should set this to what is stored in the field used to identify
+ * the codec. If there are multiple such fields in a container then the
+ * demuxer should choose the one which maximizes the information about the
+ * used codec. If the codec tag field in a container is larger than 32 bits
+ * then the demuxer should remap the longer ID to 32 bits with a table or
+ * other structure. Alternatively a new extra_codec_tag + size could be added
+ * but for this a clear advantage must be demonstrated first.
+ * - encoding: Set by user, if not then the default based on codec_id will be
+ * used.
+ * - decoding: Set by user, will be converted to uppercase by libavcodec
+ * during init.
+ */
+ unsigned int codec_tag;
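For illustration, the LSB-first packing described above can be written as a small macro (a hypothetical helper, equivalent in spirit to libavutil's MKTAG):

/* Pack four characters into a codec tag, first character in the low byte. */
#define PACK_FOURCC(a, b, c, d) \
    ((unsigned)(a) | ((unsigned)(b) << 8) | ((unsigned)(c) << 16) | ((unsigned)(d) << 24))

/* Example: PACK_FOURCC('a', 'v', 'c', '1') == 0x31637661, matching
 * "ABCD" -> ('D' << 24) + ('C' << 16) + ('B' << 8) + 'A'. */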
+
+ void* priv_data;
+
+ /**
+ * Private context used for internal data.
+ *
+ * Unlike priv_data, this is not codec-specific. It is used in general
+ * libavcodec functions.
+ */
+ struct AVCodecInternal* internal;
+
+ /**
+ * Private data of the user, can be used to carry app specific stuff.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ void* opaque;
+
+ /**
+ * the average bitrate
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: Set by user, may be overwritten by libavcodec
+ * if this info is available in the stream
+ */
+ int64_t bit_rate;
+
+ /**
+ * number of bits the bitstream is allowed to diverge from the reference.
+ * the reference can be CBR (for CBR pass1) or VBR (for pass2)
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: unused
+ */
+ int bit_rate_tolerance;
+
+ /**
+ * Global quality for codecs which cannot change it per frame.
+ * This should be proportional to MPEG-1/2/4 qscale.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int global_quality;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int compression_level;
+#define FF_COMPRESSION_DEFAULT -1
+
+ /**
+ * AV_CODEC_FLAG_*.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags;
+
+ /**
+ * AV_CODEC_FLAG2_*
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags2;
+
+ /**
+ * some codecs need / can use extradata like Huffman tables.
+ * MJPEG: Huffman tables
+ * rv10: additional flags
+ * MPEG-4: global headers (they can be in the bitstream or here)
+ * The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger
+ * than extradata_size to avoid problems if it is read with the bitstream
+ * reader. The bytewise contents of extradata must not depend on the
+ * architecture or CPU endianness. Must be allocated with the av_malloc()
+ * family of functions.
+ * - encoding: Set/allocated/freed by libavcodec.
+ * - decoding: Set/allocated/freed by user.
+ */
+ uint8_t* extradata;
+ int extradata_size;
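A sketch of the allocation rule spelled out above; the helper name is illustrative, and it assumes <string.h> plus the libavutil memory routines pulled in by this header:

static int copy_extradata(AVCodecContext *avctx, const uint8_t *src, int size)
{
    /* Allocate with the av_malloc() family and keep AV_INPUT_BUFFER_PADDING_SIZE
     * zeroed bytes beyond extradata_size, as required above. */
    avctx->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);
    memcpy(avctx->extradata, src, size);
    avctx->extradata_size = size;
    return 0;
}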
+
+ /**
+ * This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identically 1.
+ * This is often, but not always, the inverse of the frame rate or field rate
+ * for video. 1/time_base is not the average frame rate if the frame rate is
+ * not constant.
+ *
+ * Like containers, elementary streams also can store timestamps, 1/time_base
+ * is the unit in which these timestamps are specified.
+ * As example of such codec time base see ISO/IEC 14496-2:2001(E)
+ * vop_time_increment_resolution and fixed_vop_rate
+ * (fixed_vop_rate == 0 implies that it is different from the framerate)
+ *
+ * - encoding: MUST be set by user.
+ * - decoding: unused.
+ */
+ AVRational time_base;
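For fixed-fps content the convention above reduces to a single assignment; a minimal sketch (the helper name is hypothetical):

/* One tick per frame: a frame's pts then simply counts frames. */
static void set_fixed_fps_time_base(AVCodecContext *avctx, int fps)
{
    avctx->time_base = (AVRational){1, fps};   /* e.g. {1, 25} for 25 fps */
}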
+
+ /**
+ * For some codecs, the time base is closer to the field rate than the frame
+ * rate. Most notably, H.264 and MPEG-2 specify time_base as half of frame
+ * duration if no telecine is used ...
+ *
+ * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it
+ * to 2.
+ */
+ int ticks_per_frame;
+
+ /**
+ * Codec delay.
+ *
+ * Encoding: Number of frames delay there will be from the encoder input to
+ * the decoder output. (we assume the decoder matches the spec)
+ * Decoding: Number of frames delay in addition to what a standard decoder
+ * as specified in the spec would produce.
+ *
+ * Video:
+ * Number of frames the decoded output will be delayed relative to the
+ * encoded input.
+ *
+ * Audio:
+ * For encoding, this field is unused (see initial_padding).
+ *
+ * For decoding, this is the number of samples the decoder needs to
+ * output before the decoder's output is valid. When seeking, you should
+ * start decoding this many samples prior to your desired seek point.
+ *
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int delay;
+
+ /* video only */
+ /**
+ * picture width / height.
+ *
+ * @note Those fields may not match the values of the last
+ * AVFrame output by avcodec_receive_frame() due to frame
+ * reordering.
+ *
+ * - encoding: MUST be set by user.
+ * - decoding: May be set by the user before opening the decoder if known e.g.
+ * from the container. Some decoders will require the dimensions
+ * to be set by the caller. During decoding, the decoder may
+ * overwrite those values as required while parsing the data.
+ */
+ int width, height;
+
+ /**
+ * Bitstream width / height, may be different from width/height e.g. when
+ * the decoded frame is cropped before being output or lowres is enabled.
+ *
+ * @note Those fields may not match the values of the last
+ * AVFrame output by avcodec_receive_frame() due to frame
+ * reordering.
+ *
+ * - encoding: unused
+ * - decoding: May be set by the user before opening the decoder if known
+ * e.g. from the container. During decoding, the decoder may
+ * overwrite those values as required while parsing the data.
+ */
+ int coded_width, coded_height;
+
+ /**
+ * the number of pictures in a group of pictures, or 0 for intra_only
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int gop_size;
+
+ /**
+ * Pixel format, see AV_PIX_FMT_xxx.
+ * May be set by the demuxer if known from headers.
+ * May be overridden by the decoder if it knows better.
+ *
+ * @note This field may not match the value of the last
+ * AVFrame output by avcodec_receive_frame() due to frame
+ * reordering.
+ *
+ * - encoding: Set by user.
+ * - decoding: Set by user if known, overridden by libavcodec while
+ * parsing the data.
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * If non NULL, 'draw_horiz_band' is called by the libavcodec
+ * decoder to draw a horizontal band. It improves cache usage. Not
+ * all codecs can do that. You must check the codec capabilities
+ * beforehand.
+ * When multithreading is used, it may be called from multiple threads
+ * at the same time; threads might draw different parts of the same AVFrame,
+ * or multiple AVFrames, and there is no guarantee that slices will be drawn
+ * in order.
+ * The function is also used by hardware acceleration APIs.
+ * It is called at least once during frame decoding to pass
+ * the data needed for hardware render.
+ * In that mode instead of pixel data, AVFrame points to
+ * a structure specific to the acceleration API. The application
+ * reads the structure and can change some fields to indicate progress
+ * or mark state.
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @param height the height of the slice
+ * @param y the y position of the slice
+ * @param type 1->top field, 2->bottom field, 3->frame
+ * @param offset offset into the AVFrame.data from which the slice should be
+ * read
+ */
+ void (*draw_horiz_band)(struct AVCodecContext* s, const AVFrame* src,
+ int offset[AV_NUM_DATA_POINTERS], int y, int type,
+ int height);
+
+ /**
+ * Callback to negotiate the pixel format. Decoding only, may be set by the
+ * caller before avcodec_open2().
+ *
+ * Called by some decoders to select the pixel format that will be used for
+ * the output frames. This is mainly used to set up hardware acceleration,
+ * then the provided format list contains the corresponding hwaccel pixel
+ * formats alongside the "software" one. The software pixel format may also
+ * be retrieved from \ref sw_pix_fmt.
+ *
+ * This callback will be called when the coded frame properties (such as
+ * resolution, pixel format, etc.) change and more than one output format is
+ * supported for those new properties. If a hardware pixel format is chosen
+ * and initialization for it fails, the callback may be called again
+ * immediately.
+ *
+ * This callback may be called from different threads if the decoder is
+ * multi-threaded, but not from more than one thread simultaneously.
+ *
+ * @param fmt list of formats which may be used in the current
+ * configuration, terminated by AV_PIX_FMT_NONE.
+ * @warning Behavior is undefined if the callback returns a value other
+ * than one of the formats in fmt or AV_PIX_FMT_NONE.
+ * @return the chosen format or AV_PIX_FMT_NONE
+ */
+ enum AVPixelFormat (*get_format)(struct AVCodecContext* s,
+ const enum AVPixelFormat* fmt);
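As an illustration of the negotiation described above, a caller-supplied get_format that prefers one hardware format and otherwise falls back to the first (software) entry could look like this sketch (AV_PIX_FMT_VAAPI is only an example preference):

static enum AVPixelFormat prefer_vaapi(struct AVCodecContext *s,
                                       const enum AVPixelFormat *fmt)
{
    /* fmt is terminated by AV_PIX_FMT_NONE, as documented above. */
    for (const enum AVPixelFormat *p = fmt; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == AV_PIX_FMT_VAAPI)
            return *p;   /* take the hwaccel format when it is offered */
    }
    return fmt[0];       /* otherwise fall back to the first offered format */
}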
+
+ /**
+ * maximum number of B-frames between non-B-frames
+ * Note: The output will be delayed by max_b_frames+1 relative to the input.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_b_frames;
+
+ /**
+ * qscale factor between IP and B-frames
+ * If > 0 then the last P-frame quantizer will be used (q=
+ * lastp_q*factor+offset). If < 0 then normal ratecontrol will be done (q=
+ * -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_factor;
+
+ /**
+ * qscale offset between IP and B-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_offset;
+
+ /**
+ * Size of the frame reordering buffer in the decoder.
+ * For MPEG-2 it is 1 IPB or 0 low delay IP.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int has_b_frames;
+
+ /**
+ * qscale factor between P- and I-frames
+ * If > 0 then the last P-frame quantizer will be used (q = lastp_q * factor +
+ * offset). If < 0 then normal ratecontrol will be done (q=
+ * -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_factor;
+
+ /**
+ * qscale offset between P and I-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_offset;
+
+ /**
+ * luminance masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float lumi_masking;
+
+ /**
+ * temporal complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float temporal_cplx_masking;
+
+ /**
+ * spatial complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float spatial_cplx_masking;
+
+ /**
+ * p block masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float p_masking;
+
+ /**
+ * darkness masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float dark_masking;
+
+ /**
+ * slice count
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user (or 0).
+ */
+ int slice_count;
+
+ /**
+ * slice offsets in the frame in bytes
+ * - encoding: Set/allocated by libavcodec.
+ * - decoding: Set/allocated by user (or NULL).
+ */
+ int* slice_offset;
+
+ /**
+ * sample aspect ratio (0 if unknown)
+ * That is the width of a pixel divided by the height of the pixel.
+ * Numerator and denominator must be relatively prime and smaller than 256 for
+ * some video standards.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_cmp;
+ /**
+ * subpixel motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_sub_cmp;
+ /**
+ * macroblock comparison function (not supported yet)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_cmp;
+ /**
+ * interlaced DCT comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int ildct_cmp;
+#define FF_CMP_SAD 0
+#define FF_CMP_SSE 1
+#define FF_CMP_SATD 2
+#define FF_CMP_DCT 3
+#define FF_CMP_PSNR 4
+#define FF_CMP_BIT 5
+#define FF_CMP_RD 6
+#define FF_CMP_ZERO 7
+#define FF_CMP_VSAD 8
+#define FF_CMP_VSSE 9
+#define FF_CMP_NSSE 10
+#define FF_CMP_W53 11
+#define FF_CMP_W97 12
+#define FF_CMP_DCTMAX 13
+#define FF_CMP_DCT264 14
+#define FF_CMP_MEDIAN_SAD 15
+#define FF_CMP_CHROMA 256
+
+ /**
+ * ME diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dia_size;
+
+ /**
+ * amount of previous MV predictors (2a+1 x 2a+1 square)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int last_predictor_count;
+
+ /**
+ * motion estimation prepass comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_pre_cmp;
+
+ /**
+ * ME prepass diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_dia_size;
+
+ /**
+ * subpel ME quality
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_subpel_quality;
+
+ /**
+ * maximum motion estimation search range in subpel units
+ * If 0 then no limit.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_range;
+
+ /**
+ * slice flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int slice_flags;
+#define SLICE_FLAG_CODED_ORDER \
+ 0x0001 ///< draw_horiz_band() is called in coded order instead of display
+#define SLICE_FLAG_ALLOW_FIELD \
+ 0x0002 ///< allow draw_horiz_band() with field slices (MPEG-2 field pics)
+#define SLICE_FLAG_ALLOW_PLANE \
+ 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)
+
+ /**
+ * macroblock decision mode
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_decision;
+#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp
+#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits
+#define FF_MB_DECISION_RD 2 ///< rate distortion
+
+ /**
+ * custom intra quantization matrix
+ * Must be allocated with the av_malloc() family of functions, and will be
+ * freed in avcodec_free_context().
+ * - encoding: Set/allocated by user, freed by libavcodec. Can be NULL.
+ * - decoding: Set/allocated/freed by libavcodec.
+ */
+ uint16_t* intra_matrix;
+
+ /**
+ * custom inter quantization matrix
+ * Must be allocated with the av_malloc() family of functions, and will be
+ * freed in avcodec_free_context().
+ * - encoding: Set/allocated by user, freed by libavcodec. Can be NULL.
+ * - decoding: Set/allocated/freed by libavcodec.
+ */
+ uint16_t* inter_matrix;
+
+ /**
+ * precision of the intra DC coefficient - 8
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec
+ */
+ int intra_dc_precision;
+
+ /**
+ * Number of macroblock rows at the top which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_top;
+
+ /**
+ * Number of macroblock rows at the bottom which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_bottom;
+
+ /**
+ * minimum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmin;
+
+ /**
+ * maximum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmax;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int bidir_refine;
+
+ /**
+ * minimum GOP size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int keyint_min;
+
+ /**
+ * number of reference frames
+ * - encoding: Set by user.
+ * - decoding: Set by lavc.
+ */
+ int refs;
+
+ /**
+ * Note: Value depends upon the compare function used for fullpel ME.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mv0_threshold;
+
+ /**
+ * Chromaticity coordinates of the source primaries.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorPrimaries color_primaries;
+
+ /**
+ * Color Transfer Characteristic.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ /**
+ * This defines the location of chroma samples.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVChromaLocation chroma_sample_location;
+
+ /**
+ * Number of slices.
+ * Indicates number of picture subdivisions. Used for parallelized
+ * decoding.
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ int slices;
+
+ /** Field order
+ * - encoding: set by libavcodec
+ * - decoding: Set by user.
+ */
+ enum AVFieldOrder field_order;
+
+ /* audio only */
+ int sample_rate; ///< samples per second
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * number of audio channels
+ * @deprecated use ch_layout.nb_channels
+ */
+ attribute_deprecated int channels;
+#endif
+
+ /**
+ * audio sample format
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVSampleFormat sample_fmt; ///< sample format
+
+ /* The following data should not be initialized. */
+ /**
+ * Number of samples per channel in an audio frame.
+ *
+ * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame
+ * except the last must contain exactly frame_size samples per channel.
+ * May be 0 when the codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE set, then
+ * the frame size is not restricted.
+ * - decoding: may be set by some decoders to indicate constant frame size
+ */
+ int frame_size;
+
+#if FF_API_AVCTX_FRAME_NUMBER
+ /**
+ * Frame counter, set by libavcodec.
+ *
+ * - decoding: total number of frames returned from the decoder so far.
+ * - encoding: total number of frames passed to the encoder so far.
+ *
+ * @note the counter is not incremented if encoding/decoding resulted in
+ * an error.
+ * @deprecated use frame_num instead
+ */
+ attribute_deprecated int frame_number;
+#endif
+
+ /**
+ * number of bytes per packet if constant and known or 0
+ * Used by some WAV based audio codecs.
+ */
+ int block_align;
+
+ /**
+ * Audio cutoff bandwidth (0 means "automatic")
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int cutoff;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * Audio channel layout.
+ * - encoding: set by user.
+ * - decoding: set by user, may be overwritten by libavcodec.
+ * @deprecated use ch_layout
+ */
+ attribute_deprecated uint64_t channel_layout;
+
+ /**
+ * Request decoder to use this channel layout if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @deprecated use "downmix" codec private option
+ */
+ attribute_deprecated uint64_t request_channel_layout;
+#endif
+
+ /**
+ * Type of service that the audio stream conveys.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVAudioServiceType audio_service_type;
+
+ /**
+ * desired sample format
+ * - encoding: Not used.
+ * - decoding: Set by user.
+ * Decoder will decode to this format if it can.
+ */
+ enum AVSampleFormat request_sample_fmt;
+
+ /**
+ * This callback is called at the beginning of each frame to get data
+ * buffer(s) for it. There may be one contiguous buffer for all the data or
+ * there may be a buffer per each data plane or anything in between. What
+ * this means is, you may set however many entries in buf[] you feel
+ * necessary. Each buffer must be reference-counted using the AVBuffer API
+ * (see description of buf[] below).
+ *
+ * The following fields will be set in the frame before this callback is
+ * called:
+ * - format
+ * - width, height (video only)
+ * - sample_rate, channel_layout, nb_samples (audio only)
+ * Their values may differ from the corresponding values in
+ * AVCodecContext. This callback must use the frame values, not the codec
+ * context values, to calculate the required buffer size.
+ *
+ * This callback must fill the following fields in the frame:
+ * - data[]
+ * - linesize[]
+ * - extended_data:
+ * * if the data is planar audio with more than 8 channels, then this
+ * callback must allocate and fill extended_data to contain all pointers
+ * to all data planes. data[] must hold as many pointers as it can.
+ * extended_data must be allocated with av_malloc() and will be freed in
+ * av_frame_unref().
+ * * otherwise extended_data must point to data
+ * - buf[] must contain one or more pointers to AVBufferRef structures. Each
+ * of the frame's data and extended_data pointers must be contained in these.
+ * That is, one AVBufferRef for each allocated chunk of memory, not
+ * necessarily one AVBufferRef per data[] entry. See: av_buffer_create(),
+ * av_buffer_alloc(), and av_buffer_ref().
+ * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+ * this callback and filled with the extra buffers if there are more
+ * buffers than buf[] can hold. extended_buf will be freed in
+ * av_frame_unref().
+ *
+ * If AV_CODEC_CAP_DR1 is not set then get_buffer2() must call
+ * avcodec_default_get_buffer2() instead of providing buffers allocated by
+ * some other means.
+ *
+ * Each data plane must be aligned to the maximum required by the target
+ * CPU.
+ *
+ * @see avcodec_default_get_buffer2()
+ *
+ * Video:
+ *
+ * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+ * (read and/or written to if it is writable) later by libavcodec.
+ *
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * Some decoders do not support linesizes changing between frames.
+ *
+ * If frame multithreading is used, this callback may be called from a
+ * different thread, but not from more than one at once. Does not need to be
+ * reentrant.
+ *
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*get_buffer2)(struct AVCodecContext* s, AVFrame* frame, int flags);
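The simplest valid override consistent with the rules above forwards to avcodec_default_get_buffer2(); a sketch of such a pass-through (e.g. as a place to observe or tag allocations):

static int observing_get_buffer2(struct AVCodecContext *s, AVFrame *frame, int flags)
{
    /* format, width/height (video) or nb_samples (audio) are already set on
     * the frame here; let libavcodec perform the actual refcounted, correctly
     * aligned allocation. */
    return avcodec_default_get_buffer2(s, frame, flags);
}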
+
+ /* - encoding parameters */
+ float qcompress; ///< amount of qscale change between easy & hard scenes
+ ///< (0.0-1.0)
+ float qblur; ///< amount of qscale smoothing over time (0.0-1.0)
+
+ /**
+ * minimum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmin;
+
+ /**
+ * maximum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmax;
+
+ /**
+ * maximum quantizer difference between frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_qdiff;
+
+ /**
+ * decoder bitstream buffer size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_buffer_size;
+
+ /**
+ * ratecontrol override, see RcOverride
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ int rc_override_count;
+ RcOverride* rc_override;
+
+ /**
+ * maximum bitrate
+ * - encoding: Set by user.
+ * - decoding: Set by user, may be overwritten by libavcodec.
+ */
+ int64_t rc_max_rate;
+
+ /**
+ * minimum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int64_t rc_min_rate;
+
+ /**
+ * Ratecontrol: attempt to use, at most, <value> of what can be used without
+ * an underflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_max_available_vbv_use;
+
+ /**
+ * Ratecontrol: attempt to use, at least, <value> times the amount needed to
+ * prevent a vbv overflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_min_vbv_overflow_use;
+
+ /**
+ * Number of bits which should be loaded into the rc buffer before decoding
+ * starts.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_initial_buffer_occupancy;
+
+ /**
+ * trellis RD quantization
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int trellis;
+
+ /**
+ * pass1 encoding statistics output buffer
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ char* stats_out;
+
+ /**
+ * pass2 encoding statistics input buffer
+ * Concatenated stuff from stats_out of pass1 should be placed here.
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ char* stats_in;
+
+ /**
+ * Work around bugs in encoders which sometimes cannot be detected
+ * automatically.
+ * - encoding: Set by user
+ * - decoding: Set by user
+ */
+ int workaround_bugs;
+#define FF_BUG_AUTODETECT 1 ///< autodetection
+#define FF_BUG_XVID_ILACE 4
+#define FF_BUG_UMP4 8
+#define FF_BUG_NO_PADDING 16
+#define FF_BUG_AMV 32
+#define FF_BUG_QPEL_CHROMA 64
+#define FF_BUG_STD_QPEL 128
+#define FF_BUG_QPEL_CHROMA2 256
+#define FF_BUG_DIRECT_BLOCKSIZE 512
+#define FF_BUG_EDGE 1024
+#define FF_BUG_HPEL_CHROMA 2048
+#define FF_BUG_DC_CLIP 4096
+#define FF_BUG_MS \
+ 8192 ///< Work around various bugs in Microsoft's broken decoders.
+#define FF_BUG_TRUNCATED 16384
+#define FF_BUG_IEDGE 32768
+
+ /**
+ * strictly follow the standard (MPEG-4, ...).
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ * Setting this to STRICT or higher means the encoder and decoder will
+ * generally do stupid things, whereas setting it to unofficial or lower
+ * will mean the encoder might produce output that is not supported by all
+ * spec-compliant decoders. Decoders don't differentiate between normal,
+ * unofficial and experimental (that is, they always try to decode things
+ * when they can) unless they are explicitly asked to behave stupidly
+ * (=strictly conform to the specs)
+ * This may only be set to one of the FF_COMPLIANCE_* values in defs.h.
+ */
+ int strict_std_compliance;
+
+ /**
+ * error concealment flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int error_concealment;
+#define FF_EC_GUESS_MVS 1
+#define FF_EC_DEBLOCK 2
+#define FF_EC_FAVOR_INTER 256
+
+ /**
+ * debug
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug;
+#define FF_DEBUG_PICT_INFO 1
+#define FF_DEBUG_RC 2
+#define FF_DEBUG_BITSTREAM 4
+#define FF_DEBUG_MB_TYPE 8
+#define FF_DEBUG_QP 16
+#define FF_DEBUG_DCT_COEFF 0x00000040
+#define FF_DEBUG_SKIP 0x00000080
+#define FF_DEBUG_STARTCODE 0x00000100
+#define FF_DEBUG_ER 0x00000400
+#define FF_DEBUG_MMCO 0x00000800
+#define FF_DEBUG_BUGS 0x00001000
+#define FF_DEBUG_BUFFERS 0x00008000
+#define FF_DEBUG_THREADS 0x00010000
+#define FF_DEBUG_GREEN_MD 0x00800000
+#define FF_DEBUG_NOMC 0x01000000
+
+ /**
+ * Error recognition; may misdetect some more or less valid parts as errors.
+ * This is a bitfield of the AV_EF_* values defined in defs.h.
+ *
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int err_recognition;
+
+#if FF_API_REORDERED_OPAQUE
+ /**
+ * opaque 64-bit number (generally a PTS) that will be reordered and
+ * output in AVFrame.reordered_opaque
+ * - encoding: Set by libavcodec to the reordered_opaque of the input
+ * frame corresponding to the last returned packet. Only
+ * supported by encoders with the
+ * AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability.
+ * - decoding: Set by user.
+ *
+ * @deprecated Use AV_CODEC_FLAG_COPY_OPAQUE instead
+ */
+ attribute_deprecated int64_t reordered_opaque;
+#endif
+
+ /**
+ * Hardware accelerator in use
+ * - encoding: unused.
+ * - decoding: Set by libavcodec
+ */
+ const struct AVHWAccel* hwaccel;
+
+ /**
+ * Legacy hardware accelerator context.
+ *
+ * For some hardware acceleration methods, the caller may use this field to
+ * signal hwaccel-specific data to the codec. The struct pointed to by this
+ * pointer is hwaccel-dependent and defined in the respective header. Please
+ * refer to the FFmpeg HW accelerator documentation to know how to fill
+ * this.
+ *
+ * In most cases this field is optional - the necessary information may also
+ * be provided to libavcodec through @ref hw_frames_ctx or @ref
+ * hw_device_ctx (see avcodec_get_hw_config()). However, in some cases it
+ * may be the only method of signalling some (optional) information.
+ *
+ * The struct and its contents are owned by the caller.
+ *
+ * - encoding: May be set by the caller before avcodec_open2(). Must remain
+ * valid until avcodec_free_context().
+ * - decoding: May be set by the caller in the get_format() callback.
+ * Must remain valid until the next get_format() call,
+ * or avcodec_free_context() (whichever comes first).
+ */
+ void* hwaccel_context;
+
+ /**
+ * error
+ * - encoding: Set by libavcodec if flags & AV_CODEC_FLAG_PSNR.
+ * - decoding: unused
+ */
+ uint64_t error[AV_NUM_DATA_POINTERS];
+
+ /**
+ * DCT algorithm, see FF_DCT_* below
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dct_algo;
+#define FF_DCT_AUTO 0
+#define FF_DCT_FASTINT 1
+#define FF_DCT_INT 2
+#define FF_DCT_MMX 3
+#define FF_DCT_ALTIVEC 5
+#define FF_DCT_FAAN 6
+
+ /**
+ * IDCT algorithm, see FF_IDCT_* below.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int idct_algo;
+#define FF_IDCT_AUTO 0
+#define FF_IDCT_INT 1
+#define FF_IDCT_SIMPLE 2
+#define FF_IDCT_SIMPLEMMX 3
+#define FF_IDCT_ARM 7
+#define FF_IDCT_ALTIVEC 8
+#define FF_IDCT_SIMPLEARM 10
+#define FF_IDCT_XVID 14
+#define FF_IDCT_SIMPLEARMV5TE 16
+#define FF_IDCT_SIMPLEARMV6 17
+#define FF_IDCT_FAAN 20
+#define FF_IDCT_SIMPLENEON 22
+#if FF_API_IDCT_NONE
+// formerly used by xvmc
+# define FF_IDCT_NONE 24
+#endif
+#define FF_IDCT_SIMPLEAUTO 128
+
+ /**
+ * bits per sample/pixel from the demuxer (needed for huffyuv).
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * Bits per sample/pixel of internal libavcodec pixel/sample format.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ int bits_per_raw_sample;
+
+ /**
+ * low resolution decoding, 1 -> 1/2 size, 2 -> 1/4 size
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int lowres;
+
+ /**
+ * thread count
+ * is used to decide how many independent tasks should be passed to execute()
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_count;
+
+ /**
+ * Which multithreading methods to use.
+ * Use of FF_THREAD_FRAME will increase decoding delay by one frame per
+ * thread, so clients which cannot provide future frames should not use it.
+ *
+ * - encoding: Set by user, otherwise the default is used.
+ * - decoding: Set by user, otherwise the default is used.
+ */
+ int thread_type;
+#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once
+#define FF_THREAD_SLICE \
+ 2 ///< Decode more than one part of a single frame at once
+
+ /**
+ * Which multithreading methods are in use by the codec.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int active_thread_type;
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation;
+ * the default implementation will execute the parts serially.
+ * @param count the number of things to execute
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute)(struct AVCodecContext* c,
+ int (*func)(struct AVCodecContext* c2, void* arg), void* arg2,
+ int* ret, int count, int size);
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation;
+ * the default implementation will execute the parts serially.
+ * @param c context passed also to func
+ * @param count the number of things to execute
+ * @param arg2 argument passed unchanged to func
+ * @param ret return values of executed functions, must have space for "count"
+ * values. May be NULL.
+ * @param func function that will be called count times, with jobnr from 0 to
+ * count-1. threadnr will be in the range 0 to c->thread_count-1 (which is
+ * < MAX_THREADS), and no two instances of func executing at the same time
+ * will have the same threadnr.
+ * @return always 0 currently, but code should handle a future improvement
+ * where, when any call to func returns < 0, no further calls to func may be
+ * made and < 0 is returned.
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute2)(struct AVCodecContext* c,
+ int (*func)(struct AVCodecContext* c2, void* arg, int jobnr,
+ int threadnr),
+ void* arg2, int* ret, int count);
+
+ /**
+ * noise vs. sse weight for the nsse comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int nsse_weight;
+
+ /**
+ * profile
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int profile;
+#define FF_PROFILE_UNKNOWN -99
+#define FF_PROFILE_RESERVED -100
+
+#define FF_PROFILE_AAC_MAIN 0
+#define FF_PROFILE_AAC_LOW 1
+#define FF_PROFILE_AAC_SSR 2
+#define FF_PROFILE_AAC_LTP 3
+#define FF_PROFILE_AAC_HE 4
+#define FF_PROFILE_AAC_HE_V2 28
+#define FF_PROFILE_AAC_LD 22
+#define FF_PROFILE_AAC_ELD 38
+#define FF_PROFILE_MPEG2_AAC_LOW 128
+#define FF_PROFILE_MPEG2_AAC_HE 131
+
+#define FF_PROFILE_DNXHD 0
+#define FF_PROFILE_DNXHR_LB 1
+#define FF_PROFILE_DNXHR_SQ 2
+#define FF_PROFILE_DNXHR_HQ 3
+#define FF_PROFILE_DNXHR_HQX 4
+#define FF_PROFILE_DNXHR_444 5
+
+#define FF_PROFILE_DTS 20
+#define FF_PROFILE_DTS_ES 30
+#define FF_PROFILE_DTS_96_24 40
+#define FF_PROFILE_DTS_HD_HRA 50
+#define FF_PROFILE_DTS_HD_MA 60
+#define FF_PROFILE_DTS_EXPRESS 70
+
+#define FF_PROFILE_MPEG2_422 0
+#define FF_PROFILE_MPEG2_HIGH 1
+#define FF_PROFILE_MPEG2_SS 2
+#define FF_PROFILE_MPEG2_SNR_SCALABLE 3
+#define FF_PROFILE_MPEG2_MAIN 4
+#define FF_PROFILE_MPEG2_SIMPLE 5
+
+#define FF_PROFILE_H264_CONSTRAINED (1 << 9) // 8+1; constraint_set1_flag
+#define FF_PROFILE_H264_INTRA (1 << 11) // 8+3; constraint_set3_flag
+
+#define FF_PROFILE_H264_BASELINE 66
+#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66 | FF_PROFILE_H264_CONSTRAINED)
+#define FF_PROFILE_H264_MAIN 77
+#define FF_PROFILE_H264_EXTENDED 88
+#define FF_PROFILE_H264_HIGH 100
+#define FF_PROFILE_H264_HIGH_10 110
+#define FF_PROFILE_H264_HIGH_10_INTRA (110 | FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_MULTIVIEW_HIGH 118
+#define FF_PROFILE_H264_HIGH_422 122
+#define FF_PROFILE_H264_HIGH_422_INTRA (122 | FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_STEREO_HIGH 128
+#define FF_PROFILE_H264_HIGH_444 144
+#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244
+#define FF_PROFILE_H264_HIGH_444_INTRA (244 | FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_CAVLC_444 44
+
+#define FF_PROFILE_VC1_SIMPLE 0
+#define FF_PROFILE_VC1_MAIN 1
+#define FF_PROFILE_VC1_COMPLEX 2
+#define FF_PROFILE_VC1_ADVANCED 3
+
+#define FF_PROFILE_MPEG4_SIMPLE 0
+#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1
+#define FF_PROFILE_MPEG4_CORE 2
+#define FF_PROFILE_MPEG4_MAIN 3
+#define FF_PROFILE_MPEG4_N_BIT 4
+#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5
+#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6
+#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7
+#define FF_PROFILE_MPEG4_HYBRID 8
+#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9
+#define FF_PROFILE_MPEG4_CORE_SCALABLE 10
+#define FF_PROFILE_MPEG4_ADVANCED_CODING 11
+#define FF_PROFILE_MPEG4_ADVANCED_CORE 12
+#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
+#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14
+#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15
+
+#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 1
+#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 2
+#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 32768
+#define FF_PROFILE_JPEG2000_DCINEMA_2K 3
+#define FF_PROFILE_JPEG2000_DCINEMA_4K 4
+
+#define FF_PROFILE_VP9_0 0
+#define FF_PROFILE_VP9_1 1
+#define FF_PROFILE_VP9_2 2
+#define FF_PROFILE_VP9_3 3
+
+#define FF_PROFILE_HEVC_MAIN 1
+#define FF_PROFILE_HEVC_MAIN_10 2
+#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3
+#define FF_PROFILE_HEVC_REXT 4
+#define FF_PROFILE_HEVC_SCC 9
+
+#define FF_PROFILE_VVC_MAIN_10 1
+#define FF_PROFILE_VVC_MAIN_10_444 33
+
+#define FF_PROFILE_AV1_MAIN 0
+#define FF_PROFILE_AV1_HIGH 1
+#define FF_PROFILE_AV1_PROFESSIONAL 2
+
+#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT 0xc0
+#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT 0xc1
+#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT 0xc2
+#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS 0xc3
+#define FF_PROFILE_MJPEG_JPEG_LS 0xf7
+
+#define FF_PROFILE_SBC_MSBC 1
+
+#define FF_PROFILE_PRORES_PROXY 0
+#define FF_PROFILE_PRORES_LT 1
+#define FF_PROFILE_PRORES_STANDARD 2
+#define FF_PROFILE_PRORES_HQ 3
+#define FF_PROFILE_PRORES_4444 4
+#define FF_PROFILE_PRORES_XQ 5
+
+#define FF_PROFILE_ARIB_PROFILE_A 0
+#define FF_PROFILE_ARIB_PROFILE_C 1
+
+#define FF_PROFILE_KLVA_SYNC 0
+#define FF_PROFILE_KLVA_ASYNC 1
+
+ /**
+ * level
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int level;
+#define FF_LEVEL_UNKNOWN -99
+
+ /**
+ * Skip loop filtering for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_loop_filter;
+
+ /**
+ * Skip IDCT/dequantization for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_idct;
+
+ /**
+ * Skip decoding for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_frame;
+
+ /**
+ * Header containing style information for text subtitles.
+ * For SUBTITLE_ASS subtitle type, it should contain the whole ASS
+ * [Script Info] and [V4+ Styles] section, plus the [Events] line and
+ * the Format line following. It shouldn't include any Dialogue line.
+ * - encoding: Set/allocated/freed by user (before avcodec_open2())
+ * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2())
+ */
+ uint8_t* subtitle_header;
+ int subtitle_header_size;
+
+ /**
+ * Audio only. The number of "priming" samples (padding) inserted by the
+ * encoder at the beginning of the audio. I.e. this number of leading
+ * decoded samples must be discarded by the caller to get the original audio
+ * without leading padding.
+ *
+ * - decoding: unused
+ * - encoding: Set by libavcodec. The timestamps on the output packets are
+ * adjusted by the encoder so that they always refer to the
+ * first sample of the data actually contained in the packet,
+ * including any added padding. E.g. if the timebase is
+ * 1/samplerate and the timestamp of the first input sample is
+ * 0, the timestamp of the first output packet will be
+ * -initial_padding.
+ */
+ int initial_padding;
+
+ /**
+ * - decoding: For codecs that store a framerate value in the compressed
+ * bitstream, the decoder may export it here. { 0, 1} when
+ * unknown.
+ * - encoding: May be used to signal the framerate of CFR content to an
+ * encoder.
+ */
+ AVRational framerate;
+
+ /**
+ * Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
+ * - encoding: unused.
+ * - decoding: Set by libavcodec before calling get_format()
+ */
+ enum AVPixelFormat sw_pix_fmt;
+
+ /**
+ * Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
+ * - encoding unused.
+ * - decoding set by user.
+ */
+ AVRational pkt_timebase;
+
+ /**
+ * AVCodecDescriptor
+ * - encoding: unused.
+ * - decoding: set by libavcodec.
+ */
+ const AVCodecDescriptor* codec_descriptor;
+
+ /**
+ * Current statistics for PTS correction.
+ * - decoding: maintained and used by libavcodec, not intended to be used by
+ * user apps
+ * - encoding: unused
+ */
+ int64_t
+ pts_correction_num_faulty_pts; ///< Number of incorrect PTS values so far
+ int64_t
+ pts_correction_num_faulty_dts; ///< Number of incorrect DTS values so far
+ int64_t pts_correction_last_pts; ///< PTS of the last frame
+ int64_t pts_correction_last_dts; ///< DTS of the last frame
+
+ /**
+ * Character encoding of the input subtitles file.
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ char* sub_charenc;
+
+ /**
+ * Subtitles character encoding mode. Formats or codecs might be adjusting
+ * this setting (if they are doing the conversion themselves for instance).
+ * - decoding: set by libavcodec
+ * - encoding: unused
+ */
+ int sub_charenc_mode;
+#define FF_SUB_CHARENC_MODE_DO_NOTHING \
+ -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8,
+ ///< or the codec is bitmap for instance)
+#define FF_SUB_CHARENC_MODE_AUTOMATIC \
+ 0 ///< libavcodec will select the mode itself
+#define FF_SUB_CHARENC_MODE_PRE_DECODER \
+ 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the
+ ///< decoder, requires iconv
+#define FF_SUB_CHARENC_MODE_IGNORE \
+ 2 ///< neither convert the subtitles, nor check them for valid UTF-8
+
+ /**
+ * Skip processing alpha if supported by codec.
+ * Note that if the format uses pre-multiplied alpha (common with VP6,
+ * and recommended due to better video quality/compression)
+ * the image will look as if alpha-blended onto a black background.
+ * However for formats that do not use pre-multiplied alpha
+ * there might be serious artefacts (though e.g. libswscale currently
+ * assumes pre-multiplied alpha anyway).
+ *
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ int skip_alpha;
+
+ /**
+ * Number of samples to skip after a discontinuity
+ * - decoding: unused
+ * - encoding: set by libavcodec
+ */
+ int seek_preroll;
+
+ /**
+ * custom intra quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: unused.
+ */
+ uint16_t* chroma_intra_matrix;
+
+ /**
+ * dump format separator.
+ * can be ", " or "\n " or anything else
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ uint8_t* dump_separator;
+
+ /**
+ * ',' separated list of allowed decoders.
+ * If NULL then all are allowed
+ * - encoding: unused
+ * - decoding: set by user
+ */
+ char* codec_whitelist;
+
+ /**
+ * Properties of the stream that gets decoded
+ * - encoding: unused
+ * - decoding: set by libavcodec
+ */
+ unsigned properties;
+#define FF_CODEC_PROPERTY_LOSSLESS 0x00000001
+#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002
+#define FF_CODEC_PROPERTY_FILM_GRAIN 0x00000004
+
+ /**
+ * Additional data associated with the entire coded stream.
+ *
+ * - decoding: unused
+ * - encoding: may be set by libavcodec after avcodec_open2().
+ */
+ AVPacketSideData* coded_side_data;
+ int nb_coded_side_data;
+
+ /**
+ * A reference to the AVHWFramesContext describing the input (for encoding)
+ * or output (decoding) frames. The reference is set by the caller and
+ * afterwards owned (and freed) by libavcodec - it should never be read by
+ * the caller after being set.
+ *
+ * - decoding: This field should be set by the caller from the get_format()
+ * callback. The previous reference (if any) will always be
+ * unreffed by libavcodec before the get_format() call.
+ *
+ * If the default get_buffer2() is used with a hwaccel pixel
+ * format, then this AVHWFramesContext will be used for
+ * allocating the frame buffers.
+ *
+ * - encoding: For hardware encoders configured to use a hwaccel pixel
+ * format, this field should be set by the caller to a reference
+ * to the AVHWFramesContext describing input frames.
+ * AVHWFramesContext.format must be equal to
+ * AVCodecContext.pix_fmt.
+ *
+ * This field should be set before avcodec_open2() is called.
+ */
+ AVBufferRef* hw_frames_ctx;
+
+ /**
+ * Audio only. The amount of padding (in samples) appended by the encoder to
+ * the end of the audio. I.e. this number of decoded samples must be
+ * discarded by the caller from the end of the stream to get the original
+ * audio without any trailing padding.
+ *
+ * - decoding: unused
+ * - encoding: unused
+ */
+ int trailing_padding;
+
+ /**
+ * The maximum number of pixels per image to accept.
+ *
+ * - decoding: set by user
+ * - encoding: set by user
+ */
+ int64_t max_pixels;
+
+ /**
+ * A reference to the AVHWDeviceContext describing the device which will
+ * be used by a hardware encoder/decoder. The reference is set by the
+ * caller and afterwards owned (and freed) by libavcodec.
+ *
+ * This should be used if either the codec device does not require
+ * hardware frames or any that are used are to be allocated internally by
+ * libavcodec. If the user wishes to supply any of the frames used as
+ * encoder input or decoder output then hw_frames_ctx should be used
+ * instead. When hw_frames_ctx is set in get_format() for a decoder, this
+ * field will be ignored while decoding the associated stream segment, but
+ * may again be used on a following one after another get_format() call.
+ *
+ * For both encoders and decoders this field should be set before
+ * avcodec_open2() is called and must not be written to thereafter.
+ *
+ * Note that some decoders may require this field to be set initially in
+ * order to support hw_frames_ctx at all - in that case, all frames
+ * contexts used must be created on the same device.
+ */
+ AVBufferRef* hw_device_ctx;
+
+ /**
+ * Bit set of AV_HWACCEL_FLAG_* flags, which affect hardware accelerated
+ * decoding (if active).
+ * - encoding: unused
+ * - decoding: Set by user (either before avcodec_open2(), or in the
+ * AVCodecContext.get_format callback)
+ */
+ int hwaccel_flags;
+
+ /**
+ * Video decoding only. Certain video codecs support cropping, meaning that
+ * only a sub-rectangle of the decoded frame is intended for display. This
+ * option controls how cropping is handled by libavcodec.
+ *
+ * When set to 1 (the default), libavcodec will apply cropping internally.
+ * I.e. it will modify the output frame width/height fields and offset the
+ * data pointers (only by as much as possible while preserving alignment, or
+ * by the full amount if the AV_CODEC_FLAG_UNALIGNED flag is set) so that
+ * the frames output by the decoder refer only to the cropped area. The
+ * crop_* fields of the output frames will be zero.
+ *
+ * When set to 0, the width/height fields of the output frames will be set
+ * to the coded dimensions and the crop_* fields will describe the cropping
+ * rectangle. Applying the cropping is left to the caller.
+ *
+ * @warning When hardware acceleration with opaque output frames is used,
+ * libavcodec is unable to apply cropping from the top/left border.
+ *
+ * @note when this option is set to zero, the width/height fields of the
+ * AVCodecContext and output AVFrames have different meanings. The codec
+ * context fields store display dimensions (with the coded dimensions in
+ * coded_width/height), while the frame fields store the coded dimensions
+ * (with the display dimensions being determined by the crop_* fields).
+ */
+ int apply_cropping;
+
+ /**
+ * Video decoding only. Sets the number of extra hardware frames which
+ * the decoder will allocate for use by the caller. This must be set
+ * before avcodec_open2() is called.
+ *
+ * Some hardware decoders require all frames that they will use for
+ * output to be defined in advance before decoding starts. For such
+ * decoders, the hardware frame pool must therefore be of a fixed size.
+ * The extra frames set here are on top of any number that the decoder
+ * needs internally in order to operate normally (for example, frames
+ * used as reference pictures).
+ */
+ int extra_hw_frames;
+
+ /**
+ * The percentage of damaged samples above which a frame is discarded.
+ *
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ int discard_damaged_percentage;
+
+ /**
+ * The maximum number of samples per frame to accept.
+ *
+ * - decoding: set by user
+ * - encoding: set by user
+ */
+ int64_t max_samples;
+
+ /**
+ * Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of
+ * metadata exported in frame, packet, or coded stream side data by
+ * decoders and encoders.
+ *
+ * - decoding: set by user
+ * - encoding: set by user
+ */
+ int export_side_data;
+
+ /**
+ * This callback is called at the beginning of each packet to get a data
+ * buffer for it.
+ *
+ * The following field will be set in the packet before this callback is
+ * called:
+ * - size
+ * This callback must use the above value to calculate the required buffer
+ * size, which must be padded by at least AV_INPUT_BUFFER_PADDING_SIZE bytes.
+ *
+ * In some specific cases, the encoder may not use the entire buffer allocated
+ * by this callback. This will be reflected in the size value in the packet
+ * once returned by avcodec_receive_packet().
+ *
+ * This callback must fill the following fields in the packet:
+ * - data: alignment requirements for AVPacket apply, if any. Some
+ * architectures and encoders may benefit from having aligned data.
+ * - buf: must contain a pointer to an AVBufferRef structure. The packet's
+ * data pointer must be contained in it. See: av_buffer_create(),
+ * av_buffer_alloc(), and av_buffer_ref().
+ *
+ * If AV_CODEC_CAP_DR1 is not set then get_encode_buffer() must call
+ * avcodec_default_get_encode_buffer() instead of providing a buffer allocated
+ * by some other means.
+ *
+ * The flags field may contain a combination of AV_GET_ENCODE_BUFFER_FLAG_
+ * flags. They may be used for example to hint what use the buffer may get
+ * after being created. Implementations of this callback may ignore flags they
+ * don't understand. If AV_GET_ENCODE_BUFFER_FLAG_REF is set in flags then the
+ * packet may be reused (read and/or written to if it is writable) later by
+ * libavcodec.
+ *
+ * This callback must be thread-safe, as when frame threading is used, it may
+ * be called from multiple threads simultaneously.
+ *
+ * @see avcodec_default_get_encode_buffer()
+ *
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: unused
+ */
+ int (*get_encode_buffer)(struct AVCodecContext* s, AVPacket* pkt, int flags);
+
+ /**
+ * Audio channel layout.
+ * - encoding: must be set by the caller, to one of AVCodec.ch_layouts.
+ * - decoding: may be set by the caller if known e.g. from the container.
+ * The decoder can then override during decoding as needed.
+ */
+ AVChannelLayout ch_layout;
+
+ /**
+ * Frame counter, set by libavcodec.
+ *
+ * - decoding: total number of frames returned from the decoder so far.
+ * - encoding: total number of frames passed to the encoder so far.
+ *
+ * @note the counter is not incremented if encoding/decoding resulted in
+ * an error.
+ */
+ int64_t frame_num;
+} AVCodecContext;
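+
+/**
+ * A minimal configuration sketch for the fields above, assuming `codec` is an
+ * AVCodec obtained elsewhere (e.g. with avcodec_find_decoder()) and
+ * `stream_time_base` holds the time base of the packets that will be fed to
+ * the decoder:
+ * @code
+ * AVCodecContext *avctx = avcodec_alloc_context3(codec);
+ * if (!avctx)
+ *     exit(1);
+ * avctx->thread_count = 0;  // 0 lets libavcodec pick a thread count
+ * avctx->thread_type  = FF_THREAD_FRAME | FF_THREAD_SLICE;
+ * avctx->pkt_timebase = stream_time_base;
+ * if (avcodec_open2(avctx, codec, NULL) < 0)
+ *     exit(1);
+ * @endcode
+ */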
+
+/**
+ * @defgroup lavc_hwaccel AVHWAccel
+ *
+ * @note Nothing in this structure should be accessed by the user. At some
+ * point in future it will not be externally visible at all.
+ *
+ * @{
+ */
+typedef struct AVHWAccel {
+ /**
+ * Name of the hardware accelerated codec.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ */
+ const char* name;
+
+ /**
+ * Type of codec implemented by the hardware accelerator.
+ *
+ * See AVMEDIA_TYPE_xxx
+ */
+ enum AVMediaType type;
+
+ /**
+ * Codec implemented by the hardware accelerator.
+ *
+ * See AV_CODEC_ID_xxx
+ */
+ enum AVCodecID id;
+
+ /**
+ * Supported pixel format.
+ *
+ * Only hardware accelerated formats are supported here.
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * Hardware accelerated codec capabilities.
+ * see AV_HWACCEL_CODEC_CAP_*
+ */
+ int capabilities;
+
+ /*****************************************************************
+ * No fields below this line are part of the public API. They
+ * may not be used outside of libavcodec and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+
+ /**
+ * Allocate a custom buffer
+ */
+ int (*alloc_frame)(AVCodecContext* avctx, AVFrame* frame);
+
+ /**
+ * Called at the beginning of each frame or field picture.
+ *
+ * Meaningful frame information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * Note that buf can be NULL along with buf_size set to 0.
+ * Otherwise (buf is not NULL), the whole frame is available at this point.
+ *
+ * @param avctx the codec context
+ * @param buf the frame data buffer base
+ * @param buf_size the size of the frame in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*start_frame)(AVCodecContext* avctx, const uint8_t* buf,
+ uint32_t buf_size);
+
+ /**
+ * Callback for parameter data (SPS/PPS/VPS etc).
+ *
+ * Useful for hardware decoders which keep persistent state about the
+ * video parameters, and need to receive any changes to update that state.
+ *
+ * @param avctx the codec context
+ * @param type the nal unit type
+ * @param buf the nal unit data buffer
+ * @param buf_size the size of the nal unit in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*decode_params)(AVCodecContext* avctx, int type, const uint8_t* buf,
+ uint32_t buf_size);
+
+ /**
+ * Callback for each slice.
+ *
+ * Meaningful slice information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @param buf the slice data buffer base
+ * @param buf_size the size of the slice in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*decode_slice)(AVCodecContext* avctx, const uint8_t* buf,
+ uint32_t buf_size);
+
+ /**
+ * Called at the end of each frame or field picture.
+ *
+ * The whole picture is parsed at this point and can now be sent
+ * to the hardware accelerator. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*end_frame)(AVCodecContext* avctx);
+
+ /**
+ * Size of per-frame hardware accelerator private data.
+ *
+ * Private data is allocated with av_mallocz() before
+ * AVCodecContext.get_buffer() and deallocated after
+ * AVCodecContext.release_buffer().
+ */
+ int frame_priv_data_size;
+
+ /**
+ * Initialize the hwaccel private data.
+ *
+ * This will be called from ff_get_format(), after hwaccel and
+ * hwaccel_context are set and the hwaccel private data in AVCodecInternal
+ * is allocated.
+ */
+ int (*init)(AVCodecContext* avctx);
+
+ /**
+ * Uninitialize the hwaccel private data.
+ *
+ * This will be called from get_format() or avcodec_close(), after hwaccel
+ * and hwaccel_context are already uninitialized.
+ */
+ int (*uninit)(AVCodecContext* avctx);
+
+ /**
+ * Size of the private data to allocate in
+ * AVCodecInternal.hwaccel_priv_data.
+ */
+ int priv_data_size;
+
+ /**
+ * Internal hwaccel capabilities.
+ */
+ int caps_internal;
+
+ /**
+ * Fill the given hw_frames context with current codec parameters. Called
+ * from get_format. Refer to avcodec_get_hw_frames_parameters() for
+ * details.
+ *
+ * This CAN be called before AVHWAccel.init is called, and you must assume
+ * that avctx->hwaccel_priv_data is invalid.
+ */
+ int (*frame_params)(AVCodecContext* avctx, AVBufferRef* hw_frames_ctx);
+} AVHWAccel;
+
+/**
+ * HWAccel is experimental and is thus avoided in favor of non experimental
+ * codecs
+ */
+#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200
+
+/**
+ * Hardware acceleration should be used for decoding even if the codec level
+ * used is unknown or higher than the maximum supported level reported by the
+ * hardware driver.
+ *
+ * It's generally a good idea to pass this flag unless you have a specific
+ * reason not to, as hardware tends to under-report supported levels.
+ */
+#define AV_HWACCEL_FLAG_IGNORE_LEVEL (1 << 0)
+
+/**
+ * Hardware acceleration can output YUV pixel formats with a different chroma
+ * sampling than 4:2:0 and/or other than 8 bits per component.
+ */
+#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH (1 << 1)
+
+/**
+ * Hardware acceleration should still be attempted for decoding when the
+ * codec profile does not match the reported capabilities of the hardware.
+ *
+ * For example, this can be used to try to decode baseline profile H.264
+ * streams in hardware - it will often succeed, because many streams marked
+ * as baseline profile actually conform to constrained baseline profile.
+ *
+ * @warning If the stream is actually not supported then the behaviour is
+ * undefined, and may include returning entirely incorrect output
+ * while indicating success.
+ */
+#define AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH (1 << 2)
+
+/**
+ * Some hardware decoders (namely nvdec) can either output direct decoder
+ * surfaces, or make an on-device copy and return said copy.
+ * There is a hard limit on how many decoder surfaces there can be, and it
+ * cannot be accurately guessed ahead of time.
+ * For some processing chains, this can be okay, but others will run into the
+ * limit and in turn produce very confusing errors that require fine tuning of
+ * more or less obscure options by the user, or in extreme cases cannot be
+ * resolved at all without inserting an avfilter that forces a copy.
+ *
+ * Thus, the hwaccel will by default make a copy for safety and resilience.
+ * If a user really wants to minimize the amount of copies, they can set this
+ * flag and ensure their processing chain does not exhaust the surface pool.
+ */
+#define AV_HWACCEL_FLAG_UNSAFE_OUTPUT (1 << 3)
+
+/**
+ * @}
+ */
+
+enum AVSubtitleType {
+ SUBTITLE_NONE,
+
+ SUBTITLE_BITMAP, ///< A bitmap, pict will be set
+
+ /**
+ * Plain text, the text field must be set by the decoder and is
+ * authoritative. ass and pict fields may contain approximations.
+ */
+ SUBTITLE_TEXT,
+
+ /**
+ * Formatted text, the ass field must be set by the decoder and is
+ * authoritative. pict and text fields may contain approximations.
+ */
+ SUBTITLE_ASS,
+};
+
+#define AV_SUBTITLE_FLAG_FORCED 0x00000001
+
+typedef struct AVSubtitleRect {
+ int x; ///< top left corner of pict, undefined when pict is not set
+ int y; ///< top left corner of pict, undefined when pict is not set
+ int w; ///< width of pict, undefined when pict is not set
+ int h; ///< height of pict, undefined when pict is not set
+ int nb_colors; ///< number of colors in pict, undefined when pict is not set
+
+ /**
+ * data+linesize for the bitmap of this subtitle.
+ * Can be set for text/ass as well once they are rendered.
+ */
+ uint8_t* data[4];
+ int linesize[4];
+
+ enum AVSubtitleType type;
+
+ char* text; ///< 0 terminated plain UTF-8 text
+
+ /**
+ * 0 terminated ASS/SSA compatible event line.
+ * The presentation of this is unaffected by the other values in this
+ * struct.
+ */
+ char* ass;
+
+ int flags;
+} AVSubtitleRect;
+
+typedef struct AVSubtitle {
+ uint16_t format; /* 0 = graphics */
+ uint32_t start_display_time; /* relative to packet pts, in ms */
+ uint32_t end_display_time; /* relative to packet pts, in ms */
+ unsigned num_rects;
+ AVSubtitleRect** rects;
+ int64_t pts; ///< Same as packet pts, in AV_TIME_BASE
+} AVSubtitle;
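+
+/**
+ * A sketch of iterating a decoded subtitle, assuming `sub` was filled by
+ * avcodec_decode_subtitle2() and the got_sub_ptr output was nonzero:
+ * @code
+ * for (unsigned i = 0; i < sub.num_rects; i++) {
+ *     const AVSubtitleRect *rect = sub.rects[i];
+ *     if (rect->type == SUBTITLE_ASS && rect->ass) {
+ *         // rect->ass is a 0 terminated ASS/SSA event line
+ *     } else if (rect->type == SUBTITLE_BITMAP) {
+ *         // rect->data[0] / rect->linesize[0] describe the bitmap
+ *     }
+ * }
+ * avsubtitle_free(&sub);
+ * @endcode
+ */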
+
+/**
+ * Return the LIBAVCODEC_VERSION_INT constant.
+ */
+unsigned avcodec_version(void);
+
+/**
+ * Return the libavcodec build-time configuration.
+ */
+const char* avcodec_configuration(void);
+
+/**
+ * Return the libavcodec license.
+ */
+const char* avcodec_license(void);
+
+/**
+ * Allocate an AVCodecContext and set its fields to default values. The
+ * resulting struct should be freed with avcodec_free_context().
+ *
+ * @param codec if non-NULL, allocate private data and initialize defaults
+ * for the given codec. It is illegal to then call avcodec_open2()
+ * with a different codec.
+ * If NULL, then the codec-specific defaults won't be initialized,
+ * which may result in suboptimal default settings (this is
+ * important mainly for encoders, e.g. libx264).
+ *
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ */
+AVCodecContext* avcodec_alloc_context3(const AVCodec* codec);
+
+/**
+ * Free the codec context and everything associated with it and write NULL to
+ * the provided pointer.
+ */
+void avcodec_free_context(AVCodecContext** avctx);
+
+/**
+ * Get the AVClass for AVCodecContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass* avcodec_get_class(void);
+
+/**
+ * Get the AVClass for AVSubtitleRect. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass* avcodec_get_subtitle_rect_class(void);
+
+/**
+ * Fill the parameters struct based on the values from the supplied codec
+ * context. Any allocated fields in par are freed and replaced with duplicates
+ * of the corresponding fields in codec.
+ *
+ * @return >= 0 on success, a negative AVERROR code on failure
+ */
+int avcodec_parameters_from_context(AVCodecParameters* par,
+ const AVCodecContext* codec);
+
+/**
+ * Fill the codec context based on the values from the supplied codec
+ * parameters. Any allocated fields in codec that have a corresponding field in
+ * par are freed and replaced with duplicates of the corresponding field in par.
+ * Fields in codec that do not have a counterpart in par are not touched.
+ *
+ * @return >= 0 on success, a negative AVERROR code on failure.
+ */
+int avcodec_parameters_to_context(AVCodecContext* codec,
+ const AVCodecParameters* par);
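+
+/**
+ * A typical decoder setup sketch built on this function, assuming `par` is an
+ * AVCodecParameters taken e.g. from a demuxed stream (error handling reduced
+ * to freeing the context):
+ * @code
+ * const AVCodec *dec = avcodec_find_decoder(par->codec_id);
+ * AVCodecContext *avctx = dec ? avcodec_alloc_context3(dec) : NULL;
+ * if (!avctx ||
+ *     avcodec_parameters_to_context(avctx, par) < 0 ||
+ *     avcodec_open2(avctx, dec, NULL) < 0) {
+ *     avcodec_free_context(&avctx);
+ *     // handle the error
+ * }
+ * @endcode
+ */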
+
+/**
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated with avcodec_alloc_context3().
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @note Always call this function before using decoding routines (such as
+ * @ref avcodec_receive_frame()).
+ *
+ * @code
+ * AVDictionary *opts = NULL;
+ * av_dict_set(&opts, "b", "2.5M", 0);
+ * codec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ * if (!codec)
+ * exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open2(context, codec, &opts) < 0)
+ * exit(1);
+ * @endcode
+ *
+ * @param avctx The context to initialize.
+ * @param codec The codec to open this context for. If a non-NULL codec has
+ * been previously passed to avcodec_alloc_context3() for this
+ * context, then this parameter MUST be either NULL or equal to
+ * the previously passed codec.
+ * @param options A dictionary filled with AVCodecContext and codec-private
+ * options. On return this object will be filled with options that were not
+ * found.
+ *
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3(), avcodec_find_decoder(),
+ * avcodec_find_encoder(), av_dict_set(), av_opt_find().
+ */
+int avcodec_open2(AVCodecContext* avctx, const AVCodec* codec,
+ AVDictionary** options);
+
+/**
+ * Close a given AVCodecContext and free all the data associated with it
+ * (but not the AVCodecContext itself).
+ *
+ * Calling this function on an AVCodecContext that hasn't been opened will free
+ * the codec-specific data allocated in avcodec_alloc_context3() with a non-NULL
+ * codec. Subsequent calls will do nothing.
+ *
+ * @note Do not use this function. Use avcodec_free_context() to destroy a
+ * codec context (either open or closed). Opening and closing a codec context
+ * multiple times is not supported anymore -- use multiple codec contexts
+ * instead.
+ */
+int avcodec_close(AVCodecContext* avctx);
+
+/**
+ * Free all allocated data in the given subtitle struct.
+ *
+ * @param sub AVSubtitle to free.
+ */
+void avsubtitle_free(AVSubtitle* sub);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_decoding
+ * @{
+ */
+
+/**
+ * The default callback for AVCodecContext.get_buffer2(). It is made public so
+ * it can be called by custom get_buffer2() implementations for decoders without
+ * AV_CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_buffer2(AVCodecContext* s, AVFrame* frame, int flags);
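+
+/**
+ * A sketch of a custom get_buffer2() callback (hypothetical name
+ * my_get_buffer2) that falls back to the default callback whenever the opened
+ * decoder lacks AV_CODEC_CAP_DR1, as the documentation above requires. It is
+ * installed with avctx->get_buffer2 = my_get_buffer2; before avcodec_open2().
+ * @code
+ * static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
+ * {
+ *     if (!(s->codec->capabilities & AV_CODEC_CAP_DR1))
+ *         return avcodec_default_get_buffer2(s, frame, flags);
+ *     // a real implementation would do its own reference-counted allocation
+ *     // here; delegating keeps this sketch valid for any decoder
+ *     return avcodec_default_get_buffer2(s, frame, flags);
+ * }
+ * @endcode
+ */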
+
+/**
+ * The default callback for AVCodecContext.get_encode_buffer(). It is made
+ * public so it can be called by custom get_encode_buffer() implementations for
+ * encoders without AV_CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_encode_buffer(AVCodecContext* s, AVPacket* pkt,
+ int flags);
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you do not use any horizontal
+ * padding.
+ *
+ * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions(AVCodecContext* s, int* width, int* height);
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you also ensure that all
+ * line sizes are a multiple of the respective linesize_align[i].
+ *
+ * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions2(AVCodecContext* s, int* width, int* height,
+ int linesize_align[AV_NUM_DATA_POINTERS]);
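+
+/**
+ * A sketch of how a custom video get_buffer2() might compute padded buffer
+ * dimensions with this helper, assuming `avctx` is the decoding context:
+ * @code
+ * int w = avctx->width, h = avctx->height;
+ * int linesize_align[AV_NUM_DATA_POINTERS];
+ * avcodec_align_dimensions2(avctx, &w, &h, linesize_align);
+ * // w and h are now large enough for the codec; each plane's linesize should
+ * // additionally be a multiple of the matching linesize_align[] entry
+ * @endcode
+ */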
+
+#ifdef FF_API_AVCODEC_CHROMA_POS
+/**
+ * Converts AVChromaLocation to swscale x/y chroma position.
+ *
+ * The positions represent the chroma (0,0) position in a coordinate system
+ * with luma (0,0) representing the origin and luma (1,1) representing 256,256.
+ *
+ * @param xpos horizontal chroma sample position
+ * @param ypos vertical chroma sample position
+ * @deprecated Use av_chroma_location_enum_to_pos() instead.
+ */
+attribute_deprecated int avcodec_enum_to_chroma_pos(int* xpos, int* ypos,
+ enum AVChromaLocation pos);
+
+/**
+ * Converts swscale x/y chroma position to AVChromaLocation.
+ *
+ * The positions represent the chroma (0,0) position in a coordinate system
+ * with luma (0,0) representing the origin and luma (1,1) representing 256,256.
+ *
+ * @param xpos horizontal chroma sample position
+ * @param ypos vertical chroma sample position
+ * @deprecated Use av_chroma_location_pos_to_enum() instead.
+ */
+attribute_deprecated enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos,
+ int ypos);
+#endif
+
+/**
+ * Decode a subtitle message.
+ * Return a negative value on error, otherwise return the number of bytes used.
+ * If no subtitle could be decompressed, got_sub_ptr is zero.
+ * Otherwise, the subtitle is stored in *sub.
+ * Note that AV_CODEC_CAP_DR1 is not available for subtitle codecs. This is for
+ * simplicity, because the performance difference is expected to be negligible
+ * and reusing a get_buffer written for video codecs would probably perform
+ * badly due to a potentially very different allocation pattern.
+ *
+ * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between
+ * input and output. This means that for some packets they will not immediately
+ * produce decoded output and need to be flushed at the end of decoding to get
+ * all the decoded data. Flushing is done by calling this function with packets
+ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
+ * returning subtitles. It is safe to flush even those decoders that are not
+ * marked with AV_CODEC_CAP_DELAY, in which case no subtitles will be returned.
+ *
+ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
+ * before packets may be fed to the decoder.
+ *
+ * @param avctx the codec context
+ * @param[out] sub The preallocated AVSubtitle in which the decoded subtitle
+ * will be stored, must be freed with avsubtitle_free if *got_sub_ptr is set.
+ * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed,
+ * otherwise, it is nonzero.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ */
+int avcodec_decode_subtitle2(AVCodecContext* avctx, AVSubtitle* sub,
+ int* got_sub_ptr, const AVPacket* avpkt);
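+
+/**
+ * A sketch of draining a subtitle decoder as described above, using a blank
+ * packet (data set to NULL, size set to 0); `avctx` is assumed to be an opened
+ * subtitle decoder and errors simply stop the loop:
+ * @code
+ * AVPacket *flush_pkt = av_packet_alloc();  // data == NULL, size == 0
+ * AVSubtitle sub;
+ * int got_sub;
+ * do {
+ *     got_sub = 0;
+ *     if (avcodec_decode_subtitle2(avctx, &sub, &got_sub, flush_pkt) < 0)
+ *         break;
+ *     if (got_sub)
+ *         avsubtitle_free(&sub);
+ * } while (got_sub);
+ * av_packet_free(&flush_pkt);
+ * @endcode
+ */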
+
+/**
+ * Supply raw packet data as input to a decoder.
+ *
+ * Internally, this call will copy relevant AVCodecContext fields, which can
+ * influence decoding per-packet, and apply them when the packet is actually
+ * decoded. (For example AVCodecContext.skip_frame, which might direct the
+ * decoder to drop the frame contained by the packet sent with this function.)
+ *
+ * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE
+ * larger than the actual read bytes because some optimized bitstream
+ * readers read 32 or 64 bits at once and could read over the end.
+ *
+ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
+ * before packets may be fed to the decoder.
+ *
+ * @param avctx codec context
+ * @param[in] avpkt The input AVPacket. Usually, this will be a single video
+ * frame, or several complete audio frames.
+ * Ownership of the packet remains with the caller, and the
+ * decoder will not write to the packet. The decoder may create
+ * a reference to the packet data (or copy it if the packet is
+ * not reference-counted).
+ * Unlike with older APIs, the packet is always fully consumed,
+ * and if it contains multiple frames (e.g. some audio codecs),
+ * will require you to call avcodec_receive_frame() multiple
+ * times afterwards before you can send a new packet.
+ * It can be NULL (or an AVPacket with data set to NULL and
+ * size set to 0); in this case, it is considered a flush
+ * packet, which signals the end of the stream. Sending the
+ * first flush packet will return success. Subsequent ones are
+ * unnecessary and will return AVERROR_EOF. If the decoder
+ * still has frames buffered, it will return them after sending
+ * a flush packet.
+ *
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) input is not accepted in the current state - user
+ * must read output with avcodec_receive_frame() (once
+ * all output is read, the packet should be resent,
+ * and the call will not fail with EAGAIN).
+ * @retval AVERROR_EOF the decoder has been flushed, and no new packets
+ * can be sent to it (also returned if more than 1 flush packet is sent)
+ * @retval AVERROR(EINVAL) codec not opened, it is an encoder, or requires
+ * flush
+ * @retval AVERROR(ENOMEM) failed to add packet to internal queue, or similar
+ * @retval "another negative error code" legitimate decoding errors
+ */
+int avcodec_send_packet(AVCodecContext* avctx, const AVPacket* avpkt);
+
+/**
+ * Return decoded output data from a decoder or encoder (when the
+ * AV_CODEC_FLAG_RECON_FRAME flag is used).
+ *
+ * @param avctx codec context
+ * @param frame This will be set to a reference-counted video or audio
+ * frame (depending on the decoder type) allocated by the
+ * codec. Note that the function will always call
+ * av_frame_unref(frame) before doing anything else.
+ *
+ * @retval 0 success, a frame was returned
+ * @retval AVERROR(EAGAIN) output is not available in this state - user must
+ * try to send new input
+ * @retval AVERROR_EOF the codec has been fully flushed, and there will be
+ * no more output frames
+ * @retval AVERROR(EINVAL) codec not opened, or it is an encoder without the
+ * AV_CODEC_FLAG_RECON_FRAME flag enabled
+ * @retval AVERROR_INPUT_CHANGED current decoded frame has changed parameters
+ * with respect to first decoded frame. Applicable when flag
+ * AV_CODEC_FLAG_DROPCHANGED is set.
+ * @retval "other negative error code" legitimate decoding errors
+ */
+int avcodec_receive_frame(AVCodecContext* avctx, AVFrame* frame);
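+
+/**
+ * The usual send/receive decoding loop for one input packet, assuming `avctx`
+ * is an opened decoder, `pkt` the packet to decode (or NULL to flush) and
+ * `frame` an allocated AVFrame:
+ * @code
+ * int ret = avcodec_send_packet(avctx, pkt);
+ * if (ret < 0)
+ *     return ret;
+ * while (ret >= 0) {
+ *     ret = avcodec_receive_frame(avctx, frame);
+ *     if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ *         break;              // need more input, or fully flushed
+ *     if (ret < 0)
+ *         return ret;         // a real decoding error
+ *     // ... use frame ...
+ *     av_frame_unref(frame);
+ * }
+ * @endcode
+ */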
+
+/**
+ * Supply a raw video or audio frame to the encoder. Use
+ * avcodec_receive_packet() to retrieve buffered output packets.
+ *
+ * @param avctx codec context
+ * @param[in] frame AVFrame containing the raw audio or video frame to be
+ * encoded. Ownership of the frame remains with the caller, and the encoder will
+ * not write to the frame. The encoder may create a reference to the frame data
+ * (or copy it if the frame is not reference-counted). It can be NULL, in which
+ * case it is considered a flush packet. This signals the end of the stream. If
+ * the encoder still has packets buffered, it will return them after this call.
+ * Once flushing mode has been entered, additional flush packets are ignored,
+ * and sending frames will return AVERROR_EOF.
+ *
+ * For audio:
+ * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
+ * can have any number of samples.
+ * If it is not set, frame->nb_samples must be equal to
+ * avctx->frame_size for all frames except the last.
+ * The final frame may be smaller than avctx->frame_size.
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) input is not accepted in the current state - user
+ * must read output with avcodec_receive_packet() (once all output is read, the
+ * packet should be resent, and the call will not fail with EAGAIN).
+ * @retval AVERROR_EOF the encoder has been flushed, and no new frames can
+ * be sent to it
+ * @retval AVERROR(EINVAL) codec not opened, it is a decoder, or requires
+ * flush
+ * @retval AVERROR(ENOMEM) failed to add packet to internal queue, or similar
+ * @retval "another negative error code" legitimate encoding errors
+ */
+int avcodec_send_frame(AVCodecContext* avctx, const AVFrame* frame);
+
+/**
+ * Read encoded data from the encoder.
+ *
+ * @param avctx codec context
+ * @param avpkt This will be set to a reference-counted packet allocated by the
+ * encoder. Note that the function will always call
+ * av_packet_unref(avpkt) before doing anything else.
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) output is not available in the current state - user
+ * must try to send input
+ * @retval AVERROR_EOF the encoder has been fully flushed, and there will be
+ * no more output packets
+ * @retval AVERROR(EINVAL) codec not opened, or it is a decoder
+ * @retval "another negative error code" legitimate encoding errors
+ */
+int avcodec_receive_packet(AVCodecContext* avctx, AVPacket* avpkt);
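+
+/**
+ * The matching send/receive encoding loop, assuming `avctx` is an opened
+ * encoder, `frame` the raw frame to encode (or NULL to start flushing) and
+ * `pkt` an allocated AVPacket:
+ * @code
+ * int ret = avcodec_send_frame(avctx, frame);
+ * if (ret < 0)
+ *     return ret;
+ * while (ret >= 0) {
+ *     ret = avcodec_receive_packet(avctx, pkt);
+ *     if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ *         break;
+ *     if (ret < 0)
+ *         return ret;
+ *     // ... write pkt ...
+ *     av_packet_unref(pkt);
+ * }
+ * @endcode
+ */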
+
+/**
+ * Create and return an AVHWFramesContext with values adequate for hardware
+ * decoding. This is meant to get called from the get_format callback, and is
+ * a helper for preparing an AVHWFramesContext for AVCodecContext.hw_frames_ctx.
+ * This API is for decoding with certain hardware acceleration modes/APIs only.
+ *
+ * The returned AVHWFramesContext is not initialized. The caller must do this
+ * with av_hwframe_ctx_init().
+ *
+ * Calling this function is not a requirement, but makes it simpler to avoid
+ * codec or hardware API specific details when manually allocating frames.
+ *
+ * Alternatively to this, an API user can set AVCodecContext.hw_device_ctx,
+ * which sets up AVCodecContext.hw_frames_ctx fully automatically, and makes
+ * it unnecessary to call this function or to care about
+ * AVHWFramesContext initialization at all.
+ *
+ * There are a number of requirements for calling this function:
+ *
+ * - It must be called from get_format with the same avctx parameter that was
+ * passed to get_format. Calling it outside of get_format is not allowed, and
+ * can trigger undefined behavior.
+ * - The function is not always supported (see description of return values).
+ * Even if this function returns successfully, hwaccel initialization could
+ * fail later. (The degree to which implementations check whether the stream
+ * is actually supported varies. Some do this check only after the user's
+ * get_format callback returns.)
+ * - The hw_pix_fmt must be one of the choices suggested by get_format. If the
+ * user decides to use an AVHWFramesContext prepared with this API function,
+ * the user must return the same hw_pix_fmt from get_format.
+ * - The device_ref passed to this function must support the given hw_pix_fmt.
+ * - After calling this API function, it is the user's responsibility to
+ * initialize the AVHWFramesContext (returned by the out_frames_ref
+ * parameter), and to set AVCodecContext.hw_frames_ctx to it. If done, this must
+ * be done before returning from get_format (this is implied by the normal
+ * AVCodecContext.hw_frames_ctx API rules).
+ * - The AVHWFramesContext parameters may change every time get_format is
+ * called. Also, AVCodecContext.hw_frames_ctx is reset before get_format. So
+ * you are inherently required to go through this process again on every
+ * get_format call.
+ * - It is perfectly possible to call this function without actually using
+ * the resulting AVHWFramesContext. One use-case might be trying to reuse a
+ * previously initialized AVHWFramesContext, and calling this API function
+ * only to test whether the required frame parameters have changed.
+ * - Fields that use dynamically allocated values of any kind must not be set
+ * by the user unless setting them is explicitly allowed by the documentation.
+ * If the user sets AVHWFramesContext.free and AVHWFramesContext.user_opaque,
+ * the new free callback must call the potentially set previous free callback.
+ * This API call may set any dynamically allocated fields, including the free
+ * callback.
+ *
+ * The function will set at least the following fields on AVHWFramesContext
+ * (potentially more, depending on hwaccel API):
+ *
+ * - All fields set by av_hwframe_ctx_alloc().
+ * - Set the format field to hw_pix_fmt.
+ * - Set the sw_format field to the most suited and most versatile format. (An
+ * implication is that this will prefer generic formats over opaque formats
+ * with arbitrary restrictions, if possible.)
+ * - Set the width/height fields to the coded frame size, rounded up to the
+ * API-specific minimum alignment.
+ * - Only _if_ the hwaccel requires a pre-allocated pool: set the
+ * initial_pool_size field to the maximum number of reference surfaces possible
+ * with the codec, plus 1 surface for the user to work with (meaning the user can
+ * safely reference at most 1 decoded surface at a time), plus additional
+ * buffering introduced by frame threading. If the hwaccel does not require
+ * pre-allocation, the field is left to 0, and the decoder will allocate new
+ * surfaces on demand during decoding.
+ * - Possibly AVHWFramesContext.hwctx fields, depending on the underlying
+ * hardware API.
+ *
+ * Essentially, out_frames_ref returns the same as av_hwframe_ctx_alloc(), but
+ * with basic frame parameters set.
+ *
+ * The function is stateless, and does not change the AVCodecContext or the
+ * device_ref AVHWDeviceContext.
+ *
+ * @param avctx The context which is currently calling get_format, and which
+ * implicitly contains all state needed for filling the returned
+ * AVHWFramesContext properly.
+ * @param device_ref A reference to the AVHWDeviceContext describing the device
+ * which will be used by the hardware decoder.
+ * @param hw_pix_fmt The hwaccel format you are going to return from get_format.
+ * @param out_frames_ref On success, set to a reference to an _uninitialized_
+ * AVHWFramesContext, created from the given device_ref.
+ * Fields will be set to values required for decoding.
+ * Not changed if an error is returned.
+ * @return zero on success, a negative value on error. The following error codes
+ * have special semantics:
+ * AVERROR(ENOENT): the decoder does not support this functionality. Setup
+ * is always manual, or it is a decoder which does not
+ * support setting AVCodecContext.hw_frames_ctx at all,
+ * or it is a software format.
+ * AVERROR(EINVAL): it is known that hardware decoding is not supported for
+ * this configuration, or the device_ref is not supported
+ * for the hwaccel referenced by hw_pix_fmt.
+ */
+int avcodec_get_hw_frames_parameters(AVCodecContext* avctx,
+ AVBufferRef* device_ref,
+ enum AVPixelFormat hw_pix_fmt,
+ AVBufferRef** out_frames_ref);
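+
+/**
+ * A sketch of a get_format callback built around this helper, assuming
+ * `hw_pix_fmt` is the hwaccel pixel format the application wants and
+ * `device_ref` its AVHWDeviceContext reference; av_hwframe_ctx_init() and
+ * av_buffer_unref() come from libavutil. On any failure the callback gives up
+ * and returns AV_PIX_FMT_NONE:
+ * @code
+ * static enum AVPixelFormat get_hw_format(AVCodecContext *avctx,
+ *                                         const enum AVPixelFormat *fmts)
+ * {
+ *     for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++) {
+ *         AVBufferRef *frames = NULL;
+ *         if (*p != hw_pix_fmt)
+ *             continue;
+ *         if (avcodec_get_hw_frames_parameters(avctx, device_ref, *p,
+ *                                              &frames) < 0)
+ *             break;
+ *         if (av_hwframe_ctx_init(frames) < 0) {
+ *             av_buffer_unref(&frames);
+ *             break;
+ *         }
+ *         avctx->hw_frames_ctx = frames;
+ *         return *p;
+ *     }
+ *     return AV_PIX_FMT_NONE;  // no usable hardware format
+ * }
+ * @endcode
+ */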
+
+/**
+ * @defgroup lavc_parsing Frame parsing
+ * @{
+ */
+
+enum AVPictureStructure {
+ AV_PICTURE_STRUCTURE_UNKNOWN, ///< unknown
+ AV_PICTURE_STRUCTURE_TOP_FIELD, ///< coded as top field
+ AV_PICTURE_STRUCTURE_BOTTOM_FIELD, ///< coded as bottom field
+ AV_PICTURE_STRUCTURE_FRAME, ///< coded as frame
+};
+
+typedef struct AVCodecParserContext {
+ void* priv_data;
+ const struct AVCodecParser* parser;
+ int64_t frame_offset; /* offset of the current frame */
+ int64_t cur_offset; /* current offset
+ (incremented by each av_parser_parse()) */
+ int64_t next_frame_offset; /* offset of the next frame */
+ /* video info */
+ int pict_type; /* XXX: Put it back in AVCodecContext. */
+ /**
+ * This field is used for proper frame duration computation in lavf.
+ * It signals, how much longer the frame duration of the current frame
+ * is compared to normal frame duration.
+ *
+ * frame_duration = (1 + repeat_pict) * time_base
+ *
+ * It is used by codecs like H.264 to display telecined material.
+ */
+ int repeat_pict; /* XXX: Put it back in AVCodecContext. */
+ int64_t pts; /* pts of the current frame */
+ int64_t dts; /* dts of the current frame */
+
+ /* private data */
+ int64_t last_pts;
+ int64_t last_dts;
+ int fetch_timestamp;
+
+#define AV_PARSER_PTS_NB 4
+ int cur_frame_start_index;
+ int64_t cur_frame_offset[AV_PARSER_PTS_NB];
+ int64_t cur_frame_pts[AV_PARSER_PTS_NB];
+ int64_t cur_frame_dts[AV_PARSER_PTS_NB];
+
+ int flags;
+#define PARSER_FLAG_COMPLETE_FRAMES 0x0001
+#define PARSER_FLAG_ONCE 0x0002
+/// Set if the parser has a valid file offset
+#define PARSER_FLAG_FETCHED_OFFSET 0x0004
+#define PARSER_FLAG_USE_CODEC_TS 0x1000
+
+ int64_t offset; ///< byte offset from starting packet start
+ int64_t cur_frame_end[AV_PARSER_PTS_NB];
+
+ /**
+ * Set by parser to 1 for key frames and 0 for non-key frames.
+ * It is initialized to -1, so if the parser doesn't set this flag,
+ * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
+ * will be used.
+ */
+ int key_frame;
+
+ // Timestamp generation support:
+ /**
+ * Synchronization point for start of timestamp generation.
+ *
+ * Set to >0 for sync point, 0 for no sync point and <0 for undefined
+ * (default).
+ *
+ * For example, this corresponds to presence of H.264 buffering period
+ * SEI message.
+ */
+ int dts_sync_point;
+
+ /**
+ * Offset of the current timestamp against last timestamp sync point in
+ * units of AVCodecContext.time_base.
+ *
+   * Set to INT_MIN when dts_sync_point is unused. Otherwise, it must
+   * contain a valid timestamp offset.
+   *
+   * Note that the timestamp of the sync point usually has a nonzero
+   * dts_ref_dts_delta, which refers to the previous sync point. The offset of
+   * the next frame after the timestamp sync point will usually be 1.
+ *
+ * For example, this corresponds to H.264 cpb_removal_delay.
+ */
+ int dts_ref_dts_delta;
+
+ /**
+ * Presentation delay of current frame in units of AVCodecContext.time_base.
+ *
+   * Set to INT_MIN when dts_sync_point is unused. Otherwise, it must
+   * contain a valid non-negative timestamp delta (the presentation time of a
+   * frame must not lie in the past).
+ *
+ * This delay represents the difference between decoding and presentation
+ * time of the frame.
+ *
+ * For example, this corresponds to H.264 dpb_output_delay.
+ */
+ int pts_dts_delta;
+
+ /**
+ * Position of the packet in file.
+ *
+ * Analogous to cur_frame_pts/dts
+ */
+ int64_t cur_frame_pos[AV_PARSER_PTS_NB];
+
+ /**
+ * Byte position of currently parsed frame in stream.
+ */
+ int64_t pos;
+
+ /**
+ * Previous frame byte position.
+ */
+ int64_t last_pos;
+
+ /**
+ * Duration of the current frame.
+ * For audio, this is in units of 1 / AVCodecContext.sample_rate.
+ * For all other types, this is in units of AVCodecContext.time_base.
+ */
+ int duration;
+
+ enum AVFieldOrder field_order;
+
+ /**
+ * Indicate whether a picture is coded as a frame, top field or bottom field.
+ *
+ * For example, H.264 field_pic_flag equal to 0 corresponds to
+ * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag
+ * equal to 1 and bottom_field_flag equal to 0 corresponds to
+ * AV_PICTURE_STRUCTURE_TOP_FIELD.
+ */
+ enum AVPictureStructure picture_structure;
+
+ /**
+ * Picture number incremented in presentation or output order.
+ * This field may be reinitialized at the first picture of a new sequence.
+ *
+ * For example, this corresponds to H.264 PicOrderCnt.
+ */
+ int output_picture_number;
+
+ /**
+ * Dimensions of the decoded video intended for presentation.
+ */
+ int width;
+ int height;
+
+ /**
+ * Dimensions of the coded video.
+ */
+ int coded_width;
+ int coded_height;
+
+ /**
+   * The format of the coded data; corresponds to enum AVPixelFormat for video
+   * and to enum AVSampleFormat for audio.
+ *
+ * Note that a decoder can have considerable freedom in how exactly it
+ * decodes the data, so the format reported here might be different from the
+ * one returned by a decoder.
+ */
+ int format;
+} AVCodecParserContext;
+
+typedef struct AVCodecParser {
+ int codec_ids[7]; /* several codec IDs are permitted */
+ int priv_data_size;
+ int (*parser_init)(AVCodecParserContext* s);
+  /* This callback never returns an error; a negative value means that
+   * the frame start was in a previous packet. */
+ int (*parser_parse)(AVCodecParserContext* s, AVCodecContext* avctx,
+ const uint8_t** poutbuf, int* poutbuf_size,
+ const uint8_t* buf, int buf_size);
+ void (*parser_close)(AVCodecParserContext* s);
+ int (*split)(AVCodecContext* avctx, const uint8_t* buf, int buf_size);
+} AVCodecParser;
+
+/**
+ * Iterate over all registered codec parsers.
+ *
+ * @param opaque a pointer where libavcodec will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the next registered codec parser or NULL when the iteration is
+ * finished
+ */
+const AVCodecParser* av_parser_iterate(void** opaque);
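+
+/*
+ * For illustration, iteration follows the usual opaque-state pattern, e.g. to
+ * walk the parsers compiled into a given build:
+ *
+ * @code
+ * void *iter = NULL;
+ * const AVCodecParser *parser;
+ * while ((parser = av_parser_iterate(&iter))) {
+ *     // parser->codec_ids lists the codec IDs this parser handles
+ * }
+ * @endcode
+ */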
+
+AVCodecParserContext* av_parser_init(int codec_id);
+
+/**
+ * Parse a packet.
+ *
+ * @param s parser context.
+ * @param avctx codec context.
+ * @param poutbuf set to pointer to parsed buffer or NULL if not yet
+ *        finished.
+ * @param poutbuf_size set to size of parsed buffer or zero if not yet
+ *        finished.
+ * @param buf input buffer.
+ * @param buf_size buffer size in bytes without the padding. I.e. the full
+ *        buffer size is assumed to be buf_size + AV_INPUT_BUFFER_PADDING_SIZE.
+ *        To signal EOF, this should be 0 (so that the last frame can be
+ *        output).
+ * @param pts input presentation timestamp.
+ * @param dts input decoding timestamp.
+ * @param pos input byte position in stream.
+ * @return the number of bytes of the input bitstream used.
+ *
+ * Example:
+ * @code
+ * while(in_len){
+ * len = av_parser_parse2(myparser, AVCodecContext, &data, &size,
+ * in_data, in_len,
+ * pts, dts, pos);
+ * in_data += len;
+ * in_len -= len;
+ *
+ * if(size)
+ * decode_frame(data, size);
+ * }
+ * @endcode
+ */
+int av_parser_parse2(AVCodecParserContext* s, AVCodecContext* avctx,
+ uint8_t** poutbuf, int* poutbuf_size, const uint8_t* buf,
+ int buf_size, int64_t pts, int64_t dts, int64_t pos);
+
+void av_parser_close(AVCodecParserContext* s);
+
+/**
+ * @}
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_encoding
+ * @{
+ */
+
+int avcodec_encode_subtitle(AVCodecContext* avctx, uint8_t* buf, int buf_size,
+ const AVSubtitle* sub);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavc_misc Utility functions
+ * @ingroup libavc
+ *
+ * Miscellaneous utility functions related to both encoding and decoding
+ * (or neither).
+ * @{
+ */
+
+/**
+ * @defgroup lavc_misc_pixfmt Pixel formats
+ *
+ * Functions for working with pixel formats.
+ * @{
+ */
+
+/**
+ * Return a value representing the fourCC code associated to the
+ * pixel format pix_fmt, or 0 if no associated fourCC code can be
+ * found.
+ */
+unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);
+
+/**
+ * Find the best pixel format to convert to given a certain source pixel
+ * format. When converting from one pixel format to another, information loss
+ * may occur. For example, when converting from RGB24 to GRAY, the color
+ * information will be lost. Similarly, other losses occur when converting from
+ * some formats to other formats. avcodec_find_best_pix_fmt_of_list() searches
+ * which of the given pixel formats should be used to suffer the least amount
+ * of loss. The pixel formats from which it chooses one are determined by the
+ * pix_fmt_list parameter.
+ *
+ *
+ * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to
+ * choose from
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[out] loss_ptr Combination of flags informing you what kind of losses
+ * will occur.
+ * @return The best pixel format to convert to or -1 if none was found.
+ */
+enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(
+ const enum AVPixelFormat* pix_fmt_list, enum AVPixelFormat src_pix_fmt,
+ int has_alpha, int* loss_ptr);
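+
+/*
+ * An illustrative sketch: choose between two candidate formats for an RGBA
+ * source whose alpha channel matters (the candidate formats here are
+ * arbitrary):
+ *
+ * @code
+ * const enum AVPixelFormat candidates[] = { AV_PIX_FMT_YUV420P,
+ *                                           AV_PIX_FMT_RGB24,
+ *                                           AV_PIX_FMT_NONE };
+ * int loss = 0;
+ * enum AVPixelFormat best =
+ *     avcodec_find_best_pix_fmt_of_list(candidates, AV_PIX_FMT_RGBA, 1, &loss);
+ * // loss now carries FF_LOSS_* flags describing what the conversion gives up
+ * @endcode
+ */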
+
+enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext* s,
+ const enum AVPixelFormat* fmt);
+
+/**
+ * @}
+ */
+
+void avcodec_string(char* buf, int buf_size, AVCodecContext* enc, int encode);
+
+int avcodec_default_execute(AVCodecContext* c,
+ int (*func)(AVCodecContext* c2, void* arg2),
+ void* arg, int* ret, int count, int size);
+int avcodec_default_execute2(AVCodecContext* c,
+ int (*func)(AVCodecContext* c2, void* arg2, int,
+ int),
+ void* arg, int* ret, int count);
+// FIXME func typedef
+
+/**
+ * Fill AVFrame audio data and linesize pointers.
+ *
+ * The buffer buf must be a preallocated buffer with a size big enough
+ * to contain the specified number of samples. The filled AVFrame data
+ * pointers will point to this buffer.
+ *
+ * AVFrame extended_data channel pointers are allocated if necessary for
+ * planar audio.
+ *
+ * @param frame the AVFrame
+ * frame->nb_samples must be set prior to calling the
+ * function. This function fills in frame->data,
+ * frame->extended_data, frame->linesize[0].
+ * @param nb_channels channel count
+ * @param sample_fmt sample format
+ * @param buf buffer to use for frame data
+ * @param buf_size size of buffer
+ * @param align plane size sample alignment (0 = default)
+ * @return >=0 on success, negative error code on failure
+ * @todo return the size in bytes required to store the samples in
+ * case of success, at the next libavutil bump
+ */
+int avcodec_fill_audio_frame(AVFrame* frame, int nb_channels,
+ enum AVSampleFormat sample_fmt, const uint8_t* buf,
+ int buf_size, int align);
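+
+/*
+ * A hedged usage sketch, assuming stereo S16 audio and 1024 samples per frame
+ * (both counts are arbitrary); allocation failures are not checked here:
+ *
+ * @code
+ * AVFrame *frame = av_frame_alloc();
+ * frame->nb_samples = 1024;
+ * int buf_size = av_samples_get_buffer_size(NULL, 2, 1024,
+ *                                           AV_SAMPLE_FMT_S16, 0);
+ * uint8_t *buf = av_malloc(buf_size);
+ * int ret = avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S16,
+ *                                    buf, buf_size, 0);
+ * // on success frame->data[] points into buf, so buf must outlive the frame
+ * @endcode
+ */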
+
+/**
+ * Reset the internal codec state / flush internal buffers. Should be called
+ * e.g. when seeking or when switching to a different stream.
+ *
+ * @note for decoders, this function just releases any references the decoder
+ * might keep internally, but the caller's references remain valid.
+ *
+ * @note for encoders, this function will only do something if the encoder
+ * declares support for AV_CODEC_CAP_ENCODER_FLUSH. When called, the encoder
+ * will drain any remaining packets, and can then be re-used for a different
+ * stream (as opposed to sending a null frame which will leave the encoder
+ * in a permanent EOF state after draining). This can be desirable if the
+ * cost of tearing down and replacing the encoder instance is high.
+ */
+void avcodec_flush_buffers(AVCodecContext* avctx);
+
+/**
+ * Return audio frame duration.
+ *
+ * @param avctx codec context
+ * @param frame_bytes size of the frame, or 0 if unknown
+ * @return frame duration, in samples, if known. 0 if not able to
+ * determine.
+ */
+int av_get_audio_frame_duration(AVCodecContext* avctx, int frame_bytes);
+
+/* memory */
+
+/**
+ * Same behaviour as av_fast_malloc() but the buffer has additional
+ * AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
+ *
+ * In addition the whole buffer will initially and after resizes
+ * be 0-initialized so that no uninitialized data will ever appear.
+ */
+void av_fast_padded_malloc(void* ptr, unsigned int* size, size_t min_size);
+
+/**
+ * Same behaviour as av_fast_padded_malloc() except that the buffer will
+ * always be 0-initialized after the call.
+ */
+void av_fast_padded_mallocz(void* ptr, unsigned int* size, size_t min_size);
+
+/**
+ * @return a positive value if s is open (i.e. avcodec_open2() was called on it
+ * with no corresponding avcodec_close()), 0 otherwise.
+ */
+int avcodec_is_open(AVCodecContext* s);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVCODEC_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avdct.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avdct.h
new file mode 100644
index 000000000000..9edf4c187e81
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avdct.h
@@ -0,0 +1,85 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVDCT_H
+#define AVCODEC_AVDCT_H
+
+#include "libavutil/opt.h"
+
+/**
+ * AVDCT context.
+ * @note function pointers can be NULL if the specific features have been
+ * disabled at build time.
+ */
+typedef struct AVDCT {
+ const AVClass* av_class;
+
+ void (*idct)(int16_t* block /* align 16 */);
+
+ /**
+ * IDCT input permutation.
+   * Several optimized IDCTs need a permuted input (relative to the
+ * normal order of the reference IDCT).
+ * This permutation must be performed before the idct_put/add.
+ * Note, normally this can be merged with the zigzag/alternate scan<br>
+ * An example to avoid confusion:
+ * - (->decode coeffs -> zigzag reorder -> dequant -> reference IDCT -> ...)
+ * - (x -> reference DCT -> reference IDCT -> x)
+ * - (x -> reference DCT -> simple_mmx_perm = idct_permutation
+ * -> simple_idct_mmx -> x)
+ * - (-> decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant
+ * -> simple_idct_mmx -> ...)
+ */
+ uint8_t idct_permutation[64];
+
+ void (*fdct)(int16_t* block /* align 16 */);
+
+ /**
+ * DCT algorithm.
+ * must use AVOptions to set this field.
+   * Must use AVOptions to set this field.
+ int dct_algo;
+
+ /**
+ * IDCT algorithm.
+   * Must use AVOptions to set this field.
+ */
+ int idct_algo;
+
+ void (*get_pixels)(int16_t* block /* align 16 */,
+ const uint8_t* pixels /* align 8 */, ptrdiff_t line_size);
+
+ int bits_per_sample;
+
+ void (*get_pixels_unaligned)(int16_t* block /* align 16 */,
+ const uint8_t* pixels, ptrdiff_t line_size);
+} AVDCT;
+
+/**
+ * Allocates an AVDCT context.
+ * This needs to be initialized with avcodec_dct_init() after optionally
+ * configuring it with AVOptions.
+ *
+ * To free it, use av_free().
+ */
+AVDCT* avcodec_dct_alloc(void);
+int avcodec_dct_init(AVDCT*);
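+
+/*
+ * A minimal sketch of the allocate/configure/init lifecycle; AVOptions
+ * configuration is omitted and error checks are reduced to the essentials:
+ *
+ * @code
+ * AVDCT *dct = avcodec_dct_alloc();
+ * if (dct && avcodec_dct_init(dct) >= 0) {
+ *     // dct->fdct / dct->idct operate on 16-byte aligned int16_t[64] blocks
+ * }
+ * av_free(dct);
+ * @endcode
+ */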
+
+const AVClass* avcodec_dct_get_class(void);
+
+#endif /* AVCODEC_AVDCT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/avfft.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avfft.h
similarity index 100%
copy from dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/avfft.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avfft.h
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/bsf.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/bsf.h
new file mode 100644
index 000000000000..044a0597bf33
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/bsf.h
@@ -0,0 +1,335 @@
+/*
+ * Bitstream filters public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_BSF_H
+#define AVCODEC_BSF_H
+
+#include "libavutil/dict.h"
+#include "libavutil/log.h"
+#include "libavutil/rational.h"
+
+#include "codec_id.h"
+#include "codec_par.h"
+#include "packet.h"
+
+/**
+ * @defgroup lavc_bsf Bitstream filters
+ * @ingroup libavc
+ *
+ * Bitstream filters transform encoded media data without decoding it. This
+ * allows e.g. manipulating various header values. Bitstream filters operate on
+ * @ref AVPacket "AVPackets".
+ *
+ * The bitstream filtering API is centered around two structures:
+ * AVBitStreamFilter and AVBSFContext. The former represents a bitstream filter
+ * in abstract, the latter a specific filtering process. Obtain an
+ * AVBitStreamFilter using av_bsf_get_by_name() or av_bsf_iterate(), then pass
+ * it to av_bsf_alloc() to create an AVBSFContext. Fill in the user-settable
+ * AVBSFContext fields, as described in its documentation, then call
+ * av_bsf_init() to prepare the filter context for use.
+ *
+ * Submit packets for filtering using av_bsf_send_packet(), obtain filtered
+ * results with av_bsf_receive_packet(). When no more input packets will be
+ * sent, submit a NULL AVPacket to signal the end of the stream to the filter.
+ * av_bsf_receive_packet() will then return trailing packets, if any are
+ * produced by the filter.
+ *
+ * Finally, free the filter context with av_bsf_free().
+ * @{
+ */
+
+/**
+ * The bitstream filter state.
+ *
+ * This struct must be allocated with av_bsf_alloc() and freed with
+ * av_bsf_free().
+ *
+ * The fields in the struct will only be changed (by the caller or by the
+ * filter) as described in their documentation, and are to be considered
+ * immutable otherwise.
+ */
+typedef struct AVBSFContext {
+ /**
+ * A class for logging and AVOptions
+ */
+ const AVClass* av_class;
+
+ /**
+ * The bitstream filter this context is an instance of.
+ */
+ const struct AVBitStreamFilter* filter;
+
+ /**
+ * Opaque filter-specific private data. If filter->priv_class is non-NULL,
+ * this is an AVOptions-enabled struct.
+ */
+ void* priv_data;
+
+ /**
+ * Parameters of the input stream. This field is allocated in
+   * av_bsf_alloc(); it needs to be filled by the caller before
+ * av_bsf_init().
+ */
+ AVCodecParameters* par_in;
+
+ /**
+ * Parameters of the output stream. This field is allocated in
+   * av_bsf_alloc(); it is set by the filter in av_bsf_init().
+ */
+ AVCodecParameters* par_out;
+
+ /**
+ * The timebase used for the timestamps of the input packets. Set by the
+ * caller before av_bsf_init().
+ */
+ AVRational time_base_in;
+
+ /**
+ * The timebase used for the timestamps of the output packets. Set by the
+ * filter in av_bsf_init().
+ */
+ AVRational time_base_out;
+} AVBSFContext;
+
+typedef struct AVBitStreamFilter {
+ const char* name;
+
+ /**
+ * A list of codec ids supported by the filter, terminated by
+ * AV_CODEC_ID_NONE.
+   * May be NULL; in that case the bitstream filter works with any codec id.
+ */
+ const enum AVCodecID* codec_ids;
+
+ /**
+ * A class for the private data, used to declare bitstream filter private
+ * AVOptions. This field is NULL for bitstream filters that do not declare
+ * any options.
+ *
+ * If this field is non-NULL, the first member of the filter private data
+ * must be a pointer to AVClass, which will be set by libavcodec generic
+ * code to this class.
+ */
+ const AVClass* priv_class;
+} AVBitStreamFilter;
+
+/**
+ * @return a bitstream filter with the specified name or NULL if no such
+ * bitstream filter exists.
+ */
+const AVBitStreamFilter* av_bsf_get_by_name(const char* name);
+
+/**
+ * Iterate over all registered bitstream filters.
+ *
+ * @param opaque a pointer where libavcodec will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the next registered bitstream filter or NULL when the iteration is
+ * finished
+ */
+const AVBitStreamFilter* av_bsf_iterate(void** opaque);
+
+/**
+ * Allocate a context for a given bitstream filter. The caller must fill in the
+ * context parameters as described in the documentation and then call
+ * av_bsf_init() before sending any data to the filter.
+ *
+ * @param filter the filter for which to allocate an instance.
+ * @param[out] ctx a pointer into which the pointer to the newly-allocated
+ * context will be written. It must be freed with av_bsf_free() after the
+ * filtering is done.
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_bsf_alloc(const AVBitStreamFilter* filter, AVBSFContext** ctx);
+
+/**
+ * Prepare the filter for use, after all the parameters and options have been
+ * set.
+ *
+ * @param ctx a AVBSFContext previously allocated with av_bsf_alloc()
+ */
+int av_bsf_init(AVBSFContext* ctx);
+
+/**
+ * Submit a packet for filtering.
+ *
+ * After sending each packet, the filter must be completely drained by calling
+ * av_bsf_receive_packet() repeatedly until it returns AVERROR(EAGAIN) or
+ * AVERROR_EOF.
+ *
+ * @param ctx an initialized AVBSFContext
+ * @param pkt the packet to filter. The bitstream filter will take ownership of
+ * the packet and reset the contents of pkt. pkt is not touched if an error
+ * occurs. If pkt is empty (i.e. NULL, or pkt->data is NULL and
+ * pkt->side_data_elems zero), it signals the end of the stream (i.e. no more
+ * non-empty packets will be sent; sending more empty packets does nothing) and
+ * will cause the filter to output any packets it may have buffered internally.
+ *
+ * @return
+ * - 0 on success.
+ * - AVERROR(EAGAIN) if packets need to be retrieved from the filter (using
+ * av_bsf_receive_packet()) before new input can be consumed.
+ * - Another negative AVERROR value if an error occurs.
+ */
+int av_bsf_send_packet(AVBSFContext* ctx, AVPacket* pkt);
+
+/**
+ * Retrieve a filtered packet.
+ *
+ * @param ctx an initialized AVBSFContext
+ * @param[out] pkt this struct will be filled with the contents of the filtered
+ * packet. It is owned by the caller and must be freed using
+ * av_packet_unref() when it is no longer needed.
+ * This parameter should be "clean" (i.e. freshly allocated
+ * with av_packet_alloc() or unreffed with av_packet_unref())
+ * when this function is called. If this function returns
+ * successfully, the contents of pkt will be completely
+ * overwritten by the returned data. On failure, pkt is not
+ * touched.
+ *
+ * @return
+ * - 0 on success.
+ * - AVERROR(EAGAIN) if more packets need to be sent to the filter (using
+ * av_bsf_send_packet()) to get more output.
+ * - AVERROR_EOF if there will be no further output from the filter.
+ * - Another negative AVERROR value if an error occurs.
+ *
+ * @note one input packet may result in several output packets, so after sending
+ * a packet with av_bsf_send_packet(), this function needs to be called
+ * repeatedly until it stops returning 0. It is also possible for a filter to
+ * output fewer packets than were sent to it, so this function may return
+ * AVERROR(EAGAIN) immediately after a successful av_bsf_send_packet() call.
+ */
+int av_bsf_receive_packet(AVBSFContext* ctx, AVPacket* pkt);
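+
+/*
+ * A minimal filtering loop, sketched under the assumption that ctx has
+ * already been through av_bsf_init() and that in/out are caller-allocated
+ * AVPackets (in may be NULL or empty to signal EOF):
+ *
+ * @code
+ * int ret = av_bsf_send_packet(ctx, in);
+ * if (ret < 0)
+ *     goto fail;
+ * while ((ret = av_bsf_receive_packet(ctx, out)) == 0) {
+ *     // use out, then release it before the next iteration
+ *     av_packet_unref(out);
+ * }
+ * if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
+ *     goto fail;
+ * @endcode
+ */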
+
+/**
+ * Reset the internal bitstream filter state. Should be called e.g. when
+ * seeking.
+ */
+void av_bsf_flush(AVBSFContext* ctx);
+
+/**
+ * Free a bitstream filter context and everything associated with it; write NULL
+ * into the supplied pointer.
+ */
+void av_bsf_free(AVBSFContext** ctx);
+
+/**
+ * Get the AVClass for AVBSFContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass* av_bsf_get_class(void);
+
+/**
+ * Structure for chain/list of bitstream filters.
+ * Empty list can be allocated by av_bsf_list_alloc().
+ */
+typedef struct AVBSFList AVBSFList;
+
+/**
+ * Allocate an empty list of bitstream filters.
+ * The list must be later freed by av_bsf_list_free()
+ * or finalized by av_bsf_list_finalize().
+ *
+ * @return Pointer to @ref AVBSFList on success, NULL in case of failure
+ */
+AVBSFList* av_bsf_list_alloc(void);
+
+/**
+ * Free a list of bitstream filters.
+ *
+ * @param lst Pointer to pointer returned by av_bsf_list_alloc()
+ */
+void av_bsf_list_free(AVBSFList** lst);
+
+/**
+ * Append a bitstream filter to the list of bitstream filters.
+ *
+ * @param lst List to append to
+ * @param bsf Filter context to be appended
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_append(AVBSFList* lst, AVBSFContext* bsf);
+
+/**
+ * Construct a new bitstream filter context given its name and options
+ * and append it to the list of bitstream filters.
+ *
+ * @param lst List to append to
+ * @param bsf_name Name of the bitstream filter
+ * @param options Options for the bitstream filter, can be set to NULL
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_append2(AVBSFList* lst, const char* bsf_name,
+ AVDictionary** options);
+/**
+ * Finalize list of bitstream filters.
+ *
+ * This function will transform the @ref AVBSFList into a single
+ * @ref AVBSFContext, so the whole chain of bitstream filters can be treated
+ * as a single filter freshly allocated by av_bsf_alloc().
+ * If the call is successful, the @ref AVBSFList structure is freed and lst
+ * will be set to NULL. In case of failure, the caller is responsible for
+ * freeing the structure with av_bsf_list_free().
+ *
+ * @param lst Filter list structure to be transformed
+ * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext
+ * structure representing the chain of bitstream filters
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_finalize(AVBSFList** lst, AVBSFContext** bsf);
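+
+/*
+ * A hedged sketch of building a chain programmatically; "null" stands in for
+ * whatever bitstream filter names are available in a given build:
+ *
+ * @code
+ * AVBSFList *lst = av_bsf_list_alloc();
+ * AVBSFContext *chain = NULL;
+ * if (lst &&
+ *     av_bsf_list_append2(lst, "null", NULL) >= 0 &&
+ *     av_bsf_list_finalize(&lst, &chain) >= 0) {
+ *     // configure chain->par_in, call av_bsf_init(chain), then filter packets
+ * }
+ * av_bsf_list_free(&lst); // lst is already NULL after a successful finalize
+ * @endcode
+ */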
+
+/**
+ * Parse a string describing a list of bitstream filters and create a single
+ * @ref AVBSFContext describing the whole chain of bitstream filters.
+ * The resulting @ref AVBSFContext can be treated as any other @ref AVBSFContext
+ * freshly allocated by av_bsf_alloc().
+ *
+ * @param str String describing chain of bitstream filters in format
+ * `bsf1[=opt1=val1:opt2=val2][,bsf2]`
+ * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext
+ * structure representing the chain of bitstream filters
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_parse_str(const char* str, AVBSFContext** bsf);
+
+/**
+ * Get the null/pass-through bitstream filter.
+ *
+ * @param[out] bsf Pointer to be set to a new instance of the pass-through
+ * bitstream filter
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_get_null_filter(AVBSFContext** bsf);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_BSF_H
diff --git a/media/ffvpx/libavcodec/codec.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec.h
similarity index 51%
copy from media/ffvpx/libavcodec/codec.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec.h
index 77a1a3f5a296..4340d8503f29 100644
--- a/media/ffvpx/libavcodec/codec.h
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec.h
@@ -41,352 +41,344 @@
/**
* Decoder can use draw_horiz_band callback.
*/
-#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0)
+#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0)
/**
* Codec uses get_buffer() or get_encode_buffer() for allocating buffers and
* supports custom allocators.
* If not set, it might not use get_buffer() or get_encode_buffer() at all, or
* use operations that assume the buffer was allocated by
* avcodec_default_get_buffer2 or avcodec_default_get_encode_buffer.
*/
-#define AV_CODEC_CAP_DR1 (1 << 1)
-#if FF_API_FLAG_TRUNCATED
-/**
- * @deprecated Use parsers to always send proper frames.
- */
-#define AV_CODEC_CAP_TRUNCATED (1 << 3)
-#endif
+#define AV_CODEC_CAP_DR1 (1 << 1)
/**
* Encoder or decoder requires flushing with NULL input at the end in order to
* give the complete and correct output.
*
 * NOTE: If this flag is not set, the codec is guaranteed to never be fed
 * with NULL data. The user can still send NULL data to the public encode
* or decode function, but libavcodec will not pass it along to the codec
* unless this flag is set.
*
* Decoders:
* The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
* avpkt->size=0 at the end to get the delayed data until the decoder no longer
* returns frames.
*
* Encoders:
* The encoder needs to be fed with NULL data at the end of encoding until the
* encoder no longer returns data.
*
* NOTE: For encoders implementing the AVCodec.encode2() function, setting this
* flag also means that the encoder must set the pts and duration for
* each output packet. If this flag is not set, the pts and duration will
* be determined by libavcodec from the input frame.
*/
-#define AV_CODEC_CAP_DELAY (1 << 5)
+#define AV_CODEC_CAP_DELAY (1 << 5)
/**
* Codec can be fed a final frame with a smaller size.
* This can be used to prevent truncation of the last audio samples.
*/
-#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6)
+#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6)
/**
 * Codec can output multiple frames per AVPacket.
 * Normally demuxers return one frame at a time; demuxers which do not do so
 * are connected to a parser to split what they return into proper frames.
 * This flag is reserved for the very rare category of codecs which have a
 * bitstream that cannot be split into frames without time-consuming
* operations like full decoding. Demuxers carrying such bitstreams thus
* may return multiple frames in a packet. This has many disadvantages like
* prohibiting stream copy in many cases thus it should only be considered
* as a last resort.
*/
-#define AV_CODEC_CAP_SUBFRAMES (1 << 8)
+#define AV_CODEC_CAP_SUBFRAMES (1 << 8)
/**
* Codec is experimental and is thus avoided in favor of non experimental
* encoders
*/
-#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9)
+#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9)
/**
- * Codec should fill in channel configuration and samplerate instead of container
+ * Codec should fill in channel configuration and samplerate instead of
+ * container
*/
-#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10)
+#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10)
/**
* Codec supports frame-level multithreading.
*/
-#define AV_CODEC_CAP_FRAME_THREADS (1 << 12)
+#define AV_CODEC_CAP_FRAME_THREADS (1 << 12)
/**
* Codec supports slice-based (or partition-based) multithreading.
*/
-#define AV_CODEC_CAP_SLICE_THREADS (1 << 13)
+#define AV_CODEC_CAP_SLICE_THREADS (1 << 13)
/**
* Codec supports changed parameters at any point.
*/
-#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14)
+#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14)
/**
* Codec supports multithreading through a method other than slice- or
* frame-level multithreading. Typically this marks wrappers around
* multithreading-capable external libraries.
*/
-#define AV_CODEC_CAP_OTHER_THREADS (1 << 15)
-#if FF_API_AUTO_THREADS
-#define AV_CODEC_CAP_AUTO_THREADS AV_CODEC_CAP_OTHER_THREADS
-#endif
+#define AV_CODEC_CAP_OTHER_THREADS (1 << 15)
/**
* Audio encoder supports receiving a different number of samples in each call.
*/
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16)
/**
* Decoder is not a preferred choice for probing.
* This indicates that the decoder is not a good choice for probing.
* It could for example be an expensive to spin up hardware decoder,
* or it could simply not provide a lot of useful information about
* the stream.
* A decoder marked with this flag should only be used as last resort
* choice for probing.
*/
-#define AV_CODEC_CAP_AVOID_PROBING (1 << 17)
-
-#if FF_API_UNUSED_CODEC_CAPS
-/**
- * Deprecated and unused. Use AVCodecDescriptor.props instead
- */
-#define AV_CODEC_CAP_INTRA_ONLY 0x40000000
-/**
- * Deprecated and unused. Use AVCodecDescriptor.props instead
- */
-#define AV_CODEC_CAP_LOSSLESS 0x80000000
-#endif
+#define AV_CODEC_CAP_AVOID_PROBING (1 << 17)
/**
* Codec is backed by a hardware implementation. Typically used to
* identify a non-hwaccel hardware decoder. For information about hwaccels, use
* avcodec_get_hw_config() instead.
*/
-#define AV_CODEC_CAP_HARDWARE (1 << 18)
+#define AV_CODEC_CAP_HARDWARE (1 << 18)
/**
* Codec is potentially backed by a hardware implementation, but not
* necessarily. This is used instead of AV_CODEC_CAP_HARDWARE, if the
* implementation provides some sort of internal fallback.
*/
-#define AV_CODEC_CAP_HYBRID (1 << 19)
+#define AV_CODEC_CAP_HYBRID (1 << 19)
/**
- * This codec takes the reordered_opaque field from input AVFrames
- * and returns it in the corresponding field in AVCodecContext after
- * encoding.
+ * This encoder can reorder user opaque values from input AVFrames and return
+ * them with corresponding output packets.
+ * @see AV_CODEC_FLAG_COPY_OPAQUE
*/
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE (1 << 20)
/**
* This encoder can be flushed using avcodec_flush_buffers(). If this flag is
* not set, the encoder must be closed and reopened to ensure that no frames
* remain pending.
*/
-#define AV_CODEC_CAP_ENCODER_FLUSH (1 << 21)
+#define AV_CODEC_CAP_ENCODER_FLUSH (1 << 21)
/**
* The encoder is able to output reconstructed frame data, i.e. raw frames that
* would be produced by decoding the encoded bitstream.
*
* Reconstructed frame output is enabled by the AV_CODEC_FLAG_RECON_FRAME flag.
*/
#define AV_CODEC_CAP_ENCODER_RECON_FRAME (1 << 22)
/**
* AVProfile.
*/
typedef struct AVProfile {
- int profile;
- const char *name; ///< short name for the profile
+ int profile;
+ const char* name; ///< short name for the profile
} AVProfile;
/**
* AVCodec.
*/
typedef struct AVCodec {
- /**
- * Name of the codec implementation.
- * The name is globally unique among encoders and among decoders (but an
- * encoder and a decoder can share the same name).
- * This is the primary way to find a codec from the user perspective.
- */
- const char *name;
- /**
- * Descriptive name for the codec, meant to be more human readable than name.
- * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
- */
- const char *long_name;
- enum AVMediaType type;
- enum AVCodecID id;
- /**
- * Codec capabilities.
- * see AV_CODEC_CAP_*
- */
- int capabilities;
- uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
- const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
- const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
- const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
- const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
+ /**
+ * Name of the codec implementation.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ * This is the primary way to find a codec from the user perspective.
+ */
+ const char* name;
+ /**
+ * Descriptive name for the codec, meant to be more human readable than name.
+ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
+ */
+ const char* long_name;
+ enum AVMediaType type;
+ enum AVCodecID id;
+ /**
+ * Codec capabilities.
+ * see AV_CODEC_CAP_*
+ */
+ int capabilities;
+ uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
+ const AVRational*
+ supported_framerates; ///< array of supported framerates, or NULL if any,
+ ///< array is terminated by {0,0}
+ const enum AVPixelFormat*
+ pix_fmts; ///< array of supported pixel formats, or NULL if unknown,
+ ///< array is terminated by -1
+ const int*
+ supported_samplerates; ///< array of supported audio samplerates, or NULL
+ ///< if unknown, array is terminated by 0
+ const enum AVSampleFormat*
+ sample_fmts; ///< array of supported sample formats, or NULL if unknown,
+ ///< array is terminated by -1
#if FF_API_OLD_CHANNEL_LAYOUT
- /**
- * @deprecated use ch_layouts instead
- */
- attribute_deprecated
- const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0
+ /**
+ * @deprecated use ch_layouts instead
+ */
+ attribute_deprecated const uint64_t*
+ channel_layouts; ///< array of support channel layouts, or NULL if
+ ///< unknown. array is terminated by 0
#endif
- const AVClass *priv_class; ///< AVClass for the private context
- const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
+ const AVClass* priv_class; ///< AVClass for the private context
+ const AVProfile*
+ profiles; ///< array of recognized profiles, or NULL if unknown, array is
+ ///< terminated by {FF_PROFILE_UNKNOWN}
- /**
- * Group name of the codec implementation.
- * This is a short symbolic name of the wrapper backing this codec. A
- * wrapper uses some kind of external implementation for the codec, such
- * as an external library, or a codec implementation provided by the OS or
- * the hardware.
- * If this field is NULL, this is a builtin, libavcodec native codec.
- * If non-NULL, this will be the suffix in AVCodec.name in most cases
- * (usually AVCodec.name will be of the form "<codec_name>_<wrapper_name>").
- */
- const char *wrapper_name;
+ /**
+ * Group name of the codec implementation.
+ * This is a short symbolic name of the wrapper backing this codec. A
+ * wrapper uses some kind of external implementation for the codec, such
+ * as an external library, or a codec implementation provided by the OS or
+ * the hardware.
+ * If this field is NULL, this is a builtin, libavcodec native codec.
+ * If non-NULL, this will be the suffix in AVCodec.name in most cases
+ * (usually AVCodec.name will be of the form "<codec_name>_<wrapper_name>").
+ */
+ const char* wrapper_name;
- /**
- * Array of supported channel layouts, terminated with a zeroed layout.
- */
- const AVChannelLayout *ch_layouts;
+ /**
+ * Array of supported channel layouts, terminated with a zeroed layout.
+ */
+ const AVChannelLayout* ch_layouts;
} AVCodec;
/**
* Iterate over all registered codecs.
*
* @param opaque a pointer where libavcodec will store the iteration state. Must
* point to NULL to start the iteration.
*
* @return the next registered codec or NULL when the iteration is
* finished
*/
-const AVCodec *av_codec_iterate(void **opaque);
+const AVCodec* av_codec_iterate(void** opaque);
/**
* Find a registered decoder with a matching codec ID.
*
* @param id AVCodecID of the requested decoder
* @return A decoder if one was found, NULL otherwise.
*/
-const AVCodec *avcodec_find_decoder(enum AVCodecID id);
+const AVCodec* avcodec_find_decoder(enum AVCodecID id);
/**
* Find a registered decoder with the specified name.
*
* @param name name of the requested decoder
* @return A decoder if one was found, NULL otherwise.
*/
-const AVCodec *avcodec_find_decoder_by_name(const char *name);
+const AVCodec* avcodec_find_decoder_by_name(const char* name);
/**
* Find a registered encoder with a matching codec ID.
*
* @param id AVCodecID of the requested encoder
* @return An encoder if one was found, NULL otherwise.
*/
-const AVCodec *avcodec_find_encoder(enum AVCodecID id);
+const AVCodec* avcodec_find_encoder(enum AVCodecID id);
/**
* Find a registered encoder with the specified name.
*
* @param name name of the requested encoder
* @return An encoder if one was found, NULL otherwise.
*/
-const AVCodec *avcodec_find_encoder_by_name(const char *name);
+const AVCodec* avcodec_find_encoder_by_name(const char* name);
/**
* @return a non-zero number if codec is an encoder, zero otherwise
*/
-int av_codec_is_encoder(const AVCodec *codec);
+int av_codec_is_encoder(const AVCodec* codec);
/**
* @return a non-zero number if codec is a decoder, zero otherwise
*/
-int av_codec_is_decoder(const AVCodec *codec);
+int av_codec_is_decoder(const AVCodec* codec);
/**
* Return a name for the specified profile, if available.
*
* @param codec the codec that is searched for the given profile
* @param profile the profile value for which a name is requested
* @return A name for the profile if found, NULL otherwise.
*/
-const char *av_get_profile_name(const AVCodec *codec, int profile);
+const char* av_get_profile_name(const AVCodec* codec, int profile);
enum {
- /**
- * The codec supports this format via the hw_device_ctx interface.
- *
- * When selecting this format, AVCodecContext.hw_device_ctx should
- * have been set to a device of the specified type before calling
- * avcodec_open2().
- */
- AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX = 0x01,
- /**
- * The codec supports this format via the hw_frames_ctx interface.
- *
- * When selecting this format for a decoder,
- * AVCodecContext.hw_frames_ctx should be set to a suitable frames
- * context inside the get_format() callback. The frames context
- * must have been created on a device of the specified type.
- *
- * When selecting this format for an encoder,
- * AVCodecContext.hw_frames_ctx should be set to the context which
- * will be used for the input frames before calling avcodec_open2().
- */
- AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX = 0x02,
- /**
- * The codec supports this format by some internal method.
- *
- * This format can be selected without any additional configuration -
- * no device or frames context is required.
- */
- AV_CODEC_HW_CONFIG_METHOD_INTERNAL = 0x04,
- /**
- * The codec supports this format by some ad-hoc method.
- *
- * Additional settings and/or function calls are required. See the
- * codec-specific documentation for details. (Methods requiring
- * this sort of configuration are deprecated and others should be
- * used in preference.)
- */
- AV_CODEC_HW_CONFIG_METHOD_AD_HOC = 0x08,
+ /**
+ * The codec supports this format via the hw_device_ctx interface.
+ *
+ * When selecting this format, AVCodecContext.hw_device_ctx should
+ * have been set to a device of the specified type before calling
+ * avcodec_open2().
+ */
+ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX = 0x01,
+ /**
+ * The codec supports this format via the hw_frames_ctx interface.
+ *
+ * When selecting this format for a decoder,
+ * AVCodecContext.hw_frames_ctx should be set to a suitable frames
+ * context inside the get_format() callback. The frames context
+ * must have been created on a device of the specified type.
+ *
+ * When selecting this format for an encoder,
+ * AVCodecContext.hw_frames_ctx should be set to the context which
+ * will be used for the input frames before calling avcodec_open2().
+ */
+ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX = 0x02,
+ /**
+ * The codec supports this format by some internal method.
+ *
+ * This format can be selected without any additional configuration -
+ * no device or frames context is required.
+ */
+ AV_CODEC_HW_CONFIG_METHOD_INTERNAL = 0x04,
+ /**
+ * The codec supports this format by some ad-hoc method.
+ *
+ * Additional settings and/or function calls are required. See the
+ * codec-specific documentation for details. (Methods requiring
+ * this sort of configuration are deprecated and others should be
+ * used in preference.)
+ */
+ AV_CODEC_HW_CONFIG_METHOD_AD_HOC = 0x08,
};
typedef struct AVCodecHWConfig {
- /**
- * For decoders, a hardware pixel format which that decoder may be
- * able to decode to if suitable hardware is available.
- *
- * For encoders, a pixel format which the encoder may be able to
- * accept. If set to AV_PIX_FMT_NONE, this applies to all pixel
- * formats supported by the codec.
- */
- enum AVPixelFormat pix_fmt;
- /**
- * Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible
- * setup methods which can be used with this configuration.
- */
- int methods;
- /**
- * The device type associated with the configuration.
- *
- * Must be set for AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX and
- * AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX, otherwise unused.
- */
- enum AVHWDeviceType device_type;
+ /**
+ * For decoders, a hardware pixel format which that decoder may be
+ * able to decode to if suitable hardware is available.
+ *
+ * For encoders, a pixel format which the encoder may be able to
+ * accept. If set to AV_PIX_FMT_NONE, this applies to all pixel
+ * formats supported by the codec.
+ */
+ enum AVPixelFormat pix_fmt;
+ /**
+ * Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible
+ * setup methods which can be used with this configuration.
+ */
+ int methods;
+ /**
+ * The device type associated with the configuration.
+ *
+ * Must be set for AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX and
+ * AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX, otherwise unused.
+ */
+ enum AVHWDeviceType device_type;
} AVCodecHWConfig;
/**
* Retrieve supported hardware configurations for a codec.
*
* Values of index from zero to some maximum return the indexed configuration
* descriptor; all other values return NULL. If the codec does not support
* any hardware configurations then it will always return NULL.
*/
-const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *codec, int index);
+const AVCodecHWConfig* avcodec_get_hw_config(const AVCodec* codec, int index);
/**
* @}
diff --git a/dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/codec_desc.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_desc.h
similarity index 100%
copy from dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/codec_desc.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_desc.h
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_id.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_id.h
new file mode 100644
index 000000000000..b3dd07ea6e09
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_id.h
@@ -0,0 +1,669 @@
+/*
+ * Codec IDs
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_ID_H
+#define AVCODEC_CODEC_ID_H
+
+#include "libavutil/avutil.h"
+#include "libavutil/samplefmt.h"
+
+#include "version_major.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+/**
+ * Identify the syntax and semantics of the bitstream.
+ * The principle is roughly:
+ * Two decoders with the same ID can decode the same streams.
+ * Two encoders with the same ID can encode compatible streams.
+ * There may be slight deviations from the principle due to implementation
+ * details.
+ *
+ * If you add a codec ID to this list, add it so that
+ * 1. no value of an existing codec ID changes (that would break ABI),
+ * 2. it is as close as possible to similar codecs
+ *
+ * After adding new codec IDs, do not forget to add an entry to the codec
+ * descriptor list and bump libavcodec minor version.
+ */
+enum AVCodecID {
+ AV_CODEC_ID_NONE,
+
+ /* video codecs */
+ AV_CODEC_ID_MPEG1VIDEO,
+ AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+ AV_CODEC_ID_H261,
+ AV_CODEC_ID_H263,
+ AV_CODEC_ID_RV10,
+ AV_CODEC_ID_RV20,
+ AV_CODEC_ID_MJPEG,
+ AV_CODEC_ID_MJPEGB,
+ AV_CODEC_ID_LJPEG,
+ AV_CODEC_ID_SP5X,
+ AV_CODEC_ID_JPEGLS,
+ AV_CODEC_ID_MPEG4,
+ AV_CODEC_ID_RAWVIDEO,
+ AV_CODEC_ID_MSMPEG4V1,
+ AV_CODEC_ID_MSMPEG4V2,
+ AV_CODEC_ID_MSMPEG4V3,
+ AV_CODEC_ID_WMV1,
+ AV_CODEC_ID_WMV2,
+ AV_CODEC_ID_H263P,
+ AV_CODEC_ID_H263I,
+ AV_CODEC_ID_FLV1,
+ AV_CODEC_ID_SVQ1,
+ AV_CODEC_ID_SVQ3,
+ AV_CODEC_ID_DVVIDEO,
+ AV_CODEC_ID_HUFFYUV,
+ AV_CODEC_ID_CYUV,
+ AV_CODEC_ID_H264,
+ AV_CODEC_ID_INDEO3,
+ AV_CODEC_ID_VP3,
+ AV_CODEC_ID_THEORA,
+ AV_CODEC_ID_ASV1,
+ AV_CODEC_ID_ASV2,
+ AV_CODEC_ID_FFV1,
+ AV_CODEC_ID_4XM,
+ AV_CODEC_ID_VCR1,
+ AV_CODEC_ID_CLJR,
+ AV_CODEC_ID_MDEC,
+ AV_CODEC_ID_ROQ,
+ AV_CODEC_ID_INTERPLAY_VIDEO,
+ AV_CODEC_ID_XAN_WC3,
+ AV_CODEC_ID_XAN_WC4,
+ AV_CODEC_ID_RPZA,
+ AV_CODEC_ID_CINEPAK,
+ AV_CODEC_ID_WS_VQA,
+ AV_CODEC_ID_MSRLE,
+ AV_CODEC_ID_MSVIDEO1,
+ AV_CODEC_ID_IDCIN,
+ AV_CODEC_ID_8BPS,
+ AV_CODEC_ID_SMC,
+ AV_CODEC_ID_FLIC,
+ AV_CODEC_ID_TRUEMOTION1,
+ AV_CODEC_ID_VMDVIDEO,
+ AV_CODEC_ID_MSZH,
+ AV_CODEC_ID_ZLIB,
+ AV_CODEC_ID_QTRLE,
+ AV_CODEC_ID_TSCC,
+ AV_CODEC_ID_ULTI,
+ AV_CODEC_ID_QDRAW,
+ AV_CODEC_ID_VIXL,
+ AV_CODEC_ID_QPEG,
+ AV_CODEC_ID_PNG,
+ AV_CODEC_ID_PPM,
+ AV_CODEC_ID_PBM,
+ AV_CODEC_ID_PGM,
+ AV_CODEC_ID_PGMYUV,
+ AV_CODEC_ID_PAM,
+ AV_CODEC_ID_FFVHUFF,
+ AV_CODEC_ID_RV30,
+ AV_CODEC_ID_RV40,
+ AV_CODEC_ID_VC1,
+ AV_CODEC_ID_WMV3,
+ AV_CODEC_ID_LOCO,
+ AV_CODEC_ID_WNV1,
+ AV_CODEC_ID_AASC,
+ AV_CODEC_ID_INDEO2,
+ AV_CODEC_ID_FRAPS,
+ AV_CODEC_ID_TRUEMOTION2,
+ AV_CODEC_ID_BMP,
+ AV_CODEC_ID_CSCD,
+ AV_CODEC_ID_MMVIDEO,
+ AV_CODEC_ID_ZMBV,
+ AV_CODEC_ID_AVS,
+ AV_CODEC_ID_SMACKVIDEO,
+ AV_CODEC_ID_NUV,
+ AV_CODEC_ID_KMVC,
+ AV_CODEC_ID_FLASHSV,
+ AV_CODEC_ID_CAVS,
+ AV_CODEC_ID_JPEG2000,
+ AV_CODEC_ID_VMNC,
+ AV_CODEC_ID_VP5,
+ AV_CODEC_ID_VP6,
+ AV_CODEC_ID_VP6F,
+ AV_CODEC_ID_TARGA,
+ AV_CODEC_ID_DSICINVIDEO,
+ AV_CODEC_ID_TIERTEXSEQVIDEO,
+ AV_CODEC_ID_TIFF,
+ AV_CODEC_ID_GIF,
+ AV_CODEC_ID_DXA,
+ AV_CODEC_ID_DNXHD,
+ AV_CODEC_ID_THP,
+ AV_CODEC_ID_SGI,
+ AV_CODEC_ID_C93,
+ AV_CODEC_ID_BETHSOFTVID,
+ AV_CODEC_ID_PTX,
+ AV_CODEC_ID_TXD,
+ AV_CODEC_ID_VP6A,
+ AV_CODEC_ID_AMV,
+ AV_CODEC_ID_VB,
+ AV_CODEC_ID_PCX,
+ AV_CODEC_ID_SUNRAST,
+ AV_CODEC_ID_INDEO4,
+ AV_CODEC_ID_INDEO5,
+ AV_CODEC_ID_MIMIC,
+ AV_CODEC_ID_RL2,
+ AV_CODEC_ID_ESCAPE124,
+ AV_CODEC_ID_DIRAC,
+ AV_CODEC_ID_BFI,
+ AV_CODEC_ID_CMV,
+ AV_CODEC_ID_MOTIONPIXELS,
+ AV_CODEC_ID_TGV,
+ AV_CODEC_ID_TGQ,
+ AV_CODEC_ID_TQI,
+ AV_CODEC_ID_AURA,
+ AV_CODEC_ID_AURA2,
+ AV_CODEC_ID_V210X,
+ AV_CODEC_ID_TMV,
+ AV_CODEC_ID_V210,
+ AV_CODEC_ID_DPX,
+ AV_CODEC_ID_MAD,
+ AV_CODEC_ID_FRWU,
+ AV_CODEC_ID_FLASHSV2,
+ AV_CODEC_ID_CDGRAPHICS,
+ AV_CODEC_ID_R210,
+ AV_CODEC_ID_ANM,
+ AV_CODEC_ID_BINKVIDEO,
+ AV_CODEC_ID_IFF_ILBM,
+#define AV_CODEC_ID_IFF_BYTERUN1 AV_CODEC_ID_IFF_ILBM
+ AV_CODEC_ID_KGV1,
+ AV_CODEC_ID_YOP,
+ AV_CODEC_ID_VP8,
+ AV_CODEC_ID_PICTOR,
+ AV_CODEC_ID_ANSI,
+ AV_CODEC_ID_A64_MULTI,
+ AV_CODEC_ID_A64_MULTI5,
+ AV_CODEC_ID_R10K,
+ AV_CODEC_ID_MXPEG,
+ AV_CODEC_ID_LAGARITH,
+ AV_CODEC_ID_PRORES,
+ AV_CODEC_ID_JV,
+ AV_CODEC_ID_DFA,
+ AV_CODEC_ID_WMV3IMAGE,
+ AV_CODEC_ID_VC1IMAGE,
+ AV_CODEC_ID_UTVIDEO,
+ AV_CODEC_ID_BMV_VIDEO,
+ AV_CODEC_ID_VBLE,
+ AV_CODEC_ID_DXTORY,
+ AV_CODEC_ID_V410,
+ AV_CODEC_ID_XWD,
+ AV_CODEC_ID_CDXL,
+ AV_CODEC_ID_XBM,
+ AV_CODEC_ID_ZEROCODEC,
+ AV_CODEC_ID_MSS1,
+ AV_CODEC_ID_MSA1,
+ AV_CODEC_ID_TSCC2,
+ AV_CODEC_ID_MTS2,
+ AV_CODEC_ID_CLLC,
+ AV_CODEC_ID_MSS2,
+ AV_CODEC_ID_VP9,
+ AV_CODEC_ID_AIC,
+ AV_CODEC_ID_ESCAPE130,
+ AV_CODEC_ID_G2M,
+ AV_CODEC_ID_WEBP,
+ AV_CODEC_ID_HNM4_VIDEO,
+ AV_CODEC_ID_HEVC,
+#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC
+ AV_CODEC_ID_FIC,
+ AV_CODEC_ID_ALIAS_PIX,
+ AV_CODEC_ID_BRENDER_PIX,
+ AV_CODEC_ID_PAF_VIDEO,
+ AV_CODEC_ID_EXR,
+ AV_CODEC_ID_VP7,
+ AV_CODEC_ID_SANM,
+ AV_CODEC_ID_SGIRLE,
+ AV_CODEC_ID_MVC1,
+ AV_CODEC_ID_MVC2,
+ AV_CODEC_ID_HQX,
+ AV_CODEC_ID_TDSC,
+ AV_CODEC_ID_HQ_HQA,
+ AV_CODEC_ID_HAP,
+ AV_CODEC_ID_DDS,
+ AV_CODEC_ID_DXV,
+ AV_CODEC_ID_SCREENPRESSO,
+ AV_CODEC_ID_RSCC,
+ AV_CODEC_ID_AVS2,
+ AV_CODEC_ID_PGX,
+ AV_CODEC_ID_AVS3,
+ AV_CODEC_ID_MSP2,
+ AV_CODEC_ID_VVC,
+#define AV_CODEC_ID_H266 AV_CODEC_ID_VVC
+ AV_CODEC_ID_Y41P,
+ AV_CODEC_ID_AVRP,
+ AV_CODEC_ID_012V,
+ AV_CODEC_ID_AVUI,
+#if FF_API_AYUV_CODECID
+ AV_CODEC_ID_AYUV,
+#endif
+ AV_CODEC_ID_TARGA_Y216,
+ AV_CODEC_ID_V308,
+ AV_CODEC_ID_V408,
+ AV_CODEC_ID_YUV4,
+ AV_CODEC_ID_AVRN,
+ AV_CODEC_ID_CPIA,
+ AV_CODEC_ID_XFACE,
+ AV_CODEC_ID_SNOW,
+ AV_CODEC_ID_SMVJPEG,
+ AV_CODEC_ID_APNG,
+ AV_CODEC_ID_DAALA,
+ AV_CODEC_ID_CFHD,
+ AV_CODEC_ID_TRUEMOTION2RT,
+ AV_CODEC_ID_M101,
+ AV_CODEC_ID_MAGICYUV,
+ AV_CODEC_ID_SHEERVIDEO,
+ AV_CODEC_ID_YLC,
+ AV_CODEC_ID_PSD,
+ AV_CODEC_ID_PIXLET,
+ AV_CODEC_ID_SPEEDHQ,
+ AV_CODEC_ID_FMVC,
+ AV_CODEC_ID_SCPR,
+ AV_CODEC_ID_CLEARVIDEO,
+ AV_CODEC_ID_XPM,
+ AV_CODEC_ID_AV1,
+ AV_CODEC_ID_BITPACKED,
+ AV_CODEC_ID_MSCC,
+ AV_CODEC_ID_SRGC,
+ AV_CODEC_ID_SVG,
+ AV_CODEC_ID_GDV,
+ AV_CODEC_ID_FITS,
+ AV_CODEC_ID_IMM4,
+ AV_CODEC_ID_PROSUMER,
+ AV_CODEC_ID_MWSC,
+ AV_CODEC_ID_WCMV,
+ AV_CODEC_ID_RASC,
+ AV_CODEC_ID_HYMT,
+ AV_CODEC_ID_ARBC,
+ AV_CODEC_ID_AGM,
+ AV_CODEC_ID_LSCR,
+ AV_CODEC_ID_VP4,
+ AV_CODEC_ID_IMM5,
+ AV_CODEC_ID_MVDV,
+ AV_CODEC_ID_MVHA,
+ AV_CODEC_ID_CDTOONS,
+ AV_CODEC_ID_MV30,
+ AV_CODEC_ID_NOTCHLC,
+ AV_CODEC_ID_PFM,
+ AV_CODEC_ID_MOBICLIP,
+ AV_CODEC_ID_PHOTOCD,
+ AV_CODEC_ID_IPU,
+ AV_CODEC_ID_ARGO,
+ AV_CODEC_ID_CRI,
+ AV_CODEC_ID_SIMBIOSIS_IMX,
+ AV_CODEC_ID_SGA_VIDEO,
+ AV_CODEC_ID_GEM,
+ AV_CODEC_ID_VBN,
+ AV_CODEC_ID_JPEGXL,
+ AV_CODEC_ID_QOI,
+ AV_CODEC_ID_PHM,
+ AV_CODEC_ID_RADIANCE_HDR,
+ AV_CODEC_ID_WBMP,
+ AV_CODEC_ID_MEDIA100,
+ AV_CODEC_ID_VQC,
+
+ /* various PCM "codecs" */
+ AV_CODEC_ID_FIRST_AUDIO =
+ 0x10000, ///< A dummy id pointing at the start of audio codecs
+ AV_CODEC_ID_PCM_S16LE = 0x10000,
+ AV_CODEC_ID_PCM_S16BE,
+ AV_CODEC_ID_PCM_U16LE,
+ AV_CODEC_ID_PCM_U16BE,
+ AV_CODEC_ID_PCM_S8,
+ AV_CODEC_ID_PCM_U8,
+ AV_CODEC_ID_PCM_MULAW,
+ AV_CODEC_ID_PCM_ALAW,
+ AV_CODEC_ID_PCM_S32LE,
+ AV_CODEC_ID_PCM_S32BE,
+ AV_CODEC_ID_PCM_U32LE,
+ AV_CODEC_ID_PCM_U32BE,
+ AV_CODEC_ID_PCM_S24LE,
+ AV_CODEC_ID_PCM_S24BE,
+ AV_CODEC_ID_PCM_U24LE,
+ AV_CODEC_ID_PCM_U24BE,
+ AV_CODEC_ID_PCM_S24DAUD,
+ AV_CODEC_ID_PCM_ZORK,
+ AV_CODEC_ID_PCM_S16LE_PLANAR,
+ AV_CODEC_ID_PCM_DVD,
+ AV_CODEC_ID_PCM_F32BE,
+ AV_CODEC_ID_PCM_F32LE,
+ AV_CODEC_ID_PCM_F64BE,
+ AV_CODEC_ID_PCM_F64LE,
+ AV_CODEC_ID_PCM_BLURAY,
+ AV_CODEC_ID_PCM_LXF,
+ AV_CODEC_ID_S302M,
+ AV_CODEC_ID_PCM_S8_PLANAR,
+ AV_CODEC_ID_PCM_S24LE_PLANAR,
+ AV_CODEC_ID_PCM_S32LE_PLANAR,
+ AV_CODEC_ID_PCM_S16BE_PLANAR,
+ AV_CODEC_ID_PCM_S64LE,
+ AV_CODEC_ID_PCM_S64BE,
+ AV_CODEC_ID_PCM_F16LE,
+ AV_CODEC_ID_PCM_F24LE,
+ AV_CODEC_ID_PCM_VIDC,
+ AV_CODEC_ID_PCM_SGA,
+
+ /* various ADPCM codecs */
+ AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
+ AV_CODEC_ID_ADPCM_IMA_WAV,
+ AV_CODEC_ID_ADPCM_IMA_DK3,
+ AV_CODEC_ID_ADPCM_IMA_DK4,
+ AV_CODEC_ID_ADPCM_IMA_WS,
+ AV_CODEC_ID_ADPCM_IMA_SMJPEG,
+ AV_CODEC_ID_ADPCM_MS,
+ AV_CODEC_ID_ADPCM_4XM,
+ AV_CODEC_ID_ADPCM_XA,
+ AV_CODEC_ID_ADPCM_ADX,
+ AV_CODEC_ID_ADPCM_EA,
+ AV_CODEC_ID_ADPCM_G726,
+ AV_CODEC_ID_ADPCM_CT,
+ AV_CODEC_ID_ADPCM_SWF,
+ AV_CODEC_ID_ADPCM_YAMAHA,
+ AV_CODEC_ID_ADPCM_SBPRO_4,
+ AV_CODEC_ID_ADPCM_SBPRO_3,
+ AV_CODEC_ID_ADPCM_SBPRO_2,
+ AV_CODEC_ID_ADPCM_THP,
+ AV_CODEC_ID_ADPCM_IMA_AMV,
+ AV_CODEC_ID_ADPCM_EA_R1,
+ AV_CODEC_ID_ADPCM_EA_R3,
+ AV_CODEC_ID_ADPCM_EA_R2,
+ AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
+ AV_CODEC_ID_ADPCM_IMA_EA_EACS,
+ AV_CODEC_ID_ADPCM_EA_XAS,
+ AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
+ AV_CODEC_ID_ADPCM_IMA_ISS,
+ AV_CODEC_ID_ADPCM_G722,
+ AV_CODEC_ID_ADPCM_IMA_APC,
+ AV_CODEC_ID_ADPCM_VIMA,
+ AV_CODEC_ID_ADPCM_AFC,
+ AV_CODEC_ID_ADPCM_IMA_OKI,
+ AV_CODEC_ID_ADPCM_DTK,
+ AV_CODEC_ID_ADPCM_IMA_RAD,
+ AV_CODEC_ID_ADPCM_G726LE,
+ AV_CODEC_ID_ADPCM_THP_LE,
+ AV_CODEC_ID_ADPCM_PSX,
+ AV_CODEC_ID_ADPCM_AICA,
+ AV_CODEC_ID_ADPCM_IMA_DAT4,
+ AV_CODEC_ID_ADPCM_MTAF,
+ AV_CODEC_ID_ADPCM_AGM,
+ AV_CODEC_ID_ADPCM_ARGO,
+ AV_CODEC_ID_ADPCM_IMA_SSI,
+ AV_CODEC_ID_ADPCM_ZORK,
+ AV_CODEC_ID_ADPCM_IMA_APM,
+ AV_CODEC_ID_ADPCM_IMA_ALP,
+ AV_CODEC_ID_ADPCM_IMA_MTF,
+ AV_CODEC_ID_ADPCM_IMA_CUNNING,
+ AV_CODEC_ID_ADPCM_IMA_MOFLEX,
+ AV_CODEC_ID_ADPCM_IMA_ACORN,
+ AV_CODEC_ID_ADPCM_XMD,
+
+ /* AMR */
+ AV_CODEC_ID_AMR_NB = 0x12000,
+ AV_CODEC_ID_AMR_WB,
+
+ /* RealAudio codecs*/
+ AV_CODEC_ID_RA_144 = 0x13000,
+ AV_CODEC_ID_RA_288,
+
+ /* various DPCM codecs */
+ AV_CODEC_ID_ROQ_DPCM = 0x14000,
+ AV_CODEC_ID_INTERPLAY_DPCM,
+ AV_CODEC_ID_XAN_DPCM,
+ AV_CODEC_ID_SOL_DPCM,
+ AV_CODEC_ID_SDX2_DPCM,
+ AV_CODEC_ID_GREMLIN_DPCM,
+ AV_CODEC_ID_DERF_DPCM,
+ AV_CODEC_ID_WADY_DPCM,
+ AV_CODEC_ID_CBD2_DPCM,
+
+ /* audio codecs */
+ AV_CODEC_ID_MP2 = 0x15000,
+ AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+ AV_CODEC_ID_AAC,
+ AV_CODEC_ID_AC3,
+ AV_CODEC_ID_DTS,
+ AV_CODEC_ID_VORBIS,
+ AV_CODEC_ID_DVAUDIO,
+ AV_CODEC_ID_WMAV1,
+ AV_CODEC_ID_WMAV2,
+ AV_CODEC_ID_MACE3,
+ AV_CODEC_ID_MACE6,
+ AV_CODEC_ID_VMDAUDIO,
+ AV_CODEC_ID_FLAC,
+ AV_CODEC_ID_MP3ADU,
+ AV_CODEC_ID_MP3ON4,
+ AV_CODEC_ID_SHORTEN,
+ AV_CODEC_ID_ALAC,
+ AV_CODEC_ID_WESTWOOD_SND1,
+ AV_CODEC_ID_GSM, ///< as in Berlin toast format
+ AV_CODEC_ID_QDM2,
+ AV_CODEC_ID_COOK,
+ AV_CODEC_ID_TRUESPEECH,
+ AV_CODEC_ID_TTA,
+ AV_CODEC_ID_SMACKAUDIO,
+ AV_CODEC_ID_QCELP,
+ AV_CODEC_ID_WAVPACK,
+ AV_CODEC_ID_DSICINAUDIO,
+ AV_CODEC_ID_IMC,
+ AV_CODEC_ID_MUSEPACK7,
+ AV_CODEC_ID_MLP,
+ AV_CODEC_ID_GSM_MS, /* as found in WAV */
+ AV_CODEC_ID_ATRAC3,
+ AV_CODEC_ID_APE,
+ AV_CODEC_ID_NELLYMOSER,
+ AV_CODEC_ID_MUSEPACK8,
+ AV_CODEC_ID_SPEEX,
+ AV_CODEC_ID_WMAVOICE,
+ AV_CODEC_ID_WMAPRO,
+ AV_CODEC_ID_WMALOSSLESS,
+ AV_CODEC_ID_ATRAC3P,
+ AV_CODEC_ID_EAC3,
+ AV_CODEC_ID_SIPR,
+ AV_CODEC_ID_MP1,
+ AV_CODEC_ID_TWINVQ,
+ AV_CODEC_ID_TRUEHD,
+ AV_CODEC_ID_MP4ALS,
+ AV_CODEC_ID_ATRAC1,
+ AV_CODEC_ID_BINKAUDIO_RDFT,
+ AV_CODEC_ID_BINKAUDIO_DCT,
+ AV_CODEC_ID_AAC_LATM,
+ AV_CODEC_ID_QDMC,
+ AV_CODEC_ID_CELT,
+ AV_CODEC_ID_G723_1,
+ AV_CODEC_ID_G729,
+ AV_CODEC_ID_8SVX_EXP,
+ AV_CODEC_ID_8SVX_FIB,
+ AV_CODEC_ID_BMV_AUDIO,
+ AV_CODEC_ID_RALF,
+ AV_CODEC_ID_IAC,
+ AV_CODEC_ID_ILBC,
+ AV_CODEC_ID_OPUS,
+ AV_CODEC_ID_COMFORT_NOISE,
+ AV_CODEC_ID_TAK,
+ AV_CODEC_ID_METASOUND,
+ AV_CODEC_ID_PAF_AUDIO,
+ AV_CODEC_ID_ON2AVC,
+ AV_CODEC_ID_DSS_SP,
+ AV_CODEC_ID_CODEC2,
+ AV_CODEC_ID_FFWAVESYNTH,
+ AV_CODEC_ID_SONIC,
+ AV_CODEC_ID_SONIC_LS,
+ AV_CODEC_ID_EVRC,
+ AV_CODEC_ID_SMV,
+ AV_CODEC_ID_DSD_LSBF,
+ AV_CODEC_ID_DSD_MSBF,
+ AV_CODEC_ID_DSD_LSBF_PLANAR,
+ AV_CODEC_ID_DSD_MSBF_PLANAR,
+ AV_CODEC_ID_4GV,
+ AV_CODEC_ID_INTERPLAY_ACM,
+ AV_CODEC_ID_XMA1,
+ AV_CODEC_ID_XMA2,
+ AV_CODEC_ID_DST,
+ AV_CODEC_ID_ATRAC3AL,
+ AV_CODEC_ID_ATRAC3PAL,
+ AV_CODEC_ID_DOLBY_E,
+ AV_CODEC_ID_APTX,
+ AV_CODEC_ID_APTX_HD,
+ AV_CODEC_ID_SBC,
+ AV_CODEC_ID_ATRAC9,
+ AV_CODEC_ID_HCOM,
+ AV_CODEC_ID_ACELP_KELVIN,
+ AV_CODEC_ID_MPEGH_3D_AUDIO,
+ AV_CODEC_ID_SIREN,
+ AV_CODEC_ID_HCA,
+ AV_CODEC_ID_FASTAUDIO,
+ AV_CODEC_ID_MSNSIREN,
+ AV_CODEC_ID_DFPWM,
+ AV_CODEC_ID_BONK,
+ AV_CODEC_ID_MISC4,
+ AV_CODEC_ID_APAC,
+ AV_CODEC_ID_FTR,
+ AV_CODEC_ID_WAVARC,
+ AV_CODEC_ID_RKA,
+
+ /* subtitle codecs */
+ AV_CODEC_ID_FIRST_SUBTITLE =
+ 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
+ AV_CODEC_ID_DVD_SUBTITLE = 0x17000,
+ AV_CODEC_ID_DVB_SUBTITLE,
+ AV_CODEC_ID_TEXT, ///< raw UTF-8 text
+ AV_CODEC_ID_XSUB,
+ AV_CODEC_ID_SSA,
+ AV_CODEC_ID_MOV_TEXT,
+ AV_CODEC_ID_HDMV_PGS_SUBTITLE,
+ AV_CODEC_ID_DVB_TELETEXT,
+ AV_CODEC_ID_SRT,
+ AV_CODEC_ID_MICRODVD,
+ AV_CODEC_ID_EIA_608,
+ AV_CODEC_ID_JACOSUB,
+ AV_CODEC_ID_SAMI,
+ AV_CODEC_ID_REALTEXT,
+ AV_CODEC_ID_STL,
+ AV_CODEC_ID_SUBVIEWER1,
+ AV_CODEC_ID_SUBVIEWER,
+ AV_CODEC_ID_SUBRIP,
+ AV_CODEC_ID_WEBVTT,
+ AV_CODEC_ID_MPL2,
+ AV_CODEC_ID_VPLAYER,
+ AV_CODEC_ID_PJS,
+ AV_CODEC_ID_ASS,
+ AV_CODEC_ID_HDMV_TEXT_SUBTITLE,
+ AV_CODEC_ID_TTML,
+ AV_CODEC_ID_ARIB_CAPTION,
+
+ /* other specific kind of codecs (generally used for attachments) */
+ AV_CODEC_ID_FIRST_UNKNOWN =
+ 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
+ AV_CODEC_ID_TTF = 0x18000,
+
+ AV_CODEC_ID_SCTE_35, ///< Contains timestamp estimated through PCR of program
+ ///< stream.
+ AV_CODEC_ID_EPG,
+ AV_CODEC_ID_BINTEXT,
+ AV_CODEC_ID_XBIN,
+ AV_CODEC_ID_IDF,
+ AV_CODEC_ID_OTF,
+ AV_CODEC_ID_SMPTE_KLV,
+ AV_CODEC_ID_DVD_NAV,
+ AV_CODEC_ID_TIMED_ID3,
+ AV_CODEC_ID_BIN_DATA,
+
+ AV_CODEC_ID_PROBE =
+ 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf
+ ///< should attempt to identify it
+
+ AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_MPEG4SYSTEMS =
+ 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing
+ ///< only metadata information.
+ AV_CODEC_ID_WRAPPED_AVFRAME =
+ 0x21001, ///< Passthrough codec, AVFrames wrapped in AVPacket
+ /**
+ * Dummy null video codec, useful mainly for development and debugging.
+ * Null encoder/decoder discard all input and never return any output.
+ */
+ AV_CODEC_ID_VNULL,
+ /**
+ * Dummy null audio codec, useful mainly for development and debugging.
+ * Null encoder/decoder discard all input and never return any output.
+ */
+ AV_CODEC_ID_ANULL,
+};
+
+/**
+ * Get the type of the given codec.
+ */
+enum AVMediaType avcodec_get_type(enum AVCodecID codec_id);
+
+/**
+ * Get the name of a codec.
+ * @return a static string identifying the codec; never NULL
+ */
+const char* avcodec_get_name(enum AVCodecID id);
+
+/**
+ * Return codec bits per sample.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return codec bits per sample.
+ * Only return non-zero if the bits per sample is exactly correct, not an
+ * approximation.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec_id the ID of the codec to which the requested profile belongs
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ *
+ * @note unlike av_get_profile_name(), which searches a list of profiles
+ * supported by a specific decoder or encoder implementation, this
+ * function searches the list of profiles from the AVCodecDescriptor
+ */
+const char* avcodec_profile_name(enum AVCodecID codec_id, int profile);
+
+/**
+ * Return the PCM codec associated with a sample format.
+ * @param be endianness, 0 for little, 1 for big,
+ * -1 (or anything else) for native
+ * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE
+ */
+enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_CODEC_ID_H
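As an illustration of the lookup helpers declared above, here is a minimal sketch (it assumes a program linking against libavcodec 60; the chosen codec ID is arbitrary):

#include <stdio.h>

#include "libavcodec/codec_id.h"

int main(void) {
  enum AVCodecID id = AV_CODEC_ID_FLAC;  /* arbitrary example codec */

  /* Static name and media type come straight from the codec descriptor. */
  printf("codec: %s (type %d)\n", avcodec_get_name(id), (int)avcodec_get_type(id));

  /* 0 means "unknown / not a fixed bits-per-sample codec". */
  printf("bits per sample: %d\n", av_get_bits_per_sample(id));
  return 0;
}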
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_par.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_par.h
new file mode 100644
index 000000000000..dd9e40d0d6bc
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_par.h
@@ -0,0 +1,247 @@
+/*
+ * Codec parameters public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_PAR_H
+#define AVCODEC_CODEC_PAR_H
+
+#include <stdint.h>
+
+#include "libavutil/avutil.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/rational.h"
+#include "libavutil/pixfmt.h"
+
+#include "codec_id.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+enum AVFieldOrder {
+ AV_FIELD_UNKNOWN,
+ AV_FIELD_PROGRESSIVE,
+ AV_FIELD_TT, ///< Top coded first, top displayed first
+ AV_FIELD_BB, ///< Bottom coded first, bottom displayed first
+ AV_FIELD_TB, ///< Top coded first, bottom displayed first
+ AV_FIELD_BT, ///< Bottom coded first, top displayed first
+};
+
+/**
+ * This struct describes the properties of an encoded stream.
+ *
+ * sizeof(AVCodecParameters) is not a part of the public ABI, this struct must
+ * be allocated with avcodec_parameters_alloc() and freed with
+ * avcodec_parameters_free().
+ */
+typedef struct AVCodecParameters {
+ /**
+ * General type of the encoded data.
+ */
+ enum AVMediaType codec_type;
+ /**
+ * Specific type of the encoded data (the codec used).
+ */
+ enum AVCodecID codec_id;
+ /**
+ * Additional information about the codec (corresponds to the AVI FOURCC).
+ */
+ uint32_t codec_tag;
+
+ /**
+ * Extra binary data needed for initializing the decoder, codec-dependent.
+ *
+ * Must be allocated with av_malloc() and will be freed by
+ * avcodec_parameters_free(). The allocated size of extradata must be at
+ * least extradata_size + AV_INPUT_BUFFER_PADDING_SIZE, with the padding
+ * bytes zeroed.
+ */
+ uint8_t* extradata;
+ /**
+ * Size of the extradata content in bytes.
+ */
+ int extradata_size;
+
+ /**
+ * - video: the pixel format, the value corresponds to enum AVPixelFormat.
+ * - audio: the sample format, the value corresponds to enum AVSampleFormat.
+ */
+ int format;
+
+ /**
+ * The average bitrate of the encoded data (in bits per second).
+ */
+ int64_t bit_rate;
+
+ /**
+ * The number of bits per sample in the codewords.
+ *
+ * This is basically the bitrate per sample. It is mandatory for a bunch of
+ * formats to actually decode them. It's the number of bits for one sample in
+ * the actual coded bitstream.
+ *
+ * This could be, for example, 4 for ADPCM.
+ * For PCM formats this matches bits_per_raw_sample.
+ * Can be 0.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * This is the number of valid bits in each output sample. If the
+ * sample format has more bits, the least significant bits are additional
+ * padding bits, which are always 0. Use right shifts to reduce the sample
+ * to its actual size. For example, audio formats with 24 bit samples will
+ * have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32.
+ * To get the original sample use "(int32_t)sample >> 8".
+ *
+ * For ADPCM this might be 12 or 16 or similar
+ * Can be 0
+ */
+ int bits_per_raw_sample;
+
+ /**
+ * Codec-specific bitstream restrictions that the stream conforms to.
+ */
+ int profile;
+ int level;
+
+ /**
+ * Video only. The dimensions of the video frame in pixels.
+ */
+ int width;
+ int height;
+
+ /**
+ * Video only. The aspect ratio (width / height) which a single pixel
+ * should have when displayed.
+ *
+ * When the aspect ratio is unknown / undefined, the numerator should be
+ * set to 0 (the denominator may have any value).
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * Video only. The order of the fields in interlaced video.
+ */
+ enum AVFieldOrder field_order;
+
+ /**
+ * Video only. Additional colorspace characteristics.
+ */
+ enum AVColorRange color_range;
+ enum AVColorPrimaries color_primaries;
+ enum AVColorTransferCharacteristic color_trc;
+ enum AVColorSpace color_space;
+ enum AVChromaLocation chroma_location;
+
+ /**
+ * Video only. Number of delayed frames.
+ */
+ int video_delay;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * Audio only. The channel layout bitmask. May be 0 if the channel layout is
+ * unknown or unspecified, otherwise the number of bits set must be equal to
+ * the channels field.
+ * @deprecated use ch_layout
+ */
+ attribute_deprecated uint64_t channel_layout;
+ /**
+ * Audio only. The number of audio channels.
+ * @deprecated use ch_layout.nb_channels
+ */
+ attribute_deprecated int channels;
+#endif
+ /**
+ * Audio only. The number of audio samples per second.
+ */
+ int sample_rate;
+ /**
+ * Audio only. The number of bytes per coded audio frame, required by some
+ * formats.
+ *
+ * Corresponds to nBlockAlign in WAVEFORMATEX.
+ */
+ int block_align;
+ /**
+ * Audio only. Audio frame size, if known. Required by some formats to be
+ * static.
+ */
+ int frame_size;
+
+ /**
+ * Audio only. The amount of padding (in samples) inserted by the encoder at
+ * the beginning of the audio. I.e. this number of leading decoded samples
+ * must be discarded by the caller to get the original audio without leading
+ * padding.
+ */
+ int initial_padding;
+ /**
+ * Audio only. The amount of padding (in samples) appended by the encoder to
+ * the end of the audio. I.e. this number of decoded samples must be
+ * discarded by the caller from the end of the stream to get the original
+ * audio without any trailing padding.
+ */
+ int trailing_padding;
+ /**
+ * Audio only. Number of samples to skip after a discontinuity.
+ */
+ int seek_preroll;
+
+ /**
+ * Audio only. The channel layout and number of channels.
+ */
+ AVChannelLayout ch_layout;
+} AVCodecParameters;
+
+/**
+ * Allocate a new AVCodecParameters and set its fields to default values
+ * (unknown/invalid/0). The returned struct must be freed with
+ * avcodec_parameters_free().
+ */
+AVCodecParameters* avcodec_parameters_alloc(void);
+
+/**
+ * Free an AVCodecParameters instance and everything associated with it and
+ * write NULL to the supplied pointer.
+ */
+void avcodec_parameters_free(AVCodecParameters** par);
+
+/**
+ * Copy the contents of src to dst. Any allocated fields in dst are freed and
+ * replaced with newly allocated duplicates of the corresponding fields in src.
+ *
+ * @return >= 0 on success, a negative AVERROR code on failure.
+ */
+int avcodec_parameters_copy(AVCodecParameters* dst,
+ const AVCodecParameters* src);
+
+/**
+ * This function is the same as av_get_audio_frame_duration(), except it works
+ * with AVCodecParameters instead of an AVCodecContext.
+ */
+int av_get_audio_frame_duration2(AVCodecParameters* par, int frame_bytes);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_CODEC_PAR_H
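The allocation contract described above boils down to a short alloc/copy/free cycle; a minimal sketch using only functions declared in this header (the helper name is illustrative):

#include "libavcodec/codec_par.h"

/* Deep-copy stream parameters, e.g. when remuxing a stream. */
static AVCodecParameters *clone_codec_params(const AVCodecParameters *src) {
  AVCodecParameters *dst = avcodec_parameters_alloc();
  if (!dst)
    return NULL;
  if (avcodec_parameters_copy(dst, src) < 0) {
    avcodec_parameters_free(&dst);  /* frees extradata etc. and NULLs dst */
    return NULL;
  }
  return dst;
}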
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/defs.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/defs.h
new file mode 100644
index 000000000000..9a4632cee453
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/defs.h
@@ -0,0 +1,203 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_DEFS_H
+#define AVCODEC_DEFS_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Misc types and constants that do not belong anywhere else.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+
+/**
+ * @ingroup lavc_decoding
+ * Required number of additionally allocated bytes at the end of the input
+ * bitstream for decoding. This is mainly needed because some optimized
+ * bitstream readers read 32 or 64 bit at once and could read over the end.<br>
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged
+ * MPEG bitstreams could cause overread and segfault.
+ */
+#define AV_INPUT_BUFFER_PADDING_SIZE 64
+
+/**
+ * Verify checksums embedded in the bitstream (could be of either encoded or
+ * decoded data, depending on the format) and print an error message on
+ * mismatch. If AV_EF_EXPLODE is also set, a mismatching checksum will result in
+ * the decoder/demuxer returning an error.
+ */
+#define AV_EF_CRCCHECK (1 << 0)
+#define AV_EF_BITSTREAM (1 << 1) ///< detect bitstream specification deviations
+#define AV_EF_BUFFER (1 << 2) ///< detect improper bitstream length
+#define AV_EF_EXPLODE (1 << 3) ///< abort decoding on minor error detection
+
+#define AV_EF_IGNORE_ERR (1 << 15) ///< ignore errors and continue
+#define AV_EF_CAREFUL \
+ (1 << 16) ///< consider things that violate the spec, are fast to calculate
+ ///< and have not been seen in the wild as errors
+#define AV_EF_COMPLIANT \
+ (1 << 17) ///< consider all spec non compliances as errors
+#define AV_EF_AGGRESSIVE \
+ (1 << 18) ///< consider things that a sane encoder/muxer should not do as an
+ ///< error
+
+#define FF_COMPLIANCE_VERY_STRICT \
+ 2 ///< Strictly conform to an older more strict version of the spec or
+ ///< reference software.
+#define FF_COMPLIANCE_STRICT \
+ 1 ///< Strictly conform to all the things in the spec no matter what
+ ///< consequences.
+#define FF_COMPLIANCE_NORMAL 0
+#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
+#define FF_COMPLIANCE_EXPERIMENTAL \
+ -2 ///< Allow nonstandardized experimental things.
+
+/**
+ * @ingroup lavc_decoding
+ */
+enum AVDiscard {
+ /* We leave some space between them for extensions (drop some
+ * keyframes for intra-only or drop just some bidir frames). */
+ AVDISCARD_NONE = -16, ///< discard nothing
+ AVDISCARD_DEFAULT =
+ 0, ///< discard useless packets like 0 size packets in avi
+ AVDISCARD_NONREF = 8, ///< discard all non reference
+ AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
+ AVDISCARD_NONINTRA = 24, ///< discard all non intra frames
+ AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
+ AVDISCARD_ALL = 48, ///< discard all
+};
+
+enum AVAudioServiceType {
+ AV_AUDIO_SERVICE_TYPE_MAIN = 0,
+ AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
+ AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
+ AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
+ AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
+ AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
+ AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
+ AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
+ AV_AUDIO_SERVICE_TYPE_KARAOKE = 8,
+ AV_AUDIO_SERVICE_TYPE_NB, ///< Not part of ABI
+};
+
+/**
+ * Pan Scan area.
+ * This specifies the area which should be displayed.
+ * Note there may be multiple such areas for one frame.
+ */
+typedef struct AVPanScan {
+ /**
+ * id
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int id;
+
+ /**
+ * width and height in 1/16 pel
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int width;
+ int height;
+
+ /**
+ * position of the top left corner in 1/16 pel for up to 3 fields/frames
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t position[3][2];
+} AVPanScan;
+
+/**
+ * This structure describes the bitrate properties of an encoded bitstream. It
+ * roughly corresponds to a subset of the VBV parameters for MPEG-2 or HRD
+ * parameters for H.264/HEVC.
+ */
+typedef struct AVCPBProperties {
+ /**
+ * Maximum bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int64_t max_bitrate;
+ /**
+ * Minimum bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int64_t min_bitrate;
+ /**
+ * Average bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int64_t avg_bitrate;
+
+ /**
+ * The size of the buffer to which the ratecontrol is applied, in bits.
+ * Zero if unknown or unspecified.
+ */
+ int64_t buffer_size;
+
+ /**
+ * The delay between the time the packet this structure is associated with
+ * is received and the time when it should be decoded, in periods of a 27MHz
+ * clock.
+ *
+ * UINT64_MAX when unknown or unspecified.
+ */
+ uint64_t vbv_delay;
+} AVCPBProperties;
+
+/**
+ * Allocate a CPB properties structure and initialize its fields to default
+ * values.
+ *
+ * @param size if non-NULL, the size of the allocated struct will be written
+ * here. This is useful for embedding it in side data.
+ *
+ * @return the newly allocated struct or NULL on failure
+ */
+AVCPBProperties* av_cpb_properties_alloc(size_t* size);
+
+/**
+ * This structure supplies correlation between a packet timestamp and a wall
+ * clock production time. The definition follows the Producer Reference Time
+ * ('prft') as defined in ISO/IEC 14496-12
+ */
+typedef struct AVProducerReferenceTime {
+ /**
+ * A UTC timestamp, in microseconds, since Unix epoch (e.g., av_gettime()).
+ */
+ int64_t wallclock;
+ int flags;
+} AVProducerReferenceTime;
+
+/**
+ * Encode extradata length to a buffer. Used by xiph codecs.
+ *
+ * @param s buffer to write to; must be at least (v/255+1) bytes long
+ * @param v size of extradata in bytes
+ * @return number of bytes written to the buffer.
+ */
+unsigned int av_xiphlacing(unsigned char* s, unsigned int v);
+
+#endif // AVCODEC_DEFS_H
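In practice the padding requirement above means every bitstream buffer handed to a decoder is over-allocated; a sketch, assuming av_mallocz() from libavutil/mem.h (which zero-fills, so the padding bytes are already 0) and an illustrative helper name:

#include <stdint.h>
#include <string.h>

#include "libavcodec/defs.h"
#include "libavutil/mem.h"

/* Copy extradata into a buffer with the mandatory zeroed padding. */
static uint8_t *dup_extradata(const uint8_t *src, int size) {
  uint8_t *buf = av_mallocz((size_t)size + AV_INPUT_BUFFER_PADDING_SIZE);
  if (buf && size > 0)
    memcpy(buf, src, (size_t)size);
  return buf;
}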
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/packet.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/packet.h
new file mode 100644
index 000000000000..d85cf6a84b75
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/packet.h
@@ -0,0 +1,730 @@
+/*
+ * AVPacket public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_PACKET_H
+#define AVCODEC_PACKET_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/buffer.h"
+#include "libavutil/dict.h"
+#include "libavutil/rational.h"
+#include "libavutil/version.h"
+
+#include "libavcodec/version_major.h"
+
+/**
+ * @defgroup lavc_packet AVPacket
+ *
+ * Types and functions for working with AVPacket.
+ * @{
+ */
+enum AVPacketSideDataType {
+ /**
+ * An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE
+ * bytes worth of palette. This side data signals that a new palette is
+ * present.
+ */
+ AV_PKT_DATA_PALETTE,
+
+ /**
+ * The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format
+ * that the extradata buffer was changed and the receiving side should
+ * act upon it appropriately. The new extradata is embedded in the side
+ * data buffer and should be immediately used for processing the current
+ * frame or packet.
+ */
+ AV_PKT_DATA_NEW_EXTRADATA,
+
+ /**
+ * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
+ * @code
+ * u32le param_flags
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
+ * s32le channel_count
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
+ * u64le channel_layout
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
+ * s32le sample_rate
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
+ * s32le width
+ * s32le height
+ * @endcode
+ */
+ AV_PKT_DATA_PARAM_CHANGE,
+
+ /**
+ * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
+ * structures with info about macroblocks relevant to splitting the
+ * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
+ * That is, it does not necessarily contain info about all macroblocks,
+ * as long as the distance between macroblocks in the info is smaller
+ * than the target payload size.
+ * Each MB info structure is 12 bytes, and is laid out as follows:
+ * @code
+ * u32le bit offset from the start of the packet
+ * u8 current quantizer at the start of the macroblock
+ * u8 GOB number
+ * u16le macroblock address within the GOB
+ * u8 horizontal MV predictor
+ * u8 vertical MV predictor
+ * u8 horizontal MV predictor for block number 3
+ * u8 vertical MV predictor for block number 3
+ * @endcode
+ */
+ AV_PKT_DATA_H263_MB_INFO,
+
+ /**
+ * This side data should be associated with an audio stream and contains
+ * ReplayGain information in form of the AVReplayGain struct.
+ */
+ AV_PKT_DATA_REPLAYGAIN,
+
+ /**
+ * This side data contains a 3x3 transformation matrix describing an affine
+ * transformation that needs to be applied to the decoded video frames for
+ * correct presentation.
+ *
+ * See libavutil/display.h for a detailed description of the data.
+ */
+ AV_PKT_DATA_DISPLAYMATRIX,
+
+ /**
+ * This side data should be associated with a video stream and contains
+ * Stereoscopic 3D information in form of the AVStereo3D struct.
+ */
+ AV_PKT_DATA_STEREO3D,
+
+ /**
+ * This side data should be associated with an audio stream and corresponds
+ * to enum AVAudioServiceType.
+ */
+ AV_PKT_DATA_AUDIO_SERVICE_TYPE,
+
+ /**
+ * This side data contains quality related information from the encoder.
+ * @code
+ * u32le quality factor of the compressed frame. Allowed range is between 1
+ *       (good) and FF_LAMBDA_MAX (bad).
+ * u8    picture type
+ * u8    error count
+ * u16   reserved
+ * u64le[error count] sum of squared differences between encoder in and output
+ * @endcode
+ */
+ AV_PKT_DATA_QUALITY_STATS,
+
+ /**
+ * This side data contains an integer value representing the stream index
+ * of a "fallback" track. A fallback track indicates an alternate
+ * track to use when the current track can not be decoded for some reason.
+ * e.g. no decoder available for codec.
+ */
+ AV_PKT_DATA_FALLBACK_TRACK,
+
+ /**
+ * This side data corresponds to the AVCPBProperties struct.
+ */
+ AV_PKT_DATA_CPB_PROPERTIES,
+
+ /**
+ * Recommends skipping the specified number of samples
+ * @code
+ * u32le number of samples to skip from start of this packet
+ * u32le number of samples to skip from end of this packet
+ * u8 reason for start skip
+ * u8 reason for end skip (0=padding silence, 1=convergence)
+ * @endcode
+ */
+ AV_PKT_DATA_SKIP_SAMPLES,
+
+ /**
+ * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that
+ * the packet may contain "dual mono" audio specific to Japanese DTV
+ * and, if so, recommends that only the selected channel be used.
+ * @code
+ * u8 selected channels (0=main/left, 1=sub/right, 2=both)
+ * @endcode
+ */
+ AV_PKT_DATA_JP_DUALMONO,
+
+ /**
+ * A list of zero terminated key/value strings. There is no end marker for
+ * the list, so it is required to rely on the side data size to stop.
+ */
+ AV_PKT_DATA_STRINGS_METADATA,
+
+ /**
+ * Subtitle event position
+ * @code
+ * u32le x1
+ * u32le y1
+ * u32le x2
+ * u32le y2
+ * @endcode
+ */
+ AV_PKT_DATA_SUBTITLE_POSITION,
+
+ /**
+ * Data found in BlockAdditional element of matroska container. There is
+ * no end marker for the data, so it is required to rely on the side data
+ * size to recognize the end. 8 byte id (as found in BlockAddId) followed
+ * by data.
+ */
+ AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
+
+ /**
+ * The optional first identifier line of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_IDENTIFIER,
+
+ /**
+ * The optional settings (rendering instructions) that immediately
+ * follow the timestamp specifier of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_SETTINGS,
+
+ /**
+ * A list of zero terminated key/value strings. There is no end marker for
+ * the list, so it is required to rely on the side data size to stop. This
+ * side data includes updated metadata which appeared in the stream.
+ */
+ AV_PKT_DATA_METADATA_UPDATE,
+
+ /**
+ * MPEGTS stream ID as uint8_t, this is required to pass the stream ID
+ * information from the demuxer to the corresponding muxer.
+ */
+ AV_PKT_DATA_MPEGTS_STREAM_ID,
+
+ /**
+ * Mastering display metadata (based on SMPTE-2086:2014). This metadata
+ * should be associated with a video stream and contains data in the form
+ * of the AVMasteringDisplayMetadata struct.
+ */
+ AV_PKT_DATA_MASTERING_DISPLAY_METADATA,
+
+ /**
+ * This side data should be associated with a video stream and corresponds
+ * to the AVSphericalMapping structure.
+ */
+ AV_PKT_DATA_SPHERICAL,
+
+ /**
+ * Content light level (based on CTA-861.3). This metadata should be
+ * associated with a video stream and contains data in the form of the
+ * AVContentLightMetadata struct.
+ */
+ AV_PKT_DATA_CONTENT_LIGHT_LEVEL,
+
+ /**
+ * ATSC A53 Part 4 Closed Captions. This metadata should be associated with
+ * a video stream. A53 CC bitstream is stored as uint8_t in
+ * AVPacketSideData.data. The number of bytes of CC data is
+ * AVPacketSideData.size.
+ */
+ AV_PKT_DATA_A53_CC,
+
+ /**
+ * This side data is encryption initialization data.
+ * The format is not part of ABI, use av_encryption_init_info_* methods to
+ * access.
+ */
+ AV_PKT_DATA_ENCRYPTION_INIT_INFO,
+
+ /**
+ * This side data contains encryption info for how to decrypt the packet.
+ * The format is not part of ABI, use av_encryption_info_* methods to access.
+ */
+ AV_PKT_DATA_ENCRYPTION_INFO,
+
+ /**
+ * Active Format Description data consisting of a single byte as specified
+ * in ETSI TS 101 154 using AVActiveFormatDescription enum.
+ */
+ AV_PKT_DATA_AFD,
+
+ /**
+ * Producer Reference Time data corresponding to the AVProducerReferenceTime
+ * struct, usually exported by some encoders (on demand through the prft flag
+ * set in the AVCodecContext export_side_data field).
+ */
+ AV_PKT_DATA_PRFT,
+
+ /**
+ * ICC profile data consisting of an opaque octet buffer following the
+ * format described by ISO 15076-1.
+ */
+ AV_PKT_DATA_ICC_PROFILE,
+
+ /**
+ * DOVI configuration
+ * ref:
+ * dolby-vision-bitstreams-within-the-iso-base-media-file-format-v2.1.2,
+ * section 2.2
+ * dolby-vision-bitstreams-in-mpeg-2-transport-stream-multiplex-v1.2,
+ * section 3.3
+ * Tags are stored in struct AVDOVIDecoderConfigurationRecord.
+ */
+ AV_PKT_DATA_DOVI_CONF,
+
+ /**
+ * Timecode which conforms to SMPTE ST 12-1:2014. The data is an array of 4
+ * uint32_t where the first uint32_t describes how many (1-3) of the other
+ * timecodes are used. The timecode format is described in the documentation
+ * of av_timecode_get_smpte_from_framenum() function in libavutil/timecode.h.
+ */
+ AV_PKT_DATA_S12M_TIMECODE,
+
+ /**
+ * HDR10+ dynamic metadata associated with a video frame. The metadata is in
+ * the form of the AVDynamicHDRPlus struct and contains
+ * information for color volume transform - application 4 of
+ * SMPTE 2094-40:2016 standard.
+ */
+ AV_PKT_DATA_DYNAMIC_HDR10_PLUS,
+
+ /**
+ * The number of side data types.
+ * This is not part of the public API/ABI in the sense that it may
+ * change when new side data types are added.
+ * This must stay the last enum value.
+ * If its value becomes huge, some code using it
+ * needs to be updated as it assumes it to be smaller than other limits.
+ */
+ AV_PKT_DATA_NB
+};
+
+#define AV_PKT_DATA_QUALITY_FACTOR AV_PKT_DATA_QUALITY_STATS // DEPRECATED
+
+typedef struct AVPacketSideData {
+ uint8_t* data;
+ size_t size;
+ enum AVPacketSideDataType type;
+} AVPacketSideData;
+
+/**
+ * This structure stores compressed data. It is typically exported by demuxers
+ * and then passed as input to decoders, or received as output from encoders and
+ * then passed to muxers.
+ *
+ * For video, it should typically contain one compressed frame. For audio it may
+ * contain several compressed frames. Encoders are allowed to output empty
+ * packets, with no compressed data, containing only side data
+ * (e.g. to update some stream parameters at the end of encoding).
+ *
+ * The semantics of data ownership depends on the buf field.
+ * If it is set, the packet data is dynamically allocated and is
+ * valid indefinitely until a call to av_packet_unref() reduces the
+ * reference count to 0.
+ *
+ * If the buf field is not set av_packet_ref() would make a copy instead
+ * of increasing the reference count.
+ *
+ * The side data is always allocated with av_malloc(), copied by
+ * av_packet_ref() and freed by av_packet_unref().
+ *
+ * sizeof(AVPacket) being a part of the public ABI is deprecated. Once
+ * av_init_packet() is removed, new packets can only be allocated with
+ * av_packet_alloc(), and new fields may be added to the end of the struct
+ * with a minor bump.
+ *
+ * @see av_packet_alloc
+ * @see av_packet_ref
+ * @see av_packet_unref
+ */
+typedef struct AVPacket {
+ /**
+ * A reference to the reference-counted buffer where the packet data is
+ * stored.
+ * May be NULL, then the packet data is not reference-counted.
+ */
+ AVBufferRef* buf;
+ /**
+ * Presentation timestamp in AVStream->time_base units; the time at which
+ * the decompressed packet will be presented to the user.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ * pts MUST be larger or equal to dts as presentation cannot happen before
+ * decompression, unless one wants to view hex dumps. Some formats misuse
+ * the terms dts and pts/cts to mean something different. Such timestamps
+ * must be converted to true pts/dts before they are stored in AVPacket.
+ */
+ int64_t pts;
+ /**
+ * Decompression timestamp in AVStream->time_base units; the time at which
+ * the packet is decompressed.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ */
+ int64_t dts;
+ uint8_t* data;
+ int size;
+ int stream_index;
+ /**
+ * A combination of AV_PKT_FLAG values
+ */
+ int flags;
+ /**
+ * Additional packet data that can be provided by the container.
+ * Packet can contain several types of side information.
+ */
+ AVPacketSideData* side_data;
+ int side_data_elems;
+
+ /**
+ * Duration of this packet in AVStream->time_base units, 0 if unknown.
+ * Equals next_pts - this_pts in presentation order.
+ */
+ int64_t duration;
+
+ int64_t pos; ///< byte position in stream, -1 if unknown
+
+ /**
+ * for some private data of the user
+ */
+ void* opaque;
+
+ /**
+ * AVBufferRef for free use by the API user. FFmpeg will never check the
+ * contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when
+ * the packet is unreferenced. av_packet_copy_props() calls create a new
+ * reference with av_buffer_ref() for the target packet's opaque_ref field.
+ *
+ * This is unrelated to the opaque field, although it serves a similar
+ * purpose.
+ */
+ AVBufferRef* opaque_ref;
+
+ /**
+ * Time base of the packet's timestamps.
+ * In the future, this field may be set on packets output by encoders or
+ * demuxers, but its value will be by default ignored on input to decoders
+ * or muxers.
+ */
+ AVRational time_base;
+} AVPacket;
+
+#if FF_API_INIT_PACKET
+attribute_deprecated typedef struct AVPacketList {
+ AVPacket pkt;
+ struct AVPacketList* next;
+} AVPacketList;
+#endif
+
+#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
+#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
+/**
+ * Flag is used to discard packets which are required to maintain valid
+ * decoder state but are not required for output and should be dropped
+ * after decoding.
+ **/
+#define AV_PKT_FLAG_DISCARD 0x0004
+/**
+ * The packet comes from a trusted source.
+ *
+ * Otherwise-unsafe constructs such as arbitrary pointers to data
+ * outside the packet may be followed.
+ */
+#define AV_PKT_FLAG_TRUSTED 0x0008
+/**
+ * Flag is used to indicate packets that contain frames that can
+ * be discarded by the decoder. I.e. Non-reference frames.
+ */
+#define AV_PKT_FLAG_DISPOSABLE 0x0010
+
+enum AVSideDataParamChangeFlags {
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * @deprecated those are not used by any decoder
+ */
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
+#endif
+ AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
+ AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
+};
+
+/**
+ * Allocate an AVPacket and set its fields to default values. The resulting
+ * struct must be freed using av_packet_free().
+ *
+ * @return An AVPacket filled with default values or NULL on failure.
+ *
+ * @note this only allocates the AVPacket itself, not the data buffers. Those
+ * must be allocated through other means such as av_new_packet.
+ *
+ * @see av_new_packet
+ */
+AVPacket* av_packet_alloc(void);
+
+/**
+ * Create a new packet that references the same data as src.
+ *
+ * This is a shortcut for av_packet_alloc()+av_packet_ref().
+ *
+ * @return newly created AVPacket on success, NULL on error.
+ *
+ * @see av_packet_alloc
+ * @see av_packet_ref
+ */
+AVPacket* av_packet_clone(const AVPacket* src);
+
+/**
+ * Free the packet, if the packet is reference counted, it will be
+ * unreferenced first.
+ *
+ * @param pkt packet to be freed. The pointer will be set to NULL.
+ * @note passing NULL is a no-op.
+ */
+void av_packet_free(AVPacket** pkt);
+
+#if FF_API_INIT_PACKET
+/**
+ * Initialize optional fields of a packet with default values.
+ *
+ * Note, this does not touch the data and size members, which have to be
+ * initialized separately.
+ *
+ * @param pkt packet
+ *
+ * @see av_packet_alloc
+ * @see av_packet_unref
+ *
+ * @deprecated This function is deprecated. Once it's removed,
+ sizeof(AVPacket) will not be a part of the ABI anymore.
+ */
+attribute_deprecated void av_init_packet(AVPacket* pkt);
+#endif
+
+/**
+ * Allocate the payload of a packet and initialize its fields with
+ * default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK, AVERROR_xxx otherwise
+ */
+int av_new_packet(AVPacket* pkt, int size);
+
+/**
+ * Reduce packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param size new size
+ */
+void av_shrink_packet(AVPacket* pkt, int size);
+
+/**
+ * Increase packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param grow_by number of bytes by which to increase the size of the packet
+ */
+int av_grow_packet(AVPacket* pkt, int grow_by);
+
+/**
+ * Initialize a reference-counted packet from av_malloc()ed data.
+ *
+ * @param pkt packet to be initialized. This function will set the data, size,
+ * and buf fields, all others are left untouched.
+ * @param data Data allocated by av_malloc() to be used as packet data. If this
+ * function returns successfully, the data is owned by the underlying
+ * AVBuffer. The caller may not access the data through other means.
+ * @param size size of data in bytes, without the padding. I.e. the full buffer
+ * size is assumed to be size + AV_INPUT_BUFFER_PADDING_SIZE.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_packet_from_data(AVPacket* pkt, uint8_t* data, int size);
+
+/**
+ * Allocate new information of a packet.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size side information size
+ * @return pointer to freshly allocated data or NULL otherwise
+ */
+uint8_t* av_packet_new_side_data(AVPacket* pkt, enum AVPacketSideDataType type,
+ size_t size);
+
+/**
+ * Wrap an existing array as a packet side data.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param data the side data array. It must be allocated with the av_malloc()
+ * family of functions. The ownership of the data is transferred to
+ * pkt.
+ * @param size side information size
+ * @return a non-negative number on success, a negative AVERROR code on
+ * failure. On failure, the packet is unchanged and the data remains
+ * owned by the caller.
+ */
+int av_packet_add_side_data(AVPacket* pkt, enum AVPacketSideDataType type,
+ uint8_t* data, size_t size);
+
+/**
+ * Shrink the already allocated side data buffer
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size new side information size
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_shrink_side_data(AVPacket* pkt, enum AVPacketSideDataType type,
+ size_t size);
+
+/**
+ * Get side information from packet.
+ *
+ * @param pkt packet
+ * @param type desired side information type
+ * @param size If supplied, *size will be set to the size of the side data
+ * or to zero if the desired side data is not present.
+ * @return pointer to data if present or NULL otherwise
+ */
+uint8_t* av_packet_get_side_data(const AVPacket* pkt,
+ enum AVPacketSideDataType type, size_t* size);
+
+const char* av_packet_side_data_name(enum AVPacketSideDataType type);
+
+/**
+ * Pack a dictionary for use in side_data.
+ *
+ * @param dict The dictionary to pack.
+ * @param size pointer to store the size of the returned data
+ * @return pointer to data if successful, NULL otherwise
+ */
+uint8_t* av_packet_pack_dictionary(AVDictionary* dict, size_t* size);
+/**
+ * Unpack a dictionary from side_data.
+ *
+ * @param data data from side_data
+ * @param size size of the data
+ * @param dict the metadata storage dictionary
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_unpack_dictionary(const uint8_t* data, size_t size,
+ AVDictionary** dict);
+
+/**
+ * Convenience function to free all the side data stored.
+ * All the other fields stay untouched.
+ *
+ * @param pkt packet
+ */
+void av_packet_free_side_data(AVPacket* pkt);
+
+/**
+ * Setup a new reference to the data described by a given packet
+ *
+ * If src is reference-counted, setup dst as a new reference to the
+ * buffer in src. Otherwise allocate a new buffer in dst and copy the
+ * data from src into it.
+ *
+ * All the other fields are copied from src.
+ *
+ * @see av_packet_unref
+ *
+ * @param dst Destination packet. Will be completely overwritten.
+ * @param src Source packet
+ *
+ * @return 0 on success, a negative AVERROR on error. On error, dst
+ * will be blank (as if returned by av_packet_alloc()).
+ */
+int av_packet_ref(AVPacket* dst, const AVPacket* src);
+
+/**
+ * Wipe the packet.
+ *
+ * Unreference the buffer referenced by the packet and reset the
+ * remaining packet fields to their default values.
+ *
+ * @param pkt The packet to be unreferenced.
+ */
+void av_packet_unref(AVPacket* pkt);
+
+/**
+ * Move every field in src to dst and reset src.
+ *
+ * @see av_packet_unref
+ *
+ * @param src Source packet, will be reset
+ * @param dst Destination packet
+ */
+void av_packet_move_ref(AVPacket* dst, AVPacket* src);
+
+/**
+ * Copy only "properties" fields from src to dst.
+ *
+ * Properties for the purpose of this function are all the fields
+ * beside those related to the packet data (buf, data, size)
+ *
+ * @param dst Destination packet
+ * @param src Source packet
+ *
+ * @return 0 on success, AVERROR on failure.
+ */
+int av_packet_copy_props(AVPacket* dst, const AVPacket* src);
+
+/**
+ * Ensure the data described by a given packet is reference counted.
+ *
+ * @note This function does not ensure that the reference will be writable.
+ * Use av_packet_make_writable instead for that purpose.
+ *
+ * @see av_packet_ref
+ * @see av_packet_make_writable
+ *
+ * @param pkt packet whose data should be made reference counted.
+ *
+ * @return 0 on success, a negative AVERROR on error. On failure, the
+ * packet is unchanged.
+ */
+int av_packet_make_refcounted(AVPacket* pkt);
+
+/**
+ * Create a writable reference for the data described by a given packet,
+ * avoiding data copy if possible.
+ *
+ * @param pkt Packet whose data should be made writable.
+ *
+ * @return 0 on success, a negative AVERROR on failure. On failure, the
+ * packet is unchanged.
+ */
+int av_packet_make_writable(AVPacket* pkt);
+
+/**
+ * Convert valid timing fields (timestamps / durations) in a packet from one
+ * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be
+ * ignored.
+ *
+ * @param pkt packet on which the conversion will be performed
+ * @param tb_src source timebase, in which the timing fields in pkt are
+ * expressed
+ * @param tb_dst destination timebase, to which the timing fields will be
+ * converted
+ */
+void av_packet_rescale_ts(AVPacket* pkt, AVRational tb_src, AVRational tb_dst);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_PACKET_H
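The ownership rules above translate into a short allocate/fill/unref/free lifecycle; a minimal sketch using only functions declared in this header (the function name is illustrative):

#include "libavcodec/packet.h"

/* Allocate a packet, give it a refcounted payload, then release everything. */
static int produce_packet(int payload_size) {
  AVPacket *pkt = av_packet_alloc();
  if (!pkt)
    return -1;

  if (av_new_packet(pkt, payload_size) < 0) {  /* data + zeroed padding */
    av_packet_free(&pkt);
    return -1;
  }

  /* ... fill pkt->data, set pkt->pts / pkt->dts / pkt->stream_index ... */

  av_packet_unref(pkt);  /* drop the data reference, reset the fields */
  av_packet_free(&pkt);  /* free the AVPacket itself, sets pkt to NULL */
  return 0;
}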
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/vdpau.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/vdpau.h
new file mode 100644
index 000000000000..0b91baec821c
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/vdpau.h
@@ -0,0 +1,156 @@
+/*
+ * The Video Decode and Presentation API for UNIX (VDPAU) is used for
+ * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1.
+ *
+ * Copyright (C) 2008 NVIDIA
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VDPAU_H
+#define AVCODEC_VDPAU_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vdpau
+ * Public libavcodec VDPAU header.
+ */
+
+/**
+ * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer
+ * @ingroup lavc_codec_hwaccel
+ *
+ * VDPAU hardware acceleration has two modules
+ * - VDPAU decoding
+ * - VDPAU presentation
+ *
+ * The VDPAU decoding module parses all headers using FFmpeg
+ * parsing mechanisms and uses VDPAU for the actual decoding.
+ *
+ * As per the current implementation, the actual decoding
+ * and rendering (API calls) are done as part of the VDPAU
+ * presentation (vo_vdpau.c) module.
+ *
+ * @{
+ */
+
+#include <vdpau/vdpau.h>
+
+#include "libavutil/avconfig.h"
+#include "libavutil/attributes.h"
+
+#include "avcodec.h"
+
+struct AVCodecContext;
+struct AVFrame;
+
+typedef int (*AVVDPAU_Render2)(struct AVCodecContext*, struct AVFrame*,
+ const VdpPictureInfo*, uint32_t,
+ const VdpBitstreamBuffer*);
+
+/**
+ * This structure is used to share data between the libavcodec library and
+ * the client video application.
+ * The user shall allocate the structure via the av_alloc_vdpau_hwaccel
+ * function and make it available as
+ * AVCodecContext.hwaccel_context. Members can be set by the user once
+ * during initialization or through each AVCodecContext.get_buffer()
+ * function call. In any case, they must be valid prior to calling
+ * decoding functions.
+ *
+ * The size of this structure is not a part of the public ABI and must not
+ * be used outside of libavcodec. Use av_vdpau_alloc_context() to allocate an
+ * AVVDPAUContext.
+ */
+typedef struct AVVDPAUContext {
+ /**
+ * VDPAU decoder handle
+ *
+ * Set by user.
+ */
+ VdpDecoder decoder;
+
+ /**
+ * VDPAU decoder render callback
+ *
+ * Set by the user.
+ */
+ VdpDecoderRender* render;
+
+ AVVDPAU_Render2 render2;
+} AVVDPAUContext;
+
+/**
+ * @brief allocation function for AVVDPAUContext
+ *
+ * Allows extending the struct without breaking API/ABI
+ */
+AVVDPAUContext* av_alloc_vdpaucontext(void);
+
+AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext*);
+void av_vdpau_hwaccel_set_render2(AVVDPAUContext*, AVVDPAU_Render2);
+
+/**
+ * Associate a VDPAU device with a codec context for hardware acceleration.
+ * This function is meant to be called from the get_format() codec callback,
+ * or earlier. It can also be called after avcodec_flush_buffers() to change
+ * the underlying VDPAU device mid-stream (e.g. to recover from non-transparent
+ * display preemption).
+ *
+ * @note get_format() must return AV_PIX_FMT_VDPAU if this function completes
+ * successfully.
+ *
+ * @param avctx decoding context whose get_format() callback is invoked
+ * @param device VDPAU device handle to use for hardware acceleration
+ * @param get_proc_address VDPAU device driver
+ * @param flags zero or more OR'd AV_HWACCEL_FLAG_* flags
+ *
+ * @return 0 on success, an AVERROR code on failure.
+ */
+int av_vdpau_bind_context(AVCodecContext* avctx, VdpDevice device,
+ VdpGetProcAddress* get_proc_address, unsigned flags);
+
+/**
+ * Gets the parameters to create an adequate VDPAU video surface for the codec
+ * context using VDPAU hardware decoding acceleration.
+ *
+ * @note Behavior is undefined if the context was not successfully bound to a
+ * VDPAU device using av_vdpau_bind_context().
+ *
+ * @param avctx the codec context being used for decoding the stream
+ * @param type storage space for the VDPAU video surface chroma type
+ * (or NULL to ignore)
+ * @param width storage space for the VDPAU video surface pixel width
+ * (or NULL to ignore)
+ * @param height storage space for the VDPAU video surface pixel height
+ * (or NULL to ignore)
+ *
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+int av_vdpau_get_surface_parameters(AVCodecContext* avctx, VdpChromaType* type,
+ uint32_t* width, uint32_t* height);
+
+/**
+ * Allocate an AVVDPAUContext.
+ *
+ * @return Newly-allocated AVVDPAUContext or NULL on failure.
+ */
+AVVDPAUContext* av_vdpau_alloc_context(void);
+
+/** @} */
+
+#endif /* AVCODEC_VDPAU_H */
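A rough sketch of the intended call pattern from a get_format() callback; the VdpDevice and VdpGetProcAddress are assumed to come from the application's own VDPAU setup and to be reachable through AVCodecContext.opaque (all names below are illustrative):

#include "libavcodec/vdpau.h"

struct AppVdpauState {
  VdpDevice device;                    /* from the app's VDPAU initialization */
  VdpGetProcAddress *get_proc_address; /* ditto */
};

static enum AVPixelFormat pick_vdpau_format(AVCodecContext *avctx,
                                            const enum AVPixelFormat *fmts) {
  const struct AppVdpauState *st = avctx->opaque;

  for (int i = 0; fmts[i] != AV_PIX_FMT_NONE; i++) {
    if (fmts[i] != AV_PIX_FMT_VDPAU)
      continue;
    if (av_vdpau_bind_context(avctx, st->device, st->get_proc_address, 0) < 0)
      break;                  /* binding failed; a real app would fall back */
    return AV_PIX_FMT_VDPAU;  /* must be returned after a successful bind */
  }
  return AV_PIX_FMT_NONE;     /* no VDPAU: signal failure (or pick a sw format) */
}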
diff --git a/media/ffvpx/libavcodec/version.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/version.h
similarity index 60%
copy from media/ffvpx/libavcodec/version.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/version.h
index 43d0d9a9fcb1..d3f6020c86d6 100644
--- a/media/ffvpx/libavcodec/version.h
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/version.h
@@ -29,17 +29,17 @@
#include "version_major.h"
-#define LIBAVCODEC_VERSION_MINOR 51
-#define LIBAVCODEC_VERSION_MICRO 101
+#define LIBAVCODEC_VERSION_MINOR 5
+#define LIBAVCODEC_VERSION_MICRO 100
-#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
- LIBAVCODEC_VERSION_MINOR, \
- LIBAVCODEC_VERSION_MICRO)
-#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \
- LIBAVCODEC_VERSION_MINOR, \
- LIBAVCODEC_VERSION_MICRO)
-#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
+#define LIBAVCODEC_VERSION_INT \
+ AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_VERSION \
+ AV_VERSION(LIBAVCODEC_VERSION_MAJOR, LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
-#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
+#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
#endif /* AVCODEC_VERSION_H */
diff --git a/media/ffvpx/libavcodec/version_major.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/version_major.h
similarity index 55%
copy from media/ffvpx/libavcodec/version_major.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/version_major.h
index 12f863deb79d..e9cd92cb3a60 100644
--- a/media/ffvpx/libavcodec/version_major.h
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/version_major.h
@@ -25,33 +25,28 @@
* Libavcodec version macros.
*/
-#define LIBAVCODEC_VERSION_MAJOR 59
+#define LIBAVCODEC_VERSION_MAJOR 60
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*
* @note, when bumping the major version it is recommended to manually
* disable each FF_API_* in its own commit instead of disabling them all
* at once through the bump. This improves the git bisect-ability of the change.
*/
-#define FF_API_OPENH264_SLICE_MODE (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_OPENH264_CABAC (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_UNUSED_CODEC_CAPS (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_THREAD_SAFE_CALLBACKS (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_GET_FRAME_CLASS (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_AUTO_THREADS (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_INIT_PACKET (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_AVCTX_TIMEBASE (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_FLAG_TRUNCATED (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_SUB_TEXT_FORMAT (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_IDCT_NONE (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_SVTAV1_OPTS (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_AYUV_CODECID (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_VT_OUTPUT_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_AVCODEC_CHROMA_POS (LIBAVCODEC_VERSION_MAJOR < 60)
+#define FF_API_INIT_PACKET (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_IDCT_NONE (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_SVTAV1_OPTS (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_AYUV_CODECID (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_VT_OUTPUT_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_AVCODEC_CHROMA_POS (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_VT_HWACCEL_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_AVCTX_FRAME_NUMBER (LIBAVCODEC_VERSION_MAJOR < 61)
+
+// reminder to remove CrystalHD decoders on next major bump
+#define FF_CODEC_CRYSTAL_HD (LIBAVCODEC_VERSION_MAJOR < 61)
#endif /* AVCODEC_VERSION_MAJOR_H */
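Downstream code typically consumes these guards as plain compile-time switches; an illustrative sketch around FF_API_INIT_PACKET (the helper name is made up):

#include "libavcodec/packet.h"  /* pulls in version_major.h for the FF_API_* guards */

/* Reset a caller-owned packet using whichever API the lavc major still offers. */
static void reset_packet_compat(AVPacket *pkt) {
#if FF_API_INIT_PACKET
  av_init_packet(pkt);  /* deprecated; gone once LIBAVCODEC_VERSION_MAJOR >= 61 */
  pkt->data = NULL;
  pkt->size = 0;
#else
  av_packet_unref(pkt); /* refcounted path only */
#endif
}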
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/attributes.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/attributes.h
new file mode 100644
index 000000000000..774d1fe91616
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/attributes.h
@@ -0,0 +1,173 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Macro definitions for various function/variable attributes
+ */
+
+#ifndef AVUTIL_ATTRIBUTES_H
+#define AVUTIL_ATTRIBUTES_H
+
+#ifdef __GNUC__
+# define AV_GCC_VERSION_AT_LEAST(x, y) \
+ (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))
+# define AV_GCC_VERSION_AT_MOST(x, y) \
+ (__GNUC__ < (x) || __GNUC__ == (x) && __GNUC_MINOR__ <= (y))
+#else
+# define AV_GCC_VERSION_AT_LEAST(x, y) 0
+# define AV_GCC_VERSION_AT_MOST(x, y) 0
+#endif
+
+#ifdef __has_builtin
+# define AV_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define AV_HAS_BUILTIN(x) 0
+#endif
+
+#ifndef av_always_inline
+# if AV_GCC_VERSION_AT_LEAST(3, 1)
+# define av_always_inline __attribute__((always_inline)) inline
+# elif defined(_MSC_VER)
+# define av_always_inline __forceinline
+# else
+# define av_always_inline inline
+# endif
+#endif
+
+#ifndef av_extern_inline
+# if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__)
+# define av_extern_inline extern inline
+# else
+# define av_extern_inline inline
+# endif
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3, 4)
+# define av_warn_unused_result __attribute__((warn_unused_result))
+#else
+# define av_warn_unused_result
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3, 1)
+# define av_noinline __attribute__((noinline))
+#elif defined(_MSC_VER)
+# define av_noinline __declspec(noinline)
+#else
+# define av_noinline
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3, 1) || defined(__clang__)
+# define av_pure __attribute__((pure))
+#else
+# define av_pure
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2, 6) || defined(__clang__)
+# define av_const __attribute__((const))
+#else
+# define av_const
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4, 3) || defined(__clang__)
+# define av_cold __attribute__((cold))
+#else
+# define av_cold
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4, 1) && !defined(__llvm__)
+# define av_flatten __attribute__((flatten))
+#else
+# define av_flatten
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3, 1)
+# define attribute_deprecated __attribute__((deprecated))
+#elif defined(_MSC_VER)
+# define attribute_deprecated __declspec(deprecated)
+#else
+# define attribute_deprecated
+#endif
+
+/**
+ * Disable warnings about deprecated features
+ * This is useful for sections of code kept for backward compatibility and
+ * scheduled for removal.
+ */
+#ifndef AV_NOWARN_DEPRECATED
+# if AV_GCC_VERSION_AT_LEAST(4, 6) || defined(__clang__)
+# define AV_NOWARN_DEPRECATED(code) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \
+ code _Pragma("GCC diagnostic pop")
+# elif defined(_MSC_VER)
+# define AV_NOWARN_DEPRECATED(code) \
+ __pragma(warning(push)) __pragma(warning(disable : 4996)) code; \
+ __pragma(warning(pop))
+# else
+# define AV_NOWARN_DEPRECATED(code) code
+# endif
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+# define av_unused __attribute__((unused))
+#else
+# define av_unused
+#endif
+
+/**
+ * Mark a variable as used and prevent the compiler from optimizing it
+ * away. This is useful for variables accessed only from inline
+ * assembler without the compiler being aware.
+ */
+#if AV_GCC_VERSION_AT_LEAST(3, 1) || defined(__clang__)
+# define av_used __attribute__((used))
+#else
+# define av_used
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3, 3) || defined(__clang__)
+# define av_alias __attribute__((may_alias))
+#else
+# define av_alias
+#endif
+
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(__INTEL_COMPILER)
+# define av_uninit(x) x = x
+#else
+# define av_uninit(x) x
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+# define av_builtin_constant_p __builtin_constant_p
+# define av_printf_format(fmtpos, attrpos) \
+ __attribute__((__format__(__printf__, fmtpos, attrpos)))
+#else
+# define av_builtin_constant_p(x) 0
+# define av_printf_format(fmtpos, attrpos)
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2, 5) || defined(__clang__)
+# define av_noreturn __attribute__((noreturn))
+#else
+# define av_noreturn
+#endif
+
+#endif /* AVUTIL_ATTRIBUTES_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/avconfig.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/avconfig.h
similarity index 100%
copy from dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/avconfig.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/avconfig.h
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/avutil.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/avutil.h
new file mode 100644
index 000000000000..8f8343bd043f
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/avutil.h
@@ -0,0 +1,371 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVUTIL_H
+#define AVUTIL_AVUTIL_H
+
+/**
+ * @file
+ * @ingroup lavu
+ * Convenience header that includes @ref lavu "libavutil"'s core.
+ */
+
+/**
+ * @mainpage
+ *
+ * @section ffmpeg_intro Introduction
+ *
+ * This document describes the usage of the different libraries
+ * provided by FFmpeg.
+ *
+ * @li @ref libavc "libavcodec" encoding/decoding library
+ * @li @ref lavfi "libavfilter" graph-based frame editing library
+ * @li @ref libavf "libavformat" I/O and muxing/demuxing library
+ * @li @ref lavd "libavdevice" special devices muxing/demuxing library
+ * @li @ref lavu "libavutil" common utility library
+ * @li @ref lswr "libswresample" audio resampling, format conversion and mixing
+ * @li @ref lpp "libpostproc" post processing library
+ * @li @ref libsws "libswscale" color conversion and scaling library
+ *
+ * @section ffmpeg_versioning Versioning and compatibility
+ *
+ * Each of the FFmpeg libraries contains a version.h header, which defines a
+ * major, minor and micro version number with the
+ * <em>LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO}</em> macros. The major version
+ * number is incremented with backward incompatible changes - e.g. removing
+ * parts of the public API, reordering public struct members, etc. The minor
+ * version number is incremented for backward compatible API changes or major
+ * new features - e.g. adding a new public function or a new decoder. The micro
+ * version number is incremented for smaller changes that a calling program
+ * might still want to check for - e.g. changing behavior in a previously
+ * unspecified situation.
+ *
+ * FFmpeg guarantees backward API and ABI compatibility for each library as long
+ * as its major version number is unchanged. This means that no public symbols
+ * will be removed or renamed. Types and names of the public struct members and
+ * values of public macros and enums will remain the same (unless they were
+ * explicitly declared as not part of the public API). Documented behavior will
+ * not change.
+ *
+ * In other words, any correct program that works with a given FFmpeg snapshot
+ * should work just as well without any changes with any later snapshot with the
+ * same major versions. This applies to both rebuilding the program against new
+ * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program
+ * links against.
+ *
+ * However, new public symbols may be added and new members may be appended to
+ * public structs whose size is not part of public ABI (most public structs in
+ * FFmpeg). New macros and enum values may be added. Behavior in undocumented
+ * situations may change slightly (and be documented). All those are accompanied
+ * by an entry in doc/APIchanges and incrementing either the minor or micro
+ * version number.
+ */
+
+/**
+ * @defgroup lavu libavutil
+ * Common code shared across all FFmpeg libraries.
+ *
+ * @note
+ * libavutil is designed to be modular. In most cases, in order to use the
+ * functions provided by one component of libavutil you must explicitly include
+ * the specific header containing that feature. If you are only using
+ * media-related components, you could simply include libavutil/avutil.h, which
+ * brings in most of the "core" components.
+ *
+ * @{
+ *
+ * @defgroup lavu_crypto Crypto and Hashing
+ *
+ * @{
+ * @}
+ *
+ * @defgroup lavu_math Mathematics
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_string String Manipulation
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_mem Memory Management
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_data Data Structures
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_video Video related
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_audio Audio related
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_error Error Codes
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_log Logging Facility
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_misc Other
+ *
+ * @{
+ *
+ * @defgroup preproc_misc Preprocessor String Macros
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup version_utils Library Version Macros
+ *
+ * @{
+ *
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_ver
+ * @{
+ */
+
+/**
+ * Return the LIBAVUTIL_VERSION_INT constant.
+ */
+unsigned avutil_version(void);
+
+/**
+ * Return an informative version string. This usually is the actual release
+ * version number or a git commit description. This string has no fixed format
+ * and can change any time. It should never be parsed by code.
+ */
+const char* av_version_info(void);
+
+/**
+ * Return the libavutil build-time configuration.
+ */
+const char* avutil_configuration(void);
+
+/**
+ * Return the libavutil license.
+ */
+const char* avutil_license(void);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_media Media Type
+ * @brief Media Type
+ */
+
+enum AVMediaType {
+ AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA
+ AVMEDIA_TYPE_VIDEO,
+ AVMEDIA_TYPE_AUDIO,
+ AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous
+ AVMEDIA_TYPE_SUBTITLE,
+ AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse
+ AVMEDIA_TYPE_NB
+};
+
+/**
+ * Return a string describing the media_type enum, NULL if media_type
+ * is unknown.
+ */
+const char* av_get_media_type_string(enum AVMediaType media_type);
+
+/**
+ * @defgroup lavu_const Constants
+ * @{
+ *
+ * @defgroup lavu_enc Encoding specific
+ *
+ * @note those definitions should move to avcodec
+ * @{
+ */
+
+#define FF_LAMBDA_SHIFT 7
+#define FF_LAMBDA_SCALE (1 << FF_LAMBDA_SHIFT)
+#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda
+#define FF_LAMBDA_MAX (256 * 128 - 1)
+
+#define FF_QUALITY_SCALE FF_LAMBDA_SCALE // FIXME maybe remove
+
+/**
+ * @}
+ * @defgroup lavu_time Timestamp specific
+ *
+ * FFmpeg internal timebase and timestamp definitions
+ *
+ * @{
+ */
+
+/**
+ * @brief Undefined timestamp value
+ *
+ * Usually reported by demuxers that work on containers that do not provide
+ * either pts or dts.
+ */
+
+#define AV_NOPTS_VALUE ((int64_t)UINT64_C(0x8000000000000000))
+
+/**
+ * Internal time base represented as integer
+ */
+
+#define AV_TIME_BASE 1000000
+
+/**
+ * Internal time base represented as fractional value
+ */
+
+#define AV_TIME_BASE_Q \
+ (AVRational) { 1, AV_TIME_BASE }
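+
+/*
+ * Editor's note: a minimal sketch (not part of the upstream header) of the
+ * usual way the internal time base is consumed: converting a timestamp in
+ * AV_TIME_BASE units to seconds via av_q2d() from rational.h. The helper
+ * name is made up for illustration.
+ *
+ *     double to_seconds(int64_t ts)
+ *     {
+ *         // e.g. ts = 2500000 -> 2.5 seconds
+ *         return ts * av_q2d(AV_TIME_BASE_Q);
+ *     }
+ */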
+
+/**
+ * @}
+ * @}
+ * @defgroup lavu_picture Image related
+ *
+ * AVPicture types, pixel formats and basic image planes manipulation.
+ *
+ * @{
+ */
+
+enum AVPictureType {
+ AV_PICTURE_TYPE_NONE = 0, ///< Undefined
+ AV_PICTURE_TYPE_I, ///< Intra
+ AV_PICTURE_TYPE_P, ///< Predicted
+ AV_PICTURE_TYPE_B, ///< Bi-dir predicted
+ AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG-4
+ AV_PICTURE_TYPE_SI, ///< Switching Intra
+ AV_PICTURE_TYPE_SP, ///< Switching Predicted
+ AV_PICTURE_TYPE_BI, ///< BI type
+};
+
+/**
+ * Return a single letter to describe the given picture type
+ * pict_type.
+ *
+ * @param[in] pict_type the picture type
+ * @return a single character representing the picture type,
+ *         '?' if pict_type is unknown
+ */
+char av_get_picture_type_char(enum AVPictureType pict_type);
+
+/**
+ * @}
+ */
+
+#include "common.h"
+#include "error.h"
+#include "rational.h"
+#include "version.h"
+#include "macros.h"
+#include "mathematics.h"
+#include "log.h"
+#include "pixfmt.h"
+
+/**
+ * Return the default pointer x in case p is NULL, otherwise return p.
+ */
+static inline void* av_x_if_null(const void* p, const void* x) {
+ return (void*)(intptr_t)(p ? p : x);
+}
+
+/**
+ * Compute the length of an integer list.
+ *
+ * @param elsize size in bytes of each list element (only 1, 2, 4 or 8)
+ * @param list   pointer to the list
+ * @param term   list terminator (usually 0 or -1)
+ * @return length of the list, in elements, not counting the terminator
+ */
+unsigned av_int_list_length_for_size(unsigned elsize, const void* list,
+ uint64_t term) av_pure;
+
+/**
+ * Compute the length of an integer list.
+ *
+ * @param term list terminator (usually 0 or -1)
+ * @param list pointer to the list
+ * @return length of the list, in elements, not counting the terminator
+ */
+#define av_int_list_length(list, term) \
+ av_int_list_length_for_size(sizeof(*(list)), list, term)
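+
+/*
+ * Editor's note: a small usage sketch for av_int_list_length(), not part of
+ * the upstream header; the list below is made up for illustration.
+ *
+ *     static const int list[] = { 10, 20, 30, -1 };
+ *     unsigned n = av_int_list_length(list, -1);   // n == 3
+ */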
+
+#if FF_API_AV_FOPEN_UTF8
+/**
+ * Open a file using a UTF-8 filename.
+ * The API of this function matches POSIX fopen(), errors are returned through
+ * errno.
+ * @deprecated Avoid using it, as on Windows, the FILE* allocated by this
+ * function may be allocated with a different CRT than the caller
+ * who uses the FILE*. No replacement provided in public API.
+ */
+attribute_deprecated FILE* av_fopen_utf8(const char* path, const char* mode);
+#endif
+
+/**
+ * Return the fractional representation of the internal time base.
+ */
+AVRational av_get_time_base_q(void);
+
+#define AV_FOURCC_MAX_STRING_SIZE 32
+
+#define av_fourcc2str(fourcc) \
+ av_fourcc_make_string((char[AV_FOURCC_MAX_STRING_SIZE]){0}, fourcc)
+
+/**
+ * Fill the provided buffer with a string containing a FourCC (four-character
+ * code) representation.
+ *
+ * @param buf a buffer with size in bytes of at least
+ * AV_FOURCC_MAX_STRING_SIZE
+ * @param fourcc the fourcc to represent
+ * @return the buffer in input
+ */
+char* av_fourcc_make_string(char* buf, uint32_t fourcc);
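+
+/*
+ * Editor's note: an illustrative sketch (not part of the upstream header)
+ * combining av_fourcc2str() with MKTAG() from macros.h; printf() requires
+ * <stdio.h>.
+ *
+ *     uint32_t tag = MKTAG('a', 'v', 'c', '1');
+ *     printf("codec tag: %s\n", av_fourcc2str(tag));   // prints "avc1"
+ */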
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_AVUTIL_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/buffer.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/buffer.h
similarity index 100%
copy from dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/buffer.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/buffer.h
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/channel_layout.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/channel_layout.h
new file mode 100644
index 000000000000..1ce5bb32d125
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/channel_layout.h
@@ -0,0 +1,842 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2008 Peter Ross
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CHANNEL_LAYOUT_H
+#define AVUTIL_CHANNEL_LAYOUT_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "version.h"
+#include "attributes.h"
+
+/**
+ * @file
+ * @ingroup lavu_audio_channels
+ * Public libavutil channel layout APIs header.
+ */
+
+/**
+ * @defgroup lavu_audio_channels Audio channels
+ * @ingroup lavu_audio
+ *
+ * Audio channel layout utility functions
+ *
+ * @{
+ */
+
+enum AVChannel {
+  AV_CHAN_NONE = -1,  ///< Invalid channel index
+ AV_CHAN_FRONT_LEFT,
+ AV_CHAN_FRONT_RIGHT,
+ AV_CHAN_FRONT_CENTER,
+ AV_CHAN_LOW_FREQUENCY,
+ AV_CHAN_BACK_LEFT,
+ AV_CHAN_BACK_RIGHT,
+ AV_CHAN_FRONT_LEFT_OF_CENTER,
+ AV_CHAN_FRONT_RIGHT_OF_CENTER,
+ AV_CHAN_BACK_CENTER,
+ AV_CHAN_SIDE_LEFT,
+ AV_CHAN_SIDE_RIGHT,
+ AV_CHAN_TOP_CENTER,
+ AV_CHAN_TOP_FRONT_LEFT,
+ AV_CHAN_TOP_FRONT_CENTER,
+ AV_CHAN_TOP_FRONT_RIGHT,
+ AV_CHAN_TOP_BACK_LEFT,
+ AV_CHAN_TOP_BACK_CENTER,
+ AV_CHAN_TOP_BACK_RIGHT,
+ /** Stereo downmix. */
+ AV_CHAN_STEREO_LEFT = 29,
+ /** See above. */
+ AV_CHAN_STEREO_RIGHT,
+ AV_CHAN_WIDE_LEFT,
+ AV_CHAN_WIDE_RIGHT,
+ AV_CHAN_SURROUND_DIRECT_LEFT,
+ AV_CHAN_SURROUND_DIRECT_RIGHT,
+ AV_CHAN_LOW_FREQUENCY_2,
+ AV_CHAN_TOP_SIDE_LEFT,
+ AV_CHAN_TOP_SIDE_RIGHT,
+ AV_CHAN_BOTTOM_FRONT_CENTER,
+ AV_CHAN_BOTTOM_FRONT_LEFT,
+ AV_CHAN_BOTTOM_FRONT_RIGHT,
+
+  /** Channel is empty and can be safely skipped. */
+ AV_CHAN_UNUSED = 0x200,
+
+ /** Channel contains data, but its position is unknown. */
+ AV_CHAN_UNKNOWN = 0x300,
+
+ /**
+ * Range of channels between AV_CHAN_AMBISONIC_BASE and
+ * AV_CHAN_AMBISONIC_END represent Ambisonic components using the ACN system.
+ *
+ * Given a channel id `<i>` between AV_CHAN_AMBISONIC_BASE and
+ * AV_CHAN_AMBISONIC_END (inclusive), the ACN index of the channel `<n>` is
+ * `<n> = <i> - AV_CHAN_AMBISONIC_BASE`.
+ *
+ * @note these values are only used for AV_CHANNEL_ORDER_CUSTOM channel
+ * orderings, the AV_CHANNEL_ORDER_AMBISONIC ordering orders the channels
+ * implicitly by their position in the stream.
+ */
+ AV_CHAN_AMBISONIC_BASE = 0x400,
+ // leave space for 1024 ids, which correspond to maximum order-32 harmonics,
+ // which should be enough for the foreseeable use cases
+ AV_CHAN_AMBISONIC_END = 0x7ff,
+};
+
+enum AVChannelOrder {
+ /**
+ * Only the channel count is specified, without any further information
+ * about the channel order.
+ */
+ AV_CHANNEL_ORDER_UNSPEC,
+ /**
+ * The native channel order, i.e. the channels are in the same order in
+ * which they are defined in the AVChannel enum. This supports up to 63
+ * different channels.
+ */
+ AV_CHANNEL_ORDER_NATIVE,
+ /**
+ * The channel order does not correspond to any other predefined order and
+ * is stored as an explicit map. For example, this could be used to support
+   * layouts with 64 or more channels, or with empty/skipped (AV_CHAN_UNUSED)
+ * channels at arbitrary positions.
+ */
+ AV_CHANNEL_ORDER_CUSTOM,
+ /**
+ * The audio is represented as the decomposition of the sound field into
+ * spherical harmonics. Each channel corresponds to a single expansion
+ * component. Channels are ordered according to ACN (Ambisonic Channel
+ * Number).
+ *
+ * The channel with the index n in the stream contains the spherical
+ * harmonic of degree l and order m given by
+ * @code{.unparsed}
+ * l = floor(sqrt(n)),
+ * m = n - l * (l + 1).
+ * @endcode
+ *
+ * Conversely given a spherical harmonic of degree l and order m, the
+ * corresponding channel index n is given by
+ * @code{.unparsed}
+ * n = l * (l + 1) + m.
+ * @endcode
+ *
+ * Normalization is assumed to be SN3D (Schmidt Semi-Normalization)
+   * as defined in AmbiX format, section 2.1.
+ */
+ AV_CHANNEL_ORDER_AMBISONIC,
+};
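+
+/*
+ * Editor's note: a worked sketch (not part of the upstream header) of the ACN
+ * relations documented above; the helper name is made up for illustration.
+ *
+ *     #include <math.h>
+ *
+ *     // Map a stream channel index n to its degree l and order m.
+ *     static void acn_to_degree_order(int n, int *l, int *m)
+ *     {
+ *         *l = (int)floor(sqrt(n));    // e.g. n = 5 -> l = 2
+ *         *m = n - *l * (*l + 1);      //      n = 5 -> m = -1
+ *     }
+ */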
+
+/**
+ * @defgroup channel_masks Audio channel masks
+ *
+ * A channel layout is a 64-bit integer with a bit set for every channel.
+ * The number of bits set must be equal to the number of channels.
+ * The value 0 means that the channel layout is not known.
+ * @note this data structure is not powerful enough to handle channels
+ * combinations that have the same channel multiple times, such as
+ * dual-mono.
+ *
+ * @{
+ */
+#define AV_CH_FRONT_LEFT (1ULL << AV_CHAN_FRONT_LEFT)
+#define AV_CH_FRONT_RIGHT (1ULL << AV_CHAN_FRONT_RIGHT)
+#define AV_CH_FRONT_CENTER (1ULL << AV_CHAN_FRONT_CENTER)
+#define AV_CH_LOW_FREQUENCY (1ULL << AV_CHAN_LOW_FREQUENCY)
+#define AV_CH_BACK_LEFT (1ULL << AV_CHAN_BACK_LEFT)
+#define AV_CH_BACK_RIGHT (1ULL << AV_CHAN_BACK_RIGHT)
+#define AV_CH_FRONT_LEFT_OF_CENTER (1ULL << AV_CHAN_FRONT_LEFT_OF_CENTER)
+#define AV_CH_FRONT_RIGHT_OF_CENTER (1ULL << AV_CHAN_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_BACK_CENTER (1ULL << AV_CHAN_BACK_CENTER)
+#define AV_CH_SIDE_LEFT (1ULL << AV_CHAN_SIDE_LEFT)
+#define AV_CH_SIDE_RIGHT (1ULL << AV_CHAN_SIDE_RIGHT)
+#define AV_CH_TOP_CENTER (1ULL << AV_CHAN_TOP_CENTER)
+#define AV_CH_TOP_FRONT_LEFT (1ULL << AV_CHAN_TOP_FRONT_LEFT)
+#define AV_CH_TOP_FRONT_CENTER (1ULL << AV_CHAN_TOP_FRONT_CENTER)
+#define AV_CH_TOP_FRONT_RIGHT (1ULL << AV_CHAN_TOP_FRONT_RIGHT)
+#define AV_CH_TOP_BACK_LEFT (1ULL << AV_CHAN_TOP_BACK_LEFT)
+#define AV_CH_TOP_BACK_CENTER (1ULL << AV_CHAN_TOP_BACK_CENTER)
+#define AV_CH_TOP_BACK_RIGHT (1ULL << AV_CHAN_TOP_BACK_RIGHT)
+#define AV_CH_STEREO_LEFT (1ULL << AV_CHAN_STEREO_LEFT)
+#define AV_CH_STEREO_RIGHT (1ULL << AV_CHAN_STEREO_RIGHT)
+#define AV_CH_WIDE_LEFT (1ULL << AV_CHAN_WIDE_LEFT)
+#define AV_CH_WIDE_RIGHT (1ULL << AV_CHAN_WIDE_RIGHT)
+#define AV_CH_SURROUND_DIRECT_LEFT (1ULL << AV_CHAN_SURROUND_DIRECT_LEFT)
+#define AV_CH_SURROUND_DIRECT_RIGHT (1ULL << AV_CHAN_SURROUND_DIRECT_RIGHT)
+#define AV_CH_LOW_FREQUENCY_2 (1ULL << AV_CHAN_LOW_FREQUENCY_2)
+#define AV_CH_TOP_SIDE_LEFT (1ULL << AV_CHAN_TOP_SIDE_LEFT)
+#define AV_CH_TOP_SIDE_RIGHT (1ULL << AV_CHAN_TOP_SIDE_RIGHT)
+#define AV_CH_BOTTOM_FRONT_CENTER (1ULL << AV_CHAN_BOTTOM_FRONT_CENTER)
+#define AV_CH_BOTTOM_FRONT_LEFT (1ULL << AV_CHAN_BOTTOM_FRONT_LEFT)
+#define AV_CH_BOTTOM_FRONT_RIGHT (1ULL << AV_CHAN_BOTTOM_FRONT_RIGHT)
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+/** Channel mask value used for AVCodecContext.request_channel_layout
+ to indicate that the user requests the channel order of the decoder output
+ to be the native codec channel order.
+ @deprecated channel order is now indicated in a special field in
+ AVChannelLayout
+ */
+# define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL
+#endif
+
+/**
+ * @}
+ * @defgroup channel_mask_c Audio channel layouts
+ * @{
+ * */
+#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT)
+#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO | AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO | AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO | AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND | AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND | AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0 | AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_2 \
+ (AV_CH_LAYOUT_STEREO | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_QUAD \
+ (AV_CH_LAYOUT_STEREO | AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT0 \
+ (AV_CH_LAYOUT_SURROUND | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0 | AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_5POINT0_BACK \
+ (AV_CH_LAYOUT_SURROUND | AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT1_BACK \
+ (AV_CH_LAYOUT_5POINT0_BACK | AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0 | AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT0_FRONT \
+ (AV_CH_LAYOUT_2_2 | AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK | AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1 | AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_BACK \
+ (AV_CH_LAYOUT_5POINT1_BACK | AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_FRONT \
+ (AV_CH_LAYOUT_6POINT0_FRONT | AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_7POINT0 \
+ (AV_CH_LAYOUT_5POINT0 | AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT0_FRONT \
+ (AV_CH_LAYOUT_5POINT0 | AV_CH_FRONT_LEFT_OF_CENTER | \
+ AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1 \
+ (AV_CH_LAYOUT_5POINT1 | AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT1_WIDE \
+ (AV_CH_LAYOUT_5POINT1 | AV_CH_FRONT_LEFT_OF_CENTER | \
+ AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1_WIDE_BACK \
+ (AV_CH_LAYOUT_5POINT1_BACK | AV_CH_FRONT_LEFT_OF_CENTER | \
+ AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1_TOP_BACK \
+ (AV_CH_LAYOUT_5POINT1_BACK | AV_CH_TOP_FRONT_LEFT | AV_CH_TOP_FRONT_RIGHT)
+#define AV_CH_LAYOUT_OCTAGONAL \
+ (AV_CH_LAYOUT_5POINT0 | AV_CH_BACK_LEFT | AV_CH_BACK_CENTER | \
+ AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_CUBE \
+ (AV_CH_LAYOUT_QUAD | AV_CH_TOP_FRONT_LEFT | AV_CH_TOP_FRONT_RIGHT | \
+ AV_CH_TOP_BACK_LEFT | AV_CH_TOP_BACK_RIGHT)
+#define AV_CH_LAYOUT_HEXADECAGONAL \
+ (AV_CH_LAYOUT_OCTAGONAL | AV_CH_WIDE_LEFT | AV_CH_WIDE_RIGHT | \
+ AV_CH_TOP_BACK_LEFT | AV_CH_TOP_BACK_RIGHT | AV_CH_TOP_BACK_CENTER | \
+ AV_CH_TOP_FRONT_CENTER | AV_CH_TOP_FRONT_LEFT | AV_CH_TOP_FRONT_RIGHT)
+#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT | AV_CH_STEREO_RIGHT)
+#define AV_CH_LAYOUT_22POINT2 \
+ (AV_CH_LAYOUT_5POINT1_BACK | AV_CH_FRONT_LEFT_OF_CENTER | \
+ AV_CH_FRONT_RIGHT_OF_CENTER | AV_CH_BACK_CENTER | AV_CH_LOW_FREQUENCY_2 | \
+ AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT | AV_CH_TOP_FRONT_LEFT | \
+ AV_CH_TOP_FRONT_RIGHT | AV_CH_TOP_FRONT_CENTER | AV_CH_TOP_CENTER | \
+ AV_CH_TOP_BACK_LEFT | AV_CH_TOP_BACK_RIGHT | AV_CH_TOP_SIDE_LEFT | \
+ AV_CH_TOP_SIDE_RIGHT | AV_CH_TOP_BACK_CENTER | AV_CH_BOTTOM_FRONT_CENTER | \
+ AV_CH_BOTTOM_FRONT_LEFT | AV_CH_BOTTOM_FRONT_RIGHT)
+
+enum AVMatrixEncoding {
+ AV_MATRIX_ENCODING_NONE,
+ AV_MATRIX_ENCODING_DOLBY,
+ AV_MATRIX_ENCODING_DPLII,
+ AV_MATRIX_ENCODING_DPLIIX,
+ AV_MATRIX_ENCODING_DPLIIZ,
+ AV_MATRIX_ENCODING_DOLBYEX,
+ AV_MATRIX_ENCODING_DOLBYHEADPHONE,
+ AV_MATRIX_ENCODING_NB
+};
+
+/**
+ * @}
+ */
+
+/**
+ * An AVChannelCustom defines a single channel within a custom order layout
+ *
+ * Unlike most structures in FFmpeg, sizeof(AVChannelCustom) is a part of the
+ * public ABI.
+ *
+ * No new fields may be added to it without a major version bump.
+ */
+typedef struct AVChannelCustom {
+ enum AVChannel id;
+ char name[16];
+ void* opaque;
+} AVChannelCustom;
+
+/**
+ * An AVChannelLayout holds information about the channel layout of audio data.
+ *
+ * A channel layout here is defined as a set of channels ordered in a specific
+ * way (unless the channel order is AV_CHANNEL_ORDER_UNSPEC, in which case an
+ * AVChannelLayout carries only the channel count).
+ * All orders may be treated as if they were AV_CHANNEL_ORDER_UNSPEC by
+ * ignoring everything but the channel count, as long as
+ * av_channel_layout_check() considers them valid.
+ *
+ * Unlike most structures in FFmpeg, sizeof(AVChannelLayout) is a part of the
+ * public ABI and may be used by the caller. E.g. it may be allocated on stack
+ * or embedded in caller-defined structs.
+ *
+ * AVChannelLayout can be initialized as follows:
+ * - default initialization with {0}, followed by setting all used fields
+ * correctly;
+ * - by assigning one of the predefined AV_CHANNEL_LAYOUT_* initializers;
+ * - with a constructor function, such as av_channel_layout_default(),
+ * av_channel_layout_from_mask() or av_channel_layout_from_string().
+ *
+ * The channel layout must be uninitialized with av_channel_layout_uninit().
+ *
+ * Copying an AVChannelLayout via assignment is forbidden;
+ * av_channel_layout_copy() must be used instead (and its return value should
+ * be checked).
+ *
+ * No new fields may be added to it without a major version bump, except for
+ * new elements of the union fitting in sizeof(uint64_t).
+ */
+typedef struct AVChannelLayout {
+ /**
+ * Channel order used in this layout.
+ * This is a mandatory field.
+ */
+ enum AVChannelOrder order;
+
+ /**
+ * Number of channels in this layout. Mandatory field.
+ */
+ int nb_channels;
+
+ /**
+ * Details about which channels are present in this layout.
+ * For AV_CHANNEL_ORDER_UNSPEC, this field is undefined and must not be
+ * used.
+ */
+ union {
+ /**
+ * This member must be used for AV_CHANNEL_ORDER_NATIVE, and may be used
+ * for AV_CHANNEL_ORDER_AMBISONIC to signal non-diegetic channels.
+ * It is a bitmask, where the position of each set bit means that the
+ * AVChannel with the corresponding value is present.
+ *
+ * I.e. when (mask & (1 << AV_CHAN_FOO)) is non-zero, then AV_CHAN_FOO
+ * is present in the layout. Otherwise it is not present.
+ *
+ * @note when a channel layout using a bitmask is constructed or
+ * modified manually (i.e. not using any of the av_channel_layout_*
+ * functions), the code doing it must ensure that the number of set bits
+ * is equal to nb_channels.
+ */
+ uint64_t mask;
+ /**
+ * This member must be used when the channel order is
+ * AV_CHANNEL_ORDER_CUSTOM. It is a nb_channels-sized array, with each
+ * element signalling the presence of the AVChannel with the
+ * corresponding value in map[i].id.
+ *
+     * I.e. when map[i].id is equal to AV_CHAN_FOO, then AV_CHAN_FOO is the
+     * i-th channel in the audio data.
+ *
+ * When map[i].id is in the range between AV_CHAN_AMBISONIC_BASE and
+ * AV_CHAN_AMBISONIC_END (inclusive), the channel contains an ambisonic
+ * component with ACN index (as defined above)
+ * n = map[i].id - AV_CHAN_AMBISONIC_BASE.
+ *
+ * map[i].name may be filled with a 0-terminated string, in which case
+ * it will be used for the purpose of identifying the channel with the
+     * convenience functions below. Otherwise it must be zeroed.
+ */
+ AVChannelCustom* map;
+ } u;
+
+ /**
+   * For storing private data of the user.
+ */
+ void* opaque;
+} AVChannelLayout;
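+
+/*
+ * Editor's note: a minimal lifecycle sketch (not part of the upstream header)
+ * using the constructor, describe and uninit functions declared further
+ * below; error handling is shortened for brevity.
+ *
+ *     AVChannelLayout layout = { 0 };
+ *     char buf[64];
+ *
+ *     av_channel_layout_from_mask(&layout, AV_CH_LAYOUT_5POINT1);
+ *     if (av_channel_layout_check(&layout) &&
+ *         av_channel_layout_describe(&layout, buf, sizeof(buf)) > 0) {
+ *         // buf now holds "5.1(side)"
+ *     }
+ *     av_channel_layout_uninit(&layout);
+ */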
+
+#define AV_CHANNEL_LAYOUT_MASK(nb, m) \
+ { \
+ .order = AV_CHANNEL_ORDER_NATIVE, .nb_channels = (nb), .u = {.mask = (m) } \
+ }
+
+/**
+ * @name Common pre-defined channel layouts
+ * @{
+ */
+#define AV_CHANNEL_LAYOUT_MONO AV_CHANNEL_LAYOUT_MASK(1, AV_CH_LAYOUT_MONO)
+#define AV_CHANNEL_LAYOUT_STEREO AV_CHANNEL_LAYOUT_MASK(2, AV_CH_LAYOUT_STEREO)
+#define AV_CHANNEL_LAYOUT_2POINT1 \
+ AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_2POINT1)
+#define AV_CHANNEL_LAYOUT_2_1 AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_2_1)
+#define AV_CHANNEL_LAYOUT_SURROUND \
+ AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_SURROUND)
+#define AV_CHANNEL_LAYOUT_3POINT1 \
+ AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_3POINT1)
+#define AV_CHANNEL_LAYOUT_4POINT0 \
+ AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_4POINT0)
+#define AV_CHANNEL_LAYOUT_4POINT1 \
+ AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_4POINT1)
+#define AV_CHANNEL_LAYOUT_2_2 AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_2_2)
+#define AV_CHANNEL_LAYOUT_QUAD AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_QUAD)
+#define AV_CHANNEL_LAYOUT_5POINT0 \
+ AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_5POINT0)
+#define AV_CHANNEL_LAYOUT_5POINT1 \
+ AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_5POINT1)
+#define AV_CHANNEL_LAYOUT_5POINT0_BACK \
+ AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_5POINT0_BACK)
+#define AV_CHANNEL_LAYOUT_5POINT1_BACK \
+ AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_5POINT1_BACK)
+#define AV_CHANNEL_LAYOUT_6POINT0 \
+ AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_6POINT0)
+#define AV_CHANNEL_LAYOUT_6POINT0_FRONT \
+ AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_6POINT0_FRONT)
+#define AV_CHANNEL_LAYOUT_HEXAGONAL \
+ AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_HEXAGONAL)
+#define AV_CHANNEL_LAYOUT_6POINT1 \
+ AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1)
+#define AV_CHANNEL_LAYOUT_6POINT1_BACK \
+ AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1_BACK)
+#define AV_CHANNEL_LAYOUT_6POINT1_FRONT \
+ AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1_FRONT)
+#define AV_CHANNEL_LAYOUT_7POINT0 \
+ AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_7POINT0)
+#define AV_CHANNEL_LAYOUT_7POINT0_FRONT \
+ AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_7POINT0_FRONT)
+#define AV_CHANNEL_LAYOUT_7POINT1 \
+ AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1)
+#define AV_CHANNEL_LAYOUT_7POINT1_WIDE \
+ AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_WIDE)
+#define AV_CHANNEL_LAYOUT_7POINT1_WIDE_BACK \
+ AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_WIDE_BACK)
+#define AV_CHANNEL_LAYOUT_7POINT1_TOP_BACK \
+ AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_TOP_BACK)
+#define AV_CHANNEL_LAYOUT_OCTAGONAL \
+ AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_OCTAGONAL)
+#define AV_CHANNEL_LAYOUT_CUBE AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_CUBE)
+#define AV_CHANNEL_LAYOUT_HEXADECAGONAL \
+ AV_CHANNEL_LAYOUT_MASK(16, AV_CH_LAYOUT_HEXADECAGONAL)
+#define AV_CHANNEL_LAYOUT_STEREO_DOWNMIX \
+ AV_CHANNEL_LAYOUT_MASK(2, AV_CH_LAYOUT_STEREO_DOWNMIX)
+#define AV_CHANNEL_LAYOUT_22POINT2 \
+ AV_CHANNEL_LAYOUT_MASK(24, AV_CH_LAYOUT_22POINT2)
+#define AV_CHANNEL_LAYOUT_AMBISONIC_FIRST_ORDER \
+ { \
+ .order = AV_CHANNEL_ORDER_AMBISONIC, .nb_channels = 4, .u = {.mask = 0 } \
+ }
+/** @} */
+
+struct AVBPrint;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+/**
+ * @name Deprecated Functions
+ * @{
+ */
+
+/**
+ * Return a channel layout id that matches name, or 0 if no match is found.
+ *
+ * name can be one or several of the following notations,
+ * separated by '+' or '|':
+ * - the name of a common channel layout (mono, stereo, 4.0, quad, 5.0,
+ * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix);
+ * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC,
+ * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR);
+ * - a number of channels, in decimal, followed by 'c', yielding
+ * the default channel layout for that number of channels (@see
+ * av_get_default_channel_layout);
+ * - a channel layout mask, in hexadecimal starting with "0x" (see the
+ * AV_CH_* macros).
+ *
+ * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7"
+ *
+ * @deprecated use av_channel_layout_from_string()
+ */
+attribute_deprecated uint64_t av_get_channel_layout(const char* name);
+
+/**
+ * Return a channel layout and the number of channels based on the specified
+ * name.
+ *
+ * This function is similar to av_get_channel_layout(), but can also parse
+ * unknown channel layout specifications.
+ *
+ * @param[in] name channel layout specification string
+ * @param[out] channel_layout parsed channel layout (0 if unknown)
+ * @param[out] nb_channels number of channels
+ *
+ * @return 0 on success, AVERROR(EINVAL) if the parsing fails.
+ * @deprecated use av_channel_layout_from_string()
+ */
+attribute_deprecated int av_get_extended_channel_layout(
+ const char* name, uint64_t* channel_layout, int* nb_channels);
+
+/**
+ * Return a description of a channel layout.
+ * If nb_channels is <= 0, it is guessed from the channel_layout.
+ *
+ * @param buf put here the string containing the channel layout
+ * @param buf_size size in bytes of the buffer
+ * @param nb_channels number of channels
+ * @param channel_layout channel layout bitset
+ * @deprecated use av_channel_layout_describe()
+ */
+attribute_deprecated void av_get_channel_layout_string(char* buf, int buf_size,
+ int nb_channels,
+ uint64_t channel_layout);
+
+/**
+ * Append a description of a channel layout to a bprint buffer.
+ * @deprecated use av_channel_layout_describe()
+ */
+attribute_deprecated void av_bprint_channel_layout(struct AVBPrint* bp,
+ int nb_channels,
+ uint64_t channel_layout);
+
+/**
+ * Return the number of channels in the channel layout.
+ * @deprecated use AVChannelLayout.nb_channels
+ */
+attribute_deprecated int av_get_channel_layout_nb_channels(
+ uint64_t channel_layout);
+
+/**
+ * Return default channel layout for a given number of channels.
+ *
+ * @deprecated use av_channel_layout_default()
+ */
+attribute_deprecated int64_t av_get_default_channel_layout(int nb_channels);
+
+/**
+ * Get the index of a channel in channel_layout.
+ *
+ * @param channel_layout channel layout bitset
+ * @param channel a channel layout describing exactly one channel which must be
+ * present in channel_layout.
+ *
+ * @return index of channel in channel_layout on success, a negative AVERROR
+ * on error.
+ *
+ * @deprecated use av_channel_layout_index_from_channel()
+ */
+attribute_deprecated int av_get_channel_layout_channel_index(
+ uint64_t channel_layout, uint64_t channel);
+
+/**
+ * Get the channel with the given index in channel_layout.
+ * @deprecated use av_channel_layout_channel_from_index()
+ */
+attribute_deprecated uint64_t
+av_channel_layout_extract_channel(uint64_t channel_layout, int index);
+
+/**
+ * Get the name of a given channel.
+ *
+ * @return channel name on success, NULL on error.
+ *
+ * @deprecated use av_channel_name()
+ */
+attribute_deprecated const char* av_get_channel_name(uint64_t channel);
+
+/**
+ * Get the description of a given channel.
+ *
+ * @param channel a channel layout with a single channel
+ * @return channel description on success, NULL on error
+ * @deprecated use av_channel_description()
+ */
+attribute_deprecated const char* av_get_channel_description(uint64_t channel);
+
+/**
+ * Get the value and name of a standard channel layout.
+ *
+ * @param[in] index index in an internal list, starting at 0
+ * @param[out] layout channel layout mask
+ * @param[out] name name of the layout
+ * @return 0 if the layout exists,
+ * <0 if index is beyond the limits
+ * @deprecated use av_channel_layout_standard()
+ */
+attribute_deprecated int av_get_standard_channel_layout(unsigned index,
+ uint64_t* layout,
+ const char** name);
+/**
+ * @}
+ */
+#endif
+
+/**
+ * Get a human readable string in an abbreviated form describing a given
+ * channel. This is the inverse function of @ref av_channel_from_string().
+ *
+ * @param buf pre-allocated buffer where to put the generated string
+ * @param buf_size size in bytes of the buffer.
+ * @param channel the AVChannel whose name to get
+ * @return amount of bytes needed to hold the output string, or a negative
+ * AVERROR on failure. If the returned value is bigger than buf_size, then the
+ * string was truncated.
+ */
+int av_channel_name(char* buf, size_t buf_size, enum AVChannel channel);
+
+/**
+ * bprint variant of av_channel_name().
+ *
+ * @note the string will be appended to the bprint buffer.
+ */
+void av_channel_name_bprint(struct AVBPrint* bp, enum AVChannel channel_id);
+
+/**
+ * Get a human readable string describing a given channel.
+ *
+ * @param buf pre-allocated buffer where to put the generated string
+ * @param buf_size size in bytes of the buffer.
+ * @param channel the AVChannel whose description to get
+ * @return amount of bytes needed to hold the output string, or a negative
+ * AVERROR on failure. If the returned value is bigger than buf_size, then the
+ * string was truncated.
+ */
+int av_channel_description(char* buf, size_t buf_size, enum AVChannel channel);
+
+/**
+ * bprint variant of av_channel_description().
+ *
+ * @note the string will be appended to the bprint buffer.
+ */
+void av_channel_description_bprint(struct AVBPrint* bp,
+ enum AVChannel channel_id);
+
+/**
+ * This is the inverse function of @ref av_channel_name().
+ *
+ * @return the channel with the given name
+ * AV_CHAN_NONE when name does not identify a known channel
+ */
+enum AVChannel av_channel_from_string(const char* name);
+
+/**
+ * Initialize a native channel layout from a bitmask indicating which channels
+ * are present.
+ *
+ * @param channel_layout the layout structure to be initialized
+ * @param mask bitmask describing the channel layout
+ *
+ * @return 0 on success
+ * AVERROR(EINVAL) for invalid mask values
+ */
+int av_channel_layout_from_mask(AVChannelLayout* channel_layout, uint64_t mask);
+
+/**
+ * Initialize a channel layout from a given string description.
+ * The input string can be represented by:
+ * - the formal channel layout name (returned by av_channel_layout_describe())
+ * - single or multiple channel names (returned by av_channel_name(), eg. "FL",
+ * or concatenated with "+", each optionally containing a custom name after
+ * a "@", eg. "FL@Left+FR@Right+LFE")
+ * - a decimal or hexadecimal value of a native channel layout (eg. "4" or
+ * "0x4")
+ * - the number of channels with default layout (eg. "4c")
+ * - the number of unordered channels (eg. "4C" or "4 channels")
+ * - the ambisonic order followed by optional non-diegetic channels (eg.
+ * "ambisonic 2+stereo")
+ *
+ * @param channel_layout input channel layout
+ * @param str string describing the channel layout
+ * @return 0 if the channel layout was detected, AVERROR_INVALIDDATA otherwise
+ */
+int av_channel_layout_from_string(AVChannelLayout* channel_layout,
+ const char* str);
+
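+/*
+ * Editor's note: an illustrative sketch (not part of the upstream header) of
+ * the accepted string forms; all three calls below produce a 5.1(side)
+ * layout. Error checking is omitted for brevity.
+ *
+ *     AVChannelLayout a = { 0 }, b = { 0 }, c = { 0 };
+ *     av_channel_layout_from_string(&a, "5.1(side)");
+ *     av_channel_layout_from_string(&b, "FL+FR+FC+LFE+SL+SR");
+ *     av_channel_layout_from_string(&c, "0x60f");
+ *     // ...
+ *     av_channel_layout_uninit(&a);
+ *     av_channel_layout_uninit(&b);
+ *     av_channel_layout_uninit(&c);
+ */
+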
+/**
+ * Get the default channel layout for a given number of channels.
+ *
+ * @param ch_layout the layout structure to be initialized
+ * @param nb_channels number of channels
+ */
+void av_channel_layout_default(AVChannelLayout* ch_layout, int nb_channels);
+
+/**
+ * Iterate over all standard channel layouts.
+ *
+ * @param opaque a pointer where libavutil will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the standard channel layout or NULL when the iteration is
+ * finished
+ */
+const AVChannelLayout* av_channel_layout_standard(void** opaque);
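+
+/*
+ * Editor's note: a short iteration sketch (not part of the upstream header);
+ * the function name is made up for illustration.
+ *
+ *     void list_standard_layouts(void)
+ *     {
+ *         const AVChannelLayout *layout;
+ *         void *iter = NULL;   // must start out as NULL
+ *         char buf[128];
+ *
+ *         while ((layout = av_channel_layout_standard(&iter))) {
+ *             if (av_channel_layout_describe(layout, buf, sizeof(buf)) > 0) {
+ *                 // buf holds e.g. "mono", "stereo", "5.1", ...
+ *             }
+ *         }
+ *     }
+ */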
+
+/**
+ * Free any allocated data in the channel layout and reset the channel
+ * count to 0.
+ *
+ * @param channel_layout the layout structure to be uninitialized
+ */
+void av_channel_layout_uninit(AVChannelLayout* channel_layout);
+
+/**
+ * Make a copy of a channel layout. This differs from just assigning src to dst
+ * in that it allocates and copies the map for AV_CHANNEL_ORDER_CUSTOM.
+ *
+ * @note the destination channel_layout will always be uninitialized before
+ * the copy.
+ *
+ * @param dst destination channel layout
+ * @param src source channel layout
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_channel_layout_copy(AVChannelLayout* dst, const AVChannelLayout* src);
+
+/**
+ * Get a human-readable string describing the channel layout properties.
+ * The string will be in the same format that is accepted by
+ * @ref av_channel_layout_from_string(), allowing the same channel layout
+ * (except for opaque pointers) to be rebuilt from it.
+ *
+ * @param channel_layout channel layout to be described
+ * @param buf pre-allocated buffer where to put the generated string
+ * @param buf_size size in bytes of the buffer.
+ * @return amount of bytes needed to hold the output string, or a negative
+ * AVERROR on failure. If the returned value is bigger than buf_size, then the
+ * string was truncated.
+ */
+int av_channel_layout_describe(const AVChannelLayout* channel_layout, char* buf,
+ size_t buf_size);
+
+/**
+ * bprint variant of av_channel_layout_describe().
+ *
+ * @note the string will be appended to the bprint buffer.
+ * @return 0 on success, or a negative AVERROR value on failure.
+ */
+int av_channel_layout_describe_bprint(const AVChannelLayout* channel_layout,
+ struct AVBPrint* bp);
+
+/**
+ * Get the channel with the given index in a channel layout.
+ *
+ * @param channel_layout input channel layout
+ * @param idx index of the channel
+ * @return channel with the index idx in channel_layout on success or
+ * AV_CHAN_NONE on failure (if idx is not valid or the channel order is
+ * unspecified)
+ */
+enum AVChannel av_channel_layout_channel_from_index(
+ const AVChannelLayout* channel_layout, unsigned int idx);
+
+/**
+ * Get the index of a given channel in a channel layout. In case multiple
+ * channels are found, only the first match will be returned.
+ *
+ * @param channel_layout input channel layout
+ * @param channel the channel whose index to obtain
+ * @return index of channel in channel_layout on success or a negative number if
+ * channel is not present in channel_layout.
+ */
+int av_channel_layout_index_from_channel(const AVChannelLayout* channel_layout,
+ enum AVChannel channel);
+
+/**
+ * Get the index in a channel layout of a channel described by the given string.
+ * In case multiple channels are found, only the first match will be returned.
+ *
+ * This function accepts channel names in the same format as
+ * @ref av_channel_from_string().
+ *
+ * @param channel_layout input channel layout
+ * @param name string describing the channel whose index to obtain
+ * @return a channel index described by the given string, or a negative AVERROR
+ * value.
+ */
+int av_channel_layout_index_from_string(const AVChannelLayout* channel_layout,
+ const char* name);
+
+/**
+ * Get a channel described by the given string.
+ *
+ * This function accepts channel names in the same format as
+ * @ref av_channel_from_string().
+ *
+ * @param channel_layout input channel layout
+ * @param name string describing the channel to obtain
+ * @return a channel described by the given string in channel_layout on success
+ * or AV_CHAN_NONE on failure (if the string is not valid or the channel
+ * order is unspecified)
+ */
+enum AVChannel av_channel_layout_channel_from_string(
+ const AVChannelLayout* channel_layout, const char* name);
+
+/**
+ * Find out what channels from a given set are present in a channel layout,
+ * without regard for their positions.
+ *
+ * @param channel_layout input channel layout
+ * @param mask a combination of AV_CH_* representing a set of channels
+ * @return a bitfield representing all the channels from mask that are present
+ * in channel_layout
+ */
+uint64_t av_channel_layout_subset(const AVChannelLayout* channel_layout,
+ uint64_t mask);
+
+/**
+ * Check whether a channel layout is valid, i.e. can possibly describe audio
+ * data.
+ *
+ * @param channel_layout input channel layout
+ * @return 1 if channel_layout is valid, 0 otherwise.
+ */
+int av_channel_layout_check(const AVChannelLayout* channel_layout);
+
+/**
+ * Check whether two channel layouts are semantically the same, i.e. the same
+ * channels are present on the same positions in both.
+ *
+ * If one of the channel layouts is AV_CHANNEL_ORDER_UNSPEC, while the other is
+ * not, they are considered to be unequal. If both are AV_CHANNEL_ORDER_UNSPEC,
+ * they are considered equal iff the channel counts are the same in both.
+ *
+ * @param chl input channel layout
+ * @param chl1 input channel layout
+ * @return 0 if chl and chl1 are equal, 1 if they are not equal. A negative
+ * AVERROR code if one or both are invalid.
+ */
+int av_channel_layout_compare(const AVChannelLayout* chl,
+ const AVChannelLayout* chl1);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_CHANNEL_LAYOUT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/common.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/common.h
new file mode 100644
index 000000000000..6c09e40bc05a
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/common.h
@@ -0,0 +1,589 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal and external API header
+ */
+
+#ifndef AVUTIL_COMMON_H
+#define AVUTIL_COMMON_H
+
+#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && \
+ !defined(UINT64_C)
+# error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS
+#endif
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "attributes.h"
+#include "macros.h"
+
+// rounded division & shift
+#define RSHIFT(a, b) \
+ ((a) > 0 ? ((a) + ((1 << (b)) >> 1)) >> (b) \
+ : ((a) + ((1 << (b)) >> 1) - 1) >> (b))
+/* assume b>0 */
+#define ROUNDED_DIV(a, b) \
+ (((a) >= 0 ? (a) + ((b) >> 1) : (a) - ((b) >> 1)) / (b))
+/* Fast a/(1<<b) rounded toward +inf. Assume a>=0 and b>=0 */
+#define AV_CEIL_RSHIFT(a, b) \
+ (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) : ((a) + (1 << (b)) - 1) >> (b))
+/* Backwards compat. */
+#define FF_CEIL_RSHIFT AV_CEIL_RSHIFT
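+
+/*
+ * Editor's note: worked examples for the rounding macros above, not part of
+ * the upstream header.
+ *
+ *     ROUNDED_DIV( 7, 2)    // ->  4  (rounds away from zero)
+ *     ROUNDED_DIV(-7, 2)    // -> -4
+ *     AV_CEIL_RSHIFT(5, 1)  // ->  3  (ceil(5 / 2))
+ */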
+
+#define FFUDIV(a, b) (((a) > 0 ? (a) : (a) - (b) + 1) / (b))
+#define FFUMOD(a, b) ((a) - (b)*FFUDIV(a, b))
+
+/**
+ * Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior, as
+ * they are not representable as absolute values of their type. This is the
+ * same as with *abs().
+ * @see FFNABS()
+ */
+#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFSIGN(a) ((a) > 0 ? 1 : -1)
+
+/**
+ * Negative absolute value.
+ * This works for all integers of all types.
+ * As with many macros, this evaluates its argument twice; it thus must not
+ * have a side effect, i.e. FFNABS(x++) has undefined behavior.
+ */
+#define FFNABS(a) ((a) <= 0 ? (a) : (-(a)))
+
+/**
+ * Unsigned absolute value.
+ * This takes the absolute value of a signed int and returns it as an unsigned.
+ * This also works with INT_MIN, which would otherwise not be representable.
+ * As with many macros, this evaluates its argument twice.
+ */
+#define FFABSU(a) ((a) <= 0 ? -(unsigned)(a) : (unsigned)(a))
+#define FFABS64U(a) ((a) <= 0 ? -(uint64_t)(a) : (uint64_t)(a))
+
+/* misc math functions */
+
+#ifdef HAVE_AV_CONFIG_H
+# include "config.h"
+# include "intmath.h"
+#endif
+
+#ifndef av_ceil_log2
+# define av_ceil_log2 av_ceil_log2_c
+#endif
+#ifndef av_clip
+# define av_clip av_clip_c
+#endif
+#ifndef av_clip64
+# define av_clip64 av_clip64_c
+#endif
+#ifndef av_clip_uint8
+# define av_clip_uint8 av_clip_uint8_c
+#endif
+#ifndef av_clip_int8
+# define av_clip_int8 av_clip_int8_c
+#endif
+#ifndef av_clip_uint16
+# define av_clip_uint16 av_clip_uint16_c
+#endif
+#ifndef av_clip_int16
+# define av_clip_int16 av_clip_int16_c
+#endif
+#ifndef av_clipl_int32
+# define av_clipl_int32 av_clipl_int32_c
+#endif
+#ifndef av_clip_intp2
+# define av_clip_intp2 av_clip_intp2_c
+#endif
+#ifndef av_clip_uintp2
+# define av_clip_uintp2 av_clip_uintp2_c
+#endif
+#ifndef av_mod_uintp2
+# define av_mod_uintp2 av_mod_uintp2_c
+#endif
+#ifndef av_sat_add32
+# define av_sat_add32 av_sat_add32_c
+#endif
+#ifndef av_sat_dadd32
+# define av_sat_dadd32 av_sat_dadd32_c
+#endif
+#ifndef av_sat_sub32
+# define av_sat_sub32 av_sat_sub32_c
+#endif
+#ifndef av_sat_dsub32
+# define av_sat_dsub32 av_sat_dsub32_c
+#endif
+#ifndef av_sat_add64
+# define av_sat_add64 av_sat_add64_c
+#endif
+#ifndef av_sat_sub64
+# define av_sat_sub64 av_sat_sub64_c
+#endif
+#ifndef av_clipf
+# define av_clipf av_clipf_c
+#endif
+#ifndef av_clipd
+# define av_clipd av_clipd_c
+#endif
+#ifndef av_popcount
+# define av_popcount av_popcount_c
+#endif
+#ifndef av_popcount64
+# define av_popcount64 av_popcount64_c
+#endif
+#ifndef av_parity
+# define av_parity av_parity_c
+#endif
+
+#ifndef av_log2
+av_const int av_log2(unsigned v);
+#endif
+
+#ifndef av_log2_16bit
+av_const int av_log2_16bit(unsigned v);
+#endif
+
+/**
+ * Clip a signed integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_c(int a, int amin, int amax) {
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin)
+ return amin;
+ else if (a > amax)
+ return amax;
+ else
+ return a;
+}
+
+/**
+ * Clip a signed 64bit integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin,
+ int64_t amax) {
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin)
+ return amin;
+ else if (a > amax)
+ return amax;
+ else
+ return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-255 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint8_t av_clip_uint8_c(int a) {
+ if (a & (~0xFF))
+ return (~a) >> 31;
+ else
+ return a;
+}
+
+/**
+ * Clip a signed integer value into the -128,127 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int8_t av_clip_int8_c(int a) {
+ if ((a + 0x80U) & ~0xFF)
+ return (a >> 31) ^ 0x7F;
+ else
+ return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-65535 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint16_t av_clip_uint16_c(int a) {
+ if (a & (~0xFFFF))
+ return (~a) >> 31;
+ else
+ return a;
+}
+
+/**
+ * Clip a signed integer value into the -32768,32767 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int16_t av_clip_int16_c(int a) {
+ if ((a + 0x8000U) & ~0xFFFF)
+ return (a >> 31) ^ 0x7FFF;
+ else
+ return a;
+}
+
+/**
+ * Clip a signed 64-bit integer value into the -2147483648,2147483647 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a) {
+ if ((a + 0x80000000u) & ~UINT64_C(0xFFFFFFFF))
+ return (int32_t)((a >> 63) ^ 0x7FFFFFFF);
+ else
+ return (int32_t)a;
+}
+
+/**
+ * Clip a signed integer into the -(2^p),(2^p-1) range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_intp2_c(int a, int p) {
+ if (((unsigned)a + (1 << p)) & ~((2 << p) - 1))
+ return (a >> 31) ^ ((1 << p) - 1);
+ else
+ return a;
+}
+
+/**
+ * Clip a signed integer to an unsigned power of two range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p) {
+ if (a & ~((1 << p) - 1))
+ return (~a) >> 31 & ((1 << p) - 1);
+ else
+ return a;
+}
+
+/**
+ * Clear high bits from an unsigned integer starting at a specific bit position.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_mod_uintp2_c(unsigned a,
+ unsigned p) {
+ return a & ((1U << p) - 1);
+}
+
+/**
+ * Add two signed 32-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_add32_c(int a, int b) {
+ return av_clipl_int32((int64_t)a + b);
+}
+
+/**
+ * Add a doubled value to another value with saturation at both stages.
+ *
+ * @param a first value
+ * @param b value doubled and added to a
+ * @return sum sat(a + sat(2*b)) with signed saturation
+ */
+static av_always_inline int av_sat_dadd32_c(int a, int b) {
+ return av_sat_add32(a, av_sat_add32(b, b));
+}
+
+/**
+ * Subtract two signed 32-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return difference with signed saturation
+ */
+static av_always_inline int av_sat_sub32_c(int a, int b) {
+ return av_clipl_int32((int64_t)a - b);
+}
+
+/**
+ * Subtract a doubled value from another value with saturation at both stages.
+ *
+ * @param a first value
+ * @param b value doubled and subtracted from a
+ * @return difference sat(a - sat(2*b)) with signed saturation
+ */
+static av_always_inline int av_sat_dsub32_c(int a, int b) {
+ return av_sat_sub32(a, av_sat_add32(b, b));
+}
+
+/**
+ * Add two signed 64-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int64_t av_sat_add64_c(int64_t a, int64_t b) {
+#if (!defined(__INTEL_COMPILER) && AV_GCC_VERSION_AT_LEAST(5, 1)) || \
+ AV_HAS_BUILTIN(__builtin_add_overflow)
+ int64_t tmp;
+ return !__builtin_add_overflow(a, b, &tmp)
+ ? tmp
+ : (tmp < 0 ? INT64_MAX : INT64_MIN);
+#else
+ int64_t s = a + (uint64_t)b;
+ if ((int64_t)(a ^ b | ~s ^ b) >= 0) return INT64_MAX ^ (b >> 63);
+ return s;
+#endif
+}
+
+/**
+ * Subtract two signed 64-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return difference with signed saturation
+ */
+static av_always_inline int64_t av_sat_sub64_c(int64_t a, int64_t b) {
+#if (!defined(__INTEL_COMPILER) && AV_GCC_VERSION_AT_LEAST(5, 1)) || \
+ AV_HAS_BUILTIN(__builtin_sub_overflow)
+ int64_t tmp;
+ return !__builtin_sub_overflow(a, b, &tmp)
+ ? tmp
+ : (tmp < 0 ? INT64_MAX : INT64_MIN);
+#else
+ if (b <= 0 && a >= INT64_MAX + b) return INT64_MAX;
+ if (b >= 0 && a <= INT64_MIN + b) return INT64_MIN;
+ return a - b;
+#endif
+}
+
+/**
+ * Clip a float value into the amin-amax range.
+ * If a is NaN or -inf, amin will be returned.
+ * If a is +inf, amax will be returned.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const float av_clipf_c(float a, float amin,
+ float amax) {
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ return FFMIN(FFMAX(a, amin), amax);
+}
+
+/**
+ * Clip a double value into the amin-amax range.
+ * If a is NaN or -inf, amin will be returned.
+ * If a is +inf, amax will be returned.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const double av_clipd_c(double a, double amin,
+ double amax) {
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ return FFMIN(FFMAX(a, amin), amax);
+}
+
+/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x))
+ * @return computed ceiling of log2(x)
+ */
+static av_always_inline av_const int av_ceil_log2_c(int x) {
+ return av_log2((x - 1U) << 1);
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount_c(uint32_t x) {
+ x -= (x >> 1) & 0x55555555;
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x + (x >> 4)) & 0x0F0F0F0F;
+ x += x >> 8;
+ return (x + (x >> 16)) & 0x3F;
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount64_c(uint64_t x) {
+ return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32));
+}
+
+static av_always_inline av_const int av_parity_c(uint32_t v) {
+ return av_popcount(v) & 1;
+}
+
+/**
+ * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_BYTE Expression reading one byte from the input.
+ * Evaluated up to 7 times (4 for the currently
+ * assigned Unicode range). With a memory buffer
+ *                 input, this could be *ptr++; if you want to make sure
+ *                 that reading stops at the end of a NUL-terminated string,
+ *                 use *ptr ? *ptr++ : 0 instead.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ *
+ * @warning ERROR should not contain a loop control statement which
+ * could interact with the internal while loop, and should force an
+ * exit from the macro code (e.g. through a goto or a return) in order
+ * to prevent undefined results.
+ */
+#define GET_UTF8(val, GET_BYTE, ERROR) \
+ val = (GET_BYTE); \
+ { \
+ uint32_t top = (val & 128) >> 1; \
+ if ((val & 0xc0) == 0x80 || val >= 0xFE) { \
+ ERROR \
+ } \
+ while (val & top) { \
+ unsigned int tmp = (GET_BYTE)-128; \
+ if (tmp >> 6) { \
+ ERROR \
+ } \
+ val = (val << 6) + tmp; \
+ top <<= 5; \
+ } \
+ val &= (top << 1) - 1; \
+ }
+
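+/*
+ * Illustrative usage sketch (buffer and label names are hypothetical): decode
+ * one UTF-8 sequence from a NUL-terminated byte buffer.
+ *
+ *     const uint8_t *p = buf;
+ *     uint32_t cp;
+ *     GET_UTF8(cp, *p ? *p++ : 0, goto invalid;)
+ *     // cp now holds the decoded UCS-4 code point
+ */
+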
+/**
+ * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_16BIT Expression returning two bytes of UTF-16 data converted
+ * to native byte order. Evaluated one or two times.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ */
+#define GET_UTF16(val, GET_16BIT, ERROR) \
+ val = (GET_16BIT); \
+ { \
+ unsigned int hi = val - 0xD800; \
+ if (hi < 0x800) { \
+ val = (GET_16BIT)-0xDC00; \
+ if (val > 0x3FFU || hi > 0x3FFU) { \
+ ERROR \
+ } \
+ val += (hi << 10) + 0x10000; \
+ } \
+ }
+
+/**
+ * @def PUT_UTF8(val, tmp, PUT_BYTE)
+ * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes
+ * long).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint8_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_BYTE.
+ * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination.
+ * It could be a function or a statement, and uses tmp as the input byte.
+ * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be
+ * executed up to 4 times for values in the valid UTF-8 range and up to
+ * 7 times in the general case, depending on the length of the converted
+ * Unicode character.
+ */
+#define PUT_UTF8(val, tmp, PUT_BYTE) \
+ { \
+ int bytes, shift; \
+ uint32_t in = val; \
+ if (in < 0x80) { \
+ tmp = in; \
+ PUT_BYTE \
+ } else { \
+ bytes = (av_log2(in) + 4) / 5; \
+ shift = (bytes - 1) * 6; \
+ tmp = (256 - (256 >> bytes)) | (in >> shift); \
+ PUT_BYTE \
+ while (shift >= 6) { \
+ shift -= 6; \
+ tmp = 0x80 | ((in >> shift) & 0x3f); \
+ PUT_BYTE \
+ } \
+ } \
+ }
+
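+/*
+ * Illustrative usage sketch (output buffer name is hypothetical): encode one
+ * code point into a sufficiently large buffer.
+ *
+ *     uint8_t out[4], *o = out, tmp;
+ *     uint32_t cp = 0x20AC;            // U+20AC EURO SIGN
+ *     PUT_UTF8(cp, tmp, *o++ = tmp;)
+ *     // out now holds 0xE2 0x82 0xAC and o - out == 3
+ */
+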
+/**
+ * @def PUT_UTF16(val, tmp, PUT_16BIT)
+ * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint16_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_16BIT.
+ * @param PUT_16BIT writes the converted UTF-16 data to any proper destination
+ * in desired endianness. It could be a function or a statement, and uses tmp
+ * as its input. For example, PUT_16BIT could be "*output++ = tmp;"
+ * PUT_16BIT will be executed 1 or 2 times depending on the input character.
+ */
+#define PUT_UTF16(val, tmp, PUT_16BIT) \
+ { \
+ uint32_t in = val; \
+ if (in < 0x10000) { \
+ tmp = in; \
+ PUT_16BIT \
+ } else { \
+ tmp = 0xD800 | ((in - 0x10000) >> 10); \
+ PUT_16BIT \
+ tmp = 0xDC00 | ((in - 0x10000) & 0x3FF); \
+ PUT_16BIT \
+ } \
+ }
+
+#include "mem.h"
+
+#ifdef HAVE_AV_CONFIG_H
+# include "internal.h"
+#endif /* HAVE_AV_CONFIG_H */
+
+#endif /* AVUTIL_COMMON_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/cpu.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/cpu.h
new file mode 100644
index 000000000000..8a48d944ebd9
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/cpu.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CPU_H
+#define AVUTIL_CPU_H
+
+#include <stddef.h>
+
+#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */
+
+/* lower 16 bits - CPU features */
+#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX
+#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW
+#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions
+#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions
+#define AV_CPU_FLAG_SSE2SLOW \
+ 0x40000000 ///< SSE2 supported, but usually not faster
+ ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt
+#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions
+#define AV_CPU_FLAG_SSE3SLOW \
+ 0x20000000 ///< SSE3 supported, but usually not faster
+ ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions
+#define AV_CPU_FLAG_SSSE3SLOW \
+ 0x4000000 ///< SSSE3 supported, but usually not faster
+#define AV_CPU_FLAG_ATOM \
+ 0x10000000 ///< Atom processor, some SSSE3 instructions are slower
+#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions
+#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions
+#define AV_CPU_FLAG_AESNI 0x80000 ///< Advanced Encryption Standard functions
+#define AV_CPU_FLAG_AVX \
+ 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't
+ ///< used
+#define AV_CPU_FLAG_AVXSLOW \
+ 0x8000000 ///< AVX supported, but slow when using YMM registers (e.g.
+ ///< Bulldozer)
+#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions
+#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions
+#define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction
+#define AV_CPU_FLAG_AVX2 \
+ 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't
+ ///< used
+#define AV_CPU_FLAG_FMA3 0x10000 ///< Haswell FMA3 functions
+#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1
+#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2
+#define AV_CPU_FLAG_AVX512 \
+ 0x100000 ///< AVX-512 functions: requires OS support even if YMM/ZMM
+ ///< registers aren't used
+#define AV_CPU_FLAG_AVX512ICL \
+ 0x200000 ///< F/CD/BW/DQ/VL/VNNI/IFMA/VBMI/VBMI2/VPOPCNTDQ/BITALG/GFNI/VAES/VPCLMULQDQ
+#define AV_CPU_FLAG_SLOW_GATHER 0x2000000 ///< CPU has slow gathers.
+
+#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard
+#define AV_CPU_FLAG_VSX 0x0002 ///< ISA 2.06
+#define AV_CPU_FLAG_POWER8 0x0004 ///< ISA 2.07
+
+#define AV_CPU_FLAG_ARMV5TE (1 << 0)
+#define AV_CPU_FLAG_ARMV6 (1 << 1)
+#define AV_CPU_FLAG_ARMV6T2 (1 << 2)
+#define AV_CPU_FLAG_VFP (1 << 3)
+#define AV_CPU_FLAG_VFPV3 (1 << 4)
+#define AV_CPU_FLAG_NEON (1 << 5)
+#define AV_CPU_FLAG_ARMV8 (1 << 6)
+#define AV_CPU_FLAG_VFP_VM \
+  (1 << 7) ///< VFPv2 vector mode, deprecated in ARMv7-A and unavailable in
+           ///< various CPU implementations
+#define AV_CPU_FLAG_SETEND (1 << 16)
+
+#define AV_CPU_FLAG_MMI (1 << 0)
+#define AV_CPU_FLAG_MSA (1 << 1)
+
+// Loongarch SIMD extension.
+#define AV_CPU_FLAG_LSX (1 << 0)
+#define AV_CPU_FLAG_LASX (1 << 1)
+
+// RISC-V extensions
+#define AV_CPU_FLAG_RVI (1 << 0) ///< I (full GPR bank)
+#define AV_CPU_FLAG_RVF (1 << 1) ///< F (single precision FP)
+#define AV_CPU_FLAG_RVD (1 << 2) ///< D (double precision FP)
+#define AV_CPU_FLAG_RVV_I32 (1 << 3)  ///< Vectors of 8/16/32-bit ints
+#define AV_CPU_FLAG_RVV_F32 (1 << 4)  ///< Vectors of floats
+#define AV_CPU_FLAG_RVV_I64 (1 << 5)  ///< Vectors of 64-bit ints
+#define AV_CPU_FLAG_RVV_F64 (1 << 6)  ///< Vectors of doubles
+#define AV_CPU_FLAG_RVB_BASIC (1 << 7) ///< Basic bit-manipulations
+
+/**
+ * Return the flags which specify extensions supported by the CPU.
+ * The returned value is affected by av_force_cpu_flags() if that was used
+ * before. So av_get_cpu_flags() can easily be used in an application to
+ * detect the enabled cpu flags.
+ */
+int av_get_cpu_flags(void);
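+
+/*
+ * Illustrative usage sketch: pick an optimized code path at runtime. The flag
+ * names below are x86-specific; on other architectures the same bit values
+ * carry different meanings.
+ *
+ *     int flags = av_get_cpu_flags();
+ *     if (flags & AV_CPU_FLAG_AVX2) {
+ *         // use an AVX2 implementation
+ *     } else if (flags & AV_CPU_FLAG_SSE2) {
+ *         // fall back to SSE2
+ *     }
+ */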
+
+/**
+ * Disables cpu detection and forces the specified flags.
+ * -1 is a special case that disables forcing of specific flags.
+ */
+void av_force_cpu_flags(int flags);
+
+/**
+ * Parse CPU caps from a string and update the given AV_CPU_* flags based on
+ * that.
+ *
+ * @return negative on error.
+ */
+int av_parse_cpu_caps(unsigned* flags, const char* s);
+
+/**
+ * @return the number of logical CPU cores present.
+ */
+int av_cpu_count(void);
+
+/**
+ * Overrides cpu count detection and forces the specified count.
+ * Count < 1 disables forcing of specific count.
+ */
+void av_cpu_force_count(int count);
+
+/**
+ * Get the maximum data alignment that may be required by FFmpeg.
+ *
+ * Note that this is affected by the build configuration and the CPU flags mask,
+ * so e.g. if the CPU supports AVX, but libavutil has been built with
+ * --disable-avx or the AV_CPU_FLAG_AVX flag has been disabled through
+ * av_set_cpu_flags_mask(), then this function will behave as if AVX is not
+ * present.
+ */
+size_t av_cpu_max_align(void);
+
+#endif /* AVUTIL_CPU_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/dict.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/dict.h
new file mode 100644
index 000000000000..967a1c8041f0
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/dict.h
@@ -0,0 +1,259 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Public dictionary API.
+ * @deprecated
+ * AVDictionary is provided for compatibility with libav. It is inefficient
+ * in both implementation and API. It does not scale and is
+ * extremely slow with large dictionaries.
+ * It is recommended that new code use our tree container from tree.c/h
+ * where applicable, which uses AVL trees to achieve O(log n) performance.
+ */
+
+#ifndef AVUTIL_DICT_H
+#define AVUTIL_DICT_H
+
+#include <stdint.h>
+
+/**
+ * @addtogroup lavu_dict AVDictionary
+ * @ingroup lavu_data
+ *
+ * @brief Simple key:value store
+ *
+ * @{
+ * Dictionaries are used for storing key-value pairs.
+ *
+ * - To **create an AVDictionary**, simply pass an address of a NULL
+ * pointer to av_dict_set(). NULL can be used as an empty dictionary
+ * wherever a pointer to an AVDictionary is required.
+ * - To **insert an entry**, use av_dict_set().
+ * - Use av_dict_get() to **retrieve an entry**.
+ * - To **iterate over all entries**, use av_dict_iterate().
+ * - In order to **free the dictionary and all its contents**, use
+ *   av_dict_free().
+ *
+ @code
+ AVDictionary *d = NULL; // "create" an empty dictionary
+ AVDictionaryEntry *t = NULL;
+
+ av_dict_set(&d, "foo", "bar", 0); // add an entry
+
+ char *k = av_strdup("key"); // if your strings are already allocated,
+ char *v = av_strdup("value"); // you can avoid copying them like this
+ av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
+
+ while ((t = av_dict_iterate(d, t))) {
+ <....> // iterate over all entries in d
+ }
+ av_dict_free(&d);
+ @endcode
+ */
+
+/**
+ * @name AVDictionary Flags
+ * Flags that influence behavior of the matching of keys or insertion to the
+ * dictionary.
+ * @{
+ */
+#define AV_DICT_MATCH_CASE \
+ 1 /**< Only get an entry with exact-case key match. Only relevant in \
+ av_dict_get(). */
+#define AV_DICT_IGNORE_SUFFIX \
+ 2 /**< Return first entry in a dictionary whose first part corresponds to \
+ the search key, ignoring the suffix of the found key string. Only \
+ relevant in av_dict_get(). */
+#define AV_DICT_DONT_STRDUP_KEY \
+ 4 /**< Take ownership of a key that's been \
+ allocated with av_malloc() or another memory allocation function. */
+#define AV_DICT_DONT_STRDUP_VAL \
+ 8 /**< Take ownership of a value that's been \
+ allocated with av_malloc() or another memory allocation function. */
+#define AV_DICT_DONT_OVERWRITE 16 /**< Don't overwrite existing entries. */
+#define AV_DICT_APPEND \
+ 32 /**< If the entry already exists, append to it. Note that no \
+ delimiter is added, the strings are simply concatenated. */
+#define AV_DICT_MULTIKEY \
+  64 /**< Allow storing several equal keys in the dictionary */
+/**
+ * @}
+ */
+
+typedef struct AVDictionaryEntry {
+ char* key;
+ char* value;
+} AVDictionaryEntry;
+
+typedef struct AVDictionary AVDictionary;
+
+/**
+ * Get a dictionary entry with matching key.
+ *
+ * The returned entry key or value must not be changed, or it will
+ * cause undefined behavior.
+ *
+ * @param prev Set to the previous matching element to find the next.
+ * If set to NULL the first matching element is returned.
+ * @param key Matching key
+ * @param flags A collection of AV_DICT_* flags controlling how the
+ * entry is retrieved
+ *
+ * @return Found entry or NULL in case no matching entry was found in the
+ * dictionary
+ */
+AVDictionaryEntry* av_dict_get(const AVDictionary* m, const char* key,
+ const AVDictionaryEntry* prev, int flags);
+
+/**
+ * Iterate over a dictionary
+ *
+ * Iterates through all entries in the dictionary.
+ *
+ * @warning The returned AVDictionaryEntry key/value must not be changed.
+ *
+ * @warning As av_dict_set() invalidates all previous entries returned
+ * by this function, it must not be called while iterating over the dict.
+ *
+ * Typical usage:
+ * @code
+ * const AVDictionaryEntry *e = NULL;
+ * while ((e = av_dict_iterate(m, e))) {
+ * // ...
+ * }
+ * @endcode
+ *
+ * @param m The dictionary to iterate over
+ * @param prev Pointer to the previous AVDictionaryEntry, NULL initially
+ *
+ * @retval AVDictionaryEntry* The next element in the dictionary
+ * @retval NULL No more elements in the dictionary
+ */
+const AVDictionaryEntry* av_dict_iterate(const AVDictionary* m,
+ const AVDictionaryEntry* prev);
+
+/**
+ * Get number of entries in dictionary.
+ *
+ * @param m dictionary
+ * @return number of entries in dictionary
+ */
+int av_dict_count(const AVDictionary* m);
+
+/**
+ * Set the given entry in *pm, overwriting an existing entry.
+ *
+ * Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set,
+ * these arguments will be freed on error.
+ *
+ * @warning Adding a new entry to a dictionary invalidates all existing entries
+ * previously returned with av_dict_get() or av_dict_iterate().
+ *
+ * @param pm Pointer to a pointer to a dictionary struct. If *pm is NULL
+ * a dictionary struct is allocated and put in *pm.
+ * @param key Entry key to add to *pm (will either be av_strduped or added
+ * as a new key depending on flags)
+ * @param value Entry value to add to *pm (will be av_strduped or added as a
+ * new value depending on flags). Passing a NULL value will cause an existing
+ * entry to be deleted.
+ *
+ * @return >= 0 on success otherwise an error code <0
+ */
+int av_dict_set(AVDictionary** pm, const char* key, const char* value,
+ int flags);
+
+/**
+ * Convenience wrapper for av_dict_set() that converts the value to a string
+ * and stores it.
+ *
+ * Note: If ::AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.
+ */
+int av_dict_set_int(AVDictionary** pm, const char* key, int64_t value,
+ int flags);
+
+/**
+ * Parse the key/value pairs list and add the parsed entries to a dictionary.
+ *
+ * In case of failure, all the successfully set entries are stored in
+ * *pm. You may need to manually free the created dictionary.
+ *
+ * @param key_val_sep A 0-terminated list of characters used to separate
+ * key from value
+ * @param pairs_sep A 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @param flags Flags to use when adding to the dictionary.
+ * ::AV_DICT_DONT_STRDUP_KEY and ::AV_DICT_DONT_STRDUP_VAL
+ * are ignored since the key/value tokens will always
+ * be duplicated.
+ *
+ * @return 0 on success, negative AVERROR code on failure
+ */
+int av_dict_parse_string(AVDictionary** pm, const char* str,
+ const char* key_val_sep, const char* pairs_sep,
+ int flags);
+
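+/*
+ * Illustrative usage sketch (the option string is hypothetical): parse
+ * "key=value" pairs separated by ':' and walk the result.
+ *
+ *     AVDictionary *opts = NULL;
+ *     const AVDictionaryEntry *e = NULL;
+ *     if (av_dict_parse_string(&opts, "threads=4:strict=1", "=", ":", 0) >= 0) {
+ *         while ((e = av_dict_iterate(opts, e)))
+ *             ; // e->key / e->value hold one parsed pair
+ *     }
+ *     av_dict_free(&opts);
+ */
+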
+/**
+ * Copy entries from one AVDictionary struct into another.
+ *
+ * @note Metadata is read using the ::AV_DICT_IGNORE_SUFFIX flag
+ *
+ * @param dst Pointer to a pointer to an AVDictionary struct to copy into. If
+ * *dst is NULL, this function will allocate a struct for you and put it in *dst
+ * @param src Pointer to the source AVDictionary struct to copy items from.
+ * @param flags Flags to use when setting entries in *dst
+ *
+ * @return 0 on success, negative AVERROR code on failure. If dst was allocated
+ * by this function, callers should free the associated memory.
+ */
+int av_dict_copy(AVDictionary** dst, const AVDictionary* src, int flags);
+
+/**
+ * Free all the memory allocated for an AVDictionary struct
+ * and all keys and values.
+ */
+void av_dict_free(AVDictionary** m);
+
+/**
+ * Get dictionary entries as a string.
+ *
+ * Create a string containing the dictionary's entries.
+ * Such a string may be passed back to av_dict_parse_string().
+ * @note The string is escaped with backslashes ('\').
+ *
+ * @warning Separators can be neither '\\' nor '\0'. They also cannot be the
+ * same.
+ *
+ * @param[in] m The dictionary
+ * @param[out] buffer Pointer to a buffer that will be allocated with a
+ * string containing the entries. The buffer must be freed by the caller when
+ * it is no longer needed.
+ * @param[in] key_val_sep Character used to separate key from value
+ * @param[in] pairs_sep Character used to separate two pairs from each
+ * other
+ *
+ * @return >= 0 on success, negative on error
+ */
+int av_dict_get_string(const AVDictionary* m, char** buffer,
+ const char key_val_sep, const char pairs_sep);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_DICT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/error.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/error.h
similarity index 100%
copy from dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/error.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/error.h
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/frame.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/frame.h
new file mode 100644
index 000000000000..3b4f1e84bccd
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/frame.h
@@ -0,0 +1,960 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_frame
+ * reference-counted frame API
+ */
+
+#ifndef AVUTIL_FRAME_H
+#define AVUTIL_FRAME_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "avutil.h"
+#include "buffer.h"
+#include "channel_layout.h"
+#include "dict.h"
+#include "rational.h"
+#include "samplefmt.h"
+#include "pixfmt.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_frame AVFrame
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVFrame is an abstraction for reference-counted raw multimedia data.
+ */
+
+enum AVFrameSideDataType {
+ /**
+ * The data is the AVPanScan struct defined in libavcodec.
+ */
+ AV_FRAME_DATA_PANSCAN,
+ /**
+ * ATSC A53 Part 4 Closed Captions.
+ * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data.
+ * The number of bytes of CC data is AVFrameSideData.size.
+ */
+ AV_FRAME_DATA_A53_CC,
+ /**
+ * Stereoscopic 3d metadata.
+ * The data is the AVStereo3D struct defined in libavutil/stereo3d.h.
+ */
+ AV_FRAME_DATA_STEREO3D,
+ /**
+ * The data is the AVMatrixEncoding enum defined in
+ * libavutil/channel_layout.h.
+ */
+ AV_FRAME_DATA_MATRIXENCODING,
+ /**
+ * Metadata relevant to a downmix procedure.
+ * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.
+ */
+ AV_FRAME_DATA_DOWNMIX_INFO,
+ /**
+ * ReplayGain information in the form of the AVReplayGain struct.
+ */
+ AV_FRAME_DATA_REPLAYGAIN,
+ /**
+ * This side data contains a 3x3 transformation matrix describing an affine
+ * transformation that needs to be applied to the frame for correct
+ * presentation.
+ *
+ * See libavutil/display.h for a detailed description of the data.
+ */
+ AV_FRAME_DATA_DISPLAYMATRIX,
+ /**
+ * Active Format Description data consisting of a single byte as specified
+ * in ETSI TS 101 154 using AVActiveFormatDescription enum.
+ */
+ AV_FRAME_DATA_AFD,
+ /**
+ * Motion vectors exported by some codecs (on demand through the export_mvs
+ * flag set in the libavcodec AVCodecContext flags2 option).
+ * The data is the AVMotionVector struct defined in
+ * libavutil/motion_vector.h.
+ */
+ AV_FRAME_DATA_MOTION_VECTORS,
+ /**
+   * Recommends skipping the specified number of samples. This is exported
+ * only if the "skip_manual" AVOption is set in libavcodec.
+ * This has the same format as AV_PKT_DATA_SKIP_SAMPLES.
+ * @code
+ * u32le number of samples to skip from start of this packet
+ * u32le number of samples to skip from end of this packet
+ * u8 reason for start skip
+ * u8 reason for end skip (0=padding silence, 1=convergence)
+ * @endcode
+ */
+ AV_FRAME_DATA_SKIP_SAMPLES,
+ /**
+ * This side data must be associated with an audio frame and corresponds to
+ * enum AVAudioServiceType defined in avcodec.h.
+ */
+ AV_FRAME_DATA_AUDIO_SERVICE_TYPE,
+ /**
+ * Mastering display metadata associated with a video frame. The payload is
+ * an AVMasteringDisplayMetadata type and contains information about the
+ * mastering display color volume.
+ */
+ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
+ /**
+ * The GOP timecode in 25 bit timecode format. Data format is 64-bit integer.
+ * This is set on the first frame of a GOP that has a temporal reference of 0.
+ */
+ AV_FRAME_DATA_GOP_TIMECODE,
+
+ /**
+ * The data represents the AVSphericalMapping structure defined in
+ * libavutil/spherical.h.
+ */
+ AV_FRAME_DATA_SPHERICAL,
+
+ /**
+ * Content light level (based on CTA-861.3). This payload contains data in
+ * the form of the AVContentLightMetadata struct.
+ */
+ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
+
+ /**
+ * The data contains an ICC profile as an opaque octet buffer following the
+ * format described by ISO 15076-1 with an optional name defined in the
+ * metadata key entry "name".
+ */
+ AV_FRAME_DATA_ICC_PROFILE,
+
+ /**
+ * Timecode which conforms to SMPTE ST 12-1. The data is an array of 4
+ * uint32_t where the first uint32_t describes how many (1-3) of the other
+ * timecodes are used. The timecode format is described in the documentation
+ * of av_timecode_get_smpte_from_framenum() function in libavutil/timecode.h.
+ */
+ AV_FRAME_DATA_S12M_TIMECODE,
+
+ /**
+ * HDR dynamic metadata associated with a video frame. The payload is
+ * an AVDynamicHDRPlus type and contains information for color
+ * volume transform - application 4 of SMPTE 2094-40:2016 standard.
+ */
+ AV_FRAME_DATA_DYNAMIC_HDR_PLUS,
+
+ /**
+ * Regions Of Interest, the data is an array of AVRegionOfInterest type, the
+   * number of array elements is implied by AVFrameSideData.size /
+ * AVRegionOfInterest.self_size.
+ */
+ AV_FRAME_DATA_REGIONS_OF_INTEREST,
+
+ /**
+ * Encoding parameters for a video frame, as described by AVVideoEncParams.
+ */
+ AV_FRAME_DATA_VIDEO_ENC_PARAMS,
+
+ /**
+ * User data unregistered metadata associated with a video frame.
+   * This is the H.26[45] UDU SEI message, and shouldn't be used for any other
+   * purpose. The data is stored as uint8_t in AVFrameSideData.data, which is 16
+ * bytes of uuid_iso_iec_11578 followed by AVFrameSideData.size - 16 bytes of
+ * user_data_payload_byte.
+ */
+ AV_FRAME_DATA_SEI_UNREGISTERED,
+
+ /**
+ * Film grain parameters for a frame, described by AVFilmGrainParams.
+ * Must be present for every frame which should have film grain applied.
+ */
+ AV_FRAME_DATA_FILM_GRAIN_PARAMS,
+
+ /**
+ * Bounding boxes for object detection and classification,
+ * as described by AVDetectionBBoxHeader.
+ */
+ AV_FRAME_DATA_DETECTION_BBOXES,
+
+ /**
+ * Dolby Vision RPU raw data, suitable for passing to x265
+ * or other libraries. Array of uint8_t, with NAL emulation
+ * bytes intact.
+ */
+ AV_FRAME_DATA_DOVI_RPU_BUFFER,
+
+ /**
+ * Parsed Dolby Vision metadata, suitable for passing to a software
+ * implementation. The payload is the AVDOVIMetadata struct defined in
+ * libavutil/dovi_meta.h.
+ */
+ AV_FRAME_DATA_DOVI_METADATA,
+
+ /**
+ * HDR Vivid dynamic metadata associated with a video frame. The payload is
+ * an AVDynamicHDRVivid type and contains information for color
+ * volume transform - CUVA 005.1-2021.
+ */
+ AV_FRAME_DATA_DYNAMIC_HDR_VIVID,
+
+ /**
+ * Ambient viewing environment metadata, as defined by H.274.
+ */
+ AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT,
+};
+
+enum AVActiveFormatDescription {
+ AV_AFD_SAME = 8,
+ AV_AFD_4_3 = 9,
+ AV_AFD_16_9 = 10,
+ AV_AFD_14_9 = 11,
+ AV_AFD_4_3_SP_14_9 = 13,
+ AV_AFD_16_9_SP_14_9 = 14,
+ AV_AFD_SP_4_3 = 15,
+};
+
+/**
+ * Structure to hold side data for an AVFrame.
+ *
+ * sizeof(AVFrameSideData) is not a part of the public ABI, so new fields may be
+ * added to the end with a minor bump.
+ */
+typedef struct AVFrameSideData {
+ enum AVFrameSideDataType type;
+ uint8_t* data;
+ size_t size;
+ AVDictionary* metadata;
+ AVBufferRef* buf;
+} AVFrameSideData;
+
+/**
+ * Structure describing a single Region Of Interest.
+ *
+ * When multiple regions are defined in a single side-data block, they
+ * should be ordered from most to least important - some encoders are only
+ * capable of supporting a limited number of distinct regions, so will have
+ * to truncate the list.
+ *
+ * When overlapping regions are defined, the first region containing a given
+ * area of the frame applies.
+ */
+typedef struct AVRegionOfInterest {
+ /**
+ * Must be set to the size of this data structure (that is,
+ * sizeof(AVRegionOfInterest)).
+ */
+ uint32_t self_size;
+ /**
+ * Distance in pixels from the top edge of the frame to the top and
+ * bottom edges and from the left edge of the frame to the left and
+ * right edges of the rectangle defining this region of interest.
+ *
+ * The constraints on a region are encoder dependent, so the region
+ * actually affected may be slightly larger for alignment or other
+ * reasons.
+ */
+ int top;
+ int bottom;
+ int left;
+ int right;
+ /**
+ * Quantisation offset.
+ *
+ * Must be in the range -1 to +1. A value of zero indicates no quality
+ * change. A negative value asks for better quality (less quantisation),
+ * while a positive value asks for worse quality (greater quantisation).
+ *
+ * The range is calibrated so that the extreme values indicate the
+ * largest possible offset - if the rest of the frame is encoded with the
+ * worst possible quality, an offset of -1 indicates that this region
+ * should be encoded with the best possible quality anyway. Intermediate
+ * values are then interpolated in some codec-dependent way.
+ *
+ * For example, in 10-bit H.264 the quantisation parameter varies between
+ * -12 and 51. A typical qoffset value of -1/10 therefore indicates that
+ * this region should be encoded with a QP around one-tenth of the full
+ * range better than the rest of the frame. So, if most of the frame
+ * were to be encoded with a QP of around 30, this region would get a QP
+ * of around 24 (an offset of approximately -1/10 * (51 - -12) = -6.3).
+ * An extreme value of -1 would indicate that this region should be
+ * encoded with the best possible quality regardless of the treatment of
+ * the rest of the frame - that is, should be encoded at a QP of -12.
+ */
+ AVRational qoffset;
+} AVRegionOfInterest;
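+
+/*
+ * Illustrative sketch (the values are hypothetical): describe one 128x128
+ * region that should get roughly 1/10 of the QP range better quality than
+ * the rest of the frame.
+ *
+ *     AVRegionOfInterest roi = {
+ *         .self_size = sizeof(roi),
+ *         .top = 0, .bottom = 128, .left = 0, .right = 128,
+ *         .qoffset = { -1, 10 },   // -1/10
+ *     };
+ */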
+
+/**
+ * This structure describes decoded (raw) audio or video data.
+ *
+ * AVFrame must be allocated using av_frame_alloc(). Note that this only
+ * allocates the AVFrame itself, the buffers for the data must be managed
+ * through other means (see below).
+ * AVFrame must be freed with av_frame_free().
+ *
+ * AVFrame is typically allocated once and then reused multiple times to hold
+ * different data (e.g. a single AVFrame to hold frames received from a
+ * decoder). In such a case, av_frame_unref() will free any references held by
+ * the frame and reset it to its original clean state before it
+ * is reused again.
+ *
+ * The data described by an AVFrame is usually reference counted through the
+ * AVBuffer API. The underlying buffer references are stored in AVFrame.buf /
+ * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at
+ * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case,
+ * every single data plane must be contained in one of the buffers in
+ * AVFrame.buf or AVFrame.extended_buf.
+ * There may be a single buffer for all the data, or one separate buffer for
+ * each plane, or anything in between.
+ *
+ * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added
+ * to the end with a minor bump.
+ *
+ * Fields can be accessed through AVOptions; the name string used matches the
+ * C structure field name for fields accessible through AVOptions. The AVClass
+ * for AVFrame can be obtained from avcodec_get_frame_class().
+ */
+typedef struct AVFrame {
+#define AV_NUM_DATA_POINTERS 8
+ /**
+ * pointer to the picture/channel planes.
+ * This might be different from the first allocated byte. For video,
+ * it could even point to the end of the image data.
+ *
+ * All pointers in data and extended_data must point into one of the
+ * AVBufferRef in buf or extended_buf.
+ *
+   * Some decoders access areas outside 0,0 - width,height; please
+   * see avcodec_align_dimensions2(). Some filters and swscale can read
+   * up to 16 bytes beyond the planes; if these filters are to be used,
+   * then 16 extra bytes must be allocated.
+ *
+ * NOTE: Pointers not needed by the format MUST be set to NULL.
+ *
+ * @attention In case of video, the data[] pointers can point to the
+ * end of image data in order to reverse line order, when used in
+ * combination with negative values in the linesize[] array.
+ */
+ uint8_t* data[AV_NUM_DATA_POINTERS];
+
+ /**
+   * For video, a positive or negative value, typically indicating
+   * the size in bytes of each picture line, but it can also be:
+   * - the negative byte size of lines for vertical flipping
+   * (with data[n] pointing to the end of the data)
+ * - a positive or negative multiple of the byte size as for accessing
+ * even and odd fields of a frame (possibly flipped)
+ *
+ * For audio, only linesize[0] may be set. For planar audio, each channel
+ * plane must be the same size.
+ *
+   * For video, the linesizes should be multiples of the CPU's alignment
+   * preference; this is 16 or 32 for modern desktop CPUs.
+   * Some code requires such alignment, other code can be slower without
+   * correct alignment, and for yet other code it makes no difference.
+ *
+ * @note The linesize may be larger than the size of usable data -- there
+ * may be extra padding present for performance reasons.
+ *
+ * @attention In case of video, line size values can be negative to achieve
+ * a vertically inverted iteration over image lines.
+ */
+ int linesize[AV_NUM_DATA_POINTERS];
+
+ /**
+ * pointers to the data planes/channels.
+ *
+ * For video, this should simply point to data[].
+ *
+ * For planar audio, each channel has a separate data pointer, and
+ * linesize[0] contains the size of each channel buffer.
+ * For packed audio, there is just one data pointer, and linesize[0]
+ * contains the total size of the buffer for all channels.
+ *
+ * Note: Both data and extended_data should always be set in a valid frame,
+   * but for planar audio with more channels than can fit in data,
+ * extended_data must be used in order to access all channels.
+ */
+ uint8_t** extended_data;
+
+ /**
+ * @name Video dimensions
+ * Video frames only. The coded dimensions (in pixels) of the video frame,
+ * i.e. the size of the rectangle that contains some well-defined values.
+ *
+ * @note The part of the frame intended for display/presentation is further
+ * restricted by the @ref cropping "Cropping rectangle".
+ * @{
+ */
+ int width, height;
+ /**
+ * @}
+ */
+
+ /**
+ * number of audio samples (per channel) described by this frame
+ */
+ int nb_samples;
+
+ /**
+   * format of the frame, -1 if unknown or unset.
+   * Values correspond to enum AVPixelFormat for video frames,
+   * enum AVSampleFormat for audio frames.
+ */
+ int format;
+
+ /**
+   * 1 -> keyframe, 0 -> not
+ */
+ int key_frame;
+
+ /**
+ * Picture type of the frame.
+ */
+ enum AVPictureType pict_type;
+
+ /**
+ * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * Presentation timestamp in time_base units (time when frame should be shown
+ * to user).
+ */
+ int64_t pts;
+
+ /**
+   * DTS copied from the AVPacket that triggered returning this frame (if frame
+   * threading isn't used). This is also the presentation time of this AVFrame,
+   * calculated from only AVPacket.dts values without pts values.
+ */
+ int64_t pkt_dts;
+
+ /**
+ * Time base for the timestamps in this frame.
+ * In the future, this field may be set on frames output by decoders or
+ * filters, but its value will be by default ignored on input to encoders
+ * or filters.
+ */
+ AVRational time_base;
+
+#if FF_API_FRAME_PICTURE_NUMBER
+ /**
+ * picture number in bitstream order
+ */
+ attribute_deprecated int coded_picture_number;
+ /**
+ * picture number in display order
+ */
+ attribute_deprecated int display_picture_number;
+#endif
+
+ /**
+ * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
+ */
+ int quality;
+
+ /**
+ * for some private data of the user
+ */
+ void* opaque;
+
+ /**
+ * When decoding, this signals how much the picture must be delayed.
+ * extra_delay = repeat_pict / (2*fps)
+ */
+ int repeat_pict;
+
+ /**
+ * The content of the picture is interlaced.
+ */
+ int interlaced_frame;
+
+ /**
+   * If the content is interlaced, whether the top field is displayed first.
+ */
+ int top_field_first;
+
+ /**
+ * Tell user application that palette has changed from previous frame.
+ */
+ int palette_has_changed;
+
+#if FF_API_REORDERED_OPAQUE
+ /**
+ * reordered opaque 64 bits (generally an integer or a double precision float
+ * PTS but can be anything).
+   * The user sets AVCodecContext.reordered_opaque to represent the input at
+   * that time;
+   * the decoder reorders values as needed and sets AVFrame.reordered_opaque
+   * to exactly one of the values provided by the user through
+   * AVCodecContext.reordered_opaque.
+ *
+ * @deprecated Use AV_CODEC_FLAG_COPY_OPAQUE instead
+ */
+ attribute_deprecated int64_t reordered_opaque;
+#endif
+
+ /**
+ * Sample rate of the audio data.
+ */
+ int sample_rate;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * Channel layout of the audio data.
+ * @deprecated use ch_layout instead
+ */
+ attribute_deprecated uint64_t channel_layout;
+#endif
+
+ /**
+ * AVBuffer references backing the data for this frame. All the pointers in
+ * data and extended_data must point inside one of the buffers in buf or
+ * extended_buf. This array must be filled contiguously -- if buf[i] is
+ * non-NULL then buf[j] must also be non-NULL for all j < i.
+ *
+ * There may be at most one AVBuffer per data plane, so for video this array
+ * always contains all the references. For planar audio with more than
+ * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
+ * this array. Then the extra AVBufferRef pointers are stored in the
+ * extended_buf array.
+ */
+ AVBufferRef* buf[AV_NUM_DATA_POINTERS];
+
+ /**
+ * For planar audio which requires more than AV_NUM_DATA_POINTERS
+ * AVBufferRef pointers, this array will hold all the references which
+ * cannot fit into AVFrame.buf.
+ *
+ * Note that this is different from AVFrame.extended_data, which always
+ * contains all the pointers. This array only contains the extra pointers,
+ * which cannot fit into AVFrame.buf.
+ *
+ * This array is always allocated using av_malloc() by whoever constructs
+ * the frame. It is freed in av_frame_unref().
+ */
+ AVBufferRef** extended_buf;
+ /**
+ * Number of elements in extended_buf.
+ */
+ int nb_extended_buf;
+
+ AVFrameSideData** side_data;
+ int nb_side_data;
+
+/**
+ * @defgroup lavu_frame_flags AV_FRAME_FLAGS
+ * @ingroup lavu_frame
+ * Flags describing additional frame properties.
+ *
+ * @{
+ */
+
+/**
+ * The frame data may be corrupted, e.g. due to decoding errors.
+ */
+#define AV_FRAME_FLAG_CORRUPT (1 << 0)
+/**
+ * A flag to mark the frames which need to be decoded, but shouldn't be output.
+ */
+#define AV_FRAME_FLAG_DISCARD (1 << 2)
+ /**
+ * @}
+ */
+
+ /**
+ * Frame flags, a combination of @ref lavu_frame_flags
+ */
+ int flags;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ enum AVColorPrimaries color_primaries;
+
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ enum AVChromaLocation chroma_location;
+
+ /**
+ * frame timestamp estimated using various heuristics, in stream time base
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int64_t best_effort_timestamp;
+
+ /**
+ * reordered pos from the last AVPacket that has been input into the decoder
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t pkt_pos;
+
+#if FF_API_PKT_DURATION
+ /**
+ * duration of the corresponding packet, expressed in
+ * AVStream->time_base units, 0 if unknown.
+ * - encoding: unused
+ * - decoding: Read by user.
+ *
+ * @deprecated use duration instead
+ */
+ attribute_deprecated int64_t pkt_duration;
+#endif
+
+ /**
+ * metadata.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVDictionary* metadata;
+
+ /**
+ * decode error flags of the frame, set to a combination of
+ * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
+ * were errors during the decoding.
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int decode_error_flags;
+#define FF_DECODE_ERROR_INVALID_BITSTREAM 1
+#define FF_DECODE_ERROR_MISSING_REFERENCE 2
+#define FF_DECODE_ERROR_CONCEALMENT_ACTIVE 4
+#define FF_DECODE_ERROR_DECODE_SLICES 8
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * number of audio channels, only used for audio.
+ * - encoding: unused
+ * - decoding: Read by user.
+ * @deprecated use ch_layout instead
+ */
+ attribute_deprecated int channels;
+#endif
+
+ /**
+ * size of the corresponding packet containing the compressed
+ * frame.
+ * It is set to a negative value if unknown.
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int pkt_size;
+
+ /**
+ * For hwaccel-format frames, this should be a reference to the
+ * AVHWFramesContext describing the frame.
+ */
+ AVBufferRef* hw_frames_ctx;
+
+ /**
+ * AVBufferRef for free use by the API user. FFmpeg will never check the
+ * contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when
+ * the frame is unreferenced. av_frame_copy_props() calls create a new
+ * reference with av_buffer_ref() for the target frame's opaque_ref field.
+ *
+ * This is unrelated to the opaque field, although it serves a similar
+ * purpose.
+ */
+ AVBufferRef* opaque_ref;
+
+ /**
+ * @anchor cropping
+ * @name Cropping
+   * Video frames only. The number of pixels to discard from the
+ * top/bottom/left/right border of the frame to obtain the sub-rectangle of
+ * the frame intended for presentation.
+ * @{
+ */
+ size_t crop_top;
+ size_t crop_bottom;
+ size_t crop_left;
+ size_t crop_right;
+ /**
+ * @}
+ */
+
+ /**
+ * AVBufferRef for internal use by a single libav* library.
+ * Must not be used to transfer data between libraries.
+ * Has to be NULL when ownership of the frame leaves the respective library.
+ *
+ * Code outside the FFmpeg libs should never check or change the contents of
+ * the buffer ref.
+ *
+ * FFmpeg calls av_buffer_unref() on it when the frame is unreferenced.
+ * av_frame_copy_props() calls create a new reference with av_buffer_ref()
+ * for the target frame's private_ref field.
+ */
+ AVBufferRef* private_ref;
+
+ /**
+ * Channel layout of the audio data.
+ */
+ AVChannelLayout ch_layout;
+
+ /**
+ * Duration of the frame, in the same units as pts. 0 if unknown.
+ */
+ int64_t duration;
+} AVFrame;
+
+/**
+ * Allocate an AVFrame and set its fields to default values. The resulting
+ * struct must be freed using av_frame_free().
+ *
+ * @return An AVFrame filled with default values or NULL on failure.
+ *
+ * @note this only allocates the AVFrame itself, not the data buffers. Those
+ * must be allocated through other means, e.g. with av_frame_get_buffer() or
+ * manually.
+ */
+AVFrame* av_frame_alloc(void);
+
+/**
+ * Free the frame and any dynamically allocated objects in it,
+ * e.g. extended_data. If the frame is reference counted, it will be
+ * unreferenced first.
+ *
+ * @param frame frame to be freed. The pointer will be set to NULL.
+ */
+void av_frame_free(AVFrame** frame);
+
+/**
+ * Set up a new reference to the data described by the source frame.
+ *
+ * Copy frame properties from src to dst and create a new reference for each
+ * AVBufferRef from src.
+ *
+ * If src is not reference counted, new buffers are allocated and the data is
+ * copied.
+ *
+ * @warning: dst MUST have been either unreferenced with av_frame_unref(dst),
+ * or newly allocated with av_frame_alloc() before calling this
+ * function, or undefined behavior will occur.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_frame_ref(AVFrame* dst, const AVFrame* src);
+
+/**
+ * Create a new frame that references the same data as src.
+ *
+ * This is a shortcut for av_frame_alloc()+av_frame_ref().
+ *
+ * @return newly created AVFrame on success, NULL on error.
+ */
+AVFrame* av_frame_clone(const AVFrame* src);
+
+/**
+ * Unreference all the buffers referenced by frame and reset the frame fields.
+ */
+void av_frame_unref(AVFrame* frame);
+
+/**
+ * Move everything contained in src to dst and reset src.
+ *
+ * @warning: dst is not unreferenced, but directly overwritten without reading
+ * or deallocating its contents. Call av_frame_unref(dst) manually
+ * before calling this function to ensure that no memory is leaked.
+ */
+void av_frame_move_ref(AVFrame* dst, AVFrame* src);
+
+/**
+ * Allocate new buffer(s) for audio or video data.
+ *
+ * The following fields must be set on frame before calling this function:
+ * - format (pixel format for video, sample format for audio)
+ * - width and height for video
+ * - nb_samples and ch_layout for audio
+ *
+ * This function will fill AVFrame.data and AVFrame.buf arrays and, if
+ * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.
+ * For planar formats, one buffer will be allocated for each plane.
+ *
+ * @warning: if frame has already been allocated, calling this function will
+ * leak memory. In addition, undefined behavior can occur in certain
+ * cases.
+ *
+ * @param frame frame in which to store the new buffers.
+ * @param align Required buffer size alignment. If equal to 0, alignment will be
+ * chosen automatically for the current CPU. It is highly
+ * recommended to pass 0 here unless you know what you are doing.
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_frame_get_buffer(AVFrame* frame, int align);
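+
+/*
+ * Illustrative usage sketch (dimensions are hypothetical): allocate a
+ * writable 640x480 YUV420P video frame.
+ *
+ *     AVFrame *frame = av_frame_alloc();
+ *     if (frame) {
+ *         frame->format = AV_PIX_FMT_YUV420P;
+ *         frame->width  = 640;
+ *         frame->height = 480;
+ *         if (av_frame_get_buffer(frame, 0) < 0) {
+ *             // allocation failed
+ *         }
+ *     }
+ *     av_frame_free(&frame);
+ */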
+
+/**
+ * Check if the frame data is writable.
+ *
+ * @return A positive value if the frame data is writable (which is true if and
+ * only if each of the underlying buffers has only one reference, namely the one
+ * stored in this frame). Return 0 otherwise.
+ *
+ * If 1 is returned the answer is valid until av_buffer_ref() is called on any
+ * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly).
+ *
+ * @see av_frame_make_writable(), av_buffer_is_writable()
+ */
+int av_frame_is_writable(AVFrame* frame);
+
+/**
+ * Ensure that the frame data is writable, avoiding data copy if possible.
+ *
+ * Do nothing if the frame is writable, allocate new buffers and copy the data
+ * if it is not. Non-refcounted frames behave as non-writable, i.e. a copy
+ * is always made.
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ *
+ * @see av_frame_is_writable(), av_buffer_is_writable(),
+ * av_buffer_make_writable()
+ */
+int av_frame_make_writable(AVFrame* frame);
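+
+/*
+ * Illustrative usage sketch: ensure a possibly shared frame can be modified
+ * in place before touching its data planes.
+ *
+ *     if (av_frame_make_writable(frame) >= 0) {
+ *         frame->data[0][0] = 0;   // no other reference observes this write
+ *     } else {
+ *         // handle the error (typically AVERROR(ENOMEM))
+ *     }
+ */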
+
+/**
+ * Copy the frame data from src to dst.
+ *
+ * This function does not allocate anything; dst must already be initialized and
+ * allocated with the same parameters as src.
+ *
+ * This function only copies the frame data (i.e. the contents of the data /
+ * extended data arrays), not any other properties.
+ *
+ * @return >= 0 on success, a negative AVERROR on error.
+ */
+int av_frame_copy(AVFrame* dst, const AVFrame* src);
+
+/**
+ * Copy only "metadata" fields from src to dst.
+ *
+ * Metadata for the purpose of this function are those fields that do not affect
+ * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample
+ * aspect ratio (for video), but not width/height or channel layout.
+ * Side data is also copied.
+ */
+int av_frame_copy_props(AVFrame* dst, const AVFrame* src);
+
+/**
+ * Get the buffer reference a given data plane is stored in.
+ *
+ * @param frame the frame to get the plane's buffer from
+ * @param plane index of the data plane of interest in frame->extended_data.
+ *
+ * @return the buffer reference that contains the plane or NULL if the input
+ * frame is not valid.
+ */
+AVBufferRef* av_frame_get_plane_buffer(AVFrame* frame, int plane);
+
+/**
+ * Add a new side data to a frame.
+ *
+ * @param frame a frame to which the side data should be added
+ * @param type type of the added side data
+ * @param size size of the side data
+ *
+ * @return newly added side data on success, NULL on error
+ */
+AVFrameSideData* av_frame_new_side_data(AVFrame* frame,
+ enum AVFrameSideDataType type,
+ size_t size);
+
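+/*
+ * Illustrative usage sketch (cc_bytes/cc_size are hypothetical): attach a
+ * blob of A53 closed-caption bytes to a frame.
+ *
+ *     AVFrameSideData *sd =
+ *         av_frame_new_side_data(frame, AV_FRAME_DATA_A53_CC, cc_size);
+ *     if (sd)
+ *         memcpy(sd->data, cc_bytes, cc_size);
+ */
+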
+/**
+ * Add a new side data to a frame from an existing AVBufferRef
+ *
+ * @param frame a frame to which the side data should be added
+ * @param type the type of the added side data
+ * @param buf an AVBufferRef to add as side data. The ownership of
+ * the reference is transferred to the frame.
+ *
+ * @return newly added side data on success, NULL on error. On failure
+ * the frame is unchanged and the AVBufferRef remains owned by
+ * the caller.
+ */
+AVFrameSideData* av_frame_new_side_data_from_buf(AVFrame* frame,
+ enum AVFrameSideDataType type,
+ AVBufferRef* buf);
+
+/**
+ * @return a pointer to the side data of a given type on success, NULL if there
+ * is no side data with such type in this frame.
+ */
+AVFrameSideData* av_frame_get_side_data(const AVFrame* frame,
+ enum AVFrameSideDataType type);
+
+/**
+ * Remove and free all side data instances of the given type.
+ */
+void av_frame_remove_side_data(AVFrame* frame, enum AVFrameSideDataType type);
+
+/**
+ * Flags for frame cropping.
+ */
+enum {
+ /**
+ * Apply the maximum possible cropping, even if it requires setting the
+ * AVFrame.data[] entries to unaligned pointers. Passing unaligned data
+ * to FFmpeg API is generally not allowed, and causes undefined behavior
+ * (such as crashes). You can pass unaligned data only to FFmpeg APIs that
+ * are explicitly documented to accept it. Use this flag only if you
+ * absolutely know what you are doing.
+ */
+ AV_FRAME_CROP_UNALIGNED = 1 << 0,
+};
+
+/**
+ * Crop the given video AVFrame according to its crop_left/crop_top/crop_right/
+ * crop_bottom fields. If cropping is successful, the function will adjust the
+ * data pointers and the width/height fields, and set the crop fields to 0.
+ *
+ * In all cases, the cropping boundaries will be rounded to the inherent
+ * alignment of the pixel format. In some cases, such as for opaque hwaccel
+ * formats, the left/top cropping is ignored. The crop fields are set to 0 even
+ * if the cropping was rounded or ignored.
+ *
+ * @param frame the frame which should be cropped
+ * @param flags Some combination of AV_FRAME_CROP_* flags, or 0.
+ *
+ * @return >= 0 on success, a negative AVERROR on error. If the cropping fields
+ * were invalid, AVERROR(ERANGE) is returned, and nothing is changed.
+ */
+int av_frame_apply_cropping(AVFrame* frame, int flags);
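+
+/*
+ * Illustrative usage sketch: a decoder delivered a 1920x1088 frame with
+ * crop_bottom set to 8; applying the cropping yields the 1920x1080 picture.
+ *
+ *     if (av_frame_apply_cropping(frame, 0) >= 0) {
+ *         // frame->height is now 1080 and the crop fields are reset to 0
+ *     }
+ */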
+
+/**
+ * @return a string identifying the side data type
+ */
+const char* av_frame_side_data_name(enum AVFrameSideDataType type);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_FRAME_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/hwcontext.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/hwcontext.h
new file mode 100644
index 000000000000..fb376ce4994b
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/hwcontext.h
@@ -0,0 +1,606 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HWCONTEXT_H
+#define AVUTIL_HWCONTEXT_H
+
+#include "buffer.h"
+#include "frame.h"
+#include "log.h"
+#include "pixfmt.h"
+
+enum AVHWDeviceType {
+ AV_HWDEVICE_TYPE_NONE,
+ AV_HWDEVICE_TYPE_VDPAU,
+ AV_HWDEVICE_TYPE_CUDA,
+ AV_HWDEVICE_TYPE_VAAPI,
+ AV_HWDEVICE_TYPE_DXVA2,
+ AV_HWDEVICE_TYPE_QSV,
+ AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
+ AV_HWDEVICE_TYPE_D3D11VA,
+ AV_HWDEVICE_TYPE_DRM,
+ AV_HWDEVICE_TYPE_OPENCL,
+ AV_HWDEVICE_TYPE_MEDIACODEC,
+ AV_HWDEVICE_TYPE_VULKAN,
+};
+
+typedef struct AVHWDeviceInternal AVHWDeviceInternal;
+
+/**
+ * This struct aggregates all the (hardware/vendor-specific) "high-level" state,
+ * i.e. state that is not tied to a concrete processing configuration.
+ * E.g., in an API that supports hardware-accelerated encoding and decoding,
+ * this struct will (if possible) wrap the state that is common to both encoding
+ * and decoding and from which specific instances of encoders or decoders can be
+ * derived.
+ *
+ * This struct is reference-counted with the AVBuffer mechanism. The
+ * av_hwdevice_ctx_alloc() constructor yields a reference, whose data field
+ * points to the actual AVHWDeviceContext. Further objects derived from
+ * AVHWDeviceContext (such as AVHWFramesContext, describing a frame pool with
+ * specific properties) will hold an internal reference to it. After all the
+ * references are released, the AVHWDeviceContext itself will be freed,
+ * optionally invoking a user-specified callback for uninitializing the hardware
+ * state.
+ */
+typedef struct AVHWDeviceContext {
+ /**
+ * A class for logging. Set by av_hwdevice_ctx_alloc().
+ */
+ const AVClass* av_class;
+
+ /**
+ * Private data used internally by libavutil. Must not be accessed in any
+ * way by the caller.
+ */
+ AVHWDeviceInternal* internal;
+
+ /**
+ * This field identifies the underlying API used for hardware access.
+ *
+ * This field is set when this struct is allocated and never changed
+ * afterwards.
+ */
+ enum AVHWDeviceType type;
+
+ /**
+ * The format-specific data, allocated and freed by libavutil along with
+ * this context.
+ *
+ * Should be cast by the user to the format-specific context defined in the
+ * corresponding header (hwcontext_*.h) and filled as described in the
+ * documentation before calling av_hwdevice_ctx_init().
+ *
+ * After calling av_hwdevice_ctx_init() this struct should not be modified
+ * by the caller.
+ */
+ void* hwctx;
+
+ /**
+ * This field may be set by the caller before calling av_hwdevice_ctx_init().
+ *
+ * If non-NULL, this callback will be called when the last reference to
+ * this context is unreferenced, immediately before it is freed.
+ *
+   * @note when other objects (e.g. an AVHWFramesContext) are derived from this
+ * struct, this callback will be invoked after all such child objects
+ * are fully uninitialized and their respective destructors invoked.
+ */
+ void (*free)(struct AVHWDeviceContext* ctx);
+
+ /**
+ * Arbitrary user data, to be used e.g. by the free() callback.
+ */
+ void* user_opaque;
+} AVHWDeviceContext;
+
+typedef struct AVHWFramesInternal AVHWFramesInternal;
+
+/**
+ * This struct describes a set or pool of "hardware" frames (i.e. those with
+ * data not located in normal system memory). All the frames in the pool are
+ * assumed to be allocated in the same way and interchangeable.
+ *
+ * This struct is reference-counted with the AVBuffer mechanism and tied to a
+ * given AVHWDeviceContext instance. The av_hwframe_ctx_alloc() constructor
+ * yields a reference, whose data field points to the actual AVHWFramesContext
+ * struct.
+ */
+typedef struct AVHWFramesContext {
+ /**
+ * A class for logging.
+ */
+ const AVClass* av_class;
+
+ /**
+ * Private data used internally by libavutil. Must not be accessed in any
+ * way by the caller.
+ */
+ AVHWFramesInternal* internal;
+
+ /**
+ * A reference to the parent AVHWDeviceContext. This reference is owned and
+ * managed by the enclosing AVHWFramesContext, but the caller may derive
+ * additional references from it.
+ */
+ AVBufferRef* device_ref;
+
+ /**
+ * The parent AVHWDeviceContext. This is simply a pointer to
+ * device_ref->data provided for convenience.
+ *
+ * Set by libavutil in av_hwframe_ctx_init().
+ */
+ AVHWDeviceContext* device_ctx;
+
+ /**
+ * The format-specific data, allocated and freed automatically along with
+ * this context.
+ *
+ * Should be cast by the user to the format-specific context defined in the
+ * corresponding header (hwframe_*.h) and filled as described in the
+ * documentation before calling av_hwframe_ctx_init().
+ *
+ * After any frames using this context are created, the contents of this
+ * struct should not be modified by the caller.
+ */
+ void* hwctx;
+
+ /**
+ * This field may be set by the caller before calling av_hwframe_ctx_init().
+ *
+ * If non-NULL, this callback will be called when the last reference to
+ * this context is unreferenced, immediately before it is freed.
+ */
+ void (*free)(struct AVHWFramesContext* ctx);
+
+ /**
+ * Arbitrary user data, to be used e.g. by the free() callback.
+ */
+ void* user_opaque;
+
+ /**
+ * A pool from which the frames are allocated by av_hwframe_get_buffer().
+ * This field may be set by the caller before calling av_hwframe_ctx_init().
+ * The buffers returned by calling av_buffer_pool_get() on this pool must
+ * have the properties described in the documentation in the corresponding hw
+ * type's header (hwcontext_*.h). The pool will be freed strictly before
+ * this struct's free() callback is invoked.
+ *
+ * This field may be NULL, in which case libavutil will attempt to allocate a pool
+ * internally. Note that certain device types enforce pools allocated at
+ * fixed size (frame count), which cannot be extended dynamically. In such a
+ * case, initial_pool_size must be set appropriately.
+ */
+ AVBufferPool* pool;
+
+ /**
+ * Initial size of the frame pool. If a device type does not support
+ * dynamically resizing the pool, then this is also the maximum pool size.
+ *
+ * May be set by the caller before calling av_hwframe_ctx_init(). Must be
+ * set if pool is NULL and the device type does not support dynamic pools.
+ */
+ int initial_pool_size;
+
+ /**
+ * The pixel format identifying the underlying HW surface type.
+ *
+ * Must be a hwaccel format, i.e. the corresponding descriptor must have the
+ * AV_PIX_FMT_FLAG_HWACCEL flag set.
+ *
+ * Must be set by the user before calling av_hwframe_ctx_init().
+ */
+ enum AVPixelFormat format;
+
+ /**
+ * The pixel format identifying the actual data layout of the hardware
+ * frames.
+ *
+ * Must be set by the caller before calling av_hwframe_ctx_init().
+ *
+ * @note when the underlying API does not provide the exact data layout, but
+ * only the colorspace/bit depth, this field should be set to the fully
+ * planar version of that format (e.g. for 8-bit 420 YUV it should be
+ * AV_PIX_FMT_YUV420P, not AV_PIX_FMT_NV12 or anything else).
+ */
+ enum AVPixelFormat sw_format;
+
+ /**
+ * The allocated dimensions of the frames in this pool.
+ *
+ * Must be set by the user before calling av_hwframe_ctx_init().
+ */
+ int width, height;
+} AVHWFramesContext;
+
+/**
+ * Look up an AVHWDeviceType by name.
+ *
+ * @param name String name of the device type (case-insensitive).
+ * @return The type from enum AVHWDeviceType, or AV_HWDEVICE_TYPE_NONE if
+ * not found.
+ */
+enum AVHWDeviceType av_hwdevice_find_type_by_name(const char* name);
+
+/** Get the string name of an AVHWDeviceType.
+ *
+ * @param type Type from enum AVHWDeviceType.
+ * @return Pointer to a static string containing the name, or NULL if the type
+ * is not valid.
+ */
+const char* av_hwdevice_get_type_name(enum AVHWDeviceType type);
+
+/**
+ * Iterate over supported device types.
+ *
+ * @param prev AV_HWDEVICE_TYPE_NONE initially, then the previous type
+ * returned by this function in subsequent iterations.
+ * @return The next usable device type from enum AVHWDeviceType, or
+ * AV_HWDEVICE_TYPE_NONE if there are no more.
+ */
+enum AVHWDeviceType av_hwdevice_iterate_types(enum AVHWDeviceType prev);
+
+/**
+ * Allocate an AVHWDeviceContext for a given hardware type.
+ *
+ * @param type the type of the hardware device to allocate.
+ * @return a reference to the newly created AVHWDeviceContext on success or NULL
+ * on failure.
+ */
+AVBufferRef* av_hwdevice_ctx_alloc(enum AVHWDeviceType type);
+
+/**
+ * Finalize the device context before use. This function must be called after
+ * the context is filled with all the required information and before it is
+ * used in any way.
+ *
+ * @param ref a reference to the AVHWDeviceContext
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_hwdevice_ctx_init(AVBufferRef* ref);
+
+/**
+ * Open a device of the specified type and create an AVHWDeviceContext for it.
+ *
+ * This is a convenience function intended to cover the simple cases. Callers
+ * who need to fine-tune device creation/management should open the device
+ * manually and then wrap it in an AVHWDeviceContext using
+ * av_hwdevice_ctx_alloc()/av_hwdevice_ctx_init().
+ *
+ * The returned context is already initialized and ready for use; the caller
+ * should not call av_hwdevice_ctx_init() on it. The user_opaque/free fields of
+ * the created AVHWDeviceContext are set by this function and should not be
+ * touched by the caller.
+ *
+ * @param device_ctx On success, a reference to the newly-created device context
+ * will be written here. The reference is owned by the caller
+ * and must be released with av_buffer_unref() when no longer
+ * needed. On failure, NULL will be written to this pointer.
+ * @param type The type of the device to create.
+ * @param device A type-specific string identifying the device to open.
+ * @param opts A dictionary of additional (type-specific) options to use in
+ * opening the device. The dictionary remains owned by the caller.
+ * @param flags currently unused
+ *
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+int av_hwdevice_ctx_create(AVBufferRef** device_ctx, enum AVHWDeviceType type,
+ const char* device, AVDictionary* opts, int flags);
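/*
 * Editorial illustration, not part of the upstream header or of this patch:
 * a minimal sketch of av_hwdevice_ctx_create(). The VAAPI device type and the
 * "/dev/dri/renderD128" device string are assumptions chosen for the example.
 */
#include <libavutil/buffer.h>
#include <libavutil/hwcontext.h>

static AVBufferRef *open_hw_device_example(void)
{
    AVBufferRef *device_ref = NULL;
    /* Opens and initializes the device in one step; av_hwdevice_ctx_init()
     * should not be called again on the result. */
    int ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_VAAPI,
                                     "/dev/dri/renderD128", NULL, 0);
    if (ret < 0)
        return NULL;       /* device_ref is NULL on failure */
    return device_ref;     /* release with av_buffer_unref() when done */
}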
+
+/**
+ * Create a new device of the specified type from an existing device.
+ *
+ * If the source device is a device of the target type or was originally
+ * derived from such a device (possibly through one or more intermediate
+ * devices of other types), then this will return a reference to the
+ * existing device of the same type as is requested.
+ *
+ * Otherwise, it will attempt to derive a new device from the given source
+ * device. If direct derivation to the new type is not implemented, it will
+ * attempt the same derivation from each ancestor of the source device in
+ * turn looking for an implemented derivation method.
+ *
+ * @param dst_ctx On success, a reference to the newly-created
+ * AVHWDeviceContext.
+ * @param type The type of the new device to create.
+ * @param src_ctx A reference to an existing AVHWDeviceContext which will be
+ * used to create the new device.
+ * @param flags Currently unused; should be set to zero.
+ * @return Zero on success, a negative AVERROR code on failure.
+ */
+int av_hwdevice_ctx_create_derived(AVBufferRef** dst_ctx,
+ enum AVHWDeviceType type,
+ AVBufferRef* src_ctx, int flags);
+
+/**
+ * Create a new device of the specified type from an existing device.
+ *
+ * This function performs the same action as av_hwdevice_ctx_create_derived,
+ * however, it is able to set options for the new device to be derived.
+ *
+ * @param dst_ctx On success, a reference to the newly-created
+ * AVHWDeviceContext.
+ * @param type The type of the new device to create.
+ * @param src_ctx A reference to an existing AVHWDeviceContext which will be
+ * used to create the new device.
+ * @param options Options for the new device to create, same format as in
+ * av_hwdevice_ctx_create.
+ * @param flags Currently unused; should be set to zero.
+ * @return Zero on success, a negative AVERROR code on failure.
+ */
+int av_hwdevice_ctx_create_derived_opts(AVBufferRef** dst_ctx,
+ enum AVHWDeviceType type,
+ AVBufferRef* src_ctx,
+ AVDictionary* options, int flags);
+
+/**
+ * Allocate an AVHWFramesContext tied to a given device context.
+ *
+ * @param device_ctx a reference to an AVHWDeviceContext. This function will make
+ * a new reference for internal use, the one passed to the
+ * function remains owned by the caller.
+ * @return a reference to the newly created AVHWFramesContext on success or NULL
+ * on failure.
+ */
+AVBufferRef* av_hwframe_ctx_alloc(AVBufferRef* device_ctx);
+
+/**
+ * Finalize the context before use. This function must be called after the
+ * context is filled with all the required information and before it is attached
+ * to any frames.
+ *
+ * @param ref a reference to the AVHWFramesContext
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_hwframe_ctx_init(AVBufferRef* ref);
+
+/**
+ * Allocate a new frame attached to the given AVHWFramesContext.
+ *
+ * @param hwframe_ctx a reference to an AVHWFramesContext
+ * @param frame an empty (freshly allocated or unreffed) frame to be filled with
+ * newly allocated buffers.
+ * @param flags currently unused, should be set to zero
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_hwframe_get_buffer(AVBufferRef* hwframe_ctx, AVFrame* frame, int flags);
+
+/**
+ * Copy data to or from a hw surface. At least one of dst/src must have an
+ * AVHWFramesContext attached.
+ *
+ * If src has an AVHWFramesContext attached, then the format of dst (if set)
+ * must use one of the formats returned by av_hwframe_transfer_get_formats(src,
+ * AV_HWFRAME_TRANSFER_DIRECTION_FROM).
+ * If dst has an AVHWFramesContext attached, then the format of src must use one
+ * of the formats returned by av_hwframe_transfer_get_formats(dst,
+ * AV_HWFRAME_TRANSFER_DIRECTION_TO)
+ *
+ * dst may be "clean" (i.e. with data/buf pointers unset), in which case the
+ * data buffers will be allocated by this function using av_frame_get_buffer().
+ * If dst->format is set, then this format will be used, otherwise (when
+ * dst->format is AV_PIX_FMT_NONE) the first acceptable format will be chosen.
+ *
+ * The two frames must have matching allocated dimensions (i.e. equal to
+ * AVHWFramesContext.width/height), since not all device types support
+ * transferring a sub-rectangle of the whole surface. The display dimensions
+ * (i.e. AVFrame.width/height) may be smaller than the allocated dimensions, but
+ * also have to be equal for both frames. When the display dimensions are
+ * smaller than the allocated dimensions, the content of the padding in the
+ * destination frame is unspecified.
+ *
+ * @param dst the destination frame. dst is not touched on failure.
+ * @param src the source frame.
+ * @param flags currently unused, should be set to zero
+ * @return 0 on success, a negative AVERROR error code on failure.
+ */
+int av_hwframe_transfer_data(AVFrame* dst, const AVFrame* src, int flags);
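/*
 * Editorial illustration, not part of the upstream header or of this patch:
 * a sketch of allocating a surface from a frames context and downloading it
 * to system memory. `hw_frames_ref` is assumed to be an already-initialized
 * AVHWFramesContext reference.
 */
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>

static AVFrame *download_hw_frame_example(AVBufferRef *hw_frames_ref)
{
    AVFrame *hw = av_frame_alloc();
    AVFrame *sw = av_frame_alloc();
    if (!hw || !sw)
        goto fail;
    /* Allocate a hardware surface from the pool of the frames context. */
    if (av_hwframe_get_buffer(hw_frames_ref, hw, 0) < 0)
        goto fail;
    /* sw is "clean": its format is AV_PIX_FMT_NONE, so the first acceptable
     * transfer format is chosen and the data buffers are allocated here. */
    if (av_hwframe_transfer_data(sw, hw, 0) < 0)
        goto fail;
    av_frame_free(&hw);
    return sw;
fail:
    av_frame_free(&hw);
    av_frame_free(&sw);
    return NULL;
}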
+
+enum AVHWFrameTransferDirection {
+ /**
+ * Transfer the data from the queried hw frame.
+ */
+ AV_HWFRAME_TRANSFER_DIRECTION_FROM,
+
+ /**
+ * Transfer the data to the queried hw frame.
+ */
+ AV_HWFRAME_TRANSFER_DIRECTION_TO,
+};
+
+/**
+ * Get a list of possible source or target formats usable in
+ * av_hwframe_transfer_data().
+ *
+ * @param hwframe_ctx the frame context to obtain the information for
+ * @param dir the direction of the transfer
+ * @param formats the pointer to the output format list will be written here.
+ * The list is terminated with AV_PIX_FMT_NONE and must be freed
+ * by the caller when no longer needed using av_free().
+ * If this function returns successfully, the format list will
+ * have at least one item (not counting the terminator).
+ * On failure, the contents of this pointer are unspecified.
+ * @param flags currently unused, should be set to zero
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+int av_hwframe_transfer_get_formats(AVBufferRef* hwframe_ctx,
+ enum AVHWFrameTransferDirection dir,
+ enum AVPixelFormat** formats, int flags);
+
+/**
+ * This struct describes the constraints on hardware frames attached to
+ * a given device with a hardware-specific configuration. This is returned
+ * by av_hwdevice_get_hwframe_constraints() and must be freed by
+ * av_hwframe_constraints_free() after use.
+ */
+typedef struct AVHWFramesConstraints {
+ /**
+ * A list of possible values for format in the hw_frames_ctx,
+ * terminated by AV_PIX_FMT_NONE. This member will always be filled.
+ */
+ enum AVPixelFormat* valid_hw_formats;
+
+ /**
+ * A list of possible values for sw_format in the hw_frames_ctx,
+ * terminated by AV_PIX_FMT_NONE. Can be NULL if this information is
+ * not known.
+ */
+ enum AVPixelFormat* valid_sw_formats;
+
+ /**
+ * The minimum size of frames in this hw_frames_ctx.
+ * (Zero if not known.)
+ */
+ int min_width;
+ int min_height;
+
+ /**
+ * The maximum size of frames in this hw_frames_ctx.
+ * (INT_MAX if not known / no limit.)
+ */
+ int max_width;
+ int max_height;
+} AVHWFramesConstraints;
+
+/**
+ * Allocate a HW-specific configuration structure for a given HW device.
+ * After use, the user must free all members as required by the specific
+ * hardware structure being used, then free the structure itself with
+ * av_free().
+ *
+ * @param device_ctx a reference to the associated AVHWDeviceContext.
+ * @return The newly created HW-specific configuration structure on
+ * success or NULL on failure.
+ */
+void* av_hwdevice_hwconfig_alloc(AVBufferRef* device_ctx);
+
+/**
+ * Get the constraints on HW frames given a device and the HW-specific
+ * configuration to be used with that device. If no HW-specific
+ * configuration is provided, returns the maximum possible capabilities
+ * of the device.
+ *
+ * @param ref a reference to the associated AVHWDeviceContext.
+ * @param hwconfig a filled HW-specific configuration structure, or NULL
+ * to return the maximum possible capabilities of the device.
+ * @return AVHWFramesConstraints structure describing the constraints
+ * on the device, or NULL if not available.
+ */
+AVHWFramesConstraints* av_hwdevice_get_hwframe_constraints(
+ AVBufferRef* ref, const void* hwconfig);
+
+/**
+ * Free an AVHWFramesConstraints structure.
+ *
+ * @param constraints The (filled or unfilled) AVHWFramesConstraints structure.
+ */
+void av_hwframe_constraints_free(AVHWFramesConstraints** constraints);
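/*
 * Editorial illustration, not part of the upstream header or of this patch:
 * querying the maximum capabilities of a device (hwconfig == NULL) and
 * listing the software formats it can back hardware frames with.
 */
#include <libavutil/hwcontext.h>
#include <libavutil/log.h>
#include <libavutil/pixdesc.h>

static void list_sw_formats_example(AVBufferRef *device_ref)
{
    AVHWFramesConstraints *c =
        av_hwdevice_get_hwframe_constraints(device_ref, NULL);
    if (!c)
        return;
    if (c->valid_sw_formats) {
        for (const enum AVPixelFormat *p = c->valid_sw_formats;
             *p != AV_PIX_FMT_NONE; p++)
            av_log(NULL, AV_LOG_INFO, "supported sw format: %s\n",
                   av_get_pix_fmt_name(*p));
    }
    av_hwframe_constraints_free(&c);
}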
+
+/**
+ * Flags to apply to frame mappings.
+ */
+enum {
+ /**
+ * The mapping must be readable.
+ */
+ AV_HWFRAME_MAP_READ = 1 << 0,
+ /**
+ * The mapping must be writeable.
+ */
+ AV_HWFRAME_MAP_WRITE = 1 << 1,
+ /**
+ * The mapped frame will be overwritten completely in subsequent
+ * operations, so the current frame data need not be loaded. Any values
+ * which are not overwritten are unspecified.
+ */
+ AV_HWFRAME_MAP_OVERWRITE = 1 << 2,
+ /**
+ * The mapping must be direct. That is, there must not be any copying in
+ * the map or unmap steps. Note that performance of direct mappings may
+ * be much lower than normal memory.
+ */
+ AV_HWFRAME_MAP_DIRECT = 1 << 3,
+};
+
+/**
+ * Map a hardware frame.
+ *
+ * This has a number of different possible effects, depending on the format
+ * and origin of the src and dst frames. On input, src should be a usable
+ * frame with valid buffers and dst should be blank (typically as just created
+ * by av_frame_alloc()). src should have an associated hwframe context, and
+ * dst may optionally have a format and associated hwframe context.
+ *
+ * If src was created by mapping a frame from the hwframe context of dst,
+ * then this function undoes the mapping - dst is replaced by a reference to
+ * the frame that src was originally mapped from.
+ *
+ * If both src and dst have an associated hwframe context, then this function
+ * attempts to map the src frame from its hardware context to that of dst and
+ * then fill dst with appropriate data to be usable there. This will only be
+ * possible if the hwframe contexts and associated devices are compatible -
+ * given compatible devices, av_hwframe_ctx_create_derived() can be used to
+ * create a hwframe context for dst in which mapping should be possible.
+ *
+ * If src has a hwframe context but dst does not, then the src frame is
+ * mapped to normal memory and should thereafter be usable as a normal frame.
+ * If the format is set on dst, then the mapping will attempt to create dst
+ * with that format and fail if it is not possible. If format is unset (is
+ * AV_PIX_FMT_NONE) then dst will be mapped with whatever the most appropriate
+ * format to use is (probably the sw_format of the src hwframe context).
+ *
+ * A return value of AVERROR(ENOSYS) indicates that the mapping is not
+ * possible with the given arguments and hwframe setup, while other return
+ * values indicate that it failed somehow.
+ *
+ * On failure, the destination frame will be left blank, except for the
+ * hw_frames_ctx/format fields that may have been set by the caller - those will
+ * be preserved as they were.
+ *
+ * @param dst Destination frame, to contain the mapping.
+ * @param src Source frame, to be mapped.
+ * @param flags Some combination of AV_HWFRAME_MAP_* flags.
+ * @return Zero on success, negative AVERROR code on failure.
+ */
+int av_hwframe_map(AVFrame* dst, const AVFrame* src, int flags);
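/*
 * Editorial illustration, not part of the upstream header or of this patch:
 * mapping a hardware frame into system memory for read access, which avoids
 * a copy when the device supports direct mapping.
 */
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>

static AVFrame *map_for_reading_example(const AVFrame *hw_frame)
{
    AVFrame *mapped = av_frame_alloc();
    if (!mapped)
        return NULL;
    /* Leaving mapped->format unset picks the most appropriate format,
     * typically the sw_format of the source frames context. */
    if (av_hwframe_map(mapped, hw_frame, AV_HWFRAME_MAP_READ) < 0) {
        av_frame_free(&mapped);
        return NULL;   /* e.g. AVERROR(ENOSYS) if mapping is not possible */
    }
    return mapped;     /* free with av_frame_free() when no longer needed */
}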
+
+/**
+ * Create and initialise an AVHWFramesContext as a mapping of another existing
+ * AVHWFramesContext on a different device.
+ *
+ * av_hwframe_ctx_init() should not be called after this.
+ *
+ * @param derived_frame_ctx On success, a reference to the newly created
+ * AVHWFramesContext.
+ * @param format The AVPixelFormat for the derived context.
+ * @param derived_device_ctx A reference to the device to create the new
+ * AVHWFramesContext on.
+ * @param source_frame_ctx A reference to an existing AVHWFramesContext
+ * which will be mapped to the derived context.
+ * @param flags Some combination of AV_HWFRAME_MAP_* flags, defining the
+ * mapping parameters to apply to frames which are allocated
+ * in the derived device.
+ * @return Zero on success, negative AVERROR code on failure.
+ */
+int av_hwframe_ctx_create_derived(AVBufferRef** derived_frame_ctx,
+ enum AVPixelFormat format,
+ AVBufferRef* derived_device_ctx,
+ AVBufferRef* source_frame_ctx, int flags);
+
+#endif /* AVUTIL_HWCONTEXT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/hwcontext_vaapi.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/hwcontext_vaapi.h
similarity index 100%
copy from dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/hwcontext_vaapi.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/hwcontext_vaapi.h
diff --git a/dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/intfloat.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/intfloat.h
similarity index 100%
copy from dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/intfloat.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/intfloat.h
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/log.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/log.h
new file mode 100644
index 000000000000..e1f2af7b18aa
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/log.h
@@ -0,0 +1,388 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LOG_H
+#define AVUTIL_LOG_H
+
+#include <stdarg.h>
+#include "attributes.h"
+#include "version.h"
+
+typedef enum {
+ AV_CLASS_CATEGORY_NA = 0,
+ AV_CLASS_CATEGORY_INPUT,
+ AV_CLASS_CATEGORY_OUTPUT,
+ AV_CLASS_CATEGORY_MUXER,
+ AV_CLASS_CATEGORY_DEMUXER,
+ AV_CLASS_CATEGORY_ENCODER,
+ AV_CLASS_CATEGORY_DECODER,
+ AV_CLASS_CATEGORY_FILTER,
+ AV_CLASS_CATEGORY_BITSTREAM_FILTER,
+ AV_CLASS_CATEGORY_SWSCALER,
+ AV_CLASS_CATEGORY_SWRESAMPLER,
+ AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40,
+ AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+ AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
+ AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
+ AV_CLASS_CATEGORY_DEVICE_OUTPUT,
+ AV_CLASS_CATEGORY_DEVICE_INPUT,
+ AV_CLASS_CATEGORY_NB ///< not part of ABI/API
+} AVClassCategory;
+
+#define AV_IS_INPUT_DEVICE(category) \
+ (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_INPUT))
+
+#define AV_IS_OUTPUT_DEVICE(category) \
+ (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_OUTPUT))
+
+struct AVOptionRanges;
+
+/**
+ * Describe the class of an AVClass context structure. That is an
+ * arbitrary struct of which the first field is a pointer to an
+ * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.).
+ */
+typedef struct AVClass {
+ /**
+ * The name of the class; usually it is the same name as the
+ * context structure type to which the AVClass is associated.
+ */
+ const char* class_name;
+
+ /**
+ * A pointer to a function which returns the name of a context
+ * instance ctx associated with the class.
+ */
+ const char* (*item_name)(void* ctx);
+
+ /**
+ * a pointer to the first option specified in the class if any or NULL
+ *
+ * @see av_set_default_options()
+ */
+ const struct AVOption* option;
+
+ /**
+ * LIBAVUTIL_VERSION with which this structure was created.
+ * This is used to allow fields to be added without requiring major
+ * version bumps everywhere.
+ */
+
+ int version;
+
+ /**
+ * Offset in the structure where log_level_offset is stored.
+ * 0 means there is no such variable
+ */
+ int log_level_offset_offset;
+
+ /**
+ * Offset in the structure where a pointer to the parent context for
+ * logging is stored. For example a decoder could pass its AVCodecContext
+ * to eval as such a parent context, which an av_log() implementation
+ * could then leverage to display the parent context.
+ * The offset can be NULL.
+ */
+ int parent_log_context_offset;
+
+ /**
+ * Category used for visualization (like color)
+ * This is only set if the category is equal for all objects using this class.
+ * available since version (51 << 16 | 56 << 8 | 100)
+ */
+ AVClassCategory category;
+
+ /**
+ * Callback to return the category.
+ * available since version (51 << 16 | 59 << 8 | 100)
+ */
+ AVClassCategory (*get_category)(void* ctx);
+
+ /**
+ * Callback to return the supported/allowed ranges.
+ * available since version (52.12)
+ */
+ int (*query_ranges)(struct AVOptionRanges**, void* obj, const char* key,
+ int flags);
+
+ /**
+ * Return next AVOptions-enabled child or NULL
+ */
+ void* (*child_next)(void* obj, void* prev);
+
+ /**
+ * Iterate over the AVClasses corresponding to potential AVOptions-enabled
+ * children.
+ *
+ * @param iter pointer to opaque iteration state. The caller must initialize
+ * *iter to NULL before the first call.
+ * @return AVClass for the next AVOptions-enabled child or NULL if there are
+ * no more such children.
+ *
+ * @note The difference between child_next and this is that child_next
+ * iterates over _already existing_ objects, while child_class_iterate
+ * iterates over _all possible_ children.
+ */
+ const struct AVClass* (*child_class_iterate)(void** iter);
+} AVClass;
+
+/**
+ * @addtogroup lavu_log
+ *
+ * @{
+ *
+ * @defgroup lavu_log_constants Logging Constants
+ *
+ * @{
+ */
+
+/**
+ * Print no output.
+ */
+#define AV_LOG_QUIET -8
+
+/**
+ * Something went really wrong and we will crash now.
+ */
+#define AV_LOG_PANIC 0
+
+/**
+ * Something went wrong and recovery is not possible.
+ * For example, no header was found for a format which depends
+ * on headers or an illegal combination of parameters is used.
+ */
+#define AV_LOG_FATAL 8
+
+/**
+ * Something went wrong and cannot losslessly be recovered.
+ * However, not all future data is affected.
+ */
+#define AV_LOG_ERROR 16
+
+/**
+ * Something somehow does not look correct. This may or may not
+ * lead to problems. An example would be the use of '-vstrict -2'.
+ */
+#define AV_LOG_WARNING 24
+
+/**
+ * Standard information.
+ */
+#define AV_LOG_INFO 32
+
+/**
+ * Detailed information.
+ */
+#define AV_LOG_VERBOSE 40
+
+/**
+ * Stuff which is only useful for libav* developers.
+ */
+#define AV_LOG_DEBUG 48
+
+/**
+ * Extremely verbose debugging, useful for libav* development.
+ */
+#define AV_LOG_TRACE 56
+
+#define AV_LOG_MAX_OFFSET (AV_LOG_TRACE - AV_LOG_QUIET)
+
+/**
+ * @}
+ */
+
+/**
+ * Sets additional colors for extended debugging sessions.
+ * @code
+ av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), "Message in purple\n");
+ @endcode
+ * Requires 256-color terminal support. Use outside debugging is not
+ * recommended.
+ */
+#define AV_LOG_C(x) ((x) << 8)
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct or NULL if general log.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ */
+void av_log(void* avcl, int level, const char* fmt, ...) av_printf_format(3, 4);
+
+/**
+ * Send the specified message to the log once with the initial_level and then
+ * with the subsequent_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct or NULL if general log.
+ * @param initial_level importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant" for the first occurrence.
+ * @param subsequent_level importance level of the message expressed using a
+ * @ref lavu_log_constants "Logging Constant" after the first occurrence.
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param state a variable to keep track of whether a message has already been
+ * printed; this must be initialized to 0 before the first use. The same
+ * state must not be accessed by two threads simultaneously.
+ */
+void av_log_once(void* avcl, int initial_level, int subsequent_level,
+ int* state, const char* fmt, ...) av_printf_format(5, 6);
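/*
 * Editorial illustration, not part of the upstream header or of this patch:
 * using av_log_once() with a persistent state variable so the first occurrence
 * is logged as a warning and later occurrences only at debug level.
 */
#include <libavutil/log.h>

static void warn_once_example(void *avcl)
{
    /* Must start at 0 and must not be used from two threads at once. */
    static int warned;
    av_log_once(avcl, AV_LOG_WARNING, AV_LOG_DEBUG, &warned,
                "deprecated option in use\n");
}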
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_vlog(void* avcl, int level, const char* fmt, va_list vl);
+
+/**
+ * Get the current log level
+ *
+ * @see lavu_log_constants
+ *
+ * @return Current log level
+ */
+int av_log_get_level(void);
+
+/**
+ * Set the log level
+ *
+ * @see lavu_log_constants
+ *
+ * @param level Logging level
+ */
+void av_log_set_level(int level);
+
+/**
+ * Set the logging callback
+ *
+ * @note The callback must be thread safe, even if the application does not use
+ * threads itself as some codecs are multithreaded.
+ *
+ * @see av_log_default_callback
+ *
+ * @param callback A logging function with a compatible signature.
+ */
+void av_log_set_callback(void (*callback)(void*, int, const char*, va_list));
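/*
 * Editorial illustration, not part of the upstream header or of this patch:
 * a custom logging callback that drops messages more verbose than AV_LOG_INFO
 * and forwards the rest to the default handler. The callback must be thread
 * safe; install it with av_log_set_callback(quiet_log_callback).
 */
#include <libavutil/log.h>

static void quiet_log_callback(void *avcl, int level, const char *fmt,
                               va_list vl)
{
    if (level > AV_LOG_INFO)   /* a higher value means a less important message */
        return;
    av_log_default_callback(avcl, level, fmt, vl);
}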
+
+/**
+ * Default logging callback
+ *
+ * It prints the message to stderr, optionally colorizing it.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_log_default_callback(void* avcl, int level, const char* fmt,
+ va_list vl);
+
+/**
+ * Return the context name
+ *
+ * @param ctx The AVClass context
+ *
+ * @return The AVClass class_name
+ */
+const char* av_default_item_name(void* ctx);
+AVClassCategory av_default_get_category(void* ptr);
+
+/**
+ * Format a line of log the same way as the default callback.
+ * @param line buffer to receive the formatted line
+ * @param line_size size of the buffer
+ * @param print_prefix used to store whether the prefix must be printed;
+ * must point to a persistent integer initially set to 1
+ */
+void av_log_format_line(void* ptr, int level, const char* fmt, va_list vl,
+ char* line, int line_size, int* print_prefix);
+
+/**
+ * Format a line of log the same way as the default callback.
+ * @param line buffer to receive the formatted line;
+ * may be NULL if line_size is 0
+ * @param line_size size of the buffer; at most line_size-1 characters will
+ * be written to the buffer, plus one null terminator
+ * @param print_prefix used to store whether the prefix must be printed;
+ * must point to a persistent integer initially set to 1
+ * @return Returns a negative value if an error occurred, otherwise returns
+ * the number of characters that would have been written for a
+ * sufficiently large buffer, not including the terminating null
+ * character. If the return value is not less than line_size, it means
+ * that the log message was truncated to fit the buffer.
+ */
+int av_log_format_line2(void* ptr, int level, const char* fmt, va_list vl,
+ char* line, int line_size, int* print_prefix);
+
+/**
+ * Skip repeated messages. This requires the user app to use av_log() instead
+ * of (f)printf, as the two would otherwise interfere and, with some bad luck,
+ * lead to "Last message repeated x times" messages below (f)printf messages.
+ * Also, to receive the last "last repeated" line, if any, the user app must
+ * call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end.
+ */
+#define AV_LOG_SKIP_REPEATED 1
+
+/**
+ * Include the log severity in messages originating from codecs.
+ *
+ * Results in messages such as:
+ * [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts
+ */
+#define AV_LOG_PRINT_LEVEL 2
+
+void av_log_set_flags(int arg);
+int av_log_get_flags(void);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_LOG_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/macros.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/macros.h
similarity index 100%
copy from dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/macros.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/macros.h
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/mathematics.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/mathematics.h
new file mode 100644
index 000000000000..5ebf81e64fe6
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/mathematics.h
@@ -0,0 +1,249 @@
+/*
+ * copyright (c) 2005-2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @addtogroup lavu_math
+ * Mathematical utilities for working with timestamp and time base.
+ */
+
+#ifndef AVUTIL_MATHEMATICS_H
+#define AVUTIL_MATHEMATICS_H
+
+#include <stdint.h>
+#include <math.h>
+#include "attributes.h"
+#include "rational.h"
+#include "intfloat.h"
+
+#ifndef M_E
+# define M_E 2.7182818284590452354 /* e */
+#endif
+#ifndef M_LN2
+# define M_LN2 0.69314718055994530942 /* log_e 2 */
+#endif
+#ifndef M_LN10
+# define M_LN10 2.30258509299404568402 /* log_e 10 */
+#endif
+#ifndef M_LOG2_10
+# define M_LOG2_10 3.32192809488736234787 /* log_2 10 */
+#endif
+#ifndef M_PHI
+# define M_PHI 1.61803398874989484820 /* phi / golden ratio */
+#endif
+#ifndef M_PI
+# define M_PI 3.14159265358979323846 /* pi */
+#endif
+#ifndef M_PI_2
+# define M_PI_2 1.57079632679489661923 /* pi/2 */
+#endif
+#ifndef M_SQRT1_2
+# define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */
+#endif
+#ifndef M_SQRT2
+# define M_SQRT2 1.41421356237309504880 /* sqrt(2) */
+#endif
+#ifndef NAN
+# define NAN av_int2float(0x7fc00000)
+#endif
+#ifndef INFINITY
+# define INFINITY av_int2float(0x7f800000)
+#endif
+
+/**
+ * @addtogroup lavu_math
+ *
+ * @{
+ */
+
+/**
+ * Rounding methods.
+ */
+enum AVRounding {
+ AV_ROUND_ZERO = 0, ///< Round toward zero.
+ AV_ROUND_INF = 1, ///< Round away from zero.
+ AV_ROUND_DOWN = 2, ///< Round toward -infinity.
+ AV_ROUND_UP = 3, ///< Round toward +infinity.
+ AV_ROUND_NEAR_INF =
+ 5, ///< Round to nearest and halfway cases away from zero.
+ /**
+ * Flag telling rescaling functions to pass `INT64_MIN`/`MAX` through
+ * unchanged, avoiding special cases for #AV_NOPTS_VALUE.
+ *
+ * Unlike other values of the enumeration AVRounding, this value is a
+ * bitmask that must be used in conjunction with another value of the
+ * enumeration through a bitwise OR, in order to set behavior for normal
+ * cases.
+ *
+ * @code{.c}
+ * av_rescale_rnd(3, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
+ * // Rescaling 3:
+ * // Calculating 3 * 1 / 2
+ * // 3 / 2 is rounded up to 2
+ * // => 2
+ *
+ * av_rescale_rnd(AV_NOPTS_VALUE, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
+ * // Rescaling AV_NOPTS_VALUE:
+ * // AV_NOPTS_VALUE == INT64_MIN
+ * // AV_NOPTS_VALUE is passed through
+ * // => AV_NOPTS_VALUE
+ * @endcode
+ */
+ AV_ROUND_PASS_MINMAX = 8192,
+};
+
+/**
+ * Compute the greatest common divisor of two integer operands.
+ *
+ * @param a Operand
+ * @param b Operand
+ * @return GCD of a and b up to sign; if a >= 0 and b >= 0, return value is >=
+ * 0; if a == 0 and b == 0, returns 0.
+ */
+int64_t av_const av_gcd(int64_t a, int64_t b);
+
+/**
+ * Rescale a 64-bit integer with rounding to nearest.
+ *
+ * The operation is mathematically equivalent to `a * b / c`, but writing that
+ * directly can overflow.
+ *
+ * This function is equivalent to av_rescale_rnd() with #AV_ROUND_NEAR_INF.
+ *
+ * @see av_rescale_rnd(), av_rescale_q(), av_rescale_q_rnd()
+ */
+int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;
+
+/**
+ * Rescale a 64-bit integer with specified rounding.
+ *
+ * The operation is mathematically equivalent to `a * b / c`, but writing that
+ * directly can overflow, and does not support different rounding methods.
+ * If the result is not representable then INT64_MIN is returned.
+ *
+ * @see av_rescale(), av_rescale_q(), av_rescale_q_rnd()
+ */
+int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c,
+ enum AVRounding rnd) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers.
+ *
+ * The operation is mathematically equivalent to `a * bq / cq`.
+ *
+ * This function is equivalent to av_rescale_q_rnd() with #AV_ROUND_NEAR_INF.
+ *
+ * @see av_rescale(), av_rescale_rnd(), av_rescale_q_rnd()
+ */
+int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;
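/*
 * Editorial illustration, not part of the upstream header or of this patch:
 * converting a timestamp expressed in a 90 kHz time base into a 1/48000 time
 * base with av_rescale_q(), i.e. pts * (1/90000) / (1/48000), rounded to
 * nearest without intermediate overflow.
 */
#include <libavutil/mathematics.h>

static int64_t to_48khz_example(int64_t pts_90khz)
{
    AVRational src_tb = { 1, 90000 };
    AVRational dst_tb = { 1, 48000 };
    return av_rescale_q(pts_90khz, src_tb, dst_tb);
}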
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers with specified rounding.
+ *
+ * The operation is mathematically equivalent to `a * bq / cq`.
+ *
+ * @see av_rescale(), av_rescale_rnd(), av_rescale_q()
+ */
+int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
+ enum AVRounding rnd) av_const;
+
+/**
+ * Compare two timestamps each in its own time base.
+ *
+ * @return One of the following values:
+ * - -1 if `ts_a` is before `ts_b`
+ * - 1 if `ts_a` is after `ts_b`
+ * - 0 if they represent the same position
+ *
+ * @warning
+ * The result of the function is undefined if one of the timestamps is outside
+ * the `int64_t` range when represented in the other's timebase.
+ */
+int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);
+
+/**
+ * Compare the remainders of two integer operands divided by a common divisor.
+ *
+ * In other words, compare the least significant `log2(mod)` bits of integers
+ * `a` and `b`.
+ *
+ * @code{.c}
+ * av_compare_mod(0x11, 0x02, 0x10) < 0 // since 0x11 % 0x10 (0x1) < 0x02 % 0x10 (0x2)
+ * av_compare_mod(0x11, 0x02, 0x20) > 0 // since 0x11 % 0x20 (0x11) > 0x02 % 0x20 (0x02)
+ * @endcode
+ *
+ * @param a Operand
+ * @param b Operand
+ * @param mod Divisor; must be a power of 2
+ * @return
+ * - a negative value if `a % mod < b % mod`
+ * - a positive value if `a % mod > b % mod`
+ * - zero if `a % mod == b % mod`
+ */
+int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);
+
+/**
+ * Rescale a timestamp while preserving known durations.
+ *
+ * This function is designed to be called per audio packet to scale the input
+ * timestamp to a different time base. Compared to a simple av_rescale_q()
+ * call, this function is robust against possible inconsistent frame durations.
+ *
+ * The `last` parameter is a state variable that must be preserved for all
+ * subsequent calls for the same stream. For the first call, `*last` should be
+ * initialized to #AV_NOPTS_VALUE.
+ *
+ * @param[in] in_tb Input time base
+ * @param[in] in_ts Input timestamp
+ * @param[in] fs_tb Duration time base; typically this is finer-grained
+ * (greater) than `in_tb` and `out_tb`
+ * @param[in] duration Duration till the next call to this function (i.e.
+ * duration of the current packet/frame)
+ * @param[in,out] last Pointer to a timestamp expressed in terms of
+ * `fs_tb`, acting as a state variable
+ * @param[in] out_tb Output timebase
+ * @return Timestamp expressed in terms of `out_tb`
+ *
+ * @note In the context of this function, "duration" is in terms of samples, not
+ * seconds.
+ */
+int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb,
+ int duration, int64_t* last, AVRational out_tb);
+
+/**
+ * Add a value to a timestamp.
+ *
+ * This function guarantees that when the same value is repeatedly added,
+ * no accumulation of rounding errors occurs.
+ *
+ * @param[in] ts Input timestamp
+ * @param[in] ts_tb Input timestamp time base
+ * @param[in] inc Value to be added
+ * @param[in] inc_tb Time base of `inc`
+ */
+int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb,
+ int64_t inc);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MATHEMATICS_H */
diff --git a/media/ffvpx/libavutil/mem.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/mem.h
similarity index 79%
copy from media/ffvpx/libavutil/mem.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/mem.h
index c9c4fcf1ff02..8f5d8c756ee3 100644
--- a/media/ffvpx/libavutil/mem.h
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/mem.h
@@ -51,300 +51,216 @@
* @{
*/
-#if FF_API_DECLARE_ALIGNED
-/**
- *
- * @defgroup lavu_mem_macros Alignment Macros
- * Helper macros for declaring aligned variables.
- * @{
- */
-
-/**
- * @def DECLARE_ALIGNED(n,t,v)
- * Declare a variable that is aligned in memory.
- *
- * @code{.c}
- * DECLARE_ALIGNED(16, uint16_t, aligned_int) = 42;
- * DECLARE_ALIGNED(32, uint8_t, aligned_array)[128];
- *
- * // The default-alignment equivalent would be
- * uint16_t aligned_int = 42;
- * uint8_t aligned_array[128];
- * @endcode
- *
- * @param n Minimum alignment in bytes
- * @param t Type of the variable (or array element)
- * @param v Name of the variable
- */
-
-/**
- * @def DECLARE_ASM_ALIGNED(n,t,v)
- * Declare an aligned variable appropriate for use in inline assembly code.
- *
- * @code{.c}
- * DECLARE_ASM_ALIGNED(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
- * @endcode
- *
- * @param n Minimum alignment in bytes
- * @param t Type of the variable (or array element)
- * @param v Name of the variable
- */
-
-/**
- * @def DECLARE_ASM_CONST(n,t,v)
- * Declare a static constant aligned variable appropriate for use in inline
- * assembly code.
- *
- * @code{.c}
- * DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
- * @endcode
- *
- * @param n Minimum alignment in bytes
- * @param t Type of the variable (or array element)
- * @param v Name of the variable
- */
-
-#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)
- #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
- #define DECLARE_ASM_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
- #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v
-#elif defined(__DJGPP__)
- #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (FFMIN(n, 16)))) v
- #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
- #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
-#elif defined(__GNUC__) || defined(__clang__)
- #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
- #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (n))) v
- #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v
-#elif defined(_MSC_VER)
- #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
- #define DECLARE_ASM_ALIGNED(n,t,v) __declspec(align(n)) t v
- #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v
-#else
- #define DECLARE_ALIGNED(n,t,v) t v
- #define DECLARE_ASM_ALIGNED(n,t,v) t v
- #define DECLARE_ASM_CONST(n,t,v) static const t v
-#endif
-
-/**
- * @}
- */
-#endif
-
/**
* @defgroup lavu_mem_attrs Function Attributes
* Function attributes applicable to memory handling functions.
*
* These function attributes can help compilers emit more useful warnings, or
* generate better code.
* @{
*/
/**
* @def av_malloc_attrib
* Function attribute denoting a malloc-like function.
*
- * @see <a href="https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-g_t_0040code_007bmalloc_007d-function-attribute-3251">Function attribute `malloc` in GCC's documentation</a>
+ * @see <a
+ * href="https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-g_t_0040code_007bmalloc_007d-function-attribute-3251">Function
+ * attribute `malloc` in GCC's documentation</a>
*/
-#if AV_GCC_VERSION_AT_LEAST(3,1)
- #define av_malloc_attrib __attribute__((__malloc__))
+#if AV_GCC_VERSION_AT_LEAST(3, 1)
+# define av_malloc_attrib __attribute__((__malloc__))
#else
- #define av_malloc_attrib
+# define av_malloc_attrib
#endif
/**
* @def av_alloc_size(...)
* Function attribute used on a function that allocates memory, whose size is
* given by the specified parameter(s).
*
* @code{.c}
* void *av_malloc(size_t size) av_alloc_size(1);
* void *av_calloc(size_t nmemb, size_t size) av_alloc_size(1, 2);
* @endcode
*
* @param ... One or two parameter indexes, separated by a comma
*
- * @see <a href="https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-g_t_0040code_007balloc_005fsize_007d-function-attribute-3220">Function attribute `alloc_size` in GCC's documentation</a>
+ * @see <a
+ * href="https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-g_t_0040code_007balloc_005fsize_007d-function-attribute-3220">Function
+ * attribute `alloc_size` in GCC's documentation</a>
*/
-#if AV_GCC_VERSION_AT_LEAST(4,3)
- #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
+#if AV_GCC_VERSION_AT_LEAST(4, 3)
+# define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
#else
- #define av_alloc_size(...)
+# define av_alloc_size(...)
#endif
/**
* @}
*/
/**
* @defgroup lavu_mem_funcs Heap Management
* Functions responsible for allocating, freeing, and copying memory.
*
* All memory allocation functions have a built-in upper limit of `INT_MAX`
* bytes. This may be changed with av_max_alloc(), although exercise extreme
* caution when doing so.
*
* @{
*/
/**
* Allocate a memory block with alignment suitable for all memory accesses
* (including vectors if available on the CPU).
*
* @param size Size in bytes for the memory block to be allocated
* @return Pointer to the allocated block, or `NULL` if the block cannot
* be allocated
* @see av_mallocz()
*/
-void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);
+void* av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);
/**
* Allocate a memory block with alignment suitable for all memory accesses
* (including vectors if available on the CPU) and zero all the bytes of the
* block.
*
* @param size Size in bytes for the memory block to be allocated
* @return Pointer to the allocated block, or `NULL` if it cannot be allocated
* @see av_malloc()
*/
-void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);
+void* av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);
/**
* Allocate a memory block for an array with av_malloc().
*
* The allocated memory will have size `size * nmemb` bytes.
*
* @param nmemb Number of element
* @param size Size of a single element
* @return Pointer to the allocated block, or `NULL` if the block cannot
* be allocated
* @see av_malloc()
*/
-av_alloc_size(1, 2) void *av_malloc_array(size_t nmemb, size_t size);
+av_alloc_size(1, 2) void* av_malloc_array(size_t nmemb, size_t size);
/**
* Allocate a memory block for an array with av_mallocz().
*
* The allocated memory will have size `size * nmemb` bytes.
*
* @param nmemb Number of elements
* @param size Size of the single element
* @return Pointer to the allocated block, or `NULL` if the block cannot
* be allocated
*
* @see av_mallocz()
* @see av_malloc_array()
*/
-void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib av_alloc_size(1, 2);
-
-#if FF_API_AV_MALLOCZ_ARRAY
-/**
- * @deprecated use av_calloc()
- */
-attribute_deprecated
-void *av_mallocz_array(size_t nmemb, size_t size) av_malloc_attrib av_alloc_size(1, 2);
-#endif
+void* av_calloc(size_t nmemb, size_t size) av_malloc_attrib av_alloc_size(1, 2);
/**
* Allocate, reallocate, or free a block of memory.
*
* If `ptr` is `NULL` and `size` > 0, allocate a new block. Otherwise, expand or
* shrink that block of memory according to `size`.
*
* @param ptr Pointer to a memory block already allocated with
* av_realloc() or `NULL`
* @param size Size in bytes of the memory block to be allocated or
* reallocated
*
* @return Pointer to a newly-reallocated block or `NULL` if the block
* cannot be reallocated
*
* @warning Unlike av_malloc(), the returned pointer is not guaranteed to be
* correctly aligned. The returned pointer must be freed after even
* if size is zero.
* @see av_fast_realloc()
* @see av_reallocp()
*/
-void *av_realloc(void *ptr, size_t size) av_alloc_size(2);
+void* av_realloc(void* ptr, size_t size) av_alloc_size(2);
/**
* Allocate, reallocate, or free a block of memory through a pointer to a
* pointer.
*
* If `*ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is
* zero, free the memory block pointed to by `*ptr`. Otherwise, expand or
* shrink that block of memory according to `size`.
*
* @param[in,out] ptr Pointer to a pointer to a memory block already allocated
* with av_realloc(), or a pointer to `NULL`. The pointer
* is updated on success, or freed on failure.
* @param[in] size Size in bytes for the memory block to be allocated or
* reallocated
*
* @return Zero on success, an AVERROR error code on failure
*
* @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
* correctly aligned.
*/
-av_warn_unused_result
-int av_reallocp(void *ptr, size_t size);
+av_warn_unused_result int av_reallocp(void* ptr, size_t size);
/**
* Allocate, reallocate, or free a block of memory.
*
* This function does the same thing as av_realloc(), except:
* - It takes two size arguments and allocates `nelem * elsize` bytes,
* after checking the result of the multiplication for integer overflow.
* - It frees the input block in case of failure, thus avoiding the memory
* leak with the classic
* @code{.c}
* buf = realloc(buf);
* if (!buf)
* return -1;
* @endcode
* pattern.
*/
-void *av_realloc_f(void *ptr, size_t nelem, size_t elsize);
+void* av_realloc_f(void* ptr, size_t nelem, size_t elsize);
/**
* Allocate, reallocate, or free an array.
*
* If `ptr` is `NULL` and `nmemb` > 0, allocate a new block.
*
* @param ptr Pointer to a memory block already allocated with
* av_realloc() or `NULL`
* @param nmemb Number of elements in the array
* @param size Size of the single element of the array
*
* @return Pointer to a newly-reallocated block or NULL if the block
* cannot be reallocated
*
* @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
* correctly aligned. The returned pointer must be freed after even if
* nmemb is zero.
* @see av_reallocp_array()
*/
-av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size);
+av_alloc_size(2, 3) void* av_realloc_array(void* ptr, size_t nmemb,
+ size_t size);
/**
* Allocate, reallocate an array through a pointer to a pointer.
*
* If `*ptr` is `NULL` and `nmemb` > 0, allocate a new block.
*
* @param[in,out] ptr Pointer to a pointer to a memory block already
* allocated with av_realloc(), or a pointer to `NULL`.
* The pointer is updated on success, or freed on failure.
* @param[in] nmemb Number of elements
* @param[in] size Size of the single element
*
* @return Zero on success, an AVERROR error code on failure
*
* @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
* correctly aligned. *ptr must be freed after even if nmemb is zero.
*/
-int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
+int av_reallocp_array(void* ptr, size_t nmemb, size_t size);
/**
* Reallocate the given buffer if it is not large enough, otherwise do nothing.
@@ -378,7 +294,7 @@ int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
* @see av_realloc()
* @see av_fast_malloc()
*/
-void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
+void* av_fast_realloc(void* ptr, unsigned int* size, size_t min_size);
/**
* Allocate a buffer, reusing the given one if large enough.
@@ -409,107 +325,107 @@ void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
* @see av_realloc()
* @see av_fast_mallocz()
*/
-void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
+void av_fast_malloc(void* ptr, unsigned int* size, size_t min_size);
/**
* Allocate and clear a buffer, reusing the given one if large enough.
*
* Like av_fast_malloc(), but all newly allocated space is initially cleared.
* Reused buffer is not cleared.
*
* `*ptr` is allowed to be `NULL`, in which case allocation always happens if
* `size_needed` is greater than 0.
*
* @param[in,out] ptr Pointer to pointer to an already allocated buffer.
* `*ptr` will be overwritten with pointer to new
* buffer on success or `NULL` on failure
* @param[in,out] size Pointer to the size of buffer `*ptr`. `*size` is
* updated to the new allocated size, in particular 0
* in case of failure.
* @param[in] min_size Desired minimal size of buffer `*ptr`
* @see av_fast_malloc()
*/
-void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size);
+void av_fast_mallocz(void* ptr, unsigned int* size, size_t min_size);
/**
* Free a memory block which has been allocated with a function of av_malloc()
* or av_realloc() family.
*
* @param ptr Pointer to the memory block which should be freed.
*
* @note `ptr = NULL` is explicitly allowed.
* @note It is recommended that you use av_freep() instead, to prevent leaving
* behind dangling pointers.
* @see av_freep()
*/
-void av_free(void *ptr);
+void av_free(void* ptr);
/**
* Free a memory block which has been allocated with a function of av_malloc()
* or av_realloc() family, and set the pointer pointing to it to `NULL`.
*
* @code{.c}
* uint8_t *buf = av_malloc(16);
* av_free(buf);
* // buf now contains a dangling pointer to freed memory, and accidental
* // dereference of buf will result in a use-after-free, which may be a
* // security risk.
*
* uint8_t *buf = av_malloc(16);
* av_freep(&buf);
* // buf is now NULL, and accidental dereference will only result in a
* // NULL-pointer dereference.
* @endcode
*
* @param ptr Pointer to the pointer to the memory block which should be freed
* @note `*ptr = NULL` is safe and leads to no action.
* @see av_free()
*/
-void av_freep(void *ptr);
+void av_freep(void* ptr);
/**
* Duplicate a string.
*
* @param s String to be duplicated
* @return Pointer to a newly-allocated string containing a
* copy of `s` or `NULL` if the string cannot be allocated
* @see av_strndup()
*/
-char *av_strdup(const char *s) av_malloc_attrib;
+char* av_strdup(const char* s) av_malloc_attrib;
/**
* Duplicate a substring of a string.
*
* @param s String to be duplicated
* @param len Maximum length of the resulting string (not counting the
* terminating byte)
* @return Pointer to a newly-allocated string containing a
* substring of `s` or `NULL` if the string cannot be allocated
*/
-char *av_strndup(const char *s, size_t len) av_malloc_attrib;
+char* av_strndup(const char* s, size_t len) av_malloc_attrib;
/**
* Duplicate a buffer with av_malloc().
*
* @param p Buffer to be duplicated
* @param size Size in bytes of the buffer copied
* @return Pointer to a newly allocated buffer containing a
* copy of `p` or `NULL` if the buffer cannot be allocated
*/
-void *av_memdup(const void *p, size_t size);
+void* av_memdup(const void* p, size_t size);
/**
* Overlapping memcpy() implementation.
*
* @param dst Destination buffer
* @param back Number of bytes back to start copying (i.e. the initial size of
* the overlapping window); must be > 0
* @param cnt Number of bytes to copy; must be >= 0
*
* @note `cnt > back` is valid, this will copy the bytes we just copied,
* thus creating a repeating pattern with a period length of `back`.
*/
-void av_memcpy_backptr(uint8_t *dst, int back, int cnt);
+void av_memcpy_backptr(uint8_t* dst, int back, int cnt);
/**
* @}
@@ -611,68 +527,68 @@ void av_memcpy_backptr(uint8_t *dst, int back, int cnt);
* @param[in] elem Element to add
* @see av_dynarray_add_nofree(), av_dynarray2_add()
*/
-void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem);
+void av_dynarray_add(void* tab_ptr, int* nb_ptr, void* elem);
/**
* Add an element to a dynamic array.
*
* Function has the same functionality as av_dynarray_add(),
 * but it doesn't free memory on failure. It returns an error code
 * instead and leaves the current buffer untouched.
*
* @return >=0 on success, negative otherwise
* @see av_dynarray_add(), av_dynarray2_add()
*/
-av_warn_unused_result
-int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem);
+av_warn_unused_result int av_dynarray_add_nofree(void* tab_ptr, int* nb_ptr,
+ void* elem);
/**
* Add an element of size `elem_size` to a dynamic array.
*
* The array is reallocated when its number of elements reaches powers of 2.
* Therefore, the amortized cost of adding an element is constant.
*
* In case of success, the pointer to the array is updated in order to
* point to the new grown array, and the number pointed to by `nb_ptr`
* is incremented.
* In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and
* `*nb_ptr` is set to 0.
*
* @param[in,out] tab_ptr Pointer to the array to grow
* @param[in,out] nb_ptr Pointer to the number of elements in the array
* @param[in] elem_size Size in bytes of an element in the array
* @param[in] elem_data Pointer to the data of the element to add. If
* `NULL`, the space of the newly added element is
* allocated but left uninitialized.
*
* @return Pointer to the data of the element to copy in the newly allocated
* space
* @see av_dynarray_add(), av_dynarray_add_nofree()
*/
-void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size,
- const uint8_t *elem_data);
+void* av_dynarray2_add(void** tab_ptr, int* nb_ptr, size_t elem_size,
+ const uint8_t* elem_data);
/**
* @}
*/
/**
* @defgroup lavu_mem_misc Miscellaneous Functions
*
* Other functions related to memory allocation.
*
* @{
*/
/**
* Multiply two `size_t` values checking for overflow.
*
* @param[in] a Operand of multiplication
* @param[in] b Operand of multiplication
* @param[out] r Pointer to the result of the operation
* @return 0 on success, AVERROR(EINVAL) on overflow
*/
-int av_size_mult(size_t a, size_t b, size_t *r);
+int av_size_mult(size_t a, size_t b, size_t* r);
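/*
 * Editorial illustration, not part of the upstream header or of this patch:
 * checking an array-size multiplication for overflow before allocating.
 */
#include <stddef.h>
#include <libavutil/mem.h>

static void *alloc_table_example(size_t nmemb, size_t entry_size)
{
    size_t total;
    if (av_size_mult(nmemb, entry_size, &total) < 0)
        return NULL;          /* nmemb * entry_size would overflow */
    /* Equivalent convenience call: av_malloc_array(nmemb, entry_size). */
    return av_malloc(total);
}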
/**
* Set the maximum size that may be allocated in one block.
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/pixfmt.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/pixfmt.h
new file mode 100644
index 000000000000..b7516d9e3b58
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/pixfmt.h
@@ -0,0 +1,891 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXFMT_H
+#define AVUTIL_PIXFMT_H
+
+/**
+ * @file
+ * pixel format definitions
+ */
+
+#include "libavutil/avconfig.h"
+#include "version.h"
+
+#define AVPALETTE_SIZE 1024
+#define AVPALETTE_COUNT 256
+
+/**
+ * Pixel format.
+ *
+ * @note
+ * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
+ * color is put together as:
+ * (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little-endian CPU architectures and ARGB on
+ * big-endian CPUs.
+ *
+ * @note
+ * If the resolution is not a multiple of the chroma subsampling factor
+ * then the chroma plane resolution must be rounded up.
+ *
+ * @par
+ * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized
+ * image data is stored in AVFrame.data[0]. The palette is transported in
+ * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
+ * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is
+ * also endian-specific). Note also that the individual RGB32 palette
+ * components stored in AVFrame.data[1] should be in the range 0..255.
+ * This is important as many custom PAL8 video codecs that were designed
+ * to run on the IBM VGA graphics adapter use 6-bit palette components.
+ *
+ * @par
+ * For all the 8 bits per pixel formats, an RGB32 palette is in data[1] like
+ * for pal8. This palette is filled in automatically by the function
+ * allocating the picture.
+ */
+enum AVPixelFormat {
+ AV_PIX_FMT_NONE = -1,
+ AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y
+ ///< samples)
+ AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y
+ ///< samples)
+ AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y
+ ///< samples)
+ AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y
+ ///< samples)
+ AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y
+ ///< samples)
+ AV_PIX_FMT_GRAY8, ///< Y , 8bpp
+ AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black,
+ ///< in each byte pixels are ordered from the
+ ///< msb to the lsb
+ AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white,
+ ///< in each byte pixels are ordered from the
+ ///< msb to the lsb
+ AV_PIX_FMT_PAL8, ///< 8 bits with AV_PIX_FMT_RGB32 palette
+ AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG),
+ ///< deprecated in favor of AV_PIX_FMT_YUV420P and
+ ///< setting color_range
+ AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG),
+ ///< deprecated in favor of AV_PIX_FMT_YUV422P and
+ ///< setting color_range
+ AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG),
+ ///< deprecated in favor of AV_PIX_FMT_YUV444P and
+ ///< setting color_range
+ AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb),
+ ///< a byte contains two pixels, the first pixel in the byte
+ ///< is the one composed by the 4 msb bits
+ AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb),
+ ///< a byte contains two pixels, the first pixel in the byte
+ ///< is the one composed by the 4 msb bits
+ AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for
+ ///< the UV components, which are interleaved (first byte U
+ ///< and the following byte V)
+ AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y
+ ///< samples)
+ AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in
+ ///< favor of AV_PIX_FMT_YUV440P and setting color_range
+ AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2
+ ///< Y & A samples)
+ AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the
+ ///< 2-byte value for each R/G/B component is stored as
+ ///< big-endian
+ AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the
+ ///< 2-byte value for each R/G/B component is stored as
+ ///< little-endian
+
+ AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb),
+ ///< big-endian
+ AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb),
+ ///< little-endian
+ AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb),
+ ///< big-endian , X=unused/undefined
+ AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb),
+ ///< little-endian, X=unused/undefined
+
+ AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb),
+ ///< big-endian
+ AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb),
+ ///< little-endian
+ AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb),
+ ///< big-endian , X=unused/undefined
+ AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb),
+ ///< little-endian, X=unused/undefined
+
+ /**
+ * Hardware acceleration through VA-API, data[3] contains a
+ * VASurfaceID.
+ */
+ AV_PIX_FMT_VAAPI,
+
+ AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), big-endian
+ AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3]
+ ///< contains a LPDIRECT3DSURFACE9 pointer
+
+ AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb),
+ ///< little-endian, X=unused/undefined
+ AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb),
+ ///< big-endian, X=unused/undefined
+ AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb),
+ ///< little-endian, X=unused/undefined
+ AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb),
+ ///< big-endian, X=unused/undefined
+ AV_PIX_FMT_YA8, ///< 8 bits gray, 8 bits alpha
+
+ AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+ AV_PIX_FMT_GRAY8A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+
+ AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the
+ ///< 2-byte value for each R/G/B component is stored as
+ ///< big-endian
+ AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the
+ ///< 2-byte value for each R/G/B component is stored as
+ ///< little-endian
+
+ /**
+ * The following 12 formats have the disadvantage of needing 1 format for each
+ * bit depth. Notice that each 9/10 bits sample is stored in 16 bits with
+ * extra padding. If you want to support multiple bit depths, then using
+ * AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
+ */
+ AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P10BE, ///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P10LE, ///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P10BE, ///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P10LE, ///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P10BE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P10LE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), little-endian
+ AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, // alias for #AV_PIX_FMT_GBRP
+ AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian
+ AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian
+ AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian
+ AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian
+ AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian
+ AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian
+ AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y
+ ///< & A samples)
+ AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y
+ ///< & A samples)
+ AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y & A samples, little-endian)
+
+ AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3]
+ ///< contains a VdpVideoSurface
+
+ AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb),
+ ///< the 2-byte value for each X/Y/Z is stored as
+ ///< little-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb),
+ ///< the 2-byte value for each X/Y/Z is stored as
+ ///< big-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample
+ ///< per 2x1 Y samples)
+ AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb
+ ///< sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb
+ ///< sample per 2x1 Y samples), big-endian
+
+ AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A,
+ ///< the 2-byte value for each R/G/B/A component is
+ ///< stored as big-endian
+ AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A,
+ ///< the 2-byte value for each R/G/B/A component is
+ ///< stored as little-endian
+ AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A,
+ ///< the 2-byte value for each R/G/B/A component is
+ ///< stored as big-endian
+ AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A,
+ ///< the 2-byte value for each R/G/B/A component is
+ ///< stored as little-endian
+
+ AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
+
+ AV_PIX_FMT_YA16BE, ///< 16 bits gray, 16 bits alpha (big-endian)
+ AV_PIX_FMT_YA16LE, ///< 16 bits gray, 16 bits alpha (little-endian)
+
+ AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp
+ AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian
+ AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian
+ /**
+ * HW acceleration through QSV, data[3] contains a pointer to the
+ * mfxFrameSurface1 structure.
+ *
+ * Before FFmpeg 5.0:
+ * mfxFrameSurface1.Data.MemId contains a pointer when importing
+ * the following frames as QSV frames:
+ *
+ * VAAPI:
+ * mfxFrameSurface1.Data.MemId contains a pointer to VASurfaceID
+ *
+ * DXVA2:
+ * mfxFrameSurface1.Data.MemId contains a pointer to IDirect3DSurface9
+ *
+ * FFmpeg 5.0 and above:
+ * mfxFrameSurface1.Data.MemId contains a pointer to the mfxHDLPair
+ * structure when importing the following frames as QSV frames:
+ *
+ * VAAPI:
+ * mfxHDLPair.first contains a VASurfaceID pointer.
+ * mfxHDLPair.second is always MFX_INFINITE.
+ *
+ * DXVA2:
+ * mfxHDLPair.first contains IDirect3DSurface9 pointer.
+ * mfxHDLPair.second is always MFX_INFINITE.
+ *
+ * D3D11:
+ * mfxHDLPair.first contains a ID3D11Texture2D pointer.
+ * mfxHDLPair.second contains the texture array index of the frame if the
+ * ID3D11Texture2D is an array texture, or always MFX_INFINITE if it is a
+ * normal texture.
+ */
+ AV_PIX_FMT_QSV,
+ /**
+ * HW acceleration though MMAL, data[3] contains a pointer to the
+ * MMAL_BUFFER_HEADER_T structure.
+ */
+ AV_PIX_FMT_MMAL,
+
+ AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11 via old API,
+ ///< Picture.data[3] contains a
+ ///< ID3D11VideoDecoderOutputView pointer
+
+ /**
+ * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers
+ * exactly as for system memory frames.
+ */
+ AV_PIX_FMT_CUDA,
+
+ AV_PIX_FMT_0RGB, ///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
+ AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
+ AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
+ AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
+
+ AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), little-endian
+ AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian
+ AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian
+ AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian
+ AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian
+ AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1
+ ///< Y samples) full scale (JPEG), deprecated in favor
+ ///< of AV_PIX_FMT_YUV411P and setting color_range
+
+ AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line),
+ ///< 8-bit samples
+ AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line),
+ ///< 8-bit samples
+ AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line),
+ ///< 8-bit samples
+ AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line),
+ ///< 8-bit samples
+ AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line),
+ ///< 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line),
+ ///< 16-bit samples, big-endian
+ AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line),
+ ///< 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line),
+ ///< 16-bit samples, big-endian
+ AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line),
+ ///< 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line),
+ ///< 16-bit samples, big-endian
+ AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line),
+ ///< 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line),
+ ///< 16-bit samples, big-endian
+
+#if FF_API_XVMC
+ AV_PIX_FMT_XVMC, ///< XVideo Motion Acceleration via common packet passing
+#endif
+
+ AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per
+ ///< 1x2 Y samples), little-endian
+ AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per
+ ///< 1x2 Y samples), big-endian
+ AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per
+ ///< 1x2 Y samples), little-endian
+ AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per
+ ///< 1x2 Y samples), big-endian
+ AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y
+ ///< & A samples), little-endian
+ AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y
+ ///< & A samples), big-endian
+
+ AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox
+
+ AV_PIX_FMT_P010LE, ///< like NV12, with 10bpp per component, data in the high
+ ///< bits, zeros in the low bits, little-endian
+ AV_PIX_FMT_P010BE, ///< like NV12, with 10bpp per component, data in the high
+ ///< bits, zeros in the low bits, big-endian
+
+ AV_PIX_FMT_GBRAP12BE, ///< planar GBR 4:4:4:4 48bpp, big-endian
+ AV_PIX_FMT_GBRAP12LE, ///< planar GBR 4:4:4:4 48bpp, little-endian
+
+ AV_PIX_FMT_GBRAP10BE, ///< planar GBR 4:4:4:4 40bpp, big-endian
+ AV_PIX_FMT_GBRAP10LE, ///< planar GBR 4:4:4:4 40bpp, little-endian
+
+ AV_PIX_FMT_MEDIACODEC, ///< hardware decoding through MediaCodec
+
+ AV_PIX_FMT_GRAY12BE, ///< Y , 12bpp, big-endian
+ AV_PIX_FMT_GRAY12LE, ///< Y , 12bpp, little-endian
+ AV_PIX_FMT_GRAY10BE, ///< Y , 10bpp, big-endian
+ AV_PIX_FMT_GRAY10LE, ///< Y , 10bpp, little-endian
+
+ AV_PIX_FMT_P016LE, ///< like NV12, with 16bpp per component, little-endian
+ AV_PIX_FMT_P016BE, ///< like NV12, with 16bpp per component, big-endian
+
+ /**
+ * Hardware surfaces for Direct3D11.
+ *
+ * This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new D3D11
+ * hwaccel API and filtering support AV_PIX_FMT_D3D11 only.
+ *
+ * data[0] contains a ID3D11Texture2D pointer, and data[1] contains the
+ * texture array index of the frame as intptr_t if the ID3D11Texture2D is
+ * an array texture (or always 0 if it's a normal texture).
+ */
+ AV_PIX_FMT_D3D11,
+
+ AV_PIX_FMT_GRAY9BE, ///< Y , 9bpp, big-endian
+ AV_PIX_FMT_GRAY9LE, ///< Y , 9bpp, little-endian
+
+ AV_PIX_FMT_GBRPF32BE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp,
+ ///< big-endian
+ AV_PIX_FMT_GBRPF32LE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp,
+ ///< little-endian
+ AV_PIX_FMT_GBRAPF32BE, ///< IEEE-754 single precision planar GBRA 4:4:4:4,
+ ///< 128bpp, big-endian
+ AV_PIX_FMT_GBRAPF32LE, ///< IEEE-754 single precision planar GBRA 4:4:4:4,
+ ///< 128bpp, little-endian
+
+ /**
+ * DRM-managed buffers exposed through PRIME buffer sharing.
+ *
+ * data[0] points to an AVDRMFrameDescriptor.
+ */
+ AV_PIX_FMT_DRM_PRIME,
+ /**
+ * Hardware surfaces for OpenCL.
+ *
+ * data[i] contain 2D image objects (typed in C as cl_mem, used
+ * in OpenCL as image2d_t) for each plane of the surface.
+ */
+ AV_PIX_FMT_OPENCL,
+
+ AV_PIX_FMT_GRAY14BE, ///< Y , 14bpp, big-endian
+ AV_PIX_FMT_GRAY14LE, ///< Y , 14bpp, little-endian
+
+ AV_PIX_FMT_GRAYF32BE, ///< IEEE-754 single precision Y, 32bpp, big-endian
+ AV_PIX_FMT_GRAYF32LE, ///< IEEE-754 single precision Y, 32bpp, little-endian
+
+ AV_PIX_FMT_YUVA422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), 12b alpha, big-endian
+ AV_PIX_FMT_YUVA422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), 12b alpha, little-endian
+ AV_PIX_FMT_YUVA444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), 12b alpha, big-endian
+ AV_PIX_FMT_YUVA444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), 12b alpha, little-endian
+
+ AV_PIX_FMT_NV24, ///< planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for
+ ///< the UV components, which are interleaved (first byte U
+ ///< and the following byte V)
+ AV_PIX_FMT_NV42, ///< as above, but U and V bytes are swapped
+
+ /**
+ * Vulkan hardware images.
+ *
+ * data[0] points to an AVVkFrame
+ */
+ AV_PIX_FMT_VULKAN,
+
+ AV_PIX_FMT_Y210BE, ///< packed YUV 4:2:2 like YUYV422, 20bpp, data in the
+ ///< high bits, big-endian
+ AV_PIX_FMT_Y210LE, ///< packed YUV 4:2:2 like YUYV422, 20bpp, data in the
+ ///< high bits, little-endian
+
+ AV_PIX_FMT_X2RGB10LE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G
+ ///< 10B(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_X2RGB10BE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G
+ ///< 10B(lsb), big-endian, X=unused/undefined
+ AV_PIX_FMT_X2BGR10LE, ///< packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G
+ ///< 10R(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_X2BGR10BE, ///< packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G
+ ///< 10R(lsb), big-endian, X=unused/undefined
+
+ AV_PIX_FMT_P210BE, ///< interleaved chroma YUV 4:2:2, 20bpp, data in the high
+ ///< bits, big-endian
+ AV_PIX_FMT_P210LE, ///< interleaved chroma YUV 4:2:2, 20bpp, data in the high
+ ///< bits, little-endian
+
+ AV_PIX_FMT_P410BE, ///< interleaved chroma YUV 4:4:4, 30bpp, data in the high
+ ///< bits, big-endian
+ AV_PIX_FMT_P410LE, ///< interleaved chroma YUV 4:4:4, 30bpp, data in the high
+ ///< bits, little-endian
+
+ AV_PIX_FMT_P216BE, ///< interleaved chroma YUV 4:2:2, 32bpp, big-endian
+ AV_PIX_FMT_P216LE, ///< interleaved chroma YUV 4:2:2, 32bpp, little-endian
+
+ AV_PIX_FMT_P416BE, ///< interleaved chroma YUV 4:4:4, 48bpp, big-endian
+ AV_PIX_FMT_P416LE, ///< interleaved chroma YUV 4:4:4, 48bpp, little-endian
+
+ AV_PIX_FMT_VUYA, ///< packed VUYA 4:4:4, 32bpp, VUYAVUYA...
+
+ AV_PIX_FMT_RGBAF16BE, ///< IEEE-754 half precision packed RGBA 16:16:16:16,
+ ///< 64bpp, RGBARGBA..., big-endian
+ AV_PIX_FMT_RGBAF16LE, ///< IEEE-754 half precision packed RGBA 16:16:16:16,
+ ///< 64bpp, RGBARGBA..., little-endian
+
+ AV_PIX_FMT_VUYX, ///< packed VUYX 4:4:4, 32bpp, Variant of VUYA where alpha
+ ///< channel is left undefined
+
+ AV_PIX_FMT_P012LE, ///< like NV12, with 12bpp per component, data in the high
+ ///< bits, zeros in the low bits, little-endian
+ AV_PIX_FMT_P012BE, ///< like NV12, with 12bpp per component, data in the high
+ ///< bits, zeros in the low bits, big-endian
+
+ AV_PIX_FMT_Y212BE, ///< packed YUV 4:2:2 like YUYV422, 24bpp, data in the
+ ///< high bits, zeros in the low bits, big-endian
+ AV_PIX_FMT_Y212LE, ///< packed YUV 4:2:2 like YUYV422, 24bpp, data in the
+ ///< high bits, zeros in the low bits, little-endian
+
+ AV_PIX_FMT_XV30BE, ///< packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y 10U(lsb),
+ ///< big-endian, variant of Y410 where alpha channel is
+ ///< left undefined
+ AV_PIX_FMT_XV30LE, ///< packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y 10U(lsb),
+ ///< little-endian, variant of Y410 where alpha channel is
+ ///< left undefined
+
+ AV_PIX_FMT_XV36BE, ///< packed XVYU 4:4:4, 48bpp, data in the high bits,
+ ///< zeros in the low bits, big-endian, variant of Y412
+ ///< where alpha channel is left undefined
+ AV_PIX_FMT_XV36LE, ///< packed XVYU 4:4:4, 48bpp, data in the high bits,
+ ///< zeros in the low bits, little-endian, variant of Y412
+ ///< where alpha channel is left undefined
+
+ AV_PIX_FMT_RGBF32BE, ///< IEEE-754 single precision packed RGB 32:32:32,
+ ///< 96bpp, RGBRGB..., big-endian
+ AV_PIX_FMT_RGBF32LE, ///< IEEE-754 single precision packed RGB 32:32:32,
+ ///< 96bpp, RGBRGB..., little-endian
+
+ AV_PIX_FMT_RGBAF32BE, ///< IEEE-754 single precision packed RGBA 32:32:32:32,
+ ///< 128bpp, RGBARGBA..., big-endian
+ AV_PIX_FMT_RGBAF32LE, ///< IEEE-754 single precision packed RGBA 32:32:32:32,
+ ///< 128bpp, RGBARGBA..., little-endian
+
+ AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to
+ ///< link with shared libav* because the number of formats
+ ///< might differ between versions
+};
+
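+/* Usage sketch (illustrative; `frame` is a hypothetical AVFrame*): for
+ * AV_PIX_FMT_PAL8 the palette travels in AVFrame.data[1] as 256 four-byte
+ * entries laid out like AV_PIX_FMT_RGB32, so a pixel can be resolved as:
+ *
+ *   const uint32_t* pal  = (const uint32_t*)frame->data[1];
+ *   uint32_t        argb = pal[frame->data[0][0]];      // first pixel
+ *   uint8_t a = argb >> 24, r = (argb >> 16) & 0xFF;
+ *   uint8_t g = (argb >> 8) & 0xFF, b = argb & 0xFF;
+ */
+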
+#if AV_HAVE_BIGENDIAN
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be
+#else
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le
+#endif
+
+#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA)
+#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR)
+#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA)
+#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB)
+#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0)
+#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0)
+
+#define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE)
+#define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE)
+#define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE)
+#define AV_PIX_FMT_GRAY14 AV_PIX_FMT_NE(GRAY14BE, GRAY14LE)
+#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)
+#define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE)
+#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE)
+#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE)
+#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE)
+#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE)
+#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE)
+#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE)
+#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE)
+#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE)
+#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE)
+#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE)
+
+#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE, YUV420P9LE)
+#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE, YUV422P9LE)
+#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE, YUV444P9LE)
+#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE)
+#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE)
+#define AV_PIX_FMT_YUV440P10 AV_PIX_FMT_NE(YUV440P10BE, YUV440P10LE)
+#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE)
+#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE)
+#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE)
+#define AV_PIX_FMT_YUV440P12 AV_PIX_FMT_NE(YUV440P12BE, YUV440P12LE)
+#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE)
+#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE)
+#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE)
+#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE)
+#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE)
+#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE)
+#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE)
+
+#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE, GBRP9LE)
+#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE)
+#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE)
+#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE)
+#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE)
+#define AV_PIX_FMT_GBRAP10 AV_PIX_FMT_NE(GBRAP10BE, GBRAP10LE)
+#define AV_PIX_FMT_GBRAP12 AV_PIX_FMT_NE(GBRAP12BE, GBRAP12LE)
+#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE)
+
+#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE)
+#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE)
+#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE)
+#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE)
+
+#define AV_PIX_FMT_GBRPF32 AV_PIX_FMT_NE(GBRPF32BE, GBRPF32LE)
+#define AV_PIX_FMT_GBRAPF32 AV_PIX_FMT_NE(GBRAPF32BE, GBRAPF32LE)
+
+#define AV_PIX_FMT_GRAYF32 AV_PIX_FMT_NE(GRAYF32BE, GRAYF32LE)
+
+#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE, YUVA420P9LE)
+#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE, YUVA422P9LE)
+#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE, YUVA444P9LE)
+#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE)
+#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE)
+#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE)
+#define AV_PIX_FMT_YUVA422P12 AV_PIX_FMT_NE(YUVA422P12BE, YUVA422P12LE)
+#define AV_PIX_FMT_YUVA444P12 AV_PIX_FMT_NE(YUVA444P12BE, YUVA444P12LE)
+#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE)
+#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE)
+#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)
+
+#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE)
+#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE)
+#define AV_PIX_FMT_AYUV64 AV_PIX_FMT_NE(AYUV64BE, AYUV64LE)
+#define AV_PIX_FMT_P010 AV_PIX_FMT_NE(P010BE, P010LE)
+#define AV_PIX_FMT_P012 AV_PIX_FMT_NE(P012BE, P012LE)
+#define AV_PIX_FMT_P016 AV_PIX_FMT_NE(P016BE, P016LE)
+
+#define AV_PIX_FMT_Y210 AV_PIX_FMT_NE(Y210BE, Y210LE)
+#define AV_PIX_FMT_Y212 AV_PIX_FMT_NE(Y212BE, Y212LE)
+#define AV_PIX_FMT_XV30 AV_PIX_FMT_NE(XV30BE, XV30LE)
+#define AV_PIX_FMT_XV36 AV_PIX_FMT_NE(XV36BE, XV36LE)
+#define AV_PIX_FMT_X2RGB10 AV_PIX_FMT_NE(X2RGB10BE, X2RGB10LE)
+#define AV_PIX_FMT_X2BGR10 AV_PIX_FMT_NE(X2BGR10BE, X2BGR10LE)
+
+#define AV_PIX_FMT_P210 AV_PIX_FMT_NE(P210BE, P210LE)
+#define AV_PIX_FMT_P410 AV_PIX_FMT_NE(P410BE, P410LE)
+#define AV_PIX_FMT_P216 AV_PIX_FMT_NE(P216BE, P216LE)
+#define AV_PIX_FMT_P416 AV_PIX_FMT_NE(P416BE, P416LE)
+
+#define AV_PIX_FMT_RGBAF16 AV_PIX_FMT_NE(RGBAF16BE, RGBAF16LE)
+
+#define AV_PIX_FMT_RGBF32 AV_PIX_FMT_NE(RGBF32BE, RGBF32LE)
+#define AV_PIX_FMT_RGBAF32 AV_PIX_FMT_NE(RGBAF32BE, RGBAF32LE)
+
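+/* Usage sketch (illustrative): AV_PIX_FMT_NE() picks the native-endian
+ * variant at compile time, so the aliases above never need a runtime
+ * byte-order check:
+ *
+ *   enum AVPixelFormat fmt = AV_PIX_FMT_RGB32;
+ *   // On little-endian builds fmt == AV_PIX_FMT_BGRA,
+ *   // on big-endian builds   fmt == AV_PIX_FMT_ARGB.
+ */
+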
+/**
+ * Chromaticity coordinates of the source primaries.
+ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.1 and
+ * ITU-T H.273.
+ */
+enum AVColorPrimaries {
+ AVCOL_PRI_RESERVED0 = 0,
+ AVCOL_PRI_BT709 =
+ 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
+ AVCOL_PRI_UNSPECIFIED = 2,
+ AVCOL_PRI_RESERVED = 3,
+ AVCOL_PRI_BT470M =
+ 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+
+ AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R
+ ///< BT1700 625 PAL & SECAM
+ AVCOL_PRI_SMPTE170M =
+ 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+ AVCOL_PRI_SMPTE240M =
+ 7, ///< identical to above, also called "SMPTE C" even though it uses D65
+ AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C
+ AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
+ AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ)
+ AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428,
+ AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3
+ AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3
+ AVCOL_PRI_EBU3213 = 22, ///< EBU Tech. 3213-E (nothing there) / one of JEDEC
+ ///< P22 group phosphors
+ AVCOL_PRI_JEDEC_P22 = AVCOL_PRI_EBU3213,
+ AVCOL_PRI_NB ///< Not part of ABI
+};
+
+/**
+ * Color Transfer Characteristic.
+ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.2.
+ */
+enum AVColorTransferCharacteristic {
+ AVCOL_TRC_RESERVED0 = 0,
+ AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
+ AVCOL_TRC_UNSPECIFIED = 2,
+ AVCOL_TRC_RESERVED = 3,
+ AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
+ AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358
+ ///< 525 or 625 / ITU-R BT1700 NTSC
+ AVCOL_TRC_SMPTE240M = 7,
+ AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics"
+ AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)"
+ AVCOL_TRC_LOG_SQRT =
+ 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)"
+ AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4
+ AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut
+ AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)
+ AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system
+ AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system
+ AVCOL_TRC_SMPTE2084 =
+ 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems
+ AVCOL_TRC_SMPTEST2084 = AVCOL_TRC_SMPTE2084,
+ AVCOL_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1
+ AVCOL_TRC_SMPTEST428_1 = AVCOL_TRC_SMPTE428,
+ AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma"
+ AVCOL_TRC_NB ///< Not part of ABI
+};
+
+/**
+ * YUV colorspace type.
+ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.3.
+ */
+enum AVColorSpace {
+ AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC
+ ///< 61966-2-1 (sRGB), YZX and ST 428-1
+ AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 /
+ ///< derived in SMPTE RP 177 Annex B
+ AVCOL_SPC_UNSPECIFIED = 2,
+ AVCOL_SPC_RESERVED =
+ 3, ///< reserved for future use by ITU-T and ISO/IEC just like 15-255 are
+ AVCOL_SPC_FCC =
+ 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+ AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R
+ ///< BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
+ AVCOL_SPC_SMPTE170M =
+ 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC /
+ ///< functionally identical to above
+ AVCOL_SPC_SMPTE240M =
+ 7, ///< derived from 170M primaries and D65 white point, 170M is derived
+ ///< from BT470 System M's primaries
+ AVCOL_SPC_YCGCO =
+ 8, ///< used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
+ AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO,
+ AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
+ AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
+ AVCOL_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x
+ AVCOL_SPC_CHROMA_DERIVED_NCL =
+ 12, ///< Chromaticity-derived non-constant luminance system
+ AVCOL_SPC_CHROMA_DERIVED_CL =
+ 13, ///< Chromaticity-derived constant luminance system
+ AVCOL_SPC_ICTCP = 14, ///< ITU-R BT.2100-0, ICtCp
+ AVCOL_SPC_NB ///< Not part of ABI
+};
+
+/**
+ * Visual content value range.
+ *
+ * These values are based on definitions that can be found in multiple
+ * specifications, such as ITU-T BT.709 (3.4 - Quantization of RGB, luminance
+ * and colour-difference signals), ITU-T BT.2020 (Table 5 - Digital
+ * Representation) as well as ITU-T BT.2100 (Table 9 - Digital 10- and 12-bit
+ * integer representation). At the time of writing, the BT.2100 one is
+ * recommended, as it also defines the full range representation.
+ *
+ * Common definitions:
+ * - For RGB and luma planes such as Y in YCbCr and I in ICtCp,
+ * 'E' is the original value in range of 0.0 to 1.0.
+ * - For chroma planes such as Cb,Cr and Ct,Cp, 'E' is the original
+ * value in range of -0.5 to 0.5.
+ * - 'n' is the output bit depth.
+ * - For additional definitions such as rounding and clipping to valid n
+ * bit unsigned integer range, please refer to BT.2100 (Table 9).
+ */
+enum AVColorRange {
+ AVCOL_RANGE_UNSPECIFIED = 0,
+
+ /**
+ * Narrow or limited range content.
+ *
+ * - For luma planes:
+ *
+ * (219 * E + 16) * 2^(n-8)
+ *
+ * F.ex. the range of 16-235 for 8 bits
+ *
+ * - For chroma planes:
+ *
+ * (224 * E + 128) * 2^(n-8)
+ *
+ * F.ex. the range of 16-240 for 8 bits
+ */
+ AVCOL_RANGE_MPEG = 1,
+
+ /**
+ * Full range content.
+ *
+ * - For RGB and luma planes:
+ *
+ * (2^n - 1) * E
+ *
+ * F.ex. the range of 0-255 for 8 bits
+ *
+ * - For chroma planes:
+ *
+ * (2^n - 1) * E + 2^(n - 1)
+ *
+ * F.ex. the range of 1-255 for 8 bits
+ */
+ AVCOL_RANGE_JPEG = 2,
+ AVCOL_RANGE_NB ///< Not part of ABI
+};
+
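+/* Worked example (illustrative): with the limited-range formula
+ * (219 * E + 16) * 2^(n-8), luma spans 16..235 at n = 8 bits and
+ * 64..940 at n = 10 bits:
+ *
+ *   int n        = 10;
+ *   int luma_min = 16  << (n - 8);   // 64
+ *   int luma_max = 235 << (n - 8);   // 940
+ */
+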
+/**
+ * Location of chroma samples.
+ *
+ * Illustration showing the location of the first (top left) chroma sample of
+ * the image. The left shows only luma, the right shows the location of the
+ * chroma sample; the two could be imagined to overlay each other but are
+ * drawn separately due to limitations of ASCII.
+ *
+ *                 1st 2nd       1st 2nd horizontal luma sample positions
+ *                  v   v         v   v
+ *                 ______        ______
+ *1st luma line > |X   X ...    |3 4 X ...     X are luma samples,
+ *                |             |1 2           1-6 are possible chroma positions
+ *2nd luma line > |X   X ...    |5 6 X ...     0 is undefined/unknown position
+ */
+enum AVChromaLocation {
+ AVCHROMA_LOC_UNSPECIFIED = 0,
+ AVCHROMA_LOC_LEFT = 1, ///< MPEG-2/4 4:2:0, H.264 default for 4:2:0
+ AVCHROMA_LOC_CENTER = 2, ///< MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0
+ AVCHROMA_LOC_TOPLEFT =
+ 3, ///< ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2
+ AVCHROMA_LOC_TOP = 4,
+ AVCHROMA_LOC_BOTTOMLEFT = 5,
+ AVCHROMA_LOC_BOTTOM = 6,
+ AVCHROMA_LOC_NB ///< Not part of ABI
+};
+
+#endif /* AVUTIL_PIXFMT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/rational.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/rational.h
new file mode 100644
index 000000000000..1ce38d09688c
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/rational.h
@@ -0,0 +1,222 @@
+/*
+ * rational numbers
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_math_rational
+ * Utilities for rational number calculation.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_RATIONAL_H
+#define AVUTIL_RATIONAL_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "attributes.h"
+
+/**
+ * @defgroup lavu_math_rational AVRational
+ * @ingroup lavu_math
+ * Rational number calculation.
+ *
+ * While rational numbers can be expressed as floating-point numbers, the
+ * conversion process is a lossy one, and so are floating-point operations. On
+ * the
+ * other hand, the nature of FFmpeg demands highly accurate calculation of
+ * timestamps. This set of rational number utilities serves as a generic
+ * interface for manipulating rational numbers as pairs of numerators and
+ * denominators.
+ *
+ * Many of the functions that operate on AVRational's have the suffix `_q`, in
+ * reference to the mathematical symbol "ℚ" (Q) which denotes the set of all
+ * rational numbers.
+ *
+ * @{
+ */
+
+/**
+ * Rational number (pair of numerator and denominator).
+ */
+typedef struct AVRational {
+ int num; ///< Numerator
+ int den; ///< Denominator
+} AVRational;
+
+/**
+ * Create an AVRational.
+ *
+ * Useful for compilers that do not support compound literals.
+ *
+ * @note The return value is not reduced.
+ * @see av_reduce()
+ */
+static inline AVRational av_make_q(int num, int den) {
+ AVRational r = {num, den};
+ return r;
+}
+
+/**
+ * Compare two rationals.
+ *
+ * @param a First rational
+ * @param b Second rational
+ *
+ * @return One of the following values:
+ * - 0 if `a == b`
+ * - 1 if `a > b`
+ * - -1 if `a < b`
+ * - `INT_MIN` if one of the values is of the form `0 / 0`
+ */
+static inline int av_cmp_q(AVRational a, AVRational b) {
+ const int64_t tmp = a.num * (int64_t)b.den - b.num * (int64_t)a.den;
+
+ if (tmp)
+ return (int)((tmp ^ a.den ^ b.den) >> 63) | 1;
+ else if (b.den && a.den)
+ return 0;
+ else if (a.num && b.num)
+ return (a.num >> 31) - (b.num >> 31);
+ else
+ return INT_MIN;
+}
+
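+/* Usage sketch (illustrative): comparing two time bases.
+ *
+ *   AVRational a = av_make_q(1, 25);
+ *   AVRational b = av_make_q(1, 30);
+ *   int        c = av_cmp_q(a, b);   // 1, because 1/25 > 1/30
+ */
+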
+/**
+ * Convert an AVRational to a `double`.
+ * @param a AVRational to convert
+ * @return `a` in floating-point form
+ * @see av_d2q()
+ */
+static inline double av_q2d(AVRational a) { return a.num / (double)a.den; }
+
+/**
+ * Reduce a fraction.
+ *
+ * This is useful for framerate calculations.
+ *
+ * @param[out] dst_num Destination numerator
+ * @param[out] dst_den Destination denominator
+ * @param[in] num Source numerator
+ * @param[in] den Source denominator
+ * @param[in] max Maximum allowed values for `dst_num` & `dst_den`
+ * @return 1 if the operation is exact, 0 otherwise
+ */
+int av_reduce(int* dst_num, int* dst_den, int64_t num, int64_t den,
+ int64_t max);
+
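+/* Usage sketch (illustrative): reducing an NTSC-style frame rate.
+ *
+ *   int num, den;
+ *   int exact = av_reduce(&num, &den, 60000, 2002, INT_MAX);
+ *   // exact == 1, num == 30000, den == 1001
+ */
+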
+/**
+ * Multiply two rationals.
+ * @param b First rational
+ * @param c Second rational
+ * @return b*c
+ */
+AVRational av_mul_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Divide one rational by another.
+ * @param b First rational
+ * @param c Second rational
+ * @return b/c
+ */
+AVRational av_div_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Add two rationals.
+ * @param b First rational
+ * @param c Second rational
+ * @return b+c
+ */
+AVRational av_add_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Subtract one rational from another.
+ * @param b First rational
+ * @param c Second rational
+ * @return b-c
+ */
+AVRational av_sub_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Invert a rational.
+ * @param q value
+ * @return 1 / q
+ */
+static av_always_inline AVRational av_inv_q(AVRational q) {
+ AVRational r = {q.den, q.num};
+ return r;
+}
+
+/**
+ * Convert a double precision floating point number to a rational.
+ *
+ * In case of infinity, the returned value is expressed as `{1, 0}` or
+ * `{-1, 0}` depending on the sign.
+ *
+ * @param d `double` to convert
+ * @param max Maximum allowed numerator and denominator
+ * @return `d` in AVRational form
+ * @see av_q2d()
+ */
+AVRational av_d2q(double d, int max) av_const;
+
+/**
+ * Find which of the two rationals is closer to another rational.
+ *
+ * @param q Rational to be compared against
+ * @param q1 Rational to be tested
+ * @param q2 Rational to be tested
+ * @return One of the following values:
+ * - 1 if `q1` is nearer to `q` than `q2`
+ * - -1 if `q2` is nearer to `q` than `q1`
+ * - 0 if they have the same distance
+ */
+int av_nearer_q(AVRational q, AVRational q1, AVRational q2);
+
+/**
+ * Find the value in a list of rationals nearest a given reference rational.
+ *
+ * @param q Reference rational
+ * @param q_list Array of rationals terminated by `{0, 0}`
+ * @return Index of the nearest value found in the array
+ */
+int av_find_nearest_q_idx(AVRational q, const AVRational* q_list);
+
+/**
+ * Convert an AVRational to an IEEE 32-bit `float` expressed in fixed-point
+ * format.
+ *
+ * @param q Rational to be converted
+ * @return Equivalent floating-point value, expressed as an unsigned 32-bit
+ * integer.
+ * @note The returned value is platform-independent.
+ */
+uint32_t av_q2intfloat(AVRational q);
+
+/**
+ * Return the best rational such that a and b are multiples of it.
+ * If the resulting denominator is larger than max_den, return def.
+ */
+AVRational av_gcd_q(AVRational a, AVRational b, int max_den, AVRational def);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RATIONAL_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/samplefmt.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/samplefmt.h
new file mode 100644
index 000000000000..31cbca765978
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/samplefmt.h
@@ -0,0 +1,274 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SAMPLEFMT_H
+#define AVUTIL_SAMPLEFMT_H
+
+#include <stdint.h>
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ *
+ * @defgroup lavu_sampfmts Audio sample formats
+ *
+ * Audio sample format enumeration and related convenience functions.
+ * @{
+ */
+
+/**
+ * Audio sample formats
+ *
+ * - The data described by the sample format is always in native-endian order.
+ * Sample values can be expressed by native C types, hence the lack of a
+ * signed 24-bit sample format even though it is a common raw audio data format.
+ *
+ * - The floating-point formats are based on full volume being in the range
+ * [-1.0, 1.0]. Any values outside this range are beyond full volume level.
+ *
+ * - The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg
+ * (such as AVFrame in libavcodec) is as follows:
+ *
+ * @par
+ * For planar sample formats, each audio channel is in a separate data plane,
+ * and linesize is the buffer size, in bytes, for a single plane. All data
+ * planes must be the same size. For packed sample formats, only the first data
+ * plane is used, and samples for each channel are interleaved. In this case,
+ * linesize is the buffer size, in bytes, for that single plane.
+ *
+ */
+enum AVSampleFormat {
+ AV_SAMPLE_FMT_NONE = -1,
+ AV_SAMPLE_FMT_U8, ///< unsigned 8 bits
+ AV_SAMPLE_FMT_S16, ///< signed 16 bits
+ AV_SAMPLE_FMT_S32, ///< signed 32 bits
+ AV_SAMPLE_FMT_FLT, ///< float
+ AV_SAMPLE_FMT_DBL, ///< double
+
+ AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar
+ AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar
+ AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar
+ AV_SAMPLE_FMT_FLTP, ///< float, planar
+ AV_SAMPLE_FMT_DBLP, ///< double, planar
+ AV_SAMPLE_FMT_S64, ///< signed 64 bits
+ AV_SAMPLE_FMT_S64P, ///< signed 64 bits, planar
+
+ AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking
+ ///< dynamically
+};
+
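+/* Usage sketch (illustrative): querying a format with the helpers declared
+ * below.
+ *
+ *   int bytes  = av_get_bytes_per_sample(AV_SAMPLE_FMT_FLTP);   // 4
+ *   int planar = av_sample_fmt_is_planar(AV_SAMPLE_FMT_FLTP);   // 1
+ */
+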
+/**
+ * Return the name of sample_fmt, or NULL if sample_fmt is not
+ * recognized.
+ */
+const char* av_get_sample_fmt_name(enum AVSampleFormat sample_fmt);
+
+/**
+ * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE
+ * on error.
+ */
+enum AVSampleFormat av_get_sample_fmt(const char* name);
+
+/**
+ * Return the planar<->packed alternative form of the given sample format, or
+ * AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the
+ * requested planar/packed format, the format returned is the same as the
+ * input.
+ */
+enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt,
+ int planar);
+
+/**
+ * Get the packed alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in packed format, the format returned is
+ * the same as the input.
+ *
+ * @return the packed alternative form of the given sample format or
+ *         AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the planar alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in planar format, the format returned is
+ * the same as the input.
+ *
+ * @return the planar alternative form of the given sample format or
+ *         AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Generate a string corresponding to the sample format with
+ * sample_fmt, or a header if sample_fmt is negative.
+ *
+ * @param buf the buffer where to write the string
+ * @param buf_size the size of buf
+ * @param sample_fmt the number of the sample format to print the
+ * corresponding info string, or a negative value to print the
+ * corresponding header.
+ * @return the pointer to the filled buffer or NULL if sample_fmt is
+ * unknown or in case of other errors
+ */
+char* av_get_sample_fmt_string(char* buf, int buf_size,
+ enum AVSampleFormat sample_fmt);
+
+/**
+ * Return number of bytes per sample.
+ *
+ * @param sample_fmt the sample format
+ * @return number of bytes per sample or zero if unknown for the given
+ * sample format
+ */
+int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt);
+
+/**
+ * Check if the sample format is planar.
+ *
+ * @param sample_fmt the sample format to inspect
+ * @return 1 if the sample format is planar, 0 if it is interleaved
+ */
+int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the required buffer size for the given audio parameters.
+ *
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return required buffer size, or negative error code on failure
+ */
+int av_samples_get_buffer_size(int* linesize, int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
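+/* Usage sketch (illustrative): packed stereo S16 with 1024 samples per
+ * channel needs 1024 * 2 channels * 2 bytes = 4096 bytes (plus any padding
+ * implied by the default alignment).
+ *
+ *   int linesize;
+ *   int size = av_samples_get_buffer_size(&linesize, 2, 1024,
+ *                                         AV_SAMPLE_FMT_S16, 0);
+ */
+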
+/**
+ * @}
+ *
+ * @defgroup lavu_sampmanip Samples manipulation
+ *
+ * Functions that manipulate audio samples
+ * @{
+ */
+
+/**
+ * Fill plane data pointers and linesize for samples with sample
+ * format sample_fmt.
+ *
+ * The audio_data array is filled with the pointers to the samples data planes:
+ * for planar, set the start point of each channel's data within the buffer,
+ * for packed, set the start point of the entire buffer only.
+ *
+ * The value pointed to by linesize is set to the aligned size of each
+ * channel's data buffer for planar layout, or to the aligned size of the
+ * buffer for all channels for packed layout.
+ *
+ * The buffer in buf must be big enough to contain all the samples
+ * (use av_samples_get_buffer_size() to compute its minimum size),
+ * otherwise the audio_data pointers will point to invalid data.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param buf the pointer to a buffer containing the samples
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return minimum size in bytes required for the buffer on
+ * success, or a negative error code on failure
+ */
+int av_samples_fill_arrays(uint8_t** audio_data, int* linesize,
+ const uint8_t* buf, int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a samples buffer for nb_samples samples, and fill data pointers and
+ * linesize accordingly.
+ * The allocated samples buffer can be freed by using av_freep(&audio_data[0]).
+ * Allocated data will be initialized to silence.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize aligned size for audio buffer(s), may be NULL
+ * @param nb_channels number of audio channels
+ * @param nb_samples number of samples per channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return >=0 on success or a negative error code on failure
+ * @todo return the size of the allocated buffer in case of success at the next
+ * bump
+ * @see av_samples_fill_arrays()
+ * @see av_samples_alloc_array_and_samples()
+ */
+int av_samples_alloc(uint8_t** audio_data, int* linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align);
+
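+/* Usage sketch (illustrative): allocate a planar float buffer for 2 channels
+ * and release it with av_freep() from libavutil/mem.h, as described above.
+ *
+ *   uint8_t* data[2];
+ *   int      linesize;
+ *   if (av_samples_alloc(data, &linesize, 2, 1024, AV_SAMPLE_FMT_FLTP, 0) >= 0)
+ *     av_freep(&data[0]);
+ */
+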
+/**
+ * Allocate a data pointers array, samples buffer for nb_samples
+ * samples, and fill data pointers and linesize accordingly.
+ *
+ * This is the same as av_samples_alloc(), but also allocates the data
+ * pointers array.
+ *
+ * @see av_samples_alloc()
+ */
+int av_samples_alloc_array_and_samples(uint8_t*** audio_data, int* linesize,
+ int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt,
+ int align);
+
+/**
+ * Copy samples from src to dst.
+ *
+ * @param dst destination array of pointers to data planes
+ * @param src source array of pointers to data planes
+ * @param dst_offset offset in samples at which the data will be written to dst
+ * @param src_offset offset in samples at which the data will be read from src
+ * @param nb_samples number of samples to be copied
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_copy(uint8_t** dst, uint8_t* const* src, int dst_offset,
+ int src_offset, int nb_samples, int nb_channels,
+ enum AVSampleFormat sample_fmt);
+
+/**
+ * Fill an audio buffer with silence.
+ *
+ * @param audio_data array of pointers to data planes
+ * @param offset offset in samples at which to start filling
+ * @param nb_samples number of samples to fill
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_set_silence(uint8_t** audio_data, int offset, int nb_samples,
+ int nb_channels, enum AVSampleFormat sample_fmt);
+
+/**
+ * @}
+ * @}
+ */
+#endif /* AVUTIL_SAMPLEFMT_H */
diff --git a/media/ffvpx/libavutil/version.h b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/version.h
similarity index 66%
copy from media/ffvpx/libavutil/version.h
copy to dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/version.h
index 2df788e529ab..ae72840c0a06 100644
--- a/media/ffvpx/libavutil/version.h
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/version.h
@@ -53,68 +53,66 @@
* @{
*/
-#define AV_VERSION_INT(a, b, c) ((a)<<16 | (b)<<8 | (c))
-#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c
+#define AV_VERSION_INT(a, b, c) ((a) << 16 | (b) << 8 | (c))
+#define AV_VERSION_DOT(a, b, c) a##.##b##.##c
#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
/**
* Extract version components from the full ::AV_VERSION_INT int as returned
* by functions like ::avformat_version() and ::avcodec_version()
*/
#define AV_VERSION_MAJOR(a) ((a) >> 16)
-#define AV_VERSION_MINOR(a) (((a) & 0x00FF00) >> 8)
-#define AV_VERSION_MICRO(a) ((a) & 0xFF)
+#define AV_VERSION_MINOR(a) (((a)&0x00FF00) >> 8)
+#define AV_VERSION_MICRO(a) ((a)&0xFF)
/**
* @}
*/
/**
* @defgroup lavu_ver Version and Build diagnostics
*
* Macros and function useful to check at compiletime and at runtime
* which version of libavutil is in use.
*
* @{
*/
-#define LIBAVUTIL_VERSION_MAJOR 57
-#define LIBAVUTIL_VERSION_MINOR 40
+#define LIBAVUTIL_VERSION_MAJOR 58
+#define LIBAVUTIL_VERSION_MINOR 3
#define LIBAVUTIL_VERSION_MICRO 100
-#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
- LIBAVUTIL_VERSION_MINOR, \
- LIBAVUTIL_VERSION_MICRO)
-#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \
- LIBAVUTIL_VERSION_MINOR, \
- LIBAVUTIL_VERSION_MICRO)
-#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
+#define LIBAVUTIL_VERSION_INT \
+ AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_VERSION \
+ AV_VERSION(LIBAVUTIL_VERSION_MAJOR, LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
-#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
+#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
/**
* @defgroup lavu_depr_guards Deprecation Guards
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*
* @note, when bumping the major version it is recommended to manually
* disable each FF_API_* in its own commit instead of disabling them all
* at once through the bump. This improves the git bisect-ability of the change.
*
* @{
*/
-#define FF_API_D2STR (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_DECLARE_ALIGNED (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_COLORSPACE_NAME (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_AV_MALLOCZ_ARRAY (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_FIFO_PEEK2 (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_FIFO_OLD_API (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_OLD_CHANNEL_LAYOUT (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_AV_FOPEN_UTF8 (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_PKT_DURATION (LIBAVUTIL_VERSION_MAJOR < 58)
+#define FF_API_FIFO_PEEK2 (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_FIFO_OLD_API (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_OLD_CHANNEL_LAYOUT (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_AV_FOPEN_UTF8 (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_PKT_DURATION (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_REORDERED_OPAQUE (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_FRAME_PICTURE_NUMBER (LIBAVUTIL_VERSION_MAJOR < 59)
/**
* @}
diff --git a/dom/media/platforms/ffmpeg/ffmpeg59/moz.build b/dom/media/platforms/ffmpeg/ffmpeg60/moz.build
similarity index 100%
copy from dom/media/platforms/ffmpeg/ffmpeg59/moz.build
copy to dom/media/platforms/ffmpeg/ffmpeg60/moz.build
diff --git a/dom/media/platforms/ffmpeg/moz.build b/dom/media/platforms/ffmpeg/moz.build
index aa3b505dabb8..f87dd00623ce 100644
--- a/dom/media/platforms/ffmpeg/moz.build
+++ b/dom/media/platforms/ffmpeg/moz.build
@@ -15,6 +15,7 @@ DIRS += [
"ffmpeg57",
"ffmpeg58",
"ffmpeg59",
+ "ffmpeg60",
]
UNIFIED_SOURCES += [
diff --git a/media/ffvpx/README_MOZILLA b/media/ffvpx/README_MOZILLA
index b90d6020b50a..fdd0bc3531aa 100644
--- a/media/ffvpx/README_MOZILLA
+++ b/media/ffvpx/README_MOZILLA
@@ -32,6 +32,14 @@ Then, make sure the files:
include conditional compilation directives, by probably reverting them (or
reverting and adjusting them if new codecs are added).
+## Add headers for a new major ffmpeg version
+
+If a new major version of ffmpeg is being imported in the tree, it's necessary
+to vendor the new ffmpeg headers in `dom/media/platforms/ffmpeg/ffmpegxx` where
+xx is the new ffmpeg major version number, and to modify the dynamic linker
+wrapper in
+`dom/media/platforms/dom/media/platforms/ffmpeg/{FFmpegLibWrapper.cpp,FFmpegRungtimeLinker.cpp}` with the new version and the new API in this new version.
+
## `config.h` generation
Configuration files are generated as follow using the configure script (here
diff --git a/media/ffvpx/changes.patch b/media/ffvpx/changes.patch
index da7561f25489..af2aa2084940 100644
--- a/media/ffvpx/changes.patch
+++ b/media/ffvpx/changes.patch
@@ -46,29 +46,6 @@ index 9fb8d0a..97ad3b9 100644
rgba_color[0] = rgba >> 24;
rgba_color[1] = rgba >> 16;
rgba_color[2] = rgba >> 8;
-diff --git a/media/ffvpx/libavutil/hwcontext_vaapi.c b/media/ffvpx/libavutil/hwcontext_vaapi.c
---- a/media/ffvpx/libavutil/hwcontext_vaapi.c
-+++ b/media/ffvpx/libavutil/hwcontext_vaapi.c
-@@ -39,17 +39,19 @@
- # include <unistd.h>
- #endif
-
-
- #include "avassert.h"
- #include "buffer.h"
- #include "common.h"
- #include "hwcontext.h"
-+#if CONFIG_LIBDRM
- #include "hwcontext_drm.h"
-+#endif
- #include "hwcontext_internal.h"
- #include "hwcontext_vaapi.h"
- #include "mem.h"
- #include "pixdesc.h"
- #include "pixfmt.h"
-
-
- typedef struct VAAPIDevicePriv {
diff -up media/ffvpx/libavutil/fftime.h media/ffvpx/libavutil/fftime.h
--- media/ffvpx/libavutil/fftime.h 2021-12-06 14:51:40.378642713 +0100
+++ media/ffvpx/libavutil/fftime.h 2021-12-06 14:51:54.618098212 +0100
diff --git a/media/ffvpx/libavcodec/allcodecs.c b/media/ffvpx/libavcodec/allcodecs.c
index 4f1d66cb0c76..e593ad19afc1 100644
--- a/media/ffvpx/libavcodec/allcodecs.c
+++ b/media/ffvpx/libavcodec/allcodecs.c
@@ -154,6 +154,7 @@ extern const FFCodec ff_h264_decoder;
extern const FFCodec ff_h264_crystalhd_decoder;
extern const FFCodec ff_h264_v4l2m2m_decoder;
extern const FFCodec ff_h264_mediacodec_decoder;
+extern const FFCodec ff_h264_mediacodec_encoder;
extern const FFCodec ff_h264_mmal_decoder;
extern const FFCodec ff_h264_qsv_decoder;
extern const FFCodec ff_h264_rkmpp_decoder;
@@ -286,6 +287,7 @@ extern const FFCodec ff_r210_decoder;
extern const FFCodec ff_rasc_decoder;
extern const FFCodec ff_rawvideo_encoder;
extern const FFCodec ff_rawvideo_decoder;
+extern const FFCodec ff_rka_decoder;
extern const FFCodec ff_rl2_decoder;
extern const FFCodec ff_roq_encoder;
extern const FFCodec ff_roq_decoder;
@@ -537,6 +539,7 @@ extern const FFCodec ff_twinvq_decoder;
extern const FFCodec ff_vmdaudio_decoder;
extern const FFCodec ff_vorbis_encoder;
extern const FFCodec ff_vorbis_decoder;
+extern const FFCodec ff_wavarc_decoder;
extern const FFCodec ff_wavpack_encoder;
extern const FFCodec ff_wavpack_decoder;
extern const FFCodec ff_wmalossless_decoder;
@@ -619,14 +622,16 @@ extern const FFCodec ff_pcm_vidc_encoder;
extern const FFCodec ff_pcm_vidc_decoder;
/* DPCM codecs */
+extern const FFCodec ff_cbd2_dpcm_decoder;
extern const FFCodec ff_derf_dpcm_decoder;
extern const FFCodec ff_gremlin_dpcm_decoder;
extern const FFCodec ff_interplay_dpcm_decoder;
extern const FFCodec ff_roq_dpcm_encoder;
extern const FFCodec ff_roq_dpcm_decoder;
extern const FFCodec ff_sdx2_dpcm_decoder;
extern const FFCodec ff_sol_dpcm_decoder;
extern const FFCodec ff_xan_dpcm_decoder;
+extern const FFCodec ff_wady_dpcm_decoder;
/* ADPCM codecs */
extern const FFCodec ff_adpcm_4xm_decoder;
@@ -692,6 +697,7 @@ extern const FFCodec ff_adpcm_thp_decoder;
extern const FFCodec ff_adpcm_thp_le_decoder;
extern const FFCodec ff_adpcm_vima_decoder;
extern const FFCodec ff_adpcm_xa_decoder;
+extern const FFCodec ff_adpcm_xmd_decoder;
extern const FFCodec ff_adpcm_yamaha_encoder;
extern const FFCodec ff_adpcm_yamaha_decoder;
extern const FFCodec ff_adpcm_zork_decoder;
@@ -827,22 +833,26 @@ extern const FFCodec ff_libaom_av1_decoder;
/* hwaccel hooks only, so prefer external decoders */
extern const FFCodec ff_av1_decoder;
extern const FFCodec ff_av1_cuvid_decoder;
+extern const FFCodec ff_av1_mediacodec_decoder;
+extern const FFCodec ff_av1_nvenc_encoder;
extern const FFCodec ff_av1_qsv_decoder;
extern const FFCodec ff_av1_qsv_encoder;
+extern const FFCodec ff_av1_amf_encoder;
extern const FFCodec ff_libopenh264_encoder;
extern const FFCodec ff_libopenh264_decoder;
extern const FFCodec ff_h264_amf_encoder;
extern const FFCodec ff_h264_cuvid_decoder;
extern const FFCodec ff_h264_mf_encoder;
extern const FFCodec ff_h264_nvenc_encoder;
extern const FFCodec ff_h264_omx_encoder;
extern const FFCodec ff_h264_qsv_encoder;
extern const FFCodec ff_h264_v4l2m2m_encoder;
extern const FFCodec ff_h264_vaapi_encoder;
extern const FFCodec ff_h264_videotoolbox_encoder;
extern const FFCodec ff_hevc_amf_encoder;
extern const FFCodec ff_hevc_cuvid_decoder;
extern const FFCodec ff_hevc_mediacodec_decoder;
+extern const FFCodec ff_hevc_mediacodec_encoder;
extern const FFCodec ff_hevc_mf_encoder;
extern const FFCodec ff_hevc_nvenc_encoder;
extern const FFCodec ff_hevc_qsv_encoder;
@@ -876,6 +886,12 @@ extern const FFCodec ff_vp9_qsv_decoder;
extern const FFCodec ff_vp9_vaapi_encoder;
extern const FFCodec ff_vp9_qsv_encoder;
+// null codecs
+extern const FFCodec ff_vnull_decoder;
+extern const FFCodec ff_vnull_encoder;
+extern const FFCodec ff_anull_decoder;
+extern const FFCodec ff_anull_encoder;
+
// The iterate API is not usable with ossfuzz due to the excessive size of binaries created
#if CONFIG_OSSFUZZ
const FFCodec * codec_list[] = {
diff --git a/media/ffvpx/libavcodec/av1_parser.c b/media/ffvpx/libavcodec/av1_parser.c
index 4cbd7408a026..14dae92fe9f7 100644
--- a/media/ffvpx/libavcodec/av1_parser.c
+++ b/media/ffvpx/libavcodec/av1_parser.c
@@ -162,8 +162,11 @@ static int av1_parser_parse(AVCodecParserContext *ctx,
avctx->color_trc = (enum AVColorTransferCharacteristic) color->transfer_characteristics;
avctx->color_range = color->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
- if (avctx->framerate.num)
- avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
+ if (seq->timing_info_present_flag) {
+ const AV1RawTimingInfo *timing = &seq->timing_info;
+ av_reduce(&avctx->framerate.den, &avctx->framerate.num,
+ timing->num_units_in_display_tick, timing->time_scale, INT_MAX);
+ }
end:
ff_cbs_fragment_reset(td);
diff --git a/media/ffvpx/libavcodec/av1dec.c b/media/ffvpx/libavcodec/av1dec.c
index 0c24eac842c1..d83c902f1f82 100644
--- a/media/ffvpx/libavcodec/av1dec.c
+++ b/media/ffvpx/libavcodec/av1dec.c
@@ -194,7 +194,7 @@ static uint8_t get_shear_params_valid(AV1DecContext *s, int idx)
}
/**
-* update gm type/params, since cbs already implemented part of this funcation,
+* update gm type/params, since cbs already implemented part of this function,
* so we don't need to full implement spec.
*/
static void global_motion_params(AV1DecContext *s)
@@ -567,7 +567,7 @@ static int get_pixel_format(AVCodecContext *avctx)
* implemented in the future, need remove this check.
*/
if (!avctx->hwaccel) {
- av_log(avctx, AV_LOG_ERROR, "Your platform doesn't suppport"
+ av_log(avctx, AV_LOG_ERROR, "Your platform doesn't support"
" hardware accelerated AV1 decoding.\n");
return AVERROR(ENOSYS);
}
diff --git a/media/ffvpx/libavcodec/avcodec.c b/media/ffvpx/libavcodec/avcodec.c
index a85d3c230932..fb1362290f5c 100644
--- a/media/ffvpx/libavcodec/avcodec.c
+++ b/media/ffvpx/libavcodec/avcodec.c
@@ -266,7 +266,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
goto free_and_end;
}
- avctx->frame_number = 0;
+ avctx->frame_num = 0;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id);
if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) &&
@@ -349,11 +354,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
ret = AVERROR(EINVAL);
goto free_and_end;
}
-
-#if FF_API_AVCTX_TIMEBASE
- if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
- avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
-#endif
}
if (codec->priv_class)
av_assert0(*(const AVClass **)avctx->priv_data == codec->priv_class);
@@ -386,9 +386,6 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
av_frame_unref(avci->recon_frame);
} else {
av_packet_unref(avci->last_pkt_props);
- while (av_fifo_read(avci->pkt_props, avci->last_pkt_props, 1) >= 0)
- av_packet_unref(avci->last_pkt_props);
-
av_packet_unref(avci->in_pkt);
avctx->pts_correction_last_pts =
@@ -453,13 +450,6 @@ av_cold int avcodec_close(AVCodecContext *avctx)
av_freep(&avci->byte_buffer);
av_frame_free(&avci->buffer_frame);
av_packet_free(&avci->buffer_pkt);
- if (avci->pkt_props) {
- while (av_fifo_can_read(avci->pkt_props)) {
- av_packet_unref(avci->last_pkt_props);
- av_fifo_read(avci->pkt_props, avci->last_pkt_props, 1);
- }
- av_fifo_freep2(&avci->pkt_props);
- }
av_packet_free(&avci->last_pkt_props);
av_packet_free(&avci->in_pkt);
diff --git a/media/ffvpx/libavcodec/avcodec.h b/media/ffvpx/libavcodec/avcodec.h
index 3edd8e263650..9a0fe97cad10 100644
--- a/media/ffvpx/libavcodec/avcodec.h
+++ b/media/ffvpx/libavcodec/avcodec.h
@@ -232,44 +232,77 @@ typedef struct RcOverride{
*/
#define AV_CODEC_FLAG_DROPCHANGED (1 << 5)
/**
- * Request the encoder to output reconstructed frames, i.e. frames that would be
- * produced by decoding the encoded bistream. These frames may be retrieved by
- * calling avcodec_receive_frame() immediately after a successful call to
+ * Request the encoder to output reconstructed frames, i.e.\ frames that would
+ * be produced by decoding the encoded bistream. These frames may be retrieved
+ * by calling avcodec_receive_frame() immediately after a successful call to
* avcodec_receive_packet().
*
* Should only be used with encoders flagged with the
- * AV_CODEC_CAP_ENCODER_RECON_FRAME capability.
+ * @ref AV_CODEC_CAP_ENCODER_RECON_FRAME capability.
*/
#define AV_CODEC_FLAG_RECON_FRAME (1 << 6)
+/**
+ * @par decoding
+ * Request the decoder to propagate each packets AVPacket.opaque and
+ * AVPacket.opaque_ref to its corresponding output AVFrame.
+ *
+ * @par encoding:
+ * Request the encoder to propagate each frame's AVFrame.opaque and
+ * AVFrame.opaque_ref values to its corresponding output AVPacket.
+ *
+ * @par
+ * May only be set on encoders that have the
+ * @ref AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability flag.
+ *
+ * @note
+ * While in typical cases one input frame produces exactly one output packet
+ * (perhaps after a delay), in general the mapping of frames to packets is
+ * M-to-N, so
+ * - Any number of input frames may be associated with any given output packet.
+ * This includes zero - e.g. some encoders may output packets that carry only
+ * metadata about the whole stream.
+ * - A given input frame may be associated with any number of output packets.
+ * Again this includes zero - e.g. some encoders may drop frames under certain
+ * conditions.
+ * .
+ * This implies that when using this flag, the caller must NOT assume that
+ * - a given input frame's opaques will necessarily appear on some output packet;
+ * - every output packet will have some non-NULL opaque value.
+ * .
+ * When an output packet contains multiple frames, the opaque values will be
+ * taken from the first of those.
+ *
+ * @note
+ * The converse holds for decoders, with frames and packets switched.
+ */
+#define AV_CODEC_FLAG_COPY_OPAQUE (1 << 7)
+/**
+ * Signal to the encoder that the values of AVFrame.duration are valid and
+ * should be used (typically for transferring them to output packets).
+ *
+ * If this flag is not set, frame durations are ignored.
+ */
+#define AV_CODEC_FLAG_FRAME_DURATION (1 << 8)
/**
* Use internal 2pass ratecontrol in first pass mode.
*/
#define AV_CODEC_FLAG_PASS1 (1 << 9)
/**
* Use internal 2pass ratecontrol in second pass mode.
*/
#define AV_CODEC_FLAG_PASS2 (1 << 10)
/**
* loop filter.
*/
#define AV_CODEC_FLAG_LOOP_FILTER (1 << 11)
/**
* Only decode/encode grayscale.
*/
#define AV_CODEC_FLAG_GRAY (1 << 13)
/**
* error[?] variables will be set during encoding.
*/
#define AV_CODEC_FLAG_PSNR (1 << 15)
-#if FF_API_FLAG_TRUNCATED
-/**
- * Input bitstream might be truncated at a random location
- * instead of only at frame boundaries.
- *
- * @deprecated use codec parsers for packetizing input
- */
-#define AV_CODEC_FLAG_TRUNCATED (1 << 16)
-#endif
/**
* Use interlaced DCT.
*/
@@ -310,11 +343,6 @@ typedef struct RcOverride{
*/
#define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3)
-/**
- * timecode is in drop frame format. DEPRECATED!!!!
- */
-#define AV_CODEC_FLAG2_DROP_FRAME_TIMECODE (1 << 13)
-
/**
* Input bitstream might be truncated at a packet boundaries
* instead of only at frame boundaries.
@@ -515,8 +543,7 @@ typedef struct AVCodecContext {
* (fixed_vop_rate == 0 implies that it is different from the framerate)
*
* - encoding: MUST be set by user.
- * - decoding: the use of this field for decoding is deprecated.
- * Use framerate instead.
+ * - decoding: unused.
*/
AVRational time_base;
@@ -1034,16 +1061,20 @@ typedef struct AVCodecContext {
*/
int frame_size;
+#if FF_API_AVCTX_FRAME_NUMBER
/**
* Frame counter, set by libavcodec.
*
* - decoding: total number of frames returned from the decoder so far.
* - encoding: total number of frames passed to the encoder so far.
*
* @note the counter is not incremented if encoding/decoding resulted in
* an error.
+ * @deprecated use frame_num instead
*/
+ attribute_deprecated
int frame_number;
+#endif
/**
* number of bytes per packet if constant and known or 0
@@ -1350,16 +1381,21 @@ typedef struct AVCodecContext {
*/
int err_recognition;
+#if FF_API_REORDERED_OPAQUE
/**
* opaque 64-bit number (generally a PTS) that will be reordered and
* output in AVFrame.reordered_opaque
* - encoding: Set by libavcodec to the reordered_opaque of the input
* frame corresponding to the last returned packet. Only
* supported by encoders with the
* AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability.
* - decoding: Set by user.
+ *
+ * @deprecated Use AV_CODEC_FLAG_COPY_OPAQUE instead
*/
+ attribute_deprecated
int64_t reordered_opaque;
+#endif
/**
* Hardware accelerator in use
@@ -1484,27 +1520,6 @@ typedef struct AVCodecContext {
*/
int active_thread_type;
-#if FF_API_THREAD_SAFE_CALLBACKS
- /**
- * Set by the client if its custom get_buffer() callback can be called
- * synchronously from another thread, which allows faster multithreaded decoding.
- * draw_horiz_band() will be called from other threads regardless of this setting.
- * Ignored if the default get_buffer() is used.
- * - encoding: Set by user.
- * - decoding: Set by user.
- *
- * @deprecated the custom get_buffer2() callback should always be
- * thread-safe. Thread-unsafe get_buffer2() implementations will be
- * invalid starting with LIBAVCODEC_VERSION_MAJOR=60; in other words,
- * libavcodec will behave as if this field was always set to 1.
- * Callers that want to be forward compatible with future libavcodec
- * versions should wrap access to this field in
- * `#if LIBAVCODEC_VERSION_MAJOR < 60`
- */
- attribute_deprecated
- int thread_safe_callbacks;
-#endif
-
/**
* The codec may call this to execute several independent things.
* It will return only after finishing all tasks.
@@ -1639,6 +1654,7 @@ typedef struct AVCodecContext {
#define FF_PROFILE_HEVC_MAIN_10 2
#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3
#define FF_PROFILE_HEVC_REXT 4
+#define FF_PROFILE_HEVC_SCC 9
#define FF_PROFILE_VVC_MAIN_10 1
#define FF_PROFILE_VVC_MAIN_10_444 33
@@ -1805,17 +1821,6 @@ typedef struct AVCodecContext {
*/
int seek_preroll;
-#if FF_API_DEBUG_MV
- /**
- * @deprecated unused
- */
- attribute_deprecated
- int debug_mv;
-#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames
-#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames
-#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames
-#endif
-
/**
* custom intra quantization matrix
* - encoding: Set by user, can be NULL.
@@ -1882,15 +1887,6 @@ typedef struct AVCodecContext {
*/
AVBufferRef *hw_frames_ctx;
-#if FF_API_SUB_TEXT_FORMAT
- /**
- * @deprecated unused
- */
- attribute_deprecated
- int sub_text_format;
-#define FF_SUB_TEXT_FMT_ASS 0
-#endif
-
/**
* Audio only. The amount of padding (in samples) appended by the encoder to
* the end of the audio. I.e. this number of decoded samples must be
@@ -2057,6 +2053,17 @@ typedef struct AVCodecContext {
* The decoder can then override during decoding as needed.
*/
AVChannelLayout ch_layout;
+
+ /**
+ * Frame counter, set by libavcodec.
+ *
+ * - decoding: total number of frames returned from the decoder so far.
+ * - encoding: total number of frames passed to the encoder so far.
+ *
+ * @note the counter is not incremented if encoding/decoding resulted in
+ * an error.
+ */
+ int64_t frame_num;
} AVCodecContext;
/**
@@ -2253,6 +2260,22 @@ typedef struct AVHWAccel {
*/
#define AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH (1 << 2)
+/**
+ * Some hardware decoders (namely nvdec) can either output direct decoder
+ * surfaces, or make an on-device copy and return said copy.
+ * There is a hard limit on how many decoder surfaces there can be, and it
+ * cannot be accurately guessed ahead of time.
+ * For some processing chains, this can be okay, but others will run into the
+ * limit and in turn produce very confusing errors that require fine tuning of
+ * more or less obscure options by the user, or in extreme cases cannot be
+ * resolved at all without inserting an avfilter that forces a copy.
+ *
+ * Thus, the hwaccel will by default make a copy for safety and resilience.
+ * If a users really wants to minimize the amount of copies, they can set this
+ * flag and ensure their processing chain does not exhaust the surface pool.
+ */
+#define AV_HWACCEL_FLAG_UNSAFE_OUTPUT (1 << 3)
+
/**
* @}
*/
@@ -2358,14 +2381,6 @@ void avcodec_free_context(AVCodecContext **avctx);
*/
const AVClass *avcodec_get_class(void);
-#if FF_API_GET_FRAME_CLASS
-/**
- * @deprecated This function should not be used.
- */
-attribute_deprecated
-const AVClass *avcodec_get_frame_class(void);
-#endif
-
/**
* Get the AVClass for AVSubtitleRect. It can be used in combination with
* AV_OPT_SEARCH_FAKE_OBJ for examining options.
@@ -2552,8 +2567,7 @@ enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos);
* @param[in] avpkt The input AVPacket containing the input buffer.
*/
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
- int *got_sub_ptr,
- AVPacket *avpkt);
+ int *got_sub_ptr, const AVPacket *avpkt);
/**
* Supply raw packet data as input to a decoder.
@@ -2589,95 +2603,94 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
* still has frames buffered, it will return them after sending
* a flush packet.
*
- * @return 0 on success, otherwise negative error code:
- * AVERROR(EAGAIN): input is not accepted in the current state - user
- * must read output with avcodec_receive_frame() (once
- * all output is read, the packet should be resent, and
- * the call will not fail with EAGAIN).
- * AVERROR_EOF: the decoder has been flushed, and no new packets can
- * be sent to it (also returned if more than 1 flush
- * packet is sent)
- * AVERROR(EINVAL): codec not opened, it is an encoder, or requires flush
- * AVERROR(ENOMEM): failed to add packet to internal queue, or similar
- * other errors: legitimate decoding errors
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) input is not accepted in the current state - user
+ * must read output with avcodec_receive_frame() (once
+ * all output is read, the packet should be resent,
+ * and the call will not fail with EAGAIN).
+ * @retval AVERROR_EOF the decoder has been flushed, and no new packets can be
+ * sent to it (also returned if more than 1 flush
+ * packet is sent)
+ * @retval AVERROR(EINVAL) codec not opened, it is an encoder, or requires flush
+ * @retval AVERROR(ENOMEM) failed to add packet to internal queue, or similar
+ * @retval "another negative error code" legitimate decoding errors
*/
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt);
/**
* Return decoded output data from a decoder or encoder (when the
* AV_CODEC_FLAG_RECON_FRAME flag is used).
*
* @param avctx codec context
* @param frame This will be set to a reference-counted video or audio
* frame (depending on the decoder type) allocated by the
* codec. Note that the function will always call
* av_frame_unref(frame) before doing anything else.
*
- * @return
- * 0: success, a frame was returned
- * AVERROR(EAGAIN): output is not available in this state - user must try
- * to send new input
- * AVERROR_EOF: the codec has been fully flushed, and there will be
- * no more output frames
- * AVERROR(EINVAL): codec not opened, or it is an encoder without
- * the AV_CODEC_FLAG_RECON_FRAME flag enabled
- * AVERROR_INPUT_CHANGED: current decoded frame has changed parameters
- * with respect to first decoded frame. Applicable
- * when flag AV_CODEC_FLAG_DROPCHANGED is set.
- * other negative values: legitimate decoding errors
+ * @retval 0 success, a frame was returned
+ * @retval AVERROR(EAGAIN) output is not available in this state - user must
+ * try to send new input
+ * @retval AVERROR_EOF the codec has been fully flushed, and there will be
+ * no more output frames
+ * @retval AVERROR(EINVAL) codec not opened, or it is an encoder without the
+ * AV_CODEC_FLAG_RECON_FRAME flag enabled
+ * @retval AVERROR_INPUT_CHANGED current decoded frame has changed parameters with
+ * respect to first decoded frame. Applicable when flag
+ * AV_CODEC_FLAG_DROPCHANGED is set.
+ * @retval "other negative error code" legitimate decoding errors
*/
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame);
/**
* Supply a raw video or audio frame to the encoder. Use avcodec_receive_packet()
* to retrieve buffered output packets.
*
* @param avctx codec context
* @param[in] frame AVFrame containing the raw audio or video frame to be encoded.
* Ownership of the frame remains with the caller, and the
* encoder will not write to the frame. The encoder may create
* a reference to the frame data (or copy it if the frame is
* not reference-counted).
* It can be NULL, in which case it is considered a flush
* packet. This signals the end of the stream. If the encoder
* still has packets buffered, it will return them after this
* call. Once flushing mode has been entered, additional flush
* packets are ignored, and sending frames will return
* AVERROR_EOF.
*
* For audio:
* If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
* can have any number of samples.
* If it is not set, frame->nb_samples must be equal to
* avctx->frame_size for all frames except the last.
* The final frame may be smaller than avctx->frame_size.
- * @return 0 on success, otherwise negative error code:
- * AVERROR(EAGAIN): input is not accepted in the current state - user
- * must read output with avcodec_receive_packet() (once
- * all output is read, the packet should be resent, and
- * the call will not fail with EAGAIN).
- * AVERROR_EOF: the encoder has been flushed, and no new frames can
- * be sent to it
- * AVERROR(EINVAL): codec not opened, it is a decoder, or requires flush
- * AVERROR(ENOMEM): failed to add packet to internal queue, or similar
- * other errors: legitimate encoding errors
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) input is not accepted in the current state - user must
+ * read output with avcodec_receive_packet() (once all
+ * output is read, the packet should be resent, and the
+ * call will not fail with EAGAIN).
+ * @retval AVERROR_EOF the encoder has been flushed, and no new frames can
+ * be sent to it
+ * @retval AVERROR(EINVAL) codec not opened, it is a decoder, or requires flush
+ * @retval AVERROR(ENOMEM) failed to add packet to internal queue, or similar
+ * @retval "another negative error code" legitimate encoding errors
*/
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame);
/**
* Read encoded data from the encoder.
*
* @param avctx codec context
* @param avpkt This will be set to a reference-counted packet allocated by the
* encoder. Note that the function will always call
* av_packet_unref(avpkt) before doing anything else.
- * @return 0 on success, otherwise negative error code:
- * AVERROR(EAGAIN): output is not available in the current state - user
- * must try to send input
- * AVERROR_EOF: the encoder has been fully flushed, and there will be
- * no more output packets
- * AVERROR(EINVAL): codec not opened, or it is a decoder
- * other errors: legitimate encoding errors
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) output is not available in the current state - user must
+ * try to send input
+ * @retval AVERROR_EOF the encoder has been fully flushed, and there will be no
+ * more output packets
+ * @retval AVERROR(EINVAL) codec not opened, or it is a decoder
+ * @retval "another negative error code" legitimate encoding errors
*/
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt);
diff --git a/media/ffvpx/libavcodec/avcodec.symbols b/media/ffvpx/libavcodec/avcodec.symbols
index 491ce320c215..b15862fa506a 100644
--- a/media/ffvpx/libavcodec/avcodec.symbols
+++ b/media/ffvpx/libavcodec/avcodec.symbols
@@ -63,7 +63,6 @@ avcodec_find_encoder_by_name
avcodec_flush_buffers
avcodec_free_context
avcodec_get_class
-avcodec_get_frame_class
avcodec_get_hw_config
avcodec_get_name
avcodec_get_subtitle_rect_class
diff --git a/media/ffvpx/libavcodec/avpacket.c b/media/ffvpx/libavcodec/avpacket.c
index bcbc4166cb40..5fef65e97aef 100644
--- a/media/ffvpx/libavcodec/avpacket.c
+++ b/media/ffvpx/libavcodec/avpacket.c
@@ -316,7 +316,7 @@ uint8_t *av_packet_pack_dictionary(AVDictionary *dict, size_t *size)
const AVDictionaryEntry *t = NULL;
size_t total_length = 0;
- while ((t = av_dict_get(dict, "", t, AV_DICT_IGNORE_SUFFIX))) {
+ while ((t = av_dict_iterate(dict, t))) {
for (int i = 0; i < 2; i++) {
const char *str = i ? t->value : t->key;
const size_t len = strlen(str) + 1;
diff --git a/media/ffvpx/libavcodec/bitstream_filters.c b/media/ffvpx/libavcodec/bitstream_filters.c
index a3bebefe5f95..e8216819caec 100644
--- a/media/ffvpx/libavcodec/bitstream_filters.c
+++ b/media/ffvpx/libavcodec/bitstream_filters.c
@@ -43,6 +43,7 @@ extern const FFBitStreamFilter ff_hapqa_extract_bsf;
extern const FFBitStreamFilter ff_hevc_metadata_bsf;
extern const FFBitStreamFilter ff_hevc_mp4toannexb_bsf;
extern const FFBitStreamFilter ff_imx_dump_header_bsf;
+extern const FFBitStreamFilter ff_media100_to_mjpegb_bsf;
extern const FFBitStreamFilter ff_mjpeg2jpeg_bsf;
extern const FFBitStreamFilter ff_mjpega_dump_header_bsf;
extern const FFBitStreamFilter ff_mp3_header_decompress_bsf;
diff --git a/media/ffvpx/libavcodec/cbs_av1_syntax_template.c b/media/ffvpx/libavcodec/cbs_av1_syntax_template.c
index d98d3d42dea3..e95925a493e1 100644
--- a/media/ffvpx/libavcodec/cbs_av1_syntax_template.c
+++ b/media/ffvpx/libavcodec/cbs_av1_syntax_template.c
@@ -1862,11 +1862,8 @@ static int FUNC(metadata_hdr_mdcv)(CodedBitstreamContext *ctx, RWContext *rw,
fb(16, white_point_chromaticity_x);
fb(16, white_point_chromaticity_y);
- fc(32, luminance_max, 1, MAX_UINT_BITS(32));
- // luminance_min must be lower than luminance_max. Convert luminance_max from
- // 24.8 fixed point to 18.14 fixed point in order to compare them.
- fc(32, luminance_min, 0, FFMIN(((uint64_t)current->luminance_max << 6) - 1,
- MAX_UINT_BITS(32)));
+ fb(32, luminance_max);
+ fb(32, luminance_min);
return 0;
}
diff --git a/media/ffvpx/libavcodec/codec.h b/media/ffvpx/libavcodec/codec.h
index 77a1a3f5a296..3b1995bcfefa 100644
--- a/media/ffvpx/libavcodec/codec.h
+++ b/media/ffvpx/libavcodec/codec.h
@@ -50,12 +50,6 @@
* avcodec_default_get_buffer2 or avcodec_default_get_encode_buffer.
*/
#define AV_CODEC_CAP_DR1 (1 << 1)
-#if FF_API_FLAG_TRUNCATED
-/**
- * @deprecated Use parsers to always send proper frames.
- */
-#define AV_CODEC_CAP_TRUNCATED (1 << 3)
-#endif
/**
* Encoder or decoder requires flushing with NULL input at the end in order to
* give the complete and correct output.
@@ -125,53 +119,39 @@
* multithreading-capable external libraries.
*/
#define AV_CODEC_CAP_OTHER_THREADS (1 << 15)
-#if FF_API_AUTO_THREADS
-#define AV_CODEC_CAP_AUTO_THREADS AV_CODEC_CAP_OTHER_THREADS
-#endif
/**
* Audio encoder supports receiving a different number of samples in each call.
*/
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16)
/**
* Decoder is not a preferred choice for probing.
* This indicates that the decoder is not a good choice for probing.
* It could for example be an expensive to spin up hardware decoder,
* or it could simply not provide a lot of useful information about
* the stream.
* A decoder marked with this flag should only be used as last resort
* choice for probing.
*/
#define AV_CODEC_CAP_AVOID_PROBING (1 << 17)
-#if FF_API_UNUSED_CODEC_CAPS
-/**
- * Deprecated and unused. Use AVCodecDescriptor.props instead
- */
-#define AV_CODEC_CAP_INTRA_ONLY 0x40000000
-/**
- * Deprecated and unused. Use AVCodecDescriptor.props instead
- */
-#define AV_CODEC_CAP_LOSSLESS 0x80000000
-#endif
-
/**
* Codec is backed by a hardware implementation. Typically used to
* identify a non-hwaccel hardware decoder. For information about hwaccels, use
* avcodec_get_hw_config() instead.
*/
#define AV_CODEC_CAP_HARDWARE (1 << 18)
/**
* Codec is potentially backed by a hardware implementation, but not
* necessarily. This is used instead of AV_CODEC_CAP_HARDWARE, if the
* implementation provides some sort of internal fallback.
*/
#define AV_CODEC_CAP_HYBRID (1 << 19)
/**
- * This codec takes the reordered_opaque field from input AVFrames
- * and returns it in the corresponding field in AVCodecContext after
- * encoding.
+ * This encoder can reorder user opaque values from input AVFrames and return
+ * them with corresponding output packets.
+ * @see AV_CODEC_FLAG_COPY_OPAQUE
*/
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE (1 << 20)
diff --git a/media/ffvpx/libavcodec/codec_desc.c b/media/ffvpx/libavcodec/codec_desc.c
index 24a0433dbac9..199f62df15b1 100644
--- a/media/ffvpx/libavcodec/codec_desc.c
+++ b/media/ffvpx/libavcodec/codec_desc.c
@@ -2536,6 +2536,13 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Acorn Replay"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
+ {
+ .id = AV_CODEC_ID_ADPCM_XMD,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_xmd",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Konami XMD"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
/* AMR */
{
@@ -2619,6 +2626,20 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("DPCM Xilam DERF"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
+ {
+ .id = AV_CODEC_ID_WADY_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wady_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Marble WADY"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CBD2_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "cbd2_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Cuberoot-Delta-Exact"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
/* audio codecs */
{
@@ -3332,6 +3353,20 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("FTR Voice"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
+ {
+ .id = AV_CODEC_ID_WAVARC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wavarc",
+ .long_name = NULL_IF_CONFIG_SMALL("Waveform Archiver"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_RKA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "rka",
+ .long_name = NULL_IF_CONFIG_SMALL("RKA (RK Audio)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
+ },
/* subtitle codecs */
{
@@ -3604,6 +3639,18 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("AVFrame to AVPacket passthrough"),
.props = AV_CODEC_PROP_LOSSLESS,
},
+ {
+ .id = AV_CODEC_ID_VNULL,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vnull",
+ .long_name = NULL_IF_CONFIG_SMALL("Null video codec"),
+ },
+ {
+ .id = AV_CODEC_ID_ANULL,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "anull",
+ .long_name = NULL_IF_CONFIG_SMALL("Null audio codec"),
+ },
};
static int descriptor_compare(const void *key, const void *member)
diff --git a/media/ffvpx/libavcodec/codec_id.h b/media/ffvpx/libavcodec/codec_id.h
index f436a2b624f4..89a4a0cb893a 100644
--- a/media/ffvpx/libavcodec/codec_id.h
+++ b/media/ffvpx/libavcodec/codec_id.h
@@ -413,23 +413,26 @@ enum AVCodecID {
AV_CODEC_ID_ADPCM_IMA_CUNNING,
AV_CODEC_ID_ADPCM_IMA_MOFLEX,
AV_CODEC_ID_ADPCM_IMA_ACORN,
+ AV_CODEC_ID_ADPCM_XMD,
/* AMR */
AV_CODEC_ID_AMR_NB = 0x12000,
AV_CODEC_ID_AMR_WB,
/* RealAudio codecs*/
AV_CODEC_ID_RA_144 = 0x13000,
AV_CODEC_ID_RA_288,
/* various DPCM codecs */
AV_CODEC_ID_ROQ_DPCM = 0x14000,
AV_CODEC_ID_INTERPLAY_DPCM,
AV_CODEC_ID_XAN_DPCM,
AV_CODEC_ID_SOL_DPCM,
AV_CODEC_ID_SDX2_DPCM,
AV_CODEC_ID_GREMLIN_DPCM,
AV_CODEC_ID_DERF_DPCM,
+ AV_CODEC_ID_WADY_DPCM,
+ AV_CODEC_ID_CBD2_DPCM,
/* audio codecs */
AV_CODEC_ID_MP2 = 0x15000,
@@ -533,6 +536,8 @@ enum AVCodecID {
AV_CODEC_ID_MISC4,
AV_CODEC_ID_APAC,
AV_CODEC_ID_FTR,
+ AV_CODEC_ID_WAVARC,
+ AV_CODEC_ID_RKA,
/* subtitle codecs */
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
@@ -587,6 +592,16 @@ enum AVCodecID {
* stream (only used by libavformat) */
AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
AV_CODEC_ID_WRAPPED_AVFRAME = 0x21001, ///< Passthrough codec, AVFrames wrapped in AVPacket
+ /**
+ * Dummy null video codec, useful mainly for development and debugging.
+ * Null encoder/decoder discard all input and never return any output.
+ */
+ AV_CODEC_ID_VNULL,
+ /**
+ * Dummy null audio codec, useful mainly for development and debugging.
+ * Null encoder/decoder discard all input and never return any output.
+ */
+ AV_CODEC_ID_ANULL,
};
/**
diff --git a/media/ffvpx/libavcodec/codec_internal.h b/media/ffvpx/libavcodec/codec_internal.h
index e3b77e6deaf2..130a7dc3cd77 100644
--- a/media/ffvpx/libavcodec/codec_internal.h
+++ b/media/ffvpx/libavcodec/codec_internal.h
@@ -80,6 +80,14 @@
* Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
*/
#define FF_CODEC_CAP_ICC_PROFILES (1 << 9)
+/**
+ * The encoder has AV_CODEC_CAP_DELAY set, but does not actually have delay - it
+ * only wants to be flushed at the end to update some context variables (e.g.
+ * 2pass stats) or produce a trailing packet. Besides that it immediately
+ * produces exactly one output packet per each input frame, just as no-delay
+ * encoders do.
+ */
+#define FF_CODEC_CAP_EOF_FLUSH (1 << 10)
/**
* FFCodec.codec_tags termination value
diff --git a/media/ffvpx/libavcodec/decode.c b/media/ffvpx/libavcodec/decode.c
index 6be2d3d6edfe..be2be81089fc 100644
--- a/media/ffvpx/libavcodec/decode.c
+++ b/media/ffvpx/libavcodec/decode.c
@@ -132,38 +132,16 @@ fail2:
return 0;
}
-#define IS_EMPTY(pkt) (!(pkt)->data)
-
-static int copy_packet_props(AVPacket *dst, const AVPacket *src)
-{
- int ret = av_packet_copy_props(dst, src);
- if (ret < 0)
- return ret;
-
- dst->size = src->size; // HACK: Needed for ff_decode_frame_props().
- dst->data = (void*)1; // HACK: Needed for IS_EMPTY().
-
- return 0;
-}
-
static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
{
- AVPacket tmp = { 0 };
int ret = 0;
- if (IS_EMPTY(avci->last_pkt_props)) {
- if (av_fifo_read(avci->pkt_props, avci->last_pkt_props, 1) < 0)
- return copy_packet_props(avci->last_pkt_props, pkt);
+ av_packet_unref(avci->last_pkt_props);
+ if (pkt) {
+ ret = av_packet_copy_props(avci->last_pkt_props, pkt);
+ if (!ret)
+ avci->last_pkt_props->opaque = (void *)(intptr_t)pkt->size; // Needed for ff_decode_frame_props().
}
-
- ret = copy_packet_props(&tmp, pkt);
- if (ret < 0)
- return ret;
-
- ret = av_fifo_write(avci->pkt_props, &tmp, 1);
- if (ret < 0)
- av_packet_unref(&tmp);
-
return ret;
}
@@ -449,50 +427,41 @@ FF_ENABLE_DEPRECATION_WARNINGS
if (!got_frame)
av_frame_unref(frame);
-#if FF_API_FLAG_TRUNCATED
- if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
-#else
if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO)
-#endif
ret = pkt->size;
-#if FF_API_AVCTX_TIMEBASE
- if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
- avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
-#endif
-
/* do not stop draining when actual_got_frame != 0 or ret < 0 */
/* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
if (avci->draining && !actual_got_frame) {
if (ret < 0) {
/* prevent infinite loop if a decoder wrongly always return error on draining */
/* reasonable nb_errors_max = maximum b frames + thread count */
int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
avctx->thread_count : 1);
if (avci->nb_draining_errors++ >= nb_errors_max) {
av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
"Stop draining and force EOF.\n");
avci->draining_done = 1;
ret = AVERROR_BUG;
}
} else {
avci->draining_done = 1;
}
}
if (ret >= pkt->size || ret < 0) {
av_packet_unref(pkt);
- av_packet_unref(avci->last_pkt_props);
} else {
int consumed = ret;
pkt->data += consumed;
pkt->size -= consumed;
pkt->pts = AV_NOPTS_VALUE;
pkt->dts = AV_NOPTS_VALUE;
if (!(codec->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
- avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
+ // See extract_packet_props() comment.
+ avci->last_pkt_props->opaque = (void *)((intptr_t)avci->last_pkt_props->opaque - consumed);
avci->last_pkt_props->pts = AV_NOPTS_VALUE;
avci->last_pkt_props->dts = AV_NOPTS_VALUE;
}
@@ -578,27 +547,19 @@ static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME) {
ret = codec->cb.receive_frame(avctx, frame);
- if (ret != AVERROR(EAGAIN))
- av_packet_unref(avci->last_pkt_props);
} else
ret = decode_simple_receive_frame(avctx, frame);
if (ret == AVERROR_EOF)
avci->draining_done = 1;
/* preserve ret */
ok = detect_colorspace(avctx, frame);
if (ok < 0) {
av_frame_unref(frame);
return ok;
}
- if (!(codec->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS) &&
- IS_EMPTY(avci->last_pkt_props)) {
- // May fail if the FIFO is empty.
- av_fifo_read(avci->pkt_props, avci->last_pkt_props, 1);
- }
-
if (!ret) {
frame->best_effort_timestamp = guess_correct_pts(avctx,
frame->pts,
@@ -697,91 +658,126 @@ static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
AV_FRAME_CROP_UNALIGNED : 0);
}
+// make sure frames returned to the caller are valid
+static int frame_validate(AVCodecContext *avctx, AVFrame *frame)
+{
+ if (!frame->buf[0] || frame->format < 0)
+ goto fail;
+
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ if (frame->width <= 0 || frame->height <= 0)
+ goto fail;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ if (!av_channel_layout_check(&frame->ch_layout) ||
+ frame->sample_rate <= 0)
+ goto fail;
+
+ break;
+ default: av_assert0(0);
+ }
+
+ return 0;
+fail:
+ av_log(avctx, AV_LOG_ERROR, "An invalid frame was output by a decoder. "
+ "This is a bug, please report it.\n");
+ return AVERROR_BUG;
+}
+
int ff_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
int ret, changed;
if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
return AVERROR(EINVAL);
if (avci->buffer_frame->buf[0]) {
av_frame_move_ref(frame, avci->buffer_frame);
} else {
ret = decode_receive_frame_internal(avctx, frame);
if (ret < 0)
return ret;
}
+ ret = frame_validate(avctx, frame);
+ if (ret < 0)
+ goto fail;
+
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
ret = apply_cropping(avctx, frame);
- if (ret < 0) {
- av_frame_unref(frame);
- return ret;
- }
+ if (ret < 0)
+ goto fail;
}
- avctx->frame_number++;
+ avctx->frame_num++;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
- if (avctx->frame_number == 1) {
+ if (avctx->frame_num == 1) {
avci->initial_format = frame->format;
switch(avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
avci->initial_width = frame->width;
avci->initial_height = frame->height;
break;
case AVMEDIA_TYPE_AUDIO:
avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
avctx->sample_rate;
ret = av_channel_layout_copy(&avci->initial_ch_layout, &frame->ch_layout);
- if (ret < 0) {
- av_frame_unref(frame);
- return ret;
- }
+ if (ret < 0)
+ goto fail;
break;
}
}
- if (avctx->frame_number > 1) {
+ if (avctx->frame_num > 1) {
changed = avci->initial_format != frame->format;
switch(avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
changed |= avci->initial_width != frame->width ||
avci->initial_height != frame->height;
break;
case AVMEDIA_TYPE_AUDIO:
changed |= avci->initial_sample_rate != frame->sample_rate ||
avci->initial_sample_rate != avctx->sample_rate ||
av_channel_layout_compare(&avci->initial_ch_layout, &frame->ch_layout);
break;
}
if (changed) {
avci->changed_frames_dropped++;
- av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64
+ av_log(avctx, AV_LOG_INFO, "dropped changed frame #%"PRId64" pts %"PRId64
" drop count: %d \n",
- avctx->frame_number, frame->pts,
+ avctx->frame_num, frame->pts,
avci->changed_frames_dropped);
- av_frame_unref(frame);
- return AVERROR_INPUT_CHANGED;
+ ret = AVERROR_INPUT_CHANGED;
+ goto fail;
}
}
}
return 0;
+fail:
+ av_frame_unref(frame);
+ return ret;
}
static void get_subtitle_defaults(AVSubtitle *sub)
{
memset(sub, 0, sizeof(*sub));
sub->pts = AV_NOPTS_VALUE;
}
#define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
-static int recode_subtitle(AVCodecContext *avctx, AVPacket **outpkt,
- AVPacket *inpkt, AVPacket *buf_pkt)
+static int recode_subtitle(AVCodecContext *avctx, const AVPacket **outpkt,
+ const AVPacket *inpkt, AVPacket *buf_pkt)
{
#if CONFIG_ICONV
iconv_t cd = (iconv_t)-1;
@@ -861,28 +857,27 @@ static int utf8_check(const uint8_t *str)
}
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
- int *got_sub_ptr,
- AVPacket *avpkt)
+ int *got_sub_ptr, const AVPacket *avpkt)
{
int ret = 0;
if (!avpkt->data && avpkt->size) {
av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
return AVERROR(EINVAL);
}
if (!avctx->codec)
return AVERROR(EINVAL);
if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
return AVERROR(EINVAL);
}
*got_sub_ptr = 0;
get_subtitle_defaults(sub);
if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
AVCodecInternal *avci = avctx->internal;
- AVPacket *pkt;
+ const AVPacket *pkt;
ret = recode_subtitle(avctx, &pkt, avpkt, avci->buffer_pkt);
if (ret < 0)
@@ -926,7 +921,12 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
}
if (*got_sub_ptr)
- avctx->frame_number++;
+ avctx->frame_num++;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
}
return ret;
@@ -1291,54 +1291,77 @@ static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
return av_packet_unpack_dictionary(side_metadata, size, frame_md);
}
-int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
+int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx,
+ AVFrame *frame, const AVPacket *pkt)
{
- AVPacket *pkt = avctx->internal->last_pkt_props;
static const struct {
enum AVPacketSideDataType packet;
enum AVFrameSideDataType frame;
} sd[] = {
{ AV_PKT_DATA_REPLAYGAIN , AV_FRAME_DATA_REPLAYGAIN },
{ AV_PKT_DATA_DISPLAYMATRIX, AV_FRAME_DATA_DISPLAYMATRIX },
{ AV_PKT_DATA_SPHERICAL, AV_FRAME_DATA_SPHERICAL },
{ AV_PKT_DATA_STEREO3D, AV_FRAME_DATA_STEREO3D },
{ AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
{ AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
{ AV_PKT_DATA_CONTENT_LIGHT_LEVEL, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
{ AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC },
{ AV_PKT_DATA_ICC_PROFILE, AV_FRAME_DATA_ICC_PROFILE },
{ AV_PKT_DATA_S12M_TIMECODE, AV_FRAME_DATA_S12M_TIMECODE },
{ AV_PKT_DATA_DYNAMIC_HDR10_PLUS, AV_FRAME_DATA_DYNAMIC_HDR_PLUS },
};
- if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
- frame->pts = pkt->pts;
- frame->pkt_pos = pkt->pos;
- frame->duration = pkt->duration;
- frame->pkt_size = pkt->size;
+ frame->pts = pkt->pts;
+ frame->pkt_pos = pkt->pos;
+ frame->duration = pkt->duration;
+ frame->pkt_size = pkt->size;
- for (int i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
- size_t size;
- uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
- if (packet_sd) {
- AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
- sd[i].frame,
- size);
- if (!frame_sd)
- return AVERROR(ENOMEM);
+ for (int i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
+ size_t size;
+ uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
+ if (packet_sd) {
+ AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
+ sd[i].frame,
+ size);
+ if (!frame_sd)
+ return AVERROR(ENOMEM);
- memcpy(frame_sd->data, packet_sd, size);
- }
- }
- add_metadata_from_side_data(pkt, frame);
-
- if (pkt->flags & AV_PKT_FLAG_DISCARD) {
- frame->flags |= AV_FRAME_FLAG_DISCARD;
- } else {
- frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
+ memcpy(frame_sd->data, packet_sd, size);
}
}
+ add_metadata_from_side_data(pkt, frame);
+
+ if (pkt->flags & AV_PKT_FLAG_DISCARD) {
+ frame->flags |= AV_FRAME_FLAG_DISCARD;
+ } else {
+ frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
+ }
+
+ if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
+ int ret = av_buffer_replace(&frame->opaque_ref, pkt->opaque_ref);
+ if (ret < 0)
+ return ret;
+ frame->opaque = pkt->opaque;
+ }
+
+ return 0;
+}
+
+int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
+{
+ const AVPacket *pkt = avctx->internal->last_pkt_props;
+
+ if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
+ int ret = ff_decode_frame_props_from_pkt(avctx, frame, pkt);
+ if (ret < 0)
+ return ret;
+ frame->pkt_size = (int)(intptr_t)pkt->opaque;
+ }
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
frame->reordered_opaque = avctx->reordered_opaque;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
frame->color_primaries = avctx->color_primaries;
@@ -1582,19 +1605,6 @@ int ff_decode_preinit(AVCodecContext *avctx)
* free the already allocated subtitle_header before overwriting it */
av_freep(&avctx->subtitle_header);
-#if FF_API_THREAD_SAFE_CALLBACKS
-FF_DISABLE_DEPRECATION_WARNINGS
- if ((avctx->thread_type & FF_THREAD_FRAME) &&
- avctx->get_buffer2 != avcodec_default_get_buffer2 &&
- !avctx->thread_safe_callbacks) {
- av_log(avctx, AV_LOG_WARNING, "Requested frame threading with a "
- "custom get_buffer2() implementation which is not marked as "
- "thread safe. This is not supported anymore, make your "
- "callback thread-safe.\n");
- }
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) {
av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
avctx->codec->max_lowres);
@@ -1651,9 +1661,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
avci->in_pkt = av_packet_alloc();
avci->last_pkt_props = av_packet_alloc();
- avci->pkt_props = av_fifo_alloc2(1, sizeof(*avci->last_pkt_props),
- AV_FIFO_FLAG_AUTO_GROW);
- if (!avci->in_pkt || !avci->last_pkt_props || !avci->pkt_props)
+ if (!avci->in_pkt || !avci->last_pkt_props)
return AVERROR(ENOMEM);
ret = decode_bsfs_init(avctx);
diff --git a/media/ffvpx/libavcodec/decode.h b/media/ffvpx/libavcodec/decode.h
index 5d95369b5e80..8430ffbd6648 100644
--- a/media/ffvpx/libavcodec/decode.h
+++ b/media/ffvpx/libavcodec/decode.h
@@ -69,6 +69,12 @@ int ff_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame);
*/
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt);
+/**
+ * Set various frame properties from the provided packet.
+ */
+int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx,
+ AVFrame *frame, const AVPacket *pkt);
+
/**
* Set various frame properties from the codec context / packet data.
*/
diff --git a/media/ffvpx/libavcodec/encode.c b/media/ffvpx/libavcodec/encode.c
index fbe2c97..041fc76 100644
--- a/media/ffvpx/libavcodec/encode.c
+++ b/media/ffvpx/libavcodec/encode.c
@@ -164,39 +164,63 @@ fail:
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
const AVSubtitle *sub)
{
int ret;
if (sub->start_display_time) {
av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
return -1;
}
ret = ffcodec(avctx->codec)->cb.encode_sub(avctx, buf, buf_size, sub);
- avctx->frame_number++;
+ avctx->frame_num++;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
return ret;
}
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
if (avci->draining)
return AVERROR_EOF;
if (!avci->buffer_frame->buf[0])
return AVERROR(EAGAIN);
av_frame_move_ref(frame, avci->buffer_frame);
return 0;
}
+int ff_encode_reordered_opaque(AVCodecContext *avctx,
+ AVPacket *pkt, const AVFrame *frame)
+{
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->reordered_opaque = frame->reordered_opaque;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
+ int ret = av_buffer_replace(&pkt->opaque_ref, frame->opaque_ref);
+ if (ret < 0)
+ return ret;
+ pkt->opaque = frame->opaque;
+ }
+
+ return 0;
+}
+
int ff_encode_encode_cb(AVCodecContext *avctx, AVPacket *avpkt,
AVFrame *frame, int *got_packet)
{
const FFCodec *const codec = ffcodec(avctx->codec);
int ret;
ret = codec->cb.encode(avctx, avpkt, frame, got_packet);
emms_c();
av_assert0(ret <= 0);
@@ -204,44 +228,52 @@ int ff_encode_encode_cb(AVCodecContext *avctx, AVPacket *avpkt,
if (avpkt->data) {
ret = encode_make_refcounted(avctx, avpkt);
if (ret < 0)
goto unref;
// Date returned by encoders must always be ref-counted
av_assert0(avpkt->buf);
}
// set the timestamps for the simple no-delay case
// encoders with delay have to set the timestamps themselves
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) ||
+ (frame && (codec->caps_internal & FF_CODEC_CAP_EOF_FLUSH))) {
if (avpkt->pts == AV_NOPTS_VALUE)
avpkt->pts = frame->pts;
- if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
- if (!avpkt->duration)
+ if (!avpkt->duration) {
+ if (frame->duration)
+ avpkt->duration = frame->duration;
+ else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
avpkt->duration = ff_samples_to_time_base(avctx,
frame->nb_samples);
+ }
}
+
+ ret = ff_encode_reordered_opaque(avctx, avpkt, frame);
+ if (ret < 0)
+ goto unref;
}
// dts equals pts unless there is reordering
// there can be no reordering if there is no encoder delay
if (!(avctx->codec_descriptor->props & AV_CODEC_PROP_REORDER) ||
- !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
+ !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) ||
+ (codec->caps_internal & FF_CODEC_CAP_EOF_FLUSH))
avpkt->dts = avpkt->pts;
} else {
unref:
av_packet_unref(avpkt);
}
-#if !FF_API_THREAD_SAFE_CALLBACKS
+
if (frame)
av_frame_unref(frame);
-#endif
return ret;
}
static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
{
AVCodecInternal *avci = avctx->internal;
AVFrame *frame = avci->in_frame;
const FFCodec *const codec = ffcodec(avctx->codec);
int got_packet;
@@ -268,24 +300,20 @@ static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
got_packet = 0;
av_assert0(codec->cb_type == FF_CODEC_CB_TYPE_ENCODE);
if (CONFIG_FRAME_THREAD_ENCODER && avci->frame_thread_encoder)
/* This will unref frame. */
ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
else {
ret = ff_encode_encode_cb(avctx, avpkt, frame, &got_packet);
-#if FF_API_THREAD_SAFE_CALLBACKS
- if (frame)
- av_frame_unref(frame);
-#endif
}
if (avci->draining && !got_packet)
avci->draining_done = 1;
return ret;
}
static int encode_simple_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
@@ -435,20 +463,27 @@ FF_DISABLE_DEPRECATION_WARNINGS
dst->duration = dst->pkt_duration;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
ret = encode_generate_icc_profile(avctx, dst);
if (ret < 0)
return ret;
}
+ // unset frame duration unless AV_CODEC_FLAG_FRAME_DURATION is set,
+ // since otherwise we cannot be sure that whatever value it has is in the
+ // right timebase, so we would produce an incorrect value, which is worse
+ // than none at all
+ if (!(avctx->flags & AV_CODEC_FLAG_FRAME_DURATION))
+ dst->duration = 0;
+
return 0;
}
int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
int ret;
if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
return AVERROR(EINVAL);
@@ -466,21 +501,26 @@ int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame
if (ret < 0)
return ret;
}
if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data) {
ret = encode_receive_packet_internal(avctx, avci->buffer_pkt);
if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
return ret;
}
- avctx->frame_number++;
+ avctx->frame_num++;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
return 0;
}
int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
AVCodecInternal *avci = avctx->internal;
int ret;
av_packet_unref(avpkt);
@@ -627,20 +667,27 @@ static int encode_preinit_audio(AVCodecContext *avctx)
int ff_encode_preinit(AVCodecContext *avctx)
{
AVCodecInternal *avci = avctx->internal;
int ret = 0;
if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
return AVERROR(EINVAL);
}
+ if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE &&
+ !(avctx->codec->capabilities & AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE)) {
+ av_log(avctx, AV_LOG_ERROR, "The copy_opaque flag is set, but the "
+ "encoder does not support it.\n");
+ return AVERROR(EINVAL);
+ }
+
switch (avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO: ret = encode_preinit_video(avctx); break;
case AVMEDIA_TYPE_AUDIO: ret = encode_preinit_audio(avctx); break;
}
if (ret < 0)
return ret;
if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
&& avctx->bit_rate>0 && avctx->bit_rate<1000) {
av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate);
diff --git a/media/ffvpx/libavcodec/encode.h b/media/ffvpx/libavcodec/encode.h
index 81d18d6eadf6..26a3304045bd 100644
--- a/media/ffvpx/libavcodec/encode.h
+++ b/media/ffvpx/libavcodec/encode.h
@@ -69,6 +69,12 @@ int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame);
*/
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size);
+/**
+ * Propagate user opaque values from the frame to avctx/pkt as needed.
+ */
+int ff_encode_reordered_opaque(AVCodecContext *avctx,
+ AVPacket *pkt, const AVFrame *frame);
+
/*
* Perform encoder initialization and validation.
* Called when opening the encoder, before the FFCodec.init() call.
diff --git a/media/ffvpx/libavcodec/flac.c b/media/ffvpx/libavcodec/flac.c
index 352d663c67d8..174b4801beab 100644
--- a/media/ffvpx/libavcodec/flac.c
+++ b/media/ffvpx/libavcodec/flac.c
@@ -28,7 +28,7 @@
#include "flacdata.h"
#include "flac_parse.h"
-static const int8_t sample_size_table[] = { 0, 8, 12, 0, 16, 20, 24, 0 };
+static const int8_t sample_size_table[] = { 0, 8, 12, 0, 16, 20, 24, 32 };
static const AVChannelLayout flac_channel_layouts[8] = {
AV_CHANNEL_LAYOUT_MONO,
@@ -82,7 +82,7 @@ int ff_flac_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb,
/* bits per sample */
bps_code = get_bits(gb, 3);
- if (bps_code == 3 || bps_code == 7) {
+ if (bps_code == 3) {
av_log(avctx, AV_LOG_ERROR + log_level_offset,
"invalid sample size code (%d)\n",
bps_code);
diff --git a/media/ffvpx/libavcodec/flacdec.c b/media/ffvpx/libavcodec/flacdec.c
index 5b8547a98f51..cc778a8dff19 100644
--- a/media/ffvpx/libavcodec/flacdec.c
+++ b/media/ffvpx/libavcodec/flacdec.c
@@ -64,6 +64,9 @@ typedef struct FLACContext {
int32_t *decoded[FLAC_MAX_CHANNELS]; ///< decoded samples
uint8_t *decoded_buffer;
unsigned int decoded_buffer_size;
+ int64_t *decoded_33bps; ///< decoded samples for a 33 bps subframe
+ uint8_t *decoded_buffer_33bps;
+ unsigned int decoded_buffer_size_33bps;
int buggy_lpc; ///< use workaround for old lavc encoded files
FLACDSPContext dsp;
@@ -154,6 +157,24 @@ static int allocate_buffers(FLACContext *s)
s->stream_info.channels,
s->stream_info.max_blocksize,
AV_SAMPLE_FMT_S32P, 0);
+ if (ret >= 0 && s->stream_info.bps == 32 && s->stream_info.channels == 2) {
+ buf_size = av_samples_get_buffer_size(NULL, 1,
+ s->stream_info.max_blocksize,
+ AV_SAMPLE_FMT_S64P, 0);
+ if (buf_size < 0)
+ return buf_size;
+
+ av_fast_malloc(&s->decoded_buffer_33bps, &s->decoded_buffer_size_33bps, buf_size);
+ if (!s->decoded_buffer_33bps)
+ return AVERROR(ENOMEM);
+
+ ret = av_samples_fill_arrays((uint8_t **)&s->decoded_33bps, NULL,
+ s->decoded_buffer_33bps,
+ 1,
+ s->stream_info.max_blocksize,
+ AV_SAMPLE_FMT_S64P, 0);
+
+ }
return ret < 0 ? ret : 0;
}
@@ -331,6 +352,62 @@ static int decode_subframe_fixed(FLACContext *s, int32_t *decoded,
return 0;
}
+#define DECODER_SUBFRAME_FIXED_WIDE(residual) { \
+ const int blocksize = s->blocksize; \
+ int ret; \
+ \
+ if ((ret = decode_residuals(s, residual, pred_order)) < 0) \
+ return ret; \
+ \
+ switch (pred_order) { \
+ case 0: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = residual[i]; \
+ break; \
+ case 1: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = (int64_t)residual[i] + (int64_t)decoded[i-1];\
+ break; \
+ case 2: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = (int64_t)residual[i] + 2*(int64_t)decoded[i-1] - (int64_t)decoded[i-2]; \
+ break; \
+ case 3: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = (int64_t)residual[i] + 3*(int64_t)decoded[i-1] - 3*(int64_t)decoded[i-2] + (int64_t)decoded[i-3]; \
+ break; \
+ case 4: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = (int64_t)residual[i] + 4*(int64_t)decoded[i-1] - 6*(int64_t)decoded[i-2] + 4*(int64_t)decoded[i-3] - (int64_t)decoded[i-4]; \
+ break; \
+ default: \
+ av_log(s->avctx, AV_LOG_ERROR, "illegal pred order %d\n", pred_order); \
+ return AVERROR_INVALIDDATA; \
+ } \
+ return 0; \
+}
+
+static int decode_subframe_fixed_wide(FLACContext *s, int32_t *decoded,
+ int pred_order, int bps)
+{
+ /* warm up samples */
+ for (int i = 0; i < pred_order; i++) {
+ decoded[i] = get_sbits_long(&s->gb, bps);
+ }
+ DECODER_SUBFRAME_FIXED_WIDE(decoded);
+}
+
+
+static int decode_subframe_fixed_33bps(FLACContext *s, int64_t *decoded,
+ int32_t *residual, int pred_order)
+{
+    /* warm up samples */
+    for (int i = 0; i < pred_order; i++) {
+        decoded[i] = get_sbits64(&s->gb, 33);
+    }
+    DECODER_SUBFRAME_FIXED_WIDE(residual);
+}
+
static void lpc_analyze_remodulate(SUINT32 *decoded, const int coeffs[32],
int order, int qlevel, int len, int bps)
{
@@ -402,68 +479,138 @@ static int decode_subframe_lpc(FLACContext *s, int32_t *decoded, int pred_order,
return 0;
}
+static int decode_subframe_lpc_33bps(FLACContext *s, int64_t *decoded,
+ int32_t *residual, int pred_order)
+{
+ int i, j, ret;
+ int coeff_prec, qlevel;
+ int coeffs[32];
+
+ /* warm up samples */
+ for (i = 0; i < pred_order; i++) {
+ decoded[i] = get_sbits64(&s->gb, 33);
+ }
+
+ coeff_prec = get_bits(&s->gb, 4) + 1;
+ if (coeff_prec == 16) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid coeff precision\n");
+ return AVERROR_INVALIDDATA;
+ }
+ qlevel = get_sbits(&s->gb, 5);
+ if (qlevel < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "qlevel %d not supported, maybe buggy stream\n",
+ qlevel);
+ return AVERROR_INVALIDDATA;
+ }
+
+ for (i = 0; i < pred_order; i++) {
+ coeffs[pred_order - i - 1] = get_sbits(&s->gb, coeff_prec);
+ }
+
+ if ((ret = decode_residuals(s, residual, pred_order)) < 0)
+ return ret;
+
+ for (i = pred_order; i < s->blocksize; i++, decoded++) {
+ int64_t sum = 0;
+ for (j = 0; j < pred_order; j++)
+ sum += (int64_t)coeffs[j] * decoded[j];
+ decoded[j] = residual[i] + (sum >> qlevel);
+ }
+
+ return 0;
+}
+
static inline int decode_subframe(FLACContext *s, int channel)
{
int32_t *decoded = s->decoded[channel];
int type, wasted = 0;
int bps = s->stream_info.bps;
- int i, tmp, ret;
+ int i, ret;
if (channel == 0) {
if (s->ch_mode == FLAC_CHMODE_RIGHT_SIDE)
bps++;
} else {
if (s->ch_mode == FLAC_CHMODE_LEFT_SIDE || s->ch_mode == FLAC_CHMODE_MID_SIDE)
bps++;
}
if (get_bits1(&s->gb)) {
av_log(s->avctx, AV_LOG_ERROR, "invalid subframe padding\n");
return AVERROR_INVALIDDATA;
}
type = get_bits(&s->gb, 6);
if (get_bits1(&s->gb)) {
int left = get_bits_left(&s->gb);
if ( left <= 0 ||
(left < bps && !show_bits_long(&s->gb, left)) ||
- !show_bits_long(&s->gb, bps)) {
+ !show_bits_long(&s->gb, bps-1)) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid number of wasted bits > available bits (%d) - left=%d\n",
bps, left);
return AVERROR_INVALIDDATA;
}
wasted = 1 + get_unary(&s->gb, 1, get_bits_left(&s->gb));
bps -= wasted;
}
- if (bps > 32) {
- avpriv_report_missing_feature(s->avctx, "Decorrelated bit depth > 32");
- return AVERROR_PATCHWELCOME;
- }
//FIXME use av_log2 for types
if (type == 0) {
- tmp = get_sbits_long(&s->gb, bps);
- for (i = 0; i < s->blocksize; i++)
- decoded[i] = tmp;
+ if (bps < 33) {
+ int32_t tmp = get_sbits_long(&s->gb, bps);
+ for (i = 0; i < s->blocksize; i++)
+ decoded[i] = tmp;
+ } else {
+ int64_t tmp = get_sbits64(&s->gb, 33);
+ for (i = 0; i < s->blocksize; i++)
+ s->decoded_33bps[i] = tmp;
+ }
} else if (type == 1) {
- for (i = 0; i < s->blocksize; i++)
- decoded[i] = get_sbits_long(&s->gb, bps);
+ if (bps < 33) {
+ for (i = 0; i < s->blocksize; i++)
+ decoded[i] = get_sbits_long(&s->gb, bps);
+ } else {
+ for (i = 0; i < s->blocksize; i++)
+ s->decoded_33bps[i] = get_sbits64(&s->gb, 33);
+ }
} else if ((type >= 8) && (type <= 12)) {
- if ((ret = decode_subframe_fixed(s, decoded, type & ~0x8, bps)) < 0)
- return ret;
+ int order = type & ~0x8;
+ if (bps < 33) {
+ if (bps + order <= 32) {
+ if ((ret = decode_subframe_fixed(s, decoded, order, bps)) < 0)
+ return ret;
+ } else {
+ if ((ret = decode_subframe_fixed_wide(s, decoded, order, bps)) < 0)
+ return ret;
+ }
+ } else {
+ if ((ret = decode_subframe_fixed_33bps(s, s->decoded_33bps, decoded, order)) < 0)
+ return ret;
+ }
} else if (type >= 32) {
- if ((ret = decode_subframe_lpc(s, decoded, (type & ~0x20)+1, bps)) < 0)
- return ret;
+ if (bps < 33) {
+ if ((ret = decode_subframe_lpc(s, decoded, (type & ~0x20)+1, bps)) < 0)
+ return ret;
+ } else {
+ if ((ret = decode_subframe_lpc_33bps(s, s->decoded_33bps, decoded, (type & ~0x20)+1)) < 0)
+ return ret;
+ }
} else {
av_log(s->avctx, AV_LOG_ERROR, "invalid coding type\n");
return AVERROR_INVALIDDATA;
}
- if (wasted && wasted < 32) {
- int i;
- for (i = 0; i < s->blocksize; i++)
- decoded[i] = (unsigned)decoded[i] << wasted;
+ if (wasted) {
+ if (wasted+bps == 33) {
+ int i;
+ for (i = 0; i < s->blocksize; i++)
+ s->decoded_33bps[i] = (uint64_t)decoded[i] << wasted;
+ } else if (wasted < 32) {
+ int i;
+ for (i = 0; i < s->blocksize; i++)
+ decoded[i] = (unsigned)decoded[i] << wasted;
+ }
}
return 0;
@@ -554,6 +701,26 @@ static int decode_frame(FLACContext *s)
return 0;
}
+static void decorrelate_33bps(int ch_mode, int32_t **decoded, int64_t *decoded_33bps, int len)
+{
+ int i;
+ if (ch_mode == FLAC_CHMODE_LEFT_SIDE ) {
+ for (i = 0; i < len; i++)
+ decoded[1][i] = decoded[0][i] - decoded_33bps[i];
+ } else if (ch_mode == FLAC_CHMODE_RIGHT_SIDE ) {
+ for (i = 0; i < len; i++)
+ decoded[0][i] = decoded[1][i] + decoded_33bps[i];
+ } else if (ch_mode == FLAC_CHMODE_MID_SIDE ) {
+ for (i = 0; i < len; i++) {
+ uint64_t a = decoded[0][i];
+ int64_t b = decoded_33bps[i];
+ a -= b >> 1;
+ decoded[0][i] = (a + b);
+ decoded[1][i] = a;
+ }
+ }
+}
+
static int flac_decode_frame(AVCodecContext *avctx, AVFrame *frame,
int *got_frame_ptr, AVPacket *avpkt)
{
@@ -612,29 +779,36 @@ static int flac_decode_frame(AVCodecContext *avctx, AVFrame *frame,
if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
- s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded,
- s->stream_info.channels,
- s->blocksize, s->sample_shift);
+ if (s->stream_info.bps == 32 && s->ch_mode > 0) {
+ decorrelate_33bps(s->ch_mode, s->decoded, s->decoded_33bps, s->blocksize);
+ s->dsp.decorrelate[0](frame->data, s->decoded, s->stream_info.channels,
+ s->blocksize, s->sample_shift);
+ } else {
+ s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded,
+ s->stream_info.channels,
+ s->blocksize, s->sample_shift);
+ }
if (bytes_read > buf_size) {
av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", bytes_read - buf_size);
return AVERROR_INVALIDDATA;
}
if (bytes_read < buf_size) {
av_log(s->avctx, AV_LOG_DEBUG, "underread: %d orig size: %d\n",
buf_size - bytes_read, buf_size);
}
*got_frame_ptr = 1;
return bytes_read;
}
static av_cold int flac_decode_close(AVCodecContext *avctx)
{
FLACContext *s = avctx->priv_data;
av_freep(&s->decoded_buffer);
+ av_freep(&s->decoded_buffer_33bps);
return 0;
}
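decorrelate_33bps() above relies on the usual FLAC mid/side identity; a standalone check of that arithmetic with hand-picked sample values (names illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

// With mid = (L + R) >> 1 and side = L - R, the low bit lost from the sum
// equals the low bit of the difference, so both channels are recovered
// exactly even when the side channel needs 33 bits.
static void ms_reconstruct(int64_t mid, int64_t side,
                           int64_t *left, int64_t *right)
{
    mid   -= side >> 1;   // arithmetic shift, as in decorrelate_33bps()
    *left  = mid + side;
    *right = mid;
}

int main(void)
{
    int64_t L, R;
    ms_reconstruct((5 + 2) >> 1, 5 - 2, &L, &R);   // odd sum
    assert(L == 5 && R == 2);
    ms_reconstruct((2 + 5) >> 1, 2 - 5, &L, &R);   // negative side
    assert(L == 2 && R == 5);
    return 0;
}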
diff --git a/media/ffvpx/libavcodec/get_bits.h b/media/ffvpx/libavcodec/get_bits.h
index 992765dc92c9..65dc080ddb3e 100644
--- a/media/ffvpx/libavcodec/get_bits.h
+++ b/media/ffvpx/libavcodec/get_bits.h
@@ -1,6 +1,5 @@
/*
* Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
- * Copyright (c) 2016 Alexandra Hájková
*
* This file is part of FFmpeg.
*
@@ -58,12 +57,55 @@
#define CACHED_BITSTREAM_READER 0
#endif
+#if CACHED_BITSTREAM_READER
+
+// we always want the LE implementation, to provide get_bits_le()
+#define BITSTREAM_LE
+
+#ifndef BITSTREAM_READER_LE
+# define BITSTREAM_BE
+# define BITSTREAM_DEFAULT_BE
+#endif
+
+#include "bitstream.h"
+
+#undef BITSTREAM_LE
+#undef BITSTREAM_BE
+#undef BITSTREAM_DEFAULT_BE
+
+typedef BitstreamContext GetBitContext;
+
+#define get_bits_count bits_tell
+#define get_bits_left bits_left
+#define skip_bits_long bits_skip
+#define skip_bits bits_skip
+#define get_bits bits_read_nz
+#define get_bitsz bits_read
+#define get_bits_long bits_read
+#define get_bits1 bits_read_bit
+#define get_bits64 bits_read_64
+#define get_xbits bits_read_xbits
+#define get_sbits bits_read_signed_nz
+#define get_sbits_long bits_read_signed
+#define show_bits bits_peek
+#define show_bits_long bits_peek
+#define init_get_bits bits_init
+#define init_get_bits8 bits_init8
+#define align_get_bits bits_align
+#define get_vlc2 bits_read_vlc
+
+#define init_get_bits8_le(s, buffer, byte_size) bits_init8_le((BitstreamContextLE*)s, buffer, byte_size)
+#define get_bits_le(s, n) bits_read_le((BitstreamContextLE*)s, n)
+
+#define show_bits1(s) bits_peek(s, 1)
+#define skip_bits1(s) bits_skip(s, 1)
+
+#define skip_1stop_8data_bits bits_skip_1stop_8data
+
+#else // CACHED_BITSTREAM_READER
+
typedef struct GetBitContext {
const uint8_t *buffer, *buffer_end;
-#if CACHED_BITSTREAM_READER
- uint64_t cache;
- unsigned bits_left;
-#endif
int index;
int size_in_bits;
int size_in_bits_plus8;
@@ -120,16 +162,12 @@ static inline unsigned int show_bits(GetBitContext *s, int n);
* For examples see get_bits, show_bits, skip_bits, get_vlc.
*/
-#if CACHED_BITSTREAM_READER
-# define MIN_CACHE_BITS 64
-#elif defined LONG_BITSTREAM_READER
+#if defined LONG_BITSTREAM_READER
# define MIN_CACHE_BITS 32
#else
# define MIN_CACHE_BITS 25
#endif
-#if !CACHED_BITSTREAM_READER
-
#define OPEN_READER_NOSIZE(name, gb) \
unsigned int name ## _index = (gb)->index; \
unsigned int av_unused name ## _cache
@@ -214,356 +252,178 @@ static inline unsigned int show_bits(GetBitContext *s, int n);
#define GET_CACHE(name, gb) ((uint32_t) name ## _cache)
-#endif
static inline int get_bits_count(const GetBitContext *s)
{
-#if CACHED_BITSTREAM_READER
- return s->index - s->bits_left;
-#else
return s->index;
-#endif
}
-#if CACHED_BITSTREAM_READER
-static inline void refill_32(GetBitContext *s, int is_le)
-{
-#if !UNCHECKED_BITSTREAM_READER
- if (s->index >> 3 >= s->buffer_end - s->buffer)
- return;
-#endif
-
- if (is_le)
- s->cache = (uint64_t)AV_RL32(s->buffer + (s->index >> 3)) << s->bits_left | s->cache;
- else
- s->cache = s->cache | (uint64_t)AV_RB32(s->buffer + (s->index >> 3)) << (32 - s->bits_left);
- s->index += 32;
- s->bits_left += 32;
-}
-
-static inline void refill_64(GetBitContext *s, int is_le)
-{
-#if !UNCHECKED_BITSTREAM_READER
- if (s->index >> 3 >= s->buffer_end - s->buffer)
- return;
-#endif
-
- if (is_le)
- s->cache = AV_RL64(s->buffer + (s->index >> 3));
- else
- s->cache = AV_RB64(s->buffer + (s->index >> 3));
- s->index += 64;
- s->bits_left = 64;
-}
-
-static inline uint64_t get_val(GetBitContext *s, unsigned n, int is_le)
-{
- uint64_t ret;
- av_assert2(n>0 && n<=63);
- if (is_le) {
- ret = s->cache & ((UINT64_C(1) << n) - 1);
- s->cache >>= n;
- } else {
- ret = s->cache >> (64 - n);
- s->cache <<= n;
- }
- s->bits_left -= n;
- return ret;
-}
-
-static inline unsigned show_val(const GetBitContext *s, unsigned n)
-{
-#ifdef BITSTREAM_READER_LE
- return s->cache & ((UINT64_C(1) << n) - 1);
-#else
- return s->cache >> (64 - n);
-#endif
-}
-#endif
-
/**
* Skips the specified number of bits.
* @param n the number of bits to skip,
* For the UNCHECKED_BITSTREAM_READER this must not cause the distance
* from the start to overflow int32_t. Staying within the bitstream + padding
* is sufficient, too.
*/
static inline void skip_bits_long(GetBitContext *s, int n)
{
-#if CACHED_BITSTREAM_READER
- skip_bits(s, n);
-#else
#if UNCHECKED_BITSTREAM_READER
s->index += n;
#else
s->index += av_clip(n, -s->index, s->size_in_bits_plus8 - s->index);
#endif
-#endif
}
-#if CACHED_BITSTREAM_READER
-static inline void skip_remaining(GetBitContext *s, unsigned n)
-{
-#ifdef BITSTREAM_READER_LE
- s->cache >>= n;
-#else
- s->cache <<= n;
-#endif
- s->bits_left -= n;
-}
-#endif
-
/**
* Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
* if MSB not set it is negative
* @param n length in bits
*/
static inline int get_xbits(GetBitContext *s, int n)
{
-#if CACHED_BITSTREAM_READER
- int32_t cache = show_bits(s, 32);
- int sign = ~cache >> 31;
- skip_remaining(s, n);
-
- return ((((uint32_t)(sign ^ cache)) >> (32 - n)) ^ sign) - sign;
-#else
register int sign;
register int32_t cache;
OPEN_READER(re, s);
av_assert2(n>0 && n<=25);
UPDATE_CACHE(re, s);
cache = GET_CACHE(re, s);
sign = ~cache >> 31;
LAST_SKIP_BITS(re, s, n);
CLOSE_READER(re, s);
return (NEG_USR32(sign ^ cache, n) ^ sign) - sign;
-#endif
}
-#if !CACHED_BITSTREAM_READER
static inline int get_xbits_le(GetBitContext *s, int n)
{
register int sign;
register int32_t cache;
OPEN_READER(re, s);
av_assert2(n>0 && n<=25);
UPDATE_CACHE_LE(re, s);
cache = GET_CACHE(re, s);
sign = sign_extend(~cache, n) >> 31;
LAST_SKIP_BITS(re, s, n);
CLOSE_READER(re, s);
return (zero_extend(sign ^ cache, n) ^ sign) - sign;
}
-#endif
static inline int get_sbits(GetBitContext *s, int n)
{
register int tmp;
-#if CACHED_BITSTREAM_READER
- av_assert2(n>0 && n<=25);
- tmp = sign_extend(get_bits(s, n), n);
-#else
OPEN_READER(re, s);
av_assert2(n>0 && n<=25);
UPDATE_CACHE(re, s);
tmp = SHOW_SBITS(re, s, n);
LAST_SKIP_BITS(re, s, n);
CLOSE_READER(re, s);
-#endif
return tmp;
}
/**
* Read 1-25 bits.
*/
static inline unsigned int get_bits(GetBitContext *s, int n)
{
register unsigned int tmp;
-#if CACHED_BITSTREAM_READER
-
- av_assert2(n>0 && n<=32);
- if (n > s->bits_left) {
-#ifdef BITSTREAM_READER_LE
- refill_32(s, 1);
-#else
- refill_32(s, 0);
-#endif
- if (s->bits_left < 32)
- s->bits_left = n;
- }
-
-#ifdef BITSTREAM_READER_LE
- tmp = get_val(s, n, 1);
-#else
- tmp = get_val(s, n, 0);
-#endif
-#else
OPEN_READER(re, s);
av_assert2(n>0 && n<=25);
UPDATE_CACHE(re, s);
tmp = SHOW_UBITS(re, s, n);
LAST_SKIP_BITS(re, s, n);
CLOSE_READER(re, s);
-#endif
av_assert2(tmp < UINT64_C(1) << n);
return tmp;
}
/**
* Read 0-25 bits.
*/
static av_always_inline int get_bitsz(GetBitContext *s, int n)
{
return n ? get_bits(s, n) : 0;
}
static inline unsigned int get_bits_le(GetBitContext *s, int n)
{
-#if CACHED_BITSTREAM_READER
- av_assert2(n>0 && n<=32);
- if (n > s->bits_left) {
- refill_32(s, 1);
- if (s->bits_left < 32)
- s->bits_left = n;
- }
-
- return get_val(s, n, 1);
-#else
register int tmp;
OPEN_READER(re, s);
av_assert2(n>0 && n<=25);
UPDATE_CACHE_LE(re, s);
tmp = SHOW_UBITS_LE(re, s, n);
LAST_SKIP_BITS(re, s, n);
CLOSE_READER(re, s);
return tmp;
-#endif
}
/**
* Show 1-25 bits.
*/
static inline unsigned int show_bits(GetBitContext *s, int n)
{
register unsigned int tmp;
-#if CACHED_BITSTREAM_READER
- if (n > s->bits_left)
-#ifdef BITSTREAM_READER_LE
- refill_32(s, 1);
-#else
- refill_32(s, 0);
-#endif
-
- tmp = show_val(s, n);
-#else
OPEN_READER_NOSIZE(re, s);
av_assert2(n>0 && n<=25);
UPDATE_CACHE(re, s);
tmp = SHOW_UBITS(re, s, n);
-#endif
return tmp;
}
static inline void skip_bits(GetBitContext *s, int n)
{
-#if CACHED_BITSTREAM_READER
- if (n < s->bits_left)
- skip_remaining(s, n);
- else {
- n -= s->bits_left;
- s->cache = 0;
- s->bits_left = 0;
-
- if (n >= 64) {
- unsigned skip = (n / 8) * 8;
-
- n -= skip;
- s->index += skip;
- }
-#ifdef BITSTREAM_READER_LE
- refill_64(s, 1);
-#else
- refill_64(s, 0);
-#endif
- if (n)
- skip_remaining(s, n);
- }
-#else
OPEN_READER(re, s);
LAST_SKIP_BITS(re, s, n);
CLOSE_READER(re, s);
-#endif
}
static inline unsigned int get_bits1(GetBitContext *s)
{
-#if CACHED_BITSTREAM_READER
- if (!s->bits_left)
-#ifdef BITSTREAM_READER_LE
- refill_64(s, 1);
-#else
- refill_64(s, 0);
-#endif
-
-#ifdef BITSTREAM_READER_LE
- return get_val(s, 1, 1);
-#else
- return get_val(s, 1, 0);
-#endif
-#else
unsigned int index = s->index;
uint8_t result = s->buffer[index >> 3];
#ifdef BITSTREAM_READER_LE
result >>= index & 7;
result &= 1;
#else
result <<= index & 7;
result >>= 8 - 1;
#endif
#if !UNCHECKED_BITSTREAM_READER
if (s->index < s->size_in_bits_plus8)
#endif
index++;
s->index = index;
return result;
-#endif
}
static inline unsigned int show_bits1(GetBitContext *s)
{
return show_bits(s, 1);
}
static inline void skip_bits1(GetBitContext *s)
{
skip_bits(s, 1);
}
/**
* Read 0-32 bits.
*/
static inline unsigned int get_bits_long(GetBitContext *s, int n)
{
av_assert2(n>=0 && n<=32);
if (!n) {
return 0;
-#if CACHED_BITSTREAM_READER
- }
- return get_bits(s, n);
-#else
} else if (n <= MIN_CACHE_BITS) {
return get_bits(s, n);
} else {
#ifdef BITSTREAM_READER_LE
unsigned ret = get_bits(s, 16);
return ret | (get_bits(s, n - 16) << 16);
#else
unsigned ret = get_bits(s, 16) << (n - 16);
return ret | get_bits(s, n - 16);
#endif
}
-#endif
}
/**
@@ -596,88 +456,85 @@ static inline int get_sbits_long(GetBitContext *s, int n)
return sign_extend(get_bits_long(s, n), n);
}
+/**
+ * Read 0-64 bits as a signed integer.
+ */
+static inline int64_t get_sbits64(GetBitContext *s, int n)
+{
+ // sign_extend(x, 0) is undefined
+ if (!n)
+ return 0;
+
+ return sign_extend64(get_bits64(s, n), n);
+}
+
/**
* Show 0-32 bits.
*/
static inline unsigned int show_bits_long(GetBitContext *s, int n)
{
if (n <= MIN_CACHE_BITS) {
return show_bits(s, n);
} else {
GetBitContext gb = *s;
return get_bits_long(&gb, n);
}
}
-static inline int init_get_bits_xe(GetBitContext *s, const uint8_t *buffer,
- int bit_size, int is_le)
+
+/**
+ * Initialize GetBitContext.
+ * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes
+ * larger than the actual read bits because some optimized bitstream
+ * readers read 32 or 64 bit at once and could read over the end
+ * @param bit_size the size of the buffer in bits
+ * @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
+ */
+static inline int init_get_bits(GetBitContext *s, const uint8_t *buffer,
+ int bit_size)
{
int buffer_size;
int ret = 0;
if (bit_size >= INT_MAX - FFMAX(7, AV_INPUT_BUFFER_PADDING_SIZE*8) || bit_size < 0 || !buffer) {
bit_size = 0;
buffer = NULL;
ret = AVERROR_INVALIDDATA;
}
buffer_size = (bit_size + 7) >> 3;
s->buffer = buffer;
s->size_in_bits = bit_size;
s->size_in_bits_plus8 = bit_size + 8;
s->buffer_end = buffer + buffer_size;
s->index = 0;
-#if CACHED_BITSTREAM_READER
- s->cache = 0;
- s->bits_left = 0;
- refill_64(s, is_le);
-#endif
-
return ret;
}
-/**
- * Initialize GetBitContext.
- * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes
- * larger than the actual read bits because some optimized bitstream
- * readers read 32 or 64 bit at once and could read over the end
- * @param bit_size the size of the buffer in bits
- * @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
- */
-static inline int init_get_bits(GetBitContext *s, const uint8_t *buffer,
- int bit_size)
-{
-#ifdef BITSTREAM_READER_LE
- return init_get_bits_xe(s, buffer, bit_size, 1);
-#else
- return init_get_bits_xe(s, buffer, bit_size, 0);
-#endif
-}
-
/**
* Initialize GetBitContext.
* @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes
* larger than the actual read bits because some optimized bitstream
* readers read 32 or 64 bit at once and could read over the end
* @param byte_size the size of the buffer in bytes
* @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
*/
static inline int init_get_bits8(GetBitContext *s, const uint8_t *buffer,
int byte_size)
{
if (byte_size > INT_MAX / 8 || byte_size < 0)
byte_size = -1;
return init_get_bits(s, buffer, byte_size * 8);
}
static inline int init_get_bits8_le(GetBitContext *s, const uint8_t *buffer,
int byte_size)
{
if (byte_size > INT_MAX / 8 || byte_size < 0)
byte_size = -1;
- return init_get_bits_xe(s, buffer, byte_size * 8, 1);
+ return init_get_bits(s, buffer, byte_size * 8);
}
static inline const uint8_t *align_get_bits(GetBitContext *s)
@@ -762,60 +619,28 @@ static inline const uint8_t *align_get_bits(GetBitContext *s)
SKIP_BITS(name, gb, n); \
} while (0)
-/* Return the LUT element for the given bitstream configuration. */
-static inline int set_idx(GetBitContext *s, int code, int *n, int *nb_bits,
- const VLCElem *table)
-{
- unsigned idx;
-
- *nb_bits = -*n;
- idx = show_bits(s, *nb_bits) + code;
- *n = table[idx].len;
-
- return table[idx].sym;
-}
-
/**
* Parse a vlc code.
* @param bits is the number of bits which will be read at once, must be
* identical to nb_bits in init_vlc()
* @param max_depth is the number of times bits bits must be read to completely
* read the longest vlc code
* = (max_vlc_length + bits - 1) / bits
* @returns the code parsed or -1 if no vlc matches
*/
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table,
int bits, int max_depth)
{
-#if CACHED_BITSTREAM_READER
- int nb_bits;
- unsigned idx = show_bits(s, bits);
- int code = table[idx].sym;
- int n = table[idx].len;
-
- if (max_depth > 1 && n < 0) {
- skip_remaining(s, bits);
- code = set_idx(s, code, &n, &nb_bits, table);
- if (max_depth > 2 && n < 0) {
- skip_remaining(s, nb_bits);
- code = set_idx(s, code, &n, &nb_bits, table);
- }
- }
- skip_remaining(s, n);
-
- return code;
-#else
int code;
OPEN_READER(re, s);
UPDATE_CACHE(re, s);
GET_VLC(code, re, s, table, bits, max_depth);
CLOSE_READER(re, s);
return code;
-#endif
}
static inline int decode012(GetBitContext *gb)
@@ -855,4 +680,6 @@ static inline int skip_1stop_8data_bits(GetBitContext *gb)
return 0;
}
+#endif // CACHED_BITSTREAM_READER
+
#endif /* AVCODEC_GET_BITS_H */
diff --git a/media/ffvpx/libavcodec/internal.h b/media/ffvpx/libavcodec/internal.h
index 76a6ea6bc646..a283c52e0122 100644
--- a/media/ffvpx/libavcodec/internal.h
+++ b/media/ffvpx/libavcodec/internal.h
@@ -88,7 +88,6 @@ typedef struct AVCodecInternal {
* for decoding.
*/
AVPacket *last_pkt_props;
- struct AVFifo *pkt_props;
/**
* temporary buffer used for encoders to store their bitstream
diff --git a/media/ffvpx/libavcodec/libdav1d.c b/media/ffvpx/libavcodec/libdav1d.c
index 028929404e1c..2488a709c722 100644
--- a/media/ffvpx/libavcodec/libdav1d.c
+++ b/media/ffvpx/libavcodec/libdav1d.c
@@ -41,9 +41,6 @@
typedef struct Libdav1dContext {
AVClass *class;
Dav1dContext *c;
- /* This packet coincides with AVCodecInternal.in_pkt
- * and is not owned by us. */
- AVPacket *pkt;
AVBufferPool *pool;
int pool_size;
@@ -219,8 +216,6 @@ static av_cold int libdav1d_init(AVCodecContext *c)
#endif
int res;
- dav1d->pkt = c->internal->in_pkt;
-
av_log(c, AV_LOG_INFO, "libdav1d %s\n", dav1d_version());
dav1d_default_settings(&s);
@@ -265,87 +260,121 @@ static av_cold int libdav1d_init(AVCodecContext *c)
s.n_frame_threads, s.n_tile_threads);
#endif
+#if FF_DAV1D_VERSION_AT_LEAST(6,8)
+ if (c->skip_frame >= AVDISCARD_NONKEY)
+ s.decode_frame_type = DAV1D_DECODEFRAMETYPE_KEY;
+ else if (c->skip_frame >= AVDISCARD_NONINTRA)
+ s.decode_frame_type = DAV1D_DECODEFRAMETYPE_INTRA;
+ else if (c->skip_frame >= AVDISCARD_NONREF)
+ s.decode_frame_type = DAV1D_DECODEFRAMETYPE_REFERENCE;
+#endif
+
res = libdav1d_parse_extradata(c);
if (res < 0)
return res;
res = dav1d_open(&dav1d->c, &s);
if (res < 0)
return AVERROR(ENOMEM);
return 0;
}
static void libdav1d_flush(AVCodecContext *c)
{
Libdav1dContext *dav1d = c->priv_data;
dav1d_data_unref(&dav1d->data);
dav1d_flush(dav1d->c);
}
+typedef struct OpaqueData {
+ void *pkt_orig_opaque;
+#if FF_API_REORDERED_OPAQUE
+ int64_t reordered_opaque;
+#endif
+} OpaqueData;
+
static void libdav1d_data_free(const uint8_t *data, void *opaque) {
AVBufferRef *buf = opaque;
av_buffer_unref(&buf);
}
static void libdav1d_user_data_free(const uint8_t *data, void *opaque) {
+ AVPacket *pkt = opaque;
av_assert0(data == opaque);
- av_free(opaque);
+ av_free(pkt->opaque);
+ av_packet_free(&pkt);
}
static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
{
Libdav1dContext *dav1d = c->priv_data;
Dav1dData *data = &dav1d->data;
Dav1dPicture pic = { 0 }, *p = &pic;
+ AVPacket *pkt;
+ OpaqueData *od = NULL;
#if FF_DAV1D_VERSION_AT_LEAST(5,1)
enum Dav1dEventFlags event_flags = 0;
#endif
int res;
if (!data->sz) {
- AVPacket *const pkt = dav1d->pkt;
+ pkt = av_packet_alloc();
+
+ if (!pkt)
+ return AVERROR(ENOMEM);
res = ff_decode_get_packet(c, pkt);
- if (res < 0 && res != AVERROR_EOF)
+ if (res < 0 && res != AVERROR_EOF) {
+ av_packet_free(&pkt);
return res;
+ }
if (pkt->size) {
res = dav1d_data_wrap(data, pkt->data, pkt->size,
libdav1d_data_free, pkt->buf);
if (res < 0) {
- av_packet_unref(pkt);
+ av_packet_free(&pkt);
return res;
}
- data->m.timestamp = pkt->pts;
- data->m.offset = pkt->pos;
- data->m.duration = pkt->duration;
-
pkt->buf = NULL;
- av_packet_unref(pkt);
- if (c->reordered_opaque != AV_NOPTS_VALUE) {
- uint8_t *reordered_opaque = av_memdup(&c->reordered_opaque,
- sizeof(c->reordered_opaque));
- if (!reordered_opaque) {
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (
+#if FF_API_REORDERED_OPAQUE
+ c->reordered_opaque != AV_NOPTS_VALUE ||
+#endif
+ (pkt->opaque && (c->flags & AV_CODEC_FLAG_COPY_OPAQUE))) {
+ od = av_mallocz(sizeof(*od));
+ if (!od) {
+ av_packet_free(&pkt);
dav1d_data_unref(data);
return AVERROR(ENOMEM);
}
-
- res = dav1d_data_wrap_user_data(data, reordered_opaque,
- libdav1d_user_data_free, reordered_opaque);
- if (res < 0) {
- av_free(reordered_opaque);
- dav1d_data_unref(data);
- return res;
- }
+ od->pkt_orig_opaque = pkt->opaque;
+#if FF_API_REORDERED_OPAQUE
+ od->reordered_opaque = c->reordered_opaque;
+#endif
+FF_ENABLE_DEPRECATION_WARNINGS
}
- } else if (res >= 0) {
- av_packet_unref(pkt);
- return AVERROR(EAGAIN);
+ pkt->opaque = od;
+
+ res = dav1d_data_wrap_user_data(data, (const uint8_t *)pkt,
+ libdav1d_user_data_free, pkt);
+ if (res < 0) {
+ av_free(pkt->opaque);
+ av_packet_free(&pkt);
+ dav1d_data_unref(data);
+ return res;
+ }
+ pkt = NULL;
+ } else {
+ av_packet_free(&pkt);
+ if (res >= 0)
+ return AVERROR(EAGAIN);
}
}
@@ -410,17 +439,29 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
INT_MAX);
ff_set_sar(c, frame->sample_aspect_ratio);
- if (p->m.user_data.data)
- memcpy(&frame->reordered_opaque, p->m.user_data.data, sizeof(frame->reordered_opaque));
+ pkt = (AVPacket *)p->m.user_data.data;
+ od = pkt->opaque;
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (od && od->reordered_opaque != AV_NOPTS_VALUE)
+ frame->reordered_opaque = od->reordered_opaque;
else
frame->reordered_opaque = AV_NOPTS_VALUE;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ // restore the original user opaque value for
+ // ff_decode_frame_props_from_pkt()
+ pkt->opaque = od ? od->pkt_orig_opaque : NULL;
+ av_freep(&od);
// match timestamps and packet size
- frame->pts = p->m.timestamp;
- frame->pkt_dts = p->m.timestamp;
- frame->pkt_pos = p->m.offset;
- frame->pkt_size = p->m.size;
- frame->duration = p->m.duration;
+ res = ff_decode_frame_props_from_pkt(c, frame, pkt);
+ pkt->opaque = NULL;
+ if (res < 0)
+ goto fail;
+
+ frame->pkt_dts = pkt->pts;
frame->key_frame = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;
switch (p->frame_hdr->frame_type) {
@@ -594,7 +635,7 @@ const FFCodec ff_libdav1d_decoder = {
.flush = libdav1d_flush,
FF_CODEC_RECEIVE_FRAME_CB(libdav1d_receive_frame),
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
- .caps_internal = FF_CODEC_CAP_SETS_PKT_DTS |
+ .caps_internal = FF_CODEC_CAP_SETS_PKT_DTS | FF_CODEC_CAP_SETS_FRAME_PROPS |
FF_CODEC_CAP_AUTO_THREADS,
.p.priv_class = &libdav1d_class,
.p.wrapper_name = "libdav1d",
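From the application's point of view, the reworked user-data plumbing above means a per-packet opaque pointer survives the round trip through dav1d; a hedged caller-side sketch (decode_one and per_packet_cookie are made up, the flag and fields are public API):

#include <libavcodec/avcodec.h>

static int decode_one(AVCodecContext *dec, AVPacket *pkt, AVFrame *frame,
                      void *per_packet_cookie /* hypothetical user pointer */)
{
    int ret;

    // Assumes AV_CODEC_FLAG_COPY_OPAQUE is set on dec.
    pkt->opaque = per_packet_cookie;          // travels with this packet

    ret = avcodec_send_packet(dec, pkt);
    if (ret < 0)
        return ret;

    ret = avcodec_receive_frame(dec, frame);  // may be AVERROR(EAGAIN) while dav1d buffers
    // On success, frame->opaque holds the opaque of the packet this frame was
    // decoded from; without AV_CODEC_FLAG_COPY_OPAQUE it is not propagated.
    return ret;
}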
diff --git a/media/ffvpx/libavcodec/mathops.h b/media/ffvpx/libavcodec/mathops.h
index c89054d6edf5..a1dc323304b4 100644
--- a/media/ffvpx/libavcodec/mathops.h
+++ b/media/ffvpx/libavcodec/mathops.h
@@ -138,6 +138,15 @@ static inline av_const int sign_extend(int val, unsigned bits)
}
#endif
+#ifndef sign_extend64
+static inline av_const int64_t sign_extend64(int64_t val, unsigned bits)
+{
+ unsigned shift = 8 * sizeof(int64_t) - bits;
+ union { uint64_t u; int64_t s; } v = { (uint64_t) val << shift };
+ return v.s >> shift;
+}
+#endif
+
#ifndef zero_extend
static inline av_const unsigned zero_extend(unsigned val, unsigned bits)
{
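A quick standalone check of the sign_extend64() arithmetic added above; the reference below uses a plain cast instead of the union, which is equivalent on the platforms ffvpx targets (values worked out by hand for a 33-bit field, where bit 32 is the sign bit):

#include <assert.h>
#include <stdint.h>

static int64_t sign_extend64_ref(int64_t val, unsigned bits)
{
    unsigned shift = 64 - bits;                          // same idea as mathops.h
    return (int64_t)((uint64_t)val << shift) >> shift;   // arithmetic right shift
}

int main(void)
{
    assert(sign_extend64_ref(0x1FFFFFFFFLL, 33) == -1);            // all 33 bits set
    assert(sign_extend64_ref(0x100000000LL, 33) == -4294967296LL); // only sign bit set
    assert(sign_extend64_ref(0x0FFFFFFFFLL, 33) ==  4294967295LL); // max positive
    return 0;
}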
diff --git a/media/ffvpx/libavcodec/motion_est.h b/media/ffvpx/libavcodec/motion_est.h
index b20cdabbbb82..f6a563b08c7b 100644
--- a/media/ffvpx/libavcodec/motion_est.h
+++ b/media/ffvpx/libavcodec/motion_est.h
@@ -52,7 +52,6 @@ typedef struct MotionEstContext {
uint8_t *scratchpad; /**< data area for the ME algo, so that
* the ME does not need to malloc/free. */
uint8_t *temp;
- int best_bits;
uint32_t *map; ///< map to avoid duplicate evaluations
uint32_t *score_map; ///< map to store the scores
unsigned map_generation;
diff --git a/media/ffvpx/libavcodec/mpegpicture.h b/media/ffvpx/libavcodec/mpegpicture.h
index a1455ee13c2c..7919aa402ca5 100644
--- a/media/ffvpx/libavcodec/mpegpicture.h
+++ b/media/ffvpx/libavcodec/mpegpicture.h
@@ -76,6 +76,9 @@ typedef struct Picture {
int reference;
int shared;
+
+ int display_picture_number;
+ int coded_picture_number;
} Picture;
/**
diff --git a/media/ffvpx/libavcodec/mpegvideo.h b/media/ffvpx/libavcodec/mpegvideo.h
index ccec0dd75f2e..55828e610271 100644
--- a/media/ffvpx/libavcodec/mpegvideo.h
+++ b/media/ffvpx/libavcodec/mpegvideo.h
@@ -44,9 +44,6 @@
#include "pixblockdsp.h"
#include "put_bits.h"
#include "ratecontrol.h"
-#if FF_API_FLAG_TRUNCATED
-#include "parser.h"
-#endif
#include "mpegutils.h"
#include "qpeldsp.h"
#include "videodsp.h"
@@ -117,6 +114,7 @@ typedef struct MpegEncContext {
int input_picture_number; ///< used to set pic->display_picture_number, should not be used for/by anything else
int coded_picture_number; ///< used to set pic->coded_picture_number, should not be used for/by anything else
int picture_number; //FIXME remove, unclear definition
+ int extradata_parsed;
int picture_in_gop_number; ///< 0-> first pic in gop, ...
int mb_width, mb_height; ///< number of MBs horizontally & vertically
int mb_stride; ///< mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
@@ -174,6 +172,7 @@ typedef struct MpegEncContext {
Picture *last_picture_ptr; ///< pointer to the previous picture.
Picture *next_picture_ptr; ///< pointer to the next picture (for bidir pred)
Picture *current_picture_ptr; ///< pointer to the current picture
+ int skipped_last_frame;
int last_dc[3]; ///< last DC values for MPEG-1
int16_t *dc_val_base;
int16_t *dc_val[3]; ///< used for MPEG-4 DC prediction, all 3 arrays must be continuous
@@ -351,10 +350,6 @@ typedef struct MpegEncContext {
GetBitContext last_resync_gb; ///< used to search for the next resync marker
int mb_num_left; ///< number of MBs left in this video packet (for partitioned Slices only)
-#if FF_API_FLAG_TRUNCATED
- ParseContext parse_context;
-#endif
-
/* H.263 specific */
int gob_index;
int obmc; ///< overlapped block motion compensation
diff --git a/media/ffvpx/libavcodec/options.c b/media/ffvpx/libavcodec/options.c
index 2e05d29e1ee6..a9b35ee1c345 100644
--- a/media/ffvpx/libavcodec/options.c
+++ b/media/ffvpx/libavcodec/options.c
@@ -124,7 +124,11 @@ static int init_context_defaults(AVCodecContext *s, const AVCodec *codec)
s->sw_pix_fmt = AV_PIX_FMT_NONE;
s->sample_fmt = AV_SAMPLE_FMT_NONE;
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
s->reordered_opaque = AV_NOPTS_VALUE;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
if(codec && codec2->priv_data_size){
s->priv_data = av_mallocz(codec2->priv_data_size);
if (!s->priv_data)
@@ -185,39 +189,6 @@ const AVClass *avcodec_get_class(void)
return &av_codec_context_class;
}
-#if FF_API_GET_FRAME_CLASS
-FF_DISABLE_DEPRECATION_WARNINGS
-#define FOFFSET(x) offsetof(AVFrame,x)
-
-static const AVOption frame_options[]={
-{"best_effort_timestamp", "", FOFFSET(best_effort_timestamp), AV_OPT_TYPE_INT64, {.i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, 0},
-{"pkt_pos", "", FOFFSET(pkt_pos), AV_OPT_TYPE_INT64, {.i64 = -1 }, INT64_MIN, INT64_MAX, 0},
-{"pkt_size", "", FOFFSET(pkt_size), AV_OPT_TYPE_INT64, {.i64 = -1 }, INT64_MIN, INT64_MAX, 0},
-{"sample_aspect_ratio", "", FOFFSET(sample_aspect_ratio), AV_OPT_TYPE_RATIONAL, {.dbl = 0 }, 0, INT_MAX, 0},
-{"width", "", FOFFSET(width), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, 0},
-{"height", "", FOFFSET(height), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, 0},
-{"format", "", FOFFSET(format), AV_OPT_TYPE_INT, {.i64 = -1 }, 0, INT_MAX, 0},
-#if FF_API_OLD_CHANNEL_LAYOUT
-{"channel_layout", "", FOFFSET(channel_layout), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, 0},
-#endif
-{"sample_rate", "", FOFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, 0},
-{NULL},
-};
-
-static const AVClass av_frame_class = {
- .class_name = "AVFrame",
- .item_name = NULL,
- .option = frame_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-const AVClass *avcodec_get_frame_class(void)
-{
- return &av_frame_class;
-}
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
#define SROFFSET(x) offsetof(AVSubtitleRect,x)
static const AVOption subtitle_rect_options[]={
diff --git a/media/ffvpx/libavcodec/options_table.h b/media/ffvpx/libavcodec/options_table.h
index cd02f5096f18..4fea57673ad0 100644
--- a/media/ffvpx/libavcodec/options_table.h
+++ b/media/ffvpx/libavcodec/options_table.h
@@ -58,13 +58,12 @@ static const AVOption avcodec_options[] = {
{"loop", "use loop filter", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_LOOP_FILTER }, INT_MIN, INT_MAX, V|E, "flags"},
{"qscale", "use fixed qscale", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_QSCALE }, INT_MIN, INT_MAX, 0, "flags"},
{"recon_frame", "export reconstructed frames", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_RECON_FRAME}, .unit = "flags"},
+{"copy_opaque", "propagate opaque values", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_COPY_OPAQUE}, .unit = "flags"},
+{"frame_duration", "use frame durations", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_FRAME_DURATION}, .unit = "flags"},
{"pass1", "use internal 2-pass ratecontrol in first pass mode", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_PASS1 }, INT_MIN, INT_MAX, 0, "flags"},
{"pass2", "use internal 2-pass ratecontrol in second pass mode", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_PASS2 }, INT_MIN, INT_MAX, 0, "flags"},
{"gray", "only decode/encode grayscale", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_GRAY }, INT_MIN, INT_MAX, V|E|D, "flags"},
{"psnr", "error[?] variables will be set during encoding", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_PSNR }, INT_MIN, INT_MAX, V|E, "flags"},
-#if FF_API_FLAG_TRUNCATED
-{"truncated", "(Deprecated, use parsers instead.) Input bitstream might be randomly truncated", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_TRUNCATED }, INT_MIN, INT_MAX, V|D | AV_OPT_FLAG_DEPRECATED, "flags"},
-#endif
{"ildct", "use interlaced DCT", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_INTERLACED_DCT }, INT_MIN, INT_MAX, V|E, "flags"},
{"low_delay", "force low delay", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_LOW_DELAY }, INT_MIN, INT_MAX, V|D|E, "flags"},
{"global_header", "place global headers in extradata instead of every keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_GLOBAL_HEADER }, INT_MIN, INT_MAX, V|A|E, "flags"},
@@ -98,7 +97,7 @@ static const AVOption avcodec_options[] = {
#endif
{"cutoff", "set cutoff bandwidth", OFFSET(cutoff), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
{"frame_size", NULL, OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|E},
-{"frame_number", NULL, OFFSET(frame_number), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
+{"frame_number", NULL, OFFSET(frame_num), AV_OPT_TYPE_INT64, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
{"delay", NULL, OFFSET(delay), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
{"qcomp", "video quantizer scale compression (VBR). Constant of ratecontrol equation. "
"Recommended range for default rc_eq: 0.0-1.0",
@@ -377,28 +376,25 @@ static const AVOption avcodec_options[] = {
{"auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_CHARENC_MODE_AUTOMATIC}, INT_MIN, INT_MAX, S|D, "sub_charenc_mode"},
{"pre_decoder", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_CHARENC_MODE_PRE_DECODER}, INT_MIN, INT_MAX, S|D, "sub_charenc_mode"},
{"ignore", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_CHARENC_MODE_IGNORE}, INT_MIN, INT_MAX, S|D, "sub_charenc_mode"},
-#if FF_API_SUB_TEXT_FORMAT
-{"sub_text_format", "Deprecated, does nothing", OFFSET(sub_text_format), AV_OPT_TYPE_INT, {.i64 = FF_SUB_TEXT_FMT_ASS}, 0, 1, S|D | AV_OPT_FLAG_DEPRECATED, "sub_text_format"},
-{"ass", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_TEXT_FMT_ASS}, INT_MIN, INT_MAX, S|D, "sub_text_format"},
-#endif
{"apply_cropping", NULL, OFFSET(apply_cropping), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, V | D },
{"skip_alpha", "Skip processing alpha", OFFSET(skip_alpha), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, V|D },
{"field_order", "Field order", OFFSET(field_order), AV_OPT_TYPE_INT, {.i64 = AV_FIELD_UNKNOWN }, 0, 5, V|D|E, "field_order" },
{"progressive", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_PROGRESSIVE }, 0, 0, V|D|E, "field_order" },
{"tt", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_TT }, 0, 0, V|D|E, "field_order" },
{"bb", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_BB }, 0, 0, V|D|E, "field_order" },
{"tb", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_TB }, 0, 0, V|D|E, "field_order" },
{"bt", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_BT }, 0, 0, V|D|E, "field_order" },
{"dump_separator", "set information dump field separator", OFFSET(dump_separator), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, A|V|S|D|E},
{"codec_whitelist", "List of decoders that are allowed to be used", OFFSET(codec_whitelist), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, A|V|S|D },
{"pixel_format", "set pixel format", OFFSET(pix_fmt), AV_OPT_TYPE_PIXEL_FMT, {.i64=AV_PIX_FMT_NONE}, -1, INT_MAX, 0 },
{"video_size", "set video size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str=NULL}, 0, INT_MAX, 0 },
{"max_pixels", "Maximum number of pixels", OFFSET(max_pixels), AV_OPT_TYPE_INT64, {.i64 = INT_MAX }, 0, INT_MAX, A|V|S|D|E },
{"max_samples", "Maximum number of samples", OFFSET(max_samples), AV_OPT_TYPE_INT64, {.i64 = INT_MAX }, 0, INT_MAX, A|D|E },
{"hwaccel_flags", NULL, OFFSET(hwaccel_flags), AV_OPT_TYPE_FLAGS, {.i64 = AV_HWACCEL_FLAG_IGNORE_LEVEL }, 0, UINT_MAX, V|D, "hwaccel_flags"},
{"ignore_level", "ignore level even if the codec level used is unknown or higher than the maximum supported level reported by the hardware driver", 0, AV_OPT_TYPE_CONST, { .i64 = AV_HWACCEL_FLAG_IGNORE_LEVEL }, INT_MIN, INT_MAX, V | D, "hwaccel_flags" },
{"allow_high_depth", "allow to output YUV pixel formats with a different chroma sampling than 4:2:0 and/or other than 8 bits per component", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH }, INT_MIN, INT_MAX, V | D, "hwaccel_flags"},
{"allow_profile_mismatch", "attempt to decode anyway if HW accelerated decoder's supported profiles do not exactly match the stream", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH }, INT_MIN, INT_MAX, V | D, "hwaccel_flags"},
+{"unsafe_output", "allow potentially unsafe hwaccel frame output that might require special care to process successfully", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_UNSAFE_OUTPUT }, INT_MIN, INT_MAX, V | D, "hwaccel_flags"},
{"extra_hw_frames", "Number of extra hardware frames to allocate for the user", OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, V|D },
{"discard_damaged_percentage", "Percentage of damaged samples to discard a frame", OFFSET(discard_damaged_percentage), AV_OPT_TYPE_INT, {.i64 = 95 }, 0, 100, V|D },
{NULL},
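Because these are ordinary AVOption flag constants, the new names are also reachable by string from application code; for example (sketch, error handling omitted):

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static void enable_new_flags(AVCodecContext *avctx)
{
    // '+' adds the named constant to the existing "flags" bitmask.
    av_opt_set(avctx, "flags", "+copy_opaque+frame_duration", 0);

    // Decoders only: allow hwaccel output that may need special handling.
    av_opt_set(avctx, "hwaccel_flags", "+unsafe_output", 0);
}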
diff --git a/media/ffvpx/libavcodec/profiles.c b/media/ffvpx/libavcodec/profiles.c
index 7af7fbeb1302..2230fc5415d0 100644
--- a/media/ffvpx/libavcodec/profiles.c
+++ b/media/ffvpx/libavcodec/profiles.c
@@ -85,6 +85,7 @@ const AVProfile ff_hevc_profiles[] = {
{ FF_PROFILE_HEVC_MAIN_10, "Main 10" },
{ FF_PROFILE_HEVC_MAIN_STILL_PICTURE, "Main Still Picture" },
{ FF_PROFILE_HEVC_REXT, "Rext" },
+ { FF_PROFILE_HEVC_SCC, "Scc" },
{ FF_PROFILE_UNKNOWN },
};
diff --git a/media/ffvpx/libavcodec/pthread.c b/media/ffvpx/libavcodec/pthread.c
index 60ba87dac484..ca84b81391da 100644
--- a/media/ffvpx/libavcodec/pthread.c
+++ b/media/ffvpx/libavcodec/pthread.c
@@ -48,9 +48,6 @@
static void validate_thread_parameters(AVCodecContext *avctx)
{
int frame_threading_supported = (avctx->codec->capabilities & AV_CODEC_CAP_FRAME_THREADS)
-#if FF_API_FLAG_TRUNCATED
- && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED)
-#endif
&& !(avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
&& !(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS);
if (avctx->thread_count == 1) {
diff --git a/media/ffvpx/libavcodec/pthread_frame.c b/media/ffvpx/libavcodec/pthread_frame.c
index df82a4125f29..d9d5afaa82d8 100644
--- a/media/ffvpx/libavcodec/pthread_frame.c
+++ b/media/ffvpx/libavcodec/pthread_frame.c
@@ -99,22 +99,6 @@ typedef struct PerThreadContext {
atomic_int state;
-#if FF_API_THREAD_SAFE_CALLBACKS
- /**
- * Array of frames passed to ff_thread_release_buffer().
- * Frames are released after all threads referencing them are finished.
- */
- AVFrame **released_buffers;
- int num_released_buffers;
- int released_buffers_allocated;
-
- AVFrame *requested_frame; ///< AVFrame the codec passed to get_buffer()
- int requested_flags; ///< flags passed to get_buffer() for requested_frame
-
- const enum AVPixelFormat *available_formats; ///< Format array for get_format()
- enum AVPixelFormat result_format; ///< get_format() result
-#endif
-
int die; ///< Set when the thread should exit.
int hwaccel_serializing;
@@ -156,11 +140,6 @@ typedef struct FrameThreadContext {
void *stash_hwaccel_priv;
} FrameThreadContext;
-#if FF_API_THREAD_SAFE_CALLBACKS
-#define THREAD_SAFE_CALLBACKS(avctx) \
-((avctx)->thread_safe_callbacks || (avctx)->get_buffer2 == avcodec_default_get_buffer2)
-#endif
-
static void async_lock(FrameThreadContext *fctx)
{
pthread_mutex_lock(&fctx->async_mutex);
@@ -212,14 +191,8 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
if (p->die) break;
-FF_DISABLE_DEPRECATION_WARNINGS
- if (!codec->update_thread_context
-#if FF_API_THREAD_SAFE_CALLBACKS
- && THREAD_SAFE_CALLBACKS(avctx)
-#endif
- )
+ if (!codec->update_thread_context)
ff_thread_finish_setup(avctx);
-FF_ENABLE_DEPRECATION_WARNINGS
/* If a decoder supports hwaccel, then it must call ff_get_format().
* Since that call must happen before ff_thread_finish_setup(), the
@@ -371,93 +344,78 @@ FF_ENABLE_DEPRECATION_WARNINGS
*/
static int update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
{
+ int err;
+
dst->flags = src->flags;
dst->draw_horiz_band= src->draw_horiz_band;
dst->get_buffer2 = src->get_buffer2;
dst->opaque = src->opaque;
dst->debug = src->debug;
dst->slice_flags = src->slice_flags;
dst->flags2 = src->flags2;
dst->export_side_data = src->export_side_data;
dst->skip_loop_filter = src->skip_loop_filter;
dst->skip_idct = src->skip_idct;
dst->skip_frame = src->skip_frame;
- dst->frame_number = src->frame_number;
- dst->reordered_opaque = src->reordered_opaque;
-#if FF_API_THREAD_SAFE_CALLBACKS
+ dst->frame_num = src->frame_num;
+#if FF_API_AVCTX_FRAME_NUMBER
FF_DISABLE_DEPRECATION_WARNINGS
- dst->thread_safe_callbacks = src->thread_safe_callbacks;
+ dst->frame_number = src->frame_number;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
+ dst->reordered_opaque = src->reordered_opaque;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (src->slice_count && src->slice_offset) {
if (dst->slice_count < src->slice_count) {
int err = av_reallocp_array(&dst->slice_offset, src->slice_count,
sizeof(*dst->slice_offset));
if (err < 0)
return err;
}
memcpy(dst->slice_offset, src->slice_offset,
src->slice_count * sizeof(*dst->slice_offset));
}
dst->slice_count = src->slice_count;
+
+ av_packet_unref(dst->internal->last_pkt_props);
+ err = av_packet_copy_props(dst->internal->last_pkt_props, src->internal->last_pkt_props);
+ if (err < 0)
+ return err;
+
return 0;
}
-#if FF_API_THREAD_SAFE_CALLBACKS
-/// Releases the buffers that this decoding thread was the last user of.
-static void release_delayed_buffers(PerThreadContext *p)
-{
- FrameThreadContext *fctx = p->parent;
-
- while (p->num_released_buffers > 0) {
- AVFrame *f;
-
- pthread_mutex_lock(&fctx->buffer_mutex);
-
- // fix extended data in case the caller screwed it up
- av_assert0(p->avctx->codec_type == AVMEDIA_TYPE_VIDEO ||
- p->avctx->codec_type == AVMEDIA_TYPE_AUDIO);
- f = p->released_buffers[--p->num_released_buffers];
- f->extended_data = f->data;
- av_frame_unref(f);
-
- pthread_mutex_unlock(&fctx->buffer_mutex);
- }
-}
-#endif
-
static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
AVPacket *avpkt)
{
FrameThreadContext *fctx = p->parent;
PerThreadContext *prev_thread = fctx->prev_thread;
const AVCodec *codec = p->avctx->codec;
int ret;
if (!avpkt->size && !(codec->capabilities & AV_CODEC_CAP_DELAY))
return 0;
pthread_mutex_lock(&p->mutex);
ret = update_context_from_user(p->avctx, user_avctx);
if (ret) {
pthread_mutex_unlock(&p->mutex);
return ret;
}
atomic_store_explicit(&p->debug_threads,
(p->avctx->debug & FF_DEBUG_THREADS) != 0,
memory_order_relaxed);
-#if FF_API_THREAD_SAFE_CALLBACKS
- release_delayed_buffers(p);
-#endif
-
if (prev_thread) {
int err;
if (atomic_load(&prev_thread->state) == STATE_SETTING_UP) {
@@ -492,44 +450,6 @@ static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
pthread_cond_signal(&p->input_cond);
pthread_mutex_unlock(&p->mutex);
-#if FF_API_THREAD_SAFE_CALLBACKS
-FF_DISABLE_DEPRECATION_WARNINGS
- /*
- * If the client doesn't have a thread-safe get_buffer(),
- * then decoding threads call back to the main thread,
- * and it calls back to the client here.
- */
-
- if (!p->avctx->thread_safe_callbacks && (
- p->avctx->get_format != avcodec_default_get_format ||
- p->avctx->get_buffer2 != avcodec_default_get_buffer2)) {
- while (atomic_load(&p->state) != STATE_SETUP_FINISHED && atomic_load(&p->state) != STATE_INPUT_READY) {
- int call_done = 1;
- pthread_mutex_lock(&p->progress_mutex);
- while (atomic_load(&p->state) == STATE_SETTING_UP)
- pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
-
- switch (atomic_load_explicit(&p->state, memory_order_acquire)) {
- case STATE_GET_BUFFER:
- p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags);
- break;
- case STATE_GET_FORMAT:
- p->result_format = ff_get_format(p->avctx, p->available_formats);
- break;
- default:
- call_done = 0;
- break;
- }
- if (call_done) {
- atomic_store(&p->state, STATE_SETTING_UP);
- pthread_cond_signal(&p->progress_cond);
- }
- pthread_mutex_unlock(&p->progress_mutex);
- }
- }
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
fctx->prev_thread = p;
fctx->next_decoding++;
@@ -760,21 +680,16 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
if (codec->close && p->thread_init != UNINITIALIZED)
codec->close(ctx);
-#if FF_API_THREAD_SAFE_CALLBACKS
- release_delayed_buffers(p);
- for (int j = 0; j < p->released_buffers_allocated; j++)
- av_frame_free(&p->released_buffers[j]);
- av_freep(&p->released_buffers);
-#endif
if (ctx->priv_data) {
if (codec->p.priv_class)
av_opt_free(ctx->priv_data);
av_freep(&ctx->priv_data);
}
av_freep(&ctx->slice_offset);
av_buffer_unref(&ctx->internal->pool);
+ av_packet_free(&ctx->internal->last_pkt_props);
av_freep(&ctx->internal);
av_buffer_unref(&ctx->hw_frames_ctx);
}
@@ -848,11 +763,14 @@ static av_cold int init_thread(PerThreadContext *p, int *threads_to_free,
if (!(p->frame = av_frame_alloc()) ||
!(p->avpkt = av_packet_alloc()))
return AVERROR(ENOMEM);
- copy->internal->last_pkt_props = p->avpkt;
if (!first)
copy->internal->is_copy = 1;
+ copy->internal->last_pkt_props = av_packet_alloc();
+ if (!copy->internal->last_pkt_props)
+ return AVERROR(ENOMEM);
+
if (codec->init) {
err = codec->init(copy);
if (err < 0) {
@@ -959,116 +877,48 @@ void ff_thread_flush(AVCodecContext *avctx)
av_frame_unref(p->frame);
p->result = 0;
-#if FF_API_THREAD_SAFE_CALLBACKS
- release_delayed_buffers(p);
-#endif
-
if (ffcodec(avctx->codec)->flush)
ffcodec(avctx->codec)->flush(p->avctx);
}
}
int ff_thread_can_start_frame(AVCodecContext *avctx)
{
PerThreadContext *p = avctx->internal->thread_ctx;
-FF_DISABLE_DEPRECATION_WARNINGS
+
if ((avctx->active_thread_type&FF_THREAD_FRAME) && atomic_load(&p->state) != STATE_SETTING_UP &&
- (ffcodec(avctx->codec)->update_thread_context
-#if FF_API_THREAD_SAFE_CALLBACKS
- || !THREAD_SAFE_CALLBACKS(avctx)
-#endif
- )) {
+ ffcodec(avctx->codec)->update_thread_context) {
return 0;
}
-FF_ENABLE_DEPRECATION_WARNINGS
+
return 1;
}
static int thread_get_buffer_internal(AVCodecContext *avctx, AVFrame *f, int flags)
{
PerThreadContext *p;
int err;
if (!(avctx->active_thread_type & FF_THREAD_FRAME))
return ff_get_buffer(avctx, f, flags);
p = avctx->internal->thread_ctx;
FF_DISABLE_DEPRECATION_WARNINGS
if (atomic_load(&p->state) != STATE_SETTING_UP &&
- (ffcodec(avctx->codec)->update_thread_context
-#if FF_API_THREAD_SAFE_CALLBACKS
- || !THREAD_SAFE_CALLBACKS(avctx)
-#endif
- )) {
+ ffcodec(avctx->codec)->update_thread_context) {
FF_ENABLE_DEPRECATION_WARNINGS
av_log(avctx, AV_LOG_ERROR, "get_buffer() cannot be called after ff_thread_finish_setup()\n");
return -1;
}
pthread_mutex_lock(&p->parent->buffer_mutex);
-#if !FF_API_THREAD_SAFE_CALLBACKS
err = ff_get_buffer(avctx, f, flags);
-#else
-FF_DISABLE_DEPRECATION_WARNINGS
- if (THREAD_SAFE_CALLBACKS(avctx)) {
- err = ff_get_buffer(avctx, f, flags);
- } else {
- pthread_mutex_lock(&p->progress_mutex);
- p->requested_frame = f;
- p->requested_flags = flags;
- atomic_store_explicit(&p->state, STATE_GET_BUFFER, memory_order_release);
- pthread_cond_broadcast(&p->progress_cond);
-
- while (atomic_load(&p->state) != STATE_SETTING_UP)
- pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
-
- err = p->result;
-
- pthread_mutex_unlock(&p->progress_mutex);
-
- }
- if (!THREAD_SAFE_CALLBACKS(avctx) && !ffcodec(avctx->codec)->update_thread_context)
- ff_thread_finish_setup(avctx);
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
pthread_mutex_unlock(&p->parent->buffer_mutex);
return err;
}
-#if FF_API_THREAD_SAFE_CALLBACKS
-FF_DISABLE_DEPRECATION_WARNINGS
-enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
-{
- enum AVPixelFormat res;
- PerThreadContext *p;
- if (!(avctx->active_thread_type & FF_THREAD_FRAME) || avctx->thread_safe_callbacks ||
- avctx->get_format == avcodec_default_get_format)
- return ff_get_format(avctx, fmt);
-
- p = avctx->internal->thread_ctx;
- if (atomic_load(&p->state) != STATE_SETTING_UP) {
- av_log(avctx, AV_LOG_ERROR, "get_format() cannot be called after ff_thread_finish_setup()\n");
- return -1;
- }
- pthread_mutex_lock(&p->progress_mutex);
- p->available_formats = fmt;
- atomic_store(&p->state, STATE_GET_FORMAT);
- pthread_cond_broadcast(&p->progress_cond);
-
- while (atomic_load(&p->state) != STATE_SETTING_UP)
- pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
-
- res = p->result_format;
-
- pthread_mutex_unlock(&p->progress_mutex);
-
- return res;
-}
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
{
int ret = thread_get_buffer_internal(avctx, f, flags);
@@ -1110,69 +960,13 @@ int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
{
-#if FF_API_THREAD_SAFE_CALLBACKS
-FF_DISABLE_DEPRECATION_WARNINGS
- PerThreadContext *p;
- FrameThreadContext *fctx;
- AVFrame *dst;
- int ret = 0;
- int can_direct_free = !(avctx->active_thread_type & FF_THREAD_FRAME) ||
- THREAD_SAFE_CALLBACKS(avctx);
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
if (!f)
return;
if (avctx->debug & FF_DEBUG_BUFFERS)
av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);
-#if !FF_API_THREAD_SAFE_CALLBACKS
av_frame_unref(f);
-#else
- // when the frame buffers are not allocated, just reset it to clean state
- if (can_direct_free || !f->buf[0]) {
- av_frame_unref(f);
- return;
- }
-
- p = avctx->internal->thread_ctx;
- fctx = p->parent;
- pthread_mutex_lock(&fctx->buffer_mutex);
-
- if (p->num_released_buffers == p->released_buffers_allocated) {
- AVFrame **tmp = av_realloc_array(p->released_buffers, p->released_buffers_allocated + 1,
- sizeof(*p->released_buffers));
- if (tmp) {
- tmp[p->released_buffers_allocated] = av_frame_alloc();
- p->released_buffers = tmp;
- }
-
- if (!tmp || !tmp[p->released_buffers_allocated]) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
- p->released_buffers_allocated++;
- }
-
- dst = p->released_buffers[p->num_released_buffers];
- av_frame_move_ref(dst, f);
-
- p->num_released_buffers++;
-
-fail:
- pthread_mutex_unlock(&fctx->buffer_mutex);
-
- // make sure the frame is clean even if we fail to free it
- // this leaks, but it is better than crashing
- if (ret < 0) {
- av_log(avctx, AV_LOG_ERROR, "Could not queue a frame for freeing, this will leak\n");
- memset(f->buf, 0, sizeof(f->buf));
- if (f->extended_buf)
- memset(f->extended_buf, 0, f->nb_extended_buf * sizeof(*f->extended_buf));
- av_frame_unref(f);
- }
-#endif
}
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
diff --git a/media/ffvpx/libavcodec/put_bits.h b/media/ffvpx/libavcodec/put_bits.h
index 4b4f977ad5b1..4561dc131ac1 100644
--- a/media/ffvpx/libavcodec/put_bits.h
+++ b/media/ffvpx/libavcodec/put_bits.h
@@ -363,6 +363,13 @@ static inline void put_bits64(PutBitContext *s, int n, uint64_t value)
}
}
+static inline void put_sbits63(PutBitContext *pb, int n, int64_t value)
+{
+ av_assert2(n >= 0 && n < 64);
+
+ put_bits64(pb, n, (uint64_t)(value) & (~(UINT64_MAX << n)));
+}
+
/**
* Return the pointer to the byte where the bitstream writer will put
* the next bit.
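The put_sbits63() helper added above stores the low n bits of a signed value by masking its two's-complement representation; the mask ~(UINT64_MAX << n) is also why n must stay below 64 (see the av_assert2). A minimal standalone sketch of that masking, not the PutBitContext API itself, assuming n = 5 and value = -3:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int n = 5;
    int64_t value = -3;
    /* same masking as put_sbits63(): keep only the low n bits */
    uint64_t bits = (uint64_t)value & ~(UINT64_MAX << n);

    printf("%#llx\n", (unsigned long long)bits); /* 0x1d, i.e. 11101 = -3 in 5-bit two's complement */
    return 0;
}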
diff --git a/media/ffvpx/libavcodec/ratecontrol.h b/media/ffvpx/libavcodec/ratecontrol.h
index 2a7aaec644c4..4de80fad9048 100644
--- a/media/ffvpx/libavcodec/ratecontrol.h
+++ b/media/ffvpx/libavcodec/ratecontrol.h
@@ -80,9 +80,6 @@ typedef struct RateControlContext{
int frame_count[5];
int last_non_b_pict_type;
- void *non_lavc_opaque; ///< context for non lavc rc code (for example xvid)
- float dry_run_qscale; ///< for xvid rc
- int last_picture_number; ///< for xvid rc
AVExpr * rc_eq_eval;
}RateControlContext;
diff --git a/media/ffvpx/libavcodec/thread.h b/media/ffvpx/libavcodec/thread.h
index d5673f25eaff..88a14cfeb12a 100644
--- a/media/ffvpx/libavcodec/thread.h
+++ b/media/ffvpx/libavcodec/thread.h
@@ -62,19 +62,7 @@ int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture,
*/
void ff_thread_finish_setup(AVCodecContext *avctx);
-#if FF_API_THREAD_SAFE_CALLBACKS
-/**
- * Wrapper around get_format() for frame-multithreaded codecs.
- * Call this function instead of avctx->get_format().
- * Cannot be called after the codec has called ff_thread_finish_setup().
- *
- * @param avctx The current context.
- * @param fmt The list of available formats.
- */
-enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt);
-#else
#define ff_thread_get_format ff_get_format
-#endif
/**
* Wrapper around get_buffer() for frame-multithreaded codecs.
diff --git a/media/ffvpx/libavcodec/utils.c b/media/ffvpx/libavcodec/utils.c
index 2b63a498b9ab..599da21dba29 100644
--- a/media/ffvpx/libavcodec/utils.c
+++ b/media/ffvpx/libavcodec/utils.c
@@ -243,6 +243,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
case AV_PIX_FMT_GBRAP16BE:
w_align = 16; //FIXME assume 16 pixel per macroblock
h_align = 16 * 2; // interlaced needs 2 macroblocks height
+ if (s->codec_id == AV_CODEC_ID_BINKVIDEO)
+ w_align = 16*2;
break;
case AV_PIX_FMT_YUV411P:
case AV_PIX_FMT_YUVJ411P:
@@ -321,19 +323,23 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
*width = FFALIGN(*width, w_align);
*height = FFALIGN(*height, h_align);
if (s->codec_id == AV_CODEC_ID_H264 || s->lowres ||
+ s->codec_id == AV_CODEC_ID_VC1 || s->codec_id == AV_CODEC_ID_WMV3 ||
s->codec_id == AV_CODEC_ID_VP5 || s->codec_id == AV_CODEC_ID_VP6 ||
s->codec_id == AV_CODEC_ID_VP6F || s->codec_id == AV_CODEC_ID_VP6A
) {
// some of the optimized chroma MC reads one line too much
// which is also done in mpeg decoders with lowres > 0
*height += 2;
// H.264 uses edge emulation for out of frame motion vectors, for this
// it requires a temporary area large enough to hold a 21x21 block,
// increasing width ensures that the temporary area is large enough,
// the next rounded up width is 32
*width = FFMAX(*width, 32);
}
+ if (s->codec_id == AV_CODEC_ID_SVQ3) {
+ *width = FFMAX(*width, 32);
+ }
for (i = 0; i < 4; i++)
linesize_align[i] = STRIDE_ALIGN;
@@ -514,7 +520,9 @@ int av_get_exact_bits_per_sample(enum AVCodecID codec_id)
case AV_CODEC_ID_PCM_SGA:
case AV_CODEC_ID_PCM_U8:
case AV_CODEC_ID_SDX2_DPCM:
+ case AV_CODEC_ID_CBD2_DPCM:
case AV_CODEC_ID_DERF_DPCM:
+ case AV_CODEC_ID_WADY_DPCM:
return 8;
case AV_CODEC_ID_PCM_S16BE:
case AV_CODEC_ID_PCM_S16BE_PLANAR:
@@ -765,6 +773,9 @@ static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
case AV_CODEC_ID_ADPCM_MTAF:
tmp = blocks * (ba - 16LL) * 2 / ch;
break;
+ case AV_CODEC_ID_ADPCM_XMD:
+ tmp = blocks * 32;
+ break;
}
if (tmp) {
if (tmp != (int)tmp)
@@ -906,11 +917,6 @@ int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
#if !HAVE_THREADS
-enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
-{
- return ff_get_format(avctx, fmt);
-}
-
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
{
return ff_get_buffer(avctx, f, flags);
diff --git a/media/ffvpx/libavcodec/vaapi_av1.c b/media/ffvpx/libavcodec/vaapi_av1.c
index 63374c31c986..d0339b2705a8 100644
--- a/media/ffvpx/libavcodec/vaapi_av1.c
+++ b/media/ffvpx/libavcodec/vaapi_av1.c
@@ -274,7 +274,7 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx,
};
for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
- if (pic_param.pic_info_fields.bits.frame_type == AV1_FRAME_KEY)
+ if (pic_param.pic_info_fields.bits.frame_type == AV1_FRAME_KEY && frame_header->show_frame)
pic_param.ref_frame_map[i] = VA_INVALID_ID;
else
pic_param.ref_frame_map[i] = ctx->ref_tab[i].valid ?
diff --git a/media/ffvpx/libavcodec/vaapi_decode.c b/media/ffvpx/libavcodec/vaapi_decode.c
index 134f10eca5fd..ab8c12e36423 100644
--- a/media/ffvpx/libavcodec/vaapi_decode.c
+++ b/media/ffvpx/libavcodec/vaapi_decode.c
@@ -410,7 +410,9 @@ static const struct {
#endif
#if VA_CHECK_VERSION(1, 2, 0) && CONFIG_HEVC_VAAPI_HWACCEL
MAP(HEVC, HEVC_REXT, None,
- ff_vaapi_parse_hevc_rext_profile ),
+ ff_vaapi_parse_hevc_rext_scc_profile ),
+ MAP(HEVC, HEVC_SCC, None,
+ ff_vaapi_parse_hevc_rext_scc_profile ),
#endif
MAP(MJPEG, MJPEG_HUFFMAN_BASELINE_DCT,
JPEGBaseline),
diff --git a/media/ffvpx/libavcodec/vaapi_hevc.h b/media/ffvpx/libavcodec/vaapi_hevc.h
index b3b0e6fc1e98..449635d0d706 100644
--- a/media/ffvpx/libavcodec/vaapi_hevc.h
+++ b/media/ffvpx/libavcodec/vaapi_hevc.h
@@ -22,6 +22,6 @@
#include <va/va.h>
#include "avcodec.h"
-VAProfile ff_vaapi_parse_hevc_rext_profile(AVCodecContext *avctx);
+VAProfile ff_vaapi_parse_hevc_rext_scc_profile(AVCodecContext *avctx);
#endif /* AVCODEC_VAAPI_HEVC_H */
diff --git a/media/ffvpx/libavcodec/version.h b/media/ffvpx/libavcodec/version.h
index 43d0d9a9fcb1..7aa95fc3f1c1 100644
--- a/media/ffvpx/libavcodec/version.h
+++ b/media/ffvpx/libavcodec/version.h
@@ -29,8 +29,8 @@
#include "version_major.h"
-#define LIBAVCODEC_VERSION_MINOR 51
-#define LIBAVCODEC_VERSION_MICRO 101
+#define LIBAVCODEC_VERSION_MINOR 5
+#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \
diff --git a/media/ffvpx/libavcodec/version_major.h b/media/ffvpx/libavcodec/version_major.h
index 12f863deb79d..c2f118b26204 100644
--- a/media/ffvpx/libavcodec/version_major.h
+++ b/media/ffvpx/libavcodec/version_major.h
@@ -25,33 +25,28 @@
* Libavcodec version macros.
*/
-#define LIBAVCODEC_VERSION_MAJOR 59
+#define LIBAVCODEC_VERSION_MAJOR 60
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*
* @note, when bumping the major version it is recommended to manually
* disable each FF_API_* in its own commit instead of disabling them all
* at once through the bump. This improves the git bisect-ability of the change.
*/
-#define FF_API_OPENH264_SLICE_MODE (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_OPENH264_CABAC (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_UNUSED_CODEC_CAPS (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_THREAD_SAFE_CALLBACKS (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_GET_FRAME_CLASS (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_AUTO_THREADS (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_INIT_PACKET (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_AVCTX_TIMEBASE (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_FLAG_TRUNCATED (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_SUB_TEXT_FORMAT (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_IDCT_NONE (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_SVTAV1_OPTS (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_AYUV_CODECID (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_VT_OUTPUT_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 60)
-#define FF_API_AVCODEC_CHROMA_POS (LIBAVCODEC_VERSION_MAJOR < 60)
+#define FF_API_INIT_PACKET (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_IDCT_NONE (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_SVTAV1_OPTS (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_AYUV_CODECID (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_VT_OUTPUT_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_AVCODEC_CHROMA_POS (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_VT_HWACCEL_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_AVCTX_FRAME_NUMBER (LIBAVCODEC_VERSION_MAJOR < 61)
+
+// reminder to remove CrystalHD decoders on next major bump
+#define FF_CODEC_CRYSTAL_HD (LIBAVCODEC_VERSION_MAJOR < 61)
#endif /* AVCODEC_VERSION_MAJOR_H */
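The FF_API_* macros above gate deprecated public API on the library major version, so the guarded code disappears automatically at the next bump. A minimal sketch of the usual pattern, using a hypothetical FF_API_FOO macro and struct names that are not part of this patch:

#define LIBFOO_VERSION_MAJOR 60
/* stays non-zero (guarded code kept) until the major version reaches 61 */
#define FF_API_FOO (LIBFOO_VERSION_MAJOR < 61)

typedef struct FooContext {
#if FF_API_FOO
    /** @deprecated use new_field instead */
    int old_field;
#endif
    int new_field;
} FooContext;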
diff --git a/media/ffvpx/libavcodec/videodsp_template.c b/media/ffvpx/libavcodec/videodsp_template.c
index 324d70f2cb26..d653f4d524df 100644
--- a/media/ffvpx/libavcodec/videodsp_template.c
+++ b/media/ffvpx/libavcodec/videodsp_template.c
@@ -64,30 +64,30 @@ void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src,
av_assert2(start_x < end_x && block_w);
w = end_x - start_x;
- src += start_y * src_linesize + start_x * sizeof(pixel);
+ src += start_y * src_linesize + start_x * (ptrdiff_t)sizeof(pixel);
buf += start_x * sizeof(pixel);
// top
for (y = 0; y < start_y; y++) {
memcpy(buf, src, w * sizeof(pixel));
buf += buf_linesize;
}
// copy existing part
for (; y < end_y; y++) {
memcpy(buf, src, w * sizeof(pixel));
src += src_linesize;
buf += buf_linesize;
}
// bottom
src -= src_linesize;
for (; y < block_h; y++) {
memcpy(buf, src, w * sizeof(pixel));
buf += buf_linesize;
}
- buf -= block_h * buf_linesize + start_x * sizeof(pixel);
+ buf -= block_h * buf_linesize + start_x * (ptrdiff_t)sizeof(pixel);
while (block_h--) {
pixel *bufp = (pixel *) buf;
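The (ptrdiff_t)sizeof(pixel) casts above keep the offset arithmetic signed; without them, mixing a signed term with size_t converts the whole sum to an unsigned type, which misbehaves when a negative linesize should yield a negative byte offset. A minimal sketch of that conversion pitfall, assuming a 16-bit pixel and made-up values:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    ptrdiff_t start_y = 1, src_linesize = -640; /* e.g. a bottom-up image */
    int start_x = 16;

    /* start_x * sizeof(uint16_t) is size_t, so the whole sum turns unsigned */
    size_t bad = start_y * src_linesize + start_x * sizeof(uint16_t);
    /* casting sizeof to ptrdiff_t keeps the arithmetic signed */
    ptrdiff_t good = start_y * src_linesize + start_x * (ptrdiff_t)sizeof(uint16_t);

    printf("unsigned sum: %zu\nsigned sum: %td\n", bad, good); /* huge value vs. -608 */
    return 0;
}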
diff --git a/media/ffvpx/libavcodec/vp8.c b/media/ffvpx/libavcodec/vp8.c
index 67f36d893370..db2419deaf70 100644
--- a/media/ffvpx/libavcodec/vp8.c
+++ b/media/ffvpx/libavcodec/vp8.c
@@ -2404,15 +2404,16 @@ static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void
int num_jobs = s->num_jobs;
const VP8Frame *prev_frame = s->prev_frame;
VP8Frame *curframe = s->curframe;
- VPXRangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
+ VPXRangeCoder *coeff_c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
+
VP8Macroblock *mb;
uint8_t *dst[3] = {
curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
};
- if (vpx_rac_is_end(c))
+ if (vpx_rac_is_end(&s->c))
return AVERROR_INVALIDDATA;
if (mb_y == 0)
@@ -2443,35 +2444,38 @@ static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void
td->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
- if (vpx_rac_is_end(c))
+ if (vpx_rac_is_end(&s->c))
return AVERROR_INVALIDDATA;
// Wait for previous thread to read mb_x+2, and reach mb_y-1.
if (prev_td != td) {
if (threadnr != 0) {
check_thread_pos(td, prev_td,
mb_x + (is_vp7 ? 2 : 1),
mb_y - (is_vp7 ? 2 : 1));
} else {
check_thread_pos(td, prev_td,
mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
mb_y - (is_vp7 ? 2 : 1));
}
}
s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
s->linesize, 4);
s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
dst[2] - dst[1], 2);
if (!s->mb_layout)
decode_mb_mode(s, &td->mv_bounds, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
prev_frame && prev_frame->seg_map ?
prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP8_FRAME_PREVIOUS);
- if (!mb->skip)
- decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
+ if (!mb->skip) {
+ if (vpx_rac_is_end(coeff_c))
+ return AVERROR_INVALIDDATA;
+ decode_mb_coeffs(s, td, coeff_c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
+ }
if (mb->mode <= MODE_I4x4)
intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
diff --git a/media/ffvpx/libavcodec/x86/fft.asm b/media/ffvpx/libavcodec/x86/fft.asm
index a44596e5655c..34c3fc9a0f5a 100644
--- a/media/ffvpx/libavcodec/x86/fft.asm
+++ b/media/ffvpx/libavcodec/x86/fft.asm
@@ -475,7 +475,7 @@ cglobal fft_calc, 2,5,8
mov r0, r1
mov r1, r3
FFT_DISPATCH _interleave %+ SUFFIX, r1
- REP_RET
+ RET
%endif
@@ -510,7 +510,7 @@ cglobal fft_calc, 2,5,8
add r2, mmsize*2
jl .loop
.end:
- REP_RET
+ RET
cglobal fft_permute, 2,7,1
mov r4, [r0 + FFTContext.revtab]
@@ -543,7 +543,7 @@ cglobal fft_permute, 2,7,1
movaps [r1 + r2 + 16], xmm1
add r2, 32
jl .loopcopy
- REP_RET
+ RET
INIT_XMM sse
cglobal imdct_calc, 3,5,3
@@ -583,7 +583,7 @@ cglobal imdct_calc, 3,5,3
sub r3, mmsize
add r2, mmsize
jl .loop
- REP_RET
+ RET
%ifdef PIC
%define SECTION_REL - $$
diff --git a/media/ffvpx/libavcodec/x86/flacdsp.asm b/media/ffvpx/libavcodec/x86/flacdsp.asm
index 6d755f497281..44416e4dfde7 100644
--- a/media/ffvpx/libavcodec/x86/flacdsp.asm
+++ b/media/ffvpx/libavcodec/x86/flacdsp.asm
@@ -79,7 +79,7 @@ ALIGN 16
movd [decodedq+4], m1
jg .loop_sample
.ret:
- REP_RET
+ RET
%endmacro
%if HAVE_XOP_EXTERNAL
@@ -133,7 +133,7 @@ align 16
mova [outq + lenq], m%2
add lenq, 16
jl .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse2
@@ -177,7 +177,7 @@ align 16
add outq, mmsize*2
sub lend, mmsize/4
jg .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse2
@@ -302,7 +302,7 @@ align 16
add outq, mmsize*REPCOUNT
sub lend, mmsize/4
jg .loop
- REP_RET
+ RET
%endmacro
INIT_XMM ssse3
diff --git a/media/ffvpx/libavcodec/x86/h264_intrapred.asm b/media/ffvpx/libavcodec/x86/h264_intrapred.asm
index 31840a14720f..8a38ba2bb5d2 100644
--- a/media/ffvpx/libavcodec/x86/h264_intrapred.asm
+++ b/media/ffvpx/libavcodec/x86/h264_intrapred.asm
@@ -62,7 +62,7 @@ cglobal pred16x16_vertical_8, 2,3
lea r0, [r0+r1*2]
dec r2
jg .loop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_pred16x16_horizontal_8(uint8_t *src, ptrdiff_t stride)
@@ -95,7 +95,7 @@ cglobal pred16x16_horizontal_8, 2,3
lea r0, [r0+r1*2]
dec r2
jg .loop
- REP_RET
+ RET
%endmacro
INIT_MMX mmxext
@@ -146,7 +146,7 @@ cglobal pred16x16_dc_8, 2,7
lea r4, [r4+r1*2]
dec r3d
jg .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse2
@@ -192,7 +192,7 @@ cglobal pred16x16_tm_vp8_8, 2,6,6
lea r0, [r0+r1*2]
dec r5d
jg .loop
- REP_RET
+ RET
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
@@ -228,7 +228,7 @@ cglobal pred16x16_tm_vp8_8, 2, 4, 5, dst, stride, stride3, iteration
lea dstq, [dstq+strideq*4]
dec iterationd
jg .loop
- REP_RET
+ RET
%endif
;-----------------------------------------------------------------------------
@@ -427,7 +427,7 @@ cglobal pred16x16_plane_%1_8, 2,9,7
lea r0, [r0+r2*2]
dec r4
jg .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse2
@@ -556,7 +556,7 @@ ALIGN 16
lea r0, [r0+r2*2]
dec r4
jg .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse2
@@ -599,7 +599,7 @@ cglobal pred8x8_horizontal_8, 2,3
lea r0, [r0+r1*2]
dec r2
jg .loop
- REP_RET
+ RET
%endmacro
INIT_MMX mmxext
@@ -737,7 +737,7 @@ cglobal pred8x8_dc_rv40_8, 2,7
lea r4, [r4+r1*2]
dec r3d
jg .loop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_pred8x8_tm_vp8_8(uint8_t *src, ptrdiff_t stride)
@@ -770,34 +770,34 @@ cglobal pred8x8_tm_vp8_8, 2,6,4
lea r0, [r0+r1*2]
dec r5d
jg .loop
- REP_RET
+ RET
INIT_XMM ssse3
cglobal pred8x8_tm_vp8_8, 2,3,6
sub r0, r1
movdqa xmm4, [tm_shuf]
pxor xmm1, xmm1
movq xmm0, [r0]
punpcklbw xmm0, xmm1
movd xmm5, [r0-4]
pshufb xmm5, xmm4
mov r2d, 4
.loop:
movd xmm2, [r0+r1*1-4]
movd xmm3, [r0+r1*2-4]
pshufb xmm2, xmm4
pshufb xmm3, xmm4
psubw xmm2, xmm5
psubw xmm3, xmm5
paddw xmm2, xmm0
paddw xmm3, xmm0
packuswb xmm2, xmm3
movq [r0+r1*1], xmm2
movhps [r0+r1*2], xmm2
lea r0, [r0+r1*2]
dec r2d
jg .loop
- REP_RET
+ RET
; dest, left, right, src, tmp
; output: %1 = (t[n-1] + t[n]*2 + t[n+1] + 2) >> 2
@@ -1802,7 +1802,7 @@ cglobal pred4x4_tm_vp8_8, 3,6
lea r0, [r0+r2*2]
dec r5d
jg .loop
- REP_RET
+ RET
INIT_XMM ssse3
cglobal pred4x4_tm_vp8_8, 3,3
diff --git a/media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm b/media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm
index c4645d434ee1..2f30807332ad 100644
--- a/media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm
+++ b/media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm
@@ -327,7 +327,7 @@ cglobal pred8x8_horizontal_10, 2, 3
lea r0, [r0+r1*2]
dec r2d
jg .loop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_predict_8x8_dc_10(pixel *src, ptrdiff_t stride)
@@ -481,7 +481,7 @@ cglobal pred8x8_plane_10, 2, 7, 7
add r0, r1
dec r2d
jg .loop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
@@ -994,25 +994,25 @@ cglobal pred16x16_vertical_10, 2, 3
lea r0, [r0+r1*2]
dec r2d
jg .loop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_pred16x16_horizontal_10(pixel *src, ptrdiff_t stride)
;-----------------------------------------------------------------------------
INIT_XMM sse2
cglobal pred16x16_horizontal_10, 2, 3
mov r2d, 8
.vloop:
movd m0, [r0+r1*0-4]
movd m1, [r0+r1*1-4]
SPLATW m0, m0, 1
SPLATW m1, m1, 1
MOV16 r0+r1*0, m0, m0, m0, m0
MOV16 r0+r1*1, m1, m1, m1, m1
lea r0, [r0+r1*2]
dec r2d
jg .vloop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_pred16x16_dc_10(pixel *src, ptrdiff_t stride)
@@ -1048,29 +1048,29 @@ cglobal pred16x16_dc_10, 2, 6
lea r5, [r5+r1*2]
dec r3d
jg .loop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_pred16x16_top_dc_10(pixel *src, ptrdiff_t stride)
;-----------------------------------------------------------------------------
INIT_XMM sse2
cglobal pred16x16_top_dc_10, 2, 3
sub r0, r1
mova m0, [r0+0]
paddw m0, [r0+mmsize]
HADDW m0, m2
SPLATW m0, m0
paddw m0, [pw_8]
psrlw m0, 4
mov r2d, 8
.loop:
MOV16 r0+r1*1, m0, m0, m0, m0
MOV16 r0+r1*2, m0, m0, m0, m0
lea r0, [r0+r1*2]
dec r2d
jg .loop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_pred16x16_left_dc_10(pixel *src, ptrdiff_t stride)
@@ -1101,19 +1101,19 @@ cglobal pred16x16_left_dc_10, 2, 6
lea r5, [r5+r1*2]
dec r3d
jg .loop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_pred16x16_128_dc_10(pixel *src, ptrdiff_t stride)
;-----------------------------------------------------------------------------
INIT_XMM sse2
cglobal pred16x16_128_dc_10, 2,3
mova m0, [pw_512]
mov r2d, 8
.loop:
MOV16 r0+r1*0, m0, m0, m0, m0
MOV16 r0+r1*1, m0, m0, m0, m0
lea r0, [r0+r1*2]
dec r2d
jg .loop
- REP_RET
+ RET
diff --git a/media/ffvpx/libavcodec/x86/videodsp.asm b/media/ffvpx/libavcodec/x86/videodsp.asm
index b19a8300c54c..3cc07878d3b8 100644
--- a/media/ffvpx/libavcodec/x86/videodsp.asm
+++ b/media/ffvpx/libavcodec/x86/videodsp.asm
@@ -433,4 +433,4 @@ cglobal prefetch, 3, 3, 0, buf, stride, h
add bufq, strideq
dec hd
jg .loop
- REP_RET
+ RET
diff --git a/media/ffvpx/libavcodec/x86/vp8dsp.asm b/media/ffvpx/libavcodec/x86/vp8dsp.asm
index 33d488bf6f21..6ac5a7721bfc 100644
--- a/media/ffvpx/libavcodec/x86/vp8dsp.asm
+++ b/media/ffvpx/libavcodec/x86/vp8dsp.asm
@@ -200,7 +200,7 @@ cglobal put_vp8_epel%1_h6, 6, 6 + npicregs, 8, dst, dststride, src, srcstride, h
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
shl mxd, 4
@@ -230,7 +230,7 @@ cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, h
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
shl myd, 4
@@ -268,7 +268,7 @@ cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picr
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
lea myd, [myq*3]
@@ -314,7 +314,7 @@ cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picr
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
%endmacro
INIT_MMX ssse3
@@ -368,7 +368,7 @@ cglobal put_vp8_epel4_h4, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, he
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
; 4x4 block, H-only 6-tap filter
INIT_MMX mmxext
@@ -426,7 +426,7 @@ cglobal put_vp8_epel4_h6, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, he
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
INIT_XMM sse2
cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, height, mx, picreg
@@ -474,7 +474,7 @@ cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, h
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
INIT_XMM sse2
cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, height, mx, picreg
@@ -537,7 +537,7 @@ cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, h
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
%macro FILTER_V 1
; 4x4 block, V-only 4-tap filter
@@ -590,7 +590,7 @@ cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picr
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
; 4x4 block, V-only 6-tap filter
@@ -655,7 +655,7 @@ cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picr
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
%endmacro
INIT_MMX mmxext
@@ -738,7 +738,7 @@ cglobal put_vp8_bilinear%1_v, 7, 7, 7, dst, dststride, src, srcstride, height, p
lea srcq, [srcq+srcstrideq*2]
sub heightd, 2
jg .nextrow
- REP_RET
+ RET
%if cpuflag(ssse3)
cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 5, dst, dststride, src, srcstride, height, mx, picreg
@@ -815,43 +815,43 @@ cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 7, dst, dststride, src, srcstride
lea srcq, [srcq+srcstrideq*2]
sub heightd, 2
jg .nextrow
- REP_RET
+ RET
%endmacro
INIT_MMX mmxext
FILTER_BILINEAR 4
INIT_XMM sse2
FILTER_BILINEAR 8
INIT_MMX ssse3
FILTER_BILINEAR 4
INIT_XMM ssse3
FILTER_BILINEAR 8
INIT_MMX mmx
cglobal put_vp8_pixels8, 5, 5, 0, dst, dststride, src, srcstride, height
.nextrow:
movq mm0, [srcq+srcstrideq*0]
movq mm1, [srcq+srcstrideq*1]
lea srcq, [srcq+srcstrideq*2]
movq [dstq+dststrideq*0], mm0
movq [dstq+dststrideq*1], mm1
lea dstq, [dstq+dststrideq*2]
sub heightd, 2
jg .nextrow
- REP_RET
+ RET
INIT_XMM sse
cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
.nextrow:
movups xmm0, [srcq+srcstrideq*0]
movups xmm1, [srcq+srcstrideq*1]
lea srcq, [srcq+srcstrideq*2]
movaps [dstq+dststrideq*0], xmm0
movaps [dstq+dststrideq*1], xmm1
lea dstq, [dstq+dststrideq*2]
sub heightd, 2
jg .nextrow
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_vp8_idct_dc_add_<opt>(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
diff --git a/media/ffvpx/libavutil/avstring.c b/media/ffvpx/libavutil/avstring.c
index 5ddbe9219e8e..e460b5be7f33 100644
--- a/media/ffvpx/libavutil/avstring.c
+++ b/media/ffvpx/libavutil/avstring.c
@@ -139,16 +139,6 @@ end:
return p;
}
-#if FF_API_D2STR
-char *av_d2str(double d)
-{
- char *str = av_malloc(16);
- if (str)
- snprintf(str, 16, "%f", d);
- return str;
-}
-#endif
-
#define WHITESPACES " \n\t\r"
char *av_get_token(const char **buf, const char *term)
diff --git a/media/ffvpx/libavutil/avstring.h b/media/ffvpx/libavutil/avstring.h
index 74aa4cd0e4e2..e26026376308 100644
--- a/media/ffvpx/libavutil/avstring.h
+++ b/media/ffvpx/libavutil/avstring.h
@@ -157,15 +157,6 @@ static inline size_t av_strnlen(const char *s, size_t len)
*/
char *av_asprintf(const char *fmt, ...) av_printf_format(1, 2);
-#if FF_API_D2STR
-/**
- * Convert a number to an av_malloced string.
- * @deprecated use av_asprintf() with "%f" or a more specific format
- */
-attribute_deprecated
-char *av_d2str(double d);
-#endif
-
/**
* Unescape the given string until a non escaped terminating char,
* and return the token corresponding to the unescaped string.
diff --git a/media/ffvpx/libavutil/avutil.symbols b/media/ffvpx/libavutil/avutil.symbols
index f3081a024bcb..aefd9b731098 100644
--- a/media/ffvpx/libavutil/avutil.symbols
+++ b/media/ffvpx/libavutil/avutil.symbols
@@ -54,14 +54,14 @@ av_crc
av_crc_get_table
av_crc_init
av_d2q
-av_d2str
av_default_get_category
av_default_item_name
av_dict_copy
av_dict_count
av_dict_free
av_dict_get
av_dict_get_string
+av_dict_iterate
av_dict_parse_string
av_dict_set
av_dict_set_int
@@ -134,7 +134,6 @@ av_get_channel_layout_channel_index
av_get_channel_layout_nb_channels
av_get_channel_layout_string
av_get_channel_name
-av_get_colorspace_name
av_get_cpu_flags
av_get_default_channel_layout
av_get_known_color_name
@@ -325,4 +324,3 @@ av_hwdevice_ctx_init
av_hwframe_transfer_get_formats
av_hwdevice_ctx_create_derived
av_malloc_array
-av_mallocz_array
diff --git a/media/ffvpx/libavutil/channel_layout.c b/media/ffvpx/libavutil/channel_layout.c
index 5af7ea0e010c..e2f7512254a8 100644
--- a/media/ffvpx/libavutil/channel_layout.c
+++ b/media/ffvpx/libavutil/channel_layout.c
@@ -196,6 +196,7 @@ static const struct channel_layout_name channel_layout_map[] = {
{ "7.1", AV_CHANNEL_LAYOUT_7POINT1 },
{ "7.1(wide)", AV_CHANNEL_LAYOUT_7POINT1_WIDE_BACK },
{ "7.1(wide-side)", AV_CHANNEL_LAYOUT_7POINT1_WIDE },
+ { "7.1(top)", AV_CHANNEL_LAYOUT_7POINT1_TOP_BACK },
{ "octagonal", AV_CHANNEL_LAYOUT_OCTAGONAL },
{ "cube", AV_CHANNEL_LAYOUT_CUBE },
{ "hexadecagonal", AV_CHANNEL_LAYOUT_HEXADECAGONAL },
diff --git a/media/ffvpx/libavutil/channel_layout.h b/media/ffvpx/libavutil/channel_layout.h
index 3e691633607e..f345415c55e4 100644
--- a/media/ffvpx/libavutil/channel_layout.h
+++ b/media/ffvpx/libavutil/channel_layout.h
@@ -232,6 +232,7 @@ enum AVChannelOrder {
#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1_TOP_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
#define AV_CH_LAYOUT_CUBE (AV_CH_LAYOUT_QUAD|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT)
#define AV_CH_LAYOUT_HEXADECAGONAL (AV_CH_LAYOUT_OCTAGONAL|AV_CH_WIDE_LEFT|AV_CH_WIDE_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
@@ -389,6 +390,7 @@ typedef struct AVChannelLayout {
#define AV_CHANNEL_LAYOUT_7POINT1 AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1)
#define AV_CHANNEL_LAYOUT_7POINT1_WIDE AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_WIDE)
#define AV_CHANNEL_LAYOUT_7POINT1_WIDE_BACK AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_WIDE_BACK)
+#define AV_CHANNEL_LAYOUT_7POINT1_TOP_BACK AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_TOP_BACK)
#define AV_CHANNEL_LAYOUT_OCTAGONAL AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_OCTAGONAL)
#define AV_CHANNEL_LAYOUT_CUBE AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_CUBE)
#define AV_CHANNEL_LAYOUT_HEXADECAGONAL AV_CHANNEL_LAYOUT_MASK(16, AV_CH_LAYOUT_HEXADECAGONAL)
diff --git a/media/ffvpx/libavutil/dict.c b/media/ffvpx/libavutil/dict.c
index 14ad780a7919..f673977a986b 100644
--- a/media/ffvpx/libavutil/dict.c
+++ b/media/ffvpx/libavutil/dict.c
@@ -20,50 +20,65 @@
#include <string.h>
+#include "avassert.h"
#include "avstring.h"
#include "dict.h"
#include "dict_internal.h"
#include "internal.h"
#include "mem.h"
#include "time_internal.h"
#include "bprint.h"
struct AVDictionary {
int count;
AVDictionaryEntry *elems;
};
int av_dict_count(const AVDictionary *m)
{
return m ? m->count : 0;
}
-AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
- const AVDictionaryEntry *prev, int flags)
+const AVDictionaryEntry *av_dict_iterate(const AVDictionary *m,
+ const AVDictionaryEntry *prev)
{
- unsigned int i, j;
+ int i = 0;
- if (!m || !key)
+ if (!m)
return NULL;
if (prev)
i = prev - m->elems + 1;
- else
- i = 0;
- for (; i < m->count; i++) {
- const char *s = m->elems[i].key;
+ av_assert2(i >= 0);
+ if (i >= m->count)
+ return NULL;
+
+ return &m->elems[i];
+}
+
+AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
+ const AVDictionaryEntry *prev, int flags)
+{
+ const AVDictionaryEntry *entry = prev;
+ unsigned int j;
+
+ if (!key)
+ return NULL;
+
+ while ((entry = av_dict_iterate(m, entry))) {
+ const char *s = entry->key;
if (flags & AV_DICT_MATCH_CASE)
for (j = 0; s[j] == key[j] && key[j]; j++)
;
else
for (j = 0; av_toupper(s[j]) == av_toupper(key[j]) && key[j]; j++)
;
if (key[j])
continue;
if (s[j] && !(flags & AV_DICT_IGNORE_SUFFIX))
continue;
- return &m->elems[i];
+ return (AVDictionaryEntry *)entry;
}
return NULL;
}
@@ -221,36 +236,36 @@ void av_dict_free(AVDictionary **pm)
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
{
- AVDictionaryEntry *t = NULL;
+ const AVDictionaryEntry *t = NULL;
- while ((t = av_dict_get(src, "", t, AV_DICT_IGNORE_SUFFIX))) {
+ while ((t = av_dict_iterate(src, t))) {
int ret = av_dict_set(dst, t->key, t->value, flags);
if (ret < 0)
return ret;
}
return 0;
}
int av_dict_get_string(const AVDictionary *m, char **buffer,
const char key_val_sep, const char pairs_sep)
{
- AVDictionaryEntry *t = NULL;
+ const AVDictionaryEntry *t = NULL;
AVBPrint bprint;
int cnt = 0;
char special_chars[] = {pairs_sep, key_val_sep, '\0'};
if (!buffer || pairs_sep == '\0' || key_val_sep == '\0' || pairs_sep == key_val_sep ||
pairs_sep == '\\' || key_val_sep == '\\')
return AVERROR(EINVAL);
if (!av_dict_count(m)) {
*buffer = av_strdup("");
return *buffer ? 0 : AVERROR(ENOMEM);
}
av_bprint_init(&bprint, 64, AV_BPRINT_SIZE_UNLIMITED);
- while ((t = av_dict_get(m, "", t, AV_DICT_IGNORE_SUFFIX))) {
+ while ((t = av_dict_iterate(m, t))) {
if (cnt++)
av_bprint_append_data(&bprint, &pairs_sep, 1);
av_bprint_escape(&bprint, t->key, special_chars, AV_ESCAPE_MODE_BACKSLASH, 0);
diff --git a/media/ffvpx/libavutil/dict.h b/media/ffvpx/libavutil/dict.h
index 0d1afc6c64ab..713c9e361af8 100644
--- a/media/ffvpx/libavutil/dict.h
+++ b/media/ffvpx/libavutil/dict.h
@@ -39,154 +39,197 @@
* @brief Simple key:value store
*
* @{
- * Dictionaries are used for storing key:value pairs. To create
- * an AVDictionary, simply pass an address of a NULL pointer to
- * av_dict_set(). NULL can be used as an empty dictionary wherever
- * a pointer to an AVDictionary is required.
- * Use av_dict_get() to retrieve an entry or iterate over all
- * entries and finally av_dict_free() to free the dictionary
- * and all its contents.
+ * Dictionaries are used for storing key-value pairs.
+ *
+ * - To **create an AVDictionary**, simply pass an address of a NULL
+ * pointer to av_dict_set(). NULL can be used as an empty dictionary
+ * wherever a pointer to an AVDictionary is required.
+ * - To **insert an entry**, use av_dict_set().
+ * - Use av_dict_get() to **retrieve an entry**.
+ * - To **iterate over all entries**, use av_dict_iterate().
+ * - In order to **free the dictionary and all its contents**, use av_dict_free().
*
@code
AVDictionary *d = NULL; // "create" an empty dictionary
AVDictionaryEntry *t = NULL;
av_dict_set(&d, "foo", "bar", 0); // add an entry
char *k = av_strdup("key"); // if your strings are already allocated,
char *v = av_strdup("value"); // you can avoid copying them like this
av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
- while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) {
- <....> // iterate over all entries in d
+ while ((t = av_dict_iterate(d, t))) {
+ <....> // iterate over all entries in d
}
av_dict_free(&d);
@endcode
*/
+/**
+ * @name AVDictionary Flags
+ * Flags that influence how keys are matched and how entries are inserted into the dictionary.
+ * @{
+ */
#define AV_DICT_MATCH_CASE 1 /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). */
#define AV_DICT_IGNORE_SUFFIX 2 /**< Return first entry in a dictionary whose first part corresponds to the search key,
ignoring the suffix of the found key string. Only relevant in av_dict_get(). */
#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been
allocated with av_malloc() or another memory allocation function. */
#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been
allocated with av_malloc() or another memory allocation function. */
-#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries.
+#define AV_DICT_DONT_OVERWRITE 16 /**< Don't overwrite existing entries. */
#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no
- delimiter is added, the strings are simply concatenated. */
+ delimiter is added, the strings are simply concatenated. */
#define AV_DICT_MULTIKEY 64 /**< Allow to store several equal keys in the dictionary */
+/**
+ * @}
+ */
typedef struct AVDictionaryEntry {
char *key;
char *value;
} AVDictionaryEntry;
typedef struct AVDictionary AVDictionary;
/**
* Get a dictionary entry with matching key.
*
* The returned entry key or value must not be changed, or it will
* cause undefined behavior.
*
- * To iterate through all the dictionary entries, you can set the matching key
- * to the null string "" and set the AV_DICT_IGNORE_SUFFIX flag.
+ * @param prev Set to the previous matching element to find the next.
+ * If set to NULL the first matching element is returned.
+ * @param key Matching key
+ * @param flags A collection of AV_DICT_* flags controlling how the
+ * entry is retrieved
*
- * @param prev Set to the previous matching element to find the next.
- * If set to NULL the first matching element is returned.
- * @param key matching key
- * @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved
- * @return found entry or NULL in case no matching entry was found in the dictionary
+ * @return Found entry or NULL in case no matching entry was found in the dictionary
*/
AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
const AVDictionaryEntry *prev, int flags);
+/**
+ * Iterate over a dictionary
+ *
+ * Iterates through all entries in the dictionary.
+ *
+ * @warning The returned AVDictionaryEntry key/value must not be changed.
+ *
+ * @warning As av_dict_set() invalidates all previous entries returned
+ * by this function, it must not be called while iterating over the dict.
+ *
+ * Typical usage:
+ * @code
+ * const AVDictionaryEntry *e = NULL;
+ * while ((e = av_dict_iterate(m, e))) {
+ * // ...
+ * }
+ * @endcode
+ *
+ * @param m The dictionary to iterate over
+ * @param prev Pointer to the previous AVDictionaryEntry, NULL initially
+ *
+ * @retval AVDictionaryEntry* The next element in the dictionary
+ * @retval NULL No more elements in the dictionary
+ */
+const AVDictionaryEntry *av_dict_iterate(const AVDictionary *m,
+ const AVDictionaryEntry *prev);
+
/**
* Get number of entries in dictionary.
*
* @param m dictionary
* @return number of entries in dictionary
*/
int av_dict_count(const AVDictionary *m);
/**
* Set the given entry in *pm, overwriting an existing entry.
*
* Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set,
* these arguments will be freed on error.
*
- * Warning: Adding a new entry to a dictionary invalidates all existing entries
- * previously returned with av_dict_get.
+ * @warning Adding a new entry to a dictionary invalidates all existing entries
+ * previously returned with av_dict_get() or av_dict_iterate().
*
- * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL
- * a dictionary struct is allocated and put in *pm.
- * @param key entry key to add to *pm (will either be av_strduped or added as a new key depending on flags)
- * @param value entry value to add to *pm (will be av_strduped or added as a new key depending on flags).
- * Passing a NULL value will cause an existing entry to be deleted.
- * @return >= 0 on success otherwise an error code <0
+ * @param pm Pointer to a pointer to a dictionary struct. If *pm is NULL
+ * a dictionary struct is allocated and put in *pm.
+ * @param key Entry key to add to *pm (will either be av_strduped or added as a new key depending on flags)
+ * @param value Entry value to add to *pm (will be av_strduped or added as a new key depending on flags).
+ * Passing a NULL value will cause an existing entry to be deleted.
+ *
+ * @return >= 0 on success otherwise an error code <0
*/
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);
/**
- * Convenience wrapper for av_dict_set that converts the value to a string
+ * Convenience wrapper for av_dict_set() that converts the value to a string
* and stores it.
*
- * Note: If AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.
+ * Note: If ::AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.
*/
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags);
/**
* Parse the key/value pairs list and add the parsed entries to a dictionary.
*
* In case of failure, all the successfully set entries are stored in
* *pm. You may need to manually free the created dictionary.
*
- * @param key_val_sep a 0-terminated list of characters used to separate
+ * @param key_val_sep A 0-terminated list of characters used to separate
* key from value
- * @param pairs_sep a 0-terminated list of characters used to separate
+ * @param pairs_sep A 0-terminated list of characters used to separate
* two pairs from each other
- * @param flags flags to use when adding to dictionary.
- * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL
+ * @param flags Flags to use when adding to the dictionary.
+ * ::AV_DICT_DONT_STRDUP_KEY and ::AV_DICT_DONT_STRDUP_VAL
* are ignored since the key/value tokens will always
* be duplicated.
+ *
* @return 0 on success, negative AVERROR code on failure
*/
int av_dict_parse_string(AVDictionary **pm, const char *str,
const char *key_val_sep, const char *pairs_sep,
int flags);
/**
* Copy entries from one AVDictionary struct into another.
- * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL,
- * this function will allocate a struct for you and put it in *dst
- * @param src pointer to source AVDictionary struct
- * @param flags flags to use when setting entries in *dst
- * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag
+ *
+ * @note Metadata is read using the ::AV_DICT_IGNORE_SUFFIX flag
+ *
+ * @param dst Pointer to a pointer to an AVDictionary struct to copy into. If *dst is NULL,
+ * this function will allocate a struct for you and put it in *dst
+ * @param src Pointer to the source AVDictionary struct to copy items from.
+ * @param flags Flags to use when setting entries in *dst
+ *
* @return 0 on success, negative AVERROR code on failure. If dst was allocated
* by this function, callers should free the associated memory.
*/
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags);
/**
* Free all the memory allocated for an AVDictionary struct
* and all keys and values.
*/
void av_dict_free(AVDictionary **m);
/**
* Get dictionary entries as a string.
*
* Create a string containing dictionary's entries.
* Such string may be passed back to av_dict_parse_string().
* @note String is escaped with backslashes ('\').
*
- * @param[in] m dictionary
+ * @warning Separators cannot be '\\' or '\0'. They also cannot be the same.
+ *
+ * @param[in] m The dictionary
* @param[out] buffer Pointer to buffer that will be allocated with string containing entries.
Buffer must be freed by the caller when it is no longer needed.
- * @param[in] key_val_sep character used to separate key from value
- * @param[in] pairs_sep character used to separate two pairs from each other
+ * @param[in] key_val_sep Character used to separate key from value
+ * @param[in] pairs_sep Character used to separate two pairs from each other
+ *
* @return >= 0 on success, negative on error
- * @warning Separators cannot be neither '\\' nor '\0'. They also cannot be the same.
*/
int av_dict_get_string(const AVDictionary *m, char **buffer,
const char key_val_sep, const char pairs_sep);
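The av_dict_iterate() API documented above is what the rest of this patch switches callers to, replacing the old av_dict_get(m, "", prev, AV_DICT_IGNORE_SUFFIX) idiom. A minimal, self-contained usage sketch with made-up dictionary contents:

#include <libavutil/dict.h>
#include <stdio.h>

int main(void)
{
    AVDictionary *d = NULL;
    const AVDictionaryEntry *e = NULL;

    av_dict_set(&d, "title", "example", 0);
    av_dict_set(&d, "artist", "nobody", 0);

    /* new iteration style; do not call av_dict_set() inside this loop */
    while ((e = av_dict_iterate(d, e)))
        printf("%s=%s\n", e->key, e->value);

    av_dict_free(&d);
    return 0;
}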
diff --git a/media/ffvpx/libavutil/ffversion.h b/media/ffvpx/libavutil/ffversion.h
index 05d052c91f23..21f6614f7aa9 100644
--- a/media/ffvpx/libavutil/ffversion.h
+++ b/media/ffvpx/libavutil/ffversion.h
@@ -1,5 +1,5 @@
/* Automatically generated by version.sh, do not manually edit! */
#ifndef AVUTIL_FFVERSION_H
#define AVUTIL_FFVERSION_H
-#define FFMPEG_VERSION "N-107213-gfed07efcde"
+#define FFMPEG_VERSION "N-109117-g6a3e174ad1"
#endif /* AVUTIL_FFVERSION_H */
diff --git a/media/ffvpx/libavutil/frame.c b/media/ffvpx/libavutil/frame.c
index de4ad1f94d23..9545477acc95 100644
--- a/media/ffvpx/libavutil/frame.c
+++ b/media/ffvpx/libavutil/frame.c
@@ -35,23 +35,6 @@
av_get_channel_layout_nb_channels((frame)->channel_layout))
#endif
-#if FF_API_COLORSPACE_NAME
-const char *av_get_colorspace_name(enum AVColorSpace val)
-{
- static const char * const name[] = {
- [AVCOL_SPC_RGB] = "GBR",
- [AVCOL_SPC_BT709] = "bt709",
- [AVCOL_SPC_FCC] = "fcc",
- [AVCOL_SPC_BT470BG] = "bt470bg",
- [AVCOL_SPC_SMPTE170M] = "smpte170m",
- [AVCOL_SPC_SMPTE240M] = "smpte240m",
- [AVCOL_SPC_YCOCG] = "YCgCo",
- };
- if ((unsigned)val >= FF_ARRAY_ELEMS(name))
- return NULL;
- return name[val];
-}
-#endif
static void get_frame_defaults(AVFrame *frame)
{
memset(frame, 0, sizeof(*frame));
@@ -304,11 +287,19 @@ FF_DISABLE_DEPRECATION_WARNINGS
FF_ENABLE_DEPRECATION_WARNINGS
#endif
dst->time_base = src->time_base;
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
dst->reordered_opaque = src->reordered_opaque;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
dst->quality = src->quality;
dst->best_effort_timestamp = src->best_effort_timestamp;
+#if FF_API_FRAME_PICTURE_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
dst->coded_picture_number = src->coded_picture_number;
dst->display_picture_number = src->display_picture_number;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
dst->flags = src->flags;
dst->decode_error_flags = src->decode_error_flags;
dst->color_primaries = src->color_primaries;
@@ -831,6 +822,7 @@ const char *av_frame_side_data_name(enum AVFrameSideDataType type)
case AV_FRAME_DATA_DETECTION_BBOXES: return "Bounding boxes for object detection and classification";
case AV_FRAME_DATA_DOVI_RPU_BUFFER: return "Dolby Vision RPU Data";
case AV_FRAME_DATA_DOVI_METADATA: return "Dolby Vision Metadata";
+ case AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT: return "Ambient viewing environment";
}
return NULL;
}
diff --git a/media/ffvpx/libavutil/frame.h b/media/ffvpx/libavutil/frame.h
index e60a82f6c0ef..25802695493a 100644
--- a/media/ffvpx/libavutil/frame.h
+++ b/media/ffvpx/libavutil/frame.h
@@ -209,6 +209,11 @@ enum AVFrameSideDataType {
* volume transform - CUVA 005.1-2021.
*/
AV_FRAME_DATA_DYNAMIC_HDR_VIVID,
+
+ /**
+ * Ambient viewing environment metadata, as defined by H.274.
+ */
+ AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT,
};
enum AVActiveFormatDescription {
@@ -446,14 +451,18 @@ typedef struct AVFrame {
*/
AVRational time_base;
+#if FF_API_FRAME_PICTURE_NUMBER
/**
* picture number in bitstream order
*/
+ attribute_deprecated
int coded_picture_number;
/**
* picture number in display order
*/
+ attribute_deprecated
int display_picture_number;
+#endif
/**
* quality (between 1 (good) and FF_LAMBDA_MAX (bad))
@@ -486,15 +495,20 @@ typedef struct AVFrame {
*/
int palette_has_changed;
+#if FF_API_REORDERED_OPAQUE
/**
* reordered opaque 64 bits (generally an integer or a double precision float
* PTS but can be anything).
* The user sets AVCodecContext.reordered_opaque to represent the input at
* that time,
* the decoder reorders values as needed and sets AVFrame.reordered_opaque
* to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
+ *
+ * @deprecated Use AV_CODEC_FLAG_COPY_OPAQUE instead
*/
+ attribute_deprecated
int64_t reordered_opaque;
+#endif
/**
* Sample rate of the audio data.
@@ -715,15 +729,6 @@ typedef struct AVFrame {
} AVFrame;
-#if FF_API_COLORSPACE_NAME
-/**
- * Get the name of a colorspace.
- * @return a static string identifying the colorspace; can be NULL.
- * @deprecated use av_color_space_name()
- */
-attribute_deprecated
-const char *av_get_colorspace_name(enum AVColorSpace val);
-#endif
/**
* Allocate an AVFrame and set its fields to default values. The resulting
* struct must be freed using av_frame_free().
diff --git a/media/ffvpx/libavutil/hwcontext.c b/media/ffvpx/libavutil/hwcontext.c
index ab9ad3703ec7..339659826931 100644
--- a/media/ffvpx/libavutil/hwcontext.c
+++ b/media/ffvpx/libavutil/hwcontext.c
@@ -397,10 +397,14 @@ int av_hwframe_transfer_get_formats(AVBufferRef *hwframe_ref,
static int transfer_data_alloc(AVFrame *dst, const AVFrame *src, int flags)
{
- AVHWFramesContext *ctx = (AVHWFramesContext*)src->hw_frames_ctx->data;
+ AVHWFramesContext *ctx;
AVFrame *frame_tmp;
int ret = 0;
+ if (!src->hw_frames_ctx)
+ return AVERROR(EINVAL);
+ ctx = (AVHWFramesContext*)src->hw_frames_ctx->data;
+
frame_tmp = av_frame_alloc();
if (!frame_tmp)
return AVERROR(ENOMEM);
diff --git a/media/ffvpx/libavutil/hwcontext_vaapi.c b/media/ffvpx/libavutil/hwcontext_vaapi.c
index 997c0033fc7e..de30a919ab73 100644
--- a/media/ffvpx/libavutil/hwcontext_vaapi.c
+++ b/media/ffvpx/libavutil/hwcontext_vaapi.c
@@ -1321,8 +1321,17 @@ static int vaapi_map_to_drm_esh(AVHWFramesContext *hwfc, AVFrame *dst,
surface_id = (VASurfaceID)(uintptr_t)src->data[3];
export_flags = VA_EXPORT_SURFACE_SEPARATE_LAYERS;
- if (flags & AV_HWFRAME_MAP_READ)
+ if (flags & AV_HWFRAME_MAP_READ) {
export_flags |= VA_EXPORT_SURFACE_READ_ONLY;
+
+ vas = vaSyncSurface(hwctx->display, surface_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to sync surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+ }
+
if (flags & AV_HWFRAME_MAP_WRITE)
export_flags |= VA_EXPORT_SURFACE_WRITE_ONLY;
@@ -1702,42 +1711,57 @@ static int vaapi_device_create(AVHWDeviceContext *ctx, const char *device,
char path[64];
int n, max_devices = 8;
#if CONFIG_LIBDRM
+ drmVersion *info;
const AVDictionaryEntry *kernel_driver;
kernel_driver = av_dict_get(opts, "kernel_driver", NULL, 0);
#endif
for (n = 0; n < max_devices; n++) {
snprintf(path, sizeof(path),
"/dev/dri/renderD%d", 128 + n);
priv->drm_fd = open(path, O_RDWR);
if (priv->drm_fd < 0) {
av_log(ctx, AV_LOG_VERBOSE, "Cannot open "
"DRM render node for device %d.\n", n);
break;
}
#if CONFIG_LIBDRM
+ info = drmGetVersion(priv->drm_fd);
+ if (!info) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Failed to get DRM version for device %d.\n", n);
+ close(priv->drm_fd);
+ priv->drm_fd = -1;
+ continue;
+ }
if (kernel_driver) {
- drmVersion *info;
- info = drmGetVersion(priv->drm_fd);
if (strcmp(kernel_driver->value, info->name)) {
av_log(ctx, AV_LOG_VERBOSE, "Ignoring device %d "
"with non-matching kernel driver (%s).\n",
n, info->name);
drmFreeVersion(info);
close(priv->drm_fd);
priv->drm_fd = -1;
continue;
}
av_log(ctx, AV_LOG_VERBOSE, "Trying to use "
"DRM render node for device %d, "
"with matching kernel driver (%s).\n",
n, info->name);
drmFreeVersion(info);
- } else
-#endif
- {
- av_log(ctx, AV_LOG_VERBOSE, "Trying to use "
- "DRM render node for device %d.\n", n);
+ break;
+ // drmGetVersion() ensures |info->name| is 0-terminated.
+ } else if (!strcmp(info->name, "vgem")) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Skipping vgem node for device %d.\n", n);
+ drmFreeVersion(info);
+ close(priv->drm_fd);
+ priv->drm_fd = -1;
+ continue;
}
+ drmFreeVersion(info);
+#endif
+ av_log(ctx, AV_LOG_VERBOSE, "Trying to use "
+ "DRM render node for device %d.\n", n);
break;
}
if (n >= max_devices)
diff --git a/media/ffvpx/libavutil/mem.c b/media/ffvpx/libavutil/mem.c
index 18aff5291f0f..36b8940a0cf5 100644
--- a/media/ffvpx/libavutil/mem.c
+++ b/media/ffvpx/libavutil/mem.c
@@ -212,16 +212,6 @@ void *av_malloc_array(size_t nmemb, size_t size)
return av_malloc(result);
}
-#if FF_API_AV_MALLOCZ_ARRAY
-void *av_mallocz_array(size_t nmemb, size_t size)
-{
- size_t result;
- if (size_mult(nmemb, size, &result) < 0)
- return NULL;
- return av_mallocz(result);
-}
-#endif
-
void *av_realloc_array(void *ptr, size_t nmemb, size_t size)
{
size_t result;
diff --git a/media/ffvpx/libavutil/mem.h b/media/ffvpx/libavutil/mem.h
index c9c4fcf1ff02..62b4ca6e50e3 100644
--- a/media/ffvpx/libavutil/mem.h
+++ b/media/ffvpx/libavutil/mem.h
@@ -51,86 +51,6 @@
* @{
*/
-#if FF_API_DECLARE_ALIGNED
-/**
- *
- * @defgroup lavu_mem_macros Alignment Macros
- * Helper macros for declaring aligned variables.
- * @{
- */
-
-/**
- * @def DECLARE_ALIGNED(n,t,v)
- * Declare a variable that is aligned in memory.
- *
- * @code{.c}
- * DECLARE_ALIGNED(16, uint16_t, aligned_int) = 42;
- * DECLARE_ALIGNED(32, uint8_t, aligned_array)[128];
- *
- * // The default-alignment equivalent would be
- * uint16_t aligned_int = 42;
- * uint8_t aligned_array[128];
- * @endcode
- *
- * @param n Minimum alignment in bytes
- * @param t Type of the variable (or array element)
- * @param v Name of the variable
- */
-
-/**
- * @def DECLARE_ASM_ALIGNED(n,t,v)
- * Declare an aligned variable appropriate for use in inline assembly code.
- *
- * @code{.c}
- * DECLARE_ASM_ALIGNED(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
- * @endcode
- *
- * @param n Minimum alignment in bytes
- * @param t Type of the variable (or array element)
- * @param v Name of the variable
- */
-
-/**
- * @def DECLARE_ASM_CONST(n,t,v)
- * Declare a static constant aligned variable appropriate for use in inline
- * assembly code.
- *
- * @code{.c}
- * DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
- * @endcode
- *
- * @param n Minimum alignment in bytes
- * @param t Type of the variable (or array element)
- * @param v Name of the variable
- */
-
-#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)
- #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
- #define DECLARE_ASM_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
- #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v
-#elif defined(__DJGPP__)
- #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (FFMIN(n, 16)))) v
- #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
- #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
-#elif defined(__GNUC__) || defined(__clang__)
- #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
- #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (n))) v
- #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v
-#elif defined(_MSC_VER)
- #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
- #define DECLARE_ASM_ALIGNED(n,t,v) __declspec(align(n)) t v
- #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v
-#else
- #define DECLARE_ALIGNED(n,t,v) t v
- #define DECLARE_ASM_ALIGNED(n,t,v) t v
- #define DECLARE_ASM_CONST(n,t,v) static const t v
-#endif
-
-/**
- * @}
- */
-#endif
-
/**
* @defgroup lavu_mem_attrs Function Attributes
* Function attributes applicable to memory handling functions.
@@ -239,14 +159,6 @@ av_alloc_size(1, 2) void *av_malloc_array(size_t nmemb, size_t size);
*/
void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib av_alloc_size(1, 2);
-#if FF_API_AV_MALLOCZ_ARRAY
-/**
- * @deprecated use av_calloc()
- */
-attribute_deprecated
-void *av_mallocz_array(size_t nmemb, size_t size) av_malloc_attrib av_alloc_size(1, 2);
-#endif
-
/**
* Allocate, reallocate, or free a block of memory.
*
diff --git a/media/ffvpx/libavutil/mem_internal.h b/media/ffvpx/libavutil/mem_internal.h
index 955e31a69814..2448c606f193 100644
--- a/media/ffvpx/libavutil/mem_internal.h
+++ b/media/ffvpx/libavutil/mem_internal.h
@@ -30,7 +30,6 @@
#include "mem.h"
#include "version.h"
-#if !FF_API_DECLARE_ALIGNED
/**
* @def DECLARE_ALIGNED(n,t,v)
* Declare a variable that is aligned in memory.
@@ -97,7 +96,6 @@
#define DECLARE_ASM_ALIGNED(n,t,v) t v
#define DECLARE_ASM_CONST(n,t,v) static const t v
#endif
-#endif
// Some broken preprocessors need a second expansion
// to be forced to tokenize __VA_ARGS__
diff --git a/media/ffvpx/libavutil/opt.c b/media/ffvpx/libavutil/opt.c
index a3940f47fbf7..0908751752ea 100644
--- a/media/ffvpx/libavutil/opt.c
+++ b/media/ffvpx/libavutil/opt.c
@@ -1742,17 +1742,17 @@ void av_opt_free(void *obj)
int av_opt_set_dict2(void *obj, AVDictionary **options, int search_flags)
{
- AVDictionaryEntry *t = NULL;
+ const AVDictionaryEntry *t = NULL;
AVDictionary *tmp = NULL;
int ret;
if (!options)
return 0;
- while ((t = av_dict_get(*options, "", t, AV_DICT_IGNORE_SUFFIX))) {
+ while ((t = av_dict_iterate(*options, t))) {
ret = av_opt_set(obj, t->key, t->value, search_flags);
if (ret == AVERROR_OPTION_NOT_FOUND)
- ret = av_dict_set(&tmp, t->key, t->value, 0);
+ ret = av_dict_set(&tmp, t->key, t->value, AV_DICT_MULTIKEY);
if (ret < 0) {
av_log(obj, AV_LOG_ERROR, "Error setting option %s to value %s.\n", t->key, t->value);
av_dict_free(&tmp);
@@ -2137,16 +2137,16 @@ FF_ENABLE_DEPRECATION_WARNINGS
case AV_OPT_TYPE_DICT: {
AVDictionary *dict1 = NULL;
AVDictionary *dict2 = *(AVDictionary **)dst;
- AVDictionaryEntry *en1 = NULL;
- AVDictionaryEntry *en2 = NULL;
+ const AVDictionaryEntry *en1 = NULL;
+ const AVDictionaryEntry *en2 = NULL;
ret = av_dict_parse_string(&dict1, o->default_val.str, "=", ":", 0);
if (ret < 0) {
av_dict_free(&dict1);
return ret;
}
do {
- en1 = av_dict_get(dict1, "", en1, AV_DICT_IGNORE_SUFFIX);
- en2 = av_dict_get(dict2, "", en2, AV_DICT_IGNORE_SUFFIX);
+ en1 = av_dict_iterate(dict1, en1);
+ en2 = av_dict_iterate(dict2, en2);
} while (en1 && en2 && !strcmp(en1->key, en2->key) && !strcmp(en1->value, en2->value));
av_dict_free(&dict1);
return (!en1 && !en2);
diff --git a/media/ffvpx/libavutil/pixdesc.c b/media/ffvpx/libavutil/pixdesc.c
index ca3e204a0b8e..62a2ae08d907 100644
--- a/media/ffvpx/libavutil/pixdesc.c
+++ b/media/ffvpx/libavutil/pixdesc.c
@@ -46,19 +46,35 @@ void av_read_image_line2(void *dst,
uint32_t *dst32 = dst;
 
if (flags & AV_PIX_FMT_FLAG_BITSTREAM) {
- int skip = x * step + comp.offset;
- const uint8_t *p = data[plane] + y * linesize[plane] + (skip >> 3);
- int shift = 8 - depth - (skip & 7);
+ if (depth == 10) {
+ // Assume all channels are packed into a 32bit value
+ const uint8_t *byte_p = data[plane] + y * linesize[plane];
+ const uint32_t *p = (uint32_t *)byte_p;
 
- while (w--) {
- int val = (*p >> shift) & mask;
- if (read_pal_component)
- val = data[1][4*val + c];
- shift -= step;
- p -= shift >> 3;
- shift &= 7;
- if (dst_element_size == 4) *dst32++ = val;
- else *dst16++ = val;
+ while (w--) {
+ int val = AV_RB32(p);
+ val = (val >> comp.offset) & mask;
+ if (read_pal_component)
+ val = data[1][4*val + c];
+ if (dst_element_size == 4) *dst32++ = val;
+ else *dst16++ = val;
+ p++;
+ }
+ } else {
+ int skip = x * step + comp.offset;
+ const uint8_t *p = data[plane] + y * linesize[plane] + (skip >> 3);
+ int shift = 8 - depth - (skip & 7);
+
+ while (w--) {
+ int val = (*p >> shift) & mask;
+ if (read_pal_component)
+ val = data[1][4*val + c];
+ shift -= step;
+ p -= shift >> 3;
+ shift &= 7;
+ if (dst_element_size == 4) *dst32++ = val;
+ else *dst16++ = val;
+ }
}
} else {
const uint8_t *p = data[plane] + y * linesize[plane] +
@@ -109,15 +125,29 @@ void av_write_image_line2(const void *src,
const uint16_t *src16 = src;
 
if (flags & AV_PIX_FMT_FLAG_BITSTREAM) {
- int skip = x * step + comp.offset;
- uint8_t *p = data[plane] + y * linesize[plane] + (skip >> 3);
- int shift = 8 - depth - (skip & 7);
+ if (depth == 10) {
+ // Assume all channels are packed into a 32bit value
+ const uint8_t *byte_p = data[plane] + y * linesize[plane];
+ uint32_t *p = (uint32_t *)byte_p;
+ int offset = comp.offset;
+ uint32_t mask = ((1ULL << depth) - 1) << offset;
 
- while (w--) {
- *p |= (src_element_size == 4 ? *src32++ : *src16++) << shift;
- shift -= step;
- p -= shift >> 3;
- shift &= 7;
+ while (w--) {
+ uint16_t val = src_element_size == 4 ? *src32++ : *src16++;
+ AV_WB32(p, (AV_RB32(p) & ~mask) | (val << offset));
+ p++;
+ }
+ } else {
+ int skip = x * step + comp.offset;
+ uint8_t *p = data[plane] + y * linesize[plane] + (skip >> 3);
+ int shift = 8 - depth - (skip & 7);
+
+ while (w--) {
+ *p |= (src_element_size == 4 ? *src32++ : *src16++) << shift;
+ shift -= step;
+ p -= shift >> 3;
+ shift &= 7;
+ }
}
} else {
int shift = comp.shift;
diff --git a/media/ffvpx/libavutil/pixfmt.h b/media/ffvpx/libavutil/pixfmt.h
index 224670a73185..37c2c79e0140 100644
--- a/media/ffvpx/libavutil/pixfmt.h
+++ b/media/ffvpx/libavutil/pixfmt.h
@@ -206,8 +206,36 @@ enum AVPixelFormat {
AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian
AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian
/**
- * HW acceleration through QSV, data[3] contains a pointer to the
- * mfxFrameSurface1 structure.
+ * HW acceleration through QSV, data[3] contains a pointer to the
+ * mfxFrameSurface1 structure.
+ *
+ * Before FFmpeg 5.0:
+ * mfxFrameSurface1.Data.MemId contains a pointer when importing
+ * the following frames as QSV frames:
+ *
+ * VAAPI:
+ * mfxFrameSurface1.Data.MemId contains a pointer to VASurfaceID
+ *
+ * DXVA2:
+ * mfxFrameSurface1.Data.MemId contains a pointer to IDirect3DSurface9
+ *
+ * FFmpeg 5.0 and above:
+ * mfxFrameSurface1.Data.MemId contains a pointer to the mfxHDLPair
+ * structure when importing the following frames as QSV frames:
+ *
+ * VAAPI:
+ * mfxHDLPair.first contains a VASurfaceID pointer.
+ * mfxHDLPair.second is always MFX_INFINITE.
+ *
+ * DXVA2:
+ * mfxHDLPair.first contains IDirect3DSurface9 pointer.
+ * mfxHDLPair.second is always MFX_INFINITE.
+ *
+ * D3D11:
+ * mfxHDLPair.first contains a ID3D11Texture2D pointer.
+ * mfxHDLPair.second contains the texture array index of the frame if the
+ * ID3D11Texture2D is an array texture, or always MFX_INFINITE if it is a
+ * normal texture.
*/
AV_PIX_FMT_QSV,
/**
diff --git a/media/ffvpx/libavutil/version.h b/media/ffvpx/libavutil/version.h
index 2df788e529ab..900b7989718a 100644
--- a/media/ffvpx/libavutil/version.h
+++ b/media/ffvpx/libavutil/version.h
@@ -78,43 +78,41 @@
* @{
*/
 
-#define LIBAVUTIL_VERSION_MAJOR 57
-#define LIBAVUTIL_VERSION_MINOR 40
+#define LIBAVUTIL_VERSION_MAJOR 58
+#define LIBAVUTIL_VERSION_MINOR 3
#define LIBAVUTIL_VERSION_MICRO 100
 
#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
LIBAVUTIL_VERSION_MINOR, \
LIBAVUTIL_VERSION_MICRO)
#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \
LIBAVUTIL_VERSION_MINOR, \
LIBAVUTIL_VERSION_MICRO)
#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
 
#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
 
/**
* @defgroup lavu_depr_guards Deprecation Guards
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*
* @note, when bumping the major version it is recommended to manually
* disable each FF_API_* in its own commit instead of disabling them all
* at once through the bump. This improves the git bisect-ability of the change.
*
* @{
*/
 
-#define FF_API_D2STR (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_DECLARE_ALIGNED (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_COLORSPACE_NAME (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_AV_MALLOCZ_ARRAY (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_FIFO_PEEK2 (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_FIFO_OLD_API (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_OLD_CHANNEL_LAYOUT (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_AV_FOPEN_UTF8 (LIBAVUTIL_VERSION_MAJOR < 58)
-#define FF_API_PKT_DURATION (LIBAVUTIL_VERSION_MAJOR < 58)
+#define FF_API_FIFO_PEEK2 (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_FIFO_OLD_API (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_OLD_CHANNEL_LAYOUT (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_AV_FOPEN_UTF8 (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_PKT_DURATION (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_REORDERED_OPAQUE (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_FRAME_PICTURE_NUMBER (LIBAVUTIL_VERSION_MAJOR < 59)
 
/**
* @}
diff --git a/media/ffvpx/libavutil/video_enc_params.c b/media/ffvpx/libavutil/video_enc_params.c
index 54bfed0ed979..33592dc12843 100644
--- a/media/ffvpx/libavutil/video_enc_params.c
+++ b/media/ffvpx/libavutil/video_enc_params.c
@@ -27,11 +27,11 @@
AVVideoEncParams *av_video_enc_params_alloc(enum AVVideoEncParamsType type,
unsigned int nb_blocks, size_t *out_size)
{
- const size_t blocks_offset = offsetof(
- struct {
- AVVideoEncParams p;
- AVVideoBlockParams b;
- }, b);
+ struct TestStruct {
+ AVVideoEncParams p;
+ AVVideoBlockParams b;
+ };
+ const size_t blocks_offset = offsetof(struct TestStruct, b);
size_t size = blocks_offset;
AVVideoEncParams *par;
 
diff --git a/media/ffvpx/libavutil/x86/float_dsp.asm b/media/ffvpx/libavutil/x86/float_dsp.asm
index ff608f5f5a80..e84ba5256680 100644
--- a/media/ffvpx/libavutil/x86/float_dsp.asm
+++ b/media/ffvpx/libavutil/x86/float_dsp.asm
@@ -48,7 +48,7 @@ ALIGN 16
 
sub lenq, 64
jge .loop
- REP_RET
+ RET
%endmacro
 
INIT_XMM sse
@@ -141,7 +141,7 @@ cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
%endif ; mmsize
sub lenq, 64
jge .loop
- REP_RET
+ RET
%endmacro
 
INIT_XMM sse
@@ -178,7 +178,7 @@ cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
mova [dstq+lenq], m1
sub lenq, mmsize
jge .loop
- REP_RET
+ RET
%endmacro
 
INIT_XMM sse
@@ -233,7 +233,7 @@ cglobal vector_dmac_scalar, 4,4,5, dst, src, mul, len
movaps [dstq+lenq+3*mmsize], m4
sub lenq, mmsize*4
jge .loop
- REP_RET
+ RET
%endmacro
 
INIT_XMM sse2
@@ -280,7 +280,7 @@ cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
movaps [dstq+lenq+mmsize], m2
sub lenq, 2*mmsize
jge .loop
- REP_RET
+ RET
%endmacro
 
INIT_XMM sse2
@@ -323,7 +323,7 @@ cglobal vector_fmul_window, 5, 6, 6, dst, src0, src1, win, len, len1
sub len1q, mmsize
add lenq, mmsize
jl .loop
- REP_RET
+ RET
 
;-----------------------------------------------------------------------------
; vector_fmul_add(float *dst, const float *src0, const float *src1,
@@ -352,7 +352,7 @@ ALIGN 16
 
sub lenq, 2*mmsize
jge .loop
- REP_RET
+ RET
%endmacro
 
INIT_XMM sse
@@ -401,7 +401,7 @@ ALIGN 16
add src1q, 2*mmsize
sub lenq, 2*mmsize
jge .loop
- REP_RET
+ RET
%endmacro
 
INIT_XMM sse
@@ -585,4 +585,4 @@ cglobal butterflies_float, 3,3,3, src0, src1, len
mova [src0q + lenq], m0
add lenq, mmsize
jl .loop
- REP_RET
+ RET
diff --git a/media/ffvpx/libavutil/x86/lls.asm b/media/ffvpx/libavutil/x86/lls.asm
index d2526d1ff487..e8141e6c4ffa 100644
--- a/media/ffvpx/libavutil/x86/lls.asm
+++ b/media/ffvpx/libavutil/x86/lls.asm
@@ -123,7 +123,7 @@ cglobal update_lls, 2,5,8, ctx, var, i, j, covar2
test id, id
jle .loop2x1
.ret:
- REP_RET
+ RET
 
%macro UPDATE_LLS 0
cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
@@ -240,7 +240,7 @@ cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
cmp id, countd
jle .loop2x1
.ret:
- REP_RET
+ RET
%endmacro ; UPDATE_LLS
 
%if HAVE_AVX_EXTERNAL
diff --git a/tools/rewriting/ThirdPartyPaths.txt b/tools/rewriting/ThirdPartyPaths.txt
index c34577779bd7..65f2af3e093e 100644
--- a/tools/rewriting/ThirdPartyPaths.txt
+++ b/tools/rewriting/ThirdPartyPaths.txt
@@ -31,6 +31,7 @@ dom/media/gmp/widevine-adapter/content_decryption_module_proxy.h
dom/media/platforms/ffmpeg/ffmpeg57/
dom/media/platforms/ffmpeg/ffmpeg58/
dom/media/platforms/ffmpeg/ffmpeg59/
+dom/media/platforms/ffmpeg/ffmpeg60/
dom/media/platforms/ffmpeg/libav53/
dom/media/platforms/ffmpeg/libav54/
dom/media/platforms/ffmpeg/libav55/