early-access version 1680

This commit is contained in:
pineappleEA 2021-05-13 11:45:27 +02:00
parent 1434d96e7d
commit 66ed389c6f
311 changed files with 6452 additions and 2597 deletions

View file

@@ -12,6 +12,8 @@ project(yuzu)
 # OFF by default, but if ENABLE_SDL2 and MSVC are true then ON
 option(ENABLE_SDL2 "Enable the SDL2 frontend" ON)
 CMAKE_DEPENDENT_OPTION(YUZU_USE_BUNDLED_SDL2 "Download bundled SDL2 binaries" ON "ENABLE_SDL2;MSVC" OFF)
+# On Linux, the system SDL2 is likely to lack HIDAPI support, which has drawbacks but is needed for SDL motion
+CMAKE_DEPENDENT_OPTION(YUZU_ALLOW_SYSTEM_SDL2 "Try using system SDL2 before falling back to one from externals" NOT UNIX "ENABLE_SDL2" OFF)
 option(ENABLE_QT "Enable the Qt frontend" ON)
 option(ENABLE_QT_TRANSLATION "Enable translations for the Qt frontend" OFF)
@@ -292,6 +294,7 @@ if (ENABLE_SDL2)
         target_link_libraries(SDL2 INTERFACE "${SDL2_LIBRARY}")
         target_include_directories(SDL2 INTERFACE "${SDL2_INCLUDE_DIR}")
     else()
+        if (YUZU_ALLOW_SYSTEM_SDL2)
         find_package(SDL2 2.0.15 QUIET)
         if (SDL2_FOUND)
@@ -307,6 +310,9 @@ if (ENABLE_SDL2)
         else()
             message(STATUS "SDL2 2.0.15 or newer not found, falling back to externals.")
         endif()
+        else()
+            message(STATUS "Using SDL2 from externals.")
+        endif()
     endif()
 endif()

View file

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
-This is the source code for early-access 1679.
+This is the source code for early-access 1680.

 ## Legal Notice

View file

@@ -47,8 +47,20 @@ target_include_directories(unicorn-headers INTERFACE ./unicorn/include)
 # SDL2
 if (NOT SDL2_FOUND AND ENABLE_SDL2)
+    # Yuzu itself needs: Events Joystick Haptic Sensor Timers
+    # Yuzu-cmd also needs: Video (depends on Loadso/Dlopen)
+    set(SDL_UNUSED_SUBSYSTEMS
+        Atomic Audio Render Power Threads
+        File CPUinfo Filesystem Locale)
+    foreach(_SUB ${SDL_UNUSED_SUBSYSTEMS})
+        string(TOUPPER ${_SUB} _OPT)
+        option(SDL_${_OPT} "" OFF)
+    endforeach()
     set(SDL_STATIC ON)
     set(SDL_SHARED OFF)
+    option(HIDAPI "" ON)
     add_subdirectory(SDL EXCLUDE_FROM_ALL)
     add_library(SDL2 ALIAS SDL2-static)
 endif()

View file

@@ -15,7 +15,9 @@
 <ceffmpeg@gmail.com> <cehoyos@rainbow.studorg.tuwien.ac.at>
 <ffmpeg@gyani.pro> <gyandoshi@gmail.com>
 <atomnuker@gmail.com> <rpehlivanov@obe.tv>
-<zhong.li@intel.com> <zhongli_dev@126.com>
+<lizhong1008@gmail.com> <zhong.li@intel.com>
+<lizhong1008@gmail.com> <zhongli_dev@126.com>
 <andreas.rheinhardt@gmail.com> <andreas.rheinhardt@googlemail.com>
 rcombs <rcombs@rcombs.me> <rodger.combs@gmail.com>
 <thilo.borgmann@mail.de> <thilo.borgmann@googlemail.com>
+<liuqi05@kuaishou.com> <lq@chinaffmpeg.org>

View file

@ -1,47 +1,13 @@
Entries are sorted chronologically from oldest to youngest within each release, Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest. releases are sorted from youngest to oldest.
version 4.3.1: version <next>:
avcodec/tiff: Check input space in dng_decode_jpeg() - AudioToolbox output device
avcodec/mjpeg_parser: Adjust size rejection threshold - MacCaption demuxer
avcodec/cbs_jpeg: Fix uninitialized end index in cbs_jpeg_split_fragment() - PGX decoder
avformat/sdp: Fix potential write beyond end of buffer - chromanr video filter
avformat/mm: Check for existence of audio stream - VDPAU accelerated HEVC 10/12bit decoding
avformat/mov: Fix unaligned read of uint32_t and endian-dependance in mov_read_default
avcodec/apedec: Fix undefined integer overflow with 24bit
avcodec/loco: Fix integer overflow with large values from loco_get_rice()
avformat/smjpegdec: Check the existence of referred streams
avcodec/tiff: Check frame parameters before blit for DNG
avcodec/mjpegdec: Limit bayer to single plane outputting format
avcodec/pnmdec: Fix misaligned reads
avcodec/mv30: Fix integer overflows in idct2_1d()
avcodec/hcadec: Check total_band_count against imdct_in size
avcodec/scpr3: Fix out of array access with dectab
avcodec/tiff: Do not overrun the array ends in dng_blit()
avcodec/dstdec: Replace AC overread check by sample rate check
dnn_backend_native: Add overflow check for length calculation.
avcodec/h264_metadata_bsf: Fix invalid av_freep
avcodec/cbs_h265: set default VUI parameters when vui_parameters_present_flag is false
avcodec/av1_parser: initialize avctx->pix_fmt
avcodec/av1_parser: add missing parsing for RGB pixel format signaling
avcodec/av1_parser: set context values outside the OBU parsing loop
avutil/avsscanf: Add () to avoid integer overflow in scanexp()
avformat/utils: reorder duration computation to avoid overflow
avcodec/pngdec: Check for fctl after idat
avformat/hls: Pass a copy of the URL for probing
avutil/common: Fix integer overflow in av_ceil_log2_c()
avcodec/wmalosslessdec: fix overflow with pred in revert_cdlms
avformat/mvdec: Fix integer overflow with billions of channels
avformat/microdvddec: skip malformed lines without frame number.
dnn_backend_native: check operand index
dnn_backend_native.c: refine code for fail case
avformat/mov: fix memleaks
libavformat/mov: Fix memleaks when demuxing DV audio
avcodec/cbs_av1: Fix writing uvlc numbers >= INT_MAX
avformat/avc, mxfenc: Avoid allocation of H264 SPS structure, fix memleak
avcodec/bitstream: Don't check for undefined behaviour after it happened
avformat/aviobuf: Also return truncated buffer in avio_get_dyn_buf()
avformat/aviobuf: Don't check for overflow after it happened
version 4.3: version 4.3:
- v360 filter - v360 filter

View file

@@ -55,7 +55,7 @@ fate.ffmpeg.org Timothy Gu
 Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos
 Patchwork Andriy Gelman
 mailing lists Baptiste Coudurier
-Twitter Lou Logan, Reynaldo H. Verdejo Pinochet
+Twitter Reynaldo H. Verdejo Pinochet
 Launchpad Timothy Gu
 ffmpeg-security Andreas Cadhalpun, Carl Eugen Hoyos, Clément Bœsch, Michael Niedermayer, Reimar Doeffinger, Rodger Combs, wm4

View file

@@ -1 +1 @@
-4.3.1
+4.3.git

View file

@@ -253,6 +253,8 @@ External library support:
   --enable-libopenh264     enable H.264 encoding via OpenH264 [no]
   --enable-libopenjpeg     enable JPEG 2000 de/encoding via OpenJPEG [no]
   --enable-libopenmpt      enable decoding tracked files via libopenmpt [no]
+  --enable-libopenvino     enable OpenVINO as a DNN module backend
+                           for DNN based filters like dnn_processing [no]
   --enable-libopus         enable Opus de/encoding via libopus [no]
   --enable-libpulse        enable Pulseaudio input via libpulse [no]
   --enable-librabbitmq     enable RabbitMQ library [no]
@@ -1741,7 +1743,6 @@ EXTERNAL_LIBRARY_VERSION3_LIST="
     liblensfun
     libopencore_amrnb
     libopencore_amrwb
-    libvmaf
     libvo_amrwbenc
     mbedtls
     rkmpp
@@ -1790,6 +1791,7 @@ EXTERNAL_LIBRARY_LIST="
     libopenh264
     libopenjpeg
     libopenmpt
+    libopenvino
     libopus
     libpulse
     librabbitmq
@@ -1808,6 +1810,7 @@ EXTERNAL_LIBRARY_LIST="
     libtheora
     libtwolame
     libv4l2
+    libvmaf
     libvorbis
     libvpx
     libwavpack
@@ -2620,7 +2623,7 @@ cbs_mpeg2_select="cbs"
 cbs_vp9_select="cbs"
 dct_select="rdft"
 dirac_parse_select="golomb"
-dnn_suggest="libtensorflow"
+dnn_suggest="libtensorflow libopenvino"
 error_resilience_select="me_cmp"
 faandct_deps="faan"
 faandct_select="fdctdsp"
@@ -3367,6 +3370,8 @@ alsa_outdev_deps="alsa"
 avfoundation_indev_deps="avfoundation corevideo coremedia pthreads"
 avfoundation_indev_suggest="coregraphics applicationservices"
 avfoundation_indev_extralibs="-framework Foundation"
+audiotoolbox_outdev_deps="audiotoolbox pthreads"
+audiotoolbox_outdev_extralibs="-framework AudioToolbox -framework CoreAudio"
 bktr_indev_deps_any="dev_bktr_ioctl_bt848_h machine_ioctl_bt848_h dev_video_bktr_ioctl_bt848_h dev_ic_bt8xx_h"
 caca_outdev_deps="libcaca"
 decklink_deps_any="libdl LoadLibrary"
@@ -6152,6 +6157,7 @@ enabled videotoolbox && check_apple_framework VideoToolbox
 check_apple_framework CoreFoundation
 check_apple_framework CoreMedia
 check_apple_framework CoreVideo
+check_apple_framework CoreAudio

 enabled avfoundation && {
     disable coregraphics applicationservices
@@ -6275,7 +6281,7 @@ enabled avisynth && require_headers "avisynth/avisynth_c.h"
 enabled cuda_nvcc && { check_nvcc cuda_nvcc || die "ERROR: failed checking for nvcc."; }
 enabled chromaprint && require chromaprint chromaprint.h chromaprint_get_version -lchromaprint
 enabled decklink && { require_headers DeckLinkAPI.h &&
-                      { test_cpp_condition DeckLinkAPIVersion.h "BLACKMAGIC_DECKLINK_API_VERSION >= 0x0a090500" || die "ERROR: Decklink API version must be >= 10.9.5."; } }
+                      { test_cpp_condition DeckLinkAPIVersion.h "BLACKMAGIC_DECKLINK_API_VERSION >= 0x0a0a0000" || die "ERROR: Decklink API version must be >= 10.10"; } }
 enabled frei0r && require_headers "frei0r.h dlfcn.h"
 enabled gmp && require gmp gmp.h mpz_export -lgmp
 enabled gnutls && require_pkg_config gnutls gnutls gnutls/gnutls.h gnutls_global_init
@@ -6347,6 +6353,7 @@ enabled libopenh264 && require_pkg_config libopenh264 openh264 wels/codec_
 enabled libopenjpeg && { check_pkg_config libopenjpeg "libopenjp2 >= 2.1.0" openjpeg.h opj_version ||
                          { require_pkg_config libopenjpeg "libopenjp2 >= 2.1.0" openjpeg.h opj_version -DOPJ_STATIC && add_cppflags -DOPJ_STATIC; } }
 enabled libopenmpt && require_pkg_config libopenmpt "libopenmpt >= 0.2.6557" libopenmpt/libopenmpt.h openmpt_module_create -lstdc++ && append libopenmpt_extralibs "-lstdc++"
+enabled libopenvino && require libopenvino c_api/ie_c_api.h ie_c_api_version -linference_engine_c_api
 enabled libopus && {
     enabled libopus_decoder && {
         require_pkg_config libopus opus opus_multistream.h opus_multistream_decoder_create
@@ -6378,7 +6385,7 @@ enabled libtwolame && require libtwolame twolame.h twolame_init -ltwolame
                           die "ERROR: libtwolame must be installed and version must be >= 0.3.10"; }
 enabled libv4l2 && require_pkg_config libv4l2 libv4l2 libv4l2.h v4l2_ioctl
 enabled libvidstab && require_pkg_config libvidstab "vidstab >= 0.98" vid.stab/libvidstab.h vsMotionDetectInit
-enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 1.3.9" libvmaf.h compute_vmaf
+enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 1.5.2" libvmaf.h compute_vmaf
 enabled libvo_amrwbenc && require libvo_amrwbenc vo-amrwbenc/enc_if.h E_IF_init -lvo-amrwbenc
 enabled libvorbis && require_pkg_config libvorbis vorbis vorbis/codec.h vorbis_info_init &&
                      require_pkg_config libvorbisenc vorbisenc vorbis/vorbisenc.h vorbis_encode_init

View file

@@ -15,6 +15,16 @@ libavutil: 2017-10-21

 API changes, most recent first:

+2020-06-12 - b09fb030c1 - lavu 56.55.100 - pixdesc.h
+  Add AV_PIX_FMT_X2RGB10.
+
+2020-06-xx - xxxxxxxxxx - lavu 56.54.100 - frame.h
+  Add AV_FRAME_DATA_SEI_UNREGISTERED.
+
+2020-06-xx - xxxxxxxxxx - lavu 56.53.100 - log.h opt.h
+  Add av_opt_child_class_iterate() and AVClass.child_class_iterate().
+  Deprecate av_opt_child_class_next() and AVClass.child_class_next().
+
 2020-06-05 - ec39c2276a - lavu 56.50.100 - buffer.h
   Passing NULL as alloc argument to av_buffer_pool_init2() is now allowed.
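The new AV_FRAME_DATA_SEI_UNREGISTERED side data can be queried from any decoded frame. A minimal sketch of reading it (hypothetical helper; assumes the usual FFmpeg headers and the H.264/HEVC unregistered-SEI layout of a 16-byte UUID followed by user data):

    #include <libavutil/frame.h>
    #include <libavutil/log.h>

    /* Sketch: inspect unregistered SEI attached to a decoded frame. */
    static void dump_unregistered_sei(const AVFrame *frame)
    {
        const AVFrameSideData *sd =
            av_frame_get_side_data(frame, AV_FRAME_DATA_SEI_UNREGISTERED);
        if (sd && sd->size >= 16)   /* 16-byte UUID, then user data */
            av_log(NULL, AV_LOG_INFO, "SEI user data: %d bytes\n",
                   (int)(sd->size - 16));
    }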

View file

@@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
 # could be handy for archiving the generated documentation or if some version
 # control system is used.

-PROJECT_NUMBER = 4.3.1
+PROJECT_NUMBER =

 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a

View file

@@ -1599,6 +1599,36 @@ Enable the use of global motion for block prediction. Default is true.
 Enable block copy mode for intra block prediction. This mode is
 useful for screen content. Default is true.

+@item enable-rect-partitions (@emph{boolean}) (Requires libaom >= v2.0.0)
+Enable rectangular partitions. Default is true.
+
+@item enable-1to4-partitions (@emph{boolean}) (Requires libaom >= v2.0.0)
+Enable 1:4/4:1 partitions. Default is true.
+
+@item enable-ab-partitions (@emph{boolean}) (Requires libaom >= v2.0.0)
+Enable AB shape partitions. Default is true.
+
+@item enable-angle-delta (@emph{boolean}) (Requires libaom >= v2.0.0)
+Enable angle delta intra prediction. Default is true.
+
+@item enable-cfl-intra (@emph{boolean}) (Requires libaom >= v2.0.0)
+Enable chroma predicted from luma intra prediction. Default is true.
+
+@item enable-filter-intra (@emph{boolean}) (Requires libaom >= v2.0.0)
+Enable filter intra predictor. Default is true.
+
+@item enable-intra-edge-filter (@emph{boolean}) (Requires libaom >= v2.0.0)
+Enable intra edge filter. Default is true.
+
+@item enable-smooth-intra (@emph{boolean}) (Requires libaom >= v2.0.0)
+Enable smooth intra prediction mode. Default is true.
+
+@item enable-paeth-intra (@emph{boolean}) (Requires libaom >= v2.0.0)
+Enable paeth predictor in intra prediction. Default is true.
+
+@item enable-palette (@emph{boolean}) (Requires libaom >= v2.0.0)
+Enable palette prediction mode. Default is true.
+
 @end table

 @section libkvazaar
@@ -2740,17 +2770,17 @@ MPEG-2 video encoder.

 @subsection Options

 @table @option
-@item profile @var{integer}
+@item profile
 Select the mpeg2 profile to encode:

 @table @samp
 @item 422
-@item main
+@item high
 @item ss
 Spatially Scalable
 @item snr
 SNR Scalable
-@item high
+@item main
 @item simple
 @end table

View file

@@ -734,10 +734,6 @@ ffmpeg -dump_attachment:t "" -i INPUT
 Technical note -- attachments are implemented as codec extradata, so this
 option can actually be used to extract extradata from any stream, not just
 attachments.

-@item -noautorotate
-Disable automatically rotating video based on file metadata.
-
 @end table

 @section Video Options
@@ -819,6 +815,18 @@ Create the filtergraph specified by @var{filtergraph} and use it to
 filter the stream.

 This is an alias for @code{-filter:v}, see the @ref{filter_option,,-filter option}.

+@item -autorotate
+Automatically rotate the video according to file metadata. Enabled by
+default, use @option{-noautorotate} to disable it.
+
+@item -autoscale
+Automatically scale the video according to the resolution of the first frame.
+Enabled by default, use @option{-noautoscale} to disable it. When autoscale is
+disabled, all output frames of the filter graph might not be in the same
+resolution and may be inadequate for some encoders/muxers. Therefore, it is
+not recommended to disable it unless you really know what you are doing.
+Disable autoscale at your own risk.
+
 @end table

 @section Advanced Video options
@@ -848,8 +856,8 @@ factor if negative.
 Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
 Use this option if your input file is interlaced and you want
 to keep the interlaced format for minimum losses.
-The alternative is to deinterlace the input stream with
-@option{-deinterlace}, but deinterlacing introduces losses.
+The alternative is to deinterlace the input stream by use of a filter
+such as @code{yadif} or @code{bwdif}, but deinterlacing introduces losses.
 @item -psnr
 Calculate PSNR of compressed frames.
 @item -vstats

View file

@@ -4197,6 +4197,9 @@ If not specified, or the expressed duration is negative, the audio is
 supposed to be generated forever.
 Only used if plugin have zero inputs.

+@item latency, l
+Enable latency compensation, by default is disabled.
+Only used if plugin have inputs.
 @end table

 @subsection Examples
@@ -7180,6 +7183,42 @@ ffmpeg -f lavfi -i color=c=black:s=1280x720 -i video.mp4 -shortest -filter_compl
 @end example
 @end itemize

+@section chromanr
+Reduce chrominance noise.
+
+The filter accepts the following options:
+
+@table @option
+@item thres
+Set threshold for averaging chrominance values.
+Sum of absolute difference of U and V pixel components of current
+pixel and neighbour pixels lower than this threshold will be used in
+averaging. Luma component is left unchanged and is copied to output.
+Default value is 30. Allowed range is from 1 to 200.
+
+@item sizew
+Set horizontal radius of rectangle used for averaging.
+Allowed range is from 1 to 100. Default value is 5.
+
+@item sizeh
+Set vertical radius of rectangle used for averaging.
+Allowed range is from 1 to 100. Default value is 5.
+
+@item stepw
+Set horizontal step when averaging. Default value is 1.
+Allowed range is from 1 to 50.
+Mostly useful to speed-up filtering.
+
+@item steph
+Set vertical step when averaging. Default value is 1.
+Allowed range is from 1 to 50.
+Mostly useful to speed-up filtering.
+@end table
+
+@subsection Commands
+This filter supports same @ref{commands} as options.
+The command accepts the same syntax of the corresponding option.
+
 @section chromashift
 Shift chroma pixels horizontally and/or vertically.
@@ -9288,13 +9327,21 @@ TensorFlow backend. To enable this backend you
 need to install the TensorFlow for C library (see
 @url{https://www.tensorflow.org/install/install_c}) and configure FFmpeg with
 @code{--enable-libtensorflow}

+@item openvino
+OpenVINO backend. To enable this backend you
+need to build and install the OpenVINO for C library (see
+@url{https://github.com/openvinotoolkit/openvino/blob/master/build-instruction.md}) and configure FFmpeg with
+@code{--enable-libopenvino} (--extra-cflags=-I... --extra-ldflags=-L... might
+be needed if the header files and libraries are not installed into system path)
+
 @end table

 Default value is @samp{native}.

 @item model
 Set path to model file specifying network architecture and its parameters.
-Note that different backends use different file formats. TensorFlow and native
+Note that different backends use different file formats. TensorFlow, OpenVINO and native
 backend can load files for only its format.

 Native model file (.model) can be generated from TensorFlow model file (.pb) by using tools/python/convert.py
@@ -12809,7 +12856,7 @@ The obtained VMAF score is printed through the logging system.

 It requires Netflix's vmaf library (libvmaf) as a pre-requisite.
 After installing the library it can be enabled using:
-@code{./configure --enable-libvmaf --enable-version3}.
+@code{./configure --enable-libvmaf}.
 If no model path is specified it uses the default model: @code{vmaf_v0.6.1.pkl}.

 The filter has following options:
@@ -14342,9 +14389,15 @@ It accepts the following values:
 @item yuv420
 force YUV420 output

+@item yuv420p10
+force YUV420p10 output
+
 @item yuv422
 force YUV422 output

+@item yuv422p10
+force YUV422p10 output
+
 @item yuv444
 force YUV444 output
@@ -17971,7 +18024,7 @@ subtitles=video.mkv:si=1
 To make the subtitles stream from @file{sub.srt} appear in 80% transparent blue
 @code{DejaVu Serif}, use:
 @example
-subtitles=sub.srt:force_style='FontName=DejaVu Serif,PrimaryColour=&HCCFF0000'
+subtitles=sub.srt:force_style='Fontname=DejaVu Serif,PrimaryColour=&HCCFF0000'
 @end example

 @section super2xsai
@@ -19313,6 +19366,46 @@ Truncated square pyramid projection.
 @item he
 @item hequirect
 Half equirectangular projection.

+@item equisolid
+Equisolid format.
+
+Format specific options:
+@table @option
+@item h_fov
+@item v_fov
+@item d_fov
+Set output horizontal/vertical/diagonal field of view. Values in degrees.
+
+If diagonal field of view is set it overrides horizontal and vertical field of view.
+
+@item ih_fov
+@item iv_fov
+@item id_fov
+Set input horizontal/vertical/diagonal field of view. Values in degrees.
+
+If diagonal field of view is set it overrides horizontal and vertical field of view.
+@end table
+
+@item og
+Orthographic format.
+
+Format specific options:
+@table @option
+@item h_fov
+@item v_fov
+@item d_fov
+Set output horizontal/vertical/diagonal field of view. Values in degrees.
+
+If diagonal field of view is set it overrides horizontal and vertical field of view.
+
+@item ih_fov
+@item iv_fov
+@item id_fov
+Set input horizontal/vertical/diagonal field of view. Values in degrees.
+
+If diagonal field of view is set it overrides horizontal and vertical field of view.
+@end table
+
 @end table

 @item interp
@@ -20733,6 +20826,12 @@ Input frame count.
 @item on
 Output frame count.

+@item in_time, it
+The input timestamp expressed in seconds. It's NAN if the input timestamp is unknown.
+
+@item out_time, time, ot
+The output timestamp expressed in seconds.
+
 @item x
 @item y
 Last calculated 'x' and 'y' position from 'x' and 'y' expression
@@ -20771,13 +20870,13 @@ display aspect ratio

 @itemize
 @item
-Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
+Zoom in up to 1.5x and pan at same time to some spot near center of picture:
 @example
 zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
 @end example

 @item
-Zoom-in up to 1.5 and pan always at center of picture:
+Zoom in up to 1.5x and pan always at center of picture:
 @example
 zoompan=z='min(zoom+0.0015,1.5)':d=700:x='iw/2-(iw/zoom/2)':y='ih/2-(ih/zoom/2)'
 @end example
@@ -20787,6 +20886,13 @@ Same as above but without pausing:
 @example
 zoompan=z='min(max(zoom,pzoom)+0.0015,1.5)':d=1:x='iw/2-(iw/zoom/2)':y='ih/2-(ih/zoom/2)'
 @end example

+@item
+Zoom in 2x into center of picture only for the first second of the input video:
+@example
+zoompan=z='if(between(in_time,0,1),2,1)':d=1:x='iw/2-(iw/zoom/2)':y='ih/2-(ih/zoom/2)'
+@end example
 @end itemize

 @anchor{zscale}

View file

@@ -751,6 +751,8 @@ following image formats are supported:
 @tab Portable GrayMap image
 @item PGMYUV @tab X @tab X
 @tab PGM with U and V components in YUV 4:2:0
+@item PGX @tab @tab X
+@tab PGX file decoder
 @item PIC @tab @tab X
 @tab Pictor/PC Paint
 @item PNG @tab X @tab X
@@ -803,6 +805,7 @@ following image formats are supported:
 @item Apple MJPEG-B @tab @tab X
 @item Apple Pixlet @tab @tab X
 @item Apple ProRes @tab X @tab X
+@tab fourcc: apch,apcn,apcs,apco,ap4h,ap4x
 @item Apple QuickDraw @tab @tab X
 @tab fourcc: qdrw
 @item Asus v1 @tab X @tab X
@@ -835,6 +838,8 @@ following image formats are supported:
 @item BitJazz SheerVideo @tab @tab X
 @item Bitmap Brothers JV video @tab @tab X
 @item y41p Brooktree uncompressed 4:1:1 12-bit @tab X @tab X
+@item Brooktree ProSumer Video @tab @tab X
+@tab fourcc: BT20
 @item Brute Force & Ignorance @tab @tab X
 @tab Used in the game Flash Traffic: City of Angels.
 @item C93 video @tab @tab X
@@ -968,6 +973,7 @@ following image formats are supported:
 @item MPEG-4 part 2 Microsoft variant version 3 @tab X @tab X
 @item Newtek SpeedHQ @tab @tab X
 @item Nintendo Gamecube THP video @tab @tab X
+@item NotchLC @tab @tab X
 @item NuppelVideo/RTjpeg @tab @tab X
 @tab Video encoding used in NuppelVideo files.
 @item On2 VP3 @tab @tab X
@@ -986,8 +992,6 @@ following image formats are supported:
 @tab encoding supported through external library libvpx
 @item Pinnacle TARGA CineWave YUV16 @tab @tab X
 @tab fourcc: Y216
-@item Prores @tab @tab X
-@tab fourcc: apch,apcn,apcs,apco
 @item Q-team QPEG @tab @tab X
 @tab fourccs: QPEG, Q1.0, Q1.1
 @item QuickTime 8BPS video @tab @tab X

View file

@@ -398,6 +398,12 @@ are dropped till a frame with timecode is received.
 Option @var{timecode_format} must be specified.
 Defaults to @option{false}.

+@item enable_klv(@emph{bool})
+If set to @option{true}, extracts KLV data from VANC and outputs KLV packets.
+KLV VANC packets are joined based on MID and PSC fields and aggregated into
+one KLV packet.
+Defaults to @option{false}.
+
 @end table

 @subsection Examples

View file

@@ -267,8 +267,10 @@ Override User-Agent field in HTTP header. Applicable only for HTTP output.
 @item http_persistent @var{http_persistent}
 Use persistent HTTP connections. Applicable only for HTTP output.
 @item hls_playlist @var{hls_playlist}
-Generate HLS playlist files as well. The master playlist is generated with the filename master.m3u8.
+Generate HLS playlist files as well. The master playlist is generated with the filename @var{hls_master_name}.
 One media playlist file is generated for each stream with filenames media_0.m3u8, media_1.m3u8, etc.
+@item hls_master_name @var{file_name}
+HLS master playlist name. Default is "master.m3u8".
 @item streaming @var{streaming}
 Enable (1) or disable (0) chunk streaming mode of output. In chunk streaming
 mode, each frame will be a moof fragment which forms a chunk.
@@ -364,6 +366,10 @@ adjusting playback latency and buffer occupancy during normal playback by client
 Set the maximum playback rate indicated as appropriate for the purposes of automatically
 adjusting playback latency and buffer occupancy during normal playback by clients.

+@item update_period @var{update_period}
+Set the MPD update period, for dynamic content.
+The unit is seconds.
+
 @end table

 @anchor{framecrc}
@@ -2275,6 +2281,11 @@ certain (usually permanent) errors the recovery is not attempted even when
 Specify whether to wait for the keyframe after recovering from
 queue overflow or failure. This option is set to 0 (false) by default.

+@item timeshift @var{duration}
+Buffer the specified amount of packets and delay writing the output. Note that
+@var{queue_size} must be big enough to store the packets for timeshift. At the
+end of the input the fifo buffer is flushed at realtime speed.
+
 @end table

 @subsection Examples

View file

@@ -38,6 +38,52 @@ ffmpeg -i INPUT -f alsa hw:1,7
 @end example
 @end itemize

+@section AudioToolbox
+
+AudioToolbox output device.
+
+Allows native output to CoreAudio devices on OSX.
+
+The output filename can be empty (or @code{-}) to refer to the default system output
+device, or a number that refers to the device index as shown using @code{-list_devices true}.
+
+Alternatively, the audio output device can be chosen by index using
+@option{-audio_device_index <INDEX>}, overriding any device name or index given
+in the output filename.
+
+All available devices can be enumerated by using @option{-list_devices true}, listing
+all device names, UIDs and corresponding indices.
+
+@subsection Options
+
+AudioToolbox supports the following options:
+
+@table @option
+
+@item -audio_device_index <INDEX>
+Specify the audio device by its index. Overrides anything given in the output filename.
+
+@end table
+
+@subsection Examples
+
+@itemize
+
+@item
+Print the list of supported devices and output a sine wave to the default device:
+@example
+$ ffmpeg -f lavfi -i sine=r=44100 -f audiotoolbox -list_devices true -
+@end example
+
+@item
+Output a sine wave to the device with the index 2, overriding any output filename:
+@example
+$ ffmpeg -f lavfi -i sine=r=44100 -f audiotoolbox -audio_device_index 2 -
+@end example
+
+@end itemize
+
 @section caca

 CACA output device.

View file

@@ -109,6 +109,21 @@ the received message may be truncated causing decoding errors.
 The timeout in seconds during the initial connection to the broker. The
 default value is rw_timeout, or 5 seconds if rw_timeout is not set.

+@item delivery_mode @var{mode}
+Sets the delivery mode of each message sent to broker.
+The following values are accepted:
+@table @samp
+
+@item persistent
+Delivery mode set to "persistent" (2). This is the default value.
+Messages may be written to the broker's disk depending on its setup.
+
+@item non-persistent
+Delivery mode set to "non-persistent" (1).
+Messages will stay in broker's memory unless the broker is under memory
+pressure.
+
+@end table
 @end table

 @section async
@@ -520,6 +535,9 @@ audio/mpeg.
 This enables support for Icecast versions < 2.4.0, that do not support the
 HTTP PUT method but the SOURCE method.

+@item tls
+Establish a TLS (HTTPS) connection to Icecast.
+
 @end table

 @example

View file

@@ -110,11 +110,13 @@ maximum of 2 digits. The @var{m} at the end expresses decimal value for
 @emph{or}
 @example
-[-]@var{S}+[.@var{m}...]
+[-]@var{S}+[.@var{m}...][s|ms|us]
 @end example

 @var{S} expresses the number of seconds, with the optional decimal part
-@var{m}.
+@var{m}. The optional literal suffixes @samp{s}, @samp{ms} or @samp{us}
+indicate to interpret the value as seconds, milliseconds or microseconds,
+respectively.

 In both expressions, the optional @samp{-} indicates negative duration.
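Durations in this syntax are parsed by av_parse_time() from libavutil. A minimal sketch of how the new suffixes behave (illustrative values, not from this commit):

    #include <libavutil/parseutils.h>

    int64_t us;
    /* nonzero third argument: parse as a duration, result in microseconds */
    if (av_parse_time(&us, "1500ms", 1) >= 0) {
        /* us == 1500000, equivalent to parsing "1.5" */
    }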

View file

@@ -202,13 +202,14 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,

 void show_help_children(const AVClass *class, int flags)
 {
-    const AVClass *child = NULL;
+    void *iter = NULL;
+    const AVClass *child;
     if (class->option) {
         av_opt_show2(&class, NULL, flags, 0);
         printf("\n");
     }

-    while (child = av_opt_child_class_next(class, child))
+    while (child = av_opt_child_class_iterate(class, &iter))
         show_help_children(child, flags);
 }

View file

@@ -229,6 +229,8 @@ typedef struct OptionsContext {
     int        nb_time_bases;
     SpecifierOpt *enc_time_bases;
     int        nb_enc_time_bases;
+    SpecifierOpt *autoscale;
+    int        nb_autoscale;
 } OptionsContext;

 typedef struct InputFilter {
@@ -479,6 +481,7 @@ typedef struct OutputStream {
     int force_fps;
     int top_field_first;
     int rotate_overridden;
+    int autoscale;
     double rotate_override_value;
     AVRational frame_aspect_ratio;

View file

@@ -470,7 +470,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
     if (ret < 0)
         return ret;

-    if (ofilter->width || ofilter->height) {
+    if ((ofilter->width || ofilter->height) && ofilter->ost->autoscale) {
         char args[255];
         AVFilterContext *filter;
         AVDictionaryEntry *e = NULL;

View file

@@ -62,6 +62,7 @@ static const char *opt_name_hwaccels[] = {"hwaccel", NULL};
 static const char *opt_name_hwaccel_devices[]         = {"hwaccel_device", NULL};
 static const char *opt_name_hwaccel_output_formats[]  = {"hwaccel_output_format", NULL};
 static const char *opt_name_autorotate[]              = {"autorotate", NULL};
+static const char *opt_name_autoscale[]               = {"autoscale", NULL};
 static const char *opt_name_max_frames[]              = {"frames", "aframes", "vframes", "dframes", NULL};
 static const char *opt_name_bitstream_filters[]       = {"bsf", "absf", "vbsf", NULL};
 static const char *opt_name_codec_tags[]              = {"tag", "atag", "vtag", "stag", NULL};
@@ -1462,6 +1463,8 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
     ost->encoder_opts = filter_codec_opts(o->g->codec_opts, ost->enc->id, oc, st, ost->enc);

     MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
+    ost->autoscale = 1;
+    MATCH_PER_STREAM_OPT(autoscale, i, ost->autoscale, oc, st);
     if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
         do {
             buf = get_line(s);
@@ -3664,6 +3667,9 @@ const OptionDef options[] = {
     { "autorotate",   HAS_ARG | OPT_BOOL | OPT_SPEC |
                       OPT_EXPERT | OPT_INPUT,                    { .off = OFFSET(autorotate) },
         "automatically insert correct rotate filters" },
+    { "autoscale",    HAS_ARG | OPT_BOOL | OPT_SPEC |
+                      OPT_EXPERT | OPT_OUTPUT,                   { .off = OFFSET(autoscale) },
+        "automatically insert a scale filter at the end of the filter graph" },

     /* audio options */
     { "aframes",      OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },

View file

@@ -2854,7 +2854,7 @@ static int open_input_file(InputFile *ifile, const char *filename,
 {
     int err, i;
     AVFormatContext *fmt_ctx = NULL;
-    AVDictionaryEntry *t;
+    AVDictionaryEntry *t = NULL;
     int scan_all_pmts_set = 0;

     fmt_ctx = avformat_alloc_context();
@@ -2879,10 +2879,8 @@
     ifile->fmt_ctx = fmt_ctx;
     if (scan_all_pmts_set)
         av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
-    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
-        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
-        return AVERROR_OPTION_NOT_FOUND;
-    }
+    while ((t = av_dict_get(format_opts, "", t, AV_DICT_IGNORE_SUFFIX)))
+        av_log(NULL, AV_LOG_WARNING, "Option %s skipped - not known to demuxer.\n", t->key);

     if (find_stream_info) {
         AVDictionary **opts = setup_find_stream_info_opts(fmt_ctx, codec_opts);
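Passing the previously returned entry back into av_dict_get() with an empty key and AV_DICT_IGNORE_SUFFIX is the standard way to walk every entry of an AVDictionary, which is what turns the single fatal check into a per-option warning above. A stand-alone sketch (assuming some populated AVDictionary *opts):

    #include <stdio.h>
    #include <libavutil/dict.h>

    AVDictionaryEntry *e = NULL;
    /* each call resumes after the previous entry; NULL starts the walk */
    while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("leftover option: %s=%s\n", e->key, e->value);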

View file

@@ -536,6 +536,7 @@ OBJS-$(CONFIG_PGM_ENCODER) += pnmenc.o
 OBJS-$(CONFIG_PGMYUV_DECODER)          += pnmdec.o pnm.o
 OBJS-$(CONFIG_PGMYUV_ENCODER)          += pnmenc.o
 OBJS-$(CONFIG_PGSSUB_DECODER)          += pgssubdec.o
+OBJS-$(CONFIG_PGX_DECODER)             += pgxdec.o
 OBJS-$(CONFIG_PICTOR_DECODER)          += pictordec.o cga_data.o
 OBJS-$(CONFIG_PIXLET_DECODER)          += pixlet.o
 OBJS-$(CONFIG_PJS_DECODER)             += textdec.o ass.o
@@ -910,7 +911,7 @@ OBJS-$(CONFIG_HEVC_DXVA2_HWACCEL) += dxva2_hevc.o
 OBJS-$(CONFIG_HEVC_NVDEC_HWACCEL)      += nvdec_hevc.o
 OBJS-$(CONFIG_HEVC_QSV_HWACCEL)        += qsvdec_h2645.o
 OBJS-$(CONFIG_HEVC_VAAPI_HWACCEL)      += vaapi_hevc.o h265_profile_level.o
-OBJS-$(CONFIG_HEVC_VDPAU_HWACCEL)      += vdpau_hevc.o
+OBJS-$(CONFIG_HEVC_VDPAU_HWACCEL)      += vdpau_hevc.o h265_profile_level.o
 OBJS-$(CONFIG_MJPEG_NVDEC_HWACCEL)     += nvdec_mjpeg.o
 OBJS-$(CONFIG_MJPEG_VAAPI_HWACCEL)     += vaapi_mjpeg.o
 OBJS-$(CONFIG_MPEG1_NVDEC_HWACCEL)     += nvdec_mpeg12.o

View file

@@ -97,8 +97,13 @@ get_next:
             avctx->audio_service_type = s->service_type;
     }

-    if (avctx->codec_id != AV_CODEC_ID_EAC3)
-        avctx->bit_rate = s->bit_rate;
+    /* Calculate the average bit rate */
+    s->frame_number++;
+    if (avctx->codec_id != AV_CODEC_ID_EAC3) {
+        avctx->bit_rate =
+            (s->last_bit_rate * (s->frame_number - 1) + s->bit_rate) / s->frame_number;
+        s->last_bit_rate = avctx->bit_rate;
+    }
 }

 return i;

View file

@@ -55,6 +55,8 @@ typedef struct AACAC3ParseContext {
     uint64_t state;

     int need_next_header;
+    int frame_number;
+    int last_bit_rate;
     enum AVCodecID codec_id;
 } AACAC3ParseContext;
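The parser change above is a cumulative moving average: after frame n, avg_n = (avg_(n-1) * (n - 1) + rate_n) / n, so only the previous average (last_bit_rate) and a frame counter need to be stored. The same arithmetic in isolation (names are illustrative, not the parser's):

    /* Running average over n frames without storing per-frame rates. */
    static long long update_average(long long prev_avg, long long rate, int n)
    {
        /* n is the 1-based count of frames including the current one */
        return (prev_avg * (n - 1) + rate) / n;
    }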

View file

@@ -941,15 +941,14 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
 static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s)
 {
     int ch;
-    FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->buffer.samples, s->channels, 3 * 1024 * sizeof(s->buffer.samples[0]), alloc_fail);
-    FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->cpe, s->chan_map[0], sizeof(ChannelElement), alloc_fail);
+    if (!FF_ALLOCZ_TYPED_ARRAY(s->buffer.samples, s->channels * 3 * 1024) ||
+        !FF_ALLOCZ_TYPED_ARRAY(s->cpe, s->chan_map[0]))
+        return AVERROR(ENOMEM);

     for(ch = 0; ch < s->channels; ch++)
         s->planar_samples[ch] = s->buffer.samples + 3 * 1024 * ch;

     return 0;
-alloc_fail:
-    return AVERROR(ENOMEM);
 }

 static av_cold void aac_encode_init_tables(void)
@@ -1078,13 +1077,13 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
         s->options.mid_side = 0;

     if ((ret = dsp_init(avctx, s)) < 0)
-        goto fail;
+        return ret;

     if ((ret = alloc_buffers(avctx, s)) < 0)
-        goto fail;
+        return ret;

     if ((ret = put_audio_specific_config(avctx)))
-        goto fail;
+        return ret;

     sizes[0] = ff_aac_swb_size_1024[s->samplerate_index];
     sizes[1] = ff_aac_swb_size_128[s->samplerate_index];
@@ -1094,7 +1093,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
         grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
     if ((ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths,
                            s->chan_map[0], grouping)) < 0)
-        goto fail;
+        return ret;
     s->psypp = ff_psy_preprocess_init(avctx);
     ff_lpc_init(&s->lpc, 2*avctx->frame_size, TNS_MAX_ORDER, FF_LPC_TYPE_LEVINSON);
     s->random_state = 0x1f2e3d4c;
@@ -1114,9 +1113,6 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
     ff_af_queue_init(avctx, &s->afq);

     return 0;
-fail:
-    aac_encode_end(avctx);
-    return ret;
 }

 #define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
@@ -1159,7 +1155,7 @@ AVCodec ff_aac_encoder = {
     .close          = aac_encode_end,
     .defaults       = aac_encode_defaults,
     .supported_samplerates = mpeg4audio_sample_rates,
-    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
+    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
     .capabilities   = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY,
     .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
                                                      AV_SAMPLE_FMT_NONE },
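FF_CODEC_CAP_INIT_CLEANUP tells the generic codec layer to call the codec's close callback whenever init fails, which is what lets the goto fail paths above collapse into plain returns. A minimal sketch of the resulting pattern (ExampleContext and its field are hypothetical):

    static av_cold int example_encode_init(AVCodecContext *avctx)
    {
        ExampleContext *s = avctx->priv_data;  /* hypothetical context */

        /* FF_ALLOCZ_TYPED_ARRAY() yields NULL on failure; with
         * FF_CODEC_CAP_INIT_CLEANUP set, .close runs after a failed
         * init, so no manual rollback is needed before returning. */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->buf, avctx->channels))
            return AVERROR(ENOMEM);
        return 0;
    }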

View file

@@ -2322,60 +2322,46 @@ static av_cold void set_bandwidth(AC3EncodeContext *s)

 static av_cold int allocate_buffers(AC3EncodeContext *s)
 {
-    AVCodecContext *avctx = s->avctx;
     int blk, ch;
     int channels = s->channels + 1; /* includes coupling channel */
     int channel_blocks = channels * s->num_blocks;
     int total_coefs = AC3_MAX_COEFS * channel_blocks;

     if (s->allocate_sample_buffers(s))
-        goto alloc_fail;
+        return AVERROR(ENOMEM);

-    FF_ALLOC_ARRAY_OR_GOTO(avctx, s->bap_buffer, total_coefs,
-                           sizeof(*s->bap_buffer), alloc_fail);
-    FF_ALLOC_ARRAY_OR_GOTO(avctx, s->bap1_buffer, total_coefs,
-                           sizeof(*s->bap1_buffer), alloc_fail);
-    FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->mdct_coef_buffer, total_coefs,
-                            sizeof(*s->mdct_coef_buffer), alloc_fail);
-    FF_ALLOC_ARRAY_OR_GOTO(avctx, s->exp_buffer, total_coefs,
-                           sizeof(*s->exp_buffer), alloc_fail);
-    FF_ALLOC_ARRAY_OR_GOTO(avctx, s->grouped_exp_buffer, channel_blocks, 128 *
-                           sizeof(*s->grouped_exp_buffer), alloc_fail);
-    FF_ALLOC_ARRAY_OR_GOTO(avctx, s->psd_buffer, total_coefs,
-                           sizeof(*s->psd_buffer), alloc_fail);
-    FF_ALLOC_ARRAY_OR_GOTO(avctx, s->band_psd_buffer, channel_blocks, 64 *
-                           sizeof(*s->band_psd_buffer), alloc_fail);
-    FF_ALLOC_ARRAY_OR_GOTO(avctx, s->mask_buffer, channel_blocks, 64 *
-                           sizeof(*s->mask_buffer), alloc_fail);
-    FF_ALLOC_ARRAY_OR_GOTO(avctx, s->qmant_buffer, total_coefs,
-                           sizeof(*s->qmant_buffer), alloc_fail);
+    if (!FF_ALLOC_TYPED_ARRAY(s->bap_buffer, total_coefs) ||
+        !FF_ALLOC_TYPED_ARRAY(s->bap1_buffer, total_coefs) ||
+        !FF_ALLOCZ_TYPED_ARRAY(s->mdct_coef_buffer, total_coefs) ||
+        !FF_ALLOC_TYPED_ARRAY(s->exp_buffer, total_coefs) ||
+        !FF_ALLOC_TYPED_ARRAY(s->grouped_exp_buffer, channel_blocks * 128) ||
+        !FF_ALLOC_TYPED_ARRAY(s->psd_buffer, total_coefs) ||
+        !FF_ALLOC_TYPED_ARRAY(s->band_psd_buffer, channel_blocks * 64) ||
+        !FF_ALLOC_TYPED_ARRAY(s->mask_buffer, channel_blocks * 64) ||
+        !FF_ALLOC_TYPED_ARRAY(s->qmant_buffer, total_coefs))
+        return AVERROR(ENOMEM);

     if (s->cpl_enabled) {
-        FF_ALLOC_ARRAY_OR_GOTO(avctx, s->cpl_coord_exp_buffer, channel_blocks, 16 *
-                               sizeof(*s->cpl_coord_exp_buffer), alloc_fail);
-        FF_ALLOC_ARRAY_OR_GOTO(avctx, s->cpl_coord_mant_buffer, channel_blocks, 16 *
-                               sizeof(*s->cpl_coord_mant_buffer), alloc_fail);
+        if (!FF_ALLOC_TYPED_ARRAY(s->cpl_coord_exp_buffer, channel_blocks * 16) ||
+            !FF_ALLOC_TYPED_ARRAY(s->cpl_coord_mant_buffer, channel_blocks * 16))
+            return AVERROR(ENOMEM);
     }

     for (blk = 0; blk < s->num_blocks; blk++) {
         AC3Block *block = &s->blocks[blk];
-        FF_ALLOCZ_ARRAY_OR_GOTO(avctx, block->mdct_coef, channels, sizeof(*block->mdct_coef),
-                                alloc_fail);
-        FF_ALLOCZ_ARRAY_OR_GOTO(avctx, block->exp, channels, sizeof(*block->exp),
-                                alloc_fail);
-        FF_ALLOCZ_ARRAY_OR_GOTO(avctx, block->grouped_exp, channels, sizeof(*block->grouped_exp),
-                                alloc_fail);
-        FF_ALLOCZ_ARRAY_OR_GOTO(avctx, block->psd, channels, sizeof(*block->psd),
-                                alloc_fail);
-        FF_ALLOCZ_ARRAY_OR_GOTO(avctx, block->band_psd, channels, sizeof(*block->band_psd),
-                                alloc_fail);
-        FF_ALLOCZ_ARRAY_OR_GOTO(avctx, block->mask, channels, sizeof(*block->mask),
-                                alloc_fail);
-        FF_ALLOCZ_ARRAY_OR_GOTO(avctx, block->qmant, channels, sizeof(*block->qmant),
-                                alloc_fail);
+        if (!FF_ALLOCZ_TYPED_ARRAY(block->mdct_coef, channels) ||
+            !FF_ALLOCZ_TYPED_ARRAY(block->exp, channels) ||
+            !FF_ALLOCZ_TYPED_ARRAY(block->grouped_exp, channels) ||
+            !FF_ALLOCZ_TYPED_ARRAY(block->psd, channels) ||
+            !FF_ALLOCZ_TYPED_ARRAY(block->band_psd, channels) ||
+            !FF_ALLOCZ_TYPED_ARRAY(block->mask, channels) ||
+            !FF_ALLOCZ_TYPED_ARRAY(block->qmant, channels))
+            return AVERROR(ENOMEM);

         if (s->cpl_enabled) {
-            FF_ALLOCZ_ARRAY_OR_GOTO(avctx, block->cpl_coord_exp, channels, sizeof(*block->cpl_coord_exp),
-                                    alloc_fail);
-            FF_ALLOCZ_ARRAY_OR_GOTO(avctx, block->cpl_coord_mant, channels, sizeof(*block->cpl_coord_mant),
-                                    alloc_fail);
+            if (!FF_ALLOCZ_TYPED_ARRAY(block->cpl_coord_exp, channels) ||
+                !FF_ALLOCZ_TYPED_ARRAY(block->cpl_coord_mant, channels))
+                return AVERROR(ENOMEM);
         }

         for (ch = 0; ch < channels; ch++) {
@@ -2397,28 +2383,26 @@ static av_cold int allocate_buffers(AC3EncodeContext *s)
     }

     if (!s->fixed_point) {
-        FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->fixed_coef_buffer, total_coefs,
-                                sizeof(*s->fixed_coef_buffer), alloc_fail);
+        if (!FF_ALLOCZ_TYPED_ARRAY(s->fixed_coef_buffer, total_coefs))
+            return AVERROR(ENOMEM);
         for (blk = 0; blk < s->num_blocks; blk++) {
             AC3Block *block = &s->blocks[blk];
-            FF_ALLOCZ_ARRAY_OR_GOTO(avctx, block->fixed_coef, channels,
-                                    sizeof(*block->fixed_coef), alloc_fail);
+            if (!FF_ALLOCZ_TYPED_ARRAY(block->fixed_coef, channels))
+                return AVERROR(ENOMEM);
             for (ch = 0; ch < channels; ch++)
                 block->fixed_coef[ch] = &s->fixed_coef_buffer[AC3_MAX_COEFS * (s->num_blocks * ch + blk)];
         }
     } else {
         for (blk = 0; blk < s->num_blocks; blk++) {
             AC3Block *block = &s->blocks[blk];
-            FF_ALLOCZ_ARRAY_OR_GOTO(avctx, block->fixed_coef, channels,
-                                    sizeof(*block->fixed_coef), alloc_fail);
+            if (!FF_ALLOCZ_TYPED_ARRAY(block->fixed_coef, channels))
+                return AVERROR(ENOMEM);
             for (ch = 0; ch < channels; ch++)
                 block->fixed_coef[ch] = (int32_t *)block->mdct_coef[ch];
         }
     }

     return 0;
-alloc_fail:
-    return AVERROR(ENOMEM);
 }
@@ -2433,7 +2417,7 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx)

     ret = validate_options(s);
     if (ret)
-        goto init_fail;
+        return ret;

     avctx->frame_size = AC3_BLOCK_SIZE * s->num_blocks;
     avctx->initial_padding = AC3_BLOCK_SIZE;
@@ -2476,11 +2460,11 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx)
     ret = s->mdct_init(s);
     if (ret)
-        goto init_fail;
+        return ret;

     ret = allocate_buffers(s);
     if (ret)
-        goto init_fail;
+        return ret;

     ff_audiodsp_init(&s->adsp);
     ff_me_cmp_init(&s->mecc, avctx);
@@ -2489,7 +2473,4 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx)
     dprint_options(s);

     return 0;
-init_fail:
-    ff_ac3_encode_close(avctx);
-    return ret;
 }

View file

@@ -155,6 +155,7 @@ AVCodec ff_ac3_fixed_encoder = {
     .sample_fmts     = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16P,
                                                       AV_SAMPLE_FMT_NONE },
     .priv_class      = &ac3enc_class,
+    .caps_internal   = FF_CODEC_CAP_INIT_CLEANUP,
     .supported_samplerates = ff_ac3_sample_rate_tab,
     .channel_layouts = ff_ac3_channel_layouts,
     .defaults        = ac3_defaults,

View file

@@ -41,19 +41,16 @@ int AC3_NAME(allocate_sample_buffers)(AC3EncodeContext *s)
 {
     int ch;

-    FF_ALLOC_OR_GOTO(s->avctx, s->windowed_samples, AC3_WINDOW_SIZE *
-                     sizeof(*s->windowed_samples), alloc_fail);
-    FF_ALLOC_ARRAY_OR_GOTO(s->avctx, s->planar_samples, s->channels, sizeof(*s->planar_samples),
-                     alloc_fail);
-    for (ch = 0; ch < s->channels; ch++) {
-        FF_ALLOCZ_OR_GOTO(s->avctx, s->planar_samples[ch],
-                          (AC3_FRAME_SIZE+AC3_BLOCK_SIZE) * sizeof(**s->planar_samples),
-                          alloc_fail);
-    }
-    return 0;
-alloc_fail:
-    return AVERROR(ENOMEM);
+    if (!FF_ALLOC_TYPED_ARRAY(s->windowed_samples, AC3_WINDOW_SIZE) ||
+        !FF_ALLOC_TYPED_ARRAY(s->planar_samples, s->channels))
+        return AVERROR(ENOMEM);
+
+    for (ch = 0; ch < s->channels; ch++) {
+        if (!(s->planar_samples[ch] = av_mallocz((AC3_FRAME_SIZE + AC3_BLOCK_SIZE) *
+                                                 sizeof(**s->planar_samples))))
+            return AVERROR(ENOMEM);
+    }
+    return 0;
 }

View file

@ -162,12 +162,19 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
} }
break; break;
case AV_CODEC_ID_ADPCM_IMA_APM: case AV_CODEC_ID_ADPCM_IMA_APM:
if (avctx->extradata && avctx->extradata_size >= 16) { if (avctx->extradata) {
if (avctx->extradata_size >= 28) {
c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
} else if (avctx->extradata_size >= 16) {
c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 0), 18); c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 0), 18);
c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 4), 0, 88); c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 4), 0, 88);
c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 8), 18); c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 8), 18);
c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 12), 0, 88); c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 12), 0, 88);
} }
}
break; break;
case AV_CODEC_ID_ADPCM_IMA_WS: case AV_CODEC_ID_ADPCM_IMA_WS:
if (avctx->extradata && avctx->extradata_size >= 2) if (avctx->extradata && avctx->extradata_size >= 2)
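
The AV_CODEC_ID_ADPCM_IMA_APM branch above now probes two extradata layouts: a 28-byte form with the predictor/step_index pairs at offsets 16/20 and 4/8, and the original 16-byte form as fallback. A hedged sketch of that probe, with rl32() and clip() as stand-ins for libavutil's AV_RL32 and av_clip (the real code additionally clamps predictors with av_clip_intp2(x, 18)):

#include <stdint.h>

static uint32_t rl32(const uint8_t *p)      /* little-endian 32-bit read */
{
    return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
}

static int clip(int v, int lo, int hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

static void parse_apm_extradata(const uint8_t *ed, int size,
                                int32_t pred[2], int step[2])
{
    if (size >= 28) {                 /* newer, larger layout */
        pred[0] = rl32(ed + 16); step[0] = clip(rl32(ed + 20), 0, 88);
        pred[1] = rl32(ed + 4);  step[1] = clip(rl32(ed + 8),  0, 88);
    } else if (size >= 16) {          /* original layout */
        pred[0] = rl32(ed + 0);  step[0] = clip(rl32(ed + 4),  0, 88);
        pred[1] = rl32(ed + 8);  step[1] = clip(rl32(ed + 12), 0, 88);
    }
}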

View file

@ -58,14 +58,11 @@ typedef struct ADPCMEncodeContext {
#define FREEZE_INTERVAL 128 #define FREEZE_INTERVAL 128
static av_cold int adpcm_encode_close(AVCodecContext *avctx);
static av_cold int adpcm_encode_init(AVCodecContext *avctx) static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{ {
ADPCMEncodeContext *s = avctx->priv_data; ADPCMEncodeContext *s = avctx->priv_data;
uint8_t *extradata; uint8_t *extradata;
int i; int i;
int ret = AVERROR(ENOMEM);
if (avctx->channels > 2) { if (avctx->channels > 2) {
av_log(avctx, AV_LOG_ERROR, "only stereo or mono is supported\n"); av_log(avctx, AV_LOG_ERROR, "only stereo or mono is supported\n");
@ -89,14 +86,11 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
if (avctx->trellis) { if (avctx->trellis) {
int frontier = 1 << avctx->trellis; int frontier = 1 << avctx->trellis;
int max_paths = frontier * FREEZE_INTERVAL; int max_paths = frontier * FREEZE_INTERVAL;
FF_ALLOC_OR_GOTO(avctx, s->paths, if (!FF_ALLOC_TYPED_ARRAY(s->paths, max_paths) ||
max_paths * sizeof(*s->paths), error); !FF_ALLOC_TYPED_ARRAY(s->node_buf, 2 * frontier) ||
FF_ALLOC_OR_GOTO(avctx, s->node_buf, !FF_ALLOC_TYPED_ARRAY(s->nodep_buf, 2 * frontier) ||
2 * frontier * sizeof(*s->node_buf), error); !FF_ALLOC_TYPED_ARRAY(s->trellis_hash, 65536))
FF_ALLOC_OR_GOTO(avctx, s->nodep_buf, return AVERROR(ENOMEM);
2 * frontier * sizeof(*s->nodep_buf), error);
FF_ALLOC_OR_GOTO(avctx, s->trellis_hash,
65536 * sizeof(*s->trellis_hash), error);
} }
avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id); avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id);
@ -123,7 +117,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
avctx->bits_per_coded_sample = 4; avctx->bits_per_coded_sample = 4;
avctx->block_align = BLKSIZE; avctx->block_align = BLKSIZE;
if (!(avctx->extradata = av_malloc(32 + AV_INPUT_BUFFER_PADDING_SIZE))) if (!(avctx->extradata = av_malloc(32 + AV_INPUT_BUFFER_PADDING_SIZE)))
goto error; return AVERROR(ENOMEM);
avctx->extradata_size = 32; avctx->extradata_size = 32;
extradata = avctx->extradata; extradata = avctx->extradata;
bytestream_put_le16(&extradata, avctx->frame_size); bytestream_put_le16(&extradata, avctx->frame_size);
@ -143,8 +137,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
avctx->sample_rate != 44100) { avctx->sample_rate != 44100) {
av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, " av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
"22050 or 44100\n"); "22050 or 44100\n");
ret = AVERROR(EINVAL); return AVERROR(EINVAL);
goto error;
} }
avctx->frame_size = 512 * (avctx->sample_rate / 11025); avctx->frame_size = 512 * (avctx->sample_rate / 11025);
break; break;
@ -153,13 +146,10 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
avctx->block_align = BLKSIZE; avctx->block_align = BLKSIZE;
break; break;
default: default:
ret = AVERROR(EINVAL); return AVERROR(EINVAL);
goto error;
} }
return 0; return 0;
error:
return ret;
} }
static av_cold int adpcm_encode_close(AVCodecContext *avctx) static av_cold int adpcm_encode_close(AVCodecContext *avctx)
@ -523,7 +513,8 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
/* stereo: 4 bytes (8 samples) for left, 4 bytes for right */ /* stereo: 4 bytes (8 samples) for left, 4 bytes for right */
if (avctx->trellis > 0) { if (avctx->trellis > 0) {
FF_ALLOC_ARRAY_OR_GOTO(avctx, buf, avctx->channels, blocks * 8, error); if (!FF_ALLOC_TYPED_ARRAY(buf, avctx->channels * blocks * 8))
return AVERROR(ENOMEM);
for (ch = 0; ch < avctx->channels; ch++) { for (ch = 0; ch < avctx->channels; ch++) {
adpcm_compress_trellis(avctx, &samples_p[ch][1], adpcm_compress_trellis(avctx, &samples_p[ch][1],
buf + ch * blocks * 8, &c->status[ch], buf + ch * blocks * 8, &c->status[ch],
@ -618,7 +609,8 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
} }
if (avctx->trellis > 0) { if (avctx->trellis > 0) {
FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error); if (!(buf = av_malloc(2 * n)))
return AVERROR(ENOMEM);
adpcm_compress_trellis(avctx, samples + avctx->channels, buf, adpcm_compress_trellis(avctx, samples + avctx->channels, buf,
&c->status[0], n, avctx->channels); &c->status[0], n, avctx->channels);
if (avctx->channels == 2) if (avctx->channels == 2)
@ -666,7 +658,8 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
if (avctx->trellis > 0) { if (avctx->trellis > 0) {
n = avctx->block_align - 7 * avctx->channels; n = avctx->block_align - 7 * avctx->channels;
FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error); if (!(buf = av_malloc(2 * n)))
return AVERROR(ENOMEM);
if (avctx->channels == 1) { if (avctx->channels == 1) {
adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n, adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
avctx->channels); avctx->channels);
@ -693,7 +686,8 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
case AV_CODEC_ID_ADPCM_YAMAHA: case AV_CODEC_ID_ADPCM_YAMAHA:
n = frame->nb_samples / 2; n = frame->nb_samples / 2;
if (avctx->trellis > 0) { if (avctx->trellis > 0) {
FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 2, error); if (!(buf = av_malloc(2 * n * 2)))
return AVERROR(ENOMEM);
n *= 2; n *= 2;
if (avctx->channels == 1) { if (avctx->channels == 1) {
adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n, adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
@ -724,8 +718,6 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
avpkt->size = pkt_size; avpkt->size = pkt_size;
*got_packet_ptr = 1; *got_packet_ptr = 1;
return 0; return 0;
error:
return AVERROR(ENOMEM);
} }
static const enum AVSampleFormat sample_fmts[] = { static const enum AVSampleFormat sample_fmts[] = {

View file

@ -489,6 +489,7 @@ static int allocate_buffers(ALACContext *alac)
{ {
int ch; int ch;
unsigned buf_size = alac->max_samples_per_frame * sizeof(int32_t); unsigned buf_size = alac->max_samples_per_frame * sizeof(int32_t);
unsigned extra_buf_size = buf_size + AV_INPUT_BUFFER_PADDING_SIZE;
for (ch = 0; ch < 2; ch++) { for (ch = 0; ch < 2; ch++) {
alac->predict_error_buffer[ch] = NULL; alac->predict_error_buffer[ch] = NULL;
@ -497,22 +498,19 @@ static int allocate_buffers(ALACContext *alac)
} }
for (ch = 0; ch < FFMIN(alac->channels, 2); ch++) { for (ch = 0; ch < FFMIN(alac->channels, 2); ch++) {
FF_ALLOC_OR_GOTO(alac->avctx, alac->predict_error_buffer[ch], if (!(alac->predict_error_buffer[ch] = av_malloc(buf_size)))
buf_size, buf_alloc_fail); return AVERROR(ENOMEM);
alac->direct_output = alac->sample_size > 16; alac->direct_output = alac->sample_size > 16;
if (!alac->direct_output) { if (!alac->direct_output) {
FF_ALLOC_OR_GOTO(alac->avctx, alac->output_samples_buffer[ch], if (!(alac->output_samples_buffer[ch] = av_malloc(extra_buf_size)))
buf_size + AV_INPUT_BUFFER_PADDING_SIZE, buf_alloc_fail); return AVERROR(ENOMEM);
} }
FF_ALLOC_OR_GOTO(alac->avctx, alac->extra_bits_buffer[ch], if (!(alac->extra_bits_buffer[ch] = av_malloc(extra_buf_size)))
buf_size + AV_INPUT_BUFFER_PADDING_SIZE, buf_alloc_fail); return AVERROR(ENOMEM);
} }
return 0; return 0;
buf_alloc_fail:
alac_decode_close(alac->avctx);
return AVERROR(ENOMEM);
} }
static int alac_set_info(ALACContext *alac) static int alac_set_info(ALACContext *alac)
@ -625,5 +623,6 @@ AVCodec ff_alac_decoder = {
.close = alac_decode_close, .close = alac_decode_close,
.decode = alac_decode_frame, .decode = alac_decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS, .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.priv_class = &alac_class .priv_class = &alac_class
}; };

View file

@ -238,6 +238,7 @@ extern AVCodec ff_pgm_encoder;
extern AVCodec ff_pgm_decoder; extern AVCodec ff_pgm_decoder;
extern AVCodec ff_pgmyuv_encoder; extern AVCodec ff_pgmyuv_encoder;
extern AVCodec ff_pgmyuv_decoder; extern AVCodec ff_pgmyuv_decoder;
extern AVCodec ff_pgx_decoder;
extern AVCodec ff_pictor_decoder; extern AVCodec ff_pictor_decoder;
extern AVCodec ff_pixlet_decoder; extern AVCodec ff_pixlet_decoder;
extern AVCodec ff_png_encoder; extern AVCodec ff_png_encoder;
@ -679,9 +680,7 @@ extern AVCodec ff_xsub_decoder;
/* external libraries */ /* external libraries */
extern AVCodec ff_aac_at_encoder; extern AVCodec ff_aac_at_encoder;
extern AVCodec ff_aac_at_decoder; extern AVCodec ff_aac_at_decoder;
extern AVCodec ff_aac_mf_encoder;
extern AVCodec ff_ac3_at_decoder; extern AVCodec ff_ac3_at_decoder;
extern AVCodec ff_ac3_mf_encoder;
extern AVCodec ff_adpcm_ima_qt_at_decoder; extern AVCodec ff_adpcm_ima_qt_at_decoder;
extern AVCodec ff_alac_at_encoder; extern AVCodec ff_alac_at_encoder;
extern AVCodec ff_alac_at_decoder; extern AVCodec ff_alac_at_decoder;
@ -693,7 +692,6 @@ extern AVCodec ff_ilbc_at_decoder;
extern AVCodec ff_mp1_at_decoder; extern AVCodec ff_mp1_at_decoder;
extern AVCodec ff_mp2_at_decoder; extern AVCodec ff_mp2_at_decoder;
extern AVCodec ff_mp3_at_decoder; extern AVCodec ff_mp3_at_decoder;
extern AVCodec ff_mp3_mf_encoder;
extern AVCodec ff_pcm_alaw_at_encoder; extern AVCodec ff_pcm_alaw_at_encoder;
extern AVCodec ff_pcm_alaw_at_decoder; extern AVCodec ff_pcm_alaw_at_decoder;
extern AVCodec ff_pcm_mulaw_at_encoder; extern AVCodec ff_pcm_mulaw_at_encoder;
@ -757,6 +755,8 @@ extern AVCodec ff_idf_decoder;
/* external libraries that shouldn't be used by default if one of the /* external libraries that shouldn't be used by default if one of the
* above is available */ * above is available */
extern AVCodec ff_aac_mf_encoder;
extern AVCodec ff_ac3_mf_encoder;
extern AVCodec ff_h263_v4l2m2m_encoder; extern AVCodec ff_h263_v4l2m2m_encoder;
extern AVCodec ff_libaom_av1_decoder; extern AVCodec ff_libaom_av1_decoder;
extern AVCodec ff_libopenh264_encoder; extern AVCodec ff_libopenh264_encoder;
@ -789,6 +789,7 @@ extern AVCodec ff_mjpeg_cuvid_decoder;
extern AVCodec ff_mjpeg_qsv_encoder; extern AVCodec ff_mjpeg_qsv_encoder;
extern AVCodec ff_mjpeg_qsv_decoder; extern AVCodec ff_mjpeg_qsv_decoder;
extern AVCodec ff_mjpeg_vaapi_encoder; extern AVCodec ff_mjpeg_vaapi_encoder;
extern AVCodec ff_mp3_mf_encoder;
extern AVCodec ff_mpeg1_cuvid_decoder; extern AVCodec ff_mpeg1_cuvid_decoder;
extern AVCodec ff_mpeg2_cuvid_decoder; extern AVCodec ff_mpeg2_cuvid_decoder;
extern AVCodec ff_mpeg2_qsv_encoder; extern AVCodec ff_mpeg2_qsv_encoder;
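
Note that the MediaFoundation encoders are not deleted here; they move below the "external libraries that shouldn't be used by default" marker. Position matters because codec lookup walks this table in order and the first match for a codec ID becomes the default. A toy sketch of that selection rule (the table contents are invented):

#include <stddef.h>

typedef struct Encoder { const char *name; int codec_id; } Encoder;

/* Hypothetical table: the native encoder precedes the wrapper,
 * so it wins the default lookup below. */
static const Encoder encoder_list[] = {
    { "aac",    1 },
    { "aac_mf", 1 },
    { NULL,     0 },
};

/* First match in table order is the default; demoting aac_mf in the
 * table changes the default without removing the codec. */
static const Encoder *find_default_encoder(int codec_id)
{
    for (size_t i = 0; encoder_list[i].name; i++)
        if (encoder_list[i].codec_id == codec_id)
            return &encoder_list[i];
    return NULL;
}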

View file

@ -33,6 +33,7 @@
#include "libavutil/time.h" #include "libavutil/time.h"
#include "amfenc.h" #include "amfenc.h"
#include "encode.h"
#include "internal.h" #include "internal.h"
#if CONFIG_D3D11VA #if CONFIG_D3D11VA
@ -588,17 +589,27 @@ static void amf_release_buffer_with_frame_ref(AMFBuffer *frame_ref_storage_buffe
frame_ref_storage_buffer->pVtbl->Release(frame_ref_storage_buffer); frame_ref_storage_buffer->pVtbl->Release(frame_ref_storage_buffer);
} }
int ff_amf_send_frame(AVCodecContext *avctx, const AVFrame *frame) int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{ {
AmfContext *ctx = avctx->priv_data; AmfContext *ctx = avctx->priv_data;
AMFSurface *surface; AMFSurface *surface;
AMF_RESULT res; AMF_RESULT res;
int ret; int ret;
AMF_RESULT res_query;
AMFData *data = NULL;
AVFrame *frame = ctx->delayed_frame;
int block_and_wait;
if (!ctx->encoder) if (!ctx->encoder)
return AVERROR(EINVAL); return AVERROR(EINVAL);
if (!frame) { // submit drain if (!frame->buf[0]) {
ret = ff_encode_get_frame(avctx, frame);
if (ret < 0 && ret != AVERROR_EOF)
return ret;
}
if (!frame->buf[0]) { // submit drain
if (!ctx->eof) { // submit drain one time only if (!ctx->eof) { // submit drain one time only
if (ctx->delayed_surface != NULL) { if (ctx->delayed_surface != NULL) {
ctx->delayed_drain = 1; // input queue is full: resubmit Drain() in ff_amf_receive_packet ctx->delayed_drain = 1; // input queue is full: resubmit Drain() in ff_amf_receive_packet
@ -613,15 +624,10 @@ int ff_amf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "Drain() failed with error %d\n", res); AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "Drain() failed with error %d\n", res);
} }
} }
} else{
return AVERROR_EOF;
} }
} else { // submit frame } else if (!ctx->delayed_surface) { // submit frame
int hw_surface = 0; int hw_surface = 0;
if (ctx->delayed_surface != NULL) {
return AVERROR(EAGAIN); // should not happen when called from ffmpeg, other clients may resubmit
}
// prepare surface from frame // prepare surface from frame
switch (frame->format) { switch (frame->format) {
#if CONFIG_D3D11VA #if CONFIG_D3D11VA
@ -693,38 +699,23 @@ int ff_amf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
break; break;
} }
// submit surface // submit surface
res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface); res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
if (res == AMF_INPUT_FULL) { // handle full queue if (res == AMF_INPUT_FULL) { // handle full queue
//store surface for later submission //store surface for later submission
ctx->delayed_surface = surface; ctx->delayed_surface = surface;
if (surface->pVtbl->GetMemoryType(surface) == AMF_MEMORY_DX11) {
av_frame_ref(ctx->delayed_frame, frame);
}
} else { } else {
int64_t pts = frame->pts;
surface->pVtbl->Release(surface); surface->pVtbl->Release(surface);
AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d\n", res); AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d\n", res);
if ((ret = timestamp_queue_enqueue(avctx, frame->pts)) < 0) { av_frame_unref(frame);
if ((ret = timestamp_queue_enqueue(avctx, pts)) < 0) {
return ret; return ret;
} }
} }
} }
return 0;
}
int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
int ret;
AMF_RESULT res;
AMF_RESULT res_query;
AmfContext *ctx = avctx->priv_data;
AMFData *data = NULL;
int block_and_wait;
if (!ctx->encoder)
return AVERROR(EINVAL);
do { do {
block_and_wait = 0; block_and_wait = 0;
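
The AMF wrapper drops its separate ff_amf_send_frame() entry point: with the receive_packet-only API, the encoder pulls its own input through ff_encode_get_frame() from inside ff_amf_receive_packet(), keeping at most one pending frame (ctx->delayed_frame) when the hardware queue is full. A runnable schematic of that inverted control flow; everything except the pull-based shape (the stub encoder, the error codes, the helper names) is invented:

#include <stdio.h>

enum { OK = 0, ERR_EOF = -1, ERR_AGAIN = -2 };

typedef struct Encoder { int frame_valid, draining, frame; } Encoder;
typedef struct Packet  { int data; } Packet;

/* Stub input source: three frames, then end of stream. */
static int pull_frame(int *frame)
{
    static int n;
    if (n >= 3)
        return ERR_EOF;
    *frame = n++;
    return OK;
}

/* Stub output side: echo the pending frame back as a packet. */
static int poll_output(Encoder *e, Packet *pkt)
{
    if (!e->frame_valid)
        return e->draining ? ERR_EOF : ERR_AGAIN;
    pkt->data = e->frame;
    e->frame_valid = 0;
    return OK;
}

/* Pull-based entry point: fetch input here (the role that
 * ff_encode_get_frame() plays in the patch) instead of relying on a
 * separate send_frame() callback. */
static int receive_packet(Encoder *e, Packet *pkt)
{
    if (!e->frame_valid && !e->draining) {
        int ret = pull_frame(&e->frame);
        if (ret == ERR_EOF)
            e->draining = 1;          /* submit drain once */
        else if (ret < 0)
            return ret;               /* e.g. EAGAIN: caller feeds more */
        else
            e->frame_valid = 1;       /* frame submitted to the queue */
    }
    return poll_output(e, pkt);
}

int main(void)
{
    Encoder e = {0};
    Packet  p;
    while (receive_packet(&e, &p) == OK)
        printf("packet %d\n", p.data);
    return 0;
}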

View file

@ -129,8 +129,6 @@ int ff_amf_encode_close(AVCodecContext *avctx);
/** /**
* Encoding one frame - common function for all AMF encoders * Encoding one frame - common function for all AMF encoders
*/ */
int ff_amf_send_frame(AVCodecContext *avctx, const AVFrame *frame);
int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt); int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt);
/** /**

View file

@ -383,7 +383,6 @@ AVCodec ff_h264_amf_encoder = {
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_H264, .id = AV_CODEC_ID_H264,
.init = amf_encode_init_h264, .init = amf_encode_init_h264,
.send_frame = ff_amf_send_frame,
.receive_packet = ff_amf_receive_packet, .receive_packet = ff_amf_receive_packet,
.close = ff_amf_encode_close, .close = ff_amf_encode_close,
.priv_data_size = sizeof(AmfContext), .priv_data_size = sizeof(AmfContext),

View file

@ -69,7 +69,7 @@ static const AVOption options[] = {
{ "gop", "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_GOP_ALIGNED }, 0, 0, VE, "hdrmode" }, { "gop", "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_GOP_ALIGNED }, 0, 0, VE, "hdrmode" },
{ "idr", "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED }, 0, 0, VE, "hdrmode" }, { "idr", "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED }, 0, 0, VE, "hdrmode" },
{ "gops_per_idr", "GOPs per IDR 0-no IDR will be inserted", OFFSET(gops_per_idr), AV_OPT_TYPE_INT, { .i64 = 60 }, 0, INT_MAX, VE }, { "gops_per_idr", "GOPs per IDR 0-no IDR will be inserted", OFFSET(gops_per_idr), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, VE },
{ "preanalysis", "Enable preanalysis", OFFSET(preanalysis), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE}, { "preanalysis", "Enable preanalysis", OFFSET(preanalysis), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
{ "vbaq", "Enable VBAQ", OFFSET(enable_vbaq), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE}, { "vbaq", "Enable VBAQ", OFFSET(enable_vbaq), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
{ "enforce_hrd", "Enforce HRD", OFFSET(enforce_hrd), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE}, { "enforce_hrd", "Enforce HRD", OFFSET(enforce_hrd), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
@ -313,7 +313,6 @@ AVCodec ff_hevc_amf_encoder = {
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_HEVC, .id = AV_CODEC_ID_HEVC,
.init = amf_encode_init_hevc, .init = amf_encode_init_hevc,
.send_frame = ff_amf_send_frame,
.receive_packet = ff_amf_receive_packet, .receive_packet = ff_amf_receive_packet,
.close = ff_amf_encode_close, .close = ff_amf_encode_close,
.priv_data_size = sizeof(AmfContext), .priv_data_size = sizeof(AmfContext),

View file

@ -262,9 +262,8 @@ static av_cold int ape_decode_init(AVCodecContext *avctx)
for (i = 0; i < APE_FILTER_LEVELS; i++) { for (i = 0; i < APE_FILTER_LEVELS; i++) {
if (!ape_filter_orders[s->fset][i]) if (!ape_filter_orders[s->fset][i])
break; break;
FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i], if (!(s->filterbuf[i] = av_malloc((ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4)))
(ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4, return AVERROR(ENOMEM);
filter_alloc_fail);
} }
if (s->fileversion < 3860) { if (s->fileversion < 3860) {
@ -300,9 +299,6 @@ static av_cold int ape_decode_init(AVCodecContext *avctx)
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
return 0; return 0;
filter_alloc_fail:
ape_decode_close(avctx);
return AVERROR(ENOMEM);
} }
/** /**
@ -1638,6 +1634,7 @@ AVCodec ff_ape_decoder = {
.decode = ape_decode_frame, .decode = ape_decode_frame,
.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY | .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_DR1, AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.flush = ape_flush, .flush = ape_flush,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P, .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S16P,

View file

@ -483,7 +483,7 @@ static int ffat_decode(AVCodecContext *avctx, void *data,
if (avctx->codec_id == AV_CODEC_ID_AAC) { if (avctx->codec_id == AV_CODEC_ID_AAC) {
if (!at->extradata_size) { if (!at->extradata_size) {
uint8_t *side_data; uint8_t *side_data;
int side_data_size = 0; int side_data_size;
side_data = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, side_data = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
&side_data_size); &side_data_size);

View file

@ -34,8 +34,8 @@ static void av1_frame_merge_flush(AVBSFContext *bsf)
{ {
AV1FMergeContext *ctx = bsf->priv_data; AV1FMergeContext *ctx = bsf->priv_data;
ff_cbs_fragment_reset(ctx->cbc, &ctx->frag[0]); ff_cbs_fragment_reset(&ctx->frag[0]);
ff_cbs_fragment_reset(ctx->cbc, &ctx->frag[1]); ff_cbs_fragment_reset(&ctx->frag[1]);
av_packet_unref(ctx->in); av_packet_unref(ctx->in);
av_packet_unref(ctx->pkt); av_packet_unref(ctx->pkt);
} }
@ -93,7 +93,7 @@ eof:
ctx->idx = !ctx->idx; ctx->idx = !ctx->idx;
} else { } else {
for (i = 0; i < frag->nb_units; i++) { for (i = 0; i < frag->nb_units; i++) {
err = ff_cbs_insert_unit_content(ctx->cbc, tu, -1, frag->units[i].type, err = ff_cbs_insert_unit_content(tu, -1, frag->units[i].type,
frag->units[i].content, frag->units[i].content_ref); frag->units[i].content, frag->units[i].content_ref);
if (err < 0) if (err < 0)
goto fail; goto fail;
@ -108,7 +108,7 @@ eof:
else else
av_packet_unref(in); av_packet_unref(in);
ff_cbs_fragment_reset(ctx->cbc, &ctx->frag[ctx->idx]); ff_cbs_fragment_reset(&ctx->frag[ctx->idx]);
fail: fail:
if (err < 0 && err != AVERROR(EAGAIN)) if (err < 0 && err != AVERROR(EAGAIN))
@ -133,8 +133,8 @@ static void av1_frame_merge_close(AVBSFContext *bsf)
{ {
AV1FMergeContext *ctx = bsf->priv_data; AV1FMergeContext *ctx = bsf->priv_data;
ff_cbs_fragment_free(ctx->cbc, &ctx->frag[0]); ff_cbs_fragment_free(&ctx->frag[0]);
ff_cbs_fragment_free(ctx->cbc, &ctx->frag[1]); ff_cbs_fragment_free(&ctx->frag[1]);
av_packet_free(&ctx->in); av_packet_free(&ctx->in);
av_packet_free(&ctx->pkt); av_packet_free(&ctx->pkt);
ff_cbs_close(&ctx->cbc); ff_cbs_close(&ctx->cbc);

View file

@ -172,7 +172,7 @@ static int av1_frame_split_filter(AVBSFContext *ctx, AVPacket *out)
if (s->cur_frame == s->nb_frames) { if (s->cur_frame == s->nb_frames) {
av_packet_unref(s->buffer_pkt); av_packet_unref(s->buffer_pkt);
ff_cbs_fragment_reset(s->cbc, td); ff_cbs_fragment_reset(td);
} }
return 0; return 0;
@ -187,7 +187,7 @@ fail:
av_packet_unref(out); av_packet_unref(out);
av_packet_unref(s->buffer_pkt); av_packet_unref(s->buffer_pkt);
} }
ff_cbs_fragment_reset(s->cbc, td); ff_cbs_fragment_reset(td);
return ret; return ret;
} }
@ -224,7 +224,7 @@ static int av1_frame_split_init(AVBSFContext *ctx)
if (ret < 0) if (ret < 0)
av_log(ctx, AV_LOG_WARNING, "Failed to parse extradata.\n"); av_log(ctx, AV_LOG_WARNING, "Failed to parse extradata.\n");
ff_cbs_fragment_reset(s->cbc, td); ff_cbs_fragment_reset(td);
return 0; return 0;
} }
@ -234,7 +234,7 @@ static void av1_frame_split_flush(AVBSFContext *ctx)
AV1FSplitContext *s = ctx->priv_data; AV1FSplitContext *s = ctx->priv_data;
av_packet_unref(s->buffer_pkt); av_packet_unref(s->buffer_pkt);
ff_cbs_fragment_reset(s->cbc, &s->temporal_unit); ff_cbs_fragment_reset(&s->temporal_unit);
} }
static void av1_frame_split_close(AVBSFContext *ctx) static void av1_frame_split_close(AVBSFContext *ctx)
@ -242,7 +242,7 @@ static void av1_frame_split_close(AVBSFContext *ctx)
AV1FSplitContext *s = ctx->priv_data; AV1FSplitContext *s = ctx->priv_data;
av_packet_free(&s->buffer_pkt); av_packet_free(&s->buffer_pkt);
ff_cbs_fragment_free(s->cbc, &s->temporal_unit); ff_cbs_fragment_free(&s->temporal_unit);
ff_cbs_close(&s->cbc); ff_cbs_close(&s->cbc);
} }

View file

@ -151,7 +151,7 @@ static int av1_metadata_update_side_data(AVBSFContext *bsf, AVPacket *pkt)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
memcpy(side_data, frag->data, frag->data_size); memcpy(side_data, frag->data, frag->data_size);
ff_cbs_fragment_reset(ctx->cbc, frag); ff_cbs_fragment_reset(frag);
return 0; return 0;
} }
@ -195,13 +195,13 @@ static int av1_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
// If a Temporal Delimiter is present, it must be the first OBU. // If a Temporal Delimiter is present, it must be the first OBU.
if (frag->units[0].type == AV1_OBU_TEMPORAL_DELIMITER) { if (frag->units[0].type == AV1_OBU_TEMPORAL_DELIMITER) {
if (ctx->td == REMOVE) if (ctx->td == REMOVE)
ff_cbs_delete_unit(ctx->cbc, frag, 0); ff_cbs_delete_unit(frag, 0);
} else if (ctx->td == INSERT) { } else if (ctx->td == INSERT) {
td = (AV1RawOBU) { td = (AV1RawOBU) {
.header.obu_type = AV1_OBU_TEMPORAL_DELIMITER, .header.obu_type = AV1_OBU_TEMPORAL_DELIMITER,
}; };
err = ff_cbs_insert_unit_content(ctx->cbc, frag, 0, AV1_OBU_TEMPORAL_DELIMITER, err = ff_cbs_insert_unit_content(frag, 0, AV1_OBU_TEMPORAL_DELIMITER,
&td, NULL); &td, NULL);
if (err < 0) { if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to insert Temporal Delimiter.\n"); av_log(bsf, AV_LOG_ERROR, "Failed to insert Temporal Delimiter.\n");
@ -212,7 +212,7 @@ static int av1_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
if (ctx->delete_padding) { if (ctx->delete_padding) {
for (i = frag->nb_units - 1; i >= 0; i--) { for (i = frag->nb_units - 1; i >= 0; i--) {
if (frag->units[i].type == AV1_OBU_PADDING) if (frag->units[i].type == AV1_OBU_PADDING)
ff_cbs_delete_unit(ctx->cbc, frag, i); ff_cbs_delete_unit(frag, i);
} }
} }
@ -224,7 +224,7 @@ static int av1_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
err = 0; err = 0;
fail: fail:
ff_cbs_fragment_reset(ctx->cbc, frag); ff_cbs_fragment_reset(frag);
if (err < 0) if (err < 0)
av_packet_unref(pkt); av_packet_unref(pkt);
@ -268,7 +268,7 @@ static int av1_metadata_init(AVBSFContext *bsf)
err = 0; err = 0;
fail: fail:
ff_cbs_fragment_reset(ctx->cbc, frag); ff_cbs_fragment_reset(frag);
return err; return err;
} }
@ -276,7 +276,7 @@ static void av1_metadata_close(AVBSFContext *bsf)
{ {
AV1MetadataContext *ctx = bsf->priv_data; AV1MetadataContext *ctx = bsf->priv_data;
ff_cbs_fragment_free(ctx->cbc, &ctx->access_unit); ff_cbs_fragment_free(&ctx->access_unit);
ff_cbs_close(&ctx->cbc); ff_cbs_close(&ctx->cbc);
} }

View file

@ -78,7 +78,7 @@ static int av1_parser_parse(AVCodecParserContext *ctx,
av_log(avctx, AV_LOG_WARNING, "Failed to parse extradata.\n"); av_log(avctx, AV_LOG_WARNING, "Failed to parse extradata.\n");
} }
ff_cbs_fragment_reset(s->cbc, td); ff_cbs_fragment_reset(td);
} }
ret = ff_cbs_read(s->cbc, td, data, size); ret = ff_cbs_read(s->cbc, td, data, size);
@ -191,7 +191,7 @@ static int av1_parser_parse(AVCodecParserContext *ctx,
avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1})); avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
end: end:
ff_cbs_fragment_reset(s->cbc, td); ff_cbs_fragment_reset(td);
s->cbc->log_ctx = NULL; s->cbc->log_ctx = NULL;
@ -225,7 +225,7 @@ static void av1_parser_close(AVCodecParserContext *ctx)
{ {
AV1ParseContext *s = ctx->priv_data; AV1ParseContext *s = ctx->priv_data;
ff_cbs_fragment_free(s->cbc, &s->temporal_unit); ff_cbs_fragment_free(&s->temporal_unit);
ff_cbs_close(&s->cbc); ff_cbs_close(&s->cbc);
} }

View file

@ -91,8 +91,7 @@ static av_cold int end(AVCodecContext *avctx)
{ {
AVRnContext *a = avctx->priv_data; AVRnContext *a = avctx->priv_data;
avcodec_close(a->mjpeg_avctx); avcodec_free_context(&a->mjpeg_avctx);
av_freep(&a->mjpeg_avctx);
return 0; return 0;
} }
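
avcodec_free_context() is the public one-call replacement for the avcodec_close() + av_freep() pair used before: it closes the codec if it was opened, releases internal fields such as extradata, frees the context itself, and resets the caller's pointer to NULL. A minimal usage sketch against the real libavcodec API:

#include <libavcodec/avcodec.h>

static void open_and_release(const AVCodec *codec)
{
    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return;
    /* ... configure, avcodec_open2(), use ... */
    avcodec_free_context(&avctx);     /* close + free + NULL in one call */
}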

View file

@ -285,7 +285,6 @@ int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes,
vlc->bits = nb_bits; vlc->bits = nb_bits;
if (flags & INIT_VLC_USE_NEW_STATIC) { if (flags & INIT_VLC_USE_NEW_STATIC) {
av_assert0(nb_codes + 1 <= FF_ARRAY_ELEMS(localbuf)); av_assert0(nb_codes + 1 <= FF_ARRAY_ELEMS(localbuf));
buf = localbuf;
localvlc = *vlc_arg; localvlc = *vlc_arg;
vlc = &localvlc; vlc = &localvlc;
vlc->table_size = 0; vlc->table_size = 0;
@ -293,11 +292,13 @@ int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes,
vlc->table = NULL; vlc->table = NULL;
vlc->table_allocated = 0; vlc->table_allocated = 0;
vlc->table_size = 0; vlc->table_size = 0;
}
if (nb_codes + 1 > FF_ARRAY_ELEMS(localbuf)) {
buf = av_malloc_array((nb_codes + 1), sizeof(VLCcode)); buf = av_malloc_array((nb_codes + 1), sizeof(VLCcode));
if (!buf) if (!buf)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} } else
buf = localbuf;
av_assert0(symbols_size <= 2 || !symbols); av_assert0(symbols_size <= 2 || !symbols);
@ -309,7 +310,7 @@ int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes,
continue; \ continue; \
if (buf[j].bits > 3*nb_bits || buf[j].bits>32) { \ if (buf[j].bits > 3*nb_bits || buf[j].bits>32) { \
av_log(NULL, AV_LOG_ERROR, "Too long VLC (%d) in init_vlc\n", buf[j].bits);\ av_log(NULL, AV_LOG_ERROR, "Too long VLC (%d) in init_vlc\n", buf[j].bits);\
if (!(flags & INIT_VLC_USE_NEW_STATIC)) \ if (buf != localbuf) \
av_free(buf); \ av_free(buf); \
return AVERROR(EINVAL); \ return AVERROR(EINVAL); \
} \ } \
@ -317,7 +318,7 @@ int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes,
if (buf[j].code >= (1LL<<buf[j].bits)) { \ if (buf[j].code >= (1LL<<buf[j].bits)) { \
av_log(NULL, AV_LOG_ERROR, "Invalid code %"PRIx32" for %d in " \ av_log(NULL, AV_LOG_ERROR, "Invalid code %"PRIx32" for %d in " \
"init_vlc\n", buf[j].code, i); \ "init_vlc\n", buf[j].code, i); \
if (!(flags & INIT_VLC_USE_NEW_STATIC)) \ if (buf != localbuf) \
av_free(buf); \ av_free(buf); \
return AVERROR(EINVAL); \ return AVERROR(EINVAL); \
} \ } \
@ -346,6 +347,7 @@ int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes,
av_assert0(ret >= 0); av_assert0(ret >= 0);
*vlc_arg = *vlc; *vlc_arg = *vlc;
} else { } else {
if (buf != localbuf)
av_free(buf); av_free(buf);
if (ret < 0) { if (ret < 0) {
av_freep(&vlc->table); av_freep(&vlc->table);
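
The rewritten allocation logic in ff_init_vlc_sparse() is the classic small-buffer optimization: keep the scratch table on the stack when nb_codes fits in localbuf, spill to the heap otherwise, and free only when the pointer is not the stack array. Testing buf != localbuf is also more robust than re-deriving ownership from the INIT_VLC_USE_NEW_STATIC flag on every error path. A generic sketch of the pattern:

#include <stdlib.h>
#include <string.h>

#define LOCAL_ELEMS 256

static int process_codes(const int *codes, size_t n)
{
    int localbuf[LOCAL_ELEMS];
    int *buf;

    if (n > LOCAL_ELEMS) {            /* too big: go to the heap */
        buf = malloc(n * sizeof(*buf));
        if (!buf)
            return -1;
    } else {
        buf = localbuf;               /* small: stay on the stack */
    }

    memcpy(buf, codes, n * sizeof(*buf));
    /* ... sort and build tables from buf ... */

    if (buf != localbuf)              /* same ownership test as the patch */
        free(buf);
    return 0;
}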

View file

@ -96,6 +96,7 @@ const AVBitStreamFilter *av_bsf_get_by_name(const char *name)
return NULL; return NULL;
} }
#if FF_API_CHILD_CLASS_NEXT
const AVClass *ff_bsf_child_class_next(const AVClass *prev) const AVClass *ff_bsf_child_class_next(const AVClass *prev)
{ {
const AVBitStreamFilter *f = NULL; const AVBitStreamFilter *f = NULL;
@ -115,3 +116,16 @@ const AVClass *ff_bsf_child_class_next(const AVClass *prev)
} }
return NULL; return NULL;
} }
#endif
const AVClass *ff_bsf_child_class_iterate(void **opaque)
{
const AVBitStreamFilter *f;
/* find next filter with priv options */
while ((f = av_bsf_iterate(opaque))) {
if (f->priv_class)
return f->priv_class;
}
return NULL;
}
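
ff_bsf_child_class_iterate() supersedes ff_bsf_child_class_next(), which is now fenced behind FF_API_CHILD_CLASS_NEXT. The difference is the iteration contract: instead of handing the previously returned class back to the callee, which must rescan the list to find it, the caller keeps an opaque cursor. A runnable sketch of the cursor style with an invented class table:

#include <stdio.h>
#include <stddef.h>

typedef struct Class { const char *name; } Class;

static const Class class_a = { "a" }, class_b = { "b" };
static const Class *classes[] = { &class_a, &class_b, NULL };

/* Opaque-cursor iteration: the caller owns the position, so the callee
 * never has to search for the previous result. The cursor is stored
 * as an integer index smuggled through the void pointer. */
static const Class *class_iterate(void **opaque)
{
    size_t i = (size_t)*opaque;
    if (!classes[i])
        return NULL;
    *opaque = (void *)(i + 1);
    return classes[i];
}

int main(void)
{
    void *iter = NULL;
    const Class *c;
    while ((c = class_iterate(&iter)))
        printf("%s\n", c->name);
    return 0;
}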

View file

@ -79,7 +79,10 @@ static const AVClass bsf_class = {
.item_name = bsf_to_name, .item_name = bsf_to_name,
.version = LIBAVUTIL_VERSION_INT, .version = LIBAVUTIL_VERSION_INT,
.child_next = bsf_child_next, .child_next = bsf_child_next,
#if FF_API_CHILD_CLASS_NEXT
.child_class_next = ff_bsf_child_class_next, .child_class_next = ff_bsf_child_class_next,
#endif
.child_class_iterate = ff_bsf_child_class_iterate,
.category = AV_CLASS_CATEGORY_BITSTREAM_FILTER, .category = AV_CLASS_CATEGORY_BITSTREAM_FILTER,
}; };

View file

@ -42,6 +42,10 @@ int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt);
*/ */
int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt); int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt);
#if FF_API_CHILD_CLASS_NEXT
const AVClass *ff_bsf_child_class_next(const AVClass *prev); const AVClass *ff_bsf_child_class_next(const AVClass *prev);
#endif
const AVClass *ff_bsf_child_class_iterate(void **opaque);
#endif /* AVCODEC_BSF_INTERNAL_H */ #endif /* AVCODEC_BSF_INTERNAL_H */

View file

@ -127,8 +127,7 @@ void ff_cbs_close(CodedBitstreamContext **ctx_ptr)
av_freep(ctx_ptr); av_freep(ctx_ptr);
} }
static void cbs_unit_uninit(CodedBitstreamContext *ctx, static void cbs_unit_uninit(CodedBitstreamUnit *unit)
CodedBitstreamUnit *unit)
{ {
av_buffer_unref(&unit->content_ref); av_buffer_unref(&unit->content_ref);
unit->content = NULL; unit->content = NULL;
@ -139,13 +138,12 @@ static void cbs_unit_uninit(CodedBitstreamContext *ctx,
unit->data_bit_padding = 0; unit->data_bit_padding = 0;
} }
void ff_cbs_fragment_reset(CodedBitstreamContext *ctx, void ff_cbs_fragment_reset(CodedBitstreamFragment *frag)
CodedBitstreamFragment *frag)
{ {
int i; int i;
for (i = 0; i < frag->nb_units; i++) for (i = 0; i < frag->nb_units; i++)
cbs_unit_uninit(ctx, &frag->units[i]); cbs_unit_uninit(&frag->units[i]);
frag->nb_units = 0; frag->nb_units = 0;
av_buffer_unref(&frag->data_ref); av_buffer_unref(&frag->data_ref);
@ -154,10 +152,9 @@ void ff_cbs_fragment_reset(CodedBitstreamContext *ctx,
frag->data_bit_padding = 0; frag->data_bit_padding = 0;
} }
void ff_cbs_fragment_free(CodedBitstreamContext *ctx, void ff_cbs_fragment_free(CodedBitstreamFragment *frag)
CodedBitstreamFragment *frag)
{ {
ff_cbs_fragment_reset(ctx, frag); ff_cbs_fragment_reset(frag);
av_freep(&frag->units); av_freep(&frag->units);
frag->nb_units_allocated = 0; frag->nb_units_allocated = 0;
@ -200,8 +197,7 @@ static int cbs_read_fragment_content(CodedBitstreamContext *ctx,
return 0; return 0;
} }
static int cbs_fill_fragment_data(CodedBitstreamContext *ctx, static int cbs_fill_fragment_data(CodedBitstreamFragment *frag,
CodedBitstreamFragment *frag,
const uint8_t *data, size_t size) const uint8_t *data, size_t size)
{ {
av_assert0(!frag->data && !frag->data_ref); av_assert0(!frag->data && !frag->data_ref);
@ -227,7 +223,7 @@ int ff_cbs_read_extradata(CodedBitstreamContext *ctx,
{ {
int err; int err;
err = cbs_fill_fragment_data(ctx, frag, par->extradata, err = cbs_fill_fragment_data(frag, par->extradata,
par->extradata_size); par->extradata_size);
if (err < 0) if (err < 0)
return err; return err;
@ -254,7 +250,7 @@ int ff_cbs_read_packet(CodedBitstreamContext *ctx,
frag->data_size = pkt->size; frag->data_size = pkt->size;
} else { } else {
err = cbs_fill_fragment_data(ctx, frag, pkt->data, pkt->size); err = cbs_fill_fragment_data(frag, pkt->data, pkt->size);
if (err < 0) if (err < 0)
return err; return err;
} }
@ -272,7 +268,7 @@ int ff_cbs_read(CodedBitstreamContext *ctx,
{ {
int err; int err;
err = cbs_fill_fragment_data(ctx, frag, data, size); err = cbs_fill_fragment_data(frag, data, size);
if (err < 0) if (err < 0)
return err; return err;
@ -328,7 +324,7 @@ static int cbs_write_unit_data(CodedBitstreamContext *ctx,
flush_put_bits(&pbc); flush_put_bits(&pbc);
ret = ff_cbs_alloc_unit_data(ctx, unit, put_bits_count(&pbc) / 8); ret = ff_cbs_alloc_unit_data(unit, put_bits_count(&pbc) / 8);
if (ret < 0) if (ret < 0)
return ret; return ret;
@ -641,8 +637,7 @@ int ff_cbs_write_signed(CodedBitstreamContext *ctx, PutBitContext *pbc,
} }
int ff_cbs_alloc_unit_content(CodedBitstreamContext *ctx, int ff_cbs_alloc_unit_content(CodedBitstreamUnit *unit,
CodedBitstreamUnit *unit,
size_t size, size_t size,
void (*free)(void *opaque, uint8_t *data)) void (*free)(void *opaque, uint8_t *data))
{ {
@ -662,8 +657,7 @@ int ff_cbs_alloc_unit_content(CodedBitstreamContext *ctx,
return 0; return 0;
} }
int ff_cbs_alloc_unit_data(CodedBitstreamContext *ctx, int ff_cbs_alloc_unit_data(CodedBitstreamUnit *unit,
CodedBitstreamUnit *unit,
size_t size) size_t size)
{ {
av_assert0(!unit->data && !unit->data_ref); av_assert0(!unit->data && !unit->data_ref);
@ -680,8 +674,7 @@ int ff_cbs_alloc_unit_data(CodedBitstreamContext *ctx,
return 0; return 0;
} }
static int cbs_insert_unit(CodedBitstreamContext *ctx, static int cbs_insert_unit(CodedBitstreamFragment *frag,
CodedBitstreamFragment *frag,
int position) int position)
{ {
CodedBitstreamUnit *units; CodedBitstreamUnit *units;
@ -719,8 +712,7 @@ static int cbs_insert_unit(CodedBitstreamContext *ctx,
return 0; return 0;
} }
int ff_cbs_insert_unit_content(CodedBitstreamContext *ctx, int ff_cbs_insert_unit_content(CodedBitstreamFragment *frag,
CodedBitstreamFragment *frag,
int position, int position,
CodedBitstreamUnitType type, CodedBitstreamUnitType type,
void *content, void *content,
@ -742,7 +734,7 @@ int ff_cbs_insert_unit_content(CodedBitstreamContext *ctx,
content_ref = NULL; content_ref = NULL;
} }
err = cbs_insert_unit(ctx, frag, position); err = cbs_insert_unit(frag, position);
if (err < 0) { if (err < 0) {
av_buffer_unref(&content_ref); av_buffer_unref(&content_ref);
return err; return err;
@ -756,8 +748,7 @@ int ff_cbs_insert_unit_content(CodedBitstreamContext *ctx,
return 0; return 0;
} }
int ff_cbs_insert_unit_data(CodedBitstreamContext *ctx, int ff_cbs_insert_unit_data(CodedBitstreamFragment *frag,
CodedBitstreamFragment *frag,
int position, int position,
CodedBitstreamUnitType type, CodedBitstreamUnitType type,
uint8_t *data, size_t data_size, uint8_t *data, size_t data_size,
@ -781,7 +772,7 @@ int ff_cbs_insert_unit_data(CodedBitstreamContext *ctx,
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} }
err = cbs_insert_unit(ctx, frag, position); err = cbs_insert_unit(frag, position);
if (err < 0) { if (err < 0) {
av_buffer_unref(&data_ref); av_buffer_unref(&data_ref);
return err; return err;
@ -796,14 +787,13 @@ int ff_cbs_insert_unit_data(CodedBitstreamContext *ctx,
return 0; return 0;
} }
void ff_cbs_delete_unit(CodedBitstreamContext *ctx, void ff_cbs_delete_unit(CodedBitstreamFragment *frag,
CodedBitstreamFragment *frag,
int position) int position)
{ {
av_assert0(0 <= position && position < frag->nb_units av_assert0(0 <= position && position < frag->nb_units
&& "Unit to be deleted not in fragment."); && "Unit to be deleted not in fragment.");
cbs_unit_uninit(ctx, &frag->units[position]); cbs_unit_uninit(&frag->units[position]);
--frag->nb_units; --frag->nb_units;

View file

@ -330,23 +330,20 @@ int ff_cbs_write_packet(CodedBitstreamContext *ctx,
* Free the units contained in a fragment as well as the fragment's * Free the units contained in a fragment as well as the fragment's
* own data buffer, but not the units array itself. * own data buffer, but not the units array itself.
*/ */
void ff_cbs_fragment_reset(CodedBitstreamContext *ctx, void ff_cbs_fragment_reset(CodedBitstreamFragment *frag);
CodedBitstreamFragment *frag);
/** /**
* Free the units array of a fragment in addition to what * Free the units array of a fragment in addition to what
* ff_cbs_fragment_reset does. * ff_cbs_fragment_reset does.
*/ */
void ff_cbs_fragment_free(CodedBitstreamContext *ctx, void ff_cbs_fragment_free(CodedBitstreamFragment *frag);
CodedBitstreamFragment *frag);
/** /**
* Allocate a new internal content buffer of the given size in the unit. * Allocate a new internal content buffer of the given size in the unit.
* *
* The content will be zeroed. * The content will be zeroed.
*/ */
int ff_cbs_alloc_unit_content(CodedBitstreamContext *ctx, int ff_cbs_alloc_unit_content(CodedBitstreamUnit *unit,
CodedBitstreamUnit *unit,
size_t size, size_t size,
void (*free)(void *opaque, uint8_t *content)); void (*free)(void *opaque, uint8_t *content));
@ -355,8 +352,7 @@ int ff_cbs_alloc_unit_content(CodedBitstreamContext *ctx,
* *
* The data buffer will have input padding. * The data buffer will have input padding.
*/ */
int ff_cbs_alloc_unit_data(CodedBitstreamContext *ctx, int ff_cbs_alloc_unit_data(CodedBitstreamUnit *unit,
CodedBitstreamUnit *unit,
size_t size); size_t size);
/** /**
@ -365,8 +361,7 @@ int ff_cbs_alloc_unit_data(CodedBitstreamContext *ctx,
* The content structure continues to be owned by the caller if * The content structure continues to be owned by the caller if
* content_buf is not supplied. * content_buf is not supplied.
*/ */
int ff_cbs_insert_unit_content(CodedBitstreamContext *ctx, int ff_cbs_insert_unit_content(CodedBitstreamFragment *frag,
CodedBitstreamFragment *frag,
int position, int position,
CodedBitstreamUnitType type, CodedBitstreamUnitType type,
void *content, void *content,
@ -379,8 +374,7 @@ int ff_cbs_insert_unit_content(CodedBitstreamContext *ctx,
* av_malloc() and will on success become owned by the unit after this * av_malloc() and will on success become owned by the unit after this
* call or freed on error. * call or freed on error.
*/ */
int ff_cbs_insert_unit_data(CodedBitstreamContext *ctx, int ff_cbs_insert_unit_data(CodedBitstreamFragment *frag,
CodedBitstreamFragment *frag,
int position, int position,
CodedBitstreamUnitType type, CodedBitstreamUnitType type,
uint8_t *data, size_t data_size, uint8_t *data, size_t data_size,
@ -391,8 +385,7 @@ int ff_cbs_insert_unit_data(CodedBitstreamContext *ctx,
* *
* Requires position to be >= 0 and < frag->nb_units. * Requires position to be >= 0 and < frag->nb_units.
*/ */
void ff_cbs_delete_unit(CodedBitstreamContext *ctx, void ff_cbs_delete_unit(CodedBitstreamFragment *frag,
CodedBitstreamFragment *frag,
int position); int position);

View file

@ -120,16 +120,11 @@ static int cbs_av1_write_uvlc(CodedBitstreamContext *ctx, PutBitContext *pbc,
if (ctx->trace_enable) if (ctx->trace_enable)
position = put_bits_count(pbc); position = put_bits_count(pbc);
if (value == 0) {
zeroes = 0;
put_bits(pbc, 1, 1);
} else {
zeroes = av_log2(value + 1); zeroes = av_log2(value + 1);
v = value - (1U << zeroes) + 1; v = value - (1U << zeroes) + 1;
put_bits(pbc, zeroes, 0); put_bits(pbc, zeroes, 0);
put_bits(pbc, 1, 1); put_bits(pbc, 1, 1);
put_bits(pbc, zeroes, v); put_bits(pbc, zeroes, v);
}
if (ctx->trace_enable) { if (ctx->trace_enable) {
char bits[65]; char bits[65];
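
The deleted value == 0 special case was redundant: with value == 0, zeroes = av_log2(0 + 1) = 0 and v = 0 - (1 << 0) + 1 = 0, so the general path writes zero leading bits, the single marker 1 bit, and zero payload bits, exactly the lone "1" the removed branch emitted. A self-contained sketch of the write path, with floor_log2() and a toy bit printer standing in for av_log2() and PutBitContext:

#include <stdint.h>
#include <stdio.h>

static int floor_log2(uint32_t v)     /* stand-in for av_log2() */
{
    int n = 0;
    while (v >>= 1)
        n++;
    return n;
}

static void put_bits(int n, uint32_t v)  /* print n bits, MSB first */
{
    for (int i = n - 1; i >= 0; i--)
        putchar('0' + ((v >> i) & 1));
}

/* uvlc(value): zeroes leading 0s, a 1 marker, zeroes payload bits. */
static void write_uvlc(uint32_t value)
{
    int zeroes = floor_log2(value + 1);
    uint32_t v = value - (1U << zeroes) + 1;
    put_bits(zeroes, 0);
    put_bits(1, 1);
    put_bits(zeroes, v);
}

int main(void)
{
    for (uint32_t i = 0; i < 4; i++) {
        write_uvlc(i);                /* prints 1, 010, 011, 00100 */
        putchar('\n');
    }
    return 0;
}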
@ -799,7 +794,7 @@ static int cbs_av1_split_fragment(CodedBitstreamContext *ctx,
goto fail; goto fail;
} }
err = ff_cbs_insert_unit_data(ctx, frag, -1, header.obu_type, err = ff_cbs_insert_unit_data(frag, -1, header.obu_type,
data, obu_length, frag->data_ref); data, obu_length, frag->data_ref);
if (err < 0) if (err < 0)
goto fail; goto fail;
@ -892,7 +887,7 @@ static int cbs_av1_read_unit(CodedBitstreamContext *ctx,
GetBitContext gbc; GetBitContext gbc;
int err, start_pos, end_pos; int err, start_pos, end_pos;
err = ff_cbs_alloc_unit_content(ctx, unit, sizeof(*obu), err = ff_cbs_alloc_unit_content(unit, sizeof(*obu),
&cbs_av1_free_obu); &cbs_av1_free_obu);
if (err < 0) if (err < 0)
return err; return err;

View file

@ -472,8 +472,7 @@ typedef struct CodedBitstreamH264Context {
* On success, the payload will be owned by a unit in access_unit; * On success, the payload will be owned by a unit in access_unit;
* on failure, the content of the payload will be freed. * on failure, the content of the payload will be freed.
*/ */
int ff_cbs_h264_add_sei_message(CodedBitstreamContext *ctx, int ff_cbs_h264_add_sei_message(CodedBitstreamFragment *access_unit,
CodedBitstreamFragment *access_unit,
H264RawSEIPayload *payload); H264RawSEIPayload *payload);
/** /**
@ -485,8 +484,7 @@ int ff_cbs_h264_add_sei_message(CodedBitstreamContext *ctx,
* Requires nal_unit to be a unit in access_unit and position to be >= 0 * Requires nal_unit to be a unit in access_unit and position to be >= 0
* and < the payload count of the SEI nal_unit. * and < the payload count of the SEI nal_unit.
*/ */
void ff_cbs_h264_delete_sei_message(CodedBitstreamContext *ctx, void ff_cbs_h264_delete_sei_message(CodedBitstreamFragment *access_unit,
CodedBitstreamFragment *access_unit,
CodedBitstreamUnit *nal_unit, CodedBitstreamUnit *nal_unit,
int position); int position);

View file

@ -591,7 +591,7 @@ static int cbs_h2645_fragment_add_nals(CodedBitstreamContext *ctx,
ref = (nal->data == nal->raw_data) ? frag->data_ref ref = (nal->data == nal->raw_data) ? frag->data_ref
: packet->rbsp.rbsp_buffer_ref; : packet->rbsp.rbsp_buffer_ref;
err = ff_cbs_insert_unit_data(ctx, frag, -1, nal->type, err = ff_cbs_insert_unit_data(frag, -1, nal->type,
(uint8_t*)nal->data, size, ref); (uint8_t*)nal->data, size, ref);
if (err < 0) if (err < 0)
return err; return err;
@ -807,7 +807,7 @@ static int cbs_h264_read_nal_unit(CodedBitstreamContext *ctx,
{ {
H264RawSPS *sps; H264RawSPS *sps;
err = ff_cbs_alloc_unit_content(ctx, unit, sizeof(*sps), NULL); err = ff_cbs_alloc_unit_content(unit, sizeof(*sps), NULL);
if (err < 0) if (err < 0)
return err; return err;
sps = unit->content; sps = unit->content;
@ -824,7 +824,7 @@ static int cbs_h264_read_nal_unit(CodedBitstreamContext *ctx,
case H264_NAL_SPS_EXT: case H264_NAL_SPS_EXT:
{ {
err = ff_cbs_alloc_unit_content(ctx, unit, err = ff_cbs_alloc_unit_content(unit,
sizeof(H264RawSPSExtension), sizeof(H264RawSPSExtension),
NULL); NULL);
if (err < 0) if (err < 0)
@ -840,7 +840,7 @@ static int cbs_h264_read_nal_unit(CodedBitstreamContext *ctx,
{ {
H264RawPPS *pps; H264RawPPS *pps;
err = ff_cbs_alloc_unit_content(ctx, unit, sizeof(*pps), err = ff_cbs_alloc_unit_content(unit, sizeof(*pps),
&cbs_h264_free_pps); &cbs_h264_free_pps);
if (err < 0) if (err < 0)
return err; return err;
@ -863,7 +863,7 @@ static int cbs_h264_read_nal_unit(CodedBitstreamContext *ctx,
H264RawSlice *slice; H264RawSlice *slice;
int pos, len; int pos, len;
err = ff_cbs_alloc_unit_content(ctx, unit, sizeof(*slice), err = ff_cbs_alloc_unit_content(unit, sizeof(*slice),
&cbs_h264_free_slice); &cbs_h264_free_slice);
if (err < 0) if (err < 0)
return err; return err;
@ -890,7 +890,7 @@ static int cbs_h264_read_nal_unit(CodedBitstreamContext *ctx,
case H264_NAL_AUD: case H264_NAL_AUD:
{ {
err = ff_cbs_alloc_unit_content(ctx, unit, err = ff_cbs_alloc_unit_content(unit,
sizeof(H264RawAUD), NULL); sizeof(H264RawAUD), NULL);
if (err < 0) if (err < 0)
return err; return err;
@ -903,7 +903,7 @@ static int cbs_h264_read_nal_unit(CodedBitstreamContext *ctx,
case H264_NAL_SEI: case H264_NAL_SEI:
{ {
err = ff_cbs_alloc_unit_content(ctx, unit, sizeof(H264RawSEI), err = ff_cbs_alloc_unit_content(unit, sizeof(H264RawSEI),
&cbs_h264_free_sei); &cbs_h264_free_sei);
if (err < 0) if (err < 0)
return err; return err;
@ -916,7 +916,7 @@ static int cbs_h264_read_nal_unit(CodedBitstreamContext *ctx,
case H264_NAL_FILLER_DATA: case H264_NAL_FILLER_DATA:
{ {
err = ff_cbs_alloc_unit_content(ctx, unit, err = ff_cbs_alloc_unit_content(unit,
sizeof(H264RawFiller), NULL); sizeof(H264RawFiller), NULL);
if (err < 0) if (err < 0)
return err; return err;
@ -930,7 +930,7 @@ static int cbs_h264_read_nal_unit(CodedBitstreamContext *ctx,
case H264_NAL_END_SEQUENCE: case H264_NAL_END_SEQUENCE:
case H264_NAL_END_STREAM: case H264_NAL_END_STREAM:
{ {
err = ff_cbs_alloc_unit_content(ctx, unit, err = ff_cbs_alloc_unit_content(unit,
sizeof(H264RawNALUnitHeader), sizeof(H264RawNALUnitHeader),
NULL); NULL);
if (err < 0) if (err < 0)
@ -966,7 +966,7 @@ static int cbs_h265_read_nal_unit(CodedBitstreamContext *ctx,
{ {
H265RawVPS *vps; H265RawVPS *vps;
err = ff_cbs_alloc_unit_content(ctx, unit, sizeof(*vps), err = ff_cbs_alloc_unit_content(unit, sizeof(*vps),
&cbs_h265_free_vps); &cbs_h265_free_vps);
if (err < 0) if (err < 0)
return err; return err;
@ -985,7 +985,7 @@ static int cbs_h265_read_nal_unit(CodedBitstreamContext *ctx,
{ {
H265RawSPS *sps; H265RawSPS *sps;
err = ff_cbs_alloc_unit_content(ctx, unit, sizeof(*sps), err = ff_cbs_alloc_unit_content(unit, sizeof(*sps),
&cbs_h265_free_sps); &cbs_h265_free_sps);
if (err < 0) if (err < 0)
return err; return err;
@ -1005,7 +1005,7 @@ static int cbs_h265_read_nal_unit(CodedBitstreamContext *ctx,
{ {
H265RawPPS *pps; H265RawPPS *pps;
err = ff_cbs_alloc_unit_content(ctx, unit, sizeof(*pps), err = ff_cbs_alloc_unit_content(unit, sizeof(*pps),
&cbs_h265_free_pps); &cbs_h265_free_pps);
if (err < 0) if (err < 0)
return err; return err;
@ -1041,7 +1041,7 @@ static int cbs_h265_read_nal_unit(CodedBitstreamContext *ctx,
H265RawSlice *slice; H265RawSlice *slice;
int pos, len; int pos, len;
err = ff_cbs_alloc_unit_content(ctx, unit, sizeof(*slice), err = ff_cbs_alloc_unit_content(unit, sizeof(*slice),
&cbs_h265_free_slice); &cbs_h265_free_slice);
if (err < 0) if (err < 0)
return err; return err;
@ -1068,7 +1068,7 @@ static int cbs_h265_read_nal_unit(CodedBitstreamContext *ctx,
case HEVC_NAL_AUD: case HEVC_NAL_AUD:
{ {
err = ff_cbs_alloc_unit_content(ctx, unit, err = ff_cbs_alloc_unit_content(unit,
sizeof(H265RawAUD), NULL); sizeof(H265RawAUD), NULL);
if (err < 0) if (err < 0)
return err; return err;
@ -1082,7 +1082,7 @@ static int cbs_h265_read_nal_unit(CodedBitstreamContext *ctx,
case HEVC_NAL_SEI_PREFIX: case HEVC_NAL_SEI_PREFIX:
case HEVC_NAL_SEI_SUFFIX: case HEVC_NAL_SEI_SUFFIX:
{ {
err = ff_cbs_alloc_unit_content(ctx, unit, sizeof(H265RawSEI), err = ff_cbs_alloc_unit_content(unit, sizeof(H265RawSEI),
&cbs_h265_free_sei); &cbs_h265_free_sei);
if (err < 0) if (err < 0)
@ -1531,8 +1531,7 @@ const CodedBitstreamType ff_cbs_type_h265 = {
.close = &cbs_h265_close, .close = &cbs_h265_close,
}; };
int ff_cbs_h264_add_sei_message(CodedBitstreamContext *ctx, int ff_cbs_h264_add_sei_message(CodedBitstreamFragment *au,
CodedBitstreamFragment *au,
H264RawSEIPayload *payload) H264RawSEIPayload *payload)
{ {
H264RawSEI *sei = NULL; H264RawSEI *sei = NULL;
@ -1577,7 +1576,7 @@ int ff_cbs_h264_add_sei_message(CodedBitstreamContext *ctx,
break; break;
} }
err = ff_cbs_insert_unit_content(ctx, au, i, H264_NAL_SEI, err = ff_cbs_insert_unit_content(au, i, H264_NAL_SEI,
sei, sei_ref); sei, sei_ref);
av_buffer_unref(&sei_ref); av_buffer_unref(&sei_ref);
if (err < 0) if (err < 0)
@ -1593,8 +1592,7 @@ fail:
return err; return err;
} }
void ff_cbs_h264_delete_sei_message(CodedBitstreamContext *ctx, void ff_cbs_h264_delete_sei_message(CodedBitstreamFragment *au,
CodedBitstreamFragment *au,
CodedBitstreamUnit *nal, CodedBitstreamUnit *nal,
int position) int position)
{ {
@ -1612,7 +1610,7 @@ void ff_cbs_h264_delete_sei_message(CodedBitstreamContext *ctx,
break; break;
} }
ff_cbs_delete_unit(ctx, au, i); ff_cbs_delete_unit(au, i);
} else { } else {
cbs_h264_free_sei_payload(&sei->payload[position]); cbs_h264_free_sei_payload(&sei->payload[position]);

View file

@ -226,7 +226,7 @@ static int cbs_jpeg_split_fragment(CodedBitstreamContext *ctx,
data_ref = frag->data_ref; data_ref = frag->data_ref;
} }
err = ff_cbs_insert_unit_data(ctx, frag, unit, marker, err = ff_cbs_insert_unit_data(frag, unit, marker,
data, data_size, data_ref); data, data_size, data_ref);
if (err < 0) if (err < 0)
return err; return err;
@ -252,7 +252,7 @@ static int cbs_jpeg_read_unit(CodedBitstreamContext *ctx,
if (unit->type >= JPEG_MARKER_SOF0 && if (unit->type >= JPEG_MARKER_SOF0 &&
unit->type <= JPEG_MARKER_SOF3) { unit->type <= JPEG_MARKER_SOF3) {
err = ff_cbs_alloc_unit_content(ctx, unit, err = ff_cbs_alloc_unit_content(unit,
sizeof(JPEGRawFrameHeader), sizeof(JPEGRawFrameHeader),
NULL); NULL);
if (err < 0) if (err < 0)
@ -264,7 +264,7 @@ static int cbs_jpeg_read_unit(CodedBitstreamContext *ctx,
} else if (unit->type >= JPEG_MARKER_APPN && } else if (unit->type >= JPEG_MARKER_APPN &&
unit->type <= JPEG_MARKER_APPN + 15) { unit->type <= JPEG_MARKER_APPN + 15) {
err = ff_cbs_alloc_unit_content(ctx, unit, err = ff_cbs_alloc_unit_content(unit,
sizeof(JPEGRawApplicationData), sizeof(JPEGRawApplicationData),
&cbs_jpeg_free_application_data); &cbs_jpeg_free_application_data);
if (err < 0) if (err < 0)
@ -278,7 +278,7 @@ static int cbs_jpeg_read_unit(CodedBitstreamContext *ctx,
JPEGRawScan *scan; JPEGRawScan *scan;
int pos; int pos;
err = ff_cbs_alloc_unit_content(ctx, unit, err = ff_cbs_alloc_unit_content(unit,
sizeof(JPEGRawScan), sizeof(JPEGRawScan),
&cbs_jpeg_free_scan); &cbs_jpeg_free_scan);
if (err < 0) if (err < 0)
@ -304,7 +304,7 @@ static int cbs_jpeg_read_unit(CodedBitstreamContext *ctx,
#define SEGMENT(marker, type, func, free) \ #define SEGMENT(marker, type, func, free) \
case JPEG_MARKER_ ## marker: \ case JPEG_MARKER_ ## marker: \
{ \ { \
err = ff_cbs_alloc_unit_content(ctx, unit, \ err = ff_cbs_alloc_unit_content(unit, \
sizeof(type), free); \ sizeof(type), free); \
if (err < 0) \ if (err < 0) \
return err; \ return err; \

View file

@ -207,7 +207,7 @@ static int cbs_mpeg2_split_fragment(CodedBitstreamContext *ctx,
final = 1; final = 1;
} }
err = ff_cbs_insert_unit_data(ctx, frag, i, unit_type, (uint8_t*)start, err = ff_cbs_insert_unit_data(frag, i, unit_type, (uint8_t*)start,
unit_size, frag->data_ref); unit_size, frag->data_ref);
if (err < 0) if (err < 0)
return err; return err;
@ -235,7 +235,7 @@ static int cbs_mpeg2_read_unit(CodedBitstreamContext *ctx,
MPEG2RawSlice *slice; MPEG2RawSlice *slice;
int pos, len; int pos, len;
err = ff_cbs_alloc_unit_content(ctx, unit, sizeof(*slice), err = ff_cbs_alloc_unit_content(unit, sizeof(*slice),
&cbs_mpeg2_free_slice); &cbs_mpeg2_free_slice);
if (err < 0) if (err < 0)
return err; return err;
@ -265,7 +265,7 @@ static int cbs_mpeg2_read_unit(CodedBitstreamContext *ctx,
case start_code: \ case start_code: \
{ \ { \
type *header; \ type *header; \
err = ff_cbs_alloc_unit_content(ctx, unit, \ err = ff_cbs_alloc_unit_content(unit, \
sizeof(*header), free_func); \ sizeof(*header), free_func); \
if (err < 0) \ if (err < 0) \
return err; \ return err; \

View file

@ -451,7 +451,7 @@ static int cbs_vp9_split_fragment(CodedBitstreamContext *ctx,
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
err = ff_cbs_insert_unit_data(ctx, frag, -1, 0, err = ff_cbs_insert_unit_data(frag, -1, 0,
frag->data + pos, frag->data + pos,
sfi.frame_sizes[i], sfi.frame_sizes[i],
frag->data_ref); frag->data_ref);
@ -469,7 +469,7 @@ static int cbs_vp9_split_fragment(CodedBitstreamContext *ctx,
return 0; return 0;
} else { } else {
err = ff_cbs_insert_unit_data(ctx, frag, -1, 0, err = ff_cbs_insert_unit_data(frag, -1, 0,
frag->data, frag->data_size, frag->data, frag->data_size,
frag->data_ref); frag->data_ref);
if (err < 0) if (err < 0)
@ -497,7 +497,7 @@ static int cbs_vp9_read_unit(CodedBitstreamContext *ctx,
if (err < 0) if (err < 0)
return err; return err;
err = ff_cbs_alloc_unit_content(ctx, unit, sizeof(*frame), err = ff_cbs_alloc_unit_content(unit, sizeof(*frame),
&cbs_vp9_free_frame); &cbs_vp9_free_frame);
if (err < 0) if (err < 0)
return err; return err;

View file

@ -32,10 +32,6 @@
static const AVRational ms_tb = {1, 1000}; static const AVRational ms_tb = {1, 1000};
/*
* TODO list
* 1) handle font and color completely
*/
enum cc_mode { enum cc_mode {
CCMODE_POPON, CCMODE_POPON,
CCMODE_PAINTON, CCMODE_PAINTON,
@ -173,6 +169,18 @@ static const char *charset_overrides[4][128] =
}, },
}; };
static const unsigned char bg_attribs[8] = // Color
{
CCCOL_WHITE,
CCCOL_GREEN,
CCCOL_BLUE,
CCCOL_CYAN,
CCCOL_RED,
CCCOL_YELLOW,
CCCOL_MAGENTA,
CCCOL_BLACK,
};
static const unsigned char pac2_attribs[32][3] = // Color, font, indent static const unsigned char pac2_attribs[32][3] = // Color, font, indent
{ {
{ CCCOL_WHITE, CCFONT_REGULAR, 0 }, // 0x40 || 0x60 { CCCOL_WHITE, CCFONT_REGULAR, 0 }, // 0x40 || 0x60
@ -215,6 +223,7 @@ struct Screen {
uint8_t characters[SCREEN_ROWS+1][SCREEN_COLUMNS+1]; uint8_t characters[SCREEN_ROWS+1][SCREEN_COLUMNS+1];
uint8_t charsets[SCREEN_ROWS+1][SCREEN_COLUMNS+1]; uint8_t charsets[SCREEN_ROWS+1][SCREEN_COLUMNS+1];
uint8_t colors[SCREEN_ROWS+1][SCREEN_COLUMNS+1]; uint8_t colors[SCREEN_ROWS+1][SCREEN_COLUMNS+1];
uint8_t bgs[SCREEN_ROWS+1][SCREEN_COLUMNS+1];
uint8_t fonts[SCREEN_ROWS+1][SCREEN_COLUMNS+1]; uint8_t fonts[SCREEN_ROWS+1][SCREEN_COLUMNS+1];
/* /*
* Bitmask of used rows; if a bit is not set, the * Bitmask of used rows; if a bit is not set, the
@ -228,39 +237,37 @@ struct Screen {
typedef struct CCaptionSubContext { typedef struct CCaptionSubContext {
AVClass *class; AVClass *class;
int real_time; int real_time;
int data_field;
struct Screen screen[2]; struct Screen screen[2];
int active_screen; int active_screen;
uint8_t cursor_row; uint8_t cursor_row;
uint8_t cursor_column; uint8_t cursor_column;
uint8_t cursor_color; uint8_t cursor_color;
uint8_t bg_color;
uint8_t cursor_font; uint8_t cursor_font;
uint8_t cursor_charset; uint8_t cursor_charset;
AVBPrint buffer; AVBPrint buffer[2];
int buffer_index;
int buffer_changed; int buffer_changed;
int rollup; int rollup;
enum cc_mode mode; enum cc_mode mode;
int64_t start_time; int64_t buffer_time[2];
/* visible screen time */
int64_t startv_time;
int64_t end_time;
int screen_touched; int screen_touched;
int64_t last_real_time; int64_t last_real_time;
char prev_cmd[2]; uint8_t prev_cmd[2];
/* buffer to store pkt data */
uint8_t *pktbuf;
int pktbuf_size;
int readorder; int readorder;
} CCaptionSubContext; } CCaptionSubContext;
static av_cold int init_decoder(AVCodecContext *avctx) static av_cold int init_decoder(AVCodecContext *avctx)
{ {
int ret; int ret;
CCaptionSubContext *ctx = avctx->priv_data; CCaptionSubContext *ctx = avctx->priv_data;
av_bprint_init(&ctx->buffer, 0, AV_BPRINT_SIZE_UNLIMITED); av_bprint_init(&ctx->buffer[0], 0, AV_BPRINT_SIZE_UNLIMITED);
av_bprint_init(&ctx->buffer[1], 0, AV_BPRINT_SIZE_UNLIMITED);
/* taking by default roll up to 2 */ /* taking by default roll up to 2 */
ctx->mode = CCMODE_ROLLUP; ctx->mode = CCMODE_ROLLUP;
ctx->bg_color = CCCOL_BLACK;
ctx->rollup = 2; ctx->rollup = 2;
ctx->cursor_row = 10; ctx->cursor_row = 10;
ret = ff_ass_subtitle_header(avctx, "Monospace", ret = ff_ass_subtitle_header(avctx, "Monospace",
@ -282,9 +289,8 @@ static av_cold int init_decoder(AVCodecContext *avctx)
static av_cold int close_decoder(AVCodecContext *avctx) static av_cold int close_decoder(AVCodecContext *avctx)
{ {
CCaptionSubContext *ctx = avctx->priv_data; CCaptionSubContext *ctx = avctx->priv_data;
av_bprint_finalize(&ctx->buffer, NULL); av_bprint_finalize(&ctx->buffer[0], NULL);
av_freep(&ctx->pktbuf); av_bprint_finalize(&ctx->buffer[1], NULL);
ctx->pktbuf_size = 0;
return 0; return 0;
} }
@ -301,6 +307,7 @@ static void flush_decoder(AVCodecContext *avctx)
ctx->cursor_column = 0; ctx->cursor_column = 0;
ctx->cursor_font = 0; ctx->cursor_font = 0;
ctx->cursor_color = 0; ctx->cursor_color = 0;
ctx->bg_color = CCCOL_BLACK;
ctx->cursor_charset = 0; ctx->cursor_charset = 0;
ctx->active_screen = 0; ctx->active_screen = 0;
ctx->last_real_time = 0; ctx->last_real_time = 0;
@ -308,7 +315,8 @@ static void flush_decoder(AVCodecContext *avctx)
ctx->buffer_changed = 0; ctx->buffer_changed = 0;
if (!(avctx->flags2 & AV_CODEC_FLAG2_RO_FLUSH_NOOP)) if (!(avctx->flags2 & AV_CODEC_FLAG2_RO_FLUSH_NOOP))
ctx->readorder = 0; ctx->readorder = 0;
av_bprint_clear(&ctx->buffer); av_bprint_clear(&ctx->buffer[0]);
av_bprint_clear(&ctx->buffer[1]);
} }
/** /**
@@ -319,11 +327,15 @@ static void write_char(CCaptionSubContext *ctx, struct Screen *screen, char ch)
uint8_t col = ctx->cursor_column; uint8_t col = ctx->cursor_column;
char *row = screen->characters[ctx->cursor_row]; char *row = screen->characters[ctx->cursor_row];
char *font = screen->fonts[ctx->cursor_row]; char *font = screen->fonts[ctx->cursor_row];
char *color = screen->colors[ctx->cursor_row];
char *bg = screen->bgs[ctx->cursor_row];
char *charset = screen->charsets[ctx->cursor_row]; char *charset = screen->charsets[ctx->cursor_row];
if (col < SCREEN_COLUMNS) { if (col < SCREEN_COLUMNS) {
row[col] = ch; row[col] = ch;
font[col] = ctx->cursor_font; font[col] = ctx->cursor_font;
color[col] = ctx->cursor_color;
bg[col] = ctx->bg_color;
charset[col] = ctx->cursor_charset; charset[col] = ctx->cursor_charset;
ctx->cursor_charset = CCSET_BASIC_AMERICAN; ctx->cursor_charset = CCSET_BASIC_AMERICAN;
if (ch) ctx->cursor_column++; if (ch) ctx->cursor_column++;
@@ -347,11 +359,13 @@ static void write_char(CCaptionSubContext *ctx, struct Screen *screen, char ch)
* If the second byte doesn't pass parity, it returns INVALIDDATA * If the second byte doesn't pass parity, it returns INVALIDDATA
* user can ignore the whole pair and pass the other pair. * user can ignore the whole pair and pass the other pair.
*/ */
static int validate_cc_data_pair(uint8_t *cc_data_pair) static int validate_cc_data_pair(const uint8_t *cc_data_pair, uint8_t *hi)
{ {
uint8_t cc_valid = (*cc_data_pair & 4) >>2; uint8_t cc_valid = (*cc_data_pair & 4) >>2;
uint8_t cc_type = *cc_data_pair & 3; uint8_t cc_type = *cc_data_pair & 3;
*hi = cc_data_pair[1];
if (!cc_valid) if (!cc_valid)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
@@ -361,7 +375,7 @@ static int validate_cc_data_pair(uint8_t *cc_data_pair)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
if (!av_parity(cc_data_pair[1])) { if (!av_parity(cc_data_pair[1])) {
cc_data_pair[1]=0x7F; *hi = 0x7F;
} }
} }
@@ -374,10 +388,6 @@ static int validate_cc_data_pair(uint8_t *cc_data_pair)
if (cc_type == 3 || cc_type == 2) if (cc_type == 3 || cc_type == 2)
return AVERROR_PATCHWELCOME; return AVERROR_PATCHWELCOME;
/* remove parity bit */
cc_data_pair[1] &= 0x7F;
cc_data_pair[2] &= 0x7F;
return 0; return 0;
} }
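
Note: validate_cc_data_pair() now takes a const pointer and no longer patches the packet in place (the data may be shared), so the parity-corrected second byte is returned through *hi instead; per the comment above, a second byte that fails parity is replaced with 0x7F. For reference, EIA-608 bytes carry odd parity in bit 7: a byte is valid only if it has an odd number of set bits. A standalone odd-parity check in plain C, mirroring what av_parity() is used for here:

#include <stdint.h>
#include <stdio.h>

static int odd_parity(uint8_t v)
{
    v ^= v >> 4; /* fold all bits down toward bit 0 */
    v ^= v >> 2;
    v ^= v >> 1;
    return v & 1;
}

int main(void)
{
    printf("0x94 valid: %d\n", odd_parity(0x94)); /* 3 bits set -> 1 */
    printf("0x14 valid: %d\n", odd_parity(0x14)); /* 2 bits set -> 0 */
    return 0;
}
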
@@ -423,6 +433,7 @@ static void roll_up(CCaptionSubContext *ctx)
memcpy(screen->characters[i_row], screen->characters[i_row+1], SCREEN_COLUMNS); memcpy(screen->characters[i_row], screen->characters[i_row+1], SCREEN_COLUMNS);
memcpy(screen->colors[i_row], screen->colors[i_row+1], SCREEN_COLUMNS); memcpy(screen->colors[i_row], screen->colors[i_row+1], SCREEN_COLUMNS);
memcpy(screen->bgs[i_row], screen->bgs[i_row+1], SCREEN_COLUMNS);
memcpy(screen->fonts[i_row], screen->fonts[i_row+1], SCREEN_COLUMNS); memcpy(screen->fonts[i_row], screen->fonts[i_row+1], SCREEN_COLUMNS);
memcpy(screen->charsets[i_row], screen->charsets[i_row+1], SCREEN_COLUMNS); memcpy(screen->charsets[i_row], screen->charsets[i_row+1], SCREEN_COLUMNS);
if (CHECK_FLAG(screen->row_used, i_row + 1)) if (CHECK_FLAG(screen->row_used, i_row + 1))
@@ -437,7 +448,11 @@ static int capture_screen(CCaptionSubContext *ctx)
int i, j, tab = 0; int i, j, tab = 0;
struct Screen *screen = ctx->screen + ctx->active_screen; struct Screen *screen = ctx->screen + ctx->active_screen;
enum cc_font prev_font = CCFONT_REGULAR; enum cc_font prev_font = CCFONT_REGULAR;
av_bprint_clear(&ctx->buffer); enum cc_color_code prev_color = CCCOL_WHITE;
enum cc_color_code prev_bg_color = CCCOL_BLACK;
const int bidx = ctx->buffer_index;
av_bprint_clear(&ctx->buffer[bidx]);
for (i = 0; screen->row_used && i < SCREEN_ROWS; i++) for (i = 0; screen->row_used && i < SCREEN_ROWS; i++)
{ {
@@ -457,6 +472,8 @@ static int capture_screen(CCaptionSubContext *ctx)
if (CHECK_FLAG(screen->row_used, i)) { if (CHECK_FLAG(screen->row_used, i)) {
const char *row = screen->characters[i]; const char *row = screen->characters[i];
const char *font = screen->fonts[i]; const char *font = screen->fonts[i];
const char *bg = screen->bgs[i];
const char *color = screen->colors[i];
const char *charset = screen->charsets[i]; const char *charset = screen->charsets[i];
const char *override; const char *override;
int x, y, seen_char = 0; int x, y, seen_char = 0;
@@ -468,10 +485,10 @@ static int capture_screen(CCaptionSubContext *ctx)
x = ASS_DEFAULT_PLAYRESX * (0.1 + 0.0250 * j); x = ASS_DEFAULT_PLAYRESX * (0.1 + 0.0250 * j);
y = ASS_DEFAULT_PLAYRESY * (0.1 + 0.0533 * i); y = ASS_DEFAULT_PLAYRESY * (0.1 + 0.0533 * i);
av_bprintf(&ctx->buffer, "{\\an7}{\\pos(%d,%d)}", x, y); av_bprintf(&ctx->buffer[bidx], "{\\an7}{\\pos(%d,%d)}", x, y);
for (; j < SCREEN_COLUMNS; j++) { for (; j < SCREEN_COLUMNS; j++) {
const char *e_tag = "", *s_tag = ""; const char *e_tag = "", *s_tag = "", *c_tag = "", *b_tag = "";
if (row[j] == 0) if (row[j] == 0)
break; break;
@@ -500,38 +517,99 @@ static int capture_screen(CCaptionSubContext *ctx)
break; break;
} }
} }
if (prev_color != color[j]) {
switch (color[j]) {
case CCCOL_WHITE:
c_tag = "{\\c&HFFFFFF&}";
break;
case CCCOL_GREEN:
c_tag = "{\\c&H00FF00&}";
break;
case CCCOL_BLUE:
c_tag = "{\\c&HFF0000&}";
break;
case CCCOL_CYAN:
c_tag = "{\\c&HFFFF00&}";
break;
case CCCOL_RED:
c_tag = "{\\c&H0000FF&}";
break;
case CCCOL_YELLOW:
c_tag = "{\\c&H00FFFF&}";
break;
case CCCOL_MAGENTA:
c_tag = "{\\c&HFF00FF&}";
break;
}
}
if (prev_bg_color != bg[j]) {
switch (bg[j]) {
case CCCOL_WHITE:
b_tag = "{\\3c&HFFFFFF&}";
break;
case CCCOL_GREEN:
b_tag = "{\\3c&H00FF00&}";
break;
case CCCOL_BLUE:
b_tag = "{\\3c&HFF0000&}";
break;
case CCCOL_CYAN:
b_tag = "{\\3c&HFFFF00&}";
break;
case CCCOL_RED:
b_tag = "{\\3c&H0000FF&}";
break;
case CCCOL_YELLOW:
b_tag = "{\\3c&H00FFFF&}";
break;
case CCCOL_MAGENTA:
b_tag = "{\\3c&HFF00FF&}";
break;
case CCCOL_BLACK:
b_tag = "{\\3c&H000000&}";
break;
}
}
prev_font = font[j]; prev_font = font[j];
prev_color = color[j];
prev_bg_color = bg[j];
override = charset_overrides[(int)charset[j]][(int)row[j]]; override = charset_overrides[(int)charset[j]][(int)row[j]];
if (override) { if (override) {
av_bprintf(&ctx->buffer, "%s%s%s", e_tag, s_tag, override); av_bprintf(&ctx->buffer[bidx], "%s%s%s%s%s", e_tag, s_tag, c_tag, b_tag, override);
seen_char = 1; seen_char = 1;
} else if (row[j] == ' ' && !seen_char) { } else if (row[j] == ' ' && !seen_char) {
av_bprintf(&ctx->buffer, "%s%s\\h", e_tag, s_tag); av_bprintf(&ctx->buffer[bidx], "%s%s%s%s\\h", e_tag, s_tag, c_tag, b_tag);
} else { } else {
av_bprintf(&ctx->buffer, "%s%s%c", e_tag, s_tag, row[j]); av_bprintf(&ctx->buffer[bidx], "%s%s%s%s%c", e_tag, s_tag, c_tag, b_tag, row[j]);
seen_char = 1; seen_char = 1;
} }
} }
av_bprintf(&ctx->buffer, "\\N"); av_bprintf(&ctx->buffer[bidx], "\\N");
} }
} }
if (!av_bprint_is_complete(&ctx->buffer)) if (!av_bprint_is_complete(&ctx->buffer[bidx]))
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
if (screen->row_used && ctx->buffer.len >= 2) { if (screen->row_used && ctx->buffer[bidx].len >= 2) {
ctx->buffer.len -= 2; ctx->buffer[bidx].len -= 2;
ctx->buffer.str[ctx->buffer.len] = 0; ctx->buffer[bidx].str[ctx->buffer[bidx].len] = 0;
} }
ctx->buffer_changed = 1; ctx->buffer_changed = 1;
return 0; return 0;
} }
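
Note: the new c_tag/b_tag cases emit ASS colour overrides: \c sets the primary fill, and \3c sets the outline colour, which serves as the character background box in this decoder's style. ASS colours are written &HBBGGRR& with the blue byte first, which is why CCCOL_BLUE maps to &HFF0000& and CCCOL_YELLOW to &H00FFFF& in the tables above. A small sketch of the byte swap (the palette RGB values here are the conventional ones, assumed for illustration):

#include <stdint.h>
#include <stdio.h>

static const struct { const char *name; uint32_t rgb; } cc_palette[] = {
    { "white",   0xFFFFFF }, { "green",  0x00FF00 }, { "blue",   0x0000FF },
    { "cyan",    0x00FFFF }, { "red",    0xFF0000 }, { "yellow", 0xFFFF00 },
    { "magenta", 0xFF00FF }, { "black",  0x000000 },
};

static uint32_t rgb_to_ass(uint32_t rgb) /* 0xRRGGBB -> 0xBBGGRR */
{
    return ((rgb & 0xFF) << 16) | (rgb & 0xFF00) | ((rgb >> 16) & 0xFF);
}

int main(void)
{
    for (size_t i = 0; i < sizeof(cc_palette) / sizeof(cc_palette[0]); i++)
        printf("%-7s -> {\\c&H%06X&}\n", cc_palette[i].name,
               (unsigned)rgb_to_ass(cc_palette[i].rgb));
    return 0;
}
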
static int reap_screen(CCaptionSubContext *ctx, int64_t pts) static void update_time(CCaptionSubContext *ctx, int64_t pts)
{ {
ctx->start_time = ctx->startv_time; ctx->buffer_time[0] = ctx->buffer_time[1];
ctx->startv_time = pts; ctx->buffer_time[1] = pts;
ctx->end_time = pts; }
return capture_screen(ctx);
static void handle_bgattr(CCaptionSubContext *ctx, uint8_t hi, uint8_t lo)
{
const int i = (lo & 0xf) >> 1;
ctx->bg_color = bg_attribs[i];
} }
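
Note: handle_bgattr() decodes a CEA-608 Background Attribute (first byte 0x10, second byte 0x20..0x2f): bits 3:1 of the second byte index the colour and bit 0 is the transparency flag, which this decoder does not use. An illustrative decode of the full range; bg_attribs[] is assumed to follow the standard white..black colour order:

#include <stdio.h>

static const char *const bg_names[8] = {
    "white", "green", "blue", "cyan", "red", "yellow", "magenta", "black"
};

int main(void)
{
    for (unsigned lo = 0x20; lo <= 0x2f; lo++)
        printf("0x10 0x%02x -> %s%s\n", lo, bg_names[(lo & 0xf) >> 1],
               (lo & 1) ? " (semi-transparent, ignored here)" : "");
    return 0;
}
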
static void handle_textattr(CCaptionSubContext *ctx, uint8_t hi, uint8_t lo) static void handle_textattr(CCaptionSubContext *ctx, uint8_t hi, uint8_t lo)
@@ -576,49 +654,55 @@ static void handle_pac(CCaptionSubContext *ctx, uint8_t hi, uint8_t lo)
} }
} }
/** static int handle_edm(CCaptionSubContext *ctx)
* @param pts it is required to set end time
*/
static void handle_edm(CCaptionSubContext *ctx, int64_t pts)
{ {
struct Screen *screen = ctx->screen + ctx->active_screen; struct Screen *screen = ctx->screen + ctx->active_screen;
int ret;
// In buffered mode, keep writing to screen until it is wiped. // In buffered mode, keep writing to screen until it is wiped.
// Before wiping the display, capture contents to emit subtitle. // Before wiping the display, capture contents to emit subtitle.
if (!ctx->real_time) if (!ctx->real_time)
reap_screen(ctx, pts); ret = capture_screen(ctx);
screen->row_used = 0; screen->row_used = 0;
ctx->bg_color = CCCOL_BLACK;
// In realtime mode, emit an empty caption so the last one doesn't // In realtime mode, emit an empty caption so the last one doesn't
// stay on the screen. // stay on the screen.
if (ctx->real_time) if (ctx->real_time)
reap_screen(ctx, pts); ret = capture_screen(ctx);
return ret;
} }
static void handle_eoc(CCaptionSubContext *ctx, int64_t pts) static int handle_eoc(CCaptionSubContext *ctx)
{ {
// In buffered mode, we wait til the *next* EOC and int ret;
// reap what was already on the screen since the last EOC.
if (!ctx->real_time)
handle_edm(ctx,pts);
ctx->active_screen = !ctx->active_screen; ctx->active_screen = !ctx->active_screen;
// In buffered mode, we wait til the *next* EOC and
// capture what was already on the screen since the last EOC.
if (!ctx->real_time)
ret = handle_edm(ctx);
ctx->cursor_column = 0; ctx->cursor_column = 0;
// In realtime mode, we display the buffered contents (after // In realtime mode, we display the buffered contents (after
// flipping the buffer to active above) as soon as EOC arrives. // flipping the buffer to active above) as soon as EOC arrives.
if (ctx->real_time) if (ctx->real_time)
reap_screen(ctx, pts); ret = capture_screen(ctx);
return ret;
} }
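
Note: for orientation when reading the dispatch in process_cc608() below: EDM erases the displayed memory, while EOC swaps displayed and non-displayed memory, which is why handle_eoc() now flips active_screen before calling capture/handle_edm(). The relevant CEA-608 miscellaneous control codes (the second byte, after the 0x14/0x15/0x1c channel prefix), for reference only since the decoder matches the values inline:

enum {
    CC_RCL = 0x20, /* Resume Caption Loading -> pop-on mode         */
    CC_DER = 0x24, /* Delete to End of Row                          */
    CC_EDM = 0x2c, /* Erase Displayed Memory                        */
    CC_CR  = 0x2d, /* Carriage Return -> roll-up scroll             */
    CC_ENM = 0x2e, /* Erase Non-displayed Memory                    */
    CC_EOC = 0x2f, /* End Of Caption -> swap on/off-screen memories */
};
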
static void handle_delete_end_of_row(CCaptionSubContext *ctx, char hi, char lo) static void handle_delete_end_of_row(CCaptionSubContext *ctx)
{ {
struct Screen *screen = get_writing_screen(ctx); struct Screen *screen = get_writing_screen(ctx);
write_char(ctx, screen, 0); write_char(ctx, screen, 0);
} }
static void handle_char(CCaptionSubContext *ctx, char hi, char lo, int64_t pts) static void handle_char(CCaptionSubContext *ctx, char hi, char lo)
{ {
struct Screen *screen = get_writing_screen(ctx); struct Screen *screen = get_writing_screen(ctx);
@@ -658,11 +742,12 @@ static void handle_char(CCaptionSubContext *ctx, char hi, char lo, int64_t pts)
ff_dlog(ctx, "(%c)\n", hi); ff_dlog(ctx, "(%c)\n", hi);
} }
static void process_cc608(CCaptionSubContext *ctx, int64_t pts, uint8_t hi, uint8_t lo) static int process_cc608(CCaptionSubContext *ctx, uint8_t hi, uint8_t lo)
{ {
int ret = 0;
if (hi == ctx->prev_cmd[0] && lo == ctx->prev_cmd[1]) { if (hi == ctx->prev_cmd[0] && lo == ctx->prev_cmd[1]) {
/* ignore redundant command */ return 0;
return;
} }
/* set prev command */ /* set prev command */
@@ -675,6 +760,8 @@ static void process_cc608(CCaptionSubContext *ctx, int64_t pts, uint8_t hi, uint
} else if ( ( hi == 0x11 && lo >= 0x20 && lo <= 0x2f ) || } else if ( ( hi == 0x11 && lo >= 0x20 && lo <= 0x2f ) ||
( hi == 0x17 && lo >= 0x2e && lo <= 0x2f) ) { ( hi == 0x17 && lo >= 0x2e && lo <= 0x2f) ) {
handle_textattr(ctx, hi, lo); handle_textattr(ctx, hi, lo);
} else if ((hi == 0x10 && lo >= 0x20 && lo <= 0x2f)) {
handle_bgattr(ctx, hi, lo);
} else if (hi == 0x14 || hi == 0x15 || hi == 0x1c) { } else if (hi == 0x14 || hi == 0x15 || hi == 0x1c) {
switch (lo) { switch (lo) {
case 0x20: case 0x20:
@@ -682,7 +769,7 @@ static void process_cc608(CCaptionSubContext *ctx, int64_t pts, uint8_t hi, uint
ctx->mode = CCMODE_POPON; ctx->mode = CCMODE_POPON;
break; break;
case 0x24: case 0x24:
handle_delete_end_of_row(ctx, hi, lo); handle_delete_end_of_row(ctx);
break; break;
case 0x25: case 0x25:
case 0x26: case 0x26:
@@ -700,13 +787,13 @@ static void process_cc608(CCaptionSubContext *ctx, int64_t pts, uint8_t hi, uint
break; break;
case 0x2c: case 0x2c:
/* erase display memory */ /* erase display memory */
handle_edm(ctx, pts); handle_edm(ctx);
break; break;
case 0x2d: case 0x2d:
/* carriage return */ /* carriage return */
ff_dlog(ctx, "carriage return\n"); ff_dlog(ctx, "carriage return\n");
if (!ctx->real_time) if (!ctx->real_time)
reap_screen(ctx, pts); ret = capture_screen(ctx);
roll_up(ctx); roll_up(ctx);
ctx->cursor_column = 0; ctx->cursor_column = 0;
break; break;
@@ -722,7 +809,7 @@ static void process_cc608(CCaptionSubContext *ctx, int64_t pts, uint8_t hi, uint
case 0x2f: case 0x2f:
/* end of caption */ /* end of caption */
ff_dlog(ctx, "handle_eoc\n"); ff_dlog(ctx, "handle_eoc\n");
handle_eoc(ctx, pts); ret = handle_eoc(ctx);
break; break;
default: default:
ff_dlog(ctx, "Unknown command 0x%hhx 0x%hhx\n", hi, lo); ff_dlog(ctx, "Unknown command 0x%hhx 0x%hhx\n", hi, lo);
@@ -730,73 +817,93 @@ static void process_cc608(CCaptionSubContext *ctx, int64_t pts, uint8_t hi, uint
} }
} else if (hi >= 0x11 && hi <= 0x13) { } else if (hi >= 0x11 && hi <= 0x13) {
/* Special characters */ /* Special characters */
handle_char(ctx, hi, lo, pts); handle_char(ctx, hi, lo);
} else if (hi >= 0x20) { } else if (hi >= 0x20) {
/* Standard characters (always in pairs) */ /* Standard characters (always in pairs) */
handle_char(ctx, hi, lo, pts); handle_char(ctx, hi, lo);
ctx->prev_cmd[0] = ctx->prev_cmd[1] = 0; ctx->prev_cmd[0] = ctx->prev_cmd[1] = 0;
} else if (hi == 0x17 && lo >= 0x21 && lo <= 0x23) { } else if (hi == 0x17 && lo >= 0x21 && lo <= 0x23) {
int i; int i;
/* Tab offsets (spacing) */ /* Tab offsets (spacing) */
for (i = 0; i < lo - 0x20; i++) { for (i = 0; i < lo - 0x20; i++) {
handle_char(ctx, ' ', 0, pts); handle_char(ctx, ' ', 0);
} }
} else { } else {
/* Ignoring all other non data code */ /* Ignoring all other non data code */
ff_dlog(ctx, "Unknown command 0x%hhx 0x%hhx\n", hi, lo); ff_dlog(ctx, "Unknown command 0x%hhx 0x%hhx\n", hi, lo);
} }
return ret;
} }
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt) static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
{ {
CCaptionSubContext *ctx = avctx->priv_data; CCaptionSubContext *ctx = avctx->priv_data;
AVSubtitle *sub = data; AVSubtitle *sub = data;
const int64_t start_time = sub->pts; int64_t in_time = sub->pts;
uint8_t *bptr = NULL; int64_t start_time;
int64_t end_time;
int bidx = ctx->buffer_index;
const uint8_t *bptr = avpkt->data;
int len = avpkt->size; int len = avpkt->size;
int ret = 0; int ret = 0;
int i; int i;
av_fast_padded_malloc(&ctx->pktbuf, &ctx->pktbuf_size, len);
if (!ctx->pktbuf) {
av_log(ctx, AV_LOG_WARNING, "Insufficient Memory of %d truncated to %d\n", len, ctx->pktbuf_size);
return AVERROR(ENOMEM);
}
memcpy(ctx->pktbuf, avpkt->data, len);
bptr = ctx->pktbuf;
for (i = 0; i < len; i += 3) { for (i = 0; i < len; i += 3) {
uint8_t cc_type = *(bptr + i) & 3; uint8_t hi, cc_type = bptr[i] & 1;
if (validate_cc_data_pair(bptr + i))
if (ctx->data_field < 0)
ctx->data_field = cc_type;
if (validate_cc_data_pair(bptr + i, &hi))
continue; continue;
/* ignoring data field 1 */
if(cc_type == 1) if (cc_type != ctx->data_field)
continue; continue;
else
process_cc608(ctx, start_time, *(bptr + i + 1) & 0x7f, *(bptr + i + 2) & 0x7f); ret = process_cc608(ctx, hi & 0x7f, bptr[i + 2] & 0x7f);
if (ret < 0)
return ret;
if (!ctx->buffer_changed) if (!ctx->buffer_changed)
continue; continue;
ctx->buffer_changed = 0; ctx->buffer_changed = 0;
if (*ctx->buffer.str || ctx->real_time) if (!ctx->real_time && ctx->mode == CCMODE_POPON)
{ ctx->buffer_index = bidx = !ctx->buffer_index;
ff_dlog(ctx, "cdp writing data (%s)\n",ctx->buffer.str);
ret = ff_ass_add_rect(sub, ctx->buffer.str, ctx->readorder++, 0, NULL, NULL); update_time(ctx, in_time);
if (ret < 0)
return ret; if (ctx->buffer[bidx].str[0] || ctx->real_time) {
sub->pts = ctx->start_time; ff_dlog(ctx, "cdp writing data (%s)\n", ctx->buffer[bidx].str);
start_time = ctx->buffer_time[0];
sub->pts = start_time;
end_time = ctx->buffer_time[1];
if (!ctx->real_time) if (!ctx->real_time)
sub->end_display_time = av_rescale_q(ctx->end_time - ctx->start_time, sub->end_display_time = av_rescale_q(end_time - start_time,
AV_TIME_BASE_Q, ms_tb); AV_TIME_BASE_Q, ms_tb);
else else
sub->end_display_time = -1; sub->end_display_time = -1;
ctx->buffer_changed = 0; ret = ff_ass_add_rect(sub, ctx->buffer[bidx].str, ctx->readorder++, 0, NULL, NULL);
if (ret < 0)
return ret;
ctx->last_real_time = sub->pts; ctx->last_real_time = sub->pts;
ctx->screen_touched = 0; ctx->screen_touched = 0;
} }
} }
if (!bptr && !ctx->real_time && ctx->buffer[!ctx->buffer_index].str[0]) {
bidx = !ctx->buffer_index;
ret = ff_ass_add_rect(sub, ctx->buffer[bidx].str, ctx->readorder++, 0, NULL, NULL);
if (ret < 0)
return ret;
sub->pts = ctx->buffer_time[1];
sub->end_display_time = av_rescale_q(ctx->buffer_time[1] - ctx->buffer_time[0],
AV_TIME_BASE_Q, ms_tb);
if (sub->end_display_time == 0)
sub->end_display_time = ctx->buffer[bidx].len * 20;
}
if (ctx->real_time && ctx->screen_touched && if (ctx->real_time && ctx->screen_touched &&
sub->pts > ctx->last_real_time + av_rescale_q(200, ms_tb, AV_TIME_BASE_Q)) { sub->pts > ctx->last_real_time + av_rescale_q(200, ms_tb, AV_TIME_BASE_Q)) {
ctx->last_real_time = sub->pts; ctx->last_real_time = sub->pts;
@@ -805,7 +912,7 @@ static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avp
capture_screen(ctx); capture_screen(ctx);
ctx->buffer_changed = 0; ctx->buffer_changed = 0;
ret = ff_ass_add_rect(sub, ctx->buffer.str, ctx->readorder++, 0, NULL, NULL); ret = ff_ass_add_rect(sub, ctx->buffer[bidx].str, ctx->readorder++, 0, NULL, NULL);
if (ret < 0) if (ret < 0)
return ret; return ret;
sub->end_display_time = -1; sub->end_display_time = -1;
@@ -819,6 +926,10 @@ static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avp
#define SD AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_DECODING_PARAM #define SD AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = { static const AVOption options[] = {
{ "real_time", "emit subtitle events as they are decoded for real-time display", OFFSET(real_time), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, SD }, { "real_time", "emit subtitle events as they are decoded for real-time display", OFFSET(real_time), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, SD },
{ "data_field", "select data field", OFFSET(data_field), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, SD, "data_field" },
{ "auto", "pick first one that appears", 0, AV_OPT_TYPE_CONST, { .i64 =-1 }, 0, 0, SD, "data_field" },
{ "first", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, SD, "data_field" },
{ "second", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, SD, "data_field" },
{NULL} {NULL}
}; };
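
Note: the new data_field option pins the decoder to field-1 (CC1/CC2) or field-2 (CC3/CC4) byte pairs instead of latching onto whichever valid pair arrives first. A hedged usage sketch through the AVOptions API, with error handling trimmed:

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static AVCodecContext *open_cc_decoder(void)
{
    const AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_EIA_608);
    AVCodecContext *ctx = avcodec_alloc_context3(dec);
    if (!ctx)
        return NULL;
    /* Private option added above; "second" selects the field-2 pairs. */
    av_opt_set(ctx->priv_data, "data_field", "second", 0);
    if (avcodec_open2(ctx, dec, NULL) < 0)
        avcodec_free_context(&ctx);
    return ctx;
}
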
@@ -840,4 +951,5 @@ AVCodec ff_ccaption_decoder = {
.flush = flush_decoder, .flush = flush_decoder,
.decode = decode, .decode = decode,
.priv_class = &ccaption_dec_class, .priv_class = &ccaption_dec_class,
.capabilities = AV_CODEC_CAP_DELAY,
}; };
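
Note: AV_CODEC_CAP_DELAY is required now that a finished pop-on caption can sit in the inactive buffer until the next EOC: callers must drain the decoder with an empty packet at end of stream, which is what the !bptr branch in decode() above serves. A sketch of that drain call (packet fields beyond data/size left zeroed for brevity):

#include <libavcodec/avcodec.h>

static void drain_captions(AVCodecContext *dec_ctx)
{
    AVSubtitle sub;
    AVPacket pkt = { .data = NULL, .size = 0 }; /* empty packet = drain */
    int got = 0;

    if (avcodec_decode_subtitle2(dec_ctx, &sub, &got, &pkt) >= 0 && got) {
        /* ... emit the final AVSubtitle ... */
        avsubtitle_free(&sub);
    }
}
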

View file

@@ -41,15 +41,25 @@
#define ALPHA_COMPAND_GAIN 9400 #define ALPHA_COMPAND_GAIN 9400
enum CFHDParam { enum CFHDParam {
SampleType = 1,
SampleIndexTable = 2,
BitstreamMarker = 4,
TransformType = 10,
ChannelCount = 12, ChannelCount = 12,
SubbandCount = 14, SubbandCount = 14,
ImageWidth = 20, ImageWidth = 20,
ImageHeight = 21, ImageHeight = 21,
LowpassWidth = 27,
LowpassHeight = 28,
LowpassPrecision = 35, LowpassPrecision = 35,
HighpassWidth = 41,
HighpassHeight = 42,
SubbandNumber = 48, SubbandNumber = 48,
Quantization = 53, Quantization = 53,
BandHeader = 55,
ChannelNumber = 62, ChannelNumber = 62,
SampleFlags = 68, SampleFlags = 68,
EncodedFormat = 84,
BitsPerComponent = 101, BitsPerComponent = 101,
ChannelWidth = 104, ChannelWidth = 104,
ChannelHeight = 105, ChannelHeight = 105,
@@ -479,7 +489,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
s->prescale_shift[1] = (data >> 3) & 0x7; s->prescale_shift[1] = (data >> 3) & 0x7;
s->prescale_shift[2] = (data >> 6) & 0x7; s->prescale_shift[2] = (data >> 6) & 0x7;
av_log(avctx, AV_LOG_DEBUG, "Prescale shift (VC-5): %x\n", data); av_log(avctx, AV_LOG_DEBUG, "Prescale shift (VC-5): %x\n", data);
} else if (tag == 27) { } else if (tag == LowpassWidth) {
av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data); av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_width) { if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_width) {
av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n"); av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
@@ -488,7 +498,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
} }
s->plane[s->channel_num].band[0][0].width = data; s->plane[s->channel_num].band[0][0].width = data;
s->plane[s->channel_num].band[0][0].stride = data; s->plane[s->channel_num].band[0][0].stride = data;
} else if (tag == 28) { } else if (tag == LowpassHeight) {
av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data); av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_height) { if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_height) {
av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n"); av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
@@ -496,9 +506,9 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
break; break;
} }
s->plane[s->channel_num].band[0][0].height = data; s->plane[s->channel_num].band[0][0].height = data;
} else if (tag == 1) } else if (tag == SampleType)
av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data); av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
else if (tag == 10) { else if (tag == TransformType) {
if (data != 0) { if (data != 0) {
avpriv_report_missing_feature(avctx, "Transform type of %"PRIu16, data); avpriv_report_missing_feature(avctx, "Transform type of %"PRIu16, data);
ret = AVERROR_PATCHWELCOME; ret = AVERROR_PATCHWELCOME;
@@ -515,7 +525,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
avpriv_report_missing_feature(avctx, "Skip frame"); avpriv_report_missing_feature(avctx, "Skip frame");
ret = AVERROR_PATCHWELCOME; ret = AVERROR_PATCHWELCOME;
break; break;
} else if (tag == 2) { } else if (tag == SampleIndexTable) {
av_log(avctx, AV_LOG_DEBUG, "tag=2 header - skipping %i tag/value pairs\n", data); av_log(avctx, AV_LOG_DEBUG, "tag=2 header - skipping %i tag/value pairs\n", data);
if (data > bytestream2_get_bytes_left(&gb) / 4) { if (data > bytestream2_get_bytes_left(&gb) / 4) {
av_log(avctx, AV_LOG_ERROR, "too many tag/value pairs (%d)\n", data); av_log(avctx, AV_LOG_ERROR, "too many tag/value pairs (%d)\n", data);
@@ -527,7 +537,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
uint16_t val2 = bytestream2_get_be16(&gb); uint16_t val2 = bytestream2_get_be16(&gb);
av_log(avctx, AV_LOG_DEBUG, "Tag/Value = %x %x\n", tag2, val2); av_log(avctx, AV_LOG_DEBUG, "Tag/Value = %x %x\n", tag2, val2);
} }
} else if (tag == 41) { } else if (tag == HighpassWidth) {
av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num); av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
if (data < 3) { if (data < 3) {
av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n"); av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
@@ -536,7 +546,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
} }
s->plane[s->channel_num].band[s->level][s->subband_num].width = data; s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8); s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
} else if (tag == 42) { } else if (tag == HighpassHeight) {
av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data); av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
if (data < 3) { if (data < 3) {
av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n"); av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
@@ -576,7 +586,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
break; break;
} }
s->bpc = data; s->bpc = data;
} else if (tag == 84) { } else if (tag == EncodedFormat) {
av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data); av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
if (data == 1) { if (data == 1) {
s->coded_format = AV_PIX_FMT_YUV422P10; s->coded_format = AV_PIX_FMT_YUV422P10;
@@ -612,7 +622,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data); av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);
/* Some kind of end of header tag */ /* Some kind of end of header tag */
if (tag == 4 && data == 0x1a4a && s->coded_width && s->coded_height && if (tag == BitstreamMarker && data == 0x1a4a && s->coded_width && s->coded_height &&
s->coded_format != AV_PIX_FMT_NONE) { s->coded_format != AV_PIX_FMT_NONE) {
if (s->a_width != s->coded_width || s->a_height != s->coded_height || if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
s->a_format != s->coded_format) { s->a_format != s->coded_format) {
@@ -645,7 +655,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual]; coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];
/* Lowpass coefficients */ /* Lowpass coefficients */
if (tag == 4 && data == 0xf0f && s->a_width && s->a_height) { if (tag == BitstreamMarker && data == 0xf0f && s->a_width && s->a_height) {
int lowpass_height = s->plane[s->channel_num].band[0][0].height; int lowpass_height = s->plane[s->channel_num].band[0][0].height;
int lowpass_width = s->plane[s->channel_num].band[0][0].width; int lowpass_width = s->plane[s->channel_num].band[0][0].width;
int lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height; int lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
@@ -685,7 +695,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height); av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
} }
if (tag == 55 && s->subband_num_actual != 255 && s->a_width && s->a_height) { if (tag == BandHeader && s->subband_num_actual != 255 && s->a_width && s->a_height) {
int highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height; int highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height;
int highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width; int highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width;
int highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width; int highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
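
Note: the renamed CFHDParam tags make the parse loop self-describing. A CineForm sample is a flat sequence of big-endian 16-bit tag/value pairs (negative tags mark optional entries in the real decoder). A minimal standalone walk of that framing, with made-up sample bytes:

#include <stdint.h>
#include <stdio.h>

static void walk_tags(const uint8_t *buf, size_t len)
{
    for (size_t i = 0; i + 4 <= len; i += 4) {
        int16_t  tag  = (int16_t)((buf[i] << 8) | buf[i + 1]);
        uint16_t data = (uint16_t)((buf[i + 2] << 8) | buf[i + 3]);
        printf("tag %d = %u\n", tag, data);
    }
}

int main(void)
{
    static const uint8_t sample[] = {
        0, 20, 7, 128, /* ImageWidth  = 1920 */
        0, 21, 4, 56,  /* ImageHeight = 1080 */
    };
    walk_tags(sample, sizeof(sample));
    return 0;
}
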

View file

@@ -282,14 +282,10 @@ typedef struct AVCodec {
int (*decode)(struct AVCodecContext *, void *outdata, int *outdata_size, struct AVPacket *avpkt); int (*decode)(struct AVCodecContext *, void *outdata, int *outdata_size, struct AVPacket *avpkt);
int (*close)(struct AVCodecContext *); int (*close)(struct AVCodecContext *);
/** /**
* Encode API with decoupled packet/frame dataflow. The API is the * Encode API with decoupled frame/packet dataflow. This function is called
* same as the avcodec_ prefixed APIs (avcodec_send_frame() etc.), except * to get one output packet. It should call ff_encode_get_frame() to obtain
* that: * input data.
* - never called if the codec is closed or the wrong type,
* - if AV_CODEC_CAP_DELAY is not set, drain frames are never sent,
* - only one drain frame is ever passed down,
*/ */
int (*send_frame)(struct AVCodecContext *avctx, const struct AVFrame *frame);
int (*receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt); int (*receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt);
/** /**

View file

@@ -1405,6 +1405,13 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("AVS2-P2/IEEE1857.4"), .long_name = NULL_IF_CONFIG_SMALL("AVS2-P2/IEEE1857.4"),
.props = AV_CODEC_PROP_LOSSY, .props = AV_CODEC_PROP_LOSSY,
}, },
{
.id = AV_CODEC_ID_PGX,
.type = AVMEDIA_TYPE_VIDEO,
.name = "pgx",
.long_name = NULL_IF_CONFIG_SMALL("PGX (JPEG2000 Test Format)"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{ {
.id = AV_CODEC_ID_Y41P, .id = AV_CODEC_ID_Y41P,
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
@@ -2897,7 +2904,7 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO, .type = AVMEDIA_TYPE_AUDIO,
.name = "tak", .name = "tak",
.long_name = NULL_IF_CONFIG_SMALL("TAK (Tom's lossless Audio Kompressor)"), .long_name = NULL_IF_CONFIG_SMALL("TAK (Tom's lossless Audio Kompressor)"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, .props = AV_CODEC_PROP_LOSSLESS,
}, },
{ {
.id = AV_CODEC_ID_METASOUND, .id = AV_CODEC_ID_METASOUND,

View file

@@ -241,6 +241,7 @@ enum AVCodecID {
AV_CODEC_ID_SCREENPRESSO, AV_CODEC_ID_SCREENPRESSO,
AV_CODEC_ID_RSCC, AV_CODEC_ID_RSCC,
AV_CODEC_ID_AVS2, AV_CODEC_ID_AVS2,
AV_CODEC_ID_PGX,
AV_CODEC_ID_Y41P = 0x8000, AV_CODEC_ID_Y41P = 0x8000,
AV_CODEC_ID_AVRP, AV_CODEC_ID_AVRP,

View file

@@ -66,7 +66,7 @@ typedef struct FramePool {
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt) static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
{ {
int size = 0, ret; int size, ret;
const uint8_t *data; const uint8_t *data;
uint32_t flags; uint32_t flags;
int64_t val; int64_t val;

View file

@@ -207,15 +207,11 @@ static av_cold int dnxhd_init_vlc(DNXHDEncContext *ctx)
int i, j, level, run; int i, j, level, run;
int max_level = 1 << (ctx->bit_depth + 2); int max_level = 1 << (ctx->bit_depth + 2);
FF_ALLOCZ_ARRAY_OR_GOTO(ctx->m.avctx, ctx->orig_vlc_codes, if (!FF_ALLOCZ_TYPED_ARRAY(ctx->orig_vlc_codes, max_level * 4) ||
max_level, 4 * sizeof(*ctx->orig_vlc_codes), fail); !FF_ALLOCZ_TYPED_ARRAY(ctx->orig_vlc_bits, max_level * 4) ||
FF_ALLOCZ_ARRAY_OR_GOTO(ctx->m.avctx, ctx->orig_vlc_bits, !(ctx->run_codes = av_mallocz(63 * 2)) ||
max_level, 4 * sizeof(*ctx->orig_vlc_bits), fail); !(ctx->run_bits = av_mallocz(63)))
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_codes, return AVERROR(ENOMEM);
63 * 2, fail);
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_bits,
63, fail);
ctx->vlc_codes = ctx->orig_vlc_codes + max_level * 2; ctx->vlc_codes = ctx->orig_vlc_codes + max_level * 2;
ctx->vlc_bits = ctx->orig_vlc_bits + max_level * 2; ctx->vlc_bits = ctx->orig_vlc_bits + max_level * 2;
for (level = -max_level; level < max_level; level++) { for (level = -max_level; level < max_level; level++) {
@@ -259,8 +255,6 @@ static av_cold int dnxhd_init_vlc(DNXHDEncContext *ctx)
ctx->run_bits[run] = ctx->cid_table->run_bits[i]; ctx->run_bits[run] = ctx->cid_table->run_bits[i];
} }
return 0; return 0;
fail:
return AVERROR(ENOMEM);
} }
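
Note: FF_ALLOCZ_TYPED_ARRAY evaluates to the assigned pointer and derives the element size from it, so the per-allocation goto fail chains collapse into one combined check. Its rough shape, paraphrased (see libavcodec/internal.h of this era for the actual definition):

#define FF_ALLOCZ_TYPED_ARRAY(p, nelem) (p = av_mallocz_array(nelem, sizeof(*p)))

/* Old pattern:                                New pattern:
 *   FF_ALLOCZ_ARRAY_OR_GOTO(avctx, p, n,        if (!FF_ALLOCZ_TYPED_ARRAY(p, n) ||
 *                           sizeof(*p), fail);      !FF_ALLOCZ_TYPED_ARRAY(q, m))
 *   ...                                             return AVERROR(ENOMEM);
 * fail:
 *   return AVERROR(ENOMEM);
 */
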
static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias) static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
@@ -271,16 +265,11 @@ static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
const uint8_t *luma_weight_table = ctx->cid_table->luma_weight; const uint8_t *luma_weight_table = ctx->cid_table->luma_weight;
const uint8_t *chroma_weight_table = ctx->cid_table->chroma_weight; const uint8_t *chroma_weight_table = ctx->cid_table->chroma_weight;
FF_ALLOCZ_ARRAY_OR_GOTO(ctx->m.avctx, ctx->qmatrix_l, if (!FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_l, ctx->m.avctx->qmax + 1) ||
(ctx->m.avctx->qmax + 1), 64 * sizeof(int), fail); !FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_c, ctx->m.avctx->qmax + 1) ||
FF_ALLOCZ_ARRAY_OR_GOTO(ctx->m.avctx, ctx->qmatrix_c, !FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_l16, ctx->m.avctx->qmax + 1) ||
(ctx->m.avctx->qmax + 1), 64 * sizeof(int), fail); !FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_c16, ctx->m.avctx->qmax + 1))
FF_ALLOCZ_ARRAY_OR_GOTO(ctx->m.avctx, ctx->qmatrix_l16, return AVERROR(ENOMEM);
(ctx->m.avctx->qmax + 1), 64 * 2 * sizeof(uint16_t),
fail);
FF_ALLOCZ_ARRAY_OR_GOTO(ctx->m.avctx, ctx->qmatrix_c16,
(ctx->m.avctx->qmax + 1), 64 * 2 * sizeof(uint16_t),
fail);
if (ctx->bit_depth == 8) { if (ctx->bit_depth == 8) {
for (i = 1; i < 64; i++) { for (i = 1; i < 64; i++) {
@@ -339,27 +328,23 @@ static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
ctx->m.q_intra_matrix = ctx->qmatrix_l; ctx->m.q_intra_matrix = ctx->qmatrix_l;
return 0; return 0;
fail:
return AVERROR(ENOMEM);
} }
static av_cold int dnxhd_init_rc(DNXHDEncContext *ctx) static av_cold int dnxhd_init_rc(DNXHDEncContext *ctx)
{ {
FF_ALLOCZ_ARRAY_OR_GOTO(ctx->m.avctx, ctx->mb_rc, (ctx->m.avctx->qmax + 1), if (!FF_ALLOCZ_TYPED_ARRAY(ctx->mb_rc, (ctx->m.avctx->qmax + 1) * ctx->m.mb_num))
ctx->m.mb_num * sizeof(RCEntry), fail); return AVERROR(ENOMEM);
if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD) { if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD) {
FF_ALLOCZ_ARRAY_OR_GOTO(ctx->m.avctx, ctx->mb_cmp, if (!FF_ALLOCZ_TYPED_ARRAY(ctx->mb_cmp, ctx->m.mb_num) ||
ctx->m.mb_num, sizeof(RCCMPEntry), fail); !FF_ALLOCZ_TYPED_ARRAY(ctx->mb_cmp_tmp, ctx->m.mb_num))
FF_ALLOCZ_ARRAY_OR_GOTO(ctx->m.avctx, ctx->mb_cmp_tmp, return AVERROR(ENOMEM);
ctx->m.mb_num, sizeof(RCCMPEntry), fail);
} }
ctx->frame_bits = (ctx->coding_unit_size - ctx->frame_bits = (ctx->coding_unit_size -
ctx->data_offset - 4 - ctx->min_padding) * 8; ctx->data_offset - 4 - ctx->min_padding) * 8;
ctx->qscale = 1; ctx->qscale = 1;
ctx->lambda = 2 << LAMBDA_FRAC_BITS; // qscale 2 ctx->lambda = 2 << LAMBDA_FRAC_BITS; // qscale 2
return 0; return 0;
fail:
return AVERROR(ENOMEM);
} }
static av_cold int dnxhd_encode_init(AVCodecContext *avctx) static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
@@ -510,15 +495,11 @@ static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
if ((ret = dnxhd_init_rc(ctx)) < 0) if ((ret = dnxhd_init_rc(ctx)) < 0)
return ret; return ret;
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_size, if (!FF_ALLOCZ_TYPED_ARRAY(ctx->slice_size, ctx->m.mb_height) ||
ctx->m.mb_height * sizeof(uint32_t), fail); !FF_ALLOCZ_TYPED_ARRAY(ctx->slice_offs, ctx->m.mb_height) ||
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_offs, !FF_ALLOCZ_TYPED_ARRAY(ctx->mb_bits, ctx->m.mb_num) ||
ctx->m.mb_height * sizeof(uint32_t), fail); !FF_ALLOCZ_TYPED_ARRAY(ctx->mb_qscale, ctx->m.mb_num))
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_bits, return AVERROR(ENOMEM);
ctx->m.mb_num * sizeof(uint16_t), fail);
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale,
ctx->m.mb_num * sizeof(uint8_t), fail);
#if FF_API_CODED_FRAME #if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS FF_DISABLE_DEPRECATION_WARNINGS
avctx->coded_frame->key_frame = 1; avctx->coded_frame->key_frame = 1;
@@ -543,14 +524,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
for (i = 1; i < avctx->thread_count; i++) { for (i = 1; i < avctx->thread_count; i++) {
ctx->thread[i] = av_malloc(sizeof(DNXHDEncContext)); ctx->thread[i] = av_malloc(sizeof(DNXHDEncContext));
if (!ctx->thread[i]) if (!ctx->thread[i])
goto fail; return AVERROR(ENOMEM);
memcpy(ctx->thread[i], ctx, sizeof(DNXHDEncContext)); memcpy(ctx->thread[i], ctx, sizeof(DNXHDEncContext));
} }
} }
return 0; return 0;
fail: // for FF_ALLOCZ_OR_GOTO
return AVERROR(ENOMEM);
} }
static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf) static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)

View file

@@ -35,20 +35,12 @@
/* parser definition */ /* parser definition */
typedef struct DVBSubParseContext { typedef struct DVBSubParseContext {
uint8_t *packet_buf;
int packet_start; int packet_start;
int packet_index; int packet_index;
int in_packet; int in_packet;
uint8_t packet_buf[PARSE_BUF_SIZE];
} DVBSubParseContext; } DVBSubParseContext;
static av_cold int dvbsub_parse_init(AVCodecParserContext *s)
{
DVBSubParseContext *pc = s->priv_data;
pc->packet_buf = av_malloc(PARSE_BUF_SIZE);
return 0;
}
static int dvbsub_parse(AVCodecParserContext *s, static int dvbsub_parse(AVCodecParserContext *s,
AVCodecContext *avctx, AVCodecContext *avctx,
const uint8_t **poutbuf, int *poutbuf_size, const uint8_t **poutbuf, int *poutbuf_size,
@@ -173,16 +165,8 @@ static int dvbsub_parse(AVCodecParserContext *s,
return buf_size; return buf_size;
} }
static av_cold void dvbsub_parse_close(AVCodecParserContext *s)
{
DVBSubParseContext *pc = s->priv_data;
av_freep(&pc->packet_buf);
}
AVCodecParser ff_dvbsub_parser = { AVCodecParser ff_dvbsub_parser = {
.codec_ids = { AV_CODEC_ID_DVB_SUBTITLE }, .codec_ids = { AV_CODEC_ID_DVB_SUBTITLE },
.priv_data_size = sizeof(DVBSubParseContext), .priv_data_size = sizeof(DVBSubParseContext),
.parser_init = dvbsub_parse_init,
.parser_parse = dvbsub_parse, .parser_parse = dvbsub_parse,
.parser_close = dvbsub_parse_close,
}; };
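
Note: embedding packet_buf directly in the context removes a heap allocation whose failure the old init never checked, and since parser priv_data is zero-allocated by the framework, both the init and close callbacks become unnecessary. The same pattern in miniature (buffer size assumed for illustration; the real PARSE_BUF_SIZE is defined elsewhere in the file):

typedef struct ExampleParseContext {
    int     packet_start;
    int     packet_index;
    int     in_packet;
    uint8_t packet_buf[4096]; /* was: uint8_t *packet_buf + av_malloc() */
} ExampleParseContext;
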

View file

@@ -710,8 +710,8 @@ static void compute_default_clut(DVBSubContext *ctx, uint8_t *clut, AVSubtitleRe
} }
count = FFMAX(i - 1, 1); count = FFMAX(i - 1, 1);
for (i--; i>=0; i--) { for (i--; i >= 0; i--) {
int v = i*255/count; int v = i * 255 / count;
AV_WN32(clut + 4*list_inv[i], RGBA(v/2,v,v/2,v)); AV_WN32(clut + 4*list_inv[i], RGBA(v/2,v,v/2,v));
} }
} }
@@ -737,7 +737,7 @@ static int save_subtitle_set(AVCodecContext *avctx, AVSubtitle *sub, int *got_ou
} }
/* Not touching AVSubtitles again*/ /* Not touching AVSubtitles again*/
if(sub->num_rects) { if (sub->num_rects) {
avpriv_request_sample(ctx, "Different Version of Segment asked Twice"); avpriv_request_sample(ctx, "Different Version of Segment asked Twice");
return AVERROR_PATCHWELCOME; return AVERROR_PATCHWELCOME;
} }
@@ -747,7 +747,7 @@ static int save_subtitle_set(AVCodecContext *avctx, AVSubtitle *sub, int *got_ou
sub->num_rects++; sub->num_rects++;
} }
if(ctx->compute_edt == 0) { if (ctx->compute_edt == 0) {
sub->end_display_time = ctx->time_out * 1000; sub->end_display_time = ctx->time_out * 1000;
*got_output = 1; *got_output = 1;
} else if (ctx->prev_start != AV_NOPTS_VALUE) { } else if (ctx->prev_start != AV_NOPTS_VALUE) {
@@ -813,7 +813,7 @@ static int save_subtitle_set(AVCodecContext *avctx, AVSubtitle *sub, int *got_ou
ret = AVERROR(ENOMEM); ret = AVERROR(ENOMEM);
goto fail; goto fail;
} }
memcpy(rect->data[1], clut_table, (1 << region->depth) * sizeof(uint32_t)); memcpy(rect->data[1], clut_table, (1 << region->depth) * sizeof(*clut_table));
rect->data[0] = av_malloc(region->buf_size); rect->data[0] = av_malloc(region->buf_size);
if (!rect->data[0]) { if (!rect->data[0]) {
@@ -851,7 +851,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
return 0; return 0;
fail: fail:
if (sub->rects) { if (sub->rects) {
for(i=0; i<sub->num_rects; i++) { for (i=0; i < sub->num_rects; i++) {
rect = sub->rects[i]; rect = sub->rects[i];
if (rect) { if (rect) {
av_freep(&rect->data[0]); av_freep(&rect->data[0]);
@@ -1073,11 +1073,11 @@ static int dvbsub_parse_clut_segment(AVCodecContext *avctx,
clut = get_clut(ctx, clut_id); clut = get_clut(ctx, clut_id);
if (!clut) { if (!clut) {
clut = av_malloc(sizeof(DVBSubCLUT)); clut = av_malloc(sizeof(*clut));
if (!clut) if (!clut)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
memcpy(clut, &default_clut, sizeof(DVBSubCLUT)); memcpy(clut, &default_clut, sizeof(*clut));
clut->id = clut_id; clut->id = clut_id;
clut->version = -1; clut->version = -1;
@@ -1163,7 +1163,7 @@ static int dvbsub_parse_region_segment(AVCodecContext *avctx,
region = get_region(ctx, region_id); region = get_region(ctx, region_id);
if (!region) { if (!region) {
region = av_mallocz(sizeof(DVBSubRegion)); region = av_mallocz(sizeof(*region));
if (!region) if (!region)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
@@ -1210,7 +1210,7 @@ static int dvbsub_parse_region_segment(AVCodecContext *avctx,
} }
region->depth = 1 << (((*buf++) >> 2) & 7); region->depth = 1 << (((*buf++) >> 2) & 7);
if(region->depth<2 || region->depth>8){ if (region->depth < 2 || region->depth > 8) {
av_log(avctx, AV_LOG_ERROR, "region depth %d is invalid\n", region->depth); av_log(avctx, AV_LOG_ERROR, "region depth %d is invalid\n", region->depth);
region->depth= 4; region->depth= 4;
} }
@@ -1244,7 +1244,7 @@ static int dvbsub_parse_region_segment(AVCodecContext *avctx,
object = get_object(ctx, object_id); object = get_object(ctx, object_id);
if (!object) { if (!object) {
object = av_mallocz(sizeof(DVBSubObject)); object = av_mallocz(sizeof(*object));
if (!object) if (!object)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
@@ -1255,7 +1255,7 @@ static int dvbsub_parse_region_segment(AVCodecContext *avctx,
object->type = (*buf) >> 6; object->type = (*buf) >> 6;
display = av_mallocz(sizeof(DVBSubObjectDisplay)); display = av_mallocz(sizeof(*display));
if (!display) if (!display)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
@@ -1318,7 +1318,7 @@ static int dvbsub_parse_page_segment(AVCodecContext *avctx,
ff_dlog(avctx, "Page time out %ds, state %d\n", ctx->time_out, page_state); ff_dlog(avctx, "Page time out %ds, state %d\n", ctx->time_out, page_state);
if(ctx->compute_edt == 1) if (ctx->compute_edt == 1)
save_subtitle_set(avctx, sub, got_output); save_subtitle_set(avctx, sub, got_output);
if (page_state == 1 || page_state == 2) { if (page_state == 1 || page_state == 2) {
@@ -1352,7 +1352,7 @@ static int dvbsub_parse_page_segment(AVCodecContext *avctx,
} }
if (!display) { if (!display) {
display = av_mallocz(sizeof(DVBSubRegionDisplay)); display = av_mallocz(sizeof(*display));
if (!display) if (!display)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} }
@@ -1601,7 +1601,7 @@ static int dvbsub_display_end_segment(AVCodecContext *avctx, const uint8_t *buf,
{ {
DVBSubContext *ctx = avctx->priv_data; DVBSubContext *ctx = avctx->priv_data;
if(ctx->compute_edt == 0) if (ctx->compute_edt == 0)
save_subtitle_set(avctx, sub, got_output); save_subtitle_set(avctx, sub, got_output);
#ifdef DEBUG #ifdef DEBUG
save_display_set(ctx); save_display_set(ctx);
@@ -1717,12 +1717,12 @@ static int dvbsub_decode(AVCodecContext *avctx,
} }
end: end:
if(ret < 0) { if (ret < 0) {
*got_sub_ptr = 0; *got_sub_ptr = 0;
avsubtitle_free(sub); avsubtitle_free(sub);
return ret; return ret;
} else { } else {
if(ctx->compute_edt == 1 ) if (ctx->compute_edt == 1)
FFSWAP(int64_t, ctx->prev_start, sub->pts); FFSWAP(int64_t, ctx->prev_start, sub->pts);
} }
@@ -1730,10 +1730,11 @@ end:
} }
#define DS AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_SUBTITLE_PARAM #define DS AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_SUBTITLE_PARAM
#define OFFSET(x) offsetof(DVBSubContext, x)
static const AVOption options[] = { static const AVOption options[] = {
{"compute_edt", "compute end of time using pts or timeout", offsetof(DVBSubContext, compute_edt), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DS}, {"compute_edt", "compute end of time using pts or timeout", OFFSET(compute_edt), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DS},
{"compute_clut", "compute clut when not available(-1) or always(1) or never(0)", offsetof(DVBSubContext, compute_clut), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, DS}, {"compute_clut", "compute clut when not available(-1) or always(1) or never(0)", OFFSET(compute_clut), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, DS},
{"dvb_substream", "", offsetof(DVBSubContext, substream), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 63, DS}, {"dvb_substream", "", OFFSET(substream), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 63, DS},
{NULL} {NULL}
}; };
static const AVClass dvbsubdec_class = { static const AVClass dvbsubdec_class = {

View file

@@ -32,11 +32,6 @@ typedef struct DVDSubParseContext {
int packet_index; int packet_index;
} DVDSubParseContext; } DVDSubParseContext;
static av_cold int dvdsub_parse_init(AVCodecParserContext *s)
{
return 0;
}
static int dvdsub_parse(AVCodecParserContext *s, static int dvdsub_parse(AVCodecParserContext *s,
AVCodecContext *avctx, AVCodecContext *avctx,
const uint8_t **poutbuf, int *poutbuf_size, const uint8_t **poutbuf, int *poutbuf_size,
@@ -92,7 +87,6 @@ static av_cold void dvdsub_parse_close(AVCodecParserContext *s)
AVCodecParser ff_dvdsub_parser = { AVCodecParser ff_dvdsub_parser = {
.codec_ids = { AV_CODEC_ID_DVD_SUBTITLE }, .codec_ids = { AV_CODEC_ID_DVD_SUBTITLE },
.priv_data_size = sizeof(DVDSubParseContext), .priv_data_size = sizeof(DVDSubParseContext),
.parser_init = dvdsub_parse_init,
.parser_parse = dvdsub_parse, .parser_parse = dvdsub_parse,
.parser_close = dvdsub_parse_close, .parser_close = dvdsub_parse_close,
}; };

View file

@@ -26,61 +26,43 @@
#include "libavutil/samplefmt.h" #include "libavutil/samplefmt.h"
#include "avcodec.h" #include "avcodec.h"
#include "encode.h"
#include "frame_thread_encoder.h" #include "frame_thread_encoder.h"
#include "internal.h" #include "internal.h"
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size) int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
{ {
if (avpkt->size < 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid negative user packet size %d\n", avpkt->size);
return AVERROR(EINVAL);
}
if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) { if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n", av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE); size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
av_assert0(!avpkt->data);
if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
av_assert0(!avpkt->data || avpkt->data != avctx->internal->byte_buffer);
if (!avpkt->data || avpkt->size < size) {
av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size); av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
avpkt->data = avctx->internal->byte_buffer; avpkt->data = avctx->internal->byte_buffer;
avpkt->size = avctx->internal->byte_buffer_size;
}
}
if (avpkt->data) {
AVBufferRef *buf = avpkt->buf;
if (avpkt->size < size) {
av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size);
return AVERROR(EINVAL);
}
av_init_packet(avpkt);
avpkt->buf = buf;
avpkt->size = size; avpkt->size = size;
return 0; }
} else {
if (!avpkt->data) {
int ret = av_new_packet(avpkt, size); int ret = av_new_packet(avpkt, size);
if (ret < 0) if (ret < 0)
av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size); av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
return ret; return ret;
} }
return 0;
} }
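
Note: the slimmed-down ff_alloc_packet2() now asserts it receives an empty packet and either borrows the context's shared byte_buffer (when min_size is small relative to the worst-case size) or allocates a fresh refcounted packet. An illustrative call site inside an encode2() implementation; the size estimate is made up:

static int example_encode2(AVCodecContext *avctx, AVPacket *pkt,
                           const AVFrame *frame, int *got_packet)
{
    int64_t max_size = (int64_t)avctx->width * avctx->height * 4 + 1024;
    int ret = ff_alloc_packet2(avctx, pkt, max_size, 0);
    if (ret < 0)
        return ret;
    /* ... compress into pkt->data, then shrink pkt->size accordingly ... */
    *got_packet = 1;
    return 0;
}
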
/** /**
* Pad last frame with silence. * Pad last frame with silence.
*/ */
static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src) static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src)
{ {
AVFrame *frame = NULL;
int ret; int ret;
if (!(frame = av_frame_alloc()))
return AVERROR(ENOMEM);
frame->format = src->format; frame->format = src->format;
frame->channel_layout = src->channel_layout; frame->channel_layout = src->channel_layout;
frame->channels = src->channels; frame->channels = src->channels;
@@ -101,246 +83,10 @@ static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src)
s->channels, s->sample_fmt)) < 0) s->channels, s->sample_fmt)) < 0)
goto fail; goto fail;
*dst = frame;
return 0; return 0;
fail: fail:
av_frame_free(&frame); av_frame_unref(frame);
return ret;
}
int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
AVFrame *extended_frame = NULL;
AVFrame *padded_frame = NULL;
int ret;
AVPacket user_pkt = *avpkt;
int needs_realloc = !user_pkt.data;
*got_packet_ptr = 0;
if (!avctx->codec->encode2) {
av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
return AVERROR(ENOSYS);
}
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
av_packet_unref(avpkt);
return 0;
}
/* ensure that extended_data is properly set */
if (frame && !frame->extended_data) {
if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
avctx->channels > AV_NUM_DATA_POINTERS) {
av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
"with more than %d channels, but extended_data is not set.\n",
AV_NUM_DATA_POINTERS);
return AVERROR(EINVAL);
}
av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");
extended_frame = av_frame_alloc();
if (!extended_frame)
return AVERROR(ENOMEM);
memcpy(extended_frame, frame, sizeof(AVFrame));
extended_frame->extended_data = extended_frame->data;
frame = extended_frame;
}
/* extract audio service type metadata */
if (frame) {
AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
if (sd && sd->size >= sizeof(enum AVAudioServiceType))
avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
}
/* check for valid frame size */
if (frame) {
if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
if (frame->nb_samples > avctx->frame_size) {
av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
ret = AVERROR(EINVAL);
goto end;
}
} else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
/* if we already got an undersized frame, that must have been the last */
if (avctx->internal->last_audio_frame) {
av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame (avcodec_encode_audio2)\n", avctx->frame_size);
ret = AVERROR(EINVAL);
goto end;
}
if (frame->nb_samples < avctx->frame_size) {
ret = pad_last_frame(avctx, &padded_frame, frame);
if (ret < 0)
goto end;
frame = padded_frame;
avctx->internal->last_audio_frame = 1;
}
if (frame->nb_samples != avctx->frame_size) {
av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d) (avcodec_encode_audio2)\n", frame->nb_samples, avctx->frame_size);
ret = AVERROR(EINVAL);
goto end;
}
}
}
av_assert0(avctx->codec->encode2);
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (!ret) {
if (*got_packet_ptr) {
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
if (avpkt->pts == AV_NOPTS_VALUE)
avpkt->pts = frame->pts;
if (!avpkt->duration)
avpkt->duration = ff_samples_to_time_base(avctx,
frame->nb_samples);
}
avpkt->dts = avpkt->pts;
} else {
avpkt->size = 0;
}
}
if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
needs_realloc = 0;
if (user_pkt.data) {
if (user_pkt.size >= avpkt->size) {
memcpy(user_pkt.data, avpkt->data, avpkt->size);
} else {
av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
avpkt->size = user_pkt.size;
ret = -1;
}
avpkt->buf = user_pkt.buf;
avpkt->data = user_pkt.data;
} else if (!avpkt->buf) {
ret = av_packet_make_refcounted(avpkt);
if (ret < 0)
goto end;
}
}
if (!ret) {
if (needs_realloc && avpkt->data) {
ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
if (ret >= 0)
avpkt->data = avpkt->buf->data;
}
if (frame)
avctx->frame_number++;
}
if (ret < 0 || !*got_packet_ptr) {
av_packet_unref(avpkt);
goto end;
}
/* NOTE: if we add any audio encoders which output non-keyframe packets,
* this needs to be moved to the encoders, but for now we can do it
* here to simplify things */
avpkt->flags |= AV_PKT_FLAG_KEY;
end:
av_frame_free(&padded_frame);
av_free(extended_frame);
return ret;
}
int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
int ret;
AVPacket user_pkt = *avpkt;
int needs_realloc = !user_pkt.data;
*got_packet_ptr = 0;
if (!avctx->codec->encode2) {
av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
return AVERROR(ENOSYS);
}
if ((avctx->flags&AV_CODEC_FLAG_PASS1) && avctx->stats_out)
avctx->stats_out[0] = '\0';
if (!frame &&
!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
(avctx->internal->frame_thread_encoder && avctx->active_thread_type & FF_THREAD_FRAME))) {
av_packet_unref(avpkt);
return 0;
}
if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
return AVERROR(EINVAL);
if (frame && frame->format == AV_PIX_FMT_NONE)
av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
if (frame && (frame->width == 0 || frame->height == 0))
av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
av_assert0(avctx->codec->encode2);
if (CONFIG_FRAME_THREAD_ENCODER &&
avctx->internal->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
ret = ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
else {
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (*got_packet_ptr && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
avpkt->pts = avpkt->dts = frame->pts;
}
av_assert0(ret <= 0);
emms_c();
if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
needs_realloc = 0;
if (user_pkt.data) {
if (user_pkt.size >= avpkt->size) {
memcpy(user_pkt.data, avpkt->data, avpkt->size);
} else {
av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
avpkt->size = user_pkt.size;
ret = -1;
}
avpkt->buf = user_pkt.buf;
avpkt->data = user_pkt.data;
} else if (!avpkt->buf) {
ret = av_packet_make_refcounted(avpkt);
if (ret < 0)
return ret;
}
}
if (!ret) {
if (!*got_packet_ptr)
avpkt->size = 0;
if (needs_realloc && avpkt->data) {
ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
if (ret >= 0)
avpkt->data = avpkt->buf->data;
}
if (frame)
avctx->frame_number++;
}
if (ret < 0 || !*got_packet_ptr)
av_packet_unref(avpkt);
return ret; return ret;
} }
@@ -358,101 +104,353 @@ int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
return ret; return ret;
} }
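
Note: from here on, encoding is restructured around an internal pull model: encode_receive_packet_internal() drives encode2() (or the codec's own receive_packet()) and fetches input via the new ff_encode_get_frame(), while avcodec_encode_audio2/video2 and the send_frame callback disappear. The public contract is unchanged; for reference, the caller-side loop these internals serve:

static int encode_and_drain(AVCodecContext *enc, const AVFrame *frame,
                            AVPacket *pkt)
{
    /* frame == NULL enters draining mode */
    int ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        /* ... write the packet out ... */
        av_packet_unref(pkt);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
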
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
if (avci->draining)
return AVERROR_EOF;
if (!avci->buffer_frame->buf[0])
return AVERROR(EAGAIN);
av_frame_move_ref(frame, avci->buffer_frame);
return 0;
}
static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
{
AVCodecInternal *avci = avctx->internal;
EncodeSimpleContext *es = &avci->es;
AVFrame *frame = es->in_frame;
int got_packet;
int ret;
if (avci->draining_done)
return AVERROR_EOF;
if (!frame->buf[0] && !avci->draining) {
av_frame_unref(frame);
ret = ff_encode_get_frame(avctx, frame);
if (ret < 0 && ret != AVERROR_EOF)
return ret;
}
if (!frame->buf[0]) {
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
(avci->frame_thread_encoder && avctx->active_thread_type & FF_THREAD_FRAME)))
return AVERROR_EOF;
// Flushing is signaled with a NULL frame
frame = NULL;
}
got_packet = 0;
av_assert0(avctx->codec->encode2);
if (CONFIG_FRAME_THREAD_ENCODER &&
avci->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
else {
ret = avctx->codec->encode2(avctx, avpkt, frame, &got_packet);
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO && !ret && got_packet &&
!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
avpkt->pts = avpkt->dts = frame->pts;
}
av_assert0(ret <= 0);
emms_c();
if (!ret && got_packet) {
if (avpkt->data) {
ret = av_packet_make_refcounted(avpkt);
if (ret < 0)
goto end;
}
if (frame && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
if (avpkt->pts == AV_NOPTS_VALUE)
avpkt->pts = frame->pts;
if (!avpkt->duration)
avpkt->duration = ff_samples_to_time_base(avctx,
frame->nb_samples);
}
}
if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
/* NOTE: if we add any audio encoders which output non-keyframe packets,
* this needs to be moved to the encoders, but for now we can do it
* here to simplify things */
avpkt->flags |= AV_PKT_FLAG_KEY;
avpkt->dts = avpkt->pts;
}
}
if (avci->draining && !got_packet)
avci->draining_done = 1;
end:
if (ret < 0 || !got_packet)
av_packet_unref(avpkt);
if (frame) {
if (!ret)
avctx->frame_number++;
av_frame_unref(frame);
}
if (got_packet)
// Encoders must always return ref-counted buffers.
// Side-data only packets have no data and can be not ref-counted.
av_assert0(!avpkt->data || avpkt->buf);
return ret;
}
static int encode_simple_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
int ret;
while (!avpkt->data && !avpkt->side_data) {
ret = encode_simple_internal(avctx, avpkt);
if (ret < 0)
return ret;
}
return 0;
}
static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt)
{
AVCodecInternal *avci = avctx->internal;
int ret;
if (avci->draining_done)
return AVERROR_EOF;
av_assert0(!avpkt->data && !avpkt->side_data);
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
if ((avctx->flags & AV_CODEC_FLAG_PASS1) && avctx->stats_out)
avctx->stats_out[0] = '\0';
if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
return AVERROR(EINVAL);
}
if (avctx->codec->receive_packet) {
ret = avctx->codec->receive_packet(avctx, avpkt);
if (!ret)
// Encoders must always return ref-counted buffers.
// Side-data only packets have no data and can be not ref-counted.
av_assert0(!avpkt->data || avpkt->buf);
} else
ret = encode_simple_receive_packet(avctx, avpkt);
if (ret == AVERROR_EOF)
avci->draining_done = 1;
return ret;
}
static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
{
AVCodecInternal *avci = avctx->internal;
AVFrame *dst = avci->buffer_frame;
int ret;
if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
/* extract audio service type metadata */
AVFrameSideData *sd = av_frame_get_side_data(src, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
if (sd && sd->size >= sizeof(enum AVAudioServiceType))
avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
/* check for valid frame size */
if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
if (src->nb_samples > avctx->frame_size) {
av_log(avctx, AV_LOG_ERROR, "more samples than frame size\n");
return AVERROR(EINVAL);
}
} else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
/* if we already got an undersized frame, that must have been the last */
if (avctx->internal->last_audio_frame) {
av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame\n", avctx->frame_size);
return AVERROR(EINVAL);
}
if (src->nb_samples < avctx->frame_size) {
ret = pad_last_frame(avctx, dst, src);
if (ret < 0)
return ret;
avctx->internal->last_audio_frame = 1;
} else if (src->nb_samples > avctx->frame_size) {
av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d)\n", src->nb_samples, avctx->frame_size);
return AVERROR(EINVAL);
}
}
}
if (!dst->data[0]) {
ret = av_frame_ref(dst, src);
if (ret < 0)
return ret;
}
return 0;
}
int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
int ret;
if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
return AVERROR(EINVAL);
if (avci->draining)
return AVERROR_EOF;
if (avci->buffer_frame->data[0])
return AVERROR(EAGAIN);
if (!frame) {
avci->draining = 1;
} else {
ret = encode_send_frame_internal(avctx, frame);
if (ret < 0)
return ret;
}
if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data) {
ret = encode_receive_packet_internal(avctx, avci->buffer_pkt);
if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
return ret;
}
return 0;
}
int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
AVCodecInternal *avci = avctx->internal;
int ret;
av_packet_unref(avpkt);
if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
return AVERROR(EINVAL);
if (avci->buffer_pkt->data || avci->buffer_pkt->side_data) {
av_packet_move_ref(avpkt, avci->buffer_pkt);
} else {
ret = encode_receive_packet_internal(avctx, avpkt);
if (ret < 0)
return ret;
}
return 0;
}
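The two functions above form the complete public surface of the new encode path. A minimal sketch of a caller driving it (hypothetical helper, not part of this commit; assumes enc is an opened encoder and pkt an allocated AVPacket; passing frame = NULL begins draining):

#include <libavcodec/avcodec.h>
#include <stdio.h>

/* Sketch: feed one frame (or NULL to flush) and write out every packet
 * the encoder has ready. Error handling is abbreviated. */
static int encode_and_write(AVCodecContext *enc, const AVFrame *frame,
                            AVPacket *pkt, FILE *outfile)
{
    int ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        fwrite(pkt->data, 1, pkt->size, outfile);
        av_packet_unref(pkt);           // packets come back ref-counted
    }
    // EAGAIN: encoder wants more input; EOF: fully drained after a NULL frame
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}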
static int compat_encode(AVCodecContext *avctx, AVPacket *avpkt,
int *got_packet, const AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
AVPacket user_pkt;
int ret;
*got_packet = 0;
if (frame && avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
if (frame->format == AV_PIX_FMT_NONE)
av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
if (frame->width == 0 || frame->height == 0)
av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
}
ret = avcodec_send_frame(avctx, frame);
if (ret == AVERROR_EOF)
ret = 0;
else if (ret == AVERROR(EAGAIN)) {
/* we fully drain all the output in each encode call, so this should not
* ever happen */
return AVERROR_BUG;
} else if (ret < 0)
return ret;
av_packet_move_ref(&user_pkt, avpkt);
while (ret >= 0) {
ret = avcodec_receive_packet(avctx, avpkt);
if (ret < 0) {
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
ret = 0;
goto finish;
}
if (avpkt != avci->compat_encode_packet) {
if (avpkt->data && user_pkt.data) {
if (user_pkt.size >= avpkt->size) {
memcpy(user_pkt.data, avpkt->data, avpkt->size);
av_buffer_unref(&avpkt->buf);
avpkt->buf = user_pkt.buf;
avpkt->data = user_pkt.data;
av_init_packet(&user_pkt);
} else {
av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
av_packet_unref(avpkt);
ret = AVERROR(EINVAL);
goto finish;
}
}
*got_packet = 1;
avpkt = avci->compat_encode_packet;
} else {
if (!avci->compat_decode_warned) {
av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_encode_* "
"API cannot return all the packets for this encoder. "
"Some packets will be dropped. Update your code to the "
"new encoding API to fix this.\n");
avci->compat_decode_warned = 1;
}
av_packet_unref(avpkt);
}
if (avci->draining)
break;
}
finish:
if (ret < 0)
av_packet_unref(&user_pkt);
return ret;
}
int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
int ret = compat_encode(avctx, avpkt, got_packet_ptr, frame);
if (ret < 0)
av_packet_unref(avpkt);
return ret;
}
int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
int ret = compat_encode(avctx, avpkt, got_packet_ptr, frame);
if (ret < 0)
av_packet_unref(avpkt);
return ret;
}

externals/ffmpeg/libavcodec/encode.h vendored Executable file
@ -0,0 +1,39 @@
/*
* generic encoding-related code
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_ENCODE_H
#define AVCODEC_ENCODE_H
#include "libavutil/frame.h"
#include "avcodec.h"
/**
* Called by encoders to get the next frame for encoding.
*
* @param frame An empty frame to be filled with data.
* @return 0 if a new reference has been successfully written to frame
* AVERROR(EAGAIN) if no data is currently available
* AVERROR_EOF if end of stream has been reached, so no more data
* will be available
*/
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame);
#endif /* AVCODEC_ENCODE_H */
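How a receive_packet-style encoder is expected to consume this helper — a sketch only; MyEncoderContext, my_flush and my_encode_one are invented names for illustration, not FFmpeg APIs:

/* Sketch of a hypothetical encoder receive_packet callback. */
static int my_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    MyEncoderContext *s = avctx->priv_data;      // invented private context
    int ret = ff_encode_get_frame(avctx, s->frame);
    if (ret == AVERROR(EAGAIN))
        return ret;                              // caller must send a frame first
    if (ret == AVERROR_EOF)
        return my_flush(avctx, avpkt);           // invented: emit delayed packets

    ret = my_encode_one(avctx, s->frame, avpkt); // invented: encode one frame
    av_frame_unref(s->frame);                    // we own the ref we received
    return ret;
}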

externals/ffmpeg/libavcodec/filter_units_bsf.c
@ -125,7 +125,7 @@ static int filter_units_filter(AVBSFContext *bsf, AVPacket *pkt)
}
if (ctx->mode == REMOVE ? j < ctx->nb_types
: j >= ctx->nb_types)
ff_cbs_delete_unit(frag, i);
}
if (frag->nb_units == 0) {
@ -143,7 +143,7 @@ static int filter_units_filter(AVBSFContext *bsf, AVPacket *pkt)
fail:
if (err < 0)
av_packet_unref(pkt);
ff_cbs_fragment_reset(frag);
return err;
}
@ -199,7 +199,7 @@ static int filter_units_init(AVBSFContext *bsf)
av_log(bsf, AV_LOG_ERROR, "Failed to write extradata.\n");
}
ff_cbs_fragment_reset(frag);
}
return err;
@ -211,7 +211,7 @@ static void filter_units_close(AVBSFContext *bsf)
av_freep(&ctx->type_list);
ff_cbs_fragment_free(&ctx->fragment);
ff_cbs_close(&ctx->cbc);
}
externals/ffmpeg/libavcodec/h261dec.c
@ -686,5 +686,6 @@ AVCodec ff_h261_decoder = {
.close = h261_decode_end,
.decode = h261_decode_frame,
.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.max_lowres = 3,
};
externals/ffmpeg/libavcodec/h263dec.c
@ -770,7 +770,7 @@ AVCodec ff_h263_decoder = {
.decode = ff_h263_decode_frame,
.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY,
.caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM | FF_CODEC_CAP_INIT_CLEANUP,
.flush = ff_mpeg_flush,
.max_lowres = 3,
.pix_fmts = ff_h263_hwaccel_pixfmt_list_420,
@ -788,7 +788,7 @@ AVCodec ff_h263p_decoder = {
.decode = ff_h263_decode_frame,
.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY,
.caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM | FF_CODEC_CAP_INIT_CLEANUP,
.flush = ff_mpeg_flush,
.max_lowres = 3,
.pix_fmts = ff_h263_hwaccel_pixfmt_list_420,
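FF_CODEC_CAP_INIT_CLEANUP (newly set on h261 above and on both h263 decoders here) makes the generic layer call the codec's close callback even when init fails partway, so init can simply return an error. A minimal sketch of the resulting pattern, with invented names:

/* With FF_CODEC_CAP_INIT_CLEANUP, init may bail out without unwinding. */
static av_cold int my_decode_init(AVCodecContext *avctx)
{
    MyContext *s = avctx->priv_data;    // invented context
    s->tables = av_mallocz(4096);
    if (!s->tables)
        return AVERROR(ENOMEM);         // close() will still run
    return 0;
}

static av_cold int my_decode_close(AVCodecContext *avctx)
{
    MyContext *s = avctx->priv_data;
    av_freep(&s->tables);               // av_freep tolerates NULL pointers
    return 0;
}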

externals/ffmpeg/libavcodec/h264_metadata_bsf.c
@ -49,7 +49,8 @@ enum {
typedef struct H264MetadataContext {
const AVClass *class;
CodedBitstreamContext *input;
CodedBitstreamContext *output;
CodedBitstreamFragment access_unit;
int done_first_au;
@ -289,7 +290,7 @@ static int h264_metadata_update_side_data(AVBSFContext *bsf, AVPacket *pkt)
if (!side_data_size)
return 0;
err = ff_cbs_read(ctx->input, au, side_data, side_data_size);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to read extradata from packet side data.\n");
return err;
@ -303,7 +304,7 @@ static int h264_metadata_update_side_data(AVBSFContext *bsf, AVPacket *pkt)
}
}
err = ff_cbs_write_fragment_data(ctx->output, au);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to write extradata into packet side data.\n");
return err;
@ -314,7 +315,7 @@ static int h264_metadata_update_side_data(AVBSFContext *bsf, AVPacket *pkt)
return AVERROR(ENOMEM);
memcpy(side_data, au->data, au->data_size);
ff_cbs_fragment_reset(au);
return 0;
}
@ -334,7 +335,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
if (err < 0)
goto fail;
err = ff_cbs_read_packet(ctx->input, au, pkt);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to read packet.\n");
goto fail;
@ -349,7 +350,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
// If an AUD is present, it must be the first NAL unit.
if (au->units[0].type == H264_NAL_AUD) {
if (ctx->aud == REMOVE)
ff_cbs_delete_unit(au, 0);
} else {
if (ctx->aud == INSERT) {
static const int primary_pic_type_table[] = {
@ -390,7 +391,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
.primary_pic_type = j,
};
err = ff_cbs_insert_unit_content(au,
0, H264_NAL_AUD, &aud, NULL);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to insert AUD.\n");
@ -448,7 +449,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
udu->data_length = len + 1;
memcpy(udu->data, ctx->sei_user_data + i + 1, len + 1);
err = ff_cbs_h264_add_sei_message(au, &payload);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to add user data SEI "
"message to access unit.\n");
@ -467,7 +468,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
if (ctx->delete_filler) {
for (i = au->nb_units - 1; i >= 0; i--) {
if (au->units[i].type == H264_NAL_FILLER_DATA) {
ff_cbs_delete_unit(au, i);
continue;
}
@ -478,8 +479,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
for (j = sei->payload_count - 1; j >= 0; j--) {
if (sei->payload[j].payload_type ==
H264_SEI_TYPE_FILLER_PAYLOAD)
ff_cbs_h264_delete_sei_message(au, &au->units[i], j);
}
}
}
@ -503,8 +503,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
if (ctx->display_orientation == REMOVE ||
ctx->display_orientation == INSERT) {
ff_cbs_h264_delete_sei_message(au, &au->units[i], j);
continue;
}
@ -595,7 +594,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
if (write) {
disp->display_orientation_repetition_period = 1;
err = ff_cbs_h264_add_sei_message(au, &payload);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to add display orientation "
"SEI message to access unit.\n");
@ -604,7 +603,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
}
}
err = ff_cbs_write_packet(ctx->output, pkt, au);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to write packet.\n");
goto fail;
@ -614,7 +613,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
err = 0;
fail:
ff_cbs_fragment_reset(au);
if (err < 0)
av_packet_unref(pkt);
@ -628,12 +627,15 @@ static int h264_metadata_init(AVBSFContext *bsf)
CodedBitstreamFragment *au = &ctx->access_unit;
int err, i;
err = ff_cbs_init(&ctx->input, AV_CODEC_ID_H264, bsf);
if (err < 0)
return err;
err = ff_cbs_init(&ctx->output, AV_CODEC_ID_H264, bsf);
if (err < 0)
return err;
if (bsf->par_in->extradata) {
err = ff_cbs_read_extradata(ctx->input, au, bsf->par_in);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to read extradata.\n");
goto fail;
@ -647,7 +649,7 @@ static int h264_metadata_init(AVBSFContext *bsf)
}
}
err = ff_cbs_write_extradata(ctx->output, bsf->par_out, au);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to write extradata.\n");
goto fail;
@ -656,7 +658,7 @@ static int h264_metadata_init(AVBSFContext *bsf)
err = 0;
fail:
ff_cbs_fragment_reset(au);
return err;
}
@ -664,8 +666,9 @@ static void h264_metadata_close(AVBSFContext *bsf)
{
H264MetadataContext *ctx = bsf->priv_data;
ff_cbs_fragment_free(&ctx->access_unit);
ff_cbs_close(&ctx->input);
ff_cbs_close(&ctx->output);
}
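The switch from one shared cbc to separate input/output contexts keeps the reader's parameter-set state from being disturbed when extradata is re-serialized through the writer. A condensed lifecycle sketch using the internal cbs API as it appears above (error paths trimmed; the wrapper function is invented):

/* Sketch: independent read and write CBS contexts across a filter's lifetime. */
static int metadata_like_init(AVBSFContext *bsf, CodedBitstreamContext **in,
                              CodedBitstreamContext **out)
{
    int err = ff_cbs_init(in, AV_CODEC_ID_H264, bsf);   // parsing state
    if (err < 0)
        return err;
    return ff_cbs_init(out, AV_CODEC_ID_H264, bsf);     // writing state
}

/* Per packet: read units with *in, edit them, re-serialize with *out;
 * the two contexts never share parameter-set bookkeeping. */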
#define OFFSET(x) offsetof(H264MetadataContext, x)
externals/ffmpeg/libavcodec/h264_redundant_pps_bsf.c
@ -95,7 +95,7 @@ static int h264_redundant_pps_filter(AVBSFContext *bsf, AVPacket *pkt)
if (!au_has_sps) {
av_log(bsf, AV_LOG_VERBOSE, "Deleting redundant PPS "
"at %"PRId64".\n", pkt->pts);
ff_cbs_delete_unit(au, i);
i--;
continue;
}
@ -113,7 +113,7 @@ static int h264_redundant_pps_filter(AVBSFContext *bsf, AVPacket *pkt)
err = 0;
fail:
ff_cbs_fragment_reset(au);
if (err < 0)
av_packet_unref(pkt);
@ -161,7 +161,7 @@ static int h264_redundant_pps_init(AVBSFContext *bsf)
err = 0;
fail:
ff_cbs_fragment_reset(au);
return err;
}
@ -175,7 +175,7 @@ static void h264_redundant_pps_close(AVBSFContext *bsf)
{
H264RedundantPPSContext *ctx = bsf->priv_data;
ff_cbs_fragment_free(&ctx->access_unit);
ff_cbs_close(&ctx->input);
ff_cbs_close(&ctx->output);
}
externals/ffmpeg/libavcodec/h264_sei.c
@ -52,6 +52,10 @@ void ff_h264_sei_uninit(H264SEIContext *h)
h->afd.present = 0;
av_buffer_unref(&h->a53_caption.buf_ref);
for (int i = 0; i < h->unregistered.nb_buf_ref; i++)
av_buffer_unref(&h->unregistered.buf_ref[i]);
h->unregistered.nb_buf_ref = 0;
av_freep(&h->unregistered.buf_ref);
}
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps,
@ -260,25 +264,34 @@ static int decode_unregistered_user_data(H264SEIUnregistered *h, GetBitContext *
{
uint8_t *user_data;
int e, build, i;
AVBufferRef *buf_ref, **tmp;
if (size < 16 || size >= INT_MAX - 1)
return AVERROR_INVALIDDATA;
tmp = av_realloc_array(h->buf_ref, h->nb_buf_ref + 1, sizeof(*h->buf_ref));
if (!tmp)
return AVERROR(ENOMEM);
h->buf_ref = tmp;
buf_ref = av_buffer_alloc(size + 1);
if (!buf_ref)
return AVERROR(ENOMEM);
user_data = buf_ref->data;
for (i = 0; i < size; i++)
user_data[i] = get_bits(gb, 8);
user_data[i] = 0;
buf_ref->size = size;
h->buf_ref[h->nb_buf_ref++] = buf_ref;
e = sscanf(user_data + 16, "x264 - core %d", &build);
if (e == 1 && build > 0)
h->x264_build = build;
if (e == 1 && build == 1 && !strncmp(user_data+16, "x264 - core 0000", 16))
h->x264_build = 67;
return 0;
}
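The same grow-then-attach pattern reappears in the HEVC SEI code further down; isolated as a standalone sketch using only public libavutil APIs:

#include <libavutil/buffer.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>
#include <string.h>

/* Sketch: append a NUL-terminated copy of src[0..size) to a growing
 * array of AVBufferRefs, as the SEI parsers above do. */
static int append_buf(AVBufferRef ***arr, int *nb, const uint8_t *src, int size)
{
    AVBufferRef **tmp = av_realloc_array(*arr, *nb + 1, sizeof(**arr));
    if (!tmp)
        return AVERROR(ENOMEM);
    *arr = tmp;                      // on later failure the array stays valid

    AVBufferRef *buf = av_buffer_alloc(size + 1);
    if (!buf)
        return AVERROR(ENOMEM);
    memcpy(buf->data, src, size);
    buf->data[size] = 0;
    buf->size = size;                // advertised size excludes the NUL

    (*arr)[(*nb)++] = buf;
    return 0;
}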

externals/ffmpeg/libavcodec/h264_sei.h
@ -126,6 +126,8 @@ typedef struct H264SEIA53Caption {
typedef struct H264SEIUnregistered {
int x264_build;
AVBufferRef **buf_ref;
int nb_buf_ref;
} H264SEIUnregistered;
typedef struct H264SEIRecoveryPoint {
externals/ffmpeg/libavcodec/h264_slice.c
@ -1125,9 +1125,10 @@ static int h264_export_frame_props(H264Context *h)
{
const SPS *sps = h->ps.sps;
H264Picture *cur = h->cur_pic_ptr;
AVFrame *out = cur->f;
out->interlaced_frame = 0;
out->repeat_pict = 0;
/* Signal interlacing information externally. */
/* Prioritize picture timing SEI information over used
@ -1150,59 +1151,59 @@ static int h264_export_frame_props(H264Context *h)
break;
case H264_SEI_PIC_STRUCT_TOP_FIELD:
case H264_SEI_PIC_STRUCT_BOTTOM_FIELD:
out->interlaced_frame = 1;
break;
case H264_SEI_PIC_STRUCT_TOP_BOTTOM:
case H264_SEI_PIC_STRUCT_BOTTOM_TOP:
if (FIELD_OR_MBAFF_PICTURE(h))
out->interlaced_frame = 1;
else
// try to flag soft telecine progressive
out->interlaced_frame = h->prev_interlaced_frame;
break;
case H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
case H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
/* Signal the possibility of telecined film externally
* (pic_struct 5,6). From these hints, let the applications
* decide if they apply deinterlacing. */
out->repeat_pict = 1;
break;
case H264_SEI_PIC_STRUCT_FRAME_DOUBLING:
out->repeat_pict = 2;
break;
case H264_SEI_PIC_STRUCT_FRAME_TRIPLING:
out->repeat_pict = 4;
break;
}
if ((pt->ct_type & 3) &&
pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
out->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
} else {
/* Derive interlacing flag from used decoding process. */
out->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
}
h->prev_interlaced_frame = out->interlaced_frame;
if (cur->field_poc[0] != cur->field_poc[1]) {
/* Derive top_field_first from field pocs. */
out->top_field_first = cur->field_poc[0] < cur->field_poc[1];
} else {
if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
/* Use picture timing SEI information. Even if it is a
* information of a past frame, better than nothing. */
if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
out->top_field_first = 1;
else
out->top_field_first = 0;
} else if (out->interlaced_frame) {
/* Default to top field first when pic_struct_present_flag
* is not set but interlaced frame detected */
out->top_field_first = 1;
} else {
/* Most likely progressive */
out->top_field_first = 0;
}
}
@ -1211,7 +1212,7 @@ static int h264_export_frame_props(H264Context *h)
h->sei.frame_packing.content_interpretation_type > 0 &&
h->sei.frame_packing.content_interpretation_type < 3) {
H264SEIFramePacking *fp = &h->sei.frame_packing;
AVStereo3D *stereo = av_stereo3d_create_side_data(out);
if (stereo) {
switch (fp->arrangement_type) {
case H264_SEI_FPA_TYPE_CHECKERBOARD:
@ -1258,7 +1259,7 @@ static int h264_export_frame_props(H264Context *h)
h->sei.display_orientation.vflip)) {
H264SEIDisplayOrientation *o = &h->sei.display_orientation;
double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
AVFrameSideData *rotation = av_frame_new_side_data(out,
AV_FRAME_DATA_DISPLAYMATRIX,
sizeof(int32_t) * 9);
if (rotation) {
@ -1269,7 +1270,7 @@ static int h264_export_frame_props(H264Context *h)
}
if (h->sei.afd.present) {
AVFrameSideData *sd = av_frame_new_side_data(out, AV_FRAME_DATA_AFD,
sizeof(uint8_t));
if (sd) {
@ -1281,7 +1282,7 @@ static int h264_export_frame_props(H264Context *h)
if (h->sei.a53_caption.buf_ref) {
H264SEIA53Caption *a53 = &h->sei.a53_caption;
AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
if (!sd)
av_buffer_unref(&a53->buf_ref);
a53->buf_ref = NULL;
@ -1289,11 +1290,25 @@ static int h264_export_frame_props(H264Context *h)
h->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
}
for (int i = 0; i < h->sei.unregistered.nb_buf_ref; i++) {
H264SEIUnregistered *unreg = &h->sei.unregistered;
if (unreg->buf_ref[i]) {
AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
AV_FRAME_DATA_SEI_UNREGISTERED,
unreg->buf_ref[i]);
if (!sd)
av_buffer_unref(&unreg->buf_ref[i]);
unreg->buf_ref[i] = NULL;
}
}
h->sei.unregistered.nb_buf_ref = 0;
if (h->sei.picture_timing.timecode_cnt > 0) {
uint32_t *tc_sd;
char tcbuf[AV_TIMECODE_STR_SIZE];
AVFrameSideData *tcside = av_frame_new_side_data(out,
AV_FRAME_DATA_S12M_TIMECODE,
sizeof(uint32_t)*4);
if (!tcside)
@ -1303,33 +1318,15 @@ static int h264_export_frame_props(H264Context *h)
tc_sd[0] = h->sei.picture_timing.timecode_cnt;
for (int i = 0; i < tc_sd[0]; i++) {
int drop = h->sei.picture_timing.timecode[i].dropframe;
int hh = h->sei.picture_timing.timecode[i].hours;
int mm = h->sei.picture_timing.timecode[i].minutes;
int ss = h->sei.picture_timing.timecode[i].seconds;
int ff = h->sei.picture_timing.timecode[i].frame;
tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
av_timecode_make_smpte_tc_string(tcbuf, tc_sd[i + 1], 0);
av_dict_set(&out->metadata, "timecode", tcbuf, 0);
}
h->sei.picture_timing.timecode_cnt = 0;
}
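The open-coded BCD packing is replaced by av_timecode_get_smpte(), a libavutil helper added alongside this change; a quick standalone check of what it produces (assuming that helper is available in the vendored tree):

#include <libavutil/timecode.h>
#include <stdio.h>

int main(void)
{
    char buf[AV_TIMECODE_STR_SIZE];
    /* Pack 01:02:03 frame 4 at 25 fps, non-drop, into SMPTE ST 12-1 form. */
    uint32_t tc = av_timecode_get_smpte((AVRational){25, 1}, 0, 1, 2, 3, 4);
    printf("%s\n", av_timecode_make_smpte_tc_string(buf, tc, 0)); // 01:02:03:04
    return 0;
}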

externals/ffmpeg/libavcodec/h264dec.c
@ -182,40 +182,27 @@ int ff_h264_alloc_tables(H264Context *h)
{
const int big_mb_num = h->mb_stride * (h->mb_height + 1);
const int row_mb_num = 2*h->mb_stride*FFMAX(h->nb_slice_ctx, 1);
const int st_size = big_mb_num + h->mb_stride;
int x, y;
if (!FF_ALLOCZ_TYPED_ARRAY(h->intra4x4_pred_mode, row_mb_num * 8) ||
!FF_ALLOCZ_TYPED_ARRAY(h->non_zero_count, big_mb_num) ||
!FF_ALLOCZ_TYPED_ARRAY(h->slice_table_base, st_size) ||
!FF_ALLOCZ_TYPED_ARRAY(h->cbp_table, big_mb_num) ||
!FF_ALLOCZ_TYPED_ARRAY(h->chroma_pred_mode_table, big_mb_num) ||
!FF_ALLOCZ_TYPED_ARRAY(h->mvd_table[0], row_mb_num * 8) ||
!FF_ALLOCZ_TYPED_ARRAY(h->mvd_table[1], row_mb_num * 8) ||
!FF_ALLOCZ_TYPED_ARRAY(h->direct_table, big_mb_num * 4) ||
!FF_ALLOCZ_TYPED_ARRAY(h->list_counts, big_mb_num) ||
!FF_ALLOCZ_TYPED_ARRAY(h->mb2b_xy, big_mb_num) ||
!FF_ALLOCZ_TYPED_ARRAY(h->mb2br_xy, big_mb_num))
return AVERROR(ENOMEM);
h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;
h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
memset(h->slice_table_base, -1,
st_size * sizeof(*h->slice_table_base));
h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
for (y = 0; y < h->mb_height; y++)
for (x = 0; x < h->mb_width; x++) {
const int mb_xy = x + y * h->mb_stride;
@ -226,9 +213,6 @@ int ff_h264_alloc_tables(H264Context *h)
}
return 0;
}
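FF_ALLOCZ_TYPED_ARRAY sizes the allocation from the pointee type, which is what lets the eleven separate FF_ALLOCZ_OR_GOTO calls collapse into one ||-chain with a single error exit (cleanup is left to ff_h264_free_tables, as the code notes). The idea, re-derived in plain C so it stands alone — FFmpeg's exact macro definition may differ:

#include <stdint.h>
#include <stdlib.h>

/* Re-derived for illustration: nelem zeroed elements, sized from *(p). */
#define ALLOCZ_TYPED_ARRAY(p, nelem) ((p) = calloc(nelem, sizeof(*(p))))

typedef struct Tables {
    uint8_t  *list_counts;
    uint16_t *cbp_table;
    uint32_t *mb2b_xy;
} Tables;

static int alloc_tables(Tables *t, size_t n)
{
    if (!ALLOCZ_TYPED_ARRAY(t->list_counts, n) ||
        !ALLOCZ_TYPED_ARRAY(t->cbp_table,   n) ||
        !ALLOCZ_TYPED_ARRAY(t->mb2b_xy,     n))
        return -1;   // caller frees whatever succeeded, mirroring ff_h264_free_tables
    return 0;
}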
/**
@ -253,8 +237,8 @@ int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
if (sl != h->slice_ctx) {
memset(er, 0, sizeof(*er));
} else if (CONFIG_ERROR_RESILIENCE) {
const int er_size = h->mb_height * h->mb_stride * (4*sizeof(int) + 1);
/* init ER */
er->avctx = h->avctx;
@ -269,8 +253,11 @@ int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
er->b8_stride = h->mb_width * 2 + 1;
// error resilience code looks cleaner with this
if (!FF_ALLOCZ_TYPED_ARRAY(er->mb_index2xy, h->mb_num + 1) ||
!FF_ALLOCZ_TYPED_ARRAY(er->error_status_table, mb_array_size) ||
!FF_ALLOCZ_TYPED_ARRAY(er->er_temp_buffer, er_size) ||
!FF_ALLOCZ_TYPED_ARRAY(sl->dc_val_base, yc_size))
return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
for (y = 0; y < h->mb_height; y++)
for (x = 0; x < h->mb_width; x++)
@ -278,15 +265,6 @@ int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
h->mb_stride + h->mb_width;
er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
er->dc_val[2] = er->dc_val[1] + c_size;
@ -295,9 +273,6 @@ int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
}
return 0;
}
static int h264_init_context(AVCodecContext *avctx, H264Context *h)
@ -872,7 +847,7 @@ fail:
return ret;
}
static int is_avcc_extradata(const uint8_t *buf, int buf_size)
{
int cnt= buf[5]&0x1f;
const uint8_t *p= buf+6;
@ -999,16 +974,15 @@ static int h264_decode_frame(AVCodecContext *avctx, void *data,
if (buf_size == 0)
return send_next_delayed_frame(h, pict, got_frame, 0);
if (av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
int side_size;
uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
ff_h264_decode_extradata(side, side_size,
&h->ps, &h->is_avc, &h->nal_length_size,
avctx->err_recognition, avctx);
}
if (h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC) {
if (is_avcc_extradata(buf, buf_size))
return ff_h264_decode_extradata(buf, buf_size,
&h->ps, &h->is_avc, &h->nal_length_size,
avctx->err_recognition, avctx);
externals/ffmpeg/libavcodec/h265_metadata_bsf.c
@ -40,7 +40,8 @@ enum {
typedef struct H265MetadataContext {
const AVClass *class;
CodedBitstreamContext *input;
CodedBitstreamContext *output;
CodedBitstreamFragment access_unit;
H265RawAUD aud_nal;
@ -350,7 +351,7 @@ static int h265_metadata_update_side_data(AVBSFContext *bsf, AVPacket *pkt)
if (!side_data_size)
return 0;
err = ff_cbs_read(ctx->input, au, side_data, side_data_size);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to read extradata from packet side data.\n");
return err;
@ -372,7 +373,7 @@ static int h265_metadata_update_side_data(AVBSFContext *bsf, AVPacket *pkt)
}
}
err = ff_cbs_write_fragment_data(ctx->output, au);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to write extradata into packet side data.\n");
return err;
@ -383,7 +384,7 @@ static int h265_metadata_update_side_data(AVBSFContext *bsf, AVPacket *pkt)
return AVERROR(ENOMEM);
memcpy(side_data, au->data, au->data_size);
ff_cbs_fragment_reset(au);
return 0;
}
@ -402,7 +403,7 @@ static int h265_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
if (err < 0)
goto fail;
err = ff_cbs_read_packet(ctx->input, au, pkt);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to read packet.\n");
goto fail;
@ -417,7 +418,7 @@ static int h265_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
// If an AUD is present, it must be the first NAL unit.
if (au->units[0].type == HEVC_NAL_AUD) {
if (ctx->aud == REMOVE)
ff_cbs_delete_unit(au, 0);
} else {
if (ctx->aud == INSERT) {
H265RawAUD *aud = &ctx->aud_nal;
@ -449,8 +450,7 @@ static int h265_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
};
aud->pic_type = pic_type;
err = ff_cbs_insert_unit_content(au, 0, HEVC_NAL_AUD, aud, NULL);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to insert AUD.\n");
goto fail;
@ -474,7 +474,7 @@ static int h265_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
}
}
err = ff_cbs_write_packet(ctx->output, pkt, au);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to write packet.\n");
goto fail;
@ -482,7 +482,7 @@ static int h265_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
err = 0;
fail:
ff_cbs_fragment_reset(au);
if (err < 0)
av_packet_unref(pkt);
@ -496,12 +496,15 @@ static int h265_metadata_init(AVBSFContext *bsf)
CodedBitstreamFragment *au = &ctx->access_unit;
int err, i;
err = ff_cbs_init(&ctx->input, AV_CODEC_ID_HEVC, bsf);
if (err < 0)
return err;
err = ff_cbs_init(&ctx->output, AV_CODEC_ID_HEVC, bsf);
if (err < 0)
return err;
if (bsf->par_in->extradata) {
err = ff_cbs_read_extradata(ctx->input, au, bsf->par_in);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to read extradata.\n");
goto fail;
@ -523,7 +526,7 @@ static int h265_metadata_init(AVBSFContext *bsf)
}
}
err = ff_cbs_write_extradata(ctx->output, bsf->par_out, au);
if (err < 0) {
av_log(bsf, AV_LOG_ERROR, "Failed to write extradata.\n");
goto fail;
@ -532,7 +535,7 @@ static int h265_metadata_init(AVBSFContext *bsf)
err = 0;
fail:
ff_cbs_fragment_reset(au);
return err;
}
@ -540,8 +543,9 @@ static void h265_metadata_close(AVBSFContext *bsf)
{
H265MetadataContext *ctx = bsf->priv_data;
ff_cbs_fragment_free(&ctx->access_unit);
ff_cbs_close(&ctx->input);
ff_cbs_close(&ctx->output);
}
#define OFFSET(x) offsetof(H265MetadataContext, x)
externals/ffmpeg/libavcodec/hca_data.h
@ -86,18 +86,18 @@ static const float intensity_ratio_table[] =
{
2.0, 1.85714, 1.71429, 1.57143, 1.42857, 1.28571, 1.14286, 1.0,
0.857143, 0.714286, 0.571429, 0.428571, 0.285714, 0.142857, 0.0, 0.0,
};
static const float scale_conversion_table[] =
{
0, 0, 1.87066e-08, 2.49253e-08, 3.32113e-08, 4.42518e-08, 5.89626e-08, 7.85637e-08, 1.04681e-07,
1.3948e-07, 1.85848e-07, 2.4763e-07, 3.2995e-07, 4.39636e-07, 5.85785e-07, 7.80519e-07, 1.03999e-06,
1.38572e-06, 1.84637e-06, 2.46017e-06, 3.27801e-06, 4.36772e-06, 5.8197e-06, 7.75435e-06, 1.03321e-05,
1.37669e-05, 1.83435e-05, 2.44414e-05, 3.25665e-05, 4.33927e-05, 5.78179e-05, 7.70384e-05, 0.000102648,
0.000136772, 0.00018224, 0.000242822, 0.000323544, 0.000431101, 0.000574413, 0.000765366, 0.0010198,
0.00135881, 0.00181053, 0.0024124, 0.00321437, 0.00428293, 0.00570671, 0.00760381, 0.0101316,
0.0134996, 0.0179873, 0.0239669, 0.0319343, 0.0425503, 0.0566954, 0.0755428, 0.100656,
0.134117, 0.178702, 0.238108, 0.317263, 0.422731, 0.563261, 0.750507,
1.0, 1.33243, 1.77538, 2.36557, 3.15196, 4.19978, 5.59592, 7.45618,
9.93486, 13.2375, 17.6381, 23.5016, 31.3143, 41.7242, 55.5947, 74.0762,
98.7015, 131.513, 175.232, 233.485, 311.103, 414.524, 552.326, 735.937,
@ -108,6 +108,8 @@ static const float scale_conversion_table[] =
9.55285e+06, 1.27285e+07, 1.69599e+07, 2.25979e+07, 3.01102e+07, 4.01198e+07, 5.3457e+07, 0,
};
static const int scale_conv_bias = 64;
static const float dequantizer_scaling_table[] =
{
1.58838e-07, 2.11641e-07, 2.81998e-07, 3.75743e-07, 5.00652e-07, 6.67085e-07, 8.88846e-07, 1.18433e-06,
externals/ffmpeg/libavcodec/hcadec.c
@ -267,7 +267,7 @@ static void apply_intensity_stereo(HCAContext *s, ChannelContext *ch1, ChannelCo
int index, unsigned band_count, unsigned base_band_count,
unsigned stereo_band_count)
{
float ratio_l = intensity_ratio_table[ch2->intensity[index]];
float ratio_r = ratio_l - 2.0f;
float *c1 = &ch1->imdct_in[base_band_count];
float *c2 = &ch2->imdct_in[base_band_count];
@ -291,7 +291,8 @@ static void reconstruct_hfr(HCAContext *s, ChannelContext *ch,
for (int i = 0, k = start_band, l = start_band - 1; i < hfr_group_count; i++){
for (int j = 0; j < bands_per_hfr_group && k < total_band_count && l >= 0; j++, k++, l--){
ch->imdct_in[k] = scale_conversion_table[ scale_conv_bias +
av_clip_intp2(ch->hfr_scale[i] - ch->scale_factors[l], 6) ] * ch->imdct_in[l];
}
}
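Where the old code masked the scale delta with & 63 (wrapping large deltas into unrelated table entries), the new code clamps it into [-64, 63] via av_clip_intp2 and re-centers it with scale_conv_bias (64) so it indexes the enlarged 128-entry table. A boundary-case check:

#include <libavutil/common.h>
#include <stdio.h>

int main(void)
{
    int delta = 70;    // hypothetical hfr_scale - scale_factor difference
    printf("old index: %d\n", delta & 63);                    // 6: wrapped, wrong bucket
    printf("new index: %d\n", 64 + av_clip_intp2(delta, 6));  // 127: clamped to the top
    return 0;
}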

externals/ffmpeg/libavcodec/hevc_sei.c
@ -213,6 +213,32 @@ static int decode_registered_user_data_closed_caption(HEVCSEIA53Caption *s, GetB
return 0;
}
static int decode_nal_sei_user_data_unregistered(HEVCSEIUnregistered *s, GetBitContext *gb,
int size)
{
AVBufferRef *buf_ref, **tmp;
if (size < 16 || size >= INT_MAX - 1)
return AVERROR_INVALIDDATA;
tmp = av_realloc_array(s->buf_ref, s->nb_buf_ref + 1, sizeof(*s->buf_ref));
if (!tmp)
return AVERROR(ENOMEM);
s->buf_ref = tmp;
buf_ref = av_buffer_alloc(size + 1);
if (!buf_ref)
return AVERROR(ENOMEM);
for (int i = 0; i < size; i++)
buf_ref->data[i] = get_bits(gb, 8);
buf_ref->data[size] = 0;
buf_ref->size = size;
s->buf_ref[s->nb_buf_ref++] = buf_ref;
return 0;
}
static int decode_nal_sei_user_data_registered_itu_t_t35(HEVCSEI *s, GetBitContext *gb,
int size)
{
@ -280,6 +306,53 @@ static int decode_nal_sei_alternative_transfer(HEVCSEIAlternativeTransfer *s, Ge
return 0;
}
static int decode_nal_sei_timecode(HEVCSEITimeCode *s, GetBitContext *gb)
{
s->num_clock_ts = get_bits(gb, 2);
for (int i = 0; i < s->num_clock_ts; i++) {
s->clock_timestamp_flag[i] = get_bits(gb, 1);
if (s->clock_timestamp_flag[i]) {
s->units_field_based_flag[i] = get_bits(gb, 1);
s->counting_type[i] = get_bits(gb, 5);
s->full_timestamp_flag[i] = get_bits(gb, 1);
s->discontinuity_flag[i] = get_bits(gb, 1);
s->cnt_dropped_flag[i] = get_bits(gb, 1);
s->n_frames[i] = get_bits(gb, 9);
if (s->full_timestamp_flag[i]) {
s->seconds_value[i] = av_clip(get_bits(gb, 6), 0, 59);
s->minutes_value[i] = av_clip(get_bits(gb, 6), 0, 59);
s->hours_value[i] = av_clip(get_bits(gb, 5), 0, 23);
} else {
s->seconds_flag[i] = get_bits(gb, 1);
if (s->seconds_flag[i]) {
s->seconds_value[i] = av_clip(get_bits(gb, 6), 0, 59);
s->minutes_flag[i] = get_bits(gb, 1);
if (s->minutes_flag[i]) {
s->minutes_value[i] = av_clip(get_bits(gb, 6), 0, 59);
s->hours_flag[i] = get_bits(gb, 1);
if (s->hours_flag[i]) {
s->hours_value[i] = av_clip(get_bits(gb, 5), 0, 23);
}
}
}
}
s->time_offset_length[i] = get_bits(gb, 5);
if (s->time_offset_length[i] > 0) {
s->time_offset_value[i] = get_bits(gb, s->time_offset_length[i]);
}
}
}
s->present = 1;
return 0;
}
static int decode_nal_sei_prefix(GetBitContext *gb, void *logctx, HEVCSEI *s,
const HEVCParamSets *ps, int type, int size)
{
@ -300,8 +373,12 @@ static int decode_nal_sei_prefix(GetBitContext *gb, void *logctx, HEVCSEI *s,
return decode_nal_sei_active_parameter_sets(s, gb, logctx);
case HEVC_SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35:
return decode_nal_sei_user_data_registered_itu_t_t35(s, gb, size);
case HEVC_SEI_TYPE_USER_DATA_UNREGISTERED:
return decode_nal_sei_user_data_unregistered(&s->unregistered, gb, size);
case HEVC_SEI_TYPE_ALTERNATIVE_TRANSFER_CHARACTERISTICS:
return decode_nal_sei_alternative_transfer(&s->alternative_transfer, gb);
case HEVC_SEI_TYPE_TIME_CODE:
return decode_nal_sei_timecode(&s->timecode, gb);
default:
av_log(logctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", type);
skip_bits_long(gb, 8 * size);
@ -371,4 +448,9 @@ int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s,
void ff_hevc_reset_sei(HEVCSEI *s)
{
av_buffer_unref(&s->a53_caption.buf_ref);
for (int i = 0; i < s->unregistered.nb_buf_ref; i++)
av_buffer_unref(&s->unregistered.buf_ref[i]);
s->unregistered.nb_buf_ref = 0;
av_freep(&s->unregistered.buf_ref);
}
externals/ffmpeg/libavcodec/hevc_sei.h
@ -91,6 +91,11 @@ typedef struct HEVCSEIA53Caption {
AVBufferRef *buf_ref;
} HEVCSEIA53Caption;
typedef struct HEVCSEIUnregistered {
AVBufferRef **buf_ref;
int nb_buf_ref;
} HEVCSEIUnregistered;
typedef struct HEVCSEIMasteringDisplay {
int present;
uint16_t display_primaries[3][2];
@ -110,16 +115,38 @@ typedef struct HEVCSEIAlternativeTransfer {
int preferred_transfer_characteristics;
} HEVCSEIAlternativeTransfer;
typedef struct HEVCSEITimeCode {
int present;
uint8_t num_clock_ts;
uint8_t clock_timestamp_flag[3];
uint8_t units_field_based_flag[3];
uint8_t counting_type[3];
uint8_t full_timestamp_flag[3];
uint8_t discontinuity_flag[3];
uint8_t cnt_dropped_flag[3];
uint16_t n_frames[3];
uint8_t seconds_value[3];
uint8_t minutes_value[3];
uint8_t hours_value[3];
uint8_t seconds_flag[3];
uint8_t minutes_flag[3];
uint8_t hours_flag[3];
uint8_t time_offset_length[3];
int32_t time_offset_value[3];
} HEVCSEITimeCode;
typedef struct HEVCSEI {
HEVCSEIPictureHash picture_hash;
HEVCSEIFramePacking frame_packing;
HEVCSEIDisplayOrientation display_orientation;
HEVCSEIPictureTiming picture_timing;
HEVCSEIA53Caption a53_caption;
HEVCSEIUnregistered unregistered;
HEVCSEIMasteringDisplay mastering_display;
HEVCSEIContentLight content_light;
int active_seq_parameter_set_id;
HEVCSEIAlternativeTransfer alternative_transfer;
HEVCSEITimeCode timecode;
} HEVCSEI;
struct HEVCParamSets;
externals/ffmpeg/libavcodec/hevcdec.c
@ -32,6 +32,7 @@
#include "libavutil/opt.h" #include "libavutil/opt.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "libavutil/stereo3d.h" #include "libavutil/stereo3d.h"
#include "libavutil/timecode.h"
#include "bswapdsp.h" #include "bswapdsp.h"
#include "bytestream.h" #include "bytestream.h"
@ -348,6 +349,15 @@ static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
}
avctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
if (sps->chroma_format_idc == 1) {
if (sps->vui.chroma_loc_info_present_flag) {
if (sps->vui.chroma_sample_loc_type_top_field <= 5)
avctx->chroma_sample_location = sps->vui.chroma_sample_loc_type_top_field + 1;
} else
avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
}
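/* Editorial note (not in the original source): the "+ 1" above works because
 * HEVC's chroma_sample_loc_type counts 0..5 while AVChromaLocation reserves
 * slot 0 for AVCHROMA_LOC_UNSPECIFIED, leaving the two scales one apart:
 *   0 -> AVCHROMA_LOC_LEFT        1 -> AVCHROMA_LOC_CENTER
 *   2 -> AVCHROMA_LOC_TOPLEFT     3 -> AVCHROMA_LOC_TOP
 *   4 -> AVCHROMA_LOC_BOTTOMLEFT  5 -> AVCHROMA_LOC_BOTTOM */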
if (vps->vps_timing_info_present_flag) {
num = vps->vps_num_units_in_tick;
den = vps->vps_time_scale;
@ -414,6 +424,9 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
*fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
#if CONFIG_HEVC_VDPAU_HWACCEL
*fmt++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_HEVC_NVDEC_HWACCEL
*fmt++ = AV_PIX_FMT_CUDA;
#endif
@ -435,6 +448,9 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
case AV_PIX_FMT_YUV420P12: case AV_PIX_FMT_YUV420P12:
case AV_PIX_FMT_YUV444P10: case AV_PIX_FMT_YUV444P10:
case AV_PIX_FMT_YUV444P12: case AV_PIX_FMT_YUV444P12:
#if CONFIG_HEVC_VDPAU_HWACCEL
*fmt++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_HEVC_NVDEC_HWACCEL #if CONFIG_HEVC_NVDEC_HWACCEL
*fmt++ = AV_PIX_FMT_CUDA; *fmt++ = AV_PIX_FMT_CUDA;
#endif #endif
@ -2794,6 +2810,46 @@ static int set_side_data(HEVCContext *s)
s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
} }
for (int i = 0; i < s->sei.unregistered.nb_buf_ref; i++) {
HEVCSEIUnregistered *unreg = &s->sei.unregistered;
if (unreg->buf_ref[i]) {
AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
AV_FRAME_DATA_SEI_UNREGISTERED,
unreg->buf_ref[i]);
if (!sd)
av_buffer_unref(&unreg->buf_ref[i]);
unreg->buf_ref[i] = NULL;
}
}
s->sei.unregistered.nb_buf_ref = 0;
if (s->sei.timecode.present) {
uint32_t *tc_sd;
char tcbuf[AV_TIMECODE_STR_SIZE];
AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
sizeof(uint32_t) * 4);
if (!tcside)
return AVERROR(ENOMEM);
tc_sd = (uint32_t*)tcside->data;
tc_sd[0] = s->sei.timecode.num_clock_ts;
for (int i = 0; i < tc_sd[0]; i++) {
int drop = s->sei.timecode.cnt_dropped_flag[i];
int hh = s->sei.timecode.hours_value[i];
int mm = s->sei.timecode.minutes_value[i];
int ss = s->sei.timecode.seconds_value[i];
int ff = s->sei.timecode.n_frames[i];
tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
av_timecode_make_smpte_tc_string(tcbuf, tc_sd[i + 1], 0);
av_dict_set(&out->metadata, "timecode", tcbuf, 0);
}
s->sei.timecode.num_clock_ts = 0;
}
return 0; return 0;
} }
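
For reference, a minimal consumer of the timecode side data exported above could look like the sketch below. It assumes only the documented AV_FRAME_DATA_S12M_TIMECODE layout (tc[0] = clock timestamp count, tc[1..3] = packed SMPTE values) and public libavutil calls:

#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/timecode.h>

static void print_s12m_timecodes(const AVFrame *frame)
{
    const AVFrameSideData *sd =
        av_frame_get_side_data(frame, AV_FRAME_DATA_S12M_TIMECODE);
    const uint32_t *tc;

    if (!sd || sd->size < 4 * sizeof(uint32_t))
        return;
    tc = (const uint32_t *)sd->data;
    for (uint32_t i = 0; i < tc[0] && i < 3; i++) { /* at most 3 clock timestamps */
        char buf[AV_TIMECODE_STR_SIZE];
        av_timecode_make_smpte_tc_string(buf, tc[i + 1], 0);
        printf("timecode[%u]: %s\n", i, buf);
    }
}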

View file

@ -171,12 +171,10 @@ av_cold struct FFIIRFilterCoeffs *ff_iir_filter_init_coeffs(void *avc,
if (order <= 0 || order > MAXORDER || cutoff_ratio >= 1.0) if (order <= 0 || order > MAXORDER || cutoff_ratio >= 1.0)
return NULL; return NULL;
FF_ALLOCZ_OR_GOTO(avc, c, sizeof(FFIIRFilterCoeffs), if (!(c = av_mallocz(sizeof(*c))) ||
init_fail); !(c->cx = av_malloc (sizeof(c->cx[0]) * ((order >> 1) + 1))) ||
FF_ALLOC_OR_GOTO(avc, c->cx, sizeof(c->cx[0]) * ((order >> 1) + 1), !(c->cy = av_malloc (sizeof(c->cy[0]) * order)))
init_fail); goto free;
FF_ALLOC_OR_GOTO(avc, c->cy, sizeof(c->cy[0]) * order,
init_fail);
c->order = order; c->order = order;
switch (filt_type) { switch (filt_type) {
@ -190,13 +188,12 @@ av_cold struct FFIIRFilterCoeffs *ff_iir_filter_init_coeffs(void *avc,
break; break;
default: default:
av_log(avc, AV_LOG_ERROR, "filter type is not currently implemented\n"); av_log(avc, AV_LOG_ERROR, "filter type is not currently implemented\n");
goto init_fail; goto free;
} }
if (!ret) if (!ret)
return c; return c;
free:
init_fail:
ff_iir_filter_free_coeffsp(&c); ff_iir_filter_free_coeffsp(&c);
return NULL; return NULL;
} }
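
The replacement above drops the FF_ALLOC*_OR_GOTO macros in favour of plain av_mallocz()/av_malloc() calls chained in one condition with a single cleanup path. A self-contained sketch of the idiom — FooCtx and its field names are illustrative:

#include <libavutil/mem.h>

typedef struct FooCtx { int *cx, *cy; } FooCtx;

static FooCtx *foo_alloc(int order)
{
    FooCtx *c;
    /* One condition covers all allocations; av_mallocz() zeroes the
     * struct, so a partial failure leaves the other pointers NULL. */
    if (!(c = av_mallocz(sizeof(*c))) ||
        !(c->cx = av_malloc(sizeof(c->cx[0]) * order)) ||
        !(c->cy = av_malloc(sizeof(c->cy[0]) * order)))
        goto fail;
    return c;
fail:
    if (c) {
        av_freep(&c->cx);
        av_freep(&c->cy);
    }
    av_freep(&c);
    return NULL;
}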

View file

@ -110,9 +110,12 @@
typedef struct DecodeSimpleContext { typedef struct DecodeSimpleContext {
AVPacket *in_pkt; AVPacket *in_pkt;
AVFrame *out_frame;
} DecodeSimpleContext; } DecodeSimpleContext;
typedef struct EncodeSimpleContext {
AVFrame *in_frame;
} EncodeSimpleContext;
typedef struct AVCodecInternal { typedef struct AVCodecInternal {
/** /**
* Whether the parent AVCodecContext is a copy of the context which had * Whether the parent AVCodecContext is a copy of the context which had
@ -151,6 +154,8 @@ typedef struct AVCodecInternal {
void *frame_thread_encoder; void *frame_thread_encoder;
EncodeSimpleContext es;
/** /**
* Number of audio samples to skip at the start of the next decoded frame * Number of audio samples to skip at the start of the next decoded frame
*/ */
@ -170,7 +175,6 @@ typedef struct AVCodecInternal {
* buffers for using new encode/decode API through legacy API * buffers for using new encode/decode API through legacy API
*/ */
AVPacket *buffer_pkt; AVPacket *buffer_pkt;
int buffer_pkt_valid; // encoding: packet without data can be valid
AVFrame *buffer_frame; AVFrame *buffer_frame;
int draining_done; int draining_done;
int compat_decode_warned; int compat_decode_warned;
@ -181,6 +185,7 @@ typedef struct AVCodecInternal {
* of the packet (that should be submitted in the next decode call */ * of the packet (that should be submitted in the next decode call */
size_t compat_decode_partial_size; size_t compat_decode_partial_size;
AVFrame *compat_decode_frame; AVFrame *compat_decode_frame;
AVPacket *compat_encode_packet;
int showed_multi_packet_warning; int showed_multi_packet_warning;
@ -373,6 +378,21 @@ AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx);
int ff_alloc_a53_sei(const AVFrame *frame, size_t prefix_len, int ff_alloc_a53_sei(const AVFrame *frame, size_t prefix_len,
void **data, size_t *sei_size); void **data, size_t *sei_size);
/**
* Check the AVFrame for S12M timecode side data and, if present, allocate and fill a TC SEI message with the timecode info
*
* @param frame Raw frame to get S12M timecode side data from
* @param prefix_len Number of bytes to allocate before SEI message
* @param data Pointer to a variable to store allocated memory
* Upon return the variable will hold NULL on error or if the frame has no S12M timecode info;
* otherwise it will point to prefix_len uninitialized bytes followed by
* an SEI message of *sei_size bytes
* @param sei_size Pointer to a variable to store generated SEI message length
* @return Zero on success, negative error code on failure
*/
int ff_alloc_timecode_sei(const AVFrame *frame, size_t prefix_len,
void **data, size_t *sei_size);
/** /**
* Get an estimated video bitrate based on frame size, frame rate and coded * Get an estimated video bitrate based on frame size, frame rate and coded
* bits per pixel. * bits per pixel.
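
A hedged usage sketch for the ff_alloc_timecode_sei() declaration above, following its documented contract; emit_sei_payload() is a hypothetical consumer, not part of the tree:

static int attach_timecode_sei(const AVFrame *frame)
{
    void  *data     = NULL;
    size_t sei_size = 0;
    int ret = ff_alloc_timecode_sei(frame, 0 /* prefix_len */, &data, &sei_size);

    if (ret < 0)
        return ret;
    if (data) { /* NULL when the frame carries no S12M side data */
        emit_sei_payload(data, sei_size); /* hypothetical consumer */
        av_free(data);
    }
    return 0;
}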

View file

@ -509,9 +509,6 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
// update precincts size: 2^n value // update precincts size: 2^n value
reslevel->log2_prec_width = codsty->log2_prec_widths[reslevelno]; reslevel->log2_prec_width = codsty->log2_prec_widths[reslevelno];
reslevel->log2_prec_height = codsty->log2_prec_heights[reslevelno]; reslevel->log2_prec_height = codsty->log2_prec_heights[reslevelno];
if (!reslevel->log2_prec_width || !reslevel->log2_prec_height) {
return AVERROR_INVALIDDATA;
}
/* Number of bands for each resolution level */ /* Number of bands for each resolution level */
if (reslevelno == 0) if (reslevelno == 0)

View file

@ -144,6 +144,7 @@ typedef struct Jpeg2000CodingStyle {
uint8_t prog_order; // progression order uint8_t prog_order; // progression order
uint8_t log2_prec_widths[JPEG2000_MAX_RESLEVELS]; // precincts size according resolution levels uint8_t log2_prec_widths[JPEG2000_MAX_RESLEVELS]; // precincts size according resolution levels
uint8_t log2_prec_heights[JPEG2000_MAX_RESLEVELS]; // TODO: initialize prec_size array with 0? uint8_t log2_prec_heights[JPEG2000_MAX_RESLEVELS]; // TODO: initialize prec_size array with 0?
uint8_t init;
} Jpeg2000CodingStyle; } Jpeg2000CodingStyle;
typedef struct Jpeg2000QuantStyle { typedef struct Jpeg2000QuantStyle {

View file

@ -269,6 +269,8 @@ static int get_siz(Jpeg2000DecoderContext *s)
const enum AVPixelFormat *possible_fmts = NULL; const enum AVPixelFormat *possible_fmts = NULL;
int possible_fmts_nb = 0; int possible_fmts_nb = 0;
int ret; int ret;
int o_dimx, o_dimy; //original image dimensions.
int dimx, dimy;
if (bytestream2_get_bytes_left(&s->g) < 36) { if (bytestream2_get_bytes_left(&s->g) < 36) {
av_log(s->avctx, AV_LOG_ERROR, "Insufficient space for SIZ\n"); av_log(s->avctx, AV_LOG_ERROR, "Insufficient space for SIZ\n");
@ -286,10 +288,6 @@ static int get_siz(Jpeg2000DecoderContext *s)
s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz
ncomponents = bytestream2_get_be16u(&s->g); // CSiz ncomponents = bytestream2_get_be16u(&s->g); // CSiz
if (s->image_offset_x || s->image_offset_y) {
avpriv_request_sample(s->avctx, "Support for image offsets");
return AVERROR_PATCHWELCOME;
}
if (av_image_check_size2(s->width, s->height, s->avctx->max_pixels, AV_PIX_FMT_NONE, 0, s->avctx)) { if (av_image_check_size2(s->width, s->height, s->avctx->max_pixels, AV_PIX_FMT_NONE, 0, s->avctx)) {
avpriv_request_sample(s->avctx, "Large Dimensions"); avpriv_request_sample(s->avctx, "Large Dimensions");
return AVERROR_PATCHWELCOME; return AVERROR_PATCHWELCOME;
@ -371,11 +369,18 @@ static int get_siz(Jpeg2000DecoderContext *s)
} }
/* compute image size with reduction factor */ /* compute image size with reduction factor */
ret = ff_set_dimensions(s->avctx, o_dimx = ff_jpeg2000_ceildivpow2(s->width - s->image_offset_x,
ff_jpeg2000_ceildivpow2(s->width - s->image_offset_x, s->reduction_factor);
s->reduction_factor), o_dimy = ff_jpeg2000_ceildivpow2(s->height - s->image_offset_y,
ff_jpeg2000_ceildivpow2(s->height - s->image_offset_y, s->reduction_factor);
s->reduction_factor)); dimx = ff_jpeg2000_ceildiv(o_dimx, s->cdx[0]);
dimy = ff_jpeg2000_ceildiv(o_dimy, s->cdy[0]);
for (i = 1; i < s->ncomponents; i++) {
dimx = FFMAX(dimx, ff_jpeg2000_ceildiv(o_dimx, s->cdx[i]));
dimy = FFMAX(dimy, ff_jpeg2000_ceildiv(o_dimy, s->cdy[i]));
}
ret = ff_set_dimensions(s->avctx, dimx, dimy);
if (ret < 0) if (ret < 0)
return ret; return ret;
@ -427,6 +432,18 @@ static int get_siz(Jpeg2000DecoderContext *s)
s->cdef[3] = 3; s->cdef[3] = 3;
i = 0; i = 0;
} }
} else if (ncomponents == 3 && s->precision == 8 &&
s->cdx[0] == s->cdx[1] && s->cdx[0] == s->cdx[2] &&
s->cdy[0] == s->cdy[1] && s->cdy[0] == s->cdy[2]) {
s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
i = 0;
} else if (ncomponents == 2 && s->precision == 8 &&
s->cdx[0] == s->cdx[1] && s->cdy[0] == s->cdy[1]) {
s->avctx->pix_fmt = AV_PIX_FMT_YA8;
i = 0;
} else if (ncomponents == 1 && s->precision == 8) {
s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
i = 0;
} }
} }
@ -558,7 +575,7 @@ static int get_cod(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c,
if ((ret = get_cox(s, &tmp)) < 0) if ((ret = get_cox(s, &tmp)) < 0)
return ret; return ret;
tmp.init = 1;
for (compno = 0; compno < s->ncomponents; compno++) for (compno = 0; compno < s->ncomponents; compno++)
if (!(properties[compno] & HAD_COC)) if (!(properties[compno] & HAD_COC))
memcpy(c + compno, &tmp, sizeof(tmp)); memcpy(c + compno, &tmp, sizeof(tmp));
@ -596,6 +613,7 @@ static int get_coc(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c,
return ret; return ret;
properties[compno] |= HAD_COC; properties[compno] |= HAD_COC;
c->init = 1;
return 0; return 0;
} }
@ -969,12 +987,11 @@ static int init_tile(Jpeg2000DecoderContext *s, int tileno)
comp->coord_o[0][1] = tile->coord[0][1]; comp->coord_o[0][1] = tile->coord[0][1];
comp->coord_o[1][0] = tile->coord[1][0]; comp->coord_o[1][0] = tile->coord[1][0];
comp->coord_o[1][1] = tile->coord[1][1]; comp->coord_o[1][1] = tile->coord[1][1];
if (compno) {
comp->coord_o[0][0] /= s->cdx[compno]; comp->coord_o[0][0] = ff_jpeg2000_ceildiv(comp->coord_o[0][0], s->cdx[compno]);
comp->coord_o[0][1] /= s->cdx[compno]; comp->coord_o[0][1] = ff_jpeg2000_ceildiv(comp->coord_o[0][1], s->cdx[compno]);
comp->coord_o[1][0] /= s->cdy[compno]; comp->coord_o[1][0] = ff_jpeg2000_ceildiv(comp->coord_o[1][0], s->cdy[compno]);
comp->coord_o[1][1] /= s->cdy[compno]; comp->coord_o[1][1] = ff_jpeg2000_ceildiv(comp->coord_o[1][1], s->cdy[compno]);
}
comp->coord[0][0] = ff_jpeg2000_ceildivpow2(comp->coord_o[0][0], s->reduction_factor); comp->coord[0][0] = ff_jpeg2000_ceildivpow2(comp->coord_o[0][0], s->reduction_factor);
comp->coord[0][1] = ff_jpeg2000_ceildivpow2(comp->coord_o[0][1], s->reduction_factor); comp->coord[0][1] = ff_jpeg2000_ceildivpow2(comp->coord_o[0][1], s->reduction_factor);
@ -983,7 +1000,8 @@ static int init_tile(Jpeg2000DecoderContext *s, int tileno)
if (!comp->roi_shift) if (!comp->roi_shift)
comp->roi_shift = s->roi_shift[compno]; comp->roi_shift = s->roi_shift[compno];
if (!codsty->init)
return AVERROR_INVALIDDATA;
if (ret = ff_jpeg2000_init_component(comp, codsty, qntsty, if (ret = ff_jpeg2000_init_component(comp, codsty, qntsty,
s->cbps[compno], s->cdx[compno], s->cbps[compno], s->cdx[compno],
s->cdy[compno], s->avctx)) s->cdy[compno], s->avctx))
@ -1927,18 +1945,23 @@ static inline void tile_codeblocks(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile
float *datap = comp->f_data; \ float *datap = comp->f_data; \
int32_t *i_datap = comp->i_data; \ int32_t *i_datap = comp->i_data; \
int cbps = s->cbps[compno]; \ int cbps = s->cbps[compno]; \
int w = tile->comp[compno].coord[0][1] - s->image_offset_x; \ int w = tile->comp[compno].coord[0][1] - \
ff_jpeg2000_ceildiv(s->image_offset_x, s->cdx[compno]); \
int h = tile->comp[compno].coord[1][1] - \
ff_jpeg2000_ceildiv(s->image_offset_y, s->cdy[compno]); \
int plane = 0; \ int plane = 0; \
\ \
if (planar) \ if (planar) \
plane = s->cdef[compno] ? s->cdef[compno]-1 : (s->ncomponents-1); \ plane = s->cdef[compno] ? s->cdef[compno]-1 : (s->ncomponents-1); \
\ \
y = tile->comp[compno].coord[1][0] - s->image_offset_y / s->cdy[compno]; \ y = tile->comp[compno].coord[1][0] - \
ff_jpeg2000_ceildiv(s->image_offset_y, s->cdy[compno]); \
line = (PIXEL *)picture->data[plane] + y * (picture->linesize[plane] / sizeof(PIXEL));\ line = (PIXEL *)picture->data[plane] + y * (picture->linesize[plane] / sizeof(PIXEL));\
for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y++) { \ for (; y < h; y++) { \
PIXEL *dst; \ PIXEL *dst; \
\ \
x = tile->comp[compno].coord[0][0] - s->image_offset_x / s->cdx[compno]; \ x = tile->comp[compno].coord[0][0] - \
ff_jpeg2000_ceildiv(s->image_offset_x, s->cdx[compno]); \
dst = line + x * pixelsize + compno*!planar; \ dst = line + x * pixelsize + compno*!planar; \
\ \
if (codsty->transform == FF_DWT97) { \ if (codsty->transform == FF_DWT97) { \
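
The repeated floor-to-ceiling changes in this file stem from one spec detail: ISO/IEC 15444-1 defines component grid coordinates as ceil(x / subsampling), so plain integer division under-counts whenever an image offset is not a multiple of the subsampling factor (offset 1 with chroma subsampled by 2: floor(1/2) = 0, but ceil(1/2) = 1). ff_jpeg2000_ceildiv in jpeg2000.h computes the ceiling form; roughly:

/* Rough equivalent of ff_jpeg2000_ceildiv(): ceiling integer division
 * for non-negative a and positive b. */
static inline int ceildiv(int a, int b)
{
    return (a + b - 1) / b;
}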

View file

@ -96,6 +96,16 @@ typedef struct AOMEncoderContext {
int enable_restoration; int enable_restoration;
int usage; int usage;
int tune; int tune;
int enable_rect_partitions;
int enable_1to4_partitions;
int enable_ab_partitions;
int enable_angle_delta;
int enable_cfl_intra;
int enable_paeth_intra;
int enable_smooth_intra;
int enable_intra_edge_filter;
int enable_palette;
int enable_filter_intra;
} AOMContext; } AOMContext;
static const char *const ctlidstr[] = { static const char *const ctlidstr[] = {
@ -135,6 +145,18 @@ static const char *const ctlidstr[] = {
#endif #endif
[AV1E_SET_ENABLE_CDEF] = "AV1E_SET_ENABLE_CDEF", [AV1E_SET_ENABLE_CDEF] = "AV1E_SET_ENABLE_CDEF",
[AOME_SET_TUNING] = "AOME_SET_TUNING", [AOME_SET_TUNING] = "AOME_SET_TUNING",
#if AOM_ENCODER_ABI_VERSION >= 22
[AV1E_SET_ENABLE_1TO4_PARTITIONS] = "AV1E_SET_ENABLE_1TO4_PARTITIONS",
[AV1E_SET_ENABLE_AB_PARTITIONS] = "AV1E_SET_ENABLE_AB_PARTITIONS",
[AV1E_SET_ENABLE_RECT_PARTITIONS] = "AV1E_SET_ENABLE_RECT_PARTITIONS",
[AV1E_SET_ENABLE_ANGLE_DELTA] = "AV1E_SET_ENABLE_ANGLE_DELTA",
[AV1E_SET_ENABLE_CFL_INTRA] = "AV1E_SET_ENABLE_CFL_INTRA",
[AV1E_SET_ENABLE_FILTER_INTRA] = "AV1E_SET_ENABLE_FILTER_INTRA",
[AV1E_SET_ENABLE_INTRA_EDGE_FILTER] = "AV1E_SET_ENABLE_INTRA_EDGE_FILTER",
[AV1E_SET_ENABLE_PAETH_INTRA] = "AV1E_SET_ENABLE_PAETH_INTRA",
[AV1E_SET_ENABLE_SMOOTH_INTRA] = "AV1E_SET_ENABLE_SMOOTH_INTRA",
[AV1E_SET_ENABLE_PALETTE] = "AV1E_SET_ENABLE_PALETTE",
#endif
}; };
static av_cold void log_encoder_error(AVCodecContext *avctx, const char *desc) static av_cold void log_encoder_error(AVCodecContext *avctx, const char *desc)
@ -698,6 +720,28 @@ static av_cold int aom_init(AVCodecContext *avctx,
codecctl_int(avctx, AV1E_SET_ENABLE_CDEF, ctx->enable_cdef); codecctl_int(avctx, AV1E_SET_ENABLE_CDEF, ctx->enable_cdef);
if (ctx->enable_restoration >= 0) if (ctx->enable_restoration >= 0)
codecctl_int(avctx, AV1E_SET_ENABLE_RESTORATION, ctx->enable_restoration); codecctl_int(avctx, AV1E_SET_ENABLE_RESTORATION, ctx->enable_restoration);
#if AOM_ENCODER_ABI_VERSION >= 22
if (ctx->enable_rect_partitions >= 0)
codecctl_int(avctx, AV1E_SET_ENABLE_RECT_PARTITIONS, ctx->enable_rect_partitions);
if (ctx->enable_1to4_partitions >= 0)
codecctl_int(avctx, AV1E_SET_ENABLE_1TO4_PARTITIONS, ctx->enable_1to4_partitions);
if (ctx->enable_ab_partitions >= 0)
codecctl_int(avctx, AV1E_SET_ENABLE_AB_PARTITIONS, ctx->enable_ab_partitions);
if (ctx->enable_angle_delta >= 0)
codecctl_int(avctx, AV1E_SET_ENABLE_ANGLE_DELTA, ctx->enable_angle_delta);
if (ctx->enable_cfl_intra >= 0)
codecctl_int(avctx, AV1E_SET_ENABLE_CFL_INTRA, ctx->enable_cfl_intra);
if (ctx->enable_filter_intra >= 0)
codecctl_int(avctx, AV1E_SET_ENABLE_FILTER_INTRA, ctx->enable_filter_intra);
if (ctx->enable_intra_edge_filter >= 0)
codecctl_int(avctx, AV1E_SET_ENABLE_INTRA_EDGE_FILTER, ctx->enable_intra_edge_filter);
if (ctx->enable_paeth_intra >= 0)
codecctl_int(avctx, AV1E_SET_ENABLE_PAETH_INTRA, ctx->enable_paeth_intra);
if (ctx->enable_smooth_intra >= 0)
codecctl_int(avctx, AV1E_SET_ENABLE_SMOOTH_INTRA, ctx->enable_smooth_intra);
if (ctx->enable_palette >= 0)
codecctl_int(avctx, AV1E_SET_ENABLE_PALETTE, ctx->enable_palette);
#endif
codecctl_int(avctx, AOME_SET_STATIC_THRESHOLD, ctx->static_thresh); codecctl_int(avctx, AOME_SET_STATIC_THRESHOLD, ctx->static_thresh);
if (ctx->crf >= 0) if (ctx->crf >= 0)
@ -1107,6 +1151,17 @@ static const AVOption options[] = {
{ "tune", "The metric that the encoder tunes for. Automatically chosen by the encoder by default", OFFSET(tune), AV_OPT_TYPE_INT, {.i64 = -1}, -1, AOM_TUNE_SSIM, VE, "tune"}, { "tune", "The metric that the encoder tunes for. Automatically chosen by the encoder by default", OFFSET(tune), AV_OPT_TYPE_INT, {.i64 = -1}, -1, AOM_TUNE_SSIM, VE, "tune"},
{ "psnr", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AOM_TUNE_PSNR}, 0, 0, VE, "tune"}, { "psnr", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AOM_TUNE_PSNR}, 0, 0, VE, "tune"},
{ "ssim", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AOM_TUNE_SSIM}, 0, 0, VE, "tune"}, { "ssim", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AOM_TUNE_SSIM}, 0, 0, VE, "tune"},
FF_AV1_PROFILE_OPTS
{ "enable-rect-partitions", "Enable rectangular partitions", OFFSET(enable_rect_partitions), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, VE},
{ "enable-1to4-partitions", "Enable 1:4/4:1 partitions", OFFSET(enable_1to4_partitions), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, VE},
{ "enable-ab-partitions", "Enable ab shape partitions", OFFSET(enable_ab_partitions), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, VE},
{ "enable-angle-delta", "Enable angle delta intra prediction", OFFSET(enable_angle_delta), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, VE},
{ "enable-cfl-intra", "Enable chroma predicted from luma intra prediction", OFFSET(enable_cfl_intra), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, VE},
{ "enable-filter-intra", "Enable filter intra predictor", OFFSET(enable_filter_intra), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, VE},
{ "enable-intra-edge-filter", "Enable intra edge filter", OFFSET(enable_intra_edge_filter), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, VE},
{ "enable-smooth-intra", "Enable smooth intra prediction mode", OFFSET(enable_smooth_intra), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, VE},
{ "enable-paeth-intra", "Enable paeth predictor in intra prediction", OFFSET(enable_paeth_intra), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, VE},
{ "enable-palette", "Enable palette prediction mode", OFFSET(enable_palette), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, VE},
{ NULL }, { NULL },
}; };
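
Since each new switch defaults to -1 (leave libaom's own default untouched), a caller opts in explicitly. A minimal sketch using the public AVOptions API — the local names and the choice of "enable-palette" are merely illustrative:

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static AVCodecContext *alloc_aom_with_palette(void)
{
    AVCodec *codec = avcodec_find_encoder_by_name("libaom-av1");
    AVCodecContext *enc = codec ? avcodec_alloc_context3(codec) : NULL;

    if (enc) /* private options such as those above live on priv_data */
        av_opt_set_int(enc->priv_data, "enable-palette", 1, 0);
    return enc; /* caller still configures and calls avcodec_open2() */
}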

View file

@ -30,12 +30,15 @@
#include "libavutil/opt.h" #include "libavutil/opt.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "avcodec.h" #include "avcodec.h"
#include "encode.h"
#include "internal.h" #include "internal.h"
typedef struct librav1eContext { typedef struct librav1eContext {
const AVClass *class; const AVClass *class;
RaContext *ctx; RaContext *ctx;
AVFrame *frame;
RaFrame *rframe;
AVBSFContext *bsf; AVBSFContext *bsf;
uint8_t *pass_data; uint8_t *pass_data;
@ -165,7 +168,12 @@ static av_cold int librav1e_encode_close(AVCodecContext *avctx)
rav1e_context_unref(ctx->ctx); rav1e_context_unref(ctx->ctx);
ctx->ctx = NULL; ctx->ctx = NULL;
} }
if (ctx->rframe) {
rav1e_frame_unref(ctx->rframe);
ctx->rframe = NULL;
}
av_frame_free(&ctx->frame);
av_bsf_free(&ctx->bsf); av_bsf_free(&ctx->bsf);
av_freep(&ctx->pass_data); av_freep(&ctx->pass_data);
@ -180,6 +188,10 @@ static av_cold int librav1e_encode_init(AVCodecContext *avctx)
int rret; int rret;
int ret = 0; int ret = 0;
ctx->frame = av_frame_alloc();
if (!ctx->frame)
return AVERROR(ENOMEM);
cfg = rav1e_config_default(); cfg = rav1e_config_default();
if (!cfg) { if (!cfg) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate rav1e config.\n"); av_log(avctx, AV_LOG_ERROR, "Could not allocate rav1e config.\n");
@ -416,18 +428,27 @@ end:
return ret; return ret;
} }
static int librav1e_send_frame(AVCodecContext *avctx, const AVFrame *frame) static int librav1e_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
{ {
librav1eContext *ctx = avctx->priv_data; librav1eContext *ctx = avctx->priv_data;
RaFrame *rframe = NULL; RaFrame *rframe = ctx->rframe;
RaPacket *rpkt = NULL;
int ret; int ret;
if (frame) { if (!rframe) {
AVFrame *frame = ctx->frame;
ret = ff_encode_get_frame(avctx, frame);
if (ret < 0 && ret != AVERROR_EOF)
return ret;
if (frame->buf[0]) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
rframe = rav1e_frame_new(ctx->ctx); rframe = rav1e_frame_new(ctx->ctx);
if (!rframe) { if (!rframe) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate new rav1e frame.\n"); av_log(avctx, AV_LOG_ERROR, "Could not allocate new rav1e frame.\n");
av_frame_unref(frame);
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} }
@ -438,17 +459,23 @@ static int librav1e_send_frame(AVCodecContext *avctx, const AVFrame *frame)
(frame->height >> shift) * frame->linesize[i], (frame->height >> shift) * frame->linesize[i],
frame->linesize[i], bytes); frame->linesize[i], bytes);
} }
av_frame_unref(frame);
}
} }
ret = rav1e_send_frame(ctx->ctx, rframe); ret = rav1e_send_frame(ctx->ctx, rframe);
if (rframe) if (rframe)
if (ret == RA_ENCODER_STATUS_ENOUGH_DATA) {
ctx->rframe = rframe; /* Queue is full. Store the RaFrame to retry next call */
} else {
rav1e_frame_unref(rframe); /* No need to unref if flushing. */ rav1e_frame_unref(rframe); /* No need to unref if flushing. */
ctx->rframe = NULL;
}
switch (ret) { switch (ret) {
case RA_ENCODER_STATUS_SUCCESS: case RA_ENCODER_STATUS_SUCCESS:
break;
case RA_ENCODER_STATUS_ENOUGH_DATA: case RA_ENCODER_STATUS_ENOUGH_DATA:
return AVERROR(EAGAIN); break;
case RA_ENCODER_STATUS_FAILURE: case RA_ENCODER_STATUS_FAILURE:
av_log(avctx, AV_LOG_ERROR, "Could not send frame: %s\n", rav1e_status_to_str(ret)); av_log(avctx, AV_LOG_ERROR, "Could not send frame: %s\n", rav1e_status_to_str(ret));
return AVERROR_EXTERNAL; return AVERROR_EXTERNAL;
@ -457,15 +484,6 @@ static int librav1e_send_frame(AVCodecContext *avctx, const AVFrame *frame)
return AVERROR_UNKNOWN; return AVERROR_UNKNOWN;
} }
return 0;
}
static int librav1e_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
{
librav1eContext *ctx = avctx->priv_data;
RaPacket *rpkt = NULL;
int ret;
retry: retry:
if (avctx->flags & AV_CODEC_FLAG_PASS1) { if (avctx->flags & AV_CODEC_FLAG_PASS1) {
@ -490,9 +508,7 @@ retry:
} }
return AVERROR_EOF; return AVERROR_EOF;
case RA_ENCODER_STATUS_ENCODED: case RA_ENCODER_STATUS_ENCODED:
if (avctx->internal->draining)
goto retry; goto retry;
return AVERROR(EAGAIN);
case RA_ENCODER_STATUS_NEED_MORE_DATA: case RA_ENCODER_STATUS_NEED_MORE_DATA:
if (avctx->internal->draining) { if (avctx->internal->draining) {
av_log(avctx, AV_LOG_ERROR, "Unexpected error when receiving packet after EOF.\n"); av_log(avctx, AV_LOG_ERROR, "Unexpected error when receiving packet after EOF.\n");
@ -592,7 +608,6 @@ AVCodec ff_librav1e_encoder = {
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_AV1, .id = AV_CODEC_ID_AV1,
.init = librav1e_encode_init, .init = librav1e_encode_init,
.send_frame = librav1e_send_frame,
.receive_packet = librav1e_receive_packet, .receive_packet = librav1e_receive_packet,
.close = librav1e_encode_close, .close = librav1e_encode_close,
.priv_data_size = sizeof(librav1eContext), .priv_data_size = sizeof(librav1eContext),
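
The librav1e conversion above follows the pull-based internal encode API: the .send_frame callback disappears, and .receive_packet fetches its own input with ff_encode_get_frame(), stashing any frame the encoder could not yet accept. A skeleton of the pattern — MyContext and the submit/pull helpers are illustrative, not real API:

static int my_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    MyContext *ctx = avctx->priv_data; /* hypothetical private context */
    int ret;

    if (!ctx->frame->buf[0]) { /* nothing queued from a previous full-queue case */
        ret = ff_encode_get_frame(avctx, ctx->frame);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret; /* includes AVERROR(EAGAIN): user must feed input */
    }
    ret = my_submit_frame(avctx, ctx->frame); /* hypothetical: may queue or flush */
    if (ret < 0)
        return ret;
    return my_pull_packet(avctx, pkt); /* hypothetical: EAGAIN if none ready */
}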

View file

@ -220,7 +220,7 @@ static int vpx_decode(AVCodecContext *avctx,
struct vpx_image *img, *img_alpha; struct vpx_image *img, *img_alpha;
int ret; int ret;
uint8_t *side_data = NULL; uint8_t *side_data = NULL;
int side_data_size = 0; int side_data_size;
ret = decode_frame(avctx, &ctx->decoder, avpkt->data, avpkt->size); ret = decode_frame(avctx, &ctx->decoder, avpkt->data, avpkt->size);
if (ret) if (ret)

View file

@ -310,8 +310,8 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
if (!cpb_props) if (!cpb_props)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
cpb_props->buffer_size = ctx->params->rc.vbvBufferSize * 1000; cpb_props->buffer_size = ctx->params->rc.vbvBufferSize * 1000;
cpb_props->max_bitrate = ctx->params->rc.vbvMaxBitrate * 1000; cpb_props->max_bitrate = ctx->params->rc.vbvMaxBitrate * 1000LL;
cpb_props->avg_bitrate = ctx->params->rc.bitrate * 1000; cpb_props->avg_bitrate = ctx->params->rc.bitrate * 1000LL;
if (!(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) if (!(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER))
ctx->params->bRepeatHeaders = 1; ctx->params->bRepeatHeaders = 1;
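
The LL suffix above is an overflow fix, not cosmetics: x265's rc fields are plain ints in kbps, and "x * 1000" is a 32-bit multiply that overflows once x exceeds INT_MAX / 1000 ≈ 2147483 kbps, even though the destination field is 64-bit. A demonstration under that assumption:

#include <stdint.h>

static void bitrate_scaling_demo(void)
{
    int kbps = 3000000;               /* example rate above INT_MAX / 1000 */
    int64_t wrong   = kbps * 1000;    /* 32-bit multiply overflows first (UB) */
    int64_t correct = kbps * 1000LL;  /* widened before multiplying: 3000000000 */
    (void)wrong; (void)correct;
}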

View file

@ -22,6 +22,7 @@
#define _WIN32_WINNT 0x0602 #define _WIN32_WINNT 0x0602
#endif #endif
#include "encode.h"
#include "mf_utils.h" #include "mf_utils.h"
#include "libavutil/imgutils.h" #include "libavutil/imgutils.h"
#include "libavutil/opt.h" #include "libavutil/opt.h"
@ -30,6 +31,7 @@
typedef struct MFContext { typedef struct MFContext {
AVClass *av_class; AVClass *av_class;
AVFrame *frame;
int is_video, is_audio; int is_video, is_audio;
GUID main_subtype; GUID main_subtype;
IMFTransform *mft; IMFTransform *mft;
@ -398,26 +400,6 @@ static int mf_send_sample(AVCodecContext *avctx, IMFSample *sample)
return 0; return 0;
} }
static int mf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
MFContext *c = avctx->priv_data;
int ret;
IMFSample *sample = NULL;
if (frame) {
sample = mf_avframe_to_sample(avctx, frame);
if (!sample)
return AVERROR(ENOMEM);
if (c->is_video && c->codec_api) {
if (frame->pict_type == AV_PICTURE_TYPE_I || !c->sample_sent)
ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncVideoForceKeyFrame, FF_VAL_VT_UI4(1));
}
}
ret = mf_send_sample(avctx, sample);
if (sample)
IMFSample_Release(sample);
return ret;
}
static int mf_receive_sample(AVCodecContext *avctx, IMFSample **out_sample) static int mf_receive_sample(AVCodecContext *avctx, IMFSample **out_sample)
{ {
MFContext *c = avctx->priv_data; MFContext *c = avctx->priv_data;
@ -500,9 +482,36 @@ static int mf_receive_sample(AVCodecContext *avctx, IMFSample **out_sample)
static int mf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) static int mf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{ {
IMFSample *sample; MFContext *c = avctx->priv_data;
IMFSample *sample = NULL;
int ret; int ret;
if (!c->frame->buf[0]) {
ret = ff_encode_get_frame(avctx, c->frame);
if (ret < 0 && ret != AVERROR_EOF)
return ret;
}
if (c->frame->buf[0]) {
sample = mf_avframe_to_sample(avctx, c->frame);
if (!sample) {
av_frame_unref(c->frame);
return AVERROR(ENOMEM);
}
if (c->is_video && c->codec_api) {
if (c->frame->pict_type == AV_PICTURE_TYPE_I || !c->sample_sent)
ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncVideoForceKeyFrame, FF_VAL_VT_UI4(1));
}
}
ret = mf_send_sample(avctx, sample);
if (sample)
IMFSample_Release(sample);
if (ret != AVERROR(EAGAIN))
av_frame_unref(c->frame);
if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
return ret;
ret = mf_receive_sample(avctx, &sample); ret = mf_receive_sample(avctx, &sample);
if (ret < 0) if (ret < 0)
return ret; return ret;
@ -1034,6 +1043,10 @@ static int mf_init(AVCodecContext *avctx)
const CLSID *subtype = ff_codec_to_mf_subtype(avctx->codec_id); const CLSID *subtype = ff_codec_to_mf_subtype(avctx->codec_id);
int use_hw = 0; int use_hw = 0;
c->frame = av_frame_alloc();
if (!c->frame)
return AVERROR(ENOMEM);
c->is_audio = avctx->codec_type == AVMEDIA_TYPE_AUDIO; c->is_audio = avctx->codec_type == AVMEDIA_TYPE_AUDIO;
c->is_video = !c->is_audio; c->is_video = !c->is_audio;
c->reorder_delay = AV_NOPTS_VALUE; c->reorder_delay = AV_NOPTS_VALUE;
@ -1122,6 +1135,8 @@ static int mf_close(AVCodecContext *avctx)
ff_free_mf(&c->mft); ff_free_mf(&c->mft);
av_frame_free(&c->frame);
av_freep(&avctx->extradata); av_freep(&avctx->extradata);
avctx->extradata_size = 0; avctx->extradata_size = 0;
@ -1146,7 +1161,6 @@ static int mf_close(AVCodecContext *avctx)
.priv_data_size = sizeof(MFContext), \ .priv_data_size = sizeof(MFContext), \
.init = mf_init, \ .init = mf_init, \
.close = mf_close, \ .close = mf_close, \
.send_frame = mf_send_frame, \
.receive_packet = mf_receive_packet, \ .receive_packet = mf_receive_packet, \
EXTRA \ EXTRA \
.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID, \ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID, \

View file

@ -531,7 +531,7 @@ static av_cold int mlp_encode_init(AVCodecContext *avctx)
av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d. Supported " av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d. Supported "
"sample rates are 44100, 88200, 176400, 48000, " "sample rates are 44100, 88200, 176400, 48000, "
"96000, and 192000.\n", avctx->sample_rate); "96000, and 192000.\n", avctx->sample_rate);
return -1; return AVERROR(EINVAL);
} }
ctx->coded_sample_rate[1] = -1 & 0xf; ctx->coded_sample_rate[1] = -1 & 0xf;
@ -564,7 +564,7 @@ static av_cold int mlp_encode_init(AVCodecContext *avctx)
default: default:
av_log(avctx, AV_LOG_ERROR, "Sample format not supported. " av_log(avctx, AV_LOG_ERROR, "Sample format not supported. "
"Only 16- and 24-bit samples are supported.\n"); "Only 16- and 24-bit samples are supported.\n");
return -1; return AVERROR(EINVAL);
} }
ctx->coded_sample_fmt[1] = -1 & 0xf; ctx->coded_sample_fmt[1] = -1 & 0xf;
@ -638,7 +638,7 @@ static av_cold int mlp_encode_init(AVCodecContext *avctx)
ctx->channel_arrangement = 12; break; ctx->channel_arrangement = 12; break;
default: default:
av_log(avctx, AV_LOG_ERROR, "Unsupported channel arrangement\n"); av_log(avctx, AV_LOG_ERROR, "Unsupported channel arrangement\n");
return -1; return AVERROR(EINVAL);
} }
ctx->flags = FLAGS_DVDA; ctx->flags = FLAGS_DVDA;
ctx->channel_occupancy = ff_mlp_ch_info[ctx->channel_arrangement].channel_occupancy; ctx->channel_occupancy = ff_mlp_ch_info[ctx->channel_arrangement].channel_occupancy;
@ -666,7 +666,7 @@ static av_cold int mlp_encode_init(AVCodecContext *avctx)
break; break;
default: default:
av_log(avctx, AV_LOG_ERROR, "Unsupported channel arrangement\n"); av_log(avctx, AV_LOG_ERROR, "Unsupported channel arrangement\n");
return -1; return AVERROR(EINVAL);
} }
ctx->flags = 0; ctx->flags = 0;
ctx->channel_occupancy = 0; ctx->channel_occupancy = 0;
@ -1190,7 +1190,7 @@ static unsigned int write_access_unit(MLPEncodeContext *ctx, uint8_t *buf,
int total_length; int total_length;
if (buf_size < 4) if (buf_size < 4)
return -1; return AVERROR(EINVAL);
/* Frame header will be written at the end. */ /* Frame header will be written at the end. */
buf += 4; buf += 4;
@ -1198,7 +1198,7 @@ static unsigned int write_access_unit(MLPEncodeContext *ctx, uint8_t *buf,
if (restart_frame) { if (restart_frame) {
if (buf_size < 28) if (buf_size < 28)
return -1; return AVERROR(EINVAL);
write_major_sync(ctx, buf, buf_size); write_major_sync(ctx, buf, buf_size);
buf += 28; buf += 28;
buf_size -= 28; buf_size -= 28;
@ -1820,7 +1820,8 @@ static int apply_filter(MLPEncodeContext *ctx, unsigned int channel)
if (!filter_state_buffer[i]) { if (!filter_state_buffer[i]) {
av_log(ctx->avctx, AV_LOG_ERROR, av_log(ctx->avctx, AV_LOG_ERROR,
"Not enough memory for applying filters.\n"); "Not enough memory for applying filters.\n");
return -1; ret = AVERROR(ENOMEM);
goto free_and_return;
} }
} }
@ -1848,7 +1849,7 @@ static int apply_filter(MLPEncodeContext *ctx, unsigned int channel)
residual = sample - (accum & mask); residual = sample - (accum & mask);
if (residual < SAMPLE_MIN(24) || residual > SAMPLE_MAX(24)) { if (residual < SAMPLE_MIN(24) || residual > SAMPLE_MAX(24)) {
ret = -1; ret = AVERROR_INVALIDDATA;
goto free_and_return; goto free_and_return;
} }
@ -2226,9 +2227,6 @@ static int mlp_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
if ((ret = ff_alloc_packet2(avctx, avpkt, 87500 * avctx->channels, 0)) < 0) if ((ret = ff_alloc_packet2(avctx, avpkt, 87500 * avctx->channels, 0)) < 0)
return ret; return ret;
if (!frame)
return 1;
/* add current frame to queue */ /* add current frame to queue */
if ((ret = ff_af_queue_add(&ctx->afq, frame)) < 0) if ((ret = ff_af_queue_add(&ctx->afq, frame)) < 0)
return ret; return ret;
@ -2267,7 +2265,7 @@ static int mlp_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
if (ctx->frame_size[ctx->frame_index] > MAX_BLOCKSIZE) { if (ctx->frame_size[ctx->frame_index] > MAX_BLOCKSIZE) {
av_log(avctx, AV_LOG_ERROR, "Invalid frame size (%d > %d)\n", av_log(avctx, AV_LOG_ERROR, "Invalid frame size (%d > %d)\n",
ctx->frame_size[ctx->frame_index], MAX_BLOCKSIZE); ctx->frame_size[ctx->frame_index], MAX_BLOCKSIZE);
return -1; return AVERROR_INVALIDDATA;
} }
restart_frame = !ctx->frame_index; restart_frame = !ctx->frame_index;
@ -2389,7 +2387,7 @@ AVCodec ff_mlp_encoder = {
.init = mlp_encode_init, .init = mlp_encode_init,
.encode2 = mlp_encode_frame, .encode2 = mlp_encode_frame,
.close = mlp_encode_close, .close = mlp_encode_close,
.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL, .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_EXPERIMENTAL,
.sample_fmts = (const enum AVSampleFormat[]) {AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE}, .sample_fmts = (const enum AVSampleFormat[]) {AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE},
.supported_samplerates = (const int[]) {44100, 48000, 88200, 96000, 176400, 192000, 0}, .supported_samplerates = (const int[]) {44100, 48000, 88200, 96000, 176400, 192000, 0},
.channel_layouts = ff_mlp_channel_layouts, .channel_layouts = ff_mlp_channel_layouts,
@ -2405,7 +2403,7 @@ AVCodec ff_truehd_encoder = {
.init = mlp_encode_init, .init = mlp_encode_init,
.encode2 = mlp_encode_frame, .encode2 = mlp_encode_frame,
.close = mlp_encode_close, .close = mlp_encode_close,
.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL, .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_EXPERIMENTAL,
.sample_fmts = (const enum AVSampleFormat[]) {AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE}, .sample_fmts = (const enum AVSampleFormat[]) {AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE},
.supported_samplerates = (const int[]) {44100, 48000, 88200, 96000, 176400, 192000, 0}, .supported_samplerates = (const int[]) {44100, 48000, 88200, 96000, 176400, 192000, 0},
.channel_layouts = (const uint64_t[]) {AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_5POINT0_BACK, AV_CH_LAYOUT_5POINT1_BACK, 0}, .channel_layouts = (const uint64_t[]) {AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_5POINT0_BACK, AV_CH_LAYOUT_5POINT1_BACK, 0},
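
The -1 to AVERROR(...) conversions above matter because FFmpeg's error space overlaps small negative integers: on a typical POSIX system AVERROR(EPERM) is exactly -1, so a bare -1 return is indistinguishable from a specific error code. A compile-time check of that collision (assumes C11 and POSIX errno values):

#include <errno.h>
#include <libavutil/error.h>

/* On POSIX, AVERROR(e) expands to -(e) and EPERM is 1, hence the collision. */
_Static_assert(AVERROR(EPERM) == -1, "bare -1 aliases AVERROR(EPERM)");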

View file

@ -57,8 +57,7 @@ typedef struct Mpeg1Context {
AVPanScan pan_scan; /* some temporary storage for the panscan */ AVPanScan pan_scan; /* some temporary storage for the panscan */
AVStereo3D stereo3d; AVStereo3D stereo3d;
int has_stereo3d; int has_stereo3d;
uint8_t *a53_caption; AVBufferRef *a53_buf_ref;
int a53_caption_size;
uint8_t afd; uint8_t afd;
int has_afd; int has_afd;
int slice_count; int slice_count;
@ -1635,13 +1634,13 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan)); memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
if (s1->a53_caption) { if (s1->a53_buf_ref) {
AVFrameSideData *sd = av_frame_new_side_data( AVFrameSideData *sd = av_frame_new_side_data_from_buf(
s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC, s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC,
s1->a53_caption_size); s1->a53_buf_ref);
if (sd) if (!sd)
memcpy(sd->data, s1->a53_caption, s1->a53_caption_size); av_buffer_unref(&s1->a53_buf_ref);
av_freep(&s1->a53_caption); s1->a53_buf_ref = NULL;
} }
if (s1->has_stereo3d) { if (s1->has_stereo3d) {
@ -2242,14 +2241,18 @@ static int mpeg_decode_a53_cc(AVCodecContext *avctx,
/* extract A53 Part 4 CC data */ /* extract A53 Part 4 CC data */
int cc_count = p[5] & 0x1f; int cc_count = p[5] & 0x1f;
if (cc_count > 0 && buf_size >= 7 + cc_count * 3) { if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
av_freep(&s1->a53_caption); int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
s1->a53_caption_size = cc_count * 3; const uint64_t new_size = (old_size + cc_count
s1->a53_caption = av_malloc(s1->a53_caption_size); * UINT64_C(3));
if (!s1->a53_caption) { int ret;
s1->a53_caption_size = 0;
} else { if (new_size > INT_MAX)
memcpy(s1->a53_caption, p + 7, s1->a53_caption_size); return AVERROR(EINVAL);
}
ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
if (ret >= 0)
memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
} }
return 1; return 1;
@ -2258,19 +2261,23 @@ static int mpeg_decode_a53_cc(AVCodecContext *avctx,
/* extract SCTE-20 CC data */ /* extract SCTE-20 CC data */
GetBitContext gb; GetBitContext gb;
int cc_count = 0; int cc_count = 0;
int i; int i, ret;
init_get_bits(&gb, p + 2, buf_size - 2); init_get_bits(&gb, p + 2, buf_size - 2);
cc_count = get_bits(&gb, 5); cc_count = get_bits(&gb, 5);
if (cc_count > 0) { if (cc_count > 0) {
av_freep(&s1->a53_caption); int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
s1->a53_caption_size = cc_count * 3; const uint64_t new_size = (old_size + cc_count
s1->a53_caption = av_mallocz(s1->a53_caption_size); * UINT64_C(3));
if (!s1->a53_caption) { if (new_size > INT_MAX)
s1->a53_caption_size = 0; return AVERROR(EINVAL);
} else {
ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
if (ret >= 0) {
uint8_t field, cc1, cc2; uint8_t field, cc1, cc2;
uint8_t *cap = s1->a53_caption; uint8_t *cap = s1->a53_buf_ref->data;
memset(s1->a53_buf_ref->data + old_size, 0, cc_count * 3);
for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) { for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
skip_bits(&gb, 2); // priority skip_bits(&gb, 2); // priority
field = get_bits(&gb, 2); field = get_bits(&gb, 2);
@ -2322,21 +2329,23 @@ static int mpeg_decode_a53_cc(AVCodecContext *avctx,
* on the even field. There also exist DVDs in the wild that encode an odd field count and the * on the even field. There also exist DVDs in the wild that encode an odd field count and the
* caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */ * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
int cc_count = 0; int cc_count = 0;
int i; int i, ret;
// There is a caption count field in the data, but it is often // There is a caption count field in the data, but it is often
// incorrect. So count the number of captions present. // incorrect. So count the number of captions present.
for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6) for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
cc_count++; cc_count++;
// Transform the DVD format into A53 Part 4 format // Transform the DVD format into A53 Part 4 format
if (cc_count > 0) { if (cc_count > 0) {
av_freep(&s1->a53_caption); int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
s1->a53_caption_size = cc_count * 6; const uint64_t new_size = (old_size + cc_count
s1->a53_caption = av_malloc(s1->a53_caption_size); * UINT64_C(6));
if (!s1->a53_caption) { if (new_size > INT_MAX)
s1->a53_caption_size = 0; return AVERROR(EINVAL);
} else {
ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
if (ret >= 0) {
uint8_t field1 = !!(p[4] & 0x80); uint8_t field1 = !!(p[4] & 0x80);
uint8_t *cap = s1->a53_caption; uint8_t *cap = s1->a53_buf_ref->data;
p += 5; p += 5;
for (i = 0; i < cc_count; i++) { for (i = 0; i < cc_count; i++) {
cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd; cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
@ -2846,6 +2855,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx, void *data,
s2->current_picture_ptr = NULL; s2->current_picture_ptr = NULL;
if (s2->timecode_frame_start != -1 && *got_output) { if (s2->timecode_frame_start != -1 && *got_output) {
char tcbuf[AV_TIMECODE_STR_SIZE];
AVFrameSideData *tcside = av_frame_new_side_data(picture, AVFrameSideData *tcside = av_frame_new_side_data(picture,
AV_FRAME_DATA_GOP_TIMECODE, AV_FRAME_DATA_GOP_TIMECODE,
sizeof(int64_t)); sizeof(int64_t));
@ -2853,6 +2863,9 @@ static int mpeg_decode_frame(AVCodecContext *avctx, void *data,
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
memcpy(tcside->data, &s2->timecode_frame_start, sizeof(int64_t)); memcpy(tcside->data, &s2->timecode_frame_start, sizeof(int64_t));
av_timecode_make_mpeg_tc_string(tcbuf, s2->timecode_frame_start);
av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
s2->timecode_frame_start = -1; s2->timecode_frame_start = -1;
} }
} }
@ -2873,9 +2886,8 @@ static av_cold int mpeg_decode_end(AVCodecContext *avctx)
{ {
Mpeg1Context *s = avctx->priv_data; Mpeg1Context *s = avctx->priv_data;
if (s->mpeg_enc_ctx_allocated)
ff_mpv_common_end(&s->mpeg_enc_ctx); ff_mpv_common_end(&s->mpeg_enc_ctx);
av_freep(&s->a53_caption); av_buffer_unref(&s->a53_buf_ref);
return 0; return 0;
} }
@ -2891,7 +2903,7 @@ AVCodec ff_mpeg1video_decoder = {
.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 | .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_SLICE_THREADS, AV_CODEC_CAP_SLICE_THREADS,
.caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM, .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM | FF_CODEC_CAP_INIT_CLEANUP,
.flush = flush, .flush = flush,
.max_lowres = 3, .max_lowres = 3,
.update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context), .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context),
@ -2924,7 +2936,7 @@ AVCodec ff_mpeg2video_decoder = {
.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 | .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_SLICE_THREADS, AV_CODEC_CAP_SLICE_THREADS,
.caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM, .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM | FF_CODEC_CAP_INIT_CLEANUP,
.flush = flush, .flush = flush,
.max_lowres = 3, .max_lowres = 3,
.profiles = NULL_IF_CONFIG_SMALL(ff_mpeg2_video_profiles), .profiles = NULL_IF_CONFIG_SMALL(ff_mpeg2_video_profiles),
@ -2968,7 +2980,7 @@ AVCodec ff_mpegvideo_decoder = {
.close = mpeg_decode_end, .close = mpeg_decode_end,
.decode = mpeg_decode_frame, .decode = mpeg_decode_frame,
.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS, .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
.caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM, .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM | FF_CODEC_CAP_INIT_CLEANUP,
.flush = flush, .flush = flush,
.max_lowres = 3, .max_lowres = 3,
}; };
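
All three caption paths above (A53, SCTE-20, DVD) now share one append pattern: keep a growing AVBufferRef, av_buffer_realloc() it, and copy the new bytes at the old end, with an INT_MAX guard since buffer sizes are ints in this API version. A condensed, self-contained sketch of that pattern:

#include <limits.h>
#include <stdint.h>
#include <string.h>
#include <libavutil/buffer.h>
#include <libavutil/error.h>

static int append_cc_data(AVBufferRef **buf, const uint8_t *src, int len)
{
    int old_size = *buf ? (*buf)->size : 0;
    uint64_t new_size = old_size + (uint64_t)len;
    int ret;

    if (new_size > INT_MAX)
        return AVERROR(EINVAL);
    ret = av_buffer_realloc(buf, new_size); /* preserves existing contents */
    if (ret >= 0)
        memcpy((*buf)->data + old_size, src, len);
    return ret; /* on failure the old buffer is left intact */
}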

View file

@ -137,7 +137,7 @@ static int mpeg2_metadata_update_fragment(AVBSFContext *bsf,
se->vertical_size_extension << 12 | sh->vertical_size_value, se->vertical_size_extension << 12 | sh->vertical_size_value,
}; };
err = ff_cbs_insert_unit_content(ctx->cbc, frag, se_pos + 1, err = ff_cbs_insert_unit_content(frag, se_pos + 1,
MPEG2_START_EXTENSION, MPEG2_START_EXTENSION,
&ctx->sequence_display_extension, &ctx->sequence_display_extension,
NULL); NULL);
@ -200,7 +200,7 @@ static int mpeg2_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
err = 0; err = 0;
fail: fail:
ff_cbs_fragment_reset(ctx->cbc, frag); ff_cbs_fragment_reset(frag);
if (err < 0) if (err < 0)
av_packet_unref(pkt); av_packet_unref(pkt);
@ -252,7 +252,7 @@ static int mpeg2_metadata_init(AVBSFContext *bsf)
err = 0; err = 0;
fail: fail:
ff_cbs_fragment_reset(ctx->cbc, frag); ff_cbs_fragment_reset(frag);
return err; return err;
} }
@ -260,7 +260,7 @@ static void mpeg2_metadata_close(AVBSFContext *bsf)
{ {
MPEG2MetadataContext *ctx = bsf->priv_data; MPEG2MetadataContext *ctx = bsf->priv_data;
ff_cbs_fragment_free(ctx->cbc, &ctx->fragment); ff_cbs_fragment_free(&ctx->fragment);
ff_cbs_close(&ctx->cbc); ff_cbs_close(&ctx->cbc);
} }

View file

@ -3603,7 +3603,8 @@ AVCodec ff_mpeg4_decoder = {
AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_FRAME_THREADS, AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM | .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM |
FF_CODEC_CAP_ALLOCATE_PROGRESS, FF_CODEC_CAP_ALLOCATE_PROGRESS |
FF_CODEC_CAP_INIT_CLEANUP,
.flush = ff_mpeg_flush, .flush = ff_mpeg_flush,
.max_lowres = 3, .max_lowres = 3,
.pix_fmts = ff_h263_hwaccel_pixfmt_list_420, .pix_fmts = ff_h263_hwaccel_pixfmt_list_420,

View file

@ -78,20 +78,15 @@ int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me,
// at uvlinesize. It supports only YUV420 so 24x24 is enough // at uvlinesize. It supports only YUV420 so 24x24 is enough
// linesize * interlaced * MBsize // linesize * interlaced * MBsize
// we also use this buffer for encoding in encode_mb_internal() needing an additional 32 lines // we also use this buffer for encoding in encode_mb_internal() needing an additional 32 lines
FF_ALLOCZ_ARRAY_OR_GOTO(avctx, sc->edge_emu_buffer, alloc_size, EMU_EDGE_HEIGHT, if (!FF_ALLOCZ_TYPED_ARRAY(sc->edge_emu_buffer, alloc_size * EMU_EDGE_HEIGHT) ||
fail); !FF_ALLOCZ_TYPED_ARRAY(me->scratchpad, alloc_size * 4 * 16 * 2))
return AVERROR(ENOMEM);
FF_ALLOCZ_ARRAY_OR_GOTO(avctx, me->scratchpad, alloc_size, 4 * 16 * 2,
fail)
me->temp = me->scratchpad; me->temp = me->scratchpad;
sc->rd_scratchpad = me->scratchpad; sc->rd_scratchpad = me->scratchpad;
sc->b_scratchpad = me->scratchpad; sc->b_scratchpad = me->scratchpad;
sc->obmc_scratchpad = me->scratchpad + 16; sc->obmc_scratchpad = me->scratchpad + 16;
return 0; return 0;
fail:
av_freep(&sc->edge_emu_buffer);
return AVERROR(ENOMEM);
} }
/** /**
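
FF_ALLOCZ_TYPED_ARRAY, which replaces the OR_GOTO macros above, derives the element size from the pointer's own type so the nelem/size pair cannot get out of sync; roughly (my paraphrase of the internal.h definition, not a verbatim copy):

/* Rough shape of the macro: assign and test in one expression, with the
 * element size taken from the pointed-to type. */
#define ALLOCZ_TYPED_ARRAY(p, nelem) (p = av_mallocz_array(nelem, sizeof(*p)))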

Some files were not shown because too many files have changed in this diff.