[FFmpeg-devel] [PATCH] avcodec/hevcdec: set the SEI parameters early on the AVCodecContext
It's better to do it before the buffers are actually created. At least in VLC we currently don't support changing some parameters dynamically easily so we don't use the information if it comes after the buffer are created. Co-authored-by: James Almer --- The same problem may exist with H264 alternative_transfer but I don't have a sample to test with and the code seems a bit different. --- libavcodec/hevcdec.c | 18 +++--- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/libavcodec/hevcdec.c b/libavcodec/hevcdec.c index 515b346535..f54f46aa5d 100644 --- a/libavcodec/hevcdec.c +++ b/libavcodec/hevcdec.c @@ -313,6 +313,7 @@ static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb) static void export_stream_params(AVCodecContext *avctx, const HEVCParamSets *ps, const HEVCSPS *sps) { +const HEVCContext *s = avctx->priv_data; const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data; const HEVCWindow *ow = &sps->output_window; unsigned int num = 0, den = 0; @@ -355,6 +356,16 @@ static void export_stream_params(AVCodecContext *avctx, const HEVCParamSets *ps, if (num != 0 && den != 0) av_reduce(&avctx->framerate.den, &avctx->framerate.num, num, den, 1 << 30); + +if (s->sei.a53_caption.a53_caption) { +avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; +} + +if (s->sei.alternative_transfer.present && + av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) && +s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) { +avctx->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics; +} } static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps) @@ -2775,13 +2786,6 @@ static int set_side_data(HEVCContext *s) memcpy(sd->data, s->sei.a53_caption.a53_caption, s->sei.a53_caption.a53_caption_size); av_freep(&s->sei.a53_caption.a53_caption); s->sei.a53_caption.a53_caption_size = 0; -s->avctx->properties |= 
FF_CODEC_PROPERTY_CLOSED_CAPTIONS; -} - -if (s->sei.alternative_transfer.present && - av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) && -s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) { -s->avctx->color_trc = out->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics; } return 0; -- 2.17.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [FFMPEG DEVEL] [PATCH v6] fftools/ffprobe: Add S12M Timecode output as side data (such as SEI TC)
--- Add S12M Timecode output with the show_frame option Multiple timecodes (3) for one frame support Control side date Size to 16 Correct ffrpobe.xsd to allow multiple timecodes in side_data element --- doc/ffprobe.xsd | 8 fftools/ffprobe.c | 14 +- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/doc/ffprobe.xsd b/doc/ffprobe.xsd index 3e58da0f46..818e54c2c5 100644 --- a/doc/ffprobe.xsd +++ b/doc/ffprobe.xsd @@ -147,11 +147,19 @@ + + + + + + + + diff --git a/fftools/ffprobe.c b/fftools/ffprobe.c index dea489d02e..6e290d5d88 100644 --- a/fftools/ffprobe.c +++ b/fftools/ffprobe.c @@ -165,6 +165,7 @@ typedef enum { SECTION_ID_FRAME_TAGS, SECTION_ID_FRAME_SIDE_DATA_LIST, SECTION_ID_FRAME_SIDE_DATA, +SECTION_ID_FRAME_SIDE_DATA_TIMECODE, SECTION_ID_FRAME_LOG, SECTION_ID_FRAME_LOGS, SECTION_ID_LIBRARY_VERSION, @@ -209,7 +210,8 @@ static struct section sections[] = { [SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, SECTION_ID_FRAME_SIDE_DATA_LIST, SECTION_ID_FRAME_LOGS, -1 } }, [SECTION_ID_FRAME_TAGS] = { SECTION_ID_FRAME_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "frame_tags" }, [SECTION_ID_FRAME_SIDE_DATA_LIST] ={ SECTION_ID_FRAME_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA, -1 }, .element_name = "side_data", .unique_name = "frame_side_data_list" }, -[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { -1 } }, +[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, -1 } }, +[SECTION_ID_FRAME_SIDE_DATA_TIMECODE] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, "timecode", 0, { -1 } }, [SECTION_ID_FRAME_LOGS] = { SECTION_ID_FRAME_LOGS, "logs", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_LOG, -1 } }, [SECTION_ID_FRAME_LOG] = { SECTION_ID_FRAME_LOG, "log", 0, { -1 }, }, [SECTION_ID_LIBRARY_VERSIONS] = { SECTION_ID_LIBRARY_VERSIONS, "library_versions", 
SECTION_FLAG_IS_ARRAY, { SECTION_ID_LIBRARY_VERSION, -1 } }, @@ -2199,6 +2201,16 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream, char tcbuf[AV_TIMECODE_STR_SIZE]; av_timecode_make_mpeg_tc_string(tcbuf, *(int64_t *)(sd->data)); print_str("timecode", tcbuf); +} else if (sd->type == AV_FRAME_DATA_S12M_TIMECODE && sd->size == 16) { +uint32_t *tc = (uint32_t*)sd->data; +int m = FFMIN(tc[0],3); +for (int j = 1; j <= m ; j++) { +char tcbuf[AV_TIMECODE_STR_SIZE]; +av_timecode_make_smpte_tc_string(tcbuf, tc[j], 0); +writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_TIMECODE); +print_str("value", tcbuf); +writer_print_section_footer(w); +} } else if (sd->type == AV_FRAME_DATA_MASTERING_DISPLAY_METADATA) { AVMasteringDisplayMetadata *metadata = (AVMasteringDisplayMetadata *)sd->data; -- 2.11.0 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 2/2] avfilter/f_loop: warn about unset loop sizes
On 5/23/19, Marton Balint wrote: > Signed-off-by: Marton Balint > --- > libavfilter/f_loop.c | 13 + > 1 file changed, 13 insertions(+) > > diff --git a/libavfilter/f_loop.c b/libavfilter/f_loop.c > index fcbd742eb4..5ec44d9da2 100644 > --- a/libavfilter/f_loop.c > +++ b/libavfilter/f_loop.c > @@ -55,6 +55,15 @@ typedef struct LoopContext { > #define VFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM > #define OFFSET(x) offsetof(LoopContext, x) > > +static void check_size(AVFilterContext *ctx) > +{ > +LoopContext *s = ctx->priv; > + > +if (!s->size) > +av_log(ctx, AV_LOG_WARNING, "Number of %s to loop is not set!\n", > + ctx->input_pads[0].type == AVMEDIA_TYPE_VIDEO ? "frames" : > "samples"); > +} > + > #if CONFIG_ALOOP_FILTER > > static int aconfig_input(AVFilterLink *inlink) > @@ -67,6 +76,8 @@ static int aconfig_input(AVFilterLink *inlink) > if (!s->fifo || !s->left) > return AVERROR(ENOMEM); > > +check_size(ctx); > + > return 0; > } > > @@ -250,6 +261,8 @@ static av_cold int init(AVFilterContext *ctx) > if (!s->frames) > return AVERROR(ENOMEM); > > +check_size(ctx); > + > return 0; > } > lgtm > -- > 2.16.4 > > ___ > ffmpeg-devel mailing list > ffmpeg-devel@ffmpeg.org > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel > > To unsubscribe, visit link above, or email > ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe". ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] libavfilter/dnn_native: Add support of dilated convolution in dnn_native.
Xuewei Meng 于2019年5月22日周三 下午9:09写道: > > Add dilation parameter in dnn native to support dilated convolution. > > Signed-off-by: Xuewei Meng > --- > libavfilter/dnn_backend_native.c | 17 + > libavfilter/dnn_backend_native.h | 1 + > 2 files changed, 10 insertions(+), 8 deletions(-) > > diff --git a/libavfilter/dnn_backend_native.c > b/libavfilter/dnn_backend_native.c > index 3c8465a283..82e900bd8c 100644 > --- a/libavfilter/dnn_backend_native.c > +++ b/libavfilter/dnn_backend_native.c > @@ -63,7 +63,7 @@ static DNNReturnType set_input_output_native(void *model, > DNNInputData *input, c > cur_channels = conv_params->output_num; > > if (conv_params->padding_method == VALID) { > -int pad_size = conv_params->kernel_size - 1; > +int pad_size = (conv_params->kernel_size - 1) * > conv_params->dilation; > cur_height -= pad_size; > cur_width -= pad_size; > } > @@ -164,6 +164,7 @@ DNNModel *ff_dnn_load_model_native(const char > *model_filename) > ff_dnn_free_model_native(&model); > return NULL; > } > +conv_params->dilation = (int32_t)avio_rl32(model_file_context); > conv_params->padding_method = > (int32_t)avio_rl32(model_file_context); > conv_params->activation = (int32_t)avio_rl32(model_file_context); > conv_params->input_num = (int32_t)avio_rl32(model_file_context); > @@ -171,7 +172,7 @@ DNNModel *ff_dnn_load_model_native(const char > *model_filename) > conv_params->kernel_size = > (int32_t)avio_rl32(model_file_context); > kernel_size = conv_params->input_num * conv_params->output_num * >conv_params->kernel_size * > conv_params->kernel_size; > -dnn_size += 20 + (kernel_size + conv_params->output_num << 2); > +dnn_size += 24 + (kernel_size + conv_params->output_num << 2); > if (dnn_size > file_size || conv_params->input_num <= 0 || > conv_params->output_num <= 0 || conv_params->kernel_size <= > 0){ > avio_closep(&model_file_context); > @@ -233,7 +234,7 @@ static void convolve(const float *input, float *output, > const ConvolutionalParam > int src_linesize = width * 
conv_params->input_num; > int filter_linesize = conv_params->kernel_size * conv_params->input_num; > int filter_size = conv_params->kernel_size * filter_linesize; > -int pad_size = (conv_params->padding_method == VALID) ? > (conv_params->kernel_size - 1) / 2 : 0; > +int pad_size = (conv_params->padding_method == VALID) ? > (conv_params->kernel_size - 1) / 2 * conv_params->dilation : 0; > > for (int y = pad_size; y < height - pad_size; ++y) { > for (int x = pad_size; x < width - pad_size; ++x) { > @@ -245,12 +246,12 @@ static void convolve(const float *input, float *output, > const ConvolutionalParam > for (int kernel_x = 0; kernel_x < > conv_params->kernel_size; ++kernel_x) { > float input_pel; > if (conv_params->padding_method == > SAME_CLAMP_TO_EDGE) { > -int y_pos = CLAMP_TO_EDGE(y + kernel_y - > radius, height); > -int x_pos = CLAMP_TO_EDGE(x + kernel_x - > radius, width); > +int y_pos = CLAMP_TO_EDGE(y + (kernel_y - > radius) * conv_params->dilation, height); > +int x_pos = CLAMP_TO_EDGE(x + (kernel_x - > radius) * conv_params->dilation, width); > input_pel = input[y_pos * src_linesize + > x_pos * conv_params->input_num + ch]; > } else { > -int y_pos = y + kernel_y - radius; > -int x_pos = x + kernel_x - radius; > +int y_pos = y + (kernel_y - radius) * > conv_params->dilation; > +int x_pos = x + (kernel_x - radius) * > conv_params->dilation; > input_pel = (x_pos < 0 || x_pos >= width || > y_pos < 0 || y_pos >= height) ? 0.0 : > input[y_pos * > src_linesize + x_pos * conv_params->input_num + ch]; > } > @@ -334,7 +335,7 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel > *model, DNNData *output > convolve(network->layers[layer - 1].output, > network->layers[layer].output, conv_params, cur_width, cur_height); > cur_channels = conv_params->output_num; > if (conv_params->padding_method == VALID) { > -int pad_size = conv_params->kernel_size - 1; > +int pad_size = (conv_params->kernel_size - 1) * > conv_params->dilation; > cur_height -= pad_size; >
Re: [FFmpeg-devel] [PATCH 1/2] avfilter/f_loop: fix video loop issues with 0 size or when size is bigger than input
On 5/23/19, Marton Balint wrote: > Fixes infinte loop with -vf loop=loop=1 and also fixes looping when the > input > is less frames than the specified loop size. > > Possible regressions since ef1aadffc785b48ed62c45d954289e754f43ef46. > > Signed-off-by: Marton Balint > --- > libavfilter/f_loop.c | 8 +--- > 1 file changed, 5 insertions(+), 3 deletions(-) > > diff --git a/libavfilter/f_loop.c b/libavfilter/f_loop.c > index d9d55f9837..fcbd742eb4 100644 > --- a/libavfilter/f_loop.c > +++ b/libavfilter/f_loop.c > @@ -343,7 +343,7 @@ static int activate(AVFilterContext *ctx) > > FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink); > > -if (!s->eof && (s->nb_frames < s->size || !s->loop)) { > +if (!s->eof && (s->nb_frames < s->size || !s->loop || !s->size)) { > ret = ff_inlink_consume_frame(inlink, &frame); > if (ret < 0) > return ret; > @@ -352,11 +352,13 @@ static int activate(AVFilterContext *ctx) > } > > if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) { > -if (status == AVERROR_EOF) > +if (status == AVERROR_EOF) { > +s->size = s->nb_frames; > s->eof = 1; > +} > } > > -if (s->eof && (s->loop == 0 || s->nb_frames < s->size)) { > +if (s->eof && (!s->loop || !s->size)) { > ff_outlink_set_status(outlink, AVERROR_EOF, s->duration); > return 0; > } > -- > 2.16.4 > lgtm > ___ > ffmpeg-devel mailing list > ffmpeg-devel@ffmpeg.org > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel > > To unsubscribe, visit link above, or email > ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe". ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH v3] avformat/ifv: added support for ifv cctv files
On 5/15/19, Swaraj Hota wrote: > Fixes ticket #2956. > > Signed-off-by: Swaraj Hota > --- > Revised patch based on previous discussions. > Some of the changes are: > - using AVIndexEntry now > - demuxer is totally index based (removed linear search) > - added seeking functionality with timestamps > > There are some timing issues though, due to which seeking does not > work in the files with audio (works fine for files without audio). > I tried a lot but couldn't figure it out, maybe I don't understand > timing stuff clearly. Any suggestions regarding this will be really > helpful. Thanks in advance! > > --- > Changelog| 1 + > libavformat/Makefile | 1 + > libavformat/allformats.c | 1 + > libavformat/ifv.c| 316 +++ > libavformat/version.h| 4 +- > 5 files changed, 321 insertions(+), 2 deletions(-) > create mode 100644 libavformat/ifv.c > > diff --git a/Changelog b/Changelog > index e6b209ae0a..e0b27657d7 100644 > --- a/Changelog > +++ b/Changelog > @@ -30,6 +30,7 @@ version : > - colorhold filter > - xmedian filter > - asr filter > +- IFV demuxer > > > version 4.1: > diff --git a/libavformat/Makefile b/libavformat/Makefile > index 99be60d184..f68d41e4a5 100644 > --- a/libavformat/Makefile > +++ b/libavformat/Makefile > @@ -231,6 +231,7 @@ OBJS-$(CONFIG_ICO_MUXER) += icoenc.o > OBJS-$(CONFIG_IDCIN_DEMUXER) += idcin.o > OBJS-$(CONFIG_IDF_DEMUXER) += bintext.o sauce.o > OBJS-$(CONFIG_IFF_DEMUXER) += iff.o > +OBJS-$(CONFIG_IFV_DEMUXER) += ifv.o > OBJS-$(CONFIG_ILBC_DEMUXER) += ilbc.o > OBJS-$(CONFIG_ILBC_MUXER)+= ilbc.o > OBJS-$(CONFIG_IMAGE2_DEMUXER)+= img2dec.o img2.o > diff --git a/libavformat/allformats.c b/libavformat/allformats.c > index d316a0529a..cd00834807 100644 > --- a/libavformat/allformats.c > +++ b/libavformat/allformats.c > @@ -188,6 +188,7 @@ extern AVOutputFormat ff_ico_muxer; > extern AVInputFormat ff_idcin_demuxer; > extern AVInputFormat ff_idf_demuxer; > extern AVInputFormat ff_iff_demuxer; > +extern AVInputFormat ff_ifv_demuxer; > extern 
AVInputFormat ff_ilbc_demuxer; > extern AVOutputFormat ff_ilbc_muxer; > extern AVInputFormat ff_image2_demuxer; > diff --git a/libavformat/ifv.c b/libavformat/ifv.c > new file mode 100644 > index 00..c834b3b63c > --- /dev/null > +++ b/libavformat/ifv.c > @@ -0,0 +1,316 @@ > +/* > + * IFV demuxer > + * > + * Copyright (c) 2019 Swaraj Hota > + * > + * This file is part of FFmpeg. > + * > + * FFmpeg is free software; you can redistribute it and/or > + * modify it under the terms of the GNU Lesser General Public > + * License as published by the Free Software Foundation; either > + * version 2.1 of the License, or (at your option) any later version. > + * > + * FFmpeg is distributed in the hope that it will be useful, > + * but WITHOUT ANY WARRANTY; without even the implied warranty of > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU > + * Lesser General Public License for more details. > + * > + * You should have received a copy of the GNU Lesser General Public > + * License along with FFmpeg; if not, write to the Free Software > + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 > USA > + */ > + > +#include "avformat.h" > +#include "internal.h" > +#include "avio_internal.h" > + > + > +typedef struct IFVContext { > +uint32_t next_video_index; > +uint32_t next_audio_index; > +uint32_t total_vframes; > +uint32_t total_aframes; > + > +int width, height; > +int is_audio_present; > +int sample_rate; > + > +int video_stream_index; > +int audio_stream_index; > +} IFVContext; > + > +static int ifv_probe(const AVProbeData *p) > +{ > +static const uint8_t ifv_magic[] = {0x11, 0xd2, 0xd3, 0xab, 0xba, 0xa9, > +0xcf, 0x11, 0x8e, 0xe6, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65, 0x44}; > + > +if (!memcmp(p->buf, ifv_magic, sizeof(ifv_magic))) > +return AVPROBE_SCORE_MAX; > + > +return 0; > +} > + > +static int read_index(AVFormatContext *s, > + enum AVMediaType frame_type, > + uint32_t start_index) > +{ > +IFVContext *ifv = s->priv_data; > 
+AVStream *st; > +int64_t pos, size, timestamp; > +uint32_t end_index, i; > +int ret; > + > +if (frame_type == AVMEDIA_TYPE_VIDEO) { > +end_index = ifv->total_vframes; > +st = s->streams[ifv->video_stream_index]; > +} else { > +end_index = ifv->total_aframes; > +st = s->streams[ifv->audio_stream_index]; > +} > + > +for (i = start_index; i < end_index; i++) { > +pos = avio_rl32(s->pb); > +size = avio_rl32(s->pb); > + > +avio_skip(s->pb, 8); > +timestamp = avio_rl32(s->pb); You sure about this? Doing real reverse engineering of relevant application would show how timestamps a
Re: [FFmpeg-devel] [PATCH 2/3] avcodec/nvenc: add master display and light level sei for HDR10
On Wed, May 22, 2019 at 9:54 PM Timo Rothenpieler wrote: > On 22.05.2019 08:59, lance.lmw...@gmail.com wrote: > > From: Limin Wang > > > > The testing command for the HDR10 output with nvenc: > > $ ./ffmpeg_g -y -i 4K.mp4 -c:v hevc_nvenc -g 7 -color_primaries bt2020 > -colorspace bt2020_ncl -color_trc smpte2084 -sei hdr10 \ > > -master_display > "G(13250,34500)B(7500,3000)R(34000,16000)WP(15635,16450)L(1000,50)" > -max_cll "0, 0" test.ts > > > > Please notice it is preferable to use the frame sei side data than > master_display and max_cll paramters config > > --- > > libavcodec/nvenc.c | 129 > > libavcodec/nvenc.h | 18 ++ > > libavcodec/nvenc_hevc.c | 11 > > 3 files changed, 158 insertions(+) > > > > diff --git a/libavcodec/nvenc.c b/libavcodec/nvenc.c > > index 75dda6d689..3fd0eca4a5 100644 > > --- a/libavcodec/nvenc.c > > +++ b/libavcodec/nvenc.c > > @@ -22,6 +22,9 @@ > > #include "config.h" > > > > #include "nvenc.h" > > +#include "cbs_h265.h" > > +#include "hevc_sei.h" > > +#include "put_bits.h" > > > > #include "libavutil/hwcontext_cuda.h" > > #include "libavutil/hwcontext.h" > > @@ -30,6 +33,7 @@ > > #include "libavutil/avassert.h" > > #include "libavutil/mem.h" > > #include "libavutil/pixdesc.h" > > +#include "libavutil/mastering_display_metadata.h" > > #include "internal.h" > > > > #define CHECK_CU(x) FF_CUDA_CHECK_DL(avctx, dl_fn->cuda_dl, x) > > @@ -1491,6 +1495,46 @@ av_cold int ff_nvenc_encode_init(AVCodecContext > *avctx) > > ctx->data_pix_fmt = avctx->pix_fmt; > > } > > > > +ctx->display_primaries_x[0] = 13250; > > +ctx->display_primaries_y[0] = 34500; > > +ctx->display_primaries_x[1] = 7500; > > +ctx->display_primaries_y[1] = 3000; > > +ctx->display_primaries_x[2] = 34000; > > +ctx->display_primaries_y[2] = 16000; > > +ctx->white_point_x = 15635; > > +ctx->white_point_y = 16450; > > +ctx->max_display_mastering_luminance = 1000; > > +ctx->min_display_mastering_luminance = 500; > > +ctx->max_content_light_level = 0; > > 
+ctx->max_pic_average_light_level = 0; > > Does all this really belong into an encoder? The command line parameter > also looks very arcane. > To me, this looks more like a filter or something should add this as > extra data, and then multiple encoders can pick it up from there. > > Yes, now Nvidia video sdk can't support output HDR SEI data directly, however its API allow us to set it by SeiPayloadArray in parameters by the format, that's the only ways to get 4K HDR output by nvenc. Below is the discussion for the background FYI: https://ffmpeg.zeranoe.com/forum/viewtopic.php?t=3729&start=10 https://devtalk.nvidia.com/default/topic/976304/video-technologies/nvidia-video-codec-sdk-7-0-hevc-custom-sei-support-problem/ For the master_display and max_cll parameters, it's same format as x265, in case the input is raw video, you can configure it by parameters. With the patches, you can get correct HDR10 and HLG with SDR 4K content. Or after you get transcode 4K file by Nvenc, you'll lost SEI data, you had to using the below project to patch the data. https://github.com/SK-Hardwired/nv_hevc_hdr_patcher Thanks, Limin > Same goes for patch 3/3. Patch 1/3 is looks OK to me. > > > Timo > > ___ > ffmpeg-devel mailing list > ffmpeg-devel@ffmpeg.org > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel > > To unsubscribe, visit link above, or email > ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe". ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH 2/7] libavfilter/vf_overlay.c: Add '\' for every line of the blend_slice_yuv function by vim column edit
From: Limin Wang --- libavfilter/vf_overlay.c | 52 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index b468cedf2e..c1abd3e1b2 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -598,32 +598,32 @@ static inline void alpha_composite(const AVFrame *src, const AVFrame *dst, } } -static av_always_inline void blend_slice_yuv(AVFilterContext *ctx, - AVFrame *dst, const AVFrame *src, - int hsub, int vsub, - int main_has_alpha, - int x, int y, - int is_straight, - int jobnr, int nb_jobs) -{ -OverlayContext *s = ctx->priv; -const int src_w = src->width; -const int src_h = src->height; -const int dst_w = dst->width; -const int dst_h = dst->height; - -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, y, main_has_alpha, -s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, s->main_desc->comp[0].step, is_straight, 1, -jobnr, nb_jobs); -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha, -s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, s->main_desc->comp[1].step, is_straight, 1, -jobnr, nb_jobs); -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha, -s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, s->main_desc->comp[2].step, is_straight, 1, -jobnr, nb_jobs); - -if (main_has_alpha) -alpha_composite(src, dst, src_w, src_h, dst_w, dst_h, x, y, jobnr, nb_jobs); +static av_always_inline void blend_slice_yuv(AVFilterContext *ctx, \ + AVFrame *dst, const AVFrame *src, \ + int hsub, int vsub, \ + int main_has_alpha, \ + int x, int y, \ + int is_straight, \ + int jobnr, int nb_jobs) \ +{ \ +OverlayContext *s = ctx->priv; \ +const int src_w = src->width; \ +const int src_h = src->height; \ +const int dst_w = dst->width; \ +const int dst_h = dst->height; \ + \ +blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, y, main_has_alpha,\ +s->main_desc->comp[0].plane, 
s->main_desc->comp[0].offset, s->main_desc->comp[0].step, is_straight, 1, \ +jobnr, nb_jobs); \ +blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,\ +s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, s->main_desc->comp[1].step, is_straight, 1, \ +jobnr, nb_jobs); \ +blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha,\ +s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, s->main_desc->comp[2].step, is_straight, 1, \ +jobnr, nb_jobs); \ + \ +if (main_has_alpha)
[FFmpeg-devel] [PATCH 6/7] libavfilter/vf_overlay.c: using the nbits and depth for 8bits and 10bit support
From: Limin Wang --- libavfilter/vf_overlay.c | 69 +--- 1 file changed, 44 insertions(+), 25 deletions(-) diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index ee51a54659..8376494efc 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -464,22 +464,26 @@ static av_always_inline void blend_plane_##depth##_##nbits##bits(AVFilterContext int dst_hp = AV_CEIL_RSHIFT(dst_h, vsub); \ int yp = y>>vsub; \ int xp = x>>hsub; \ -uint8_t *s, *sp, *d, *dp, *dap, *a, *da, *ap; \ +uint##depth##_t *s, *sp, *d, *dp, *dap, *a, *da, *ap; \ int jmax, j, k, kmax; \ int slice_start, slice_end; \ +const int max = (1 << nbits) - 1; \ +const int mid = (1 << (nbits -1)) ; \ +int bytes = depth / 8; \ \ +dst_step /= bytes; \ j = FFMAX(-yp, 0); \ jmax = FFMIN3(-yp + dst_hp, FFMIN(src_hp, dst_hp), yp + src_hp); \ \ slice_start = j + (jmax * jobnr) / nb_jobs; \ slice_end = j + (jmax * (jobnr+1)) / nb_jobs; \ \ -sp = src->data[i] + (slice_start) * src->linesize[i]; \ -dp = dst->data[dst_plane] \ +sp = (uint##depth##_t *)(src->data[i] + (slice_start) * src->linesize[i]); \ +dp = (uint##depth##_t *)(dst->data[dst_plane] \ + (yp + slice_start) * dst->linesize[dst_plane] \ - + dst_offset; \ -ap = src->data[3] + (slice_start << vsub) * src->linesize[3]; \ -dap = dst->data[3] + ((yp + slice_start) << vsub) * dst->linesize[3]; \ + + dst_offset); \ +ap = (uint##depth##_t *)(src->data[3] + (slice_start << vsub) * src->linesize[3]); \ +dap = (uint##depth##_t *)(dst->data[3] + ((yp + slice_start) << vsub) * dst->linesize[3]); \ \ for (j = slice_start; j < slice_end; j++) { \ k = FFMAX(-xp, 0); \ @@ -489,7 +493,7 @@ static av_always_inline void blend_plane_##depth##_##nbits##bits(AVFilterContext da = dap + ((xp+k) << hsub); \ kmax = FFMIN(-xp + dst_wp, src_wp); \ \ -if (((vsub && j+1 < src_hp) || !vsub) && octx->blend_row[i]) { \ +if (nbits == 8 && ((vsub && j+1 < src_hp) || !
[FFmpeg-devel] [PATCH 7/7] libavfilter/vf_overlay.c: add the yuv420p10 10bit support
From: Limin Wang The test ffmpeg command in iMAC: ./ffmpeg -y -i input.ts -i ./logo.png -filter_complex overlay=50:50:format=yuv420p10 -c:v hevc_videotoolbox ./test.ts Now I have tested with 8bit and check the result is OK --- libavfilter/vf_overlay.c | 54 ++-- libavfilter/vf_overlay.h | 1 + 2 files changed, 48 insertions(+), 7 deletions(-) diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index 8376494efc..48e6a90f8b 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -153,7 +153,7 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar } static const enum AVPixelFormat alpha_pix_fmts[] = { -AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, +AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE }; @@ -172,6 +172,14 @@ static int query_formats(AVFilterContext *ctx) AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE }; +static const enum AVPixelFormat main_pix_fmts_yuv420p10[] = { +AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUVA420P10LE, +AV_PIX_FMT_NONE +}; +static const enum AVPixelFormat overlay_pix_fmts_yuv420p10[] = { +AV_PIX_FMT_YUVA420P10LE, AV_PIX_FMT_NONE +}; + static const enum AVPixelFormat main_pix_fmts_yuv422[] = { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_NONE }; @@ -217,6 +225,13 @@ static int query_formats(AVFilterContext *ctx) goto fail; } break; +case OVERLAY_FORMAT_YUV420P10: +if (!(main_formats= ff_make_format_list(main_pix_fmts_yuv420p10)) || +!(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv420p10))) { +ret = AVERROR(ENOMEM); +goto fail; +} +break; case OVERLAY_FORMAT_YUV422: if (!(main_formats= ff_make_format_list(main_pix_fmts_yuv422)) || !(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv422))) { @@ -494,7 +509,7 @@ static av_always_inline void blend_plane_##depth##_##nbits##bits(AVFilterContext kmax = 
FFMIN(-xp + dst_wp, src_wp); \ \ if (nbits == 8 && ((vsub && j+1 < src_hp) || !vsub) && octx->blend_row[i]) { \ -int c = octx->blend_row[i](d, da, s, a, kmax - k, src->linesize[3]); \ +int c = octx->blend_row[i]((uint8_t*)d, (uint8_t*)da, (uint8_t*)s, (uint8_t*)a, kmax - k, src->linesize[3]); \ \ s += c; \ d += dst_step * c; \ @@ -539,18 +554,18 @@ static av_always_inline void blend_plane_##depth##_##nbits##bits(AVFilterContext if (nbits > 8) \ *d = (*d * (max - alpha) + *s * alpha) / max; \ else \ -*d = FAST_DIV255(*d * (255 - alpha) + *s * alpha); \ +*d = FAST_DIV255(*d * (max - alpha) + *s * alpha); \ } else { \ if (nbits > 8) { \ if (i && yuv) \ -*d = av_clip((*d * (max - alpha) + *s * alpha) / max + *s - 128, -128, 128) + 128; \ +*d = av_clip((*d * (max - alpha) + *s * alpha) / max + *s - mid, -mid, mid) + mid; \ else \ -*d = FFMIN((*d * (max - alpha) + *s * alpha) / max + *s, 255); \ +*d = FFMIN((*d * (max - alpha) + *s * alpha) / max + *s, max); \ } else {
[FFmpeg-devel] [PATCH 1/7] libavfilter/vf_overlay.c: change the commands style for the macro defined function
From: Limin Wang --- libavfilter/vf_overlay.c | 12 ++-- 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index 0a8f089c0d..b468cedf2e 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -500,7 +500,7 @@ static av_always_inline void blend_plane(AVFilterContext *ctx, for (; k < kmax; k++) { int alpha_v, alpha_h, alpha; -// average alpha for color components, improve quality +/* average alpha for color components, improve quality */ if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) { alpha = (a[0] + a[src->linesize[3]] + a[1] + a[src->linesize[3]+1]) >> 2; @@ -512,10 +512,10 @@ static av_always_inline void blend_plane(AVFilterContext *ctx, alpha = (alpha_v + alpha_h) >> 1; } else alpha = a[0]; -// if the main channel has an alpha channel, alpha has to be calculated -// to create an un-premultiplied (straight) alpha value +/* if the main channel has an alpha channel, alpha has to be calculated */ +/* to create an un-premultiplied (straight) alpha value */ if (main_has_alpha && alpha != 0 && alpha != 255) { -// average alpha for color components, improve quality +/* average alpha for color components, improve quality */ uint8_t alpha_d; if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) { alpha_d = (da[0] + da[dst->linesize[3]] + @@ -556,7 +556,7 @@ static inline void alpha_composite(const AVFrame *src, const AVFrame *dst, int x, int y, int jobnr, int nb_jobs) { -uint8_t alpha; ///< the amount of overlay to blend on to main +uint8_t alpha; /* the amount of overlay to blend on to main */ uint8_t *s, *sa, *d, *da; int i, imax, j, jmax; int slice_start, slice_end; @@ -587,7 +587,7 @@ static inline void alpha_composite(const AVFrame *src, const AVFrame *dst, *d = *s; break; default: -// apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha +/* apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha */ *d += FAST_DIV255((255 - *d) * *s); } d += 1; -- 2.21.0 ___ 
ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH 4/7] libavfilter/vf_overlay.c: Add '\' for every line of the blend_plane function
From: Limin Wang --- libavfilter/vf_overlay.c | 214 +++ 1 file changed, 107 insertions(+), 107 deletions(-) diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index f36be1601b..ba8147f579 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -441,113 +441,113 @@ static av_always_inline void blend_slice_packed_rgb(AVFilterContext *ctx, } } -static av_always_inline void blend_plane(AVFilterContext *ctx, - AVFrame *dst, const AVFrame *src, - int src_w, int src_h, - int dst_w, int dst_h, - int i, int hsub, int vsub, - int x, int y, - int main_has_alpha, - int dst_plane, - int dst_offset, - int dst_step, - int straight, - int yuv, - int jobnr, - int nb_jobs) -{ -OverlayContext *octx = ctx->priv; -int src_wp = AV_CEIL_RSHIFT(src_w, hsub); -int src_hp = AV_CEIL_RSHIFT(src_h, vsub); -int dst_wp = AV_CEIL_RSHIFT(dst_w, hsub); -int dst_hp = AV_CEIL_RSHIFT(dst_h, vsub); -int yp = y>>vsub; -int xp = x>>hsub; -uint8_t *s, *sp, *d, *dp, *dap, *a, *da, *ap; -int jmax, j, k, kmax; -int slice_start, slice_end; - -j = FFMAX(-yp, 0); -jmax = FFMIN3(-yp + dst_hp, FFMIN(src_hp, dst_hp), yp + src_hp); - -slice_start = j + (jmax * jobnr) / nb_jobs; -slice_end = j + (jmax * (jobnr+1)) / nb_jobs; - -sp = src->data[i] + (slice_start) * src->linesize[i]; -dp = dst->data[dst_plane] - + (yp + slice_start) * dst->linesize[dst_plane] - + dst_offset; -ap = src->data[3] + (slice_start << vsub) * src->linesize[3]; -dap = dst->data[3] + ((yp + slice_start) << vsub) * dst->linesize[3]; - -for (j = slice_start; j < slice_end; j++) { -k = FFMAX(-xp, 0); -d = dp + (xp+k) * dst_step; -s = sp + k; -a = ap + (kblend_row[i](d, da, s, a, kmax - k, src->linesize[3]); - -s += c; -d += dst_step * c; -da += (1 << hsub) * c; -a += (1 << hsub) * c; -k += c; -} -for (; k < kmax; k++) { -int alpha_v, alpha_h, alpha; - -/* average alpha for color components, improve quality */ -if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) { -alpha = (a[0] + a[src->linesize[3]] + - a[1] + 
a[src->linesize[3]+1]) >> 2; -} else if (hsub || vsub) { -alpha_h = hsub && k+1 < src_wp ? -(a[0] + a[1]) >> 1 : a[0]; -alpha_v = vsub && j+1 < src_hp ? -(a[0] + a[src->linesize[3]]) >> 1 : a[0]; -alpha = (alpha_v + alpha_h) >> 1; -} else -alpha = a[0]; -/* if the main channel has an alpha channel, alpha has to be calculated */ -/* to create an un-premultiplied (straight) alpha value */ -if (main_has_alpha && alpha != 0 && alpha != 255) { -/* average alpha for color components, improve quality */ -uint8_t alpha_d; -if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) { -alpha_d = (da[0] + da[dst->linesize[3]] + - da[1] + da[dst->linesize[3]+1]) >> 2; -} else if (hsub || vsub) { -alpha_h = hsub && k+1 < src_wp ? -(da[0] + da[1]) >> 1 : da[0]; -alpha_v = vsub && j+1 < src_hp ? -(da[0] + da[dst->linesize[3]]) >> 1 : da[0]; -alpha_d = (alpha_v + alpha_h) >> 1; -} else -alpha_d = da[0]; -alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); -} -if (straight) { -*d = FAST_DIV255(*d * (255 - alpha) + *s * alpha); -} else { -if (i && yuv) -*d = av_clip(FAST_DIV255((*d - 128) * (255 - alpha)) + *s - 128, -128, 128) + 128; -else -*d = FFMIN(FAST_DIV255(*d * (255 - alpha)) + *s, 255); -} -s++; -d += dst_step; -da += 1 << hsub; -a += 1 << hsub; -} -dp += dst->linesize[dst_plane]; -sp += src->linesize[i]; -ap += (1 << vsub) * src->linesize[3]; -dap += (1 << vsub) * dst->linesize[3]; -} +static av_always_inline void blend_plan
[FFmpeg-devel] [PATCH 3/7] libavfilter/vf_overlay.c: Add '\' for every line of the alpha_composite function
From: Limin Wang --- libavfilter/vf_overlay.c | 92 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index c1abd3e1b2..f36be1601b 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -550,52 +550,52 @@ static av_always_inline void blend_plane(AVFilterContext *ctx, } } -static inline void alpha_composite(const AVFrame *src, const AVFrame *dst, - int src_w, int src_h, - int dst_w, int dst_h, - int x, int y, - int jobnr, int nb_jobs) -{ -uint8_t alpha; /* the amount of overlay to blend on to main */ -uint8_t *s, *sa, *d, *da; -int i, imax, j, jmax; -int slice_start, slice_end; - -imax = FFMIN(-y + dst_h, src_h); -slice_start = (imax * jobnr) / nb_jobs; -slice_end = ((imax * (jobnr+1)) / nb_jobs); - -i = FFMAX(-y, 0); -sa = src->data[3] + (i + slice_start) * src->linesize[3]; -da = dst->data[3] + (y + i + slice_start) * dst->linesize[3]; - -for (i = i + slice_start; i < slice_end; i++) { -j = FFMAX(-x, 0); -s = sa + j; -d = da + x+j; - -for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) { -alpha = *s; -if (alpha != 0 && alpha != 255) { -uint8_t alpha_d = *d; -alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); -} -switch (alpha) { -case 0: -break; -case 255: -*d = *s; -break; -default: -/* apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha */ -*d += FAST_DIV255((255 - *d) * *s); -} -d += 1; -s += 1; -} -da += dst->linesize[3]; -sa += src->linesize[3]; -} +static inline void alpha_composite(const AVFrame *src, const AVFrame *dst, \ + int src_w, int src_h, \ + int dst_w, int dst_h, \ + int x, int y, \ + int jobnr, int nb_jobs) \ +{ \ +uint8_t alpha; /* the amount of overlay to blend on to main */ \ +uint8_t *s, *sa, *d, *da; \ +int i, imax, j, jmax; \ +int slice_start, slice_end; \ + \ +imax = FFMIN(-y + dst_h, src_h); \ +slice_start = (imax * jobnr) / nb_jobs; \ +slice_end = ((imax * (jobnr+1)) / nb_jobs); \ + \ +i = FFMAX(-y, 0); \ +sa = src->data[3] + (i + slice_start) * 
src->linesize[3]; \ +da = dst->data[3] + (y + i + slice_start) * dst->linesize[3]; \ + \ +for (i = i + slice_start; i < slice_end; i++) { \ +j = FFMAX(-x, 0); \ +s = sa + j; \ +d = da + x+j; \ + \ +for (jmax = FFMIN(-x + dst_w,
[FFmpeg-devel] [PATCH 5/7] libavfilter/vf_overlay.c: define the macro-style function to support 8bit and 10bit blend, keep the 8bit function same now
From: Limin Wang --- libavfilter/vf_overlay.c | 52 ++-- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index ba8147f579..ee51a54659 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -441,7 +441,8 @@ static av_always_inline void blend_slice_packed_rgb(AVFilterContext *ctx, } } -static av_always_inline void blend_plane(AVFilterContext *ctx, \ +#define DEFINE_BLEND_PLANE(depth, nbits) \ +static av_always_inline void blend_plane_##depth##_##nbits##bits(AVFilterContext *ctx, \ AVFrame *dst, const AVFrame *src, \ int src_w, int src_h, \ int dst_w, int dst_h, \ @@ -549,8 +550,10 @@ static av_always_inline void blend_plane(AVFilterContext *ctx, dap += (1 << vsub) * dst->linesize[3]; \ } \ } +DEFINE_BLEND_PLANE(8, 8); -static inline void alpha_composite(const AVFrame *src, const AVFrame *dst, \ +#define DEFINE_ALPHA_COMPOSITE(depth, nbits) \ +static inline void alpha_composite_##depth##_##nbits##bits(const AVFrame *src, const AVFrame *dst, \ int src_w, int src_h, \ int dst_w, int dst_h, \ int x, int y, \ @@ -597,8 +600,10 @@ static inline void alpha_composite(const AVFrame *src, const AVFrame *dst, sa += src->linesize[3]; \ } \ } +DEFINE_ALPHA_COMPOSITE(8, 8); -static av_always_inline void blend_slice_yuv(AVFilterContext *ctx, \ +#define DEFINE_BLEND_SLICE_YUV(depth, nbits) \ +static av_always_inline void blend_slice_yuv_##depth##_##nbits##bits(AVFilterContext *ctx, \ AVFrame *dst, const AVFrame *src, \ int hsub, int vsub, \ int main_has_alpha, \ @@ -612,19 +617,20 @@ static av_always_inline void blend_slice_yuv(AVFilterContext *ctx, const int dst_w = dst->width; \ const int dst_h = dst->height; \ \ -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, y, main_has_alpha,\ +blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, y, main_has_alpha,\ s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, s->main_desc->comp[0].step, 
is_straight, 1, \ jobnr, nb_jobs); \ -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,\ +blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,\ s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, s->main_desc->comp[1].step, is_straight, 1, \ jobnr, nb_jobs); \ -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha,\ +blend_plane_##depth##_##nbits##bits(ctx, dst, src, sr
Re: [FFmpeg-devel] [PATCH 2/7] libavfilter/vf_overlay.c: Add '\' for every line of the blend_slice_yuv function by vim column edit
On 5/24/19, lance.lmw...@gmail.com wrote: > From: Limin Wang > > --- > libavfilter/vf_overlay.c | 52 > 1 file changed, 26 insertions(+), 26 deletions(-) > > diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c > index b468cedf2e..c1abd3e1b2 100644 > --- a/libavfilter/vf_overlay.c > +++ b/libavfilter/vf_overlay.c > @@ -598,32 +598,32 @@ static inline void alpha_composite(const AVFrame *src, > const AVFrame *dst, > } > } > > -static av_always_inline void blend_slice_yuv(AVFilterContext *ctx, > - AVFrame *dst, const AVFrame > *src, > - int hsub, int vsub, > - int main_has_alpha, > - int x, int y, > - int is_straight, > - int jobnr, int nb_jobs) > -{ > -OverlayContext *s = ctx->priv; > -const int src_w = src->width; > -const int src_h = src->height; > -const int dst_w = dst->width; > -const int dst_h = dst->height; > - > -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, > x, y, main_has_alpha, > -s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, > s->main_desc->comp[0].step, is_straight, 1, > -jobnr, nb_jobs); > -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, > x, y, main_has_alpha, > -s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, > s->main_desc->comp[1].step, is_straight, 1, > -jobnr, nb_jobs); > -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, > x, y, main_has_alpha, > -s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, > s->main_desc->comp[2].step, is_straight, 1, > -jobnr, nb_jobs); > - > -if (main_has_alpha) > -alpha_composite(src, dst, src_w, src_h, dst_w, dst_h, x, y, jobnr, > nb_jobs); > +static av_always_inline void blend_slice_yuv(AVFilterContext *ctx, >\ > + AVFrame *dst, const AVFrame > *src, \ > + int hsub, int vsub, >\ > + int main_has_alpha, >\ > + int x, int y, >\ > + int is_straight, >\ > + int jobnr, int nb_jobs) >\ > +{ >\ > +OverlayContext *s = ctx->priv; >\ > +const int src_w = src->width; >\ > +const int src_h = src->height; >\ > +const int dst_w 
= dst->width; >\ > +const int dst_h = dst->height; >\ > + >\ > +blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, > x, y, main_has_alpha,\ > +s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, > s->main_desc->comp[0].step, is_straight, 1, \ > +jobnr, nb_jobs); >\ > +blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, > x, y, main_has_alpha,\ > +s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, > s->main_desc->comp[1].step, is_straight, 1, \ > +jobnr, nb_jobs); >\ > +blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, > x, y, main_has_alpha,\ > +s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, > s->main_desc->comp[2].step, is_straight, 1, \ > +jobnr, nb_jobs); >\ > + >\ > +if (main_has_alpha) >\ > +alpha_composite(src, dst, src_w, src_h, dst_w, dst_h, x, y, jobnr, > nb_jobs); \ > } > > static av_always_inline void blend_slice_planar_rgb(AVFilterContext *ctx, > -- > 2.21.0 > Why? Does not make sense. ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-
Re: [FFmpeg-devel] [PATCH 2/2] avformat/mpjpegdec: fix strict boundary search string
On Thu, May 23, 2019 at 06:06:43PM +0200, Moritz Barsnick wrote: > According to RFC1341, the multipart boundary indicated by the > Content-Type header must be prepended by CRLF + "--", and followed > by CRLF. In the case of strict MIME header boundary handling, the > "--" was forgotten to add. > > Fixes trac #7921. > > Signed-off-by: Moritz Barsnick > --- > libavformat/mpjpegdec.c | 4 ++-- > 1 file changed, 2 insertions(+), 2 deletions(-) > > diff --git a/libavformat/mpjpegdec.c b/libavformat/mpjpegdec.c > index a23469c0ec..f145766e6e 100644 > --- a/libavformat/mpjpegdec.c > +++ b/libavformat/mpjpegdec.c > @@ -306,8 +306,8 @@ static int mpjpeg_read_packet(AVFormatContext *s, > AVPacket *pkt) > boundary = mpjpeg_get_boundary(s->pb); > } > if (boundary != NULL) { > -mpjpeg->boundary = boundary; > -mpjpeg->searchstr = av_asprintf( "\r\n%s\r\n", boundary ); > +mpjpeg->boundary = av_asprintf("--%s", boundary); memleak. Also a testcase would be good to have, ideally in fate thx [...] -- Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB When the tyrant has disposed of foreign enemies by conquest or treaty, and there is nothing more to fear from them, then he is always stirring up some war or other, in order that the people may require a leader. -- Plato signature.asc Description: PGP signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 1/7] libavfilter/vf_overlay.c: change the commands style for the macro defined function
On 2019-05-24 11:36, lance.lmw...@gmail.com wrote: > From: Limin Wang > > ... Why? And these are "comments" not "commands". signature.asc Description: OpenPGP digital signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 2/2] avcodec/pngdec: Check nb_blocks
On Sun, Apr 28, 2019 at 12:37:12PM +0200, Paul B Mahol wrote: > On 4/28/19, Michael Niedermayer wrote: > > Fixes: Timeout (23sec -> 0.5sec) > > Fixes: > > 14329/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_LSCR_fuzzer-5679252923482112 > > > > Found-by: continuous fuzzing process > > https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg > > Signed-off-by: Michael Niedermayer > > --- > > libavcodec/pngdec.c | 2 ++ > > 1 file changed, 2 insertions(+) > > > > diff --git a/libavcodec/pngdec.c b/libavcodec/pngdec.c > > index 6a681be29d..1dcde6cbc9 100644 > > --- a/libavcodec/pngdec.c > > +++ b/libavcodec/pngdec.c > > @@ -1541,6 +1541,8 @@ static int decode_frame_lscr(AVCodecContext *avctx, > > return ret; > > > > nb_blocks = bytestream2_get_le16(gb); > > +if (2 + nb_blocks * 12 > bytestream2_get_bytes_left(gb)) > > I prefer if this is not reversed. of course, will unreverse and push as "if (bytestream2_get_bytes_left(gb) < 2 + nb_blocks * 12)" thanks [...] -- Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB There will always be a question for which you do not know the correct answer. signature.asc Description: PGP signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 2/2] avcodec/pngdec: Check nb_blocks
On 5/24/19, Michael Niedermayer wrote: > On Sun, Apr 28, 2019 at 12:37:12PM +0200, Paul B Mahol wrote: >> On 4/28/19, Michael Niedermayer wrote: >> > Fixes: Timeout (23sec -> 0.5sec) >> > Fixes: >> > 14329/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_LSCR_fuzzer-5679252923482112 >> > >> > Found-by: continuous fuzzing process >> > https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg >> > Signed-off-by: Michael Niedermayer >> > --- >> > libavcodec/pngdec.c | 2 ++ >> > 1 file changed, 2 insertions(+) >> > >> > diff --git a/libavcodec/pngdec.c b/libavcodec/pngdec.c >> > index 6a681be29d..1dcde6cbc9 100644 >> > --- a/libavcodec/pngdec.c >> > +++ b/libavcodec/pngdec.c >> > @@ -1541,6 +1541,8 @@ static int decode_frame_lscr(AVCodecContext >> > *avctx, >> > return ret; >> > >> > nb_blocks = bytestream2_get_le16(gb); >> > +if (2 + nb_blocks * 12 > bytestream2_get_bytes_left(gb)) >> >> I prefer if this is not reversed. > > of course, will unreverse and push as "if (bytestream2_get_bytes_left(gb) < > 2 + nb_blocks * 12)" > > thanks OK. ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 2/2] avformat/webm_chunk: Check header filename length
On Fri, May 03, 2019 at 06:31:00AM +, Andreas Rheinhardt wrote: > Michael Niedermayer: > > Signed-off-by: Michael Niedermayer > > --- > > libavformat/webm_chunk.c | 8 +++- > > 1 file changed, 7 insertions(+), 1 deletion(-) > > > > diff --git a/libavformat/webm_chunk.c b/libavformat/webm_chunk.c > > index 561ec152e7..e2fbd8be1d 100644 > > --- a/libavformat/webm_chunk.c > > +++ b/libavformat/webm_chunk.c > > @@ -88,6 +88,8 @@ static int get_chunk_filename(AVFormatContext *s, int > > is_header, char filename[M > > { > > WebMChunkContext *wc = s->priv_data; > > AVFormatContext *oc = wc->avf; > > +int len; > > + > > if (!filename) { > > return AVERROR(EINVAL); > > } > > @@ -96,7 +98,11 @@ static int get_chunk_filename(AVFormatContext *s, int > > is_header, char filename[M > > av_log(oc, AV_LOG_ERROR, "No header filename provided\n"); > > return AVERROR(EINVAL); > > } > > -av_strlcpy(filename, wc->header_filename, MAX_FILENAME_SIZE); > > +len = av_strlcpy(filename, wc->header_filename, MAX_FILENAME_SIZE); > > +if (len >= MAX_FILENAME_SIZE) { > > +av_log(oc, AV_LOG_ERROR, "header filename too long\n"); > > +return AVERROR(EINVAL); > > +} > > } else { > > if (av_get_frame_filename(filename, MAX_FILENAME_SIZE, > >s->url, wc->chunk_index - 1) < 0) { > > > len has an unnecessarily broad scope. The string is intentionally > started with a lower case letter because the parameter "header" is > lower case, too, isn't it? If so, it's fine and LGTM apart from the scope. I'll capitalize the error message and move the "int len" into the block thanks [...] -- Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB Complexity theory is the science of finding the exact solution to an approximation. 
Benchmarking OTOH is finding an approximation of the exact signature.asc Description: PGP signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 1/7] libavfilter/vf_overlay.c: change the commands style for the macro defined function
On Fri, May 24, 2019 at 17:36:10 +0800, lance.lmw...@gmail.com wrote: > Subject: [PATCH 1/7] libavfilter/vf_overlay.c: change the commands style for > the macro defined function ^ It's not "commands", it's "comments", or "comment style". Moritz ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 1/2] avformat/webm_chunk: Fix argument length of get_chunk_filename()
On Fri, May 03, 2019 at 06:03:02PM +0200, Michael Niedermayer wrote: > On Fri, May 03, 2019 at 06:11:00AM +, Andreas Rheinhardt wrote: > > Michael Niedermayer: > > > Signed-off-by: Michael Niedermayer > > > --- > > > libavformat/webm_chunk.c | 2 +- > > > 1 file changed, 1 insertion(+), 1 deletion(-) > > > > > > diff --git a/libavformat/webm_chunk.c b/libavformat/webm_chunk.c > > > index 2c99753b5b..561ec152e7 100644 > > > --- a/libavformat/webm_chunk.c > > > +++ b/libavformat/webm_chunk.c > > > @@ -84,7 +84,7 @@ static int chunk_mux_init(AVFormatContext *s) > > > return 0; > > > } > > > > > > -static int get_chunk_filename(AVFormatContext *s, int is_header, char > > > *filename) > > > +static int get_chunk_filename(AVFormatContext *s, int is_header, char > > > filename[MAX_FILENAME_SIZE]) > > > { > > > WebMChunkContext *wc = s->priv_data; > > > AVFormatContext *oc = wc->avf; > > > > > 1. This is not a fix, merely a cosmetic clarification. After all, this > > change does not allow the compiler to infer that every pointer > > corresponding to the filename argument will point to an array of at > > least MAX_FILENAME_SIZE elements. (C99 added a static keyword for this.) > > static analyzers can use such hints to detect violations > but the real intent here was that the human developer would see > from just looking at the argument that it has a implied size. > And that way to avoid a mistake will apply with a improved commit message thanks [...] -- Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB Why not whip the teacher when the pupil misbehaves? -- Diogenes of Sinope signature.asc Description: PGP signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 1/2] avformat/webm_chunk: Fix argument length of get_chunk_filename()
On Fri, May 03, 2019 at 06:11:00AM +, Andreas Rheinhardt wrote: > Michael Niedermayer: > > Signed-off-by: Michael Niedermayer > > --- > > libavformat/webm_chunk.c | 2 +- > > 1 file changed, 1 insertion(+), 1 deletion(-) > > > > diff --git a/libavformat/webm_chunk.c b/libavformat/webm_chunk.c > > index 2c99753b5b..561ec152e7 100644 > > --- a/libavformat/webm_chunk.c > > +++ b/libavformat/webm_chunk.c > > @@ -84,7 +84,7 @@ static int chunk_mux_init(AVFormatContext *s) > > return 0; > > } > > > > -static int get_chunk_filename(AVFormatContext *s, int is_header, char > > *filename) > > +static int get_chunk_filename(AVFormatContext *s, int is_header, char > > filename[MAX_FILENAME_SIZE]) > > { > > WebMChunkContext *wc = s->priv_data; > > AVFormatContext *oc = wc->avf; > > > 1. This is not a fix, merely a cosmetic clarification. After all, this > change does not allow the compiler to infer that every pointer > corresponding to the filename argument will point to an array of at > least MAX_FILENAME_SIZE elements. (C99 added a static keyword for this.) > > 2. You could just as well remove the check for whether filename is > NULL as the new form makes it clear that this function should not be > called with a NULL pointer as filename. (Of course, compilers can (and > do) already optimize the check away by looking at both calls of this > function.) possible, but removing the check isnt truly related to this change thx [...] -- Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB Democracy is the form of government in which you can choose your dictator signature.asc Description: PGP signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 1/2] avformat/mpjpegdec: fix finding multipart boundary parameter
On Thu, May 23, 2019 at 06:06:42PM +0200, Moritz Barsnick wrote: > The string matching function's return value was evaluated incorrectly. > > Fixes trac #7920. > > Signed-off-by: Moritz Barsnick > --- > libavformat/mpjpegdec.c | 2 +- > 1 file changed, 1 insertion(+), 1 deletion(-) > > diff --git a/libavformat/mpjpegdec.c b/libavformat/mpjpegdec.c > index e653b5cc93..a23469c0ec 100644 > --- a/libavformat/mpjpegdec.c > +++ b/libavformat/mpjpegdec.c > @@ -271,7 +271,7 @@ static char* mpjpeg_get_boundary(AVIOContext* pb) > while (av_isspace(*start)) > start++; > > -if (!av_stristart(start, "boundary=", &start)) { > +if (av_stristart(start, "boundary=", &start)) { > end = strchr(start, ';'); > if (end) > len = end - start - 1; LGTM a testcase in fate would be good thx [...] -- Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB No great genius has ever existed without some touch of madness. -- Aristotle signature.asc Description: PGP signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 1/7] libavfilter/vf_overlay.c: change the commands style for the macro defined function
On 2019-05-24 12:06, James Darnley wrote: > On 2019-05-24 11:36, lance.lmw...@gmail.com wrote: >> From: Limin Wang >> >> ... > > Why? I see why: so you don't screw-up the macros you create later. signature.asc Description: OpenPGP digital signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH] libavfilter: Add derain filter.
Remove the rain in the input image/video by applying the derain methods based on convolutional neural networks. Training scripts as well as scripts for model generation are provided in the repository at https://github.com/XueweiMeng/derain_filter.git. Signed-off-by: Xuewei Meng --- doc/filters.texi | 34 ++ libavfilter/Makefile | 1 + libavfilter/allfilters.c | 1 + libavfilter/vf_derain.c | 219 +++ 4 files changed, 255 insertions(+) create mode 100644 libavfilter/vf_derain.c diff --git a/doc/filters.texi b/doc/filters.texi index 4fdcfe919e..f1d3841ed3 100644 --- a/doc/filters.texi +++ b/doc/filters.texi @@ -8248,6 +8248,40 @@ delogo=x=0:y=0:w=100:h=77:band=10 @end itemize +@section derain + +Remove the rain in the input image/video by applying the derain methods based on +convolutional neural networks. Supported models: + +@itemize +@item +Recurrent Squeeze-and-Excitation Context Aggregation Net (RESCAN). +See @url{http://openaccess.thecvf.com/content_ECCV_2018/papers/Xia_Li_Recurrent_Squeeze-and-Excitation_Context_ECCV_2018_paper.pdf}. +@end itemize + +Training scripts as well as scripts for model generation are provided in +the repository at @url{https://github.com/XueweiMeng/derain_filter.git}. + +The filter accepts the following options: + +@table @option +@item dnn_backend +Specify which DNN backend to use for model loading and execution. This option accepts +the following values: + +@table @samp +@item native +Native implementation of DNN loading and execution. +@end table +Default value is @samp{native}. + +@item model +Set path to model file specifying network architecture and its parameters. +Note that different backends use different file formats. TensorFlow backend +can load files for both formats, while native backend can load files for only +its format. +@end table + @section deshake Attempt to fix small changes in horizontal and/or vertical shift. 
This diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 9a61c25b05..b7191d0081 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -200,6 +200,7 @@ OBJS-$(CONFIG_DCTDNOIZ_FILTER) += vf_dctdnoiz.o OBJS-$(CONFIG_DEBAND_FILTER) += vf_deband.o OBJS-$(CONFIG_DEBLOCK_FILTER)+= vf_deblock.o OBJS-$(CONFIG_DECIMATE_FILTER) += vf_decimate.o +OBJS-$(CONFIG_DERAIN_FILTER) += vf_derain.o OBJS-$(CONFIG_DECONVOLVE_FILTER) += vf_convolve.o framesync.o OBJS-$(CONFIG_DEDOT_FILTER) += vf_dedot.o OBJS-$(CONFIG_DEFLATE_FILTER)+= vf_neighbor.o diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index 40534738ee..f3c8883960 100644 --- a/libavfilter/allfilters.c +++ b/libavfilter/allfilters.c @@ -196,6 +196,7 @@ extern AVFilter ff_vf_deinterlace_vaapi; extern AVFilter ff_vf_dejudder; extern AVFilter ff_vf_delogo; extern AVFilter ff_vf_denoise_vaapi; +extern AVFilter ff_vf_derain; extern AVFilter ff_vf_deshake; extern AVFilter ff_vf_despill; extern AVFilter ff_vf_detelecine; diff --git a/libavfilter/vf_derain.c b/libavfilter/vf_derain.c new file mode 100644 index 00..05d9fc5e7d --- /dev/null +++ b/libavfilter/vf_derain.c @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2019 Xuewei Meng + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Filter implementing image derain filter using deep convolutional networks. + * https://arxiv.org/abs/1609.05158 + * http://openaccess.thecvf.com/content_ECCV_2018/html/Xia_Li_Recurrent_Squeeze-and-Excitation_Context_ECCV_2018_paper.html + */ + +#include "libavutil/opt.h" +#include "libavformat/avio.h" +#include "libswscale/swscale.h" +#include "avfilter.h" +#include "dnn_interface.h" +#include "formats.h" +#include "internal.h" + +typedef struct DRContext { +const AVClass *class; + +char *model_filename; +DNNBackendType backend_type; +DNNModule *dnn_module; +DNNModel *model; +DNNInputData input; +DNNDataoutput; +} DRContext; + +#define CLIP(x, min, max) (x < min ? min : (x > max ? max : x)) +#define OFFSET(x) offsetof(DRContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FL
Re: [FFmpeg-devel] native mode in FFmpeg DNN module
Em qui, 23 de mai de 2019 às 00:06, Guo, Yejun escreveu: > > > > > > > > > > Option 2) > > > > > > > Write c code in FFmpeg to convert tensorflow file format (format > > > > > > > 1) > > > directly > > > > > > into memory representation (format 3), and so we controls > > > > > > everything in > > > > > > ffmpeg community. And the conversion can be extended to import more > > > file > > > > > > formats such as torch, darknet, etc. One example is that OpenCV uses > > > this > > > > > > method. > > > > > > > > > > > > > > The in memory representation (format 3) can still be current. > > > > > > > > > > > > > > > > > > > Option 2 would be ideal, as it does not introduce any dependency for > > > > > > using the native backend. > > > > > > Yet I'm not sure how complex implementing the tf model reader can > > be, > > > > > > If I remember correctly the student said it was not trivial at the > > > > > > time. > > > > > > > > > > yes, it is not easy, but I think it is worthy to do it. Here is a > > > > > reference > > > example > > > > > for the complexity, see > > > > > > > > > > https://github.com/opencv/opencv/blob/master/modules/dnn/src/tensorflow/ > > > > > tf_importer.cpp. > > > > > > > > > > > > > > > > > Is the tf model file stable? if not it will be a maintenance burden > > > > > > to > > > > > > keep it working whenever tf releases a new version. This point makes > > > > > > me think having control over our file format is good. > > > > > > > > > > imho, this issue is always there, no matter which method used, unless > > > > > our > > > > > format could be exported by tensorflow (it has little possibility). 
> > > > > > > > > > Whenever tf releases a new version with a new file format, we still > > > > > have > > to > > > > > change the python script in phase 1 (convert tf file model to our > > > > > format) > > > which > > > > > is even an external dependency at > > > > > https://github.com/HighVoltageRocknRoll/sr, > > > > > > > > > > As from effort perspective, the current implementation is better since > > > python > > > > > script is simpler. But I think we are still worth implementing option > > > > > 2 as > > the > > > > > ideal technical direction. > > > > > > > > I checked a bit more about https://github.com/HighVoltageRocknRoll/sr, > > > > it > > is > > > actually > > > > not an converter (from tf model to native model), but hard code for > > > > given > > > models. > > > > And the native model is not exactly the same as tf model, it even > > > > changes > > the > > > behavior > > > > of pad parameter of conv layer. > > > > > > > > If community is open to option 2, I'll try it. > > > > > > > Option 2 is fine for me. > > > > that's great, :) > > looks that option 2 is a bit complex, TF model file is in protocol buffers > (protobuf) format and not easy to parse it with simple c code. > > Since there is no official c support for protobuf, let's first image how the > work can be done via official c++ support. > > 1. get protobuf compiler protoc, .h header files and .so library files > (download or build from > https://github.com/protocolbuffers/protobuf/tree/master/src). > 2. get tensorflow model's .proto files from > https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/framework. > 3. generate .cc/.h files from .proto files (see step 2) via protoc (see step > 1). > 4. let the generated .cc/.h files be part of ffmpeg source tree, and build > with protobuf header/library files. > 5. at run time, the protobuf libraries are invoked. It means that the system > should have installed protobuf dev package. 
> > furthermore, there is a compatible problem between the protobuf compiler, > header files and library files. > So, as a practice to fix it, the method is to make the protobuf source code > be part of ffmpeg source tree. (it is a common practice, so we can many other > projects contain the protobuf source code). > > I guess the above method is not acceptable in ffmpeg. I would be glad to > continue if the community embrace this change. :) Indeed I think it is not acceptable. > > While the current implementation has external dependency, my new suggestion > is: > - add a python script under .../libavfilter/dnn/ (all other dnn source > files will be also moved here later), so ffmpeg has the full control on it. I'm not sure about the policy on putting secondary scripts with the main code, but another option is to create a repo controlled by ffmpeg maybe? I think this option would also help GSoC students that work with dnn, so they don't have to depend on previous students maintaining independent repositories. > - it is a script to convert tensorflow model file into native model file. > (other formats such as caffe, torch can also be supported later if needed) > > thanks. > > ___ > ffmpeg-devel mailing list > ffmpeg-devel@ffmpeg.org > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel > > To unsubscribe, visit link above
Re: [FFmpeg-devel] [PATCH] lavfi/vf_scale_cuda: Add format option
On 24/05/2019 01:49, Josh Allmann wrote: Makes certain usages of the lavfi API easier. --- libavfilter/vf_scale_cuda.c | 12 +++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/libavfilter/vf_scale_cuda.c b/libavfilter/vf_scale_cuda.c index b7cdb81081..6b1ef2bb6f 100644 --- a/libavfilter/vf_scale_cuda.c +++ b/libavfilter/vf_scale_cuda.c @@ -81,6 +81,7 @@ typedef struct CUDAScaleContext { char *w_expr; ///< width expression string char *h_expr; ///< height expression string +char *format_str; CUcontext cu_ctx; CUmodulecu_module; @@ -101,7 +102,15 @@ static av_cold int cudascale_init(AVFilterContext *ctx) { CUDAScaleContext *s = ctx->priv; -s->format = AV_PIX_FMT_NONE; +if (!strcmp(s->format_str, "same")) { +s->format = AV_PIX_FMT_NONE; +} else { +s->format = av_get_pix_fmt(s->format_str); +if (s->format == AV_PIX_FMT_NONE) { +av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", s->format_str); +return AVERROR(EINVAL); +} +} s->frame = av_frame_alloc(); if (!s->frame) return AVERROR(ENOMEM); @@ -533,6 +542,7 @@ fail: static const AVOption options[] = { { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS }, { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS }, +{ "format", "Output pixel format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS }, { NULL }, }; I'm not sure what to think about a dummy option like this. It might be very confusing for users to see a format option, which only accepts a single value, "same", and effectively does nothing. Not strictly against it, since I can see the convenience it adds when building command lines, but I'd like some second opinions on this. 
smime.p7s Description: S/MIME Cryptographic Signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] avcodec/decode: Do not output subtitle frames if the packet is marked with `AV_PKT_FLAG_DISCARD`.
On 23/05/2019 23:58, Darren Mo wrote: > To clarify, do you mean we should merge this now or wait for the second > patch, which fixes the root cause? I have no strong opinion on it. Unsure which is better for a user experience, given they'll both be broken in some way. I'd say merge if you feel it is most correct; nobody has raised any objections. - Derek ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] avformat/cache - delete cache file after closing handle
On 24-05-2019 11:24 AM, Gyan wrote: On 24-05-2019 02:06 AM, Hendrik Leppkes wrote: On Thu, May 23, 2019 at 9:55 PM Gyan wrote: On 24-05-2019 12:58 AM, Nicolas George wrote: Gyan (12019-05-24): avpriv_io_delete will call the file protocol's delete which is guarded with a header check, not done here. Do you have report of a build failure caused by unlink()? No. I assume that the guard in the file proto callback is germane and the patch was subject to review. Do you know it isn't? Since we now have a generic wrapper function, isn't that better for future maintenance? Not that I see. Something that makes it harder to follow the code from the call site to the actual function is not good for maintenance. The point of modularizing the op here is to have a single interface - why stick with a direct external call? At the time it was added, there was no alternative. Don't mind it either way, but this feels like a useless game of ping-pong. Just stick to unlink. If there is no reason to change it, then the preference is to just keep it as-is. We're not using the file protocol to create the file, so no reason to use it to delete it. Will push tonight. Gyan ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH] avcodec/libx265: Support full range videos
Signed-off-by: Derek Buitenhuis --- libavcodec/libx265.c | 18 +- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/libavcodec/libx265.c b/libavcodec/libx265.c index 07bca81aef..f56def53d5 100644 --- a/libavcodec/libx265.c +++ b/libavcodec/libx265.c @@ -133,6 +133,14 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx) return AVERROR(EINVAL); } + +ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1; + +ctx->params->vui.bEnableVideoFullRangeFlag = avctx->pix_fmt == AV_PIX_FMT_YUVJ420P || + avctx->pix_fmt == AV_PIX_FMT_YUVJ422P || + avctx->pix_fmt == AV_PIX_FMT_YUVJ444P || + avctx->color_range == AVCOL_RANGE_JPEG; + if ((avctx->color_primaries <= AVCOL_PRI_SMPTE432 && avctx->color_primaries != AVCOL_PRI_UNSPECIFIED) || (avctx->color_trc <= AVCOL_TRC_ARIB_STD_B67 && @@ -140,7 +148,6 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx) (avctx->colorspace <= AVCOL_SPC_ICTCP && avctx->colorspace != AVCOL_SPC_UNSPECIFIED)) { -ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1; ctx->params->vui.bEnableColorDescriptionPresentFlag = 1; // x265 validates the parameters internally @@ -454,8 +461,11 @@ FF_ENABLE_DEPRECATION_WARNINGS static const enum AVPixelFormat x265_csp_eight[] = { AV_PIX_FMT_YUV420P, +AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUV422P, +AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV444P, +AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_GBRP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE @@ -463,8 +473,11 @@ static const enum AVPixelFormat x265_csp_eight[] = { static const enum AVPixelFormat x265_csp_ten[] = { AV_PIX_FMT_YUV420P, +AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUV422P, +AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV444P, +AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_GBRP, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, @@ -477,8 +490,11 @@ static const enum AVPixelFormat x265_csp_ten[] = { static const enum AVPixelFormat x265_csp_twelve[] = { AV_PIX_FMT_YUV420P, +AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUV422P, +AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV444P, +AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_GBRP, 
AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, -- 2.20.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] native mode in FFmpeg DNN module
> 在 2019年5月24日,20:34,Pedro Arthur 写道: > > Em qui, 23 de mai de 2019 às 00:06, Guo, Yejun escreveu: >> >> >> Option 2) Write c code in FFmpeg to convert tensorflow file format (format 1) directly >>> into memory representation (format 3), and so we controls everything in >>> ffmpeg community. And the conversion can be extended to import more file >>> formats such as torch, darknet, etc. One example is that OpenCV uses this >>> method. The in memory representation (format 3) can still be current. >>> >>> Option 2 would be ideal, as it does not introduce any dependency for >>> using the native backend. >>> Yet I'm not sure how complex implementing the tf model reader can >>> be, >>> If I remember correctly the student said it was not trivial at the >>> time. >> >> yes, it is not easy, but I think it is worthy to do it. Here is a >> reference example >> for the complexity, see >> >>> https://github.com/opencv/opencv/blob/master/modules/dnn/src/tensorflow/ >> tf_importer.cpp. >> >>> >>> Is the tf model file stable? if not it will be a maintenance burden to >>> keep it working whenever tf releases a new version. This point makes >>> me think having control over our file format is good. >> >> imho, this issue is always there, no matter which method used, unless our >> format could be exported by tensorflow (it has little possibility). >> >> Whenever tf releases a new version with a new file format, we still have >>> to >> change the python script in phase 1 (convert tf file model to our format) which >> is even an external dependency at >> https://github.com/HighVoltageRocknRoll/sr, >> >> As from effort perspective, the current implementation is better since python >> script is simpler. But I think we are still worth implementing option 2 >> as >>> the >> ideal technical direction. > > I checked a bit more about https://github.com/HighVoltageRocknRoll/sr, it >>> is actually > not an converter (from tf model to native model), but hard code for given models. 
> And the native model is not exactly the same as tf model, it even changes >>> the behavior > of pad parameter of conv layer. > > If community is open to option 2, I'll try it. > Option 2 is fine for me. >>> >>> that's great, :) >> >> looks that option 2 is a bit complex, TF model file is in protocol buffers >> (protobuf) format and not easy to parse it with simple c code. >> >> Since there is no official c support for protobuf, let's first image how the >> work can be done via official c++ support. >> >> 1. get protobuf compiler protoc, .h header files and .so library files >> (download or build from >> https://github.com/protocolbuffers/protobuf/tree/master/src). >> 2. get tensorflow model's .proto files from >> https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/framework. >> 3. generate .cc/.h files from .proto files (see step 2) via protoc (see step >> 1). >> 4. let the generated .cc/.h files be part of ffmpeg source tree, and build >> with protobuf header/library files. >> 5. at run time, the protobuf libraries are invoked. It means that the system >> should have installed protobuf dev package. >> >> furthermore, there is a compatible problem between the protobuf compiler, >> header files and library files. >> So, as a practice to fix it, the method is to make the protobuf source code >> be part of ffmpeg source tree. (it is a common practice, so we can many >> other projects contain the protobuf source code). >> >> I guess the above method is not acceptable in ffmpeg. I would be glad to >> continue if the community embrace this change. :) > Indeed I think it is not acceptable. > >> >> While the current implementation has external dependency, my new suggestion >> is: >> - add a python script under .../libavfilter/dnn/ (all other dnn source >> files will be also moved here later), so ffmpeg has the full control on it. 
> I'm not sure about the policy on putting secondary scripts with the > main code, but another option is to create a repo controlled by ffmpeg > maybe? > I think this option would also help GSoC students that work with dnn, > so they don't have to depend on previous students maintaining > independent repositories. Yes, I agreed with you. I think this is a better way. maintaining the repositories at one repo controlled by ffmpeg. > >> - it is a script to convert tensorflow model file into native model file. >> (other formats such as caffe, torch can also be supported later if needed) >> >> thanks. >> >> ___ >> ffmpeg-devel mailing list >> ffmpeg-devel@ffmpeg.org >> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel >> >> To unsubscribe, visit link above, or email >>
[FFmpeg-devel] [PATCH] swresample/swresample: check for invalid sample rates
Signed-off-by: Paul B Mahol --- libswresample/swresample.c | 8 1 file changed, 8 insertions(+) diff --git a/libswresample/swresample.c b/libswresample/swresample.c index 6d28e6a798..1ac5ef9a30 100644 --- a/libswresample/swresample.c +++ b/libswresample/swresample.c @@ -164,6 +164,14 @@ av_cold int swr_init(struct SwrContext *s){ return AVERROR(EINVAL); } +if(s-> in_sample_rate <= 0){ +av_log(s, AV_LOG_ERROR, "Requested input sample rate %d is invalid\n", s->in_sample_rate); +return AVERROR(EINVAL); +} +if(s->out_sample_rate <= 0){ +av_log(s, AV_LOG_ERROR, "Requested output sample rate %d is invalid\n", s->out_sample_rate); +return AVERROR(EINVAL); +} s->out.ch_count = s-> user_out_ch_count; s-> in.ch_count = s-> user_in_ch_count; s->used_ch_count = s->user_used_ch_count; -- 2.17.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] lavfi/vf_scale_cuda: Add format option
On Fri, 24 May 2019 at 06:00, Timo Rothenpieler wrote: > > On 24/05/2019 01:49, Josh Allmann wrote: > > Makes certain usages of the lavfi API easier. > > --- > > libavfilter/vf_scale_cuda.c | 12 +++- > > 1 file changed, 11 insertions(+), 1 deletion(-) > > > > diff --git a/libavfilter/vf_scale_cuda.c b/libavfilter/vf_scale_cuda.c > > index b7cdb81081..6b1ef2bb6f 100644 > > --- a/libavfilter/vf_scale_cuda.c > > +++ b/libavfilter/vf_scale_cuda.c > > @@ -81,6 +81,7 @@ typedef struct CUDAScaleContext { > > > > char *w_expr; ///< width expression string > > char *h_expr; ///< height expression string > > +char *format_str; > > > > CUcontext cu_ctx; > > CUmodulecu_module; > > @@ -101,7 +102,15 @@ static av_cold int cudascale_init(AVFilterContext *ctx) > > { > > CUDAScaleContext *s = ctx->priv; > > > > -s->format = AV_PIX_FMT_NONE; > > +if (!strcmp(s->format_str, "same")) { > > +s->format = AV_PIX_FMT_NONE; > > +} else { > > +s->format = av_get_pix_fmt(s->format_str); > > +if (s->format == AV_PIX_FMT_NONE) { > > +av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", > > s->format_str); > > +return AVERROR(EINVAL); > > +} > > +} > > s->frame = av_frame_alloc(); > > if (!s->frame) > > return AVERROR(ENOMEM); > > @@ -533,6 +542,7 @@ fail: > > static const AVOption options[] = { > > { "w", "Output video width", OFFSET(w_expr), > > AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS }, > > { "h", "Output video height", OFFSET(h_expr), > > AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS }, > > +{ "format", "Output pixel format", OFFSET(format_str), > > AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS }, > > { NULL }, > > }; > > I'm not sure what to think about a dummy option like this. It might be > very confusing for users to see a format option, which only accepts a > single value, "same", and effectively does nothing. > Not sure I understand the issue. 
"same" is the default (terminology borrowed from the scale_npp filter), and it'll assign the format to whatever is passed in (eg, format=yuv420p assigns that). > > Not strictly against it, since I can see the convenience it adds when > building command lines, but I'd like some second opinions on this. > Actually I'm using the API, albeit with some of lavfi conveniences to parse filter strings. This avoids "wiring in" the output format manually when crossing the lavfi boundary. Here's a example that demonstrates the issue via CLI (this may actually be a bug elsewhere?): Broken: ffmpeg -loglevel verbose -hwaccel cuvid -c:v h264_cuvid -i input.ts -an -lavfi scale_cuda=w=426:h=240,hwdownload,format=yuv420p -c:v libx264 out.ts Working: ffmpeg -loglevel verbose -hwaccel cuvid -c:v h264_cuvid -i input.ts -an -lavfi scale_cuda=w=426:h=240:format=yuv420p,hwdownload,format=yuv420p -c:v libx264 out.ts ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH] lavfi/vf_scale_cuda: Reset frame size after acquiring from hwframe.
The first frame is scaled correctly, and subsequent frames are over-scaled / cropped since the frame data is reset with the hwframe after each invocation of the scaler. The hwframe-allocated frame has a width/height that is 32-bit aligned. The scaler uses this aligned width / height as its target, leading to "over-scaling" and then cropping of the result. To generate a broken test sample: ffmpeg -hwaccel cuvid -c:v h264_cuvid -i -an \ -lavfi scale_cuda=w=426:h=240 -c:v h264_nvenc --- Tested with NV12 and 420P inputs. Noting that YUV444P seems generally broken - both before/after this patch. libavfilter/vf_scale_cuda.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libavfilter/vf_scale_cuda.c b/libavfilter/vf_scale_cuda.c index 6b1ef2bb6f..13eb3ad24c 100644 --- a/libavfilter/vf_scale_cuda.c +++ b/libavfilter/vf_scale_cuda.c @@ -489,6 +489,8 @@ static int cudascale_scale(AVFilterContext *ctx, AVFrame *out, AVFrame *in) av_frame_move_ref(out, s->frame); av_frame_move_ref(s->frame, s->tmp_frame); +s->frame->width = s->planes_out[0].width; +s->frame->height= s->planes_out[0].height; ret = av_frame_copy_props(out, in); if (ret < 0) -- 2.17.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] lavfi/vf_scale_cuda: Add format option
On 24.05.2019 18:27, Josh Allmann wrote: On Fri, 24 May 2019 at 06:00, Timo Rothenpieler wrote: On 24/05/2019 01:49, Josh Allmann wrote: Makes certain usages of the lavfi API easier. --- libavfilter/vf_scale_cuda.c | 12 +++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/libavfilter/vf_scale_cuda.c b/libavfilter/vf_scale_cuda.c index b7cdb81081..6b1ef2bb6f 100644 --- a/libavfilter/vf_scale_cuda.c +++ b/libavfilter/vf_scale_cuda.c @@ -81,6 +81,7 @@ typedef struct CUDAScaleContext { char *w_expr; ///< width expression string char *h_expr; ///< height expression string +char *format_str; CUcontext cu_ctx; CUmodulecu_module; @@ -101,7 +102,15 @@ static av_cold int cudascale_init(AVFilterContext *ctx) { CUDAScaleContext *s = ctx->priv; -s->format = AV_PIX_FMT_NONE; +if (!strcmp(s->format_str, "same")) { +s->format = AV_PIX_FMT_NONE; +} else { +s->format = av_get_pix_fmt(s->format_str); +if (s->format == AV_PIX_FMT_NONE) { +av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", s->format_str); +return AVERROR(EINVAL); +} +} s->frame = av_frame_alloc(); if (!s->frame) return AVERROR(ENOMEM); @@ -533,6 +542,7 @@ fail: static const AVOption options[] = { { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS }, { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS }, +{ "format", "Output pixel format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS }, { NULL }, }; I'm not sure what to think about a dummy option like this. It might be very confusing for users to see a format option, which only accepts a single value, "same", and effectively does nothing. Not sure I understand the issue. "same" is the default (terminology borrowed from the scale_npp filter), and it'll assign the format to whatever is passed in (eg, format=yuv420p assigns that). Oh, I misread that code as just always throwing an error if it's != "same". 
Unfortunately, that option is omitted for a reason. If you look at scalecuda_resize: https://git.videolan.org/?p=ffmpeg.git;a=blob;f=libavfilter/vf_scale_cuda.c;h=b7cdb81081ff4a34e7b641c533fc23a5714fed61;hb=HEAD#l380 It has the assumption built into it that the output frame has the same format as the input frame. So if you were to set format=nv12 and then input a yuv420p frame, this will most likely crash or at least severely misbehave. I would not be opposed to scale_cuda gaining the ability to also change frame pix_fmts, we are lacking such a filter at the moment if one ignores scale_npp. But in its current state, it can't do that. Not strictly against it, since I can see the convenience it adds when building command lines, but I'd like some second opinions on this. Actually I'm using the API, albeit with some of lavfi conveniences to parse filter strings. This avoids "wiring in" the output format manually when crossing the lavfi boundary. Here's a example that demonstrates the issue via CLI (this may actually be a bug elsewhere?): Broken: ffmpeg -loglevel verbose -hwaccel cuvid -c:v h264_cuvid -i input.ts -an -lavfi scale_cuda=w=426:h=240,hwdownload,format=yuv420p -c:v libx264 out.ts Working: ffmpeg -loglevel verbose -hwaccel cuvid -c:v h264_cuvid -i input.ts -an -lavfi scale_cuda=w=426:h=240:format=yuv420p,hwdownload,format=yuv420p -c:v libx264 out.ts Is this actually working in a sense where the result looks sensible? Cause with how the code currently is, scale_cuda with format set to yuv420p and getting nv12 as input from h264_cuvid will produce a frame labeled as yuv420p but containing nv12 data. smime.p7s Description: S/MIME Cryptographic Signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] lavfi/vf_scale_cuda: Reset frame size after acquiring from hwframe.
On 24.05.2019 18:39, Josh Allmann wrote: The first frame is scaled correctly, and subsequent frames are over-scaled / cropped since the frame data is reset with the hwframe after each invocation of the scaler. The hwframe-allocated frame has a width/height that is 32-bit aligned. The scaler uses this aligned width / height as its target, leading to "over-scaling" and then cropping of the result. To generate a broken test sample: ffmpeg -hwaccel cuvid -c:v h264_cuvid -i -an \ -lavfi scale_cuda=w=426:h=240 -c:v h264_nvenc --- Tested with NV12 and 420P inputs. Noting that YUV444P seems generally broken - both before/after this patch. libavfilter/vf_scale_cuda.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libavfilter/vf_scale_cuda.c b/libavfilter/vf_scale_cuda.c index 6b1ef2bb6f..13eb3ad24c 100644 --- a/libavfilter/vf_scale_cuda.c +++ b/libavfilter/vf_scale_cuda.c @@ -489,6 +489,8 @@ static int cudascale_scale(AVFilterContext *ctx, AVFrame *out, AVFrame *in) av_frame_move_ref(out, s->frame); av_frame_move_ref(s->frame, s->tmp_frame); +s->frame->width = s->planes_out[0].width; +s->frame->height= s->planes_out[0].height; ret = av_frame_copy_props(out, in); if (ret < 0) This is certainly correct. The original author of this must have wrongly assumed that av_frame_copy_props takes care of it. Wonder how this was never noticed before. Applied, thanks. smime.p7s Description: S/MIME Cryptographic Signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] swresample/swresample: check for invalid sample rates
On 24/05/2019 17:05, Paul B Mahol wrote: > Signed-off-by: Paul B Mahol > --- > libswresample/swresample.c | 8 > 1 file changed, 8 insertions(+) Seems reasonable. What happens if these aren't in place? - Derek ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] lavfi/vf_scale_cuda: Add format option
On Fri, 24 May 2019 at 09:55, Timo Rothenpieler wrote: > > On 24.05.2019 18:27, Josh Allmann wrote: > > On Fri, 24 May 2019 at 06:00, Timo Rothenpieler > > wrote: > >> > >> On 24/05/2019 01:49, Josh Allmann wrote: > >>> Makes certain usages of the lavfi API easier. > >>> --- > >>>libavfilter/vf_scale_cuda.c | 12 +++- > >>>1 file changed, 11 insertions(+), 1 deletion(-) > >>> > >>> diff --git a/libavfilter/vf_scale_cuda.c b/libavfilter/vf_scale_cuda.c > >>> index b7cdb81081..6b1ef2bb6f 100644 > >>> --- a/libavfilter/vf_scale_cuda.c > >>> +++ b/libavfilter/vf_scale_cuda.c > >>> @@ -81,6 +81,7 @@ typedef struct CUDAScaleContext { > >>> > >>>char *w_expr; ///< width expression string > >>>char *h_expr; ///< height expression string > >>> +char *format_str; > >>> > >>>CUcontext cu_ctx; > >>>CUmodulecu_module; > >>> @@ -101,7 +102,15 @@ static av_cold int cudascale_init(AVFilterContext > >>> *ctx) > >>>{ > >>>CUDAScaleContext *s = ctx->priv; > >>> > >>> -s->format = AV_PIX_FMT_NONE; > >>> +if (!strcmp(s->format_str, "same")) { > >>> +s->format = AV_PIX_FMT_NONE; > >>> +} else { > >>> +s->format = av_get_pix_fmt(s->format_str); > >>> +if (s->format == AV_PIX_FMT_NONE) { > >>> +av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", > >>> s->format_str); > >>> +return AVERROR(EINVAL); > >>> +} > >>> +} > >>>s->frame = av_frame_alloc(); > >>>if (!s->frame) > >>>return AVERROR(ENOMEM); > >>> @@ -533,6 +542,7 @@ fail: > >>>static const AVOption options[] = { > >>>{ "w", "Output video width", OFFSET(w_expr), > >>> AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS }, > >>>{ "h", "Output video height", OFFSET(h_expr), > >>> AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS }, > >>> +{ "format", "Output pixel format", OFFSET(format_str), > >>> AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS }, > >>>{ NULL }, > >>>}; > >> > >> I'm not sure what to think about a dummy option like this. 
It might be > >> very confusing for users to see a format option, which only accepts a > >> single value, "same", and effectively does nothing. > >> > > > > Not sure I understand the issue. "same" is the default (terminology > > borrowed from the scale_npp filter), and it'll assign the format to > > whatever is passed in (eg, format=yuv420p assigns that). > > Oh, I misread that code as just always throwing an error if it's != "same". > > Unfortunately, that option is omitted for a reason. > If you look at scalecuda_resize: > https://git.videolan.org/?p=ffmpeg.git;a=blob;f=libavfilter/vf_scale_cuda.c;h=b7cdb81081ff4a34e7b641c533fc23a5714fed61;hb=HEAD#l380 > > It has the assumption built into it that the output frame has the same > format as the input frame. So if you were to set format=nv12 and then > input a yuv420p frame, this will most likely crash or at least severely > misbehave. > > I would not be opposed to scale_cuda gaining the ability to also change > frame pix_fmts, we are lacking such a filter at the moment if one > ignores scale_npp. > But in its current state, it can't do that. > Ah! Makes sense now - thanks for the explanation. > >> > >> Not strictly against it, since I can see the convenience it adds when > >> building command lines, but I'd like some second opinions on this. > >> > > > > Actually I'm using the API, albeit with some of lavfi conveniences to > > parse filter strings. This avoids "wiring in" the output format > > manually when crossing the lavfi boundary. 
> > > > Here's a example that demonstrates the issue via CLI (this may > > actually be a bug elsewhere?): > > > > Broken: > > ffmpeg -loglevel verbose -hwaccel cuvid -c:v h264_cuvid -i input.ts > > -an -lavfi scale_cuda=w=426:h=240,hwdownload,format=yuv420p -c:v > > libx264 out.ts > > > > Working: > > ffmpeg -loglevel verbose -hwaccel cuvid -c:v h264_cuvid -i input.ts > > -an -lavfi scale_cuda=w=426:h=240:format=yuv420p,hwdownload,format=yuv420p > > -c:v libx264 out.ts > > Is this actually working in a sense where the result looks sensible? > Cause with how the code currently is, scale_cuda with format set to > yuv420p and getting nv12 as input from h264_cuvid will produce a frame > labeled as yuv420p but containing nv12 data. > You are correct - I didn't look at the output here. ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 2/2] avformat/webm_chunk: Check header filename length
Michael Niedermayer: > On Fri, May 03, 2019 at 06:31:00AM +, Andreas Rheinhardt wrote: >> Michael Niedermayer: >>> Signed-off-by: Michael Niedermayer >>> --- >>> libavformat/webm_chunk.c | 8 +++- >>> 1 file changed, 7 insertions(+), 1 deletion(-) >>> >>> diff --git a/libavformat/webm_chunk.c b/libavformat/webm_chunk.c >>> index 561ec152e7..e2fbd8be1d 100644 >>> --- a/libavformat/webm_chunk.c >>> +++ b/libavformat/webm_chunk.c >>> @@ -88,6 +88,8 @@ static int get_chunk_filename(AVFormatContext *s, int >>> is_header, char filename[M >>> { >>> WebMChunkContext *wc = s->priv_data; >>> AVFormatContext *oc = wc->avf; >>> +int len; >>> + >>> if (!filename) { >>> return AVERROR(EINVAL); >>> } >>> @@ -96,7 +98,11 @@ static int get_chunk_filename(AVFormatContext *s, int >>> is_header, char filename[M >>> av_log(oc, AV_LOG_ERROR, "No header filename provided\n"); >>> return AVERROR(EINVAL); >>> } >>> -av_strlcpy(filename, wc->header_filename, MAX_FILENAME_SIZE); >>> +len = av_strlcpy(filename, wc->header_filename, MAX_FILENAME_SIZE); >>> +if (len >= MAX_FILENAME_SIZE) { >>> +av_log(oc, AV_LOG_ERROR, "header filename too long\n"); >>> +return AVERROR(EINVAL); >>> +} >>> } else { >>> if (av_get_frame_filename(filename, MAX_FILENAME_SIZE, >>>s->url, wc->chunk_index - 1) < 0) { >>> >> len has an unnecessarily broad scope. The string is intentionally >> started with a lower case letter because the parameter "header" is >> lower case, too, isn't it? If so, it's fine and LGTM apart from the scope. > > Ill capitalize the error message and move the "int len" into the block > > thanks > Good. The patch that apparently aroused your interest in the webm_chunk muxer was part of a patchset that was mainly about fixing ticket #5752, a NULL pointer dereference. Would you mind taking a look at the other patches? 
They are here: https://ffmpeg.org/pipermail/ffmpeg-devel/2019-April/242903.html https://ffmpeg.org/pipermail/ffmpeg-devel/2019-April/242902.html - Andreas ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] avformat/cache - delete cache file after closing handle
On 24-05-2019 08:53 PM, Gyan wrote: On 24-05-2019 11:24 AM, Gyan wrote: Will push tonight. Pushed as 50789e356d65270698d0d8495323ebe76a46091a Gyan ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] lavfi/vf_scale_cuda: Reset frame size after acquiring from hwframe.
Actually, I've submitted the fix a while ago and got ignored. I then wrote to Timo directly to his email and got ignored again. https://github.com/Svechnikov/ffmpeg-scale-cuda-problem Le ven. 24 mai 2019 19:25, Timo Rothenpieler a écrit : > On 24.05.2019 18:39, Josh Allmann wrote: > > The first frame is scaled correctly, and subsequent frames are > > over-scaled / cropped since the frame data is reset with the > > hwframe after each invocation of the scaler. > > > > The hwframe-allocated frame has a width/height that is 32-bit > > aligned. The scaler uses this aligned width / height as its target, > > leading to "over-scaling" and then cropping of the result. > > > > To generate a broken test sample: > > > >ffmpeg -hwaccel cuvid -c:v h264_cuvid -i -an \ > > -lavfi scale_cuda=w=426:h=240 -c:v h264_nvenc > > --- > > > > Tested with NV12 and 420P inputs. > > > > Noting that YUV444P seems generally broken - both before/after this > patch. > > > > > > libavfilter/vf_scale_cuda.c | 2 ++ > > 1 file changed, 2 insertions(+) > > > > diff --git a/libavfilter/vf_scale_cuda.c b/libavfilter/vf_scale_cuda.c > > index 6b1ef2bb6f..13eb3ad24c 100644 > > --- a/libavfilter/vf_scale_cuda.c > > +++ b/libavfilter/vf_scale_cuda.c > > @@ -489,6 +489,8 @@ static int cudascale_scale(AVFilterContext *ctx, > AVFrame *out, AVFrame *in) > > > > av_frame_move_ref(out, s->frame); > > av_frame_move_ref(s->frame, s->tmp_frame); > > +s->frame->width = s->planes_out[0].width; > > +s->frame->height= s->planes_out[0].height; > > > > ret = av_frame_copy_props(out, in); > > if (ret < 0) > > This is certainly correct. The original author of this must have wrongly > assumed that av_frame_copy_props takes care of it. > Wonder how this was never noticed before. > > Applied, thanks. > > ___ > ffmpeg-devel mailing list > ffmpeg-devel@ffmpeg.org > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel > > To unsubscribe, visit link above, or email > ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe". 
___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] lavfi/vf_scale_cuda: Reset frame size after acquiring from hwframe.
On 24.05.2019 21:32, Сергей Свечников wrote: Actually, I've submitted the fix a while ago and got ignored. I then wrote to Timo directly to his email and got ignored again. https://github.com/Svechnikov/ffmpeg-scale-cuda-problem I simply missed the original mail to the list, sorry about that. The mail you sent to me directly got caught by Spamassassin due to "Excess base64 in From: field", which it also tagged this mail with, but it coming via this list saved it. smime.p7s Description: S/MIME Cryptographic Signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] swresample/swresample: check for invalid sample rates
On 5/24/19, Derek Buitenhuis wrote: > On 24/05/2019 17:05, Paul B Mahol wrote: >> Signed-off-by: Paul B Mahol >> --- >> libswresample/swresample.c | 8 >> 1 file changed, 8 insertions(+) > > Seems reasonable. What happens if these aren't in place? It may divide by zero and crash with a floating-point exception. ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] avformat/utils: fix stream ordering for program ID stream specifiers
On Thu, 23 May 2019, Marton Balint wrote: On Sun, 19 May 2019, Marton Balint wrote: Fixes a regression introduced in dbfd042983eed8586d4048795c00af820f5b6b1f. Will apply soon. Applied. Regards, Marton ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 1/2] avcodec/scpr: fix checking ret value of decode_run_i
On Thu, 23 May 2019, Paul B Mahol wrote: On 5/23/19, Marton Balint wrote: Fixes Coverity CID 1441460. Signed-off-by: Marton Balint --- libavcodec/scpr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavcodec/scpr.c b/libavcodec/scpr.c index 10fc994ecf..317950dafb 100644 --- a/libavcodec/scpr.c +++ b/libavcodec/scpr.c @@ -359,7 +359,7 @@ static int decompress_i(AVCodecContext *avctx, uint32_t *dst, int linesize) ret = decode_run_i(avctx, ptype, run, &x, &y, clr, dst, linesize, &lx, &ly, backstep, off, &cx, &cx1); -if (run < 0) +if (ret < 0) return ret; } OK Thanks, applied the series. Regards, Marton ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 1/2] avfilter/f_loop: fix video loop issues with 0 size or when size is bigger than input
On Fri, 24 May 2019, Paul B Mahol wrote: On 5/23/19, Marton Balint wrote: Fixes infinite loop with -vf loop=loop=1 and also fixes looping when the input has fewer frames than the specified loop size. Possible regressions since ef1aadffc785b48ed62c45d954289e754f43ef46. Signed-off-by: Marton Balint --- libavfilter/f_loop.c | 8 +--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/libavfilter/f_loop.c b/libavfilter/f_loop.c index d9d55f9837..fcbd742eb4 100644 --- a/libavfilter/f_loop.c +++ b/libavfilter/f_loop.c @@ -343,7 +343,7 @@ static int activate(AVFilterContext *ctx) FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink); -if (!s->eof && (s->nb_frames < s->size || !s->loop)) { +if (!s->eof && (s->nb_frames < s->size || !s->loop || !s->size)) { ret = ff_inlink_consume_frame(inlink, &frame); if (ret < 0) return ret; @@ -352,11 +352,13 @@ static int activate(AVFilterContext *ctx) } if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) { -if (status == AVERROR_EOF) +if (status == AVERROR_EOF) { +s->size = s->nb_frames; s->eof = 1; +} } -if (s->eof && (s->loop == 0 || s->nb_frames < s->size)) { +if (s->eof && (!s->loop || !s->size)) { ff_outlink_set_status(outlink, AVERROR_EOF, s->duration); return 0; } -- 2.16.4 lgtm Thanks, applied the series. Regards, Marton ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 1/7] libavfilter/vf_overlay.c: change the commands style for the macro defined function
___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [FFMPEG DEVEL] [PATCH v6] fftools/ffprobe: Add S12M Timecode output as side data (such as SEI TC)
On Fri, 24 May 2019, Antonin Gouzer wrote: --- Add S12M Timecode output with the show_frame option Multiple timecodes (3) for one frame support Control side date Size to 16 Correct ffrpobe.xsd to allow multiple timecodes in side_data element --- doc/ffprobe.xsd | 8 fftools/ffprobe.c | 14 +- 2 files changed, 21 insertions(+), 1 deletion(-) I almost committed this, but found another issue: the JSON output contained the "timecode" key multiple times. Apparently in order for the JSON output to work, we need a separate section for arrays. I managed to make those changes (see the attached patch), it affected the XML output slightly as well. Please let me know if this is satisfactory to you. Thanks, MartonFrom 3e1274d2250fca79acb5eb7b636ee924c6be2640 Mon Sep 17 00:00:00 2001 From: Antonin Gouzer Date: Fri, 24 May 2019 09:46:50 +0200 Subject: [PATCH] fftools/ffprobe: Add S12M Timecode output as side data (such as SEI TC) Slightly modified by Marton Balint to produce valid json as well. Signed-off-by: Marton Balint --- doc/ffprobe.xsd | 14 ++ fftools/ffprobe.c | 18 +- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/doc/ffprobe.xsd b/doc/ffprobe.xsd index 3e58da0f46..97dc67def6 100644 --- a/doc/ffprobe.xsd +++ b/doc/ffprobe.xsd @@ -147,11 +147,25 @@ + + + + + + + + + + + + + + diff --git a/fftools/ffprobe.c b/fftools/ffprobe.c index dea489d02e..3becb6330e 100644 --- a/fftools/ffprobe.c +++ b/fftools/ffprobe.c @@ -165,6 +165,8 @@ typedef enum { SECTION_ID_FRAME_TAGS, SECTION_ID_FRAME_SIDE_DATA_LIST, SECTION_ID_FRAME_SIDE_DATA, +SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, +SECTION_ID_FRAME_SIDE_DATA_TIMECODE, SECTION_ID_FRAME_LOG, SECTION_ID_FRAME_LOGS, SECTION_ID_LIBRARY_VERSION, @@ -209,7 +211,9 @@ static struct section sections[] = { [SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, SECTION_ID_FRAME_SIDE_DATA_LIST, SECTION_ID_FRAME_LOGS, -1 } }, [SECTION_ID_FRAME_TAGS] = { SECTION_ID_FRAME_TAGS, "tags", 
SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "frame_tags" }, [SECTION_ID_FRAME_SIDE_DATA_LIST] ={ SECTION_ID_FRAME_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA, -1 }, .element_name = "side_data", .unique_name = "frame_side_data_list" }, -[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { -1 } }, +[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, -1 } }, +[SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, "timecodes", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, -1 } }, +[SECTION_ID_FRAME_SIDE_DATA_TIMECODE] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, "timecode", 0, { -1 } }, [SECTION_ID_FRAME_LOGS] = { SECTION_ID_FRAME_LOGS, "logs", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_LOG, -1 } }, [SECTION_ID_FRAME_LOG] = { SECTION_ID_FRAME_LOG, "log", 0, { -1 }, }, [SECTION_ID_LIBRARY_VERSIONS] = { SECTION_ID_LIBRARY_VERSIONS, "library_versions", SECTION_FLAG_IS_ARRAY, { SECTION_ID_LIBRARY_VERSION, -1 } }, @@ -2199,6 +2203,18 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream, char tcbuf[AV_TIMECODE_STR_SIZE]; av_timecode_make_mpeg_tc_string(tcbuf, *(int64_t *)(sd->data)); print_str("timecode", tcbuf); +} else if (sd->type == AV_FRAME_DATA_S12M_TIMECODE && sd->size == 16) { +uint32_t *tc = (uint32_t*)sd->data; +int m = FFMIN(tc[0],3); +writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST); +for (int j = 1; j <= m ; j++) { +char tcbuf[AV_TIMECODE_STR_SIZE]; +av_timecode_make_smpte_tc_string(tcbuf, tc[j], 0); +writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_TIMECODE); +print_str("value", tcbuf); +writer_print_section_footer(w); +} +writer_print_section_footer(w); } else if (sd->type == AV_FRAME_DATA_MASTERING_DISPLAY_METADATA) { AVMasteringDisplayMetadata *metadata = (AVMasteringDisplayMetadata 
*)sd->data; -- 2.16.4 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] lavc/videotoolboxenc: Fix compilation for IOS < 11.0 and OSX, < 10.13
Am 22.05.19 um 14:27 schrieb Thilo Borgmann: > Am 22.05.19 um 01:41 schrieb Jan Ekström: >> Hi, >> >> On Tue, May 14, 2019 at 4:16 PM Thilo Borgmann >> wrote: >>> >>> $Subject >>> >>> Tested compilation only, sanity test actually using it appreciated. >>> >> >> Thanks for the patch. To be completely fair, this is not to fix >> compilation for specific target systems, but rather to fix compilation >> on older SDKs (building with newer SDKs you can still build aiming for >> macOS starting from 10.9, for example). >> >> I didn't notice a patch landed on the encoder side that utilized the >> defines without further checking/ifdefs. Too bad. I think I >> specifically didn't yet merge the full/limited range patch on the >> decoder side due to related reasons. >> >> I did notice that VLC just re-defined these enum values themselves to >> stop needing to have ifdefs depending on which SDK is being utilized >> (https://github.com/videolan/vlc/commit/1b7e1c4bfcda375e2d4e657135aeaf3732e44af2#diff-a11cdb805d111956af60619d7dfa022bR735). >> I wonder if we should have a helper header that would re-define these >> enum values with their name. That way the code would look correct, and >> the resulting binary has the same features independent of the SDK it >> had been built under. >> >> What would be the opinion of people on a solution like this? > > Tested with a local definition of the symbols (like a would be header would > do). > Seems to work for building with -macosx-version-min=XXX. > > Also checked with VLC, they do these checks via things like: > > #ifndef MAC_OS_X_VERSION_10_13 > ... > #endif > > Which might be a better alternative to what I suggested. > > Thus I would be fine with a helping header. If nobody else cares, should I try to come up with something like this? Or do you want to? 
-Thilo ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH 2/7] libavfilter/vf_overlay.c: Add '\' for every line of the blend_slice_yuv function by vim column edit
Am Fr., 24. Mai 2019 um 11:36 Uhr schrieb : > > From: Limin Wang > > --- > libavfilter/vf_overlay.c | 52 > 1 file changed, 26 insertions(+), 26 deletions(-) > > diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c > index b468cedf2e..c1abd3e1b2 100644 > --- a/libavfilter/vf_overlay.c > +++ b/libavfilter/vf_overlay.c > @@ -598,32 +598,32 @@ static inline void alpha_composite(const AVFrame *src, > const AVFrame *dst, > } > } > > -static av_always_inline void blend_slice_yuv(AVFilterContext *ctx, > - AVFrame *dst, const AVFrame > *src, > - int hsub, int vsub, > - int main_has_alpha, > - int x, int y, > - int is_straight, > - int jobnr, int nb_jobs) > -{ > -OverlayContext *s = ctx->priv; > -const int src_w = src->width; > -const int src_h = src->height; > -const int dst_w = dst->width; > -const int dst_h = dst->height; > - > -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, > y, main_has_alpha, > -s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, > s->main_desc->comp[0].step, is_straight, 1, > -jobnr, nb_jobs); > -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, > y, main_has_alpha, > -s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, > s->main_desc->comp[1].step, is_straight, 1, > -jobnr, nb_jobs); > -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, > y, main_has_alpha, > -s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, > s->main_desc->comp[2].step, is_straight, 1, > -jobnr, nb_jobs); > - > -if (main_has_alpha) > -alpha_composite(src, dst, src_w, src_h, dst_w, dst_h, x, y, jobnr, > nb_jobs); > +static av_always_inline void blend_slice_yuv(AVFilterContext *ctx, > \ > + AVFrame *dst, const AVFrame > *src, \ > + int hsub, int vsub, > \ > + int main_has_alpha, > \ > + int x, int y, > \ > + int is_straight, > \ > + int jobnr, int nb_jobs) > \ > +{ > \ > +OverlayContext *s = ctx->priv; > \ > +const int src_w = src->width; > \ > +const int src_h = src->height; > \ > 
+const int dst_w = dst->width; > \ > +const int dst_h = dst->height; > \ > + > \ > +blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, > y, main_has_alpha,\ > +s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, > s->main_desc->comp[0].step, is_straight, 1, \ > +jobnr, nb_jobs); > \ > +blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, > y, main_has_alpha,\ > +s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, > s->main_desc->comp[1].step, is_straight, 1, \ > +jobnr, nb_jobs); > \ > +blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, > y, main_has_alpha,\ > +s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, > s->main_desc->comp[2].step, is_straight, 1, \ > +jobnr, nb_jobs); >
Re: [FFmpeg-devel] [PATCH 2/7] libavfilter/vf_overlay.c: Add '\' for every line of the blend_slice_yuv function by vim column edit
On Saturday, May 25, 2019, Carl Eugen Hoyos wrote: > Am Fr., 24. Mai 2019 um 11:36 Uhr schrieb : > > > > From: Limin Wang > > > > --- > > libavfilter/vf_overlay.c | 52 > > 1 file changed, 26 insertions(+), 26 deletions(-) > > > > diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c > > index b468cedf2e..c1abd3e1b2 100644 > > --- a/libavfilter/vf_overlay.c > > +++ b/libavfilter/vf_overlay.c > > @@ -598,32 +598,32 @@ static inline void alpha_composite(const AVFrame > *src, const AVFrame *dst, > > } > > } > > > > -static av_always_inline void blend_slice_yuv(AVFilterContext *ctx, > > - AVFrame *dst, const > AVFrame *src, > > - int hsub, int vsub, > > - int main_has_alpha, > > - int x, int y, > > - int is_straight, > > - int jobnr, int nb_jobs) > > -{ > > -OverlayContext *s = ctx->priv; > > -const int src_w = src->width; > > -const int src_h = src->height; > > -const int dst_w = dst->width; > > -const int dst_h = dst->height; > > - > > -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, > 0, x, y, main_has_alpha, > > -s->main_desc->comp[0].plane, > s->main_desc->comp[0].offset, s->main_desc->comp[0].step, is_straight, 1, > > -jobnr, nb_jobs); > > -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, > vsub, x, y, main_has_alpha, > > -s->main_desc->comp[1].plane, > s->main_desc->comp[1].offset, s->main_desc->comp[1].step, is_straight, 1, > > -jobnr, nb_jobs); > > -blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, > vsub, x, y, main_has_alpha, > > -s->main_desc->comp[2].plane, > s->main_desc->comp[2].offset, s->main_desc->comp[2].step, is_straight, 1, > > -jobnr, nb_jobs); > > - > > -if (main_has_alpha) > > -alpha_composite(src, dst, src_w, src_h, dst_w, dst_h, x, y, > jobnr, nb_jobs); > > +static av_always_inline void blend_slice_yuv(AVFilterContext *ctx, > \ > > + AVFrame *dst, const > AVFrame *src, \ > > + int hsub, int vsub, >\ > > + int main_has_alpha, >\ > > + int x, int y, >\ > > + int is_straight, > \ > > + int 
jobnr, int nb_jobs) >\ > > +{ > \ > > +OverlayContext *s = ctx->priv; >\ > > +const int src_w = src->width; > \ > > +const int src_h = src->height; >\ > > +const int dst_w = dst->width; > \ > > +const int dst_h = dst->height; >\ > > + >\ > > +blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, > 0, x, y, main_has_alpha,\ > > +s->main_desc->comp[0].plane, > s->main_desc->comp[0].offset, s->main_desc->comp[0].step, is_straight, 1, \ > > +jobnr, nb_jobs); >\ > > +blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, > vsub, x, y, main_has_alpha,\ > > +s->main_desc->comp[1].plane, > s->main_desc->comp[1].offset, s->main_desc->comp[1].step, is_straight, 1, \ > > +jobnr, nb_jobs); >\ > > +blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, > vsub, x, y, main_has_alpha,\ > > +s->main_desc->comp[2].plane, > s->main_desc->comp[2].offset, s->main_desc->comp[2].step, is_straight, 1, \ > > +jobnr, nb_jobs); >\ > > + >\ > > +if (main_has_alpha) > \ > > +alpha_composite(src, dst, src_w, src_h, dst_w, dst_h, x, y, > jobnr, nb_jobs); \ > > I believe merging patches 2, 3 and 4 makes understanding them
[FFmpeg-devel] [PATCH v1 1/2] lavf/vf_transpose: add exif orientation support
Add exif orientation support and expose an option. --- libavfilter/vf_transpose.c | 258 + 1 file changed, 207 insertions(+), 51 deletions(-) diff --git a/libavfilter/vf_transpose.c b/libavfilter/vf_transpose.c index dd54947bd9..4aebfb9ee4 100644 --- a/libavfilter/vf_transpose.c +++ b/libavfilter/vf_transpose.c @@ -46,6 +46,9 @@ typedef struct TransVtable { void (*transpose_block)(uint8_t *src, ptrdiff_t src_linesize, uint8_t *dst, ptrdiff_t dst_linesize, int w, int h); +void (*copyline_block)(uint8_t *src, ptrdiff_t src_linesize, + uint8_t *dst, ptrdiff_t dst_linesize, + int line_dir, int w, int h); } TransVtable; typedef struct TransContext { @@ -56,7 +59,7 @@ typedef struct TransContext { int passthrough;///< PassthroughType, landscape passthrough mode enabled int dir;///< TransposeDir - +int orientation;///< Orientation TransVtable vtables[4]; } TransContext; @@ -182,6 +185,105 @@ static void transpose_8x8_64_c(uint8_t *src, ptrdiff_t src_linesize, transpose_block_64_c(src, src_linesize, dst, dst_linesize, 8, 8); } + +static void copyline_block_8(uint8_t *src, ptrdiff_t src_linesize, + uint8_t *dst, ptrdiff_t dst_linesize, + int line_dir, int w, int h) +{ +int x, y, i; +for (y = 0; y < h; y++, dst += dst_linesize, src += src_linesize) { +for (x = 0; x < w; x ++) { +i = line_dir < 0 ? w-x-1 : x; +dst[i] = src[x]; +} +} +} + +static void copyline_block_16(uint8_t *src, ptrdiff_t src_linesize, + uint8_t *dst, ptrdiff_t dst_linesize, + int line_dir, int w, int h) +{ +int x, y, i; +for (y = 0; y < h; y++, dst += dst_linesize, src += src_linesize) { +for (x = 0; x < w; x ++) { +i = line_dir < 0 ? 
w-x-1 : x; +*((uint16_t *)(dst + 2*i)) = *((uint16_t *)(src + 2*x)); +} +} +} + +static void copyline_block_24(uint8_t *src, ptrdiff_t src_linesize, + uint8_t *dst, ptrdiff_t dst_linesize, + int line_dir, int w, int h) +{ +int x, y, i; +for (y = 0; y < h; y++, dst += dst_linesize, src += src_linesize) { +for (x = 0; x < w; x++) { +int32_t v = AV_RB24(src + 3*x); +i = line_dir < 0 ? w-x-1 : x; +AV_WB24(dst + 3*i, v); +} +} +} + +static void copyline_block_32(uint8_t *src, ptrdiff_t src_linesize, + uint8_t *dst, ptrdiff_t dst_linesize, + int line_dir, int w, int h) +{ +int x, y, i; +for (y = 0; y < h; y++, dst += dst_linesize, src += src_linesize) { +for (x = 0; x < w; x ++) { +i = line_dir < 0 ? w-x-1 : x; +*((uint32_t *)(dst + 4*i)) = *((uint32_t *)(src + 4*x)); +} +} +} + +static void copyline_block_48(uint8_t *src, ptrdiff_t src_linesize, + uint8_t *dst, ptrdiff_t dst_linesize, + int line_dir, int w, int h) +{ +int x, y, i; +for (y = 0; y < h; y++, dst += dst_linesize, src += src_linesize) { +for (x = 0; x < w; x++) { +int64_t v = AV_RB48(src + 6*x); +i = line_dir < 0 ? w-x-1 : x; +AV_WB48(dst + 6*i, v); +} +} +} + +static void copyline_block_64(uint8_t *src, ptrdiff_t src_linesize, + uint8_t *dst, ptrdiff_t dst_linesize, + int line_dir, int w, int h) +{ +int x, y, i; +for (y = 0; y < h; y++, dst += dst_linesize, src += src_linesize) { +for (x = 0; x < w; x ++) { +i = line_dir < 0 ? 
w-x-1 : x; +*((uint64_t *)(dst + 8*i)) = *((uint64_t *)(src + 8*x)); +} +} +} + +static void set_outlink_width_height(AVFilterLink *inlink, AVFilterLink *outlink, int transpose) +{ +if (transpose) { +outlink->w = inlink->h; +outlink->h = inlink->w; + +if (inlink->sample_aspect_ratio.num) +outlink->sample_aspect_ratio = av_div_q((AVRational) { 1, 1 }, + inlink->sample_aspect_ratio); +else +outlink->sample_aspect_ratio = inlink->sample_aspect_ratio; +} else { +outlink->w = inlink->w; +outlink->h = inlink->h; +outlink->sample_aspect_ratio = inlink->sample_aspect_ratio; +} +} + static int config_props_output(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; @@ -213,33 +315,44 @@ static int config_props_output(AVFilterLink *outlink) av_assert0(desc_in->nb_components == desc_out->nb_components); - av_image_fill_max_pixsteps(s->pixsteps, NULL, desc_out); + +if (s->orientation && s
[FFmpeg-devel] [PATCH v1 2/2] fftools/ffmpeg: add exif orientation support per frame's metadata
Fix #6945 Rotate or/and flip frame according to frame's metadata orientation --- fftools/ffmpeg.c| 3 ++- fftools/ffmpeg.h| 3 ++- fftools/ffmpeg_filter.c | 19 ++- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c index 01f04103cf..da4c19c782 100644 --- a/fftools/ffmpeg.c +++ b/fftools/ffmpeg.c @@ -2142,7 +2142,8 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame) break; case AVMEDIA_TYPE_VIDEO: need_reinit |= ifilter->width != frame->width || - ifilter->height != frame->height; + ifilter->height != frame->height || + ifilter->orientation != get_frame_orientation(frame); break; } diff --git a/fftools/ffmpeg.h b/fftools/ffmpeg.h index eb1eaf6363..54532ef0eb 100644 --- a/fftools/ffmpeg.h +++ b/fftools/ffmpeg.h @@ -244,7 +244,7 @@ typedef struct InputFilter { // parameters configured for this input int format; -int width, height; +int width, height, orientation; AVRational sample_aspect_ratio; int sample_rate; @@ -649,6 +649,7 @@ int init_complex_filtergraph(FilterGraph *fg); void sub2video_update(InputStream *ist, AVSubtitle *sub); int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame); +int get_frame_orientation(const AVFrame* frame); int ffmpeg_parse_options(int argc, char **argv); diff --git a/fftools/ffmpeg_filter.c b/fftools/ffmpeg_filter.c index 72838de1e2..b230dafdc9 100644 --- a/fftools/ffmpeg_filter.c +++ b/fftools/ffmpeg_filter.c @@ -743,6 +743,18 @@ static int sub2video_prepare(InputStream *ist, InputFilter *ifilter) return 0; } +int get_frame_orientation(const AVFrame *frame) +{ +AVDictionaryEntry *entry = NULL; +int orientation = 1; // orientation indicates 'Normal' + +// read exif orientation data +entry = av_dict_get(frame->metadata, "Orientation", NULL, 0); +if (entry) +orientation = atoi(entry->value); +return orientation > 8 ? 
1 : orientation; +} + static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in) { @@ -809,7 +821,11 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter, if (ist->autorotate) { double theta = get_rotation(ist->st); -if (fabs(theta - 90) < 1.0) { +if (fabs(theta) < 1.0) { // no rotation info in stream meta +char transpose_args[32]; +snprintf(transpose_args, sizeof(transpose_args), "orientation=%i", ifilter->orientation); +ret = insert_filter(&last_filter, &pad_idx, "transpose", transpose_args); +} else if (fabs(theta - 90) < 1.0) { ret = insert_filter(&last_filter, &pad_idx, "transpose", "clock"); } else if (fabs(theta - 180) < 1.0) { ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL); @@ -1191,6 +1207,7 @@ int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame) ifilter->width = frame->width; ifilter->height = frame->height; ifilter->sample_aspect_ratio = frame->sample_aspect_ratio; +ifilter->orientation = get_frame_orientation(frame); ifilter->sample_rate = frame->sample_rate; ifilter->channels= frame->channels; -- 2.17.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH] avcodec/vp3data: combine eob_run_base and eob_run_get_bits tables
--- This provides a small readability improvement. I observe no performance change on x86_64 or arm6. libavcodec/vp3.c | 6 +++--- libavcodec/vp3data.h | 9 - 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c index b248c90413..63f60c9109 100644 --- a/libavcodec/vp3.c +++ b/libavcodec/vp3.c @@ -988,9 +988,9 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, token = get_vlc2(gb, vlc_table, 11, 3); /* use the token to get a zero run, a coefficient, and an eob run */ if ((unsigned) token <= 6U) { -eob_run = eob_run_base[token]; -if (eob_run_get_bits[token]) -eob_run += get_bits(gb, eob_run_get_bits[token]); +eob_run = eob_run_table[token].base; +if (eob_run_table[token].bits) +eob_run += get_bits(gb, eob_run_table[token].bits); if (!eob_run) eob_run = INT_MAX; diff --git a/libavcodec/vp3data.h b/libavcodec/vp3data.h index c82b1b3a86..d520a10c76 100644 --- a/libavcodec/vp3data.h +++ b/libavcodec/vp3data.h @@ -198,11 +198,10 @@ static const int8_t fixed_motion_vector_table[64] = { }; /* only tokens 0..6 indicate eob runs */ -static const uint8_t eob_run_base[7] = { -1, 2, 3, 4, 8, 16, 0 -}; -static const uint8_t eob_run_get_bits[7] = { -0, 0, 0, 2, 3, 4, 12 +static const struct { +uint8_t base, bits; +} eob_run_table[7] = { +{1, 0}, {2, 0}, {3, 0}, {4, 2}, {8, 3}, {16, 4}, {0, 12} }; static const uint8_t zero_run_base[32] = { -- 2.20.1 -- Peter (A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B) signature.asc Description: PGP signature ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH V2 0/2] Update docs
V2: - Add version note for eld_v2 option - Update checkheaders docs Jun Zhao (2): doc/build_system: Document checkheaders/alltools and consistency fixes doc/encoders: Document eld_v2 option for libfdk_aac encoder. doc/build_system.txt |8 doc/encoders.texi|8 2 files changed, 12 insertions(+), 4 deletions(-) ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH V2 2/2] doc/encoders: Document eld_v2 option for libfdk_aac encoder.
From: Jun Zhao Document eld_v2 option for libfdk_aac encoder. Signed-off-by: Jun Zhao --- doc/encoders.texi |8 1 files changed, 8 insertions(+), 0 deletions(-) diff --git a/doc/encoders.texi b/doc/encoders.texi index ef12c73..eefd124 100644 --- a/doc/encoders.texi +++ b/doc/encoders.texi @@ -733,6 +733,14 @@ if set to 0. Default value is 0. +@item eld_v2 +Enable ELDv2 (LD-MPS extension for ELD stereo signals) for ELDv2 if set to 1, +disabled if set to 0. + +Note that option is available when fdk-aac version (AACENCODER_LIB_VL0.AACENCODER_LIB_VL1.AACENCODER_LIB_VL2) > (4.0.0). + +Default value is 0. + @item signaling Set SBR/PS signaling style. -- 1.7.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH V2] lavfi/lut: Add slice threading support
V2: - update comments Jun Zhao (1): lavfi/lut: Add slice threading support libavfilter/vf_lut.c | 329 +- 1 files changed, 216 insertions(+), 113 deletions(-) ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH V2] lavfi/lut: Add slice threading support
From: Jun Zhao Used the command for 1080p h264 clip as follow: a). ffmpeg -i input -vf lutyuv="u=128:v=128" -f null /dev/null b). ffmpeg -i input -vf lutrgb="g=0:b=0" -f null /dev/null after enabled the slice threading, the fps change from: a). 144fps to 258fps (lutyuv) b). 94fps to 153fps (lutrgb) in Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz Signed-off-by: Jun Zhao --- libavfilter/vf_lut.c | 329 +- 1 files changed, 216 insertions(+), 113 deletions(-) diff --git a/libavfilter/vf_lut.c b/libavfilter/vf_lut.c index c815ddc..7cb3b87 100644 --- a/libavfilter/vf_lut.c +++ b/libavfilter/vf_lut.c @@ -337,13 +337,193 @@ static int config_props(AVFilterLink *inlink) return 0; } +struct thread_data { +AVFrame *in; +AVFrame *out; + +int w; +int h; +}; + +/* packed, 16-bit */ +static int lut_packed_16bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) +{ +LutContext *s = ctx->priv; +const struct thread_data *td = arg; + +uint16_t *inrow, *outrow, *inrow0, *outrow0; +int i, j; +const int w = td->w; +const int h = td->h; +AVFrame *in = td->in; +AVFrame *out = td->out; +const uint16_t (*tab)[256*256] = (const uint16_t (*)[256*256])s->lut; +const int in_linesize = in->linesize[0] / 2; +const int out_linesize = out->linesize[0] / 2; +const int step = s->step; + +const int slice_start = (h * jobnr ) / nb_jobs; +const int slice_end = (h * (jobnr+1)) / nb_jobs; + +inrow0 = (uint16_t *)in ->data[0]; +outrow0 = (uint16_t *)out->data[0]; + +for (i = slice_start; i < slice_end; i++) { +inrow = inrow0 + i * in_linesize; +outrow = outrow0 + i * out_linesize; +for (j = 0; j < w; j++) { + +switch (step) { +#if HAVE_BIGENDIAN +case 4: outrow[3] = av_bswap16(tab[3][av_bswap16(inrow[3])]); // Fall-through +case 3: outrow[2] = av_bswap16(tab[2][av_bswap16(inrow[2])]); // Fall-through +case 2: outrow[1] = av_bswap16(tab[1][av_bswap16(inrow[1])]); // Fall-through +default: outrow[0] = av_bswap16(tab[0][av_bswap16(inrow[0])]); +#else +case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through 
+case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through +case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through +default: outrow[0] = tab[0][inrow[0]]; +#endif +} +outrow += step; +inrow += step; +} +} + +return 0; +} + +/* packed, 8-bit */ +static int lut_packed_8bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) +{ +LutContext *s = ctx->priv; +const struct thread_data *td = arg; + +uint8_t *inrow, *outrow, *inrow0, *outrow0; +int i, j; +const int w = td->w; +const int h = td->h; +AVFrame *in = td->in; +AVFrame *out = td->out; +const uint16_t (*tab)[256*256] = (const uint16_t (*)[256*256])s->lut; +const int in_linesize = in->linesize[0]; +const int out_linesize = out->linesize[0]; +const int step = s->step; + +const int slice_start = (h * jobnr ) / nb_jobs; +const int slice_end = (h * (jobnr+1)) / nb_jobs; + +inrow0 = in ->data[0]; +outrow0 = out->data[0]; + +for (i = slice_start; i < slice_end; i++) { +inrow = inrow0 + i * in_linesize; +outrow = outrow0 + i * out_linesize; +for (j = 0; j < w; j++) { +switch (step) { +case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through +case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through +case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through +default: outrow[0] = tab[0][inrow[0]]; +} +outrow += step; +inrow += step; +} +} + +return 0; +} + +/* planar >8 bit depth */ +static int lut_planar_16bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) +{ +LutContext *s = ctx->priv; +const struct thread_data *td = arg; + +uint16_t *inrow, *outrow; +int i, j, plane; + +AVFrame *in = td->in; +AVFrame *out = td->out; + +for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) { +int vsub = plane == 1 || plane == 2 ? s->vsub : 0; +int hsub = plane == 1 || plane == 2 ? 
s->hsub : 0; +int h = AV_CEIL_RSHIFT(td->h, vsub); +int w = AV_CEIL_RSHIFT(td->w, hsub); +const uint16_t *tab = s->lut[plane]; +const int in_linesize = in->linesize[plane] / 2; +const int out_linesize = out->linesize[plane] / 2; + +const int slice_start = (h * jobnr ) / nb_jobs; +const int slice_end = (h * (jobnr+1)) / nb_jobs; + +inrow = (uint16_t *)(in ->data[plane] + slice_start * in_linesize); +outrow = (uint16_t *)(out->data[plane] + slice_start * out_linesize); + +for (i = slice_start; i < slice_end; i++) { +
[FFmpeg-devel] [PATCH V4] lavfi/colorlevels: Add slice threading support
From: Jun Zhao Add slice threading support, use the command like: ./ffmpeg -i input -vf colorlevels -f null /dev/null with 1080p h264 clip, the fps from 39 fps to 79 fps in the local(Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz) Signed-off-by: Jun Zhao --- libavfilter/vf_colorlevels.c | 121 +++--- 1 files changed, 102 insertions(+), 19 deletions(-) diff --git a/libavfilter/vf_colorlevels.c b/libavfilter/vf_colorlevels.c index 5385a5e..4e47089 100644 --- a/libavfilter/vf_colorlevels.c +++ b/libavfilter/vf_colorlevels.c @@ -105,6 +105,79 @@ static int config_input(AVFilterLink *inlink) return 0; } +struct thread_data { +const uint8_t *srcrow; +uint8_t *dstrow; +int dst_linesize; +int src_linesize; + +double coeff; +uint8_t offset; + +int h; + +int imin; +int omin; +}; + +static int colorlevel_slice_8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) +{ +ColorLevelsContext *s = ctx->priv; +const struct thread_data *td = arg; + +int process_h = td->h; +const int slice_start = (process_h * jobnr ) / nb_jobs; +const int slice_end = (process_h * (jobnr+1)) / nb_jobs; +int x, y; +const uint8_t *srcrow = td->srcrow; +uint8_t *dstrow = td->dstrow; +const int step = s->step; +const uint8_t offset = td->offset; + +int imin = td->imin; +int omin = td->omin; +double coeff = td->coeff; + +for (y = slice_start; y < slice_end; y++) { +const uint8_t *src = srcrow + y * td->src_linesize; +uint8_t *dst = dstrow + y * td->dst_linesize; + +for (x = 0; x < s->linesize; x += step) +dst[x + offset] = av_clip_uint8((src[x + offset] - imin) * coeff + omin); +} + +return 0; +} + +static int colorlevel_slice_16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) +{ +ColorLevelsContext *s = ctx->priv; +const struct thread_data *td = arg; + +int process_h = td->h; +const int slice_start = (process_h * jobnr ) / nb_jobs; +const int slice_end = (process_h * (jobnr+1)) / nb_jobs; +int x, y; +const uint8_t *srcrow = td->srcrow; +uint8_t *dstrow = td->dstrow; +const int step = s->step; +const 
uint8_t offset = td->offset; + +int imin = td->imin; +int omin = td->omin; +double coeff = td->coeff; + +for (y = slice_start; y < slice_end; y++) { +const uint16_t *src = (const uint16_t *)(srcrow + y * td->src_linesize); +uint16_t *dst = (uint16_t *)(dstrow + y * td->dst_linesize); + +for (x = 0; x < s->linesize; x += step) +dst[x + offset] = av_clip_uint16((src[x + offset] - imin) * coeff + omin); +} + +return 0; +} + static int filter_frame(AVFilterLink *inlink, AVFrame *in) { AVFilterContext *ctx = inlink->dst; @@ -137,6 +210,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) int omin = lrint(r->out_min * UINT8_MAX); int omax = lrint(r->out_max * UINT8_MAX); double coeff; +struct thread_data td; if (imin < 0) { imin = UINT8_MAX; @@ -162,15 +236,19 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) srcrow = in->data[0]; coeff = (omax - omin) / (double)(imax - imin); -for (y = 0; y < inlink->h; y++) { -const uint8_t *src = srcrow; -uint8_t *dst = dstrow; - -for (x = 0; x < s->linesize; x += step) -dst[x + offset] = av_clip_uint8((src[x + offset] - imin) * coeff + omin); -dstrow += out->linesize[0]; -srcrow += in->linesize[0]; -} + +td.srcrow= srcrow; +td.dstrow= dstrow; +td.dst_linesize = out->linesize[0]; +td.src_linesize = in->linesize[0]; +td.coeff = coeff; +td.offset= offset; +td.h = inlink->h; +td.imin = imin; +td.omin = omin; + +ctx->internal->execute(ctx, colorlevel_slice_8, &td, NULL, + FFMIN(inlink->h, ff_filter_get_nb_threads(ctx))); } break; case 2: @@ -184,6 +262,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) int omin = lrint(r->out_min * UINT16_MAX); int omax = lrint(r->out_max * UINT16_MAX); double coeff; +struct thread_data td; if (imin < 0) { imin = UINT16_MAX; @@ -209,15 +288,19 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) srcrow = in->data[0]; coeff = (omax - omin) / (double)(imax - imin); -for (y = 0; y < inlink->h; y++) { -const uint16_t *src = (const uint16_t*)srcrow; -uint16_t *dst = 
(uint16_t *)dstrow; - -for (x = 0; x <
[FFmpeg-devel] [PATCH V4] lavfi/colorlevels: Add slice threading support
V4: - fix clip issue for 16-bit case Jun Zhao (1): lavfi/colorlevels: Add slice threading support libavfilter/vf_colorlevels.c | 121 +++--- 1 files changed, 102 insertions(+), 19 deletions(-) ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH V2 1/2] doc/build_system: Document checkheaders/alltools and consistency fixes
From: Jun Zhao Document checkheaders/alltools and consistency fixes Signed-off-by: Jun Zhao --- doc/build_system.txt | 8 1 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/build_system.txt b/doc/build_system.txt index 325a9e8..3d6a21a 100644 --- a/doc/build_system.txt +++ b/doc/build_system.txt @@ -36,11 +36,11 @@ install examples Build all examples located in doc/examples. -libavformat/output-example -Build the libavformat basic example. +checkheaders +Check headers dependencies. -libswscale/swscale-test -Build the swscale self-test (useful also as an example). +alltools +Build all tools in tools directory. config Reconfigure the project with the current configuration. -- 1.7.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] avcodec/libx265: Support full range videos
On 5/24/2019 12:23 PM, Derek Buitenhuis wrote: > Signed-off-by: Derek Buitenhuis > --- > libavcodec/libx265.c | 18 +- > 1 file changed, 17 insertions(+), 1 deletion(-) > > diff --git a/libavcodec/libx265.c b/libavcodec/libx265.c > index 07bca81aef..f56def53d5 100644 > --- a/libavcodec/libx265.c > +++ b/libavcodec/libx265.c > @@ -133,6 +133,14 @@ static av_cold int libx265_encode_init(AVCodecContext > *avctx) > return AVERROR(EINVAL); > } > > + Unnecessary empty line. > +ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1; > + > +ctx->params->vui.bEnableVideoFullRangeFlag = avctx->pix_fmt == > AV_PIX_FMT_YUVJ420P || > + avctx->pix_fmt == > AV_PIX_FMT_YUVJ422P || > + avctx->pix_fmt == > AV_PIX_FMT_YUVJ444P || Could we not? The idea is to eventually kill these, so we should at least try to not make them even more widespread... > + avctx->color_range == > AVCOL_RANGE_JPEG; > + > if ((avctx->color_primaries <= AVCOL_PRI_SMPTE432 && > avctx->color_primaries != AVCOL_PRI_UNSPECIFIED) || > (avctx->color_trc <= AVCOL_TRC_ARIB_STD_B67 && > @@ -140,7 +148,6 @@ static av_cold int libx265_encode_init(AVCodecContext > *avctx) > (avctx->colorspace <= AVCOL_SPC_ICTCP && > avctx->colorspace != AVCOL_SPC_UNSPECIFIED)) { > > -ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1; > ctx->params->vui.bEnableColorDescriptionPresentFlag = 1; > > // x265 validates the parameters internally > @@ -454,8 +461,11 @@ FF_ENABLE_DEPRECATION_WARNINGS > > static const enum AVPixelFormat x265_csp_eight[] = { > AV_PIX_FMT_YUV420P, > +AV_PIX_FMT_YUVJ420P, > AV_PIX_FMT_YUV422P, > +AV_PIX_FMT_YUVJ422P, > AV_PIX_FMT_YUV444P, > +AV_PIX_FMT_YUVJ444P, > AV_PIX_FMT_GBRP, > AV_PIX_FMT_GRAY8, > AV_PIX_FMT_NONE > @@ -463,8 +473,11 @@ static const enum AVPixelFormat x265_csp_eight[] = { > > static const enum AVPixelFormat x265_csp_ten[] = { > AV_PIX_FMT_YUV420P, > +AV_PIX_FMT_YUVJ420P, > AV_PIX_FMT_YUV422P, > +AV_PIX_FMT_YUVJ422P, > AV_PIX_FMT_YUV444P, > +AV_PIX_FMT_YUVJ444P, > AV_PIX_FMT_GBRP, > 
AV_PIX_FMT_YUV420P10, > AV_PIX_FMT_YUV422P10, > @@ -477,8 +490,11 @@ static const enum AVPixelFormat x265_csp_ten[] = { > > static const enum AVPixelFormat x265_csp_twelve[] = { > AV_PIX_FMT_YUV420P, > +AV_PIX_FMT_YUVJ420P, > AV_PIX_FMT_YUV422P, > +AV_PIX_FMT_YUVJ422P, > AV_PIX_FMT_YUV444P, > +AV_PIX_FMT_YUVJ444P, > AV_PIX_FMT_GBRP, > AV_PIX_FMT_YUV420P10, > AV_PIX_FMT_YUV422P10, > ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
Re: [FFmpeg-devel] [PATCH] avcodec/hevcdec: set the SEI parameters early on the AVCodecContext
On 5/24/2019 4:11 AM, Steve Lhomme wrote: > It's better to do it before the buffers are actually created. At least in VLC > we currently don't support changing some parameters dynamically easily so we > don't use the information if it comes after the buffer are created. Glad to know my solution worked :D > > Co-authored-by: James Almer > --- > The same problem may exist with H264 alternative_transfer but I don't have a > sample to test with and the code seems a bit different. Should be a matter of moving the relevant chunk to h264_init_ps(), i think. > --- > libavcodec/hevcdec.c | 18 +++--- > 1 file changed, 11 insertions(+), 7 deletions(-) > > diff --git a/libavcodec/hevcdec.c b/libavcodec/hevcdec.c > index 515b346535..f54f46aa5d 100644 > --- a/libavcodec/hevcdec.c > +++ b/libavcodec/hevcdec.c > @@ -313,6 +313,7 @@ static int decode_lt_rps(HEVCContext *s, LongTermRPS > *rps, GetBitContext *gb) > static void export_stream_params(AVCodecContext *avctx, const HEVCParamSets > *ps, > const HEVCSPS *sps) > { > +const HEVCContext *s = avctx->priv_data; Maybe instead change the export_stream_params() prototype to have the callers directly pass HEVCContext *s and HEVSPS *sps, then declare HEVCParamSets *ps = &s->ps here. Could also do HEVCSEI *sei = &s->sei, so it's consistent. > const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data; > const HEVCWindow *ow = &sps->output_window; > unsigned int num = 0, den = 0; > @@ -355,6 +356,16 @@ static void export_stream_params(AVCodecContext *avctx, > const HEVCParamSets *ps, > if (num != 0 && den != 0) > av_reduce(&avctx->framerate.den, &avctx->framerate.num, >num, den, 1 << 30); > + > +if (s->sei.a53_caption.a53_caption) { I don't think these are available at this point. It's a per frame SEI, and this function is called before slice data is parsed. Notice how a53_caption is freed as soon as it's exported as frame side data in set_side_data(). 
> +avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; > +} > + > +if (s->sei.alternative_transfer.present && > + > av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) > && > +s->sei.alternative_transfer.preferred_transfer_characteristics != > AVCOL_TRC_UNSPECIFIED) { > +avctx->color_trc = > s->sei.alternative_transfer.preferred_transfer_characteristics; > +} > } > > static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps) > @@ -2775,13 +2786,6 @@ static int set_side_data(HEVCContext *s) > memcpy(sd->data, s->sei.a53_caption.a53_caption, > s->sei.a53_caption.a53_caption_size); > av_freep(&s->sei.a53_caption.a53_caption); > s->sei.a53_caption.a53_caption_size = 0; > -s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; > -} > - > -if (s->sei.alternative_transfer.present && > - > av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) > && > -s->sei.alternative_transfer.preferred_transfer_characteristics != > AVCOL_TRC_UNSPECIFIED) { > -s->avctx->color_trc = out->color_trc = > s->sei.alternative_transfer.preferred_transfer_characteristics; > } > > return 0; > ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".