[FFmpeg-devel] [PATCH v8 0/3] Fix mpeg1/2 stream copy

2020-02-25 Thread Nicolas Gaullier
The patch "avcodec/mpeg12dec: Do not alter avctx->rc_buffer_size" must be 
applied first.
https://patchwork.ffmpeg.org/project/ffmpeg/patch/20200224161256.44-1-nicolas.gaullier@cji.paris/

Patch 1/3 and 2/3 have not changed since last version.
Patch 3/3 has been updated with 2xFFMAX removed (Michael's review)
and avctx->rc_buffer_size is not used anymore (James's review)

Nicolas Gaullier (3):
  avformat/utils: Make find_stream_info get side data from codec context
  avcodec/utils: Fix ff_add_cpb_side_data() add twice
  avcodec/mpeg12dec: Add CPB coded side data

 libavcodec/mpeg12dec.c   |  7 +++
 libavcodec/utils.c   |  5 +
 libavformat/utils.c  | 18 ++
 tests/ref/fate/mxf-probe-d10 |  3 +++
 tests/ref/fate/ts-demux  |  2 +-
 5 files changed, 34 insertions(+), 1 deletion(-)

-- 
2.25.0.windows.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

[FFmpeg-devel] [PATCH v8 2/3] avcodec/utils: Fix ff_add_cpb_side_data() add twice

2020-02-25 Thread Nicolas Gaullier
Makes it behave similarly to av_stream_add_side_data().
---
 libavcodec/utils.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index c4dc136d3c..08e2d5b68b 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -1980,6 +1980,11 @@ AVCPBProperties *ff_add_cpb_side_data(AVCodecContext 
*avctx)
 AVPacketSideData *tmp;
 AVCPBProperties  *props;
 size_t size;
+int i;
+
+for (i = 0; i < avctx->nb_coded_side_data; i++)
+if (avctx->coded_side_data[i].type == AV_PKT_DATA_CPB_PROPERTIES)
+return (AVCPBProperties  *)avctx->coded_side_data[i].data;
 
 props = av_cpb_properties_alloc(&size);
 if (!props)
-- 
2.25.0.windows.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

[FFmpeg-devel] [PATCH v8 1/3] avformat/utils: Make find_stream_info get side data from codec context

2020-02-25 Thread Nicolas Gaullier
This will allow probing input coded side data, and also forwarding them to the 
output.
---
 libavformat/utils.c | 18 ++
 1 file changed, 18 insertions(+)

diff --git a/libavformat/utils.c b/libavformat/utils.c
index cb15f6a4b3..a58e47fabc 100644
--- a/libavformat/utils.c
+++ b/libavformat/utils.c
@@ -3599,6 +3599,21 @@ static int extract_extradata(AVStream *st, const 
AVPacket *pkt)
 return 0;
 }
 
+static int add_coded_side_data(AVStream *st, AVCodecContext *avctx)
+{
+int i;
+
+for (i = 0; i < avctx->nb_coded_side_data; i++) {
+const AVPacketSideData *sd_src = &avctx->coded_side_data[i];
+uint8_t *dst_data;
+dst_data = av_stream_new_side_data(st, sd_src->type, sd_src->size);
+if (!dst_data)
+return AVERROR(ENOMEM);
+memcpy(dst_data, sd_src->data, sd_src->size);
+}
+return 0;
+}
+
 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
 {
 int i, count = 0, ret = 0, j;
@@ -4138,6 +4153,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
 ret = avcodec_parameters_from_context(st->codecpar, 
st->internal->avctx);
 if (ret < 0)
 goto find_stream_info_err;
+ret = add_coded_side_data(st, st->internal->avctx);
+if (ret < 0)
+goto find_stream_info_err;
 #if FF_API_LOWRES
 // The decoder might reduce the video size by the lowres factor.
 if (st->internal->avctx->lowres && orig_w) {
-- 
2.25.0.windows.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

[FFmpeg-devel] [PATCH v8 3/3] avcodec/mpeg12dec: Add CPB coded side data

2020-02-25 Thread Nicolas Gaullier
This fixes mpeg2video stream copies to mpeg muxer like this:
  ffmpeg -i xdcamhd.mxf -c:v copy output.mpg
---
 libavcodec/mpeg12dec.c   | 7 +++
 tests/ref/fate/mxf-probe-d10 | 3 +++
 tests/ref/fate/ts-demux  | 2 +-
 3 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c
index 2945728edd..f3b33812ad 100644
--- a/libavcodec/mpeg12dec.c
+++ b/libavcodec/mpeg12dec.c
@@ -1399,6 +1399,7 @@ static void mpeg_decode_sequence_extension(Mpeg1Context 
*s1)
 MpegEncContext *s = &s1->mpeg_enc_ctx;
 int horiz_size_ext, vert_size_ext;
 int bit_rate_ext;
+AVCPBProperties *cpb_props;
 
 skip_bits(&s->gb, 1); /* profile and level esc*/
 s->avctx->profile   = get_bits(&s->gb, 3);
@@ -1430,6 +1431,12 @@ static void mpeg_decode_sequence_extension(Mpeg1Context 
*s1)
 ff_dlog(s->avctx, "sequence extension\n");
 s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
 
+if (cpb_props = ff_add_cpb_side_data(s->avctx)) {
+cpb_props->buffer_size = s1->rc_buffer_size;
+if (s->bit_rate != 0x3FFFF*400)
+cpb_props->max_bitrate = s->bit_rate;
+}
+
 if (s->avctx->debug & FF_DEBUG_PICT_INFO)
 av_log(s->avctx, AV_LOG_DEBUG,
"profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, 
bitrate:%"PRId64"\n",
diff --git a/tests/ref/fate/mxf-probe-d10 b/tests/ref/fate/mxf-probe-d10
index ab564467b5..317d4ae4c5 100644
--- a/tests/ref/fate/mxf-probe-d10
+++ b/tests/ref/fate/mxf-probe-d10
@@ -50,6 +50,9 @@ DISPOSITION:clean_effects=0
 DISPOSITION:attached_pic=0
 DISPOSITION:timed_thumbnails=0
 
TAG:file_package_umid=0x060A2B340101010501010D131300AE86B20091310580080046A54011
+[SIDE_DATA]
+side_data_type=CPB properties
+[/SIDE_DATA]
 [/STREAM]
 [STREAM]
 index=1
diff --git a/tests/ref/fate/ts-demux b/tests/ref/fate/ts-demux
index eb13ecc684..cdf34d6af0 100644
--- a/tests/ref/fate/ts-demux
+++ b/tests/ref/fate/ts-demux
@@ -15,7 +15,7 @@
 1,   5760,   5760, 2880, 1536, 0xbab5129c
 1,   8640,   8640, 2880, 1536, 0x602f034b, S=1,1, 
0x00bd00bd
 1,  11520,  11520, 2880,  906, 0x69cdcbcd
-0,  32037,  36541, 1501,   114336, 0x37a215a8, S=1,1, 
0x00e000e0
+0,  32037,  36541, 1501,   114336, 0x37a215a8, S=2,1, 
0x00e000e0,   24, 0x663d0b52
 0,  33538,  33538, 1501,12560, 0xb559a3d4, F=0x0, S=1,
1, 0x00e000e0
 0,  35040,  35040, 1501,12704, 0x2614adf4, F=0x0, S=1,
1, 0x00e000e0
 0,  36541,  41046, 1501,51976, 0x9ff1dbfe, F=0x0, S=1,
1, 0x00e000e0
-- 
2.25.0.windows.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 03/12] lavfi: drop vf_qp

2020-02-25 Thread Anton Khirnov
Quoting Michael Niedermayer (2020-02-24 20:15:43)
> On Mon, Feb 24, 2020 at 03:54:45PM +0100, Anton Khirnov wrote:
> > Quoting Carl Eugen Hoyos (2020-02-24 13:50:57)
> > > Am Mo., 24. Feb. 2020 um 13:40 Uhr schrieb Anton Khirnov 
> > > :
> > > >
> > > > It fundamentally depends on an API that has been deprecated for five
> > > > years, has seen no commits since that time and is of highly dubious
> > > > usefulness.
> > > 
> > > Please explain how the removed functionality was replaced.
> > 
> > It was not, for the reasons mentioned in the commit message. 
> 
> > In my view,
> > the fact that nobody fixed it in all that time proves that nobody cares
> > about this functionality and thus that there is no value in keeping it.
> 
> your reasoning only works if there is a problem that requires a fix.
> 
> Your reasoning here seems
> BIG problem in A && noone fixes it -> noone cares about A
> 
> My view is
> whoever sees a problem in A (i do not really) should fix it.
> 
> Maybe iam missing something and there is in fact a big problem in the
> code. But if that is the case iam not aware of the problem and thats
> why i did nothing for years "fixing" it. Its not that i do not care.
> 
> So what is really the issue here ?
> if i build vf_qp.o i get
> ./libavutil/frame.h:719:1: note: 'av_frame_get_qp_table' has been explicitly 
> marked deprecated here
> attribute_deprecated
> 
> ./libavutil/frame.h:721:1: note: 'av_frame_set_qp_table' has been explicitly 
> marked deprecated here
> attribute_deprecated

Yes, I believe there is a problem, or more precisely a bunch of related
problems. Not really that big, but real ones nevertheless.

One aspect of the problem is precisely the fact that this functionality
has been deprecated and nobody has addressed this deprecation in many
years. Paul was concerned about our reputation - I believe having so
many deprecation warnings during build is very bad for our reputation.
But more importantly, it is confusing the developers and users about
what they should use and what not. If you cared about keeping this code,
you should have undeprecated it.

Two other aspects of the problem are:
- this API is only really workable for MPEG1/2 and closely related
  codecs like JPEG/H.263/MPEG4 ASP/RV
- it is undocumented, the data layout is not defined
If you really want to keep it, those two points should be addressed.

> 
> if i look at git history these where deprecated in
> commit 7df37dd319f2d9d3e1becd5d433884e3ccfa1ee2
> Author: James Almer 
> Date:   Mon Oct 23 11:10:48 2017 -0300
> 
> avutil/frame: deprecate getters and setters for AVFrame fields
> 
> The fields can be accessed directly, so these are not needed anymore.
> 
> This says the field can be accessed directly, so certainly its not
> deprecated in favor of the side data API.
> 
> and in fact av_frame_get_qp_table / av_frame_set_qp_table do use the 
> side data API already so none of this makes sense really.
> And the whole argument about five years also isnt correct as
> october 2017 is not 5 years ago

The accessors may have been deprecated in 2017, but the entire
"exporting QP tables" functionality was deprecated long before that. In
any case, it does not matter when exactly that was.

> 
> 
> > 
> > Furthermore, I believe this filter (and all the associated
> > "postprocessing" ones) are anachronistic relics of the DivX era. They
> > were in fashion around ~2005 (though I doubt they were actually
> > improving anything even then) but nobody with a clue has used them since
> > H.264 took over.
> 
> well, for old videos (which still exist today) and i mean the stuff
> that used 8x8 dct based codecs mpeg1 to mpeg4, divx, msmpeg4, realvideo
> also jpeg and that use not very high bitrate. (very high bitrate of course
> doesnt have much to improve toward)
> 
> There is a quite noticable quality improvment when using postprocessing
> with the right parameters both subjective and objective (PSNR IIRC)
> And at the rare but not noneexisting occurance where i do want to watch
> such a video i always use one of these filters.
> In realty that has often been the spp filter but thats probably not
> important.
> In general if you can see 8x8 blocks without the filter, these filters
> will make the video simply look better.
> 
> if passing QP helps for the above usecase probably depends on how
> variable QP is in the video one wants to watch or if a single fixed
> hand tuned QP works well (it often does indeed)

But that is precisely the question at hand. Is passing QP tables a
useful thing to have?
Also, do note that MPV removed those filters and according to its
developer nobody ever missed them or complained about their removal.
Furthermore, people in https://github.com/mpv-player/mpv/issues/2792
suggest that other filters may do as good or better job.

> 
> Another usecase for passing QP was lossless re-encoding.
> I do not know how common this has been used (iam not using it and its not
> my idea originally), this of

[FFmpeg-devel] [PATCH V2 2/3] avfilter/vf_dnn_processing.c: add planar yuv format support

2020-02-25 Thread Guo, Yejun
Only the Y channel is handled by dnn, the UV channels are copied
without changes.

The command to use srcnn.pb (see vf_sr) looks like:
./ffmpeg -i 480p.jpg -vf 
format=yuv420p,scale=w=iw*2:h=ih*2,dnn_processing=dnn_backend=tensorflow:model=srcnn.pb:input=x:output=y
 -y srcnn.jpg

Signed-off-by: Guo, Yejun 
---
 doc/filters.texi|  9 ++
 libavfilter/vf_dnn_processing.c | 72 +
 2 files changed, 81 insertions(+)

diff --git a/doc/filters.texi b/doc/filters.texi
index 8300aac..33b7857 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -9195,6 +9195,8 @@ Set the output name of the dnn network.
 
 @end table
 
+@subsection Examples
+
 @itemize
 @item
 Halve the red channle of the frame with format rgb24:
@@ -9208,6 +9210,12 @@ Halve the pixel value of the frame with format gray32f:
 ffmpeg -i input.jpg -vf 
format=grayf32,dnn_processing=model=halve_gray_float.model:input=dnn_in:output=dnn_out:dnn_backend=native
 -y out.native.png
 @end example
 
+@item
+Handle the Y channel with srcnn.pb (see @ref{sr} filter) for frame with 
yuv420p (planar YUV formats supported):
+@example
+./ffmpeg -i 480p.jpg -vf 
format=yuv420p,scale=w=iw*2:h=ih*2,dnn_processing=dnn_backend=tensorflow:model=srcnn.pb:input=x:output=y
 -y srcnn.jpg
+@end example
+
 @end itemize
 
 @section drawbox
@@ -17306,6 +17314,7 @@ Set quality level. The value @code{max} can be used to 
set the maximum level,
 currently @code{6}.
 @end table
 
+@anchor{sr}
 @section sr
 
 Scale the input by applying one of the super-resolution methods based on
diff --git a/libavfilter/vf_dnn_processing.c b/libavfilter/vf_dnn_processing.c
index 4d0ee78..f9458f0 100644
--- a/libavfilter/vf_dnn_processing.c
+++ b/libavfilter/vf_dnn_processing.c
@@ -110,6 +110,8 @@ static int query_formats(AVFilterContext *context)
 static const enum AVPixelFormat pix_fmts[] = {
 AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
 AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAYF32,
+AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
 AV_PIX_FMT_NONE
 };
 AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
@@ -163,6 +165,11 @@ static int check_modelinput_inlink(const DNNData 
*model_input, const AVFilterLin
 }
 return 0;
 case AV_PIX_FMT_GRAYF32:
+case AV_PIX_FMT_YUV420P:
+case AV_PIX_FMT_YUV422P:
+case AV_PIX_FMT_YUV444P:
+case AV_PIX_FMT_YUV410P:
+case AV_PIX_FMT_YUV411P:
 if (model_input->channels != 1) {
 LOG_FORMAT_CHANNEL_MISMATCH();
 return AVERROR(EIO);
@@ -246,6 +253,28 @@ static int prepare_sws_context(AVFilterLink *outlink)
0, NULL, NULL, NULL);
 }
 return 0;
+case AV_PIX_FMT_YUV420P:
+case AV_PIX_FMT_YUV422P:
+case AV_PIX_FMT_YUV444P:
+case AV_PIX_FMT_YUV410P:
+case AV_PIX_FMT_YUV411P:
+av_assert0(input_dt == DNN_FLOAT);
+av_assert0(output_dt == DNN_FLOAT);
+ctx->sws_gray8_to_grayf32 = sws_getContext(inlink->w,
+   inlink->h,
+   AV_PIX_FMT_GRAY8,
+   inlink->w,
+   inlink->h,
+   AV_PIX_FMT_GRAYF32,
+   0, NULL, NULL, NULL);
+ctx->sws_grayf32_to_gray8 = sws_getContext(outlink->w,
+   outlink->h,
+   AV_PIX_FMT_GRAYF32,
+   outlink->w,
+   outlink->h,
+   AV_PIX_FMT_GRAY8,
+   0, NULL, NULL, NULL);
+return 0;
 default:
 //do nothing
 break;
@@ -300,6 +329,15 @@ static int copy_from_frame_to_dnn(DnnProcessingContext 
*ctx, const AVFrame *fram
 frame->data[0], frame->linesize[0],
 bytewidth, frame->height);
 return 0;
+case AV_PIX_FMT_YUV420P:
+case AV_PIX_FMT_YUV422P:
+case AV_PIX_FMT_YUV444P:
+case AV_PIX_FMT_YUV410P:
+case AV_PIX_FMT_YUV411P:
+sws_scale(ctx->sws_gray8_to_grayf32, (const uint8_t **)frame->data, 
frame->linesize,
+  0, frame->height, (uint8_t * const*)(&dnn_input->data),
+  (const int [4]){frame->width * sizeof(float), 0, 0, 0});
+return 0;
 default:
 return AVERROR(EIO);
 }
@@ -341,6 +379,15 @@ static int copy_from_dnn_to_frame(DnnProcessingContext 
*ctx, AVFrame *frame)
 dnn_output->data, bytewidth,
 bytewidth, frame->height);
 

[FFmpeg-devel] [PATCH V2 3/3] avfilter/vf_dnn_processing.c: add frame size change support for planar yuv format

2020-02-25 Thread Guo, Yejun
The Y channel is handled by dnn, and also resized by dnn. The UV channels
are resized with swscale.

The command to use espcn.pb (see vf_sr) looks like:
./ffmpeg -i 480p.jpg -vf 
format=yuv420p,dnn_processing=dnn_backend=tensorflow:model=espcn.pb:input=x:output=y
 -y tmp.espcn.jpg

Signed-off-by: Guo, Yejun 
---
 doc/filters.texi|  9 +
 libavfilter/vf_dnn_processing.c | 37 ++---
 2 files changed, 39 insertions(+), 7 deletions(-)

diff --git a/doc/filters.texi b/doc/filters.texi
index 33b7857..e3df8f9 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -9155,6 +9155,7 @@ ffmpeg -i INPUT -f lavfi -i 
nullsrc=hd720,geq='r=128+80*(sin(sqrt((X-W/2)*(X-W/2
 @end example
 @end itemize
 
+@anchor{dnn_processing}
 @section dnn_processing
 
 Do image processing with deep neural networks. It works together with another 
filter
@@ -9216,6 +9217,12 @@ Handle the Y channel with srcnn.pb (see @ref{sr} filter) 
for frame with yuv420p
 ./ffmpeg -i 480p.jpg -vf 
format=yuv420p,scale=w=iw*2:h=ih*2,dnn_processing=dnn_backend=tensorflow:model=srcnn.pb:input=x:output=y
 -y srcnn.jpg
 @end example
 
+@item
+Handle the Y channel with espcn.pb (see @ref{sr} filter), which changes frame 
size, for format yuv420p (planar YUV formats supported):
+@example
+./ffmpeg -i 480p.jpg -vf 
format=yuv420p,dnn_processing=dnn_backend=tensorflow:model=espcn.pb:input=x:output=y
 -y tmp.espcn.jpg
+@end example
+
 @end itemize
 
 @section drawbox
@@ -17369,6 +17376,8 @@ Default value is @code{2}. Scale factor is necessary 
for SRCNN model, because it
 input upscaled using bicubic upscaling with proper scale factor.
 @end table
 
+This feature can also be finished with @ref{dnn_processing} filter.
+
 @section ssim
 
 Obtain the SSIM (Structural SImilarity Metric) between two input videos.
diff --git a/libavfilter/vf_dnn_processing.c b/libavfilter/vf_dnn_processing.c
index f9458f0..7f40f85 100644
--- a/libavfilter/vf_dnn_processing.c
+++ b/libavfilter/vf_dnn_processing.c
@@ -51,6 +51,8 @@ typedef struct DnnProcessingContext {
 
 struct SwsContext *sws_gray8_to_grayf32;
 struct SwsContext *sws_grayf32_to_gray8;
+struct SwsContext *sws_uv_scale;
+int sws_uv_height;
 } DnnProcessingContext;
 
 #define OFFSET(x) offsetof(DnnProcessingContext, x)
@@ -274,6 +276,18 @@ static int prepare_sws_context(AVFilterLink *outlink)
outlink->h,
AV_PIX_FMT_GRAY8,
0, NULL, NULL, NULL);
+
+if (inlink->w != outlink->w || inlink->h != outlink->h) {
+const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+int sws_src_h = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+int sws_src_w = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+int sws_dst_h = AV_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h);
+int sws_dst_w = AV_CEIL_RSHIFT(outlink->w, desc->log2_chroma_w);
+ctx->sws_uv_scale = sws_getContext(sws_src_w, sws_src_h, 
AV_PIX_FMT_GRAY8,
+   sws_dst_w, sws_dst_h, 
AV_PIX_FMT_GRAY8,
+   SWS_BICUBIC, NULL, NULL, NULL);
+ctx->sws_uv_height = sws_src_h;
+}
 return 0;
 default:
 //do nothing
@@ -404,13 +418,21 @@ static av_always_inline int isPlanarYUV(enum 
AVPixelFormat pix_fmt)
 
 static int copy_uv_planes(DnnProcessingContext *ctx, AVFrame *out, const 
AVFrame *in)
 {
-const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(in->format);
-int uv_height = AV_CEIL_RSHIFT(in->height, desc->log2_chroma_h);
-for (int i = 1; i < 3; ++i) {
-int bytewidth = av_image_get_linesize(in->format, in->width, i);
-av_image_copy_plane(out->data[i], out->linesize[i],
-in->data[i], in->linesize[i],
-bytewidth, uv_height);
+if (!ctx->sws_uv_scale) {
+av_assert0(in->height == out->height && in->width == out->width);
+const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(in->format);
+int uv_height = AV_CEIL_RSHIFT(in->height, desc->log2_chroma_h);
+for (int i = 1; i < 3; ++i) {
+int bytewidth = av_image_get_linesize(in->format, in->width, i);
+av_image_copy_plane(out->data[i], out->linesize[i],
+in->data[i], in->linesize[i],
+bytewidth, uv_height);
+}
+} else {
+sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 1), 
in->linesize + 1,
+  0, ctx->sws_uv_height, out->data + 1, out->linesize + 1);
+sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 2), 
in->linesize + 2,
+  0, ctx->sws_uv_height, out->data + 2, out->linesize + 2);
 }
 
 return 0;
@@ -455,6 +477,7 @@ static av_cold v

[FFmpeg-devel] [PATCH V2 1/3] avfilter/vf_dnn_processing.c: use swscale for uint8<->float32 convert

2020-02-25 Thread Guo, Yejun
Signed-off-by: Guo, Yejun 
---
 libavfilter/vf_dnn_processing.c | 81 +++--
 1 file changed, 61 insertions(+), 20 deletions(-)

diff --git a/libavfilter/vf_dnn_processing.c b/libavfilter/vf_dnn_processing.c
index 492df93..4d0ee78 100644
--- a/libavfilter/vf_dnn_processing.c
+++ b/libavfilter/vf_dnn_processing.c
@@ -32,6 +32,7 @@
 #include "dnn_interface.h"
 #include "formats.h"
 #include "internal.h"
+#include "libswscale/swscale.h"
 
 typedef struct DnnProcessingContext {
 const AVClass *class;
@@ -47,6 +48,9 @@ typedef struct DnnProcessingContext {
 // input & output of the model at execution time
 DNNData input;
 DNNData output;
+
+struct SwsContext *sws_gray8_to_grayf32;
+struct SwsContext *sws_grayf32_to_gray8;
 } DnnProcessingContext;
 
 #define OFFSET(x) offsetof(DnnProcessingContext, x)
@@ -211,6 +215,45 @@ static int config_input(AVFilterLink *inlink)
 return 0;
 }
 
+static int prepare_sws_context(AVFilterLink *outlink)
+{
+AVFilterContext *context = outlink->src;
+DnnProcessingContext *ctx = context->priv;
+AVFilterLink *inlink = context->inputs[0];
+enum AVPixelFormat fmt = inlink->format;
+DNNDataType input_dt  = ctx->input.dt;
+DNNDataType output_dt = ctx->output.dt;
+
+switch (fmt) {
+case AV_PIX_FMT_RGB24:
+case AV_PIX_FMT_BGR24:
+if (input_dt == DNN_FLOAT) {
+ctx->sws_gray8_to_grayf32 = sws_getContext(inlink->w * 3,
+   inlink->h,
+   AV_PIX_FMT_GRAY8,
+   inlink->w * 3,
+   inlink->h,
+   AV_PIX_FMT_GRAYF32,
+   0, NULL, NULL, NULL);
+}
+if (output_dt == DNN_FLOAT) {
+ctx->sws_grayf32_to_gray8 = sws_getContext(outlink->w * 3,
+   outlink->h,
+   AV_PIX_FMT_GRAYF32,
+   outlink->w * 3,
+   outlink->h,
+   AV_PIX_FMT_GRAY8,
+   0, NULL, NULL, NULL);
+}
+return 0;
+default:
+//do nothing
+break;
+}
+
+return 0;
+}
+
 static int config_output(AVFilterLink *outlink)
 {
 AVFilterContext *context = outlink->src;
@@ -227,25 +270,23 @@ static int config_output(AVFilterLink *outlink)
 outlink->w = ctx->output.width;
 outlink->h = ctx->output.height;
 
+prepare_sws_context(outlink);
+
 return 0;
 }
 
-static int copy_from_frame_to_dnn(DNNData *dnn_input, const AVFrame *frame)
+static int copy_from_frame_to_dnn(DnnProcessingContext *ctx, const AVFrame 
*frame)
 {
 int bytewidth = av_image_get_linesize(frame->format, frame->width, 0);
+DNNData *dnn_input = &ctx->input;
 
 switch (frame->format) {
 case AV_PIX_FMT_RGB24:
 case AV_PIX_FMT_BGR24:
 if (dnn_input->dt == DNN_FLOAT) {
-float *dnn_input_data = dnn_input->data;
-for (int i = 0; i < frame->height; i++) {
-for(int j = 0; j < frame->width * 3; j++) {
-int k = i * frame->linesize[0] + j;
-int t = i * frame->width * 3 + j;
-dnn_input_data[t] = frame->data[0][k] / 255.0f;
-}
-}
+sws_scale(ctx->sws_gray8_to_grayf32, (const uint8_t 
**)frame->data, frame->linesize,
+  0, frame->height, (uint8_t * const*)(&dnn_input->data),
+  (const int [4]){frame->linesize[0] * sizeof(float), 0, 
0, 0});
 } else {
 av_assert0(dnn_input->dt == DNN_UINT8);
 av_image_copy_plane(dnn_input->data, bytewidth,
@@ -266,22 +307,19 @@ static int copy_from_frame_to_dnn(DNNData *dnn_input, 
const AVFrame *frame)
 return 0;
 }
 
-static int copy_from_dnn_to_frame(AVFrame *frame, const DNNData *dnn_output)
+static int copy_from_dnn_to_frame(DnnProcessingContext *ctx, AVFrame *frame)
 {
 int bytewidth = av_image_get_linesize(frame->format, frame->width, 0);
+DNNData *dnn_output = &ctx->output;
 
 switch (frame->format) {
 case AV_PIX_FMT_RGB24:
 case AV_PIX_FMT_BGR24:
 if (dnn_output->dt == DNN_FLOAT) {
-float *dnn_output_data = dnn_output->data;
-for (int i = 0; i < frame->height; i++) {
-for(int j = 0; j < frame->width * 3; j++) {
-int k = i * frame->linesize[0] + j;
-int t = i * frame->width * 3 + j;
-frame->data[0][k] = 
av_clip_uintp2((int)(dnn_output_data[t] * 255.0f), 8);
-}
-

Re: [FFmpeg-devel] [PATCH] libswscale/x86/yuv2rgb: Fix Segmentation Fault when load unaligned data

2020-02-25 Thread Paul B Mahol
lgtm

On 2/25/20, Ting Fu  wrote:
> Signed-off-by: Ting Fu 
> ---
>  libswscale/x86/yuv_2_rgb.asm | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/libswscale/x86/yuv_2_rgb.asm b/libswscale/x86/yuv_2_rgb.asm
> index e05bbb89f5..575a84d921 100644
> --- a/libswscale/x86/yuv_2_rgb.asm
> +++ b/libswscale/x86/yuv_2_rgb.asm
> @@ -139,7 +139,7 @@ cglobal %1_420_%2%3, GPR_num, GPR_num, reg_num,
> parameters
>  VBROADCASTSD vr_coff,  [pointer_c_ditherq + 4  * 8]
>  %endif
>  %endif
> -mova m_y, [py_2indexq + 2 * indexq]
> +movu m_y, [py_2indexq + 2 * indexq]
>  movh m_u, [pu_indexq  + indexq]
>  movh m_v, [pv_indexq  + indexq]
>  .loop0:
> @@ -347,7 +347,7 @@ cglobal %1_420_%2%3, GPR_num, GPR_num, reg_num,
> parameters
>  %endif ; PACK RGB15/16
>  %endif ; PACK RGB15/16/32
>
> -mova m_y, [py_2indexq + 2 * indexq + 8 * time_num]
> +movu m_y, [py_2indexq + 2 * indexq + 8 * time_num]
>  movh m_v, [pv_indexq  + indexq + 4 * time_num]
>  movh m_u, [pu_indexq  + indexq + 4 * time_num]
>  add imageq, 8 * depth * time_num
> --
> 2.17.1
>
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH] libswscale/x86/yuv2rgb: Fix Segmentation Fault when load unaligned data

2020-02-25 Thread Carl Eugen Hoyos


> Am 25.02.2020 um 07:29 schrieb Ting Fu :
> 
> Signed-off-by: Ting Fu 
> ---
> libswscale/x86/yuv_2_rgb.asm | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/libswscale/x86/yuv_2_rgb.asm b/libswscale/x86/yuv_2_rgb.asm
> index e05bbb89f5..575a84d921 100644
> --- a/libswscale/x86/yuv_2_rgb.asm
> +++ b/libswscale/x86/yuv_2_rgb.asm
> @@ -139,7 +139,7 @@ cglobal %1_420_%2%3, GPR_num, GPR_num, reg_num, parameters
> VBROADCASTSD vr_coff,  [pointer_c_ditherq + 4  * 8]
> %endif
> %endif
> -mova m_y, [py_2indexq + 2 * indexq]
> +movu m_y, [py_2indexq + 2 * indexq]
> movh m_u, [pu_indexq  + indexq]
> movh m_v, [pv_indexq  + indexq]
> .loop0:
> @@ -347,7 +347,7 @@ cglobal %1_420_%2%3, GPR_num, GPR_num, reg_num, parameters
> %endif ; PACK RGB15/16
> %endif ; PACK RGB15/16/32
> 
> -mova m_y, [py_2indexq + 2 * indexq + 8 * time_num]
> +movu m_y, [py_2indexq + 2 * indexq + 8 * time_num]

If there is a related ticket in trac, please mention it in the commit message.

Carl Eugen
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 03/12] lavfi: drop vf_qp

2020-02-25 Thread Paul B Mahol
On 2/25/20, Anton Khirnov  wrote:
> Quoting Michael Niedermayer (2020-02-24 20:15:43)
>> On Mon, Feb 24, 2020 at 03:54:45PM +0100, Anton Khirnov wrote:
>> > Quoting Carl Eugen Hoyos (2020-02-24 13:50:57)
>> > > Am Mo., 24. Feb. 2020 um 13:40 Uhr schrieb Anton Khirnov
>> > > :
>> > > >
>> > > > It fundamentally depends on an API that has been deprecated for five
>> > > > years, has seen no commits since that time and is of highly dubious
>> > > > usefulness.
>> > >
>> > > Please explain how the removed functionality was replaced.
>> >
>> > It was not, for the reasons mentioned in the commit message.
>>
>> > In my view,
>> > the fact that nobody fixed it in all that time proves that nobody cares
>> > about this functionality and thus that there is no value in keeping it.
>>
>> your reasoning only works if there is a problem that requires a fix.
>>
>> Your reasoning here seems
>> BIG problem in A && noone fixes it -> noone cares about A
>>
>> My view is
>> whoever sees a problem in A (i do not really) should fix it.
>>
>> Maybe iam missing something and there is in fact a big problem in the
>> code. But if that is the case iam not aware of the problem and thats
>> why i did nothing for years "fixing" it. Its not that i do not care.
>>
>> So what is really the issue here ?
>> if i build vf_qp.o i get
>> ./libavutil/frame.h:719:1: note: 'av_frame_get_qp_table' has been
>> explicitly marked deprecated here
>> attribute_deprecated
>>
>> ./libavutil/frame.h:721:1: note: 'av_frame_set_qp_table' has been
>> explicitly marked deprecated here
>> attribute_deprecated
>
> Yes, I believe there is a problem, or more precisely a bunch of related
> problems. Not really that big, but real ones nevertheless.
>
> One aspect of the problem is precisely the fact that this functionality
> has been deprecated and nobody has addressed this deprecation in many
> years. Paul was concerned about our reputation - I believe having so
> many deprecation warnings during build is very bad for our reputation.
> But more importantly, it is confusing the developers and users about
> what they should use and what not. If you cared about keeping this code,
> you should have undeprecated it.
>
> Two ather aspects of the problem are:
> - this API is only really workable for MPEG1/2 and closely related
>   codecs like JPEG/H.263/MPEG4 ASP/RV
> - it is undocumented, the data layout is not defined
> If you really want to keep it, those two points should be addressed.
>
>>
>> if i look at git history these where deprecated in
>> commit 7df37dd319f2d9d3e1becd5d433884e3ccfa1ee2
>> Author: James Almer 
>> Date:   Mon Oct 23 11:10:48 2017 -0300
>>
>> avutil/frame: deprecate getters and setters for AVFrame fields
>>
>> The fields can be accessed directly, so these are not needed anymore.
>>
>> This says the field can be accessed directly, so certainly its not
>> deprecated in favor of the side data API.
>>
>> and in fact av_frame_get_qp_table / av_frame_set_qp_table do use the
>> side data API already so none of this makes sense really.
>> And the whole argument about five years also isnt correct as
>> october 2017 is not 5 years ago
>
> The accessors may have been deprecated in 2017, but the entire
> "exporting QP tables" functionality was deprecated long before that. In
> any case, it does not matter when exactly that was.
>
>>
>>
>> >
>> > Furthermore, I believe this filter (and all the associated
>> > "postprocessing" ones) are anachronistic relics of the DivX era. They
>> > were in fashion around ~2005 (though I doubt they were actually
>> > improving anything even then) but nobody with a clue has used them since
>> > H.264 took over.
>>
>> well, for old videos (which still exist today) and i mean the stuff
>> that used 8x8 dct based codecs mpeg1 to mpeg4, divx, msmpeg4, realvideo
>> also jpeg and that use not very high bitrate. (very high bitrate of course
>> doesnt have much to improve toward)
>>
>> There is a quite noticable quality improvment when using postprocessing
>> with the right parameters both subjective and objective (PSNR IIRC)
>> And at the rare but not noneexisting occurance where i do want to watch
>> such a video i always use one of these filters.
>> In realty that has often been the spp filter but thats probably not
>> important.
>> In general if you can see 8x8 blocks without the filter, these filters
>> will make the video simply look better.
>>
>> if passing QP helps for the above usecase probably depends on how
>> variable QP is in the video one wants to watch or if a single fixed
>> hand tuned QP works well (it often does indeed)
>
> But that is precisely the question at hand. Is passing QP tables a
> useful thing to have?
> Also, do note that MPV removed those filters and according to its
> developer nobody ever missed them or complained about their removal.
> Furthermore, people in https://github.com/mpv-player/mpv/issues/2792
> suggest that other filters may do as good or better job.

lol, I must comment on this. Yo

Re: [FFmpeg-devel] [PATCH 03/12] lavfi: drop vf_qp

2020-02-25 Thread Anton Khirnov
Quoting Thilo Borgmann (2020-02-24 23:07:48)
> Am 24.02.20 um 22:41 schrieb Lou Logan:
> > On Mon, Feb 24, 2020, at 3:37 AM, Anton Khirnov wrote:
> >> It fundamentally depends on an API that has been deprecated for five
> >> years, has seen no commits since that time and is of highly dubious
> >> usefulness.
> >> ---
> >>  doc/filters.texi|  32 ---
> >>  libavfilter/Makefile|   1 -
> >>  libavfilter/allfilters.c|   1 -
> >>  libavfilter/vf_qp.c | 183 
> >>  tests/fate/filter-video.mak |   7 +-
> >>  tests/ref/fate/filter-pp2   |   1 -
> >>  tests/ref/fate/filter-pp3   |   1 -
> >>  7 files changed, 1 insertion(+), 225 deletions(-)
> >>  delete mode 100644 libavfilter/vf_qp.c
> >>  delete mode 100644 tests/ref/fate/filter-pp2
> >>  delete mode 100644 tests/ref/fate/filter-pp3
> > 
> > Fine with me. I've never seen it used by anyone.
> 
> I'm not fine with it. Declaring it's {use | use case} not existent is
> no arguments whatsoever in reality.
> 
> Also, removing some functionality needs an argument - it is not
> keeping some functionality needs an argument.

I disagree with that. Keeping code around is not free, as Vittorio
already said - it is a burden in many ways. So I believe all code needs
continued justification for its existence - not just "it was added in
the past so it stays in forever". Note that I'm not saying it needs to
be mainstream or very popular - I am fine with obscure features that
are only useful to a few people in highly special cases (given they are
not unduly intrusive and those people are willing to maintain them). But
so far in this thread, there has been no actual use presented for
exporting and passing around QP tables. None whatsoever.

Most objections here are like yours - it's a) a feature and b) someone
somewhere sometime might conceivably want to use it, so it must not be
removed. Michael's reponse is the only one that comes close to having a
use case, but even he says that he's unsure of the usefulness of the
actual QP tables and that it's largely theoretical.

I believe I did more structural changes to the libraries than most
people and in my experience these obsolete features from mplayer days
are a MASSIVE maintenance pain. The amount of effort required to keep
them working is simply not justified when essentially nobody uses them.

IMO these demands that they all be preserved forever are endangering the
project. If making changes becomes too hard, people stop making them.
They move to other places that are less hostile to change. We are at
risk of turning into a repository of obscure old codecs and filters and
getting overtaken by leaner projects like dav1d (yes it's supposed to be
AV1-only, but that can change).

> 
> Nobody technically elaborates Paul's statement that it should go into side 
> data. WTF? The compromise isn't even considered?
> 
> Let's dig some trenches, shall we?
> 
> And how come some obvious "use cases" / "needs" like [1] come into play? Or 
> do we declare not continued discussions non-existent now, too?

The patch in your link is not using this API. Precisely because this API
is inadequate for anything newer than MPEG4 ASP. If anything, it's an
additional argument in favor of my patches.

> 
> And how comes, if Michael's investigation, that all of this is based on use 
> of _a function_ that is deprecated instead of direct access of AVFrame's 
> fields is the cause of all of this?
> 
> Shame on all of us.

WTF? What shame?
I am sending patches, in good faith, that I believe will improve the
project. People may (and do) object to them. As long as the discussion
is civil, constructive and in good faith (so far it mostly is), I see no
reason for any shame.

-- 
Anton Khirnov
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [FFmpeg-cvslog] avfilter/vf_v360: add basic commands support

2020-02-25 Thread Gyan Doshi



On 25-02-2020 04:05 pm, Paul B Mahol wrote:

ffmpeg | branch: master | Paul B Mahol  | Tue Feb 25 11:33:06 
2020 +0100| [b484eb2556608d6a818b27c705d959c1c2cd44ee] | committer: Paul B Mahol

avfilter/vf_v360: add basic commands support


http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=b484eb2556608d6a818b27c705d959c1c2cd44ee


Add docs mention.

Thanks,
Gyan
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

[FFmpeg-devel] Hiring FFMPEG Developer for Live Broadcasting (remote)

2020-02-25 Thread sumaklos rembert
Hello FFMPEG-ers,

We are hiring a remote, expert FFMPEG engineer who can also develop in a
LAMP environment for our Live Broadcast Video division.  The candidate must
be highly detailed, meticulous & an expert w/ ffmpeg.  The goal is to
deliver very best, stable live video broadcaster in the industry.

The Developer will also manage the current program as well grow the
platform to the next level.  This is a super high-traffic platform with
millions of unique impressions daily.

Skills required:
-Lead/Sr. LAMP developer

We are using:
-a webrtc broadcaster
-flusonic streaming service
-ffmpeg to transcode
-hls and mse playback

We need someone with solid experience to scale and hit the ground running.

If you are interested, please send your resume to sumak...@nerd.com


Looking forward to hearing from you.

Let's NERD together!!!
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 03/12] lavfi: drop vf_qp

2020-02-25 Thread Nicolas George
Anton Khirnov (2020-02-25):
> So I believe all code needs
> continued justification for its existence - not just "it was added in
> the past so it stays in forever". Note that I'm not saying it needs to
> be mainstream or very popular - I am fine with obscure features that
> are only useful to a few people in highly special cases (given they are
> not unduly intrusive and those people are willing to maintain them). But
> so far in this thread, there has been no actual use presented for
> exporting and passing around QP tables. None whatsoever.

You are ignoring a significant bias in making this argument: satisfied
people don't complain.

The feature is there, it works. If people need it, they use it. They
don't come on the mailing list complaining they have no problem. They
don't even consider fixing a compilation warning among many others,
especially when they possibly don't compile themselves.

If we think a feature is unused and want to remove it, I think we need
to announce it in a way that its users can't miss, and then let them
sufficient time to make their needs known. In this case, "in a way that
its users can't miss" is not true.

You are right in saying that features are a maintenance burden, and
therefore we should remove features that are unused. But for that, you
need to actually make a case for the fact that they are unused. For now,
AFAICS, your case amounts to the fact that nobody spontaneously told you
they use this filter.

Regards,

-- 
  Nicolas George


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 001/244] Add a new channel layout API

2020-02-25 Thread Nicolas George
Anton Khirnov (2020-02-19):
> Thanks for the links. As far as I can tell this can be mapped to the
> proposed API just fine

Except for the user interface part, as I already pointed: if there are
several times the same channel, the API needs to provide a standard way
for the user to specify one.

>(except for signalling precise speaker
> coordinates, which I don't think anything uses).

I think somebody uses it, because somebody felt the need to include it
in the standard. Therefore, we need to support it.

Regards,

-- 
  Nicolas George


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 001/244] Add a new channel layout API

2020-02-25 Thread Hendrik Leppkes
On Tue, Feb 25, 2020 at 12:47 PM Nicolas George  wrote:
> >(except for signalling precise speaker
> > coordinates, which I don't think anything uses).
>
> I think somebody uses it, because somebody felt the need to include it
> in the standard. Therefore, we need to support it.
>

Standards designed by committee like all this MPEG stuff are full of
features that no one uses. It's usually indicative of the following
replacement standard dumping them again. Instead, they threw in a
bunch of new things of questionable use that will disappear in the
next standard once again.

I don't think we should be blindly following what some other group
thinks, but critically judge everything we're implementing here.

- Hendrik
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 7/30] avformat/matroskaenc: Avoid allocations for SeekHead

2020-02-25 Thread Andreas Rheinhardt
Andreas Rheinhardt:
> Andreas Rheinhardt:
>> Andreas Rheinhardt:
>>> Up until e7ddafd5, the Matroska muxer wrote two SeekHeads: One at the
>>> beginning referencing the main level 1 elements (i.e. not the Clusters)
>>> and one at the end, referencing the Clusters. This second SeekHead was
>>> useless and has therefore been removed. Yet the SeekHead-related
>>> functions and structures are still geared towards this usecase: They
>>> are built around an allocated array of variable size that gets
>>> reallocated every time an element is added to it although the maximum
>>> number of Seek entries is a small compile-time constant, so that one should
>>> rather include the array in the SeekHead structure itself; and said
>>> structure should be contained in the MatroskaMuxContext instead of being
>>> allocated separately.
>>>
>>> The earlier code reserved space for a SeekHead with 10 entries, although
>>> we currently write at most 6. Reducing said number implied that every
>>> Matroska/Webm file will be 84 bytes smaller and required to adapt
>>> several FATE tests; furthermore, the reserved amount overestimated the
>>> amount needed for the SeekHead's length field and how many bytes
>>> need to be reserved to write an EBML Void element, bringing the total
>>> reduction to 89 bytes.
>>>
>>> Signed-off-by: Andreas Rheinhardt 
>>> ---
>>
>> Ping.
>>
>> - Andreas
>>
> Ping (the actual patch (which has been omitted for brevity) is here:
> https://ffmpeg.org/pipermail/ffmpeg-devel/2020-February/256997.html).
> 
> - Andreas
> 
Another ping.

- Andreas
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [avfomat/rtp: source ips lost when specified as URL options] Patch for ffmpeg using rtp protocol where sources option is not retained

2020-02-25 Thread Ross Nicholson
Hey Jun Li,

I noticed you have submitted some patches which work around the same code
area's that I submitted for. Your patches look quite tidy and well thought
out so I was wondering if you could look at this patch and see if I'm going
about it in the right way.

I'm not sure this area of ffmpeg currently has a maintainer so
the patches may be difficult to progress.

Ross

On Tue, 11 Feb 2020 at 22:42, Ross Nicholson  wrote:

> The patch was created as a workaround to an issue from in kodi (apologies,
> it's a rather long thread):
> https://forum.kodi.tv/showthread.php?tid=350901&pid=2923550#pid2923550
>
> As an example, here is a URL: rtp://87.141.215.251@232.0.10.234:1
>
> Taking this URL we should be able to either reformat it to: rtp://
> 232.0.10.234:1?sources=87.141.215.251 or pass the sources as an
> av_dict to avfomat_open_input.
>
> Neither option works however. Instead the above workaround was created but
> it's not really the right way to fix this. Would be great to get some
> guidance on the right place to fix this in the right way.
>
> Thanks in advance.
>
> On Tue, 11 Feb 2020 at 22:30, phunkyfish  wrote:
>
>> ---
>>  libavformat/rtsp.c | 26 --
>>  1 file changed, 24 insertions(+), 2 deletions(-)
>>
>> diff --git a/libavformat/rtsp.c b/libavformat/rtsp.c
>> index 859defa592..f922055134 100644
>> --- a/libavformat/rtsp.c
>> +++ b/libavformat/rtsp.c
>> @@ -2334,7 +2334,9 @@ static int sdp_read_header(AVFormatContext *s)
>>  RTSPStream *rtsp_st;
>>  int size, i, err;
>>  char *content;
>> +const char *p, *sp="", *sources="", *sp2, *sources2;
>>  char url[1024];
>> +char sources_buf[1024];
>>
>>  if (!ff_network_init())
>>  return AVERROR(EIO);
>> @@ -2360,6 +2362,16 @@ static int sdp_read_header(AVFormatContext *s)
>>  av_freep(&content);
>>  if (err) goto fail;
>>
>> +/* Search for sources= tag in original URL for rtp protocol only */
>> +if (strncmp(s->url, "rtp://", 6) == 0) {
>> +p = strchr(s->url, '?');
>> +if (p && av_find_info_tag(sources_buf, sizeof(sources_buf),
>> "sources", p)) {
>> +/* av_log(s, AV_LOG_VERBOSE, "sdp_read_header found sources
>> %s\n", sources_buf);  */
>> +sp = sources_buf;
>> +sources = "&sources=";
>> +}
>> +}
>> +
>>  /* open each RTP stream */
>>  for (i = 0; i < rt->nb_rtsp_streams; i++) {
>>  char namebuf[50];
>> @@ -2377,12 +2389,22 @@ static int sdp_read_header(AVFormatContext *s)
>>  av_dict_free(&opts);
>>  goto fail;
>>  }
>> +
>> +/* Prepare to add sources to the url to be opened.
>> +   Otherwise the join to the source specific muliticast will
>> be missing */
>> +sources2 = sources;
>> +sp2 = sp;
>> +/* ignore sources from original URL, when sources are
>> already set in rtsp_st */
>> +if (rtsp_st->nb_include_source_addrs > 0)
>> +sources2 = sp2 = "";
>> +
>>  ff_url_join(url, sizeof(url), "rtp", NULL,
>>  namebuf, rtsp_st->sdp_port,
>> -
>> "?localport=%d&ttl=%d&connect=%d&write_to_source=%d",
>> +
>> "?localport=%d&ttl=%d&connect=%d&write_to_source=%d%s%s",
>>  rtsp_st->sdp_port, rtsp_st->sdp_ttl,
>>  rt->rtsp_flags & RTSP_FLAG_FILTER_SRC ? 1 : 0,
>> -rt->rtsp_flags & RTSP_FLAG_RTCP_TO_SOURCE ? 1 :
>> 0);
>> +rt->rtsp_flags & RTSP_FLAG_RTCP_TO_SOURCE ? 1 :
>> 0,
>> +sources2, sp2);
>>
>>  append_source_addrs(url, sizeof(url), "sources",
>>  rtsp_st->nb_include_source_addrs,
>> --
>> 2.20.1 (Apple Git-117)
>>
>>
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH v4 17/21] vaapi_encode_h264: Support stereo 3D metadata

2020-02-25 Thread Fu, Linjie
> -Original Message-
> From: ffmpeg-devel  On Behalf Of
> Mark Thompson
> Sent: Monday, February 24, 2020 07:41
> To: ffmpeg-devel@ffmpeg.org
> Subject: [FFmpeg-devel] [PATCH v4 17/21] vaapi_encode_h264: Support
> stereo 3D metadata
> 
> Insert frame packing arrangement messages into the stream when input
> frames have associated stereo 3D side-data.
> ---
>  doc/encoders.texi  |  3 +++
>  libavcodec/vaapi_encode_h264.c | 25 -
>  2 files changed, 27 insertions(+), 1 deletion(-)
> 
> diff --git a/doc/encoders.texi b/doc/encoders.texi
> index e23b6b32fe..62b6902197 100644
> --- a/doc/encoders.texi
> +++ b/doc/encoders.texi
> @@ -3065,6 +3065,9 @@ Include picture timing parameters
> (@emph{buffering_period} and
>  @emph{pic_timing} messages).
>  @item recovery_point
>  Include recovery points where appropriate (@emph{recovery_point}
> messages).
> +@item frame_packing
> +Include stereo 3D metadata if the input frames have it
> +(@emph{frame_packing_arrangement} messages).
>  @end table
> 
>  @end table
> diff --git a/libavcodec/vaapi_encode_h264.c
> b/libavcodec/vaapi_encode_h264.c
> index f4965d8b09..58eae613c4 100644
> --- a/libavcodec/vaapi_encode_h264.c
> +++ b/libavcodec/vaapi_encode_h264.c
> @@ -25,6 +25,7 @@
>  #include "libavutil/common.h"
>  #include "libavutil/internal.h"
>  #include "libavutil/opt.h"
> +#include "libavutil/stereo3d.h"
> 
>  #include "avcodec.h"
>  #include "cbs.h"
> @@ -39,6 +40,7 @@ enum {
>  SEI_TIMING = 0x01,
>  SEI_IDENTIFIER = 0x02,
>  SEI_RECOVERY_POINT = 0x04,
> +SEI_FRAME_PACKING  = 0x20,
>  };

There is a jump from 0x04 to 0x20, how about combining it with the enum in
vaapi_encode_h265.c, and moving into vaapi_encode.h, hence SEI_FRAME_PACKING
could also be used for H265 later?

vaapi_encode_h265.c:
enum {
SEI_MASTERING_DISPLAY   = 0x08,
SEI_CONTENT_LIGHT_LEVEL = 0x10,
};

>  // Random (version 4) ISO 11578 UUID.
> @@ -96,6 +98,7 @@ typedef struct VAAPIEncodeH264Context {
>  H264RawSEIBufferingPeriod  sei_buffering_period;
>  H264RawSEIPicTimingsei_pic_timing;
>  H264RawSEIRecoveryPointsei_recovery_point;
> +H264RawSEIFramePackingArrangement sei_frame_packing;
>  H264RawSEIUserDataUnregistered sei_identifier;
>  char  *sei_identifier_string;
> 
> @@ -251,6 +254,12 @@ static int
> vaapi_encode_h264_write_extra_header(AVCodecContext *avctx,
>  sei->payload[i].payload.recovery_point = 
> priv->sei_recovery_point;
>  ++i;
>  }
> +if (priv->sei_needed & SEI_FRAME_PACKING) {
> +sei->payload[i].payload_type = H264_SEI_TYPE_FRAME_PACKING;
> +sei->payload[i].payload.frame_packing_arrangement =
> +priv->sei_frame_packing;
> +++i;
> +}
> 
>  sei->payload_count = i;
>  av_assert0(sei->payload_count > 0);
> @@ -700,6 +709,17 @@ static int
> vaapi_encode_h264_init_picture_params(AVCodecContext *avctx,
>  priv->sei_needed |= SEI_RECOVERY_POINT;
>  }
> 
> +if (priv->sei & SEI_FRAME_PACKING) {
> +AVFrameSideData *sd = av_frame_get_side_data(pic->input_image,
> + AV_FRAME_DATA_STEREO3D);
> +if (sd) {
> +ff_cbs_h264_fill_sei_frame_packing_arrangement(
> +&priv->sei_frame_packing, (const AVStereo3D*)sd->data);
> +}
> +
> +priv->sei_needed |= SEI_FRAME_PACKING;

If got NULL sd from av_frame_get_side_data(),  would it be better to not adding
SEI_FRAME_PACKING to  priv->sei_needed or taking further actions to write extra 
header?

- Linjie
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH v4 02/21] cbs: Ensure that reference fields always follow the associated pointer

2020-02-25 Thread Fu, Linjie
> -Original Message-
> From: ffmpeg-devel  On Behalf Of
> Mark Thompson
> Sent: Monday, February 24, 2020 07:41
> To: ffmpeg-devel@ffmpeg.org
> Subject: [FFmpeg-devel] [PATCH v4 02/21] cbs: Ensure that reference fields
> always follow the associated pointer
> 
> Hvaing these together allows us to find both pointers given the address

Nit: Hvaing -> Having

- Linjie

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 001/244] Add a new channel layout API

2020-02-25 Thread Anton Khirnov
Quoting Nicolas George (2020-02-25 12:47:03)
> Anton Khirnov (2020-02-19):
> > Thanks for the links. As far as I can tell this can be mapped to the
> > proposed API just fine
> 
> Except for the user interface part, as I already pointed: if there are
> several times the same channel, the API needs to provide a standard way
> for the user to specify one.

As far as I can tell, the Apple API linked above does not support that
either. The way of describing the channel layout is given by
mChannelLayoutTag, which can be either
- kAudioChannelLayoutTag_UseChannelBitmap, which is effectively
  equivalent to our current API, or the new API's LAYOUT_NATIVE
- one of several predefined layouts, which can be mapped either to
  LAYOUT_NATIVE or LAYOUT_CUSTOM
- kAudioChannelLayoutTag_UseChannelDescriptions, which cannot be
  represented in the current API, but is effectively equivalent to the
  new API's LAYOUT_CUSTOM
  The AudioChannelDescription struct contains:
* AudioChannelFlags, which apply to coordinates
* three floats, which are the coordinates
* AudioChannelLabel, which is uint32 similar to our AVChannel

I see no support for any custom free-form text labels of the kind you
want me to add.

> 
> >(except for signalling precise speaker
> > coordinates, which I don't think anything uses).
> 
> I think somebody uses it, because somebody felt the need to include it
> in the standard. Therefore, we need to support it.

In addition to Hendrik's reply (which I agree with), support for this
can be later added through a new layout type if someone really needs it.
I see no reason to spend effort implementing functionality that is not
actually used for anything.

-- 
Anton Khirnov
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH] avcodec/magicyuv: Check that there are enough lines for interlacing to be possible

2020-02-25 Thread Michael Niedermayer
On Mon, Feb 24, 2020 at 12:51:48PM +0100, Paul B Mahol wrote:
> lgtm

will apply

thx

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Frequently ignored answer#1 FFmpeg bugs should be sent to our bugtracker. User
questions about the command line tools should be sent to the ffmpeg-user ML.
And questions about how to use libav* should be sent to the libav-user ML.


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 04/13] avformat/mux: Cosmetics

2020-02-25 Thread Michael Niedermayer
On Tue, Aug 13, 2019 at 04:47:17AM +0200, Andreas Rheinhardt wrote:
> Signed-off-by: Andreas Rheinhardt 
> ---
>  libavformat/mux.c | 19 +--
>  1 file changed, 9 insertions(+), 10 deletions(-)

will apply

thx

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Take away the freedom of one citizen and you will be jailed, take away
the freedom of all citizens and you will be congratulated by your peers
in Parliament.


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH] configure: Enable section_data_rel_ro for OpenBSD aarch64 / arm

2020-02-25 Thread Michael Niedermayer
On Sun, Feb 23, 2020 at 04:15:35PM -0500, Brad Smith wrote:
> configure: Enable section_data_rel_ro for OpenBSD aarch64 / arm
> 
> Signed-off-by: Brad Smith 
> ---
>  configure | 1 +
>  1 file changed, 1 insertion(+)

will apply

thx

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

During times of universal deceit, telling the truth becomes a
revolutionary act. -- George Orwell


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 2/2] avformat/mvdec: Check stream numbers

2020-02-25 Thread Michael Niedermayer
On Sun, Feb 23, 2020 at 08:54:00PM +, Andreas Rheinhardt wrote:
> Michael Niedermayer:
> > Fixes: null pointer dereference
> > Fixes: 
> > 20768/clusterfuzz-testcase-minimized-ffmpeg_DEMUXER_fuzzer-5638648978735104.fuzz
> > 
> > Found-by: continuous fuzzing process 
> > https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
> > Signed-off-by: Michael Niedermayer 
> > ---
> >  libavformat/mvdec.c | 6 ++
> >  1 file changed, 6 insertions(+)
> > 
> > diff --git a/libavformat/mvdec.c b/libavformat/mvdec.c
> > index f9f7e38137..64166a84b1 100644
> > --- a/libavformat/mvdec.c
> > +++ b/libavformat/mvdec.c
> > @@ -363,6 +363,12 @@ static int mv_read_header(AVFormatContext *avctx)
> >  if ((ret = read_table(avctx, NULL, parse_global_var)) < 0)
> >  return ret;
> >  
> > +if (mv->nb_audio_tracks < 0  || mv->nb_video_tracks < 0 ||
> > +   (mv->nb_audio_tracks == 0 && mv->nb_video_tracks == 0)) {
> > +av_log(avctx, AV_LOG_ERROR, "Stream count is invalid.\n");
> > +return AVERROR_INVALIDDATA;
> > +}
> > +
> >  if (mv->nb_audio_tracks > 1) {
> >  avpriv_request_sample(avctx, "Multiple audio streams support");
> >  return AVERROR_PATCHWELCOME;
> > 
> LGTM.

will apply


> 
> - Andreas
> 
> PS: Is it actually allowed to set the channel_layout to stereo if
> there are more than two channels (as set_channels() does)?

id say code which sets this inconsistently should be fixed

thx

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

The misfortune of the wise is better than the prosperity of the fool.
-- Epicurus


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 7/7] avformat/segafilmenc: Add deinit function

2020-02-25 Thread Michael Niedermayer
On Tue, Jan 14, 2020 at 04:13:36AM +0100, Andreas Rheinhardt wrote:
> Prevents memleaks when the trailer is never written or when shifting the
> data fails when writing the trailer.
> 
> Signed-off-by: Andreas Rheinhardt 
> ---
>  libavformat/segafilmenc.c | 14 ++
>  1 file changed, 14 insertions(+)

will apply

thx

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

There will always be a question for which you do not know the correct answer.


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 6/7] avformat/segafilmenc: Combine several checks

2020-02-25 Thread Michael Niedermayer
On Tue, Jan 14, 2020 at 04:13:35AM +0100, Andreas Rheinhardt wrote:
> by moving them around.
> 
> Signed-off-by: Andreas Rheinhardt 
> ---
>  libavformat/segafilmenc.c | 26 +-
>  1 file changed, 9 insertions(+), 17 deletions(-)

will apply

thx

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

I am the wisest man alive, for I know one thing, and that is that I know
nothing. -- Socrates


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] Status and Plans for Subtitle Filters

2020-02-25 Thread Clément Bœsch
On Sun, Feb 23, 2020 at 09:59:59PM +0100, Michael Niedermayer wrote:
[...]
> > The subtitles refactor requires to see the big picture and all the problems 
> > at
> > once. 
> 
> really ?
> just hypothetically, and playing the devils advocat here.
> what would happen if one problem or set of problems is solved at a time ?

The first requirement of everything following is to define a new
structure/API for holding the subtitles within the AVFrame (which has to
live in lavu and not lavc like current API). So you have to address all
the current limitations in that new API first, unless you're ready to
change that new API 10x in the near future. And even if you keep most of
the current design, you still have to at least come up with ways to remove
all the current hacks that would go away while moving to the new design.

> 
> Maybe the thinking should not be "what are all the things that might need
> to be considered"
> but rather "what is the minimum set of things that need to be considered"
> to make the first step towards a better API/first git push
> 
> 
> 
> > Since the core change (subtitles in AVFrame) requires the introduction of
> > a new subtitles structure and API, it also involve addressing the 
> > shortcomings
> > of the original API (or maybe we could tolerate a new API that actually 
> > looks
> > like the old?). So even if we ignore the subtitle-in-avframe thing, we don't
> > have a clear answer for a sane API that handles everything. Here is a
> > non-exhaustive list of stuff that we have to take into account while 
> > thinking
> > about that:
> > 
> > - text subtitles with and without markup
> 
> > - sparsity, overlapping
> 
> heartbeat frames would eliminate sparsity

Yes, and like many aspect of this refactor: we need to come up and
formalize a convention. Of course I can make a suggestion, but there are
many other cases and exceptions.

> what happens if you forbid overlapping ?

You can't, it's too common. The classic "Hello, hello" was already
mentioned, but I could also mention subtitles used to "legend" the
environment (you know, like, signposts and stuff) in addition to
dialogues.

> > - different semantics for duration (duration available, no known duration,
> >   event-based clearing, ...)
> 
> This one is annoying (though similar to video where it's just not so much an
> issue as video is generally regularly spaced)
> But does this actually impact the API in any way ?
> decoder -> avframe -> encoder

AVFrame always go through lavfi. I don't remember the details (it's been
about 2 years now), but the lack of semantic for duration was causing some
issues within lavfi.

> (if some information is missing some look 
> ahead/buffer/filter/converter/whatever may be needed but the API wouldnt 
> change i think and that should work with any API)
> 
> 
> > - closed captions / teletext
> 
> What happens if you ignore these at this stage?

I can't ignore them, the way we change the subtitle interface must address
their special behaviours. But I'd say my main issue with closed captions /
teletext was the same as DVB subtitles: we don't have much tests.

Typically, the DVB subtitles hack we have in ffmpeg.c like, forever, I'm
dropping it, but I can't test it properly: DVBsub coverage is almost
non-existent: http://coverage.ffmpeg.org/ (look for dvbsub and dvbsubdec)

Actually, if someone does improve subtitle coverage for formats I'm not
comfortable with (specifically cc and dvb), that would actually help A
LOT. At least I wouldn't have to speculate on how it should/could/would
behave.

BTW, if there is someone available to explain to me DVB subtitles, I'm all
ears. I understand that they have no duration, but random (partial?)
subtitle resets?

> > - bitmap subtitles and their potential colorspaces (each rectangle as an
> >   AVFrame is way overkill but technically that's exactly what it is)
> 
> then a AVFrame needs to represent a collection of rectangles.
> Its either 1 or N for the design i think.
> Our current subtitle structures already have a similar design so this
> wouldnt be really different.

Yeah, the new API prototype ended up being:

+#define AV_NUM_DATA_POINTERS 8
+
+/**
+ * This structure describes decoded subtitle rectangle
+ */
+typedef struct AVFrameSubtitleRectangle {
+int x, y;
+int w, h;
+
+/* image data for bitmap subtitles, in AVFrame.format (AVPixelFormat) */
+uint8_t *data[AV_NUM_DATA_POINTERS];
+int linesize[AV_NUM_DATA_POINTERS];
+
+/* decoded text for text subtitles, in ASS */
+char *text;
+
+int flags;
+} AVFrameSubtitleRectangle;
+

But then, do we use a fixed pixel format for all codecs? Is this really
enough when some subtitles are actually a bunch of image files inside a
"modern standard container"? (before you ask, yeah I saw that a few years
back in some broadcasting garbage thing).

What about PAL8 subtitles? We currently need to convert them into codecs,
and re-analyze them again during encoding to reconstitute the palette,
and

Re: [FFmpeg-devel] avfilter/vf_zscale: fix crash on unaligned input

2020-02-25 Thread Paul B Mahol
Applied

On 2/17/20, jjadfh5dfg  wrote:
> ‐‐‐ Original Message ‐‐‐
> On Saturday, February 15, 2020 11:11 AM, Paul B Mahol 
> wrote:
>
>> On 2/15/20, jjadfh5dfg jjadfh5...@protonmail.com wrote:
>>
>> > ‐‐‐ Original Message ‐‐‐
>> > On Saturday, February 15, 2020 7:08 AM, Paul B Mahol one...@gmail.com
>> > wrote:
>> >
>> > > On 2/15/20, jjadfh5dfg jjadfh5...@protonmail.com wrote:
>> > >
>> > > > Sent with ProtonMail Secure Email.
>> > >
>> > > How to apply this? Author name is not valid.
>> > > Please specify meaningful name, like: Donnie Darko Rabbit
>> > > ffmpeg-devel mailing list
>> > > ffmpeg-devel@ffmpeg.org
>> > > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>> > > To unsubscribe, visit link above, or email
>> > > ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
>> >
>> > To apply the patch, please use this command:
>> > $ git am 0001-avfilter-vf_zscale-fix-crash-on-unaligned-input.patch
>> > You may change the author if it displeases you.
>>
>> Personally I do not care, but others do.
>> So If you plan to send more patches to zscale and not just this one
>> please consider changing it to something more pleasing.
>>
>> > Sent with ProtonMail Secure Email.
>> >
>> > ffmpeg-devel mailing list
>> > ffmpeg-devel@ffmpeg.org
>> > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>> > To unsubscribe, visit link above, or email
>> > ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
>>
>> ffmpeg-devel mailing list
>> ffmpeg-devel@ffmpeg.org
>> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>>
>> To unsubscribe, visit link above, or email
>> ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
>
> Please find attached a revised patch with different author field.
>
> Sent with ProtonMail Secure Email.
>
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 01/12] fifo: uninline av_fifo_peek2() on the next major bump

2020-02-25 Thread James Almer
On 2/24/2020 9:37 AM, Anton Khirnov wrote:
> Inline public functions should be avoided unless absolutely necessary,
> and no such necessity exists in this code.
> ---
>  libavutil/fifo.c | 13 +
>  libavutil/fifo.h |  5 +
>  2 files changed, 18 insertions(+)
> 
> diff --git a/libavutil/fifo.c b/libavutil/fifo.c
> index 1060aedf13..0baaadc521 100644
> --- a/libavutil/fifo.c
> +++ b/libavutil/fifo.c
> @@ -23,6 +23,7 @@
>  #include "avassert.h"
>  #include "common.h"
>  #include "fifo.h"
> +#include "version.h"
>  
>  static AVFifoBuffer *fifo_alloc_common(void *buffer, size_t size)
>  {
> @@ -238,3 +239,15 @@ void av_fifo_drain(AVFifoBuffer *f, int size)
>  f->rptr -= f->end - f->buffer;
>  f->rndx += size;
>  }
> +
> +#if LIBAVUTIL_VERSION_MAJOR >= 57
> +uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs);
> +{
> +uint8_t *ptr = f->rptr + offs;
> +if (ptr >= f->end)
> +ptr = f->buffer + (ptr - f->end);
> +else if (ptr < f->buffer)
> +ptr = f->end - (f->buffer - ptr);
> +return ptr;
> +}
> +#endif
> diff --git a/libavutil/fifo.h b/libavutil/fifo.h
> index dc7bc6f0dd..8cd964ef45 100644
> --- a/libavutil/fifo.h
> +++ b/libavutil/fifo.h
> @@ -27,6 +27,7 @@
>  #include 
>  #include "avutil.h"
>  #include "attributes.h"
> +#include "version.h"
>  
>  typedef struct AVFifoBuffer {
>  uint8_t *buffer;
> @@ -166,6 +167,7 @@ void av_fifo_drain(AVFifoBuffer *f, int size);
>   * point outside to the buffer data.
>   * The used buffer size can be checked with av_fifo_size().
>   */
> +#if LIBAVUTIL_VERSION_MAJOR < 57
>  static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)
>  {
>  uint8_t *ptr = f->rptr + offs;
> @@ -175,5 +177,8 @@ static inline uint8_t *av_fifo_peek2(const AVFifoBuffer 
> *f, int offs)
>  ptr = f->end - (f->buffer - ptr);
>  return ptr;
>  }
> +#else
> +uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs);
> +#endif

This patch will need a following one after the bump to remove dead code,
so IMO might as well just do this in one commit after the bump and save
all the ifdeffery.

>  
>  #endif /* AVUTIL_FIFO_H */
> 

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH] avfilter/vf_program_opencl: allow setting kernel per plane

2020-02-25 Thread Mark Thompson
On 24/02/2020 10:01, Paul B Mahol wrote:
> Fixes #7190
> 
> Signed-off-by: Paul B Mahol 
> ---
>  doc/filters.texi| 22 
>  libavfilter/vf_program_opencl.c | 64 ++---
>  2 files changed, 65 insertions(+), 21 deletions(-)
> 
> diff --git a/doc/filters.texi b/doc/filters.texi
> index 70fd7a4cc7..6b10f649b9 100644
> --- a/doc/filters.texi
> +++ b/doc/filters.texi
> @@ -21302,6 +21302,17 @@ Number of inputs to the filter.  Defaults to 1.
>  @item size, s
>  Size of output frames.  Defaults to the same as the first input.
>  
> +@item kernel2
> +Kernel name in program for 2nd plane, if not set kernel from option
> +@var{kernel} is used.
> +
> +@item kernel3
> +Kernel name in program for 3nd plane, if not set kernel from option
> +@var{kernel} is used.

Why this default?  The kernel for the second plane feels a more obvious choice 
to me for cases like yuv420p.

> +
> +@item kernel4
> +Kernel name in program for 4nd plane, if not set kernel from option
> +@var{kernel} is used.
>  @end table
>  
>  The program source file must contain a kernel function with the given name,

An example using it would be nice to show the intended setup.

> @@ -22488,6 +22499,17 @@ Pixel format to use for the generated frames.  This 
> must be set.
>  @item rate, r
>  Number of frames generated every second.  Default value is '25'.
>  
> +@item kernel2
> +Kernel name in program for 2nd plane, if not set kernel from option
> +@var{kernel} is used.
> +
> +@item kernel3
> +Kernel name in program for 3nd plane, if not set kernel from option
> +@var{kernel} is used.
> +
> +@item kernel4
> +Kernel name in program for 4nd plane, if not set kernel from option
> +@var{kernel} is used.
>  @end table
>  
>  For details of how the program loading works, see the @ref{program_opencl}
> diff --git a/libavfilter/vf_program_opencl.c b/libavfilter/vf_program_opencl.c
> index ec25e931f5..f748b15037 100644
> --- a/libavfilter/vf_program_opencl.c
> +++ b/libavfilter/vf_program_opencl.c
> @@ -33,14 +33,14 @@ typedef struct ProgramOpenCLContext {
>  
>  int loaded;
>  cl_uint index;
> -cl_kernel   kernel;
> +cl_kernel   kernel[4];
>  cl_command_queuecommand_queue;
>  
>  FFFrameSync fs;
>  AVFrame   **frames;
>  
>  const char *source_file;
> -const char *kernel_name;
> +const char *kernel_name[4];
>  int nb_inputs;
>  int width, height;
>  enum AVPixelFormat  source_format;
> @@ -66,15 +66,17 @@ static int program_opencl_load(AVFilterContext *avctx)
>  return AVERROR(EIO);
>  }
>  
> -ctx->kernel = clCreateKernel(ctx->ocf.program, ctx->kernel_name, &cle);
> -if (!ctx->kernel) {
> -if (cle == CL_INVALID_KERNEL_NAME) {
> -av_log(avctx, AV_LOG_ERROR, "Kernel function '%s' not found in "
> -   "program.\n", ctx->kernel_name);
> -} else {
> -av_log(avctx, AV_LOG_ERROR, "Failed to create kernel: %d.\n", 
> cle);
> +for (int i = 0; i < 4; i++) {

I don't think it's a good idea to make kernel objects for absent planes, and it 
should be an error to provide more kernels than planes.

> +ctx->kernel[i] = clCreateKernel(ctx->ocf.program, 
> ctx->kernel_name[i] ? ctx->kernel_name[i] : ctx->kernel_name[0], &cle);

Since the kernel you end up with is exactly the same, perhaps you would be 
better making only the named kernels and then choosing later which one to use 
rather than having many copies of the same object.

(Also, please avoid overlong lines.)

> +if (!ctx->kernel[i]) {
> +if (cle == CL_INVALID_KERNEL_NAME) {
> +av_log(avctx, AV_LOG_ERROR, "Kernel function '%s' not found 
> in "
> +   "program.\n", ctx->kernel_name[i] ? 
> ctx->kernel_name[i] : ctx->kernel_name[0]);
> +} else {
> +av_log(avctx, AV_LOG_ERROR, "Failed to create kernel%d: 
> %d.\n", i, cle);
> +}
> +return AVERROR(EIO);
>  }
> -return AVERROR(EIO);
>  }
>  
>  ctx->loaded = 1;
> @@ -108,14 +110,14 @@ static int program_opencl_run(AVFilterContext *avctx)
>  if (!dst)
>  break;
>  
> -cle = clSetKernelArg(ctx->kernel, 0, sizeof(cl_mem), &dst);
> +cle = clSetKernelArg(ctx->kernel[plane], 0, sizeof(cl_mem), &dst);
>  if (cle != CL_SUCCESS) {
>  av_log(avctx, AV_LOG_ERROR, "Failed to set kernel "
> "destination image argument: %d.\n", cle);
>  err = AVERROR_UNKNOWN;
>  goto fail;
>  }
> -cle = clSetKernelArg(ctx->kernel, 1, sizeof(cl_uint), &ctx->index);
> +cle = clSetKernelArg(ctx->kernel[plane], 1, sizeof(cl_uint), 
> &ctx->index);
>  if (cle != CL_SUCCESS) {
>  av_log(avctx, AV_LOG_ERROR, "Failed to set kernel "
>  

Re: [FFmpeg-devel] [PATCH 1/2] lavc/qsvdec: add decode support for HEVC 4:2:2 8-bit and 10-bit

2020-02-25 Thread Mark Thompson
On 25/02/2020 02:01, Linjie Fu wrote:
> Enables HEVC Range Extension decoding support for 4:2:2 8/10 bit
> on ICL+ (gen11 +) platform.
> 
> Signed-off-by: Linjie Fu 
> ---
>  libavcodec/qsv.c  | 12 
>  libavutil/hwcontext_qsv.c | 22 ++
>  2 files changed, 34 insertions(+)

Should this be gated somehow to stop it being run on Windows?  There it will 
probably fail in some ugly way inside the D3D code which doesn't support YUYV 
formats.

Similarly, do you need a specific libva version or is that already implied by 
the libmfx version?

- Mark
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH v4 18/21] cbs_h265: Add functions to turn HDR metadata into SEI

2020-02-25 Thread Mark Thompson
On 25/02/2020 04:32, Vittorio Giovara wrote:
> On Mon, Feb 24, 2020 at 5:18 PM Mark Thompson  wrote:
>> On 24/02/2020 21:28, Vittorio Giovara wrote:
>>> On Sun, Feb 23, 2020 at 6:41 PM Mark Thompson  wrote:
>>>
 ---
  libavcodec/Makefile   |  2 +-
  libavcodec/cbs_h265.c | 99 +++
  libavcodec/cbs_h265.h | 18 
  3 files changed, 118 insertions(+), 1 deletion(-)
  create mode 100644 libavcodec/cbs_h265.c

 ...
 +void

>> ff_cbs_h265_fill_sei_mastering_display(H265RawSEIMasteringDisplayColourVolume
 *mdcv,
 +const
 AVMasteringDisplayMetadata *mdm)
 +{
 +memset(mdcv, 0, sizeof(*mdcv));
 +
 +if (mdm->has_primaries) {
 +// The values in the metadata structure are fractions between 0
 and 1,
 +// while the SEI message contains fixed-point values with an
 increment
 +// of 0.2.  So, scale up by 5 to convert between them.
 +
 +for (int a = 0; a < 3; a++) {
 +// The metadata structure stores this in RGB order, but the
 SEI
 +// wants it in GBR order.
 +int b = (a + 1) % 3;

>>>
>>> this is a pretty minor comment, but do you think you could use the more
>>> legible way present in other parts of the codebase?
>>> const int mapping[3] = {2, 0, 1};
>>> rather than (a + 1) % 3;
>>
>> Ok.
>>
>> Is there a specific reason to make it on the stack rather than static?  I
>> see it's there in hevcdec.
>>
> 
> No particular reason, I just find it more readable, if you think it's a
> really bad practice then you could keep the code as is.

Sorry, my question wasn't very clear.  I don't mind the change.  But:

Is there a reason why the array in hevcdec (and your suggestion) is not static? 
 (Some sort of compiler optimisation effect I'm missing, maybe.)  Intuitively 
it feels like it should be static const rather than being constructed on the 
stack every time the function is called.

- Mark
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH v4 17/21] vaapi_encode_h264: Support stereo 3D metadata

2020-02-25 Thread Mark Thompson
On 25/02/2020 14:10, Fu, Linjie wrote:
>> -Original Message-
>> From: ffmpeg-devel  On Behalf Of
>> Mark Thompson
>> Sent: Monday, February 24, 2020 07:41
>> To: ffmpeg-devel@ffmpeg.org
>> Subject: [FFmpeg-devel] [PATCH v4 17/21] vaapi_encode_h264: Support
>> stereo 3D metadata
>>
>> Insert frame packing arrangement messages into the stream when input
>> frames have associated stereo 3D side-data.
>> ---
>>  doc/encoders.texi  |  3 +++
>>  libavcodec/vaapi_encode_h264.c | 25 -
>>  2 files changed, 27 insertions(+), 1 deletion(-)
>>
>> diff --git a/doc/encoders.texi b/doc/encoders.texi
>> index e23b6b32fe..62b6902197 100644
>> --- a/doc/encoders.texi
>> +++ b/doc/encoders.texi
>> @@ -3065,6 +3065,9 @@ Include picture timing parameters
>> (@emph{buffering_period} and
>>  @emph{pic_timing} messages).
>>  @item recovery_point
>>  Include recovery points where appropriate (@emph{recovery_point}
>> messages).
>> +@item frame_packing
>> +Include stereo 3D metadata if the input frames have it
>> +(@emph{frame_packing_arrangement} messages).
>>  @end table
>>
>>  @end table
>> diff --git a/libavcodec/vaapi_encode_h264.c
>> b/libavcodec/vaapi_encode_h264.c
>> index f4965d8b09..58eae613c4 100644
>> --- a/libavcodec/vaapi_encode_h264.c
>> +++ b/libavcodec/vaapi_encode_h264.c
>> @@ -25,6 +25,7 @@
>>  #include "libavutil/common.h"
>>  #include "libavutil/internal.h"
>>  #include "libavutil/opt.h"
>> +#include "libavutil/stereo3d.h"
>>
>>  #include "avcodec.h"
>>  #include "cbs.h"
>> @@ -39,6 +40,7 @@ enum {
>>  SEI_TIMING = 0x01,
>>  SEI_IDENTIFIER = 0x02,
>>  SEI_RECOVERY_POINT = 0x04,
>> +SEI_FRAME_PACKING  = 0x20,
>>  };
> 
> There is a jumping from 0x04 to 0x20, how about combining it with the enum in
> vaapi_encode_h265.c, and moving into vaapi_encode.h, hence SEI_FRAME_PACKING
> could also be used for H265 later?

Yeah, the point of including the jump was that they are disjoint parts of the 
same enum in the two files.

Moving it into the header would be reasonable, I'll do that (other codecs where 
SEI isn't a thing can see it, but they don't care so whatever).

Does anyone use stereo frame packing in H.265?  If that's not an entirely 
vestigial feature then I would just add it, because it's very easy to do.

>>  // Random (version 4) ISO 11578 UUID.
>> @@ -96,6 +98,7 @@ typedef struct VAAPIEncodeH264Context {
>>  H264RawSEIBufferingPeriod  sei_buffering_period;
>>  H264RawSEIPicTimingsei_pic_timing;
>>  H264RawSEIRecoveryPointsei_recovery_point;
>> +H264RawSEIFramePackingArrangement sei_frame_packing;
>>  H264RawSEIUserDataUnregistered sei_identifier;
>>  char  *sei_identifier_string;
>>
>> @@ -251,6 +254,12 @@ static int
>> vaapi_encode_h264_write_extra_header(AVCodecContext *avctx,
>>  sei->payload[i].payload.recovery_point = 
>> priv->sei_recovery_point;
>>  ++i;
>>  }
>> +if (priv->sei_needed & SEI_FRAME_PACKING) {
>> +sei->payload[i].payload_type = H264_SEI_TYPE_FRAME_PACKING;
>> +sei->payload[i].payload.frame_packing_arrangement =
>> +priv->sei_frame_packing;
>> +++i;
>> +}
>>
>>  sei->payload_count = i;
>>  av_assert0(sei->payload_count > 0);
>> @@ -700,6 +709,17 @@ static int
>> vaapi_encode_h264_init_picture_params(AVCodecContext *avctx,
>>  priv->sei_needed |= SEI_RECOVERY_POINT;
>>  }
>>
>> +if (priv->sei & SEI_FRAME_PACKING) {
>> +AVFrameSideData *sd = av_frame_get_side_data(pic->input_image,
>> + 
>> AV_FRAME_DATA_STEREO3D);
>> +if (sd) {
>> +ff_cbs_h264_fill_sei_frame_packing_arrangement(
>> +&priv->sei_frame_packing, (const AVStereo3D*)sd->data);
>> +}
>> +
>> +priv->sei_needed |= SEI_FRAME_PACKING;
> 
> If got NULL sd from av_frame_get_side_data(),  would it be better to not 
> adding
> SEI_FRAME_PACKING to  priv->sei_needed or taking further actions to write 
> extra header?

Good point, it needs to be inside the brace.  (And I should check negative 
cases more carefully.)

Thanks,

- Mark
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] Status and Plans for Subtitle Filters

2020-02-25 Thread Michael Niedermayer
On Mon, Feb 24, 2020 at 08:48:23PM +0100, Nicolas George wrote:
> Michael Niedermayer (12020-02-24):
> > > No, they can't: being the same subtitle or not is part of the semantic.
> 
> > Does anyone else share this oppinion ?
> > 
> > iam asking because we need to resolve such differences of oppinion to
> > move forward.
> > Theres no way to design an API if such relativly fundamental things
> > have disagreements on them
> 
> It's not a matter of opinion, it is actually quite obvious:
> 
> # 1
> # 00:00:10,000 --> 00:00:11,000
> # Hello.
> # 
> # 2
> # 00:00:11,000 --> 00:00:12,000
> # Hello.
> 
> … means that two people said Hello in quick succession while:
> 
> # 1
> # 00:00:10,000 --> 00:00:12,000
> # Hello.
> 
> … means that Hello was said only once, slowly.

Yes
but the overlap is neither solving that nor sufficient
nor does this work very well

It doesn't work very well because when someone speaks really fast
you display the text only for a short time and no one can read it.
That fails to achieve the main goal of a subtitle, which is allowing
someone to read it.
One can go on now to list cases where this is ambiguous or not
enough.

But I think a better summary is that there are 2 really separate things:
1. The actual content
2. The way it is presented (loud, fast, fearful, whatever)

I think we should not in our internal representation use the duration
of display for the duration of sound.
Especially formats with strict random access points will always start
all subtitles at that point anew. Otherwise one could not seek to
that point. and that will produce subtitles where the duration
interpretation as sound duration would not work well


> 
> And it has practical consequences: Clément suggested a voice synthesis
> filter, that would change its output.
> 
> Some subtitles have overlap all over the place. I am thinking in
> particular of some animé fansub, with on-screen signs and onomatopoeia
> translated and cultural notes, all along with dialogue. De-overlapping
> would increase their size considerably, and cause actual dialogue to be
> split, which results in the problems I have explained above.

I think you mix things up

subtitle size matters in the muxed format, this is talking about the
representation in AVFrames. This would make no difference to what is
stored, in fact the encoder searching for things it can merge instead
of not doing that could lead to smaller files.

Also for the subtitle rectangles we could even use reference counting
and reuse them as long as they did not change.



> 
> But I don't know why you are so focussed on this. Overlapping is not a

It's not a focus at all; it was just something I noticed when reading this
which IMHO can be avoided to make the API maybe simpler.

It's a suggestion, nothing else.


> problem, it's just something to keep in mind while designing the API,
> like the fact that bitmap subtitles have several rectangles. It's
> actually quite easy to handle.

I am not sure arbitrary overlapping AVFrames will not cause problems;
it is very different from existing semantics.

Thanks

[...]

-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Asymptotically faster algorithms should always be preferred if you have
asymptotical amounts of data


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH v4 02/21] cbs: Ensure that reference fields always follow the associated pointer

2020-02-25 Thread Mark Thompson
On 25/02/2020 14:19, Fu, Linjie wrote:
>> -Original Message-
>> From: ffmpeg-devel  On Behalf Of
>> Mark Thompson
>> Sent: Monday, February 24, 2020 07:41
>> To: ffmpeg-devel@ffmpeg.org
>> Subject: [FFmpeg-devel] [PATCH v4 02/21] cbs: Ensure that reference fields
>> always follow the associated pointer
>>
>> Hvaing these together allows us to find both pointers given the address
> 
> Nit: Hvaing -> Having

Fixed.

Thanks

- Mark
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 3/3 v2] avformat/dashenc: always attempt to enable prft on ldash mode

2020-02-25 Thread James Almer
On 2/24/2020 6:54 AM, Anton Khirnov wrote:
> Quoting James Almer (2020-02-20 17:26:00)
>> Signed-off-by: James Almer 
> 
> Commit message is now misleading since it will only enable prft if it's
> not disabled.

Sorry, i pushed this during the weekend. And, true. It's still
attempting but technically not always...

Which makes me realize i should mention this undocumented behavior in
the doxy.

>> ---
>> Now it can be overriden if you explicitly set write_prft to 0.
>>
>>  libavformat/dashenc.c | 8 +++-
>>  1 file changed, 7 insertions(+), 1 deletion(-)
>>
>> diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
>> index a52cbc9113..7032adc84d 100644
>> --- a/libavformat/dashenc.c
>> +++ b/libavformat/dashenc.c
>> @@ -1394,6 +1394,12 @@ static int dash_init(AVFormatContext *s)
>>  c->frag_type = FRAG_TYPE_EVERY_FRAME;
>>  }
>>  
>> +if (c->write_prft < 0) {
>> +c->write_prft = c->ldash;
> 
> nit: !!, in case ldash becomes something else than a bool in the future

The chances for that are pretty slim, since turning a bool into an int
would be an API break (true/false would stop working from the command
line, afaik). But i can change it anyway.

> 
>> +if (c->ldash)
>> +av_log(s, AV_LOG_INFO, "Enabling Producer Reference Time 
>> element for Low Latency mode\n");
> 
> I'd say this should be VERBOSE, since a normal run with no unexpected
> events should produce no log output.

Sure, will change in a new commit.

> 
> Otherwise LGTM.
> 

Thanks, and apologies for not waiting a bit more.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH] Update HDR10+ metadata structure.

2020-02-25 Thread Mohammad Izadi
On Mon, Feb 24, 2020 at 9:56 AM Vittorio Giovara 
wrote:

> On Sat, Feb 22, 2020 at 12:44 PM Mohammad Izadi 
> wrote:
>
> > On Fri, Feb 21, 2020, 6:44 PM Vittorio Giovara <
> vittorio.giov...@gmail.com
> > >
> > wrote:
> >
> > > On Fri, Feb 21, 2020 at 5:17 PM Mohammad Izadi 
> > > wrote:
> > >
> > > > Why does the struct belong to lavu? This struct is super similar to
> > > structs
> > > > in libavcodec/hevc_sei.h. We just move it to a new file to share it
> > > between
> > > > hevc and vp9 encoder/decoder.
> > > >
> > > > --
> > > >
> > >
> > > 1. Please kindly stop top posting:
> > http://www.idallen.com/topposting.html
> > > 2. It belongs to lavu because it's where the frame code generically
> code
> > > is. I'm not familiar with this API too much, but from what i gather
> users
> > > may need to have a way of accessing this data without pulling in all
> the
> > > dependencies of lavc or lavf.
> > >
> > This struct is related to parsing and SEI, not frame. If so, why other
> > structs are not in lavu? Please check similar structs in hevc_sei?
> >
>
> I don't think I understand your question, but if you need examples you can
> check these patches
> 8f58ecc344a92e63193c38e28c173be987954bbb structure defined in lavu,
> e7a6f8c972a0b5b98ef7bbf393e95c434e9e2539 structure populated in lavc
> d91718107c33960ad295950d7419e6dba292d723 structure defined in lavu, used in
> lavc
> 7e244c68600f479270e979258e389ed5240885fb same
> and so on and so on, so I'd advise you do to the same, scrapping your
> current code if necessary.
>
I will do, but let me explain the problem in more details and you may help
me for a solution. The patches you mentioned, contains two structs
AVSphericalMapping
and AVMasteringDisplayMetadata in lavu. They are easily set (a few members)
in lavc. The struct for HDR10+ is very similar and I would keep it in lavu.
But, we have to parse and decode a message and then populate the values.
Your structs are simple and no need for parsing them in lavc.
So, my struct needs two steps: 1) parsing/encoding/decoding and 2)
populating. It is not a good idea to implement the 2 steps for each codec
separately. Instead it would be better to implement them once and reuse them, as
both steps are long and complex. Now please advise me where it is better to
put 1 and 2 in lavc. Right now, I have it all with the struct in lavu.

> --
> Vittorio
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

[FFmpeg-devel] [PATCH V2] libswscale/x86/yuv2rgb: Fix Segmentation Fault when load unaligned data

2020-02-25 Thread Ting Fu
Fixes ticket #8532

Signed-off-by: Ting Fu 
---
V2:
Add ticket info in commit message

 libswscale/x86/yuv_2_rgb.asm | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libswscale/x86/yuv_2_rgb.asm b/libswscale/x86/yuv_2_rgb.asm
index e05bbb89f5..575a84d921 100644
--- a/libswscale/x86/yuv_2_rgb.asm
+++ b/libswscale/x86/yuv_2_rgb.asm
@@ -139,7 +139,7 @@ cglobal %1_420_%2%3, GPR_num, GPR_num, reg_num, parameters
 VBROADCASTSD vr_coff,  [pointer_c_ditherq + 4  * 8]
 %endif
 %endif
-mova m_y, [py_2indexq + 2 * indexq]
+movu m_y, [py_2indexq + 2 * indexq]
 movh m_u, [pu_indexq  + indexq]
 movh m_v, [pv_indexq  + indexq]
 .loop0:
@@ -347,7 +347,7 @@ cglobal %1_420_%2%3, GPR_num, GPR_num, reg_num, parameters
 %endif ; PACK RGB15/16
 %endif ; PACK RGB15/16/32
 
-mova m_y, [py_2indexq + 2 * indexq + 8 * time_num]
+movu m_y, [py_2indexq + 2 * indexq + 8 * time_num]
 movh m_v, [pv_indexq  + indexq + 4 * time_num]
 movh m_u, [pu_indexq  + indexq + 4 * time_num]
 add imageq, 8 * depth * time_num
-- 
2.17.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH] libswscale/x86/yuv2rgb: Fix Segmentation Fault when load unaligned data

2020-02-25 Thread Fu, Ting


> -Original Message-
> From: ffmpeg-devel  On Behalf Of Carl
> Eugen Hoyos
> Sent: Tuesday, February 25, 2020 05:43 PM
> To: FFmpeg development discussions and patches 
> Subject: Re: [FFmpeg-devel] [PATCH] libswscale/x86/yuv2rgb: Fix Segmentation
> Fault when load unaligned data
> 
> 
> 
> > Am 25.02.2020 um 07:29 schrieb Ting Fu :
> >
> > Signed-off-by: Ting Fu 
> > ---
> > libswscale/x86/yuv_2_rgb.asm | 4 ++--
> > 1 file changed, 2 insertions(+), 2 deletions(-)
> >
> > diff --git a/libswscale/x86/yuv_2_rgb.asm
> > b/libswscale/x86/yuv_2_rgb.asm index e05bbb89f5..575a84d921 100644
> > --- a/libswscale/x86/yuv_2_rgb.asm
> > +++ b/libswscale/x86/yuv_2_rgb.asm
> > @@ -139,7 +139,7 @@ cglobal %1_420_%2%3, GPR_num, GPR_num,
> reg_num, parameters
> > VBROADCASTSD vr_coff,  [pointer_c_ditherq + 4  * 8] %endif %endif
> > -mova m_y, [py_2indexq + 2 * indexq]
> > +movu m_y, [py_2indexq + 2 * indexq]
> > movh m_u, [pu_indexq  + indexq]
> > movh m_v, [pv_indexq  + indexq]
> > .loop0:
> > @@ -347,7 +347,7 @@ cglobal %1_420_%2%3, GPR_num, GPR_num,
> reg_num,
> > parameters %endif ; PACK RGB15/16 %endif ; PACK RGB15/16/32
> >
> > -mova m_y, [py_2indexq + 2 * indexq + 8 * time_num]
> > +movu m_y, [py_2indexq + 2 * indexq + 8 * time_num]
> 
> If there is a related ticket in trac, please mention it in the commit message.
> 
> Carl Eugen

Sorry for the missing ticket info. Added in patch V2.

Thank you,
Ting Fu
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> 
> To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org
> with subject "unsubscribe".
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH] Print out numeric values of option constants

2020-02-25 Thread Soft Works


> -Original Message-
> From: ffmpeg-devel  On Behalf Of
> Moritz Barsnick
> Sent: Tuesday, February 25, 2020 8:54 AM
> To: FFmpeg development discussions and patches  de...@ffmpeg.org>
> Subject: Re: [FFmpeg-devel] [PATCH] Print out numeric values of option
> constants
> 
> On Tue, Feb 18, 2020 at 02:40:49 +, Soft Works wrote:
> > It's often not obvious how option constants relate to numerical values.
> > Defaults are sometimes printed as numbers and from/to are always
> printed as numbers.
> > Printing the numeric values of options constants avoids this confusion.
> > It also allows to see which constants are equivalent.
> 
> Was this resent by accident? It was already pushed as
> 9e0a071edec93a7bd23f389fb1724ec6b43f8304
> quite a long time ago.

Apologies, I had looked at the wrong place when checking whether it has 
been merged. 
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH] Update HDR10+ metadata structure.

2020-02-25 Thread Vittorio Giovara
On Tue, Feb 25, 2020 at 9:16 PM Mohammad Izadi  wrote:

> On Mon, Feb 24, 2020 at 9:56 AM Vittorio Giovara <
> vittorio.giov...@gmail.com>
> wrote:
>
> > On Sat, Feb 22, 2020 at 12:44 PM Mohammad Izadi 
> > wrote:
> >
> > > On Fri, Feb 21, 2020, 6:44 PM Vittorio Giovara <
> > vittorio.giov...@gmail.com
> > > >
> > > wrote:
> > >
> > > > On Fri, Feb 21, 2020 at 5:17 PM Mohammad Izadi 
> > > > wrote:
> > > >
> > > > > Why does the struct belong to lavu? This struct is super similar to
> > > > structs
> > > > > in libavcodec/hevc_sei.h. We just move it to a new file to share it
> > > > between
> > > > > hevc and vp9 encoder/decoder.
> > > > >
> > > > > --
> > > > >
> > > >
> > > > 1. Please kindly stop top posting:
> > > http://www.idallen.com/topposting.html
> > > > 2. It belongs to lavu because it's where the frame code generically
> > code
> > > > is. I'm not familiar with this API too much, but from what i gather
> > users
> > > > may need to have a way of accessing this data without pulling in all
> > the
> > > > dependencies of lavc or lavf.
> > > >
> > > This struct is related to parsing and SEI, not frame. If so, why other
> > > structs are not in lavu? Please check similar structs in hevc_sei?
> > >
> >
> > I don't think I understand your question, but if you need examples you
> can
> > check these patches
> > 8f58ecc344a92e63193c38e28c173be987954bbb structure defined in lavu,
> > e7a6f8c972a0b5b98ef7bbf393e95c434e9e2539 structure populated in lavc
> > d91718107c33960ad295950d7419e6dba292d723 structure defined in lavu, used
> in
> > lavc
> > 7e244c68600f479270e979258e389ed5240885fb same
> > and so on and so on, so I'd advise you do to the same, scrapping your
> > current code if necessary.
> >
> I will do, but let me explain the problem in more details and you may help
> me for a solution. The patches you mentioned, contains two structs
> AVSphericalMapping
> and  AVMasteringDisplayMetadata in lavu. They are easily set (afew members)
> in lavc. The struct for HDR10+ is very similar and I would keep it in lavu.
> But, we have to parse and decode a message and then populate the values.
> Your structs are simple and no need for parsing them in lavc.
> So, my struct needs two steps : 1) parsing/encoding/decoding and 2)
> populating. It is not a good idea to implement the 2 steps for each codec
> separately. Instead it would be  better to implement once and reuse them as
> both steps are long and complex. Now please advise me where is better to
> put 1 and 2 in lavc. Right now, I have all with struct in lavu.
>

Hi Mohammad,
thanks for explaining the problem a bit better. If that's the case you
could have a helper function that parses the data in lavc (usually these
functions are prefixed with ff_, meaning their intended use is internal
within a library) and use the helper function to parse whatever buffer you
pass. This wrapper could then return a lavu struct to be embedded in a side
data message like in the examples I sent you.
Let me know if this is clear enough for you
Thanks
-- 
Vittorio
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH] Update HDR10+ metadata structure.

2020-02-25 Thread Mohammad Izadi
On Tue, Feb 25, 2020, 9:32 PM Vittorio Giovara 
wrote:

> On Tue, Feb 25, 2020 at 9:16 PM Mohammad Izadi 
> wrote:
>
> > On Mon, Feb 24, 2020 at 9:56 AM Vittorio Giovara <
> > vittorio.giov...@gmail.com>
> > wrote:
> >
> > > On Sat, Feb 22, 2020 at 12:44 PM Mohammad Izadi 
> > > wrote:
> > >
> > > > On Fri, Feb 21, 2020, 6:44 PM Vittorio Giovara <
> > > vittorio.giov...@gmail.com
> > > > >
> > > > wrote:
> > > >
> > > > > On Fri, Feb 21, 2020 at 5:17 PM Mohammad Izadi <
> moh.iz...@gmail.com>
> > > > > wrote:
> > > > >
> > > > > > Why does the struct belong to lavu? This struct is super similar
> to
> > > > > structs
> > > > > > in libavcodec/hevc_sei.h. We just move it to a new file to share
> it
> > > > > between
> > > > > > hevc and vp9 encoder/decoder.
> > > > > >
> > > > > > --
> > > > > >
> > > > >
> > > > > 1. Please kindly stop top posting:
> > > > http://www.idallen.com/topposting.html
> > > > > 2. It belongs to lavu because it's where the frame code generically
> > > code
> > > > > is. I'm not familiar with this API too much, but from what i gather
> > > users
> > > > > may need to have a way of accessing this data without pulling in
> all
> > > the
> > > > > dependencies of lavc or lavf.
> > > > >
> > > > This struct is related to parsing and SEI, not frame. If so, why
> other
> > > > structs are not in lavu? Please check similar structs in hevc_sei?
> > > >
> > >
> > > I don't think I understand your question, but if you need examples you
> > can
> > > check these patches
> > > 8f58ecc344a92e63193c38e28c173be987954bbb structure defined in lavu,
> > > e7a6f8c972a0b5b98ef7bbf393e95c434e9e2539 structure populated in lavc
> > > d91718107c33960ad295950d7419e6dba292d723 structure defined in lavu,
> used
> > in
> > > lavc
> > > 7e244c68600f479270e979258e389ed5240885fb same
> > > and so on and so on, so I'd advise you to do the same, scrapping your
> > > current code if necessary.
> > >
> > I will do, but let me explain the problem in more details and you may
> help
> > me for a solution. The patches you mentioned, contains two structs
> > AVSphericalMapping
> > and AVMasteringDisplayMetadata in lavu. They are easily set (a few
> members)
> > in lavc. The struct for HDR10+ is very similar and I would keep it in
> lavu.
> > But, we have to parse and decode a message and then populate the values.
> > Your structs are simple and no need for parsing them in lavc.
> > So, my struct needs two steps : 1) parsing/encoding/decoding and 2)
> > populating. It is not a good idea to implement the 2 steps for each codec
> > separately. Instead it would be  better to implement once and reuse them
> as
> > both steps are long and complex. Now please advise me where is better to
> > put 1 and 2 in lavc. Right now, I have all with struct in lavu.
> >
>
> Hi Mohammad,
> thanks for explaining the problem a bit better. If that's the case you
> could have a helper function that parses the data in lavc (usually these
> functions are prefixed with ff_, meaning their intended use is internal
> within a library) and use the helper function to parse whatever buffer you
> pass. This wrapper could then return a lavu struct to be embedded in a side
> data message like in the examples I sent you.
> Let me know if this is clear enough for you
> Thanks
>

Thanks for your solution. I have to use the parser or helper function in
libavformat for mkv too. Am I allowed to use the ff_ helpers in lavf?

> --
> Vittorio
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH v4 18/21] cbs_h265: Add functions to turn HDR metadata into SEI

2020-02-25 Thread Vittorio Giovara
On Tue, Feb 25, 2020 at 6:03 PM Mark Thompson  wrote:

> On 25/02/2020 04:32, Vittorio Giovara wrote:
> > On Mon, Feb 24, 2020 at 5:18 PM Mark Thompson  wrote:
> >> On 24/02/2020 21:28, Vittorio Giovara wrote:
> >>> On Sun, Feb 23, 2020 at 6:41 PM Mark Thompson  wrote:
> >>>
>  ---
>   libavcodec/Makefile   |  2 +-
>   libavcodec/cbs_h265.c | 99
> +++
>   libavcodec/cbs_h265.h | 18 
>   3 files changed, 118 insertions(+), 1 deletion(-)
>   create mode 100644 libavcodec/cbs_h265.c
> 
>  ...
>  +void
> 
> >>
> ff_cbs_h265_fill_sei_mastering_display(H265RawSEIMasteringDisplayColourVolume
>  *mdcv,
>  +const
>  AVMasteringDisplayMetadata *mdm)
>  +{
>  +memset(mdcv, 0, sizeof(*mdcv));
>  +
>  +if (mdm->has_primaries) {
>  +// The values in the metadata structure are fractions
> between 0
>  and 1,
>  +// while the SEI message contains fixed-point values with an
>  increment
>  +// of 0.00002.  So, scale up by 50000 to convert between
> them.
>  +
>  +for (int a = 0; a < 3; a++) {
>  +// The metadata structure stores this in RGB order, but
> the
>  SEI
>  +// wants it in GBR order.
>  +int b = (a + 1) % 3;
> 
> >>>
> >>> this is a pretty minor comment, but do you think you could use the more
> >>> legible way present in other parts of the codebase?
> >>> const int mapping[3] = {2, 0, 1};
> >>> rather than (a + 1) % 3;
> >>
> >> Ok.
> >>
> >> Is there a specific reason to make it on the stack rather than static?
> I
> >> see it's there in hevcdec.
> >>
> >
> > No particular reason, I just find it more readable, if you think it's a
> > really bad practice then you could keep the code as is.
>
> Sorry, my question wasn't very clear.  I don't mind the change.  But:
>
> Is there a reason why the array in hevcdec (and your suggestion) is not
> static?  (Some sort of compiler optimisation effect I'm missing, maybe.)
> Intuitively it feels like it should be static const rather than being
> constructed on the stack every time the function is called.
>

Oops no, there isn't any to my knowledge, feel free to add it though.
-- 
Vittorio
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [avfomat/rtp: source ips lost when specified as URL options] Patch for ffmpeg using rtp protocol where sources option is not retained

2020-02-25 Thread Jun Li
On Tue, Feb 25, 2020 at 5:01 AM Ross Nicholson  wrote:

> Hey Jun Li,
>
> I noticed you have submitted some patches which work around the same code
> area's that I submitted for. Your patches look quite tidy and well thought
> out so I was wondering if you could look at this patch and see if I'm going
> about it in the right way.
>
> I'm not sure this area of ffmpeg currently has a maintainer currently so
> the patches may be difficult to progress.
>
> Ross
>
> On Tue, 11 Feb 2020 at 22:42, Ross Nicholson  wrote:
>
>> The patch was created as a workaround to an issue from in kodi
>> (apologies, it's a rather long thread):
>> https://forum.kodi.tv/showthread.php?tid=350901&pid=2923550#pid2923550
>>
>> As an example, here is a URL: rtp://87.141.215.251@232.0.10.234:1
>>
>> Taking this URL we should be able to either reformat it to: rtp://
>> 232.0.10.234:1?sources=87.141.215.251 or pass the sources as an
>> av_dict to avfomat_open_input.
>>
>> Neither option works however. Instead the above workaround was created
>> but it's not really the right way to fix this. Would be great to get some
>> guidance on the right place to fix this in the right way.
>>
>> Thanks in advance.
>>
>> On Tue, 11 Feb 2020 at 22:30, phunkyfish  wrote:
>>
>>> ---
>>>  libavformat/rtsp.c | 26 --
>>>  1 file changed, 24 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/libavformat/rtsp.c b/libavformat/rtsp.c
>>> index 859defa592..f922055134 100644
>>> --- a/libavformat/rtsp.c
>>> +++ b/libavformat/rtsp.c
>>> @@ -2334,7 +2334,9 @@ static int sdp_read_header(AVFormatContext *s)
>>>  RTSPStream *rtsp_st;
>>>  int size, i, err;
>>>  char *content;
>>> +const char *p, *sp="", *sources="", *sp2, *sources2;
>>>  char url[1024];
>>> +char sources_buf[1024];
>>>
>>>  if (!ff_network_init())
>>>  return AVERROR(EIO);
>>> @@ -2360,6 +2362,16 @@ static int sdp_read_header(AVFormatContext *s)
>>>  av_freep(&content);
>>>  if (err) goto fail;
>>>
>>> +/* Search for sources= tag in original URL for rtp protocol only */
>>> +if (strncmp(s->url, "rtp://", 6) == 0) {
>>> +p = strchr(s->url, '?');
>>> +if (p && av_find_info_tag(sources_buf, sizeof(sources_buf),
>>> "sources", p)) {
>>> +/* av_log(s, AV_LOG_VERBOSE, "sdp_read_header found sources
>>> %s\n", sources_buf);  */
>>> +sp = sources_buf;
>>> +sources = "&sources=";
>>> +}
>>> +}
>>> +
>>>  /* open each RTP stream */
>>>  for (i = 0; i < rt->nb_rtsp_streams; i++) {
>>>  char namebuf[50];
>>> @@ -2377,12 +2389,22 @@ static int sdp_read_header(AVFormatContext *s)
>>>  av_dict_free(&opts);
>>>  goto fail;
>>>  }
>>> +
>>> +/* Prepare to add sources to the url to be opened.
>>> +   Otherwise the join to the source specific muliticast
>>> will be missing */
>>> +sources2 = sources;
>>> +sp2 = sp;
>>> +/* ignore sources from original URL, when sources are
>>> already set in rtsp_st */
>>> +if (rtsp_st->nb_include_source_addrs > 0)
>>> +sources2 = sp2 = "";
>>> +
>>>  ff_url_join(url, sizeof(url), "rtp", NULL,
>>>  namebuf, rtsp_st->sdp_port,
>>> -
>>> "?localport=%d&ttl=%d&connect=%d&write_to_source=%d",
>>> +
>>> "?localport=%d&ttl=%d&connect=%d&write_to_source=%d%s%s",
>>>  rtsp_st->sdp_port, rtsp_st->sdp_ttl,
>>>  rt->rtsp_flags & RTSP_FLAG_FILTER_SRC ? 1 : 0,
>>> -rt->rtsp_flags & RTSP_FLAG_RTCP_TO_SOURCE ? 1 :
>>> 0);
>>> +rt->rtsp_flags & RTSP_FLAG_RTCP_TO_SOURCE ? 1 :
>>> 0,
>>> +sources2, sp2);
>>>
>>>  append_source_addrs(url, sizeof(url), "sources",
>>>  rtsp_st->nb_include_source_addrs,
>>> --
>>> 2.20.1 (Apple Git-117)
>>>
>>>
Hi Ross,
I am not sure I understand your requirement clearly, an alternative way is
to save the url in sdp(maybe in rtp_read_header?) and read it out in
sdp_read_header. But I am not sure which field can carry this info (maybe
uri attribute ?).

Since you already have the code change, why not send it as a formal patch
so that FFmpeg maintainers/experts can have a review ?

-Jun
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH] Fixing rare dshow input crash

2020-02-25 Thread Roger Pack
Original bigger patch is enough for now.  Please merge, thanks!

On Wed, Jan 29, 2020 at 1:40 AM Paul B Mahol  wrote:
>
> Please incorporate this change in previous bigger patch. It is ready for 
> merge.
>
> On 1/29/20, Roger Pack  wrote:
> > Good catch.
> > Or maybe this?
> >
> > diff --git a/libavdevice/dshow.c b/libavdevice/dshow.c
> > index d7f5bd7..96e4374 100644
> > --- a/libavdevice/dshow.c
> > +++ b/libavdevice/dshow.c
> > @@ -453,6 +453,7 @@ next:
> >  if (type->pbFormat)
> >  CoTaskMemFree(type->pbFormat);
> >  CoTaskMemFree(type);
> > +type = NULL;
> >  }
> >  end:
> >  IAMStreamConfig_Release(config);
> >
> > On Thu, Sep 5, 2019 at 2:56 PM Alexey Potakhov  wrote:
> >>
> >> re-sending without tabs.
> >>
> >> On Wed, Sep 4, 2019 at 6:22 PM Carl Eugen Hoyos 
> >> wrote:
> >> >
> >> > Am Do., 5. Sept. 2019 um 00:08 Uhr schrieb Alexey Potakhov
> >> > :
> >> >
> >> > > In some rare cases when IAMStreamConfig_GetStreamCaps returns
> >> > > an error avformat_open_input() crashes with access violation.
> >> >
> >> > Tabs are rejected by our repository, please remove them and resend.
> >> >
> >> > Carl Eugen
> >> > ___
> >> > ffmpeg-devel mailing list
> >> > ffmpeg-devel@ffmpeg.org
> >> > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> >> >
> >> > To unsubscribe, visit link above, or email
> >> > ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
> >> ___
> >> ffmpeg-devel mailing list
> >> ffmpeg-devel@ffmpeg.org
> >> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> >>
> >> To unsubscribe, visit link above, or email
> >> ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
> > ___
> > ffmpeg-devel mailing list
> > ffmpeg-devel@ffmpeg.org
> > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> >
> > To unsubscribe, visit link above, or email
> > ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

[FFmpeg-devel] [PATCH v3] avfilter: add overlay vaapi filter

2020-02-25 Thread Xinpeng Sun
Overlay one video on the top of another.

It takes two inputs and has one output. The first input is the "main" video on 
which the
second input is overlaid. This filter requires same memory layout for all the 
inputs.

An example command to use this filter to overlay an image LOGO at the top-left 
corner of
the INPUT video and both inputs are yuv420p format:
FFMPEG -hwaccel vaapi -vaapi_device /dev/dri/renderD128 -hwaccel_output_format 
vaapi \
-i INPUT -i LOGO -filter_complex \
"[0:v]hwupload[a], [1:v]format=yuv420p, hwupload[b], [a][b]overlay_vaapi, 
hwdownload" \
OUTPUT

Signed-off-by: Xinpeng Sun 
Signed-off-by: Zachary Zhou 
---
 configure  |   3 +
 doc/filters.texi   |  51 
 libavfilter/Makefile   |   1 +
 libavfilter/allfilters.c   |   1 +
 libavfilter/vf_overlay_vaapi.c | 426 +
 5 files changed, 482 insertions(+)
 create mode 100644 libavfilter/vf_overlay_vaapi.c

diff --git a/configure b/configure
index ab761c7183..19fe94729f 100755
--- a/configure
+++ b/configure
@@ -3533,6 +3533,7 @@ openclsrc_filter_deps="opencl"
 overlay_opencl_filter_deps="opencl"
 overlay_qsv_filter_deps="libmfx"
 overlay_qsv_filter_select="qsvvpp"
+overlay_vaapi_filter_deps="vaapi"
 overlay_vulkan_filter_deps="vulkan libglslang"
 owdenoise_filter_deps="gpl"
 pad_opencl_filter_deps="opencl"
@@ -3592,6 +3593,7 @@ tonemap_vaapi_filter_deps="vaapi 
VAProcFilterParameterBufferHDRToneMapping"
 tonemap_opencl_filter_deps="opencl const_nan"
 transpose_opencl_filter_deps="opencl"
 transpose_vaapi_filter_deps="vaapi VAProcPipelineCaps_rotation_flags"
+overlay_vaapi_filter_deps="vaapi VAProcPipelineCaps_blend_flags"
 unsharp_opencl_filter_deps="opencl"
 uspp_filter_deps="gpl avcodec"
 vaguedenoiser_filter_deps="gpl"
@@ -6599,6 +6601,7 @@ if enabled vaapi; then
 check_struct "va/va.h" "VADecPictureParameterBufferVP9" bit_depth
 check_type   "va/va.h va/va_vpp.h" 
"VAProcFilterParameterBufferHDRToneMapping"
 check_struct "va/va.h va/va_vpp.h" "VAProcPipelineCaps" rotation_flags
+check_struct "va/va.h va/va_vpp.h" "VAProcPipelineCaps" blend_flags
 check_type "va/va.h va/va_enc_hevc.h" "VAEncPictureParameterBufferHEVC"
 check_type "va/va.h va/va_enc_jpeg.h" "VAEncPictureParameterBufferJPEG"
 check_type "va/va.h va/va_enc_vp8.h"  "VAEncPictureParameterBufferVP8"
diff --git a/doc/filters.texi b/doc/filters.texi
index 70fd7a4cc7..fbf7264b94 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -21732,6 +21732,57 @@ To enable compilation of these filters you need to 
configure FFmpeg with
 
 To use vaapi filters, you need to setup the vaapi device correctly. For more 
information, please read @url{https://trac.ffmpeg.org/wiki/Hardware/VAAPI}
 
+@section overlay_vaapi
+
+Overlay one video on the top of another.
+
+It takes two inputs and has one output. The first input is the "main" video on 
which the second input is overlaid.
+This filter requires same memory layout for all the inputs. So, format 
conversion may be needed.
+
+The filter accepts the following options:
+
+@table @option
+
+@item x
+Set the x coordinate of the overlaid video on the main video.
+Default value is @code{0}.
+
+@item y
+Set the y coordinate of the overlaid video on the main video.
+Default value is @code{0}.
+
+@item w
+Set the width of the overlaid video on the main video.
+Default value is the width of input overlay video.
+
+@item h
+Set the height of the overlaid video on the main video.
+Default value is the height of input overlay video.
+
+@item alpha
+Set blocking detection thresholds. Allowed range is 0.0 to 1.0, it
+requires an input video with alpha channel.
+Default value is @code{0.0}.
+
+@end table
+
+@subsection Examples
+
+@itemize
+@item
+Overlay an image LOGO at the top-left corner of the INPUT video. Both inputs 
for this filter are yuv420p format.
+@example
+-i INPUT -i LOGO -filter_complex "[0:v]hwupload[a], [1:v]format=yuv420p, 
hwupload[b], [a][b]overlay_vaapi" OUTPUT
+@end example
+@item
+Overlay an image LOGO at the offset (200, 100) from the top-left corner of the 
INPUT video.
+The inputs have same memory layout for color channels, the overlay has 
additional alpha plane, like INPUT is yuv420p, and the LOGO is yuva420p.
+@example
+-i INPUT -i LOGO -filter_complex "[0:v]hwupload[a], [1:v]format=yuva420p, 
hwupload[b], [a][b]overlay_vaapi=x=200:y=100:w=400:h=300:alpha=1.0, hwdownload, 
format=nv12" OUTPUT
+@end example
+
+@end itemize
+
 @section tonemap_vaapi
 
 Perform HDR(High Dynamic Range) to SDR(Standard Dynamic Range) conversion with 
tone-mapping.
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 089880a39d..96fce9e84f 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -326,6 +326,7 @@ OBJS-$(CONFIG_OVERLAY_FILTER)+= 
vf_overlay.o framesync.o
 OBJS-$(CONFIG_OVERLAY_OPENCL_FILTER) += vf_overlay_opencl.o opencl.o \
 opencl/overla

Re: [FFmpeg-devel] [PATCH v3] avfilter: add overlay vaapi filter

2020-02-25 Thread Sun, Xinpeng
Rebase and resend this patch. Fix some typo and indentation errors, and change 
some ambiguous statements in the document to make it more clear.

-Xinpeng 

> -Original Message-
> From: Sun, Xinpeng 
> Sent: Wednesday, February 26, 2020 1:54 PM
> To: ffmpeg-devel@ffmpeg.org
> Cc: Sun, Xinpeng ; Zhou, Zachary
> 
> Subject: [PATCH v3] avfilter: add overlay vaapi filter
> 
> Overlay one video on the top of another.
> 
> It takes two inputs and has one output. The first input is the "main" video on
> which the second input is overlaid. This filter requires same memory layout
> for all the inputs.
> 
> An example command to use this filter to overlay an image LOGO at the top-
> left corner of the INPUT video and both inputs are yuv420p format:
> FFMPEG -hwaccel vaapi -vaapi_device /dev/dri/renderD128 -
> hwaccel_output_format vaapi \ -i INPUT -i LOGO -filter_complex \
> "[0:v]hwupload[a], [1:v]format=yuv420p, hwupload[b], [a][b]overlay_vaapi,
> hwdownload" \ OUTPUT
> 
> Signed-off-by: Xinpeng Sun 
> Signed-off-by: Zachary Zhou 
> ---
>  configure  |   3 +
>  doc/filters.texi   |  51 
>  libavfilter/Makefile   |   1 +
>  libavfilter/allfilters.c   |   1 +
>  libavfilter/vf_overlay_vaapi.c | 426 +
>  5 files changed, 482 insertions(+)
>  create mode 100644 libavfilter/vf_overlay_vaapi.c
> 
[...]
> --
> 2.17.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

[FFmpeg-devel] [PATCH] [RFC] GSoC: FLIF16 Image format parser

2020-02-25 Thread Anamitra Ghorui
This is a buildable "skeleton" of my component (the FLIF16 parser)
i.e. everything is present aside from the logic itself.

***

Hello, I am trying to implement a parser for the FLIF16 file format as
a GSoC 2020 qualification project. So far I think I have managed to
register the parser (alongwith the format) and the basic structure
of the parser code.

I have now reached a point where moving forward is going to be quite 
difficult without outside help and references, and so I have a number 
of questions regarding the conceptual understanding of FFmpeg:

a. Please tell me if I am right or wrong here:
1. Each audio/video/image file format has a parser for converting the
   file data into a format that can be understood by a decoder.

2. A Decoder converts a given, recogised encoded data stream into a
   form that can be processed by physical hardware.

3. File formats can be independent of what sort of encoding it uses.
   Eg: WebM

4. The general Audio parsing/decoding process is as follows:
 i. Allocate space for a packet of data
ii. Try to find a hit for the codec of the given data format
   iii. Now, with the codec id, attempt to init a parser
iv. Allocate a context for the codec
 v. Initialize the codec context
vi. Initialize the codec
   vii. Allocate space for frame data
  viii. Open the input file
ix. While file pointer isn't EOF:
Read data into buffer
Parse data into a single frame
Decode the data
 x. Flush the file and free stuff.

5. Every parser has its own parser context extended from the default parser
   context. The byte offsets/positions in the file are kept by the parser
   context.

6. An image can be thought of as a video with a single frame

b. In libavcodec/parser.h:

typedef struct ParseContext{
...
int frame_start_found;
...
} ParseContext;

Is frame_start_found the determined position of the start of the frame
in the data stream?


c. I have been looking at the decoder/encoder/parser of the BMP format
   (which is one of the simplest image formats), the actual decoding work
   (according to me), i.e. Finding the magic numbers, seeing the various
   segments is being done by the decoder function and not the parser.
   
   The parser function from what I can see from the png_parser and
   bmp_parser, simply manipulates the ParseContext for appropriate
   values, and does not much else. What is it exactly doing over here?

If there are any books or articles I should read, please tell me.
---
 libavcodec/Makefile|  1 +
 libavcodec/avcodec.h   |  1 +
 libavcodec/flif16_parser.c | 51 ++
 libavcodec/parsers.c   |  1 +
 libavformat/img2.c |  1 +
 5 files changed, 55 insertions(+)
 create mode 100644 libavcodec/flif16_parser.c

diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 1e894c8049..ce18632d2c 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -1045,6 +1045,7 @@ OBJS-$(CONFIG_DVD_NAV_PARSER)  += dvd_nav_parser.o
 OBJS-$(CONFIG_DVDSUB_PARSER)   += dvdsub_parser.o
 OBJS-$(CONFIG_FLAC_PARSER) += flac_parser.o flacdata.o flac.o \
   vorbis_data.o
+OBJS-$(CONFIG_FLAC_PARSER) += flif16_parser.o
 OBJS-$(CONFIG_G723_1_PARSER)   += g723_1_parser.o
 OBJS-$(CONFIG_G729_PARSER) += g729_parser.o
 OBJS-$(CONFIG_GIF_PARSER)  += gif_parser.o
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 978f36d12a..c6b8c6a1eb 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -461,6 +461,7 @@ enum AVCodecID {
 AV_CODEC_ID_MVDV,
 AV_CODEC_ID_MVHA,
 AV_CODEC_ID_CDTOONS,
+AV_CODEC_ID_FLIF16,
 
 /* various PCM "codecs" */
 AV_CODEC_ID_FIRST_AUDIO = 0x1, ///< A dummy id pointing at the 
start of audio codecs
diff --git a/libavcodec/flif16_parser.c b/libavcodec/flif16_parser.c
new file mode 100644
index 00..54bd93d499
--- /dev/null
+++ b/libavcodec/flif16_parser.c
@@ -0,0 +1,51 @@
+/*
+ * FLIF16 parser
+ * Copyright (c) 2020 Anamitra Ghorui
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+ 
+ /**
+  * @file
+  * FLIF16 parser
+  */
+
+#include "parser