[FFmpeg-cvslog] avfilter/vf_xmedian: remove limitation of only odd number of inputs

2019-06-02 Thread Paul B Mahol
ffmpeg | branch: master | Paul B Mahol  | Sun Jun  2 11:03:08 
2019 +0200| [cbaa60329a73d1479a697cb83e82b1b97261d879] | committer: Paul B Mahol

avfilter/vf_xmedian: remove limitation of only odd number of inputs

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=cbaa60329a73d1479a697cb83e82b1b97261d879
---

 doc/filters.texi |  5 +++--
 libavfilter/vf_xmedian.c | 18 ++
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/doc/filters.texi b/doc/filters.texi
index 6e2dedaf0e..67bafdc7d2 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -18465,9 +18465,10 @@ Pick median pixels from several input videos.
 The filter accept the following options:
 
 @table @option
-@item nb_inputs
-Set number of inputs. This must be odd number.
+@item inputs
+Set number of inputs.
 Default is 3. Allowed range is from 3 to 255.
+If the number of inputs is even, the result is the mean of the two middle
(median) values.
 
 @item planes
 Set which planes to filter. Default value is @code{15}, by which all planes 
are processed.
diff --git a/libavfilter/vf_xmedian.c b/libavfilter/vf_xmedian.c
index ae61e18098..672b3a7e78 100644
--- a/libavfilter/vf_xmedian.c
+++ b/libavfilter/vf_xmedian.c
@@ -88,10 +88,6 @@ static av_cold int init(AVFilterContext *ctx)
 XMedianContext *s = ctx->priv;
 int ret;
 
-if (!(s->nb_inputs & 1))
-av_log(s, AV_LOG_WARNING, "nb_intputs: %d is not odd number.\n", 
s->nb_inputs);
-
-s->nb_inputs = s->nb_inputs | 1;
 s->radius = s->nb_inputs / 2;
 s->frames = av_calloc(s->nb_inputs, sizeof(*s->frames));
 if (!s->frames)
@@ -156,7 +152,10 @@ static int median_frames16(AVFilterContext *ctx, void 
*arg, int jobnr, int nb_jo
 }
 
 AV_QSORT(values, nb_inputs, int, comparei);
-dst[x] = values[radius];
+if (radius & 1)
+dst[x] = values[radius];
+else
+dst[x] = (values[radius] + values[radius - 1]) >> 1;
 }
 
 dst += out->linesize[p] / 2;
@@ -195,7 +194,10 @@ static int median_frames8(AVFilterContext *ctx, void *arg, 
int jobnr, int nb_job
 values[i] = in[i]->data[p][y * in[i]->linesize[p] + x];
 
 AV_QSORT(values, nb_inputs, int, comparei);
-dst[x] = values[radius];
+if (radius & 1)
+dst[x] = values[radius];
+else
+dst[x] = (values[radius] + values[radius - 1]) >> 1;
 }
 
 dst += out->linesize[p];
@@ -319,8 +321,8 @@ static int activate(AVFilterContext *ctx)
 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
 
 static const AVOption xmedian_options[] = {
-{ "nb_inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, 
{.i64=3},  3, 255, .flags = FLAGS },
-{ "planes","set planes to filter", OFFSET(planes),AV_OPT_TYPE_INT, 
{.i64=15}, 0,  15, .flags = FLAGS },
+{ "inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, 
{.i64=3},  3, 255, .flags = FLAGS },
+{ "planes", "set planes to filter", OFFSET(planes),AV_OPT_TYPE_INT, 
{.i64=15}, 0,  15, .flags = FLAGS },
 { NULL },
 };
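
As a hedged, standalone illustration of the documented behaviour (this is not
the filter's per-pixel code path, and the helper names are made up for the
example): with a sorted window, an odd input count picks the middle value and
an even count returns the mean of the two middle values.

#include <stdio.h>
#include <stdlib.h>

static int comparei(const void *a, const void *b)
{
    int x = *(const int *)a, y = *(const int *)b;
    return (x > y) - (x < y);
}

/* Median of nb values; for an even count, the mean of the two middle
 * values, matching the documentation change above. */
static int pick_median(int *values, int nb)
{
    qsort(values, nb, sizeof(*values), comparei);
    if (nb & 1)
        return values[nb / 2];
    return (values[nb / 2] + values[nb / 2 - 1]) / 2;
}

int main(void)
{
    int odd[]  = { 9, 1, 5 };       /* sorted: 1 5 9   -> median 5     */
    int even[] = { 9, 1, 5, 7 };    /* sorted: 1 5 7 9 -> mean(5,7) = 6 */
    printf("%d %d\n", pick_median(odd, 3), pick_median(even, 4));
    return 0;
}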
 


[FFmpeg-cvslog] avfilter/af_anlmdn: avoid creating frames with zero samples

2019-06-02 Thread Paul B Mahol
ffmpeg | branch: master | Paul B Mahol  | Sun Jun  2 13:00:35 
2019 +0200| [fcfe85220dbdcc8df226abc2f829de569372b7b4] | committer: Paul B Mahol

avfilter/af_anlmdn: avoid creating frames with zero samples

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=fcfe85220dbdcc8df226abc2f829de569372b7b4
---

 libavfilter/af_anlmdn.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavfilter/af_anlmdn.c b/libavfilter/af_anlmdn.c
index 5c881f6fd2..93b76e0dff 100644
--- a/libavfilter/af_anlmdn.c
+++ b/libavfilter/af_anlmdn.c
@@ -317,7 +317,7 @@ static int request_frame(AVFilterLink *outlink)
 
 if (s->eof_left < 0)
 s->eof_left = av_audio_fifo_size(s->fifo) - (s->S + s->K);
-if (s->eof_left < 0)
+if (s->eof_left <= 0)
 return AVERROR_EOF;
 in = ff_get_audio_buffer(outlink, s->H);
 if (!in)


[FFmpeg-cvslog] avfilter/af_anlmdn: try to recover when cache becomes negative

2019-06-02 Thread Paul B Mahol
ffmpeg | branch: master | Paul B Mahol  | Sun Jun  2 12:58:07 
2019 +0200| [1a266a1ef91d0329936ab75389102017cb48f45f] | committer: Paul B Mahol

avfilter/af_anlmdn: try to recover when cache becomes negative

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=1a266a1ef91d0329936ab75389102017cb48f45f
---

 libavfilter/af_anlmdn.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/libavfilter/af_anlmdn.c b/libavfilter/af_anlmdn.c
index 06e9736cc2..5c881f6fd2 100644
--- a/libavfilter/af_anlmdn.c
+++ b/libavfilter/af_anlmdn.c
@@ -224,7 +224,10 @@ static int filter_channel(AVFilterContext *ctx, void *arg, 
int ch, int nb_jobs)
 unsigned weight_lut_idx;
 float w;
 
-av_assert2(distance >= 0.f);
+if (distance < 0.f) {
+cache[j] = 0.f;
+continue;
+}
 w = distance * sw;
 if (w >= smooth)
 continue;
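
As a hedged aside (not part of the patch): the per-channel cache here is
maintained incrementally, and an incrementally updated floating-point sum can
drift slightly below zero even when the exact value cannot, which is why
clamping and continuing is safer than asserting. A tiny self-contained
demonstration of such drift:

#include <stdio.h>

int main(void)
{
    float sum = 0.0f;
    for (int i = 0; i < 1000000; i++)
        sum += 1e-3f;              /* accumulate a million small terms */
    for (int i = 0; i < 1000000; i++)
        sum -= 1e-3f;              /* remove them again */
    /* Mathematically zero, but rounding error leaves a small residual
     * that can land on either side of zero. */
    printf("residual = %g\n", sum);
    return 0;
}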


[FFmpeg-cvslog] avfilter/af_anlmdn: add smooth factor option

2019-06-02 Thread Paul B Mahol
ffmpeg | branch: master | Paul B Mahol  | Sun Jun  2 11:58:49 
2019 +0200| [b5355774652cfbef3fd3f2c77d1403661e5222a5] | committer: Paul B Mahol

avfilter/af_anlmdn: add smooth factor option

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=b5355774652cfbef3fd3f2c77d1403661e5222a5
---

 doc/filters.texi| 3 +++
 libavfilter/af_anlmdn.c | 8 +---
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/doc/filters.texi b/doc/filters.texi
index 67bafdc7d2..926f2717ec 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -1801,6 +1801,9 @@ Pass only noise.
 
 Default value is @var{o}.
 @end table
+
+@item m
+Set smooth factor. Default value is @var{11}. Allowed range is from @var{1} to 
@var{15}.
 @end table
 
 @section anull
diff --git a/libavfilter/af_anlmdn.c b/libavfilter/af_anlmdn.c
index 87c49c63b1..06e9736cc2 100644
--- a/libavfilter/af_anlmdn.c
+++ b/libavfilter/af_anlmdn.c
@@ -29,7 +29,6 @@
 
 #include "af_anlmdndsp.h"
 
-#define MAX_DIFF 11.f
 #define WEIGHT_LUT_NBITS 20
 #define WEIGHT_LUT_SIZE  (1<<WEIGHT_LUT_NBITS)
[...]
 s->pdiff_lut_scale = 1.f / s->m * WEIGHT_LUT_SIZE;
 for (int i = 0; i < WEIGHT_LUT_SIZE; i++) {
 float w = -i / s->pdiff_lut_scale;
 
@@ -201,6 +202,7 @@ static int filter_channel(AVFilterContext *ctx, void *arg, 
int ch, int nb_jobs)
 float *cache = (float *)s->cache->extended_data[ch];
 const float sw = (65536.f / (4 * K + 2)) / sqrtf(s->a);
 float *dst = (float *)out->extended_data[ch] + s->offset;
+const float smooth = s->m;
 
 for (int i = S; i < s->H + S; i++) {
 float P = 0.f, Q = 0.f;
@@ -224,7 +226,7 @@ static int filter_channel(AVFilterContext *ctx, void *arg, 
int ch, int nb_jobs)
 
 av_assert2(distance >= 0.f);
 w = distance * sw;
-if (w >= MAX_DIFF)
+if (w >= smooth)
 continue;
 weight_lut_idx = w * s->pdiff_lut_scale;
 av_assert2(weight_lut_idx < WEIGHT_LUT_SIZE);
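
A hedged sketch (with simplified, made-up names) of what the new m option
controls: the weight lookup table is scaled so that its full index range
covers scaled distances up to m, and any contribution whose scaled distance
reaches m is dropped entirely.

#include <stdio.h>

#define WEIGHT_LUT_NBITS 20
#define WEIGHT_LUT_SIZE  (1 << WEIGHT_LUT_NBITS)

int main(void)
{
    const float m = 11.0f;                               /* default smooth factor        */
    const float lut_scale = 1.0f / m * WEIGHT_LUT_SIZE;  /* index scale, as in the patch */
    const float distances[] = { 0.5f, 5.0f, 10.9f, 11.0f, 12.0f };

    for (int i = 0; i < 5; i++) {
        float w = distances[i];
        if (w >= m) {
            printf("w=%.1f: skipped (>= m)\n", w);
            continue;
        }
        printf("w=%.1f: LUT index %u of %d\n",
               w, (unsigned)(w * lut_scale), WEIGHT_LUT_SIZE);
    }
    return 0;
}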


[FFmpeg-cvslog] doc/muxers: fix typo of the hls var_stream_map example

2019-06-02 Thread Steven Liu
ffmpeg | branch: master | Steven Liu  | Sun Jun  2 
21:53:11 2019 +0800| [4ef0bea292d29115a60b427175614f99a7bc9da8] | committer: 
Steven Liu

doc/muxers: fix typo of the hls var_stream_map example

Also fix a typo at line 1833 of hlsenc.c.

Reviewed-by: Gyan Doshi 
Signed-off-by: Steven Liu 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=4ef0bea292d29115a60b427175614f99a7bc9da8
---

 doc/muxers.texi  | 2 +-
 libavformat/hlsenc.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/muxers.texi b/doc/muxers.texi
index 83ae017d6c..c73719c421 100644
--- a/doc/muxers.texi
+++ b/doc/muxers.texi
@@ -991,7 +991,7 @@ By default, a single hls variant containing all the encoded 
streams is created.
 @example
 ffmpeg -re -i in.ts -b:a:0 32k -b:a:1 64k -b:v:0 1000k \
   -map 0:a -map 0:a -map 0:v -f hls \
-  -var_stream_map "a:0,agroup:aud_low,default:yes,language=ENG 
a:1,agroup:aud_low,language:CHN v:0,agroup:aud_low" \
+  -var_stream_map "a:0,agroup:aud_low,default:yes,language:ENG 
a:1,agroup:aud_low,language:CHN v:0,agroup:aud_low" \
   -master_pl_name master.m3u8 \
   http://example.com/live/out_%v.m3u8
 @end example
diff --git a/libavformat/hlsenc.c b/libavformat/hlsenc.c
index 6b913be31c..1613c34509 100644
--- a/libavformat/hlsenc.c
+++ b/libavformat/hlsenc.c
@@ -1830,7 +1830,7 @@ static int parse_variant_stream_mapstring(AVFormatContext 
*s)
 /**
  * Expected format for var_stream_map string is as below:
  * "a:0,v:0 a:1,v:1"
- * "a:0,agroup:a0,default:1,language:ENG a:1,agroup:a1,defalut:0 
v:0,agroup:a0  v:1,agroup:a1"
+ * "a:0,agroup:a0,default:1,language:ENG a:1,agroup:a1,default:0 
v:0,agroup:a0  v:1,agroup:a1"
  * This string specifies how to group the audio, video and subtitle streams
  * into different variant streams. The variant stream groups are separated
  * by space.


[FFmpeg-cvslog] vf_misc_vaapi: Add missing return value checks

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Sun Mar 31 15:39:40 
2019 +0100| [5fb9eb9ed256d772609377ce356b8e22de9611d2] | committer: Mark 
Thompson

vf_misc_vaapi: Add missing return value checks

Parameter buffer creation can fail.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=5fb9eb9ed256d772609377ce356b8e22de9611d2
---

 libavfilter/vf_misc_vaapi.c | 15 ++-
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/libavfilter/vf_misc_vaapi.c b/libavfilter/vf_misc_vaapi.c
index 6f31a04293..6fbd453741 100644
--- a/libavfilter/vf_misc_vaapi.c
+++ b/libavfilter/vf_misc_vaapi.c
@@ -84,10 +84,9 @@ static int denoise_vaapi_build_filter_params(AVFilterContext 
*avctx)
 denoise.value =  map(ctx->denoise, DENOISE_MIN, DENOISE_MAX,
  caps.range.min_value,
  caps.range.max_value);
-ff_vaapi_vpp_make_param_buffers(avctx, VAProcFilterParameterBufferType,
-&denoise, sizeof(denoise), 1);
-
-return 0;
+return ff_vaapi_vpp_make_param_buffers(avctx,
+   VAProcFilterParameterBufferType,
+   &denoise, sizeof(denoise), 1);
 }
 
 static int sharpness_vaapi_build_filter_params(AVFilterContext *avctx)
@@ -116,11 +115,9 @@ static int 
sharpness_vaapi_build_filter_params(AVFilterContext *avctx)
   SHARPNESS_MIN, SHARPNESS_MAX,
   caps.range.min_value,
   caps.range.max_value);
-ff_vaapi_vpp_make_param_buffers(avctx,
-VAProcFilterParameterBufferType,
-&sharpness, sizeof(sharpness), 1);
-
-return 0;
+return ff_vaapi_vpp_make_param_buffers(avctx,
+   VAProcFilterParameterBufferType,
+   &sharpness, sizeof(sharpness), 1);
 }
 
 static int misc_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)


[FFmpeg-cvslog] lavfi/vaapi: Factorise out common code for parameter buffer setup

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Sun Mar 31 15:39:35 
2019 +0100| [6ed34a437925c5263f6c4ac7d0a9a46955055abe] | committer: Mark 
Thompson

lavfi/vaapi: Factorise out common code for parameter buffer setup

Also enables cropping on all VAAPI filters, inherited from the existing
support in scale_vaapi.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=6ed34a437925c5263f6c4ac7d0a9a46955055abe
---

 libavfilter/vaapi_vpp.c| 55 +++---
 libavfilter/vaapi_vpp.h|  8 +-
 libavfilter/vf_deinterlace_vaapi.c | 33 +--
 libavfilter/vf_misc_vaapi.c| 34 ---
 libavfilter/vf_procamp_vaapi.c | 34 ---
 libavfilter/vf_scale_vaapi.c   | 38 +-
 libavfilter/vf_transpose_vaapi.c   | 44 +-
 7 files changed, 87 insertions(+), 159 deletions(-)

diff --git a/libavfilter/vaapi_vpp.c b/libavfilter/vaapi_vpp.c
index c5bbc3b85b..647ddc0811 100644
--- a/libavfilter/vaapi_vpp.c
+++ b/libavfilter/vaapi_vpp.c
@@ -248,6 +248,52 @@ int ff_vaapi_vpp_colour_standard(enum AVColorSpace av_cs)
 }
 }
 
+int ff_vaapi_vpp_init_params(AVFilterContext *avctx,
+ VAProcPipelineParameterBuffer *params,
+ const AVFrame *input_frame,
+ AVFrame *output_frame)
+{
+VAAPIVPPContext *ctx = avctx->priv;
+VASurfaceID input_surface;
+
+ctx->input_region = (VARectangle) {
+.x  = input_frame->crop_left,
+.y  = input_frame->crop_top,
+.width  = input_frame->width -
+ (input_frame->crop_left + input_frame->crop_right),
+.height = input_frame->height -
+ (input_frame->crop_top + input_frame->crop_bottom),
+};
+output_frame->crop_top= 0;
+output_frame->crop_bottom = 0;
+output_frame->crop_left   = 0;
+output_frame->crop_right  = 0;
+
+input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3],
+
+*params = (VAProcPipelineParameterBuffer) {
+.surface = input_surface,
+.surface_region  = &ctx->input_region,
+.surface_color_standard  =
+ff_vaapi_vpp_colour_standard(input_frame->colorspace),
+.output_region   = NULL,
+.output_background_color = VAAPI_VPP_BACKGROUND_BLACK,
+.output_color_standard   =
+ff_vaapi_vpp_colour_standard(input_frame->colorspace),
+.pipeline_flags  = 0,
+.filter_flags= VA_FRAME_PICTURE,
+
+// Filter and reference data filled by the filter itself.
+
+#if VA_CHECK_VERSION(1, 1, 0)
+.rotation_state = VA_ROTATION_NONE,
+.mirror_state   = VA_MIRROR_NONE,
+#endif
+};
+
+return 0;
+}
+
 int ff_vaapi_vpp_make_param_buffers(AVFilterContext *avctx,
 int type,
 const void *data,
@@ -279,12 +325,15 @@ int ff_vaapi_vpp_make_param_buffers(AVFilterContext 
*avctx,
 
 int ff_vaapi_vpp_render_picture(AVFilterContext *avctx,
 VAProcPipelineParameterBuffer *params,
-VASurfaceID output_surface)
+AVFrame *output_frame)
 {
+VAAPIVPPContext *ctx = avctx->priv;
+VASurfaceID output_surface;
 VABufferID params_id;
 VAStatus vas;
-int err = 0;
-VAAPIVPPContext *ctx   = avctx->priv;
+int err;
+
+output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3];
 
 vas = vaBeginPicture(ctx->hwctx->display,
  ctx->va_context, output_surface);
diff --git a/libavfilter/vaapi_vpp.h b/libavfilter/vaapi_vpp.h
index 96f720f07d..1e2b4a1066 100644
--- a/libavfilter/vaapi_vpp.h
+++ b/libavfilter/vaapi_vpp.h
@@ -42,6 +42,7 @@ typedef struct VAAPIVPPContext {
 
 AVBufferRef   *input_frames_ref;
 AVHWFramesContext *input_frames;
+VARectangleinput_region;
 
 enum AVPixelFormat output_format;
 int output_width;   // computed width
@@ -69,6 +70,11 @@ int ff_vaapi_vpp_config_output(AVFilterLink *outlink);
 
 int ff_vaapi_vpp_colour_standard(enum AVColorSpace av_cs);
 
+int ff_vaapi_vpp_init_params(AVFilterContext *avctx,
+ VAProcPipelineParameterBuffer *params,
+ const AVFrame *input_frame,
+ AVFrame *output_frame);
+
 int ff_vaapi_vpp_make_param_buffers(AVFilterContext *avctx,
 int type,
 const void *data,
@@ -77,6 +83,6 @@ int ff_vaapi_vpp_make_param_buffers(AVFilterContext *avctx,
 
 int ff_vaapi_vpp_render_picture(AVFilterContext *avctx,
 VAProcPipelineParameterBuffer *params,
-VASurfaceID output_surface);
+AVFrame *outp

[FFmpeg-cvslog] vf_crop: Add support for cropping hardware frames

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Sat Mar 23 16:18:48 
2019 +| [f1b359aaf5c34359df8c8541d002f128d868f27f] | committer: Mark 
Thompson

vf_crop: Add support for cropping hardware frames

Set the cropping fields in the AVFrame.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=f1b359aaf5c34359df8c8541d002f128d868f27f
---

 libavfilter/vf_crop.c | 73 +++
 1 file changed, 50 insertions(+), 23 deletions(-)

diff --git a/libavfilter/vf_crop.c b/libavfilter/vf_crop.c
index 84be4c7d0d..9fca7a7309 100644
--- a/libavfilter/vf_crop.c
+++ b/libavfilter/vf_crop.c
@@ -98,9 +98,17 @@ static int query_formats(AVFilterContext *ctx)
 
 for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
-if (!(desc->flags & (AV_PIX_FMT_FLAG_HWACCEL | 
AV_PIX_FMT_FLAG_BITSTREAM)) &&
-!((desc->log2_chroma_w || desc->log2_chroma_h) && !(desc->flags & 
AV_PIX_FMT_FLAG_PLANAR)) &&
-(ret = ff_add_format(&formats, fmt)) < 0)
+if (desc->flags & AV_PIX_FMT_FLAG_BITSTREAM)
+continue;
+if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
+// Not usable if there is any subsampling but the format is
+// not planar (e.g. YUYV422).
+if ((desc->log2_chroma_w || desc->log2_chroma_h) &&
+!(desc->flags & AV_PIX_FMT_FLAG_PLANAR))
+continue;
+}
+ret = ff_add_format(&formats, fmt);
+if (ret < 0)
 return ret;
 }
 
@@ -157,8 +165,14 @@ static int config_input(AVFilterLink *link)
 s->var_values[VAR_POS]   = NAN;
 
 av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
-s->hsub = pix_desc->log2_chroma_w;
-s->vsub = pix_desc->log2_chroma_h;
+
+if (pix_desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
+s->hsub = 1;
+s->vsub = 1;
+} else {
+s->hsub = pix_desc->log2_chroma_w;
+s->vsub = pix_desc->log2_chroma_h;
+}
 
 if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
   var_names, s->var_values,
@@ -237,9 +251,15 @@ fail_expr:
 static int config_output(AVFilterLink *link)
 {
 CropContext *s = link->src->priv;
+const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
 
-link->w = s->w;
-link->h = s->h;
+if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
+// Hardware frames adjust the cropping regions rather than
+// changing the frame size.
+} else {
+link->w = s->w;
+link->h = s->h;
+}
 link->sample_aspect_ratio = s->out_sar;
 
 return 0;
@@ -252,9 +272,6 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
 int i;
 
-frame->width  = s->w;
-frame->height = s->h;
-
 s->var_values[VAR_N] = link->frame_count_out;
 s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
 NAN : frame->pts * av_q2d(link->time_base);
@@ -285,22 +302,32 @@ static int filter_frame(AVFilterLink *link, AVFrame 
*frame)
 (int)s->var_values[VAR_N], s->var_values[VAR_T], 
s->var_values[VAR_POS],
 s->x, s->y, s->x+s->w, s->y+s->h);
 
-frame->data[0] += s->y * frame->linesize[0];
-frame->data[0] += s->x * s->max_step[0];
-
-if (!(desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & FF_PSEUDOPAL)) {
-for (i = 1; i < 3; i ++) {
-if (frame->data[i]) {
-frame->data[i] += (s->y >> s->vsub) * frame->linesize[i];
-frame->data[i] += (s->x * s->max_step[i]) >> s->hsub;
+if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
+frame->crop_top   += s->y;
+frame->crop_left  += s->x;
+frame->crop_bottom = frame->height - frame->crop_top - 
frame->crop_bottom - s->h;
+frame->crop_right  = frame->width  - frame->crop_left - 
frame->crop_right - s->w;
+} else {
+frame->width  = s->w;
+frame->height = s->h;
+
+frame->data[0] += s->y * frame->linesize[0];
+frame->data[0] += s->x * s->max_step[0];
+
+if (!(desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & 
FF_PSEUDOPAL)) {
+for (i = 1; i < 3; i ++) {
+if (frame->data[i]) {
+frame->data[i] += (s->y >> s->vsub) * frame->linesize[i];
+frame->data[i] += (s->x * s->max_step[i]) >> s->hsub;
+}
 }
 }
-}
 
-/* alpha plane */
-if (frame->data[3]) {
-frame->data[3] += s->y * frame->linesize[3];
-frame->data[3] += s->x * s->max_step[3];
+/* alpha plane */
+if (frame->data[3]) {
+frame->data[3] += s->y * frame->linesize[3];
+frame->data[3] += s->x * s->max_step[3];
+}
 }
 
 return ff_filter_frame(link->dst->outputs[0], frame);
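
As a hedged worked example of the hardware path above (plain ints stand in
for the AVFrame fields; the crop geometry matches the kmsgrab example later
in this digest): cropping a 1920x1080 frame to its middle 960x540 quarter at
offset (480,270) only adjusts the crop_* fields and leaves the stored frame
size untouched.

#include <stdio.h>

int main(void)
{
    int width = 1920, height = 1080;                       /* stored frame size */
    int crop_top = 0, crop_left = 0, crop_bottom = 0, crop_right = 0;
    int x = 480, y = 270, w = 960, h = 540;                /* requested crop    */

    /* Same arithmetic as the hardware branch above, assuming no
     * pre-existing cropping on the input frame. */
    crop_top   += y;
    crop_left  += x;
    crop_bottom = height - crop_top  - crop_bottom - h;
    crop_right  = width  - crop_left - crop_right  - w;

    /* Prints: top=270 left=480 bottom=270 right=480 */
    printf("top=%d left=%d bottom=%d right=%d\n",
           crop_top, crop_left, crop_bottom, crop_right);
    return 0;
}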


[FFmpeg-cvslog] vf_scale_vaapi: Add options to configure output colour properties

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Thu Feb 28 00:38:09 
2019 +| [ef2f89bbccc973fbde0926bfedef6e1eb3604674] | committer: Mark 
Thompson

vf_scale_vaapi: Add options to configure output colour properties

The "out_color_matrix" and "out_range" properties match the same options
in vf_scale; the others attempt to follow the same pattern.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=ef2f89bbccc973fbde0926bfedef6e1eb3604674
---

 libavfilter/vf_scale_vaapi.c | 70 
 1 file changed, 70 insertions(+)

diff --git a/libavfilter/vf_scale_vaapi.c b/libavfilter/vf_scale_vaapi.c
index ae2471b821..c32395ac09 100644
--- a/libavfilter/vf_scale_vaapi.c
+++ b/libavfilter/vf_scale_vaapi.c
@@ -39,6 +39,17 @@ typedef struct ScaleVAAPIContext {
 
 char *w_expr;  // width expression string
 char *h_expr;  // height expression string
+
+char *colour_primaries_string;
+char *colour_transfer_string;
+char *colour_matrix_string;
+int   colour_range;
+char *chroma_location_string;
+
+enum AVColorPrimaries colour_primaries;
+enum AVColorTransferCharacteristic colour_transfer;
+enum AVColorSpace colour_matrix;
+enum AVChromaLocation chroma_location;
 } ScaleVAAPIContext;
 
 static const char *scale_vaapi_mode_name(int mode)
@@ -110,6 +121,17 @@ static int scale_vaapi_filter_frame(AVFilterLink *inlink, 
AVFrame *input_frame)
 if (err < 0)
 return err;
 
+if (ctx->colour_primaries != AVCOL_PRI_UNSPECIFIED)
+output_frame->color_primaries = ctx->colour_primaries;
+if (ctx->colour_transfer != AVCOL_TRC_UNSPECIFIED)
+output_frame->color_trc = ctx->colour_transfer;
+if (ctx->colour_matrix != AVCOL_SPC_UNSPECIFIED)
+output_frame->colorspace = ctx->colour_matrix;
+if (ctx->colour_range != AVCOL_RANGE_UNSPECIFIED)
+output_frame->color_range = ctx->colour_range;
+if (ctx->chroma_location != AVCHROMA_LOC_UNSPECIFIED)
+output_frame->chroma_location = ctx->chroma_location;
+
 err = ff_vaapi_vpp_init_params(avctx, ¶ms,
input_frame, output_frame);
 if (err < 0)
@@ -155,6 +177,24 @@ static av_cold int scale_vaapi_init(AVFilterContext *avctx)
 vpp_ctx->output_format = AV_PIX_FMT_NONE;
 }
 
+#define STRING_OPTION(var_name, func_name, default_value) do { \
+if (ctx->var_name ## _string) { \
+int var = av_ ## func_name ## _from_name(ctx->var_name ## 
_string); \
+if (var < 0) { \
+av_log(avctx, AV_LOG_ERROR, "Invalid %s.\n", #var_name); \
+return AVERROR(EINVAL); \
+} \
+ctx->var_name = var; \
+} else { \
+ctx->var_name = default_value; \
+} \
+} while (0)
+
+STRING_OPTION(colour_primaries, color_primaries, AVCOL_PRI_UNSPECIFIED);
+STRING_OPTION(colour_transfer,  color_transfer,  AVCOL_TRC_UNSPECIFIED);
+STRING_OPTION(colour_matrix,color_space, AVCOL_SPC_UNSPECIFIED);
+STRING_OPTION(chroma_location,  chroma_location, AVCHROMA_LOC_UNSPECIFIED);
+
 return 0;
 }
 
@@ -178,6 +218,36 @@ static const AVOption scale_vaapi_options[] = {
   0, AV_OPT_TYPE_CONST, { .i64 = VA_FILTER_SCALING_HQ }, 0, 0, FLAGS,  
"mode" },
 { "nl_anamorphic", "Use nolinear anamorphic scaling algorithm",
   0, AV_OPT_TYPE_CONST, { .i64 = VA_FILTER_SCALING_NL_ANAMORPHIC }, 0, 
0, FLAGS,  "mode" },
+
+// These colour properties match the ones of the same name in vf_scale.
+{ "out_color_matrix", "Output colour matrix coefficient set",
+  OFFSET(colour_matrix_string), AV_OPT_TYPE_STRING, { .str = NULL }, 
.flags = FLAGS },
+{ "out_range", "Output colour range",
+  OFFSET(colour_range), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED 
},
+  AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_JPEG, FLAGS, "range" },
+{ "full","Full range",
+  0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_JPEG }, 0, 0, FLAGS, "range" 
},
+{ "limited", "Limited range",
+  0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_MPEG }, 0, 0, FLAGS, "range" 
},
+{ "jpeg","Full range",
+  0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_JPEG }, 0, 0, FLAGS, "range" 
},
+{ "mpeg","Limited range",
+  0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_MPEG }, 0, 0, FLAGS, "range" 
},
+{ "tv",  "Limited range",
+  0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_MPEG }, 0, 0, FLAGS, "range" 
},
+{ "pc",  "Full range",
+  0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_JPEG }, 0, 0, FLAGS, "range" 
},
+// These colour properties are new here.
+{ "out_color_primaries", "Output colour primaries",
+  OFFSET(colour_primaries_string), AV_OPT_TYPE_STRING,
+  { .str = NULL }, .flags = FLAGS },
+{ "out_color_transfer", "Output colour transfer characteristics",
+  OFFSET(colour_transfer_string),  AV_OPT_TYPE_STRING,
+  { .str = NULL }, .flags = FLAG

[FFmpeg-cvslog] lavfi/vaapi: Improve support for colour properties

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Thu Feb 28 00:38:08 
2019 +| [5051b7f898aebb77291f2b1c97a53e84b4c34256] | committer: Mark 
Thompson

lavfi/vaapi: Improve support for colour properties

Attempts to pick the set of supported colour properties best matching the
input.  Output is then set with the same values, except for the colour
matrix which may change when converting between RGB and YUV.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=5051b7f898aebb77291f2b1c97a53e84b4c34256
---

 libavfilter/vaapi_vpp.c| 288 +++--
 libavfilter/vaapi_vpp.h|   2 -
 libavfilter/vf_deinterlace_vaapi.c |   8 +-
 libavfilter/vf_misc_vaapi.c|   7 +-
 libavfilter/vf_procamp_vaapi.c |   7 +-
 libavfilter/vf_scale_vaapi.c   |   8 +-
 libavfilter/vf_transpose_vaapi.c   |   7 +-
 7 files changed, 295 insertions(+), 32 deletions(-)

diff --git a/libavfilter/vaapi_vpp.c b/libavfilter/vaapi_vpp.c
index 647ddc0811..0486c295cb 100644
--- a/libavfilter/vaapi_vpp.c
+++ b/libavfilter/vaapi_vpp.c
@@ -234,18 +234,278 @@ fail:
 return err;
 }
 
-int ff_vaapi_vpp_colour_standard(enum AVColorSpace av_cs)
+typedef struct VAAPIColourProperties {
+VAProcColorStandardType va_color_standard;
+
+enum AVColorPrimaries color_primaries;
+enum AVColorTransferCharacteristic color_trc;
+enum AVColorSpace colorspace;
+
+uint8_t va_chroma_sample_location;
+uint8_t va_color_range;
+
+enum AVColorRange color_range;
+enum AVChromaLocation chroma_sample_location;
+} VAAPIColourProperties;
+
+static const VAAPIColourProperties vaapi_colour_standard_map[] = {
+{ VAProcColorStandardBT601,   5,  6,  5 },
+{ VAProcColorStandardBT601,   6,  6,  6 },
+{ VAProcColorStandardBT709,   1,  1,  1 },
+{ VAProcColorStandardBT470M,  4,  4,  4 },
+{ VAProcColorStandardBT470BG, 5,  5,  5 },
+{ VAProcColorStandardSMPTE170M,   6,  6,  6 },
+{ VAProcColorStandardSMPTE240M,   7,  7,  7 },
+{ VAProcColorStandardGenericFilm, 8,  1,  1 },
+#if VA_CHECK_VERSION(1, 1, 0)
+{ VAProcColorStandardSRGB,1, 13,  0 },
+{ VAProcColorStandardXVYCC601,1, 11,  5 },
+{ VAProcColorStandardXVYCC709,1, 11,  1 },
+{ VAProcColorStandardBT2020,  9, 14,  9 },
+#endif
+};
+
+static void vaapi_vpp_fill_colour_standard(VAAPIColourProperties *props,
+   VAProcColorStandardType *vacs,
+   int nb_vacs)
 {
-switch(av_cs) {
-#define CS(av, va) case AVCOL_SPC_ ## av: return VAProcColorStandard ## va;
-CS(BT709, BT709);
-CS(BT470BG,   BT601);
-CS(SMPTE170M, SMPTE170M);
-CS(SMPTE240M, SMPTE240M);
-#undef CS
+const VAAPIColourProperties *t;
+int i, j, score, best_score, worst_score;
+VAProcColorStandardType best_standard;
+
+#if VA_CHECK_VERSION(1, 1, 0)
+// If the driver supports explicit use of the standard values then just
+// use them and avoid doing any mapping.  (The driver may not support
+// some particular code point, but it still has enough information to
+// make a better fallback choice than we do in that case.)
+for (i = 0; i < nb_vacs; i++) {
+if (vacs[i] == VAProcColorStandardExplicit) {
+props->va_color_standard = VAProcColorStandardExplicit;
+return;
+}
+}
+#endif
+
+// Give scores to the possible options and choose the lowest one.
+// An exact match will score zero and therefore always be chosen, as
+// will a partial match where all unmatched elements are explicitly
+// unspecified.  If no options match at all then just pass "none" to
+// the driver and let it make its own choice.
+best_standard = VAProcColorStandardNone;
+best_score = -1;
+worst_score = 4 * (props->colorspace != AVCOL_SPC_UNSPECIFIED &&
+   props->colorspace != AVCOL_SPC_RGB) +
+  2 * (props->color_trc != AVCOL_TRC_UNSPECIFIED) +
+  (props->color_primaries != AVCOL_PRI_UNSPECIFIED);
+
+if (worst_score == 0) {
+// No properties are specified, so we aren't going to be able to
+// make a useful choice.
+props->va_color_standard = VAProcColorStandardNone;
+return;
+}
+
+for (i = 0; i < nb_vacs; i++) {
+for (j = 0; j < FF_ARRAY_ELEMS(vaapi_colour_standard_map); j++) {
+t = &vaapi_colour_standard_map[j];
+if (t->va_color_standard != vacs[i])
+continue;
+
+score = 0;
+if (props->colorspace != AVCOL_SPC_UNSPECIFIED &&
+props->colorspace != AVCOL_SPC_RGB)
+score += 4 * (props->colorspace != t->colorspace);
+if (props->color_trc != AVCOL_TRC_UNSPECIFIED)
+score += 2 * (props->color_trc != t->color_trc);
+if (props->color_primaries != AVCOL_PRI_UNSPECIFIED)
+ 

[FFmpeg-cvslog] doc/indevs: Add example using cropping to capture part of a plane

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Sat Mar 23 16:18:49 
2019 +| [963c4f85fe547ef51fafb66d7eceb3f5637d3843] | committer: Mark 
Thompson

doc/indevs: Add example using cropping to capture part of a plane

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=963c4f85fe547ef51fafb66d7eceb3f5637d3843
---

 doc/indevs.texi | 8 
 1 file changed, 8 insertions(+)

diff --git a/doc/indevs.texi b/doc/indevs.texi
index 1d5ed65773..89ba4fb406 100644
--- a/doc/indevs.texi
+++ b/doc/indevs.texi
@@ -910,6 +910,14 @@ Capture from CRTC ID 42 at 60fps, map the result to VAAPI, 
convert to NV12 and e
 ffmpeg -crtc_id 42 -framerate 60 -f kmsgrab -i - -vf 
'hwmap=derive_device=vaapi,scale_vaapi=w=1920:h=1080:format=nv12' -c:v 
h264_vaapi output.mp4
 @end example
 
+@item
+To capture only part of a plane the output can be cropped - this can be used 
to capture
+a single window, as long as it has a known absolute position and size.  For 
example, to
+capture and encode the middle quarter of a 1920x1080 plane:
+@example
+ffmpeg -f kmsgrab -i - -vf 
'hwmap=derive_device=vaapi,crop=960:540:480:270,scale_vaapi=960:540:nv12' -c:v 
h264_vaapi output.mp4
+@end example
+
 @end itemize
 
 @section lavfi


[FFmpeg-cvslog] vaapi_encode: Warn if input has cropping information

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Sun Mar 31 15:39:44 
2019 +0100| [909bcedc581aa03dd5e22ecb1d0cc3b52eba8c26] | committer: Mark 
Thompson

vaapi_encode: Warn if input has cropping information

Cropping is not supported by VAAPI encode.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=909bcedc581aa03dd5e22ecb1d0cc3b52eba8c26
---

 libavcodec/vaapi_encode.c | 19 +++
 libavcodec/vaapi_encode.h |  4 
 2 files changed, 23 insertions(+)

diff --git a/libavcodec/vaapi_encode.c b/libavcodec/vaapi_encode.c
index 2dda451882..c3d8944c3c 100644
--- a/libavcodec/vaapi_encode.c
+++ b/libavcodec/vaapi_encode.c
@@ -913,6 +913,21 @@ static int vaapi_encode_clear_old(AVCodecContext *avctx)
 return 0;
 }
 
+static int vaapi_encode_check_frame(AVCodecContext *avctx,
+const AVFrame *frame)
+{
+VAAPIEncodeContext *ctx = avctx->priv_data;
+
+if ((frame->crop_top  || frame->crop_bottom ||
+ frame->crop_left || frame->crop_right) && !ctx->crop_warned) {
+av_log(avctx, AV_LOG_WARNING, "Cropping information on input "
+   "frames ignored due to lack of API support.\n");
+ctx->crop_warned = 1;
+}
+
+return 0;
+}
+
 int ff_vaapi_encode_send_frame(AVCodecContext *avctx, const AVFrame *frame)
 {
 VAAPIEncodeContext *ctx = avctx->priv_data;
@@ -923,6 +938,10 @@ int ff_vaapi_encode_send_frame(AVCodecContext *avctx, 
const AVFrame *frame)
 av_log(avctx, AV_LOG_DEBUG, "Input frame: %ux%u (%"PRId64").\n",
frame->width, frame->height, frame->pts);
 
+err = vaapi_encode_check_frame(avctx, frame);
+if (err < 0)
+return err;
+
 pic = vaapi_encode_alloc(avctx);
 if (!pic)
 return AVERROR(ENOMEM);
diff --git a/libavcodec/vaapi_encode.h b/libavcodec/vaapi_encode.h
index 44a8db566e..12efee2d08 100644
--- a/libavcodec/vaapi_encode.h
+++ b/libavcodec/vaapi_encode.h
@@ -314,6 +314,10 @@ typedef struct VAAPIEncodeContext {
 int idr_counter;
 int gop_counter;
 int end_of_stream;
+
+// The encoder does not support cropping information, so warn about
+// it the first time we encounter any nonzero crop fields.
+int crop_warned;
 } VAAPIEncodeContext;
 
 enum {


[FFmpeg-cvslog] hwcontext_vaapi: Add option to set driver name

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Wed Nov 14 21:25:00 
2018 +| [7f3f5a24a1c70c6a8e72cdf387252ce0d1afee7f] | committer: Mark 
Thompson

hwcontext_vaapi: Add option to set driver name

For example: -init_hw_device vaapi:/dev/dri/renderD128,driver=foo

This may be more convenient than using the environment variable, and allows
loading different drivers for different devices in the same process.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=7f3f5a24a1c70c6a8e72cdf387252ce0d1afee7f
---

 libavutil/hwcontext_vaapi.c | 17 +
 1 file changed, 17 insertions(+)

diff --git a/libavutil/hwcontext_vaapi.c b/libavutil/hwcontext_vaapi.c
index 941c257b1b..f05b9ee9cf 100644
--- a/libavutil/hwcontext_vaapi.c
+++ b/libavutil/hwcontext_vaapi.c
@@ -1598,6 +1598,23 @@ static int vaapi_device_create(AVHWDeviceContext *ctx, 
const char *device,
 return AVERROR(EINVAL);
 }
 
+ent = av_dict_get(opts, "driver", NULL, 0);
+if (ent) {
+#if VA_CHECK_VERSION(0, 38, 0)
+VAStatus vas;
+vas = vaSetDriverName(display, ent->value);
+if (vas != VA_STATUS_SUCCESS) {
+av_log(ctx, AV_LOG_ERROR, "Failed to set driver name to "
+   "%s: %d (%s).\n", ent->value, vas, vaErrorStr(vas));
+vaTerminate(display);
+return AVERROR_EXTERNAL;
+}
+#else
+av_log(ctx, AV_LOG_WARNING, "Driver name setting is not "
+   "supported with this VAAPI version.\n");
+#endif
+}
+
 return vaapi_device_connect(ctx, display);
 }
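
For library users, the same option can be passed programmatically. A hedged
sketch using only public libavutil API (the driver name "iHD" and the device
path are illustrative values, not recommendations):

#include <stdio.h>
#include <libavutil/buffer.h>
#include <libavutil/dict.h>
#include <libavutil/hwcontext.h>

int main(void)
{
    AVBufferRef *device = NULL;
    AVDictionary *opts = NULL;
    int err;

    /* Roughly: -init_hw_device vaapi:/dev/dri/renderD128,driver=iHD */
    av_dict_set(&opts, "driver", "iHD", 0);

    err = av_hwdevice_ctx_create(&device, AV_HWDEVICE_TYPE_VAAPI,
                                 "/dev/dri/renderD128", opts, 0);
    if (err < 0)
        fprintf(stderr, "Failed to create VAAPI device: %d\n", err);

    av_dict_free(&opts);
    av_buffer_unref(&device);
    return err < 0;
}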
 


[FFmpeg-cvslog] ffmpeg_hw: Treat empty device string as no device setting

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Mon May  6 15:30:24 
2019 +0100| [a4448637380d9397a7ea5d644e9853af90a66a89] | committer: Mark 
Thompson

ffmpeg_hw: Treat empty device string as no device setting

The implementation will use some default in this case.  The empty string
is not a meaningful device for any existing hardware type; indeed, OpenCL
already treats it identically to no device, to work around the lack of
this setting on the command line.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=a4448637380d9397a7ea5d644e9853af90a66a89
---

 fftools/ffmpeg_hw.c | 13 -
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/fftools/ffmpeg_hw.c b/fftools/ffmpeg_hw.c
index d454ae7179..962d8f7d5a 100644
--- a/fftools/ffmpeg_hw.c
+++ b/fftools/ffmpeg_hw.c
@@ -155,10 +155,12 @@ int hw_device_init_from_string(const char *arg, HWDevice 
**dev_out)
 ++p;
 q = strchr(p, ',');
 if (q) {
-device = av_strndup(p, q - p);
-if (!device) {
-err = AVERROR(ENOMEM);
-goto fail;
+if (q - p > 0) {
+device = av_strndup(p, q - p);
+if (!device) {
+err = AVERROR(ENOMEM);
+goto fail;
+}
 }
 err = av_dict_parse_string(&options, q + 1, "=", ",", 0);
 if (err < 0) {
@@ -168,7 +170,8 @@ int hw_device_init_from_string(const char *arg, HWDevice 
**dev_out)
 }
 
 err = av_hwdevice_ctx_create(&device_ref, type,
- device ? device : p, options, 0);
+ q ? device : p[0] ? p : NULL,
+ options, 0);
 if (err < 0)
 goto fail;
 


[FFmpeg-cvslog] hwcontext_vaapi: Add option to specify connection type

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Mon May  6 15:31:18 
2019 +0100| [d2141a9b652c52350b5a4519ec34c59f5531fad1] | committer: Mark 
Thompson

hwcontext_vaapi: Add option to specify connection type

Can be set to "drm" or "x11" to force a specific connection type.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=d2141a9b652c52350b5a4519ec34c59f5531fad1
---

 libavutil/hwcontext_vaapi.c | 32 
 1 file changed, 28 insertions(+), 4 deletions(-)

diff --git a/libavutil/hwcontext_vaapi.c b/libavutil/hwcontext_vaapi.c
index 8624369bb9..561b82fcce 100644
--- a/libavutil/hwcontext_vaapi.c
+++ b/libavutil/hwcontext_vaapi.c
@@ -1469,6 +1469,8 @@ static int vaapi_device_create(AVHWDeviceContext *ctx, 
const char *device,
 {
 VAAPIDevicePriv *priv;
 VADisplay display = NULL;
+const AVDictionaryEntry *ent;
+int try_drm, try_x11, try_all;
 
 priv = av_mallocz(sizeof(*priv));
 if (!priv)
@@ -1479,8 +1481,26 @@ static int vaapi_device_create(AVHWDeviceContext *ctx, 
const char *device,
 ctx->user_opaque = priv;
 ctx->free= vaapi_device_free;
 
+ent = av_dict_get(opts, "connection_type", NULL, 0);
+if (ent) {
+try_all = try_drm = try_x11 = 0;
+if (!strcmp(ent->value, "drm")) {
+try_drm = 1;
+} else if (!strcmp(ent->value, "x11")) {
+try_x11 = 1;
+} else {
+av_log(ctx, AV_LOG_ERROR, "Invalid connection type %s.\n",
+   ent->value);
+return AVERROR(EINVAL);
+}
+} else {
+try_all = 1;
+try_drm = HAVE_VAAPI_DRM;
+try_x11 = HAVE_VAAPI_X11;
+}
+
 #if HAVE_VAAPI_X11
-if (!display && !(device && device[0] == '/')) {
+if (!display && try_x11) {
 // Try to open the device as an X11 display.
 priv->x11_display = XOpenDisplay(device);
 if (!priv->x11_display) {
@@ -1501,7 +1521,7 @@ static int vaapi_device_create(AVHWDeviceContext *ctx, 
const char *device,
 #endif
 
 #if HAVE_VAAPI_DRM
-if (!display) {
+if (!display && try_drm) {
 // Try to open the device as a DRM path.
 // Default to using the first render node if the user did not
 // supply a path.
@@ -1525,8 +1545,12 @@ static int vaapi_device_create(AVHWDeviceContext *ctx, 
const char *device,
 #endif
 
 if (!display) {
-av_log(ctx, AV_LOG_ERROR, "No VA display found for "
-   "device: %s.\n", device ? device : "");
+if (device)
+av_log(ctx, AV_LOG_ERROR, "No VA display found for "
+   "device %s.\n", device);
+else
+av_log(ctx, AV_LOG_ERROR, "No VA display found for "
+   "any default device.\n");
 return AVERROR(EINVAL);
 }
 


[FFmpeg-cvslog] ffmpeg_hw: Mark some strings as const

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Mon May  6 15:29:01 
2019 +0100| [1f8b36329f20ea08eafe3823b1e1b3785d74768d] | committer: Mark 
Thompson

ffmpeg_hw: Mark some strings as const

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=1f8b36329f20ea08eafe3823b1e1b3785d74768d
---

 fftools/ffmpeg.h| 2 +-
 fftools/ffmpeg_hw.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/fftools/ffmpeg.h b/fftools/ffmpeg.h
index eb1eaf6363..7b6f802082 100644
--- a/fftools/ffmpeg.h
+++ b/fftools/ffmpeg.h
@@ -72,7 +72,7 @@ typedef struct HWAccel {
 } HWAccel;
 
 typedef struct HWDevice {
-char *name;
+const char *name;
 enum AVHWDeviceType type;
 AVBufferRef *device_ref;
 } HWDevice;
diff --git a/fftools/ffmpeg_hw.c b/fftools/ffmpeg_hw.c
index 2ec1813854..d454ae7179 100644
--- a/fftools/ffmpeg_hw.c
+++ b/fftools/ffmpeg_hw.c
@@ -99,7 +99,7 @@ int hw_device_init_from_string(const char *arg, HWDevice 
**dev_out)
 // -> av_hwdevice_ctx_create_derived()
 
 AVDictionary *options = NULL;
-char *type_name = NULL, *name = NULL, *device = NULL;
+const char *type_name = NULL, *name = NULL, *device = NULL;
 enum AVHWDeviceType type;
 HWDevice *dev, *src;
 AVBufferRef *device_ref = NULL;


[FFmpeg-cvslog] hwcontext_vaapi: Make default DRM device selection more helpful

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Mon May  6 15:43:04 
2019 +0100| [6b6b8a63713cf730a3793b7715be0470d6152191] | committer: Mark 
Thompson

hwcontext_vaapi: Make default DRM device selection more helpful

Iterate over available render devices and pick the first one which looks
usable.  Adds an option to specify the name of the kernel driver associated
with the desired device, so that it is possible to select a specific type
of device in a multiple-device system without knowing the card numbering.

For example: -init_hw_device vaapi:,kernel_driver=amdgpu will select only
devices using the "amdgpu" driver (as used with recent AMD graphics cards).

Kernel driver selection requires libdrm to work.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=6b6b8a63713cf730a3793b7715be0470d6152191
---

 libavutil/hwcontext_vaapi.c | 76 +++--
 1 file changed, 60 insertions(+), 16 deletions(-)

diff --git a/libavutil/hwcontext_vaapi.c b/libavutil/hwcontext_vaapi.c
index 561b82fcce..941c257b1b 100644
--- a/libavutil/hwcontext_vaapi.c
+++ b/libavutil/hwcontext_vaapi.c
@@ -27,6 +27,7 @@
 
 #if CONFIG_LIBDRM
 #   include 
+#   include 
 #   include 
 #   ifndef DRM_FORMAT_MOD_INVALID
 #   define DRM_FORMAT_MOD_INVALID ((1ULL << 56) - 1)
@@ -1521,26 +1522,69 @@ static int vaapi_device_create(AVHWDeviceContext *ctx, 
const char *device,
 #endif
 
 #if HAVE_VAAPI_DRM
-if (!display && try_drm) {
-// Try to open the device as a DRM path.
-// Default to using the first render node if the user did not
-// supply a path.
-const char *path = device ? device : "/dev/dri/renderD128";
-priv->drm_fd = open(path, O_RDWR);
-if (priv->drm_fd < 0) {
-av_log(ctx, AV_LOG_VERBOSE, "Cannot open DRM device %s.\n",
-   path);
+while (!display && try_drm) {
+// If the device is specified, try to open it as a DRM device node.
+// If not, look for a usable render node, possibly restricted to those
+// using a specified kernel driver.
+int loglevel = try_all ? AV_LOG_VERBOSE : AV_LOG_ERROR;
+if (device) {
+priv->drm_fd = open(device, O_RDWR);
+if (priv->drm_fd < 0) {
+av_log(ctx, loglevel, "Failed to open %s as "
+   "DRM device node.\n", device);
+break;
+}
 } else {
-display = vaGetDisplayDRM(priv->drm_fd);
-if (!display) {
-av_log(ctx, AV_LOG_ERROR, "Cannot open a VA display "
-   "from DRM device %s.\n", path);
-return AVERROR_UNKNOWN;
+const AVDictionaryEntry *kernel_driver;
+char path[64];
+int n, max_devices = 8;
+kernel_driver = av_dict_get(opts, "kernel_driver", NULL, 0);
+for (n = 0; n < max_devices; n++) {
+snprintf(path, sizeof(path),
+ "/dev/dri/renderD%d", 128 + n);
+priv->drm_fd = open(path, O_RDWR);
+if (priv->drm_fd < 0) {
+av_log(ctx, AV_LOG_VERBOSE, "Cannot open "
+   "DRM render node for device %d.\n", n);
+break;
+}
+#if CONFIG_LIBDRM
+if (kernel_driver) {
+drmVersion *info;
+info = drmGetVersion(priv->drm_fd);
+if (strcmp(kernel_driver->value, info->name)) {
+av_log(ctx, AV_LOG_VERBOSE, "Ignoring device %d "
+   "with non-matching kernel driver (%s).\n",
+   n, info->name);
+drmFreeVersion(info);
+close(priv->drm_fd);
+priv->drm_fd = -1;
+continue;
+}
+av_log(ctx, AV_LOG_VERBOSE, "Trying to use "
+   "DRM render node for device %d, "
+   "with matching kernel driver (%s).\n",
+   n, info->name);
+drmFreeVersion(info);
+} else
+#endif
+{
+av_log(ctx, AV_LOG_VERBOSE, "Trying to use "
+   "DRM render node for device %d.\n", n);
+}
+break;
 }
+if (n >= max_devices)
+break;
+}
 
-av_log(ctx, AV_LOG_VERBOSE, "Opened VA display via "
-   "DRM device %s.\n", path);
+display = vaGetDisplayDRM(priv->drm_fd);
+if (!display) {
+av_log(ctx, AV_LOG_VERBOSE, "Cannot open a VA display "
+   "from DRM device %s.\n", device);
+return AVERROR_EXTERNAL;
 }
+break;
 }
 #endif
 


[FFmpeg-cvslog] hwcontext_qsv: Try to select a matching VAAPI device by default

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Mon May  6 15:45:44 
2019 +0100| [468f00384338dc201c5ae8b00229afff703d1d25] | committer: Mark 
Thompson

hwcontext_qsv: Try to select a matching VAAPI device by default

Tries to find a device backed by the i915 kernel driver and loads the iHD
VAAPI driver to use with it.  This reduces confusion on machines with
multiple DRM devices and removes the surprising requirement to set the
LIBVA_DRIVER_NAME environment variable to use libmfx at all.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=468f00384338dc201c5ae8b00229afff703d1d25
---

 libavutil/hwcontext_qsv.c | 15 ---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
index 49b5952cef..59e4ed9157 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
@@ -1206,6 +1206,7 @@ static int qsv_device_create(AVHWDeviceContext *ctx, 
const char *device,
 QSVDevicePriv *priv;
 enum AVHWDeviceType child_device_type;
 AVHWDeviceContext *child_device;
+AVDictionary *child_device_opts;
 AVDictionaryEntry *e;
 
 mfxIMPL impl;
@@ -1220,9 +1221,17 @@ static int qsv_device_create(AVHWDeviceContext *ctx, 
const char *device,
 
 e = av_dict_get(opts, "child_device", NULL, 0);
 
-if (CONFIG_VAAPI)
+child_device_opts = NULL;
+if (CONFIG_VAAPI) {
 child_device_type = AV_HWDEVICE_TYPE_VAAPI;
-else if (CONFIG_DXVA2)
+// libmfx does not actually implement VAAPI properly, rather it
+// depends on the specific behaviour of a matching iHD driver when
+// used on recent Intel hardware.  Set options to the VAAPI device
+// creation so that we should pick a usable setup by default if
+// possible, even when multiple devices and drivers are available.
+av_dict_set(&child_device_opts, "kernel_driver", "i915", 0);
+av_dict_set(&child_device_opts, "driver","iHD",  0);
+} else if (CONFIG_DXVA2)
 child_device_type = AV_HWDEVICE_TYPE_DXVA2;
 else {
 av_log(ctx, AV_LOG_ERROR, "No supported child device type is 
enabled\n");
@@ -1230,7 +1239,7 @@ static int qsv_device_create(AVHWDeviceContext *ctx, 
const char *device,
 }
 
 ret = av_hwdevice_ctx_create(&priv->child_device_ctx, child_device_type,
- e ? e->value : NULL, NULL, 0);
+ e ? e->value : NULL, child_device_opts, 0);
 if (ret < 0)
 return ret;
 


[FFmpeg-cvslog] hwcontext_vaapi: Try to create devices via DRM before X11

2019-06-02 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Mon May  6 15:41:09 
2019 +0100| [0b4696fbe8dbd2ab038006fdc02cada2ef6ae3ba] | committer: Mark 
Thompson

hwcontext_vaapi: Try to create devices via DRM before X11

Opening the device via X11 (DRI2/DRI3) rather than opening a DRM render
node directly is only useful if you intend to use the legacy X11 interop
functions.  That's never true for the ffmpeg utility, and a library user
who does want this will likely provide their own display instance rather
than making a new one here.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=0b4696fbe8dbd2ab038006fdc02cada2ef6ae3ba
---

 libavutil/hwcontext_vaapi.c | 42 +-
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/libavutil/hwcontext_vaapi.c b/libavutil/hwcontext_vaapi.c
index f05b9ee9cf..4227c3c090 100644
--- a/libavutil/hwcontext_vaapi.c
+++ b/libavutil/hwcontext_vaapi.c
@@ -1500,27 +1500,6 @@ static int vaapi_device_create(AVHWDeviceContext *ctx, 
const char *device,
 try_x11 = HAVE_VAAPI_X11;
 }
 
-#if HAVE_VAAPI_X11
-if (!display && try_x11) {
-// Try to open the device as an X11 display.
-priv->x11_display = XOpenDisplay(device);
-if (!priv->x11_display) {
-av_log(ctx, AV_LOG_VERBOSE, "Cannot open X11 display "
-   "%s.\n", XDisplayName(device));
-} else {
-display = vaGetDisplay(priv->x11_display);
-if (!display) {
-av_log(ctx, AV_LOG_ERROR, "Cannot open a VA display "
-   "from X11 display %s.\n", XDisplayName(device));
-return AVERROR_UNKNOWN;
-}
-
-av_log(ctx, AV_LOG_VERBOSE, "Opened VA display via "
-   "X11 display %s.\n", XDisplayName(device));
-}
-}
-#endif
-
 #if HAVE_VAAPI_DRM
 while (!display && try_drm) {
 // If the device is specified, try to open it as a DRM device node.
@@ -1588,6 +1567,27 @@ static int vaapi_device_create(AVHWDeviceContext *ctx, 
const char *device,
 }
 #endif
 
+#if HAVE_VAAPI_X11
+if (!display && try_x11) {
+// Try to open the device as an X11 display.
+priv->x11_display = XOpenDisplay(device);
+if (!priv->x11_display) {
+av_log(ctx, AV_LOG_VERBOSE, "Cannot open X11 display "
+   "%s.\n", XDisplayName(device));
+} else {
+display = vaGetDisplay(priv->x11_display);
+if (!display) {
+av_log(ctx, AV_LOG_ERROR, "Cannot open a VA display "
+   "from X11 display %s.\n", XDisplayName(device));
+return AVERROR_UNKNOWN;
+}
+
+av_log(ctx, AV_LOG_VERBOSE, "Opened VA display via "
+   "X11 display %s.\n", XDisplayName(device));
+}
+}
+#endif
+
 if (!display) {
 if (device)
 av_log(ctx, AV_LOG_ERROR, "No VA display found for "


[FFmpeg-cvslog] lavfi/lut: Add slice threading support

2019-06-02 Thread Jun Zhao
ffmpeg | branch: master | Jun Zhao  | Tue May 21 
23:39:09 2019 +0800| [bbad0bc5ffcfd37df2df5aec6541fa766323d6bf] | committer: 
Jun Zhao

lavfi/lut: Add slice threading support

Used the following commands on a 1080p H.264 clip:

a). ffmpeg -i input -vf lutyuv="u=128:v=128" -f null /dev/null
b). ffmpeg -i input -vf lutrgb="g=0:b=0" -f null /dev/null

After enabling slice threading, the fps changed from:

a). 144fps to 258fps (lutyuv)
b). 94fps  to 153fps (lutrgb)

on an Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz

Reviewed-by: Paul B Mahol 
Signed-off-by: Jun Zhao 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=bbad0bc5ffcfd37df2df5aec6541fa766323d6bf
---

 libavfilter/vf_lut.c | 310 ---
 1 file changed, 197 insertions(+), 113 deletions(-)

diff --git a/libavfilter/vf_lut.c b/libavfilter/vf_lut.c
index c815ddc194..90998e655f 100644
--- a/libavfilter/vf_lut.c
+++ b/libavfilter/vf_lut.c
@@ -337,13 +337,194 @@ static int config_props(AVFilterLink *inlink)
 return 0;
 }
 
+struct thread_data {
+AVFrame *in;
+AVFrame *out;
+
+int w;
+int h;
+};
+
+#define LOAD_PACKED_COMMON\
+LutContext *s = ctx->priv;\
+const struct thread_data *td = arg;\
+\
+int i, j;\
+const int w = td->w;\
+const int h = td->h;\
+AVFrame *in = td->in;\
+AVFrame *out = td->out;\
+const uint16_t (*tab)[256*256] = (const uint16_t (*)[256*256])s->lut;\
+const int step = s->step;\
+\
+const int slice_start = (h *  jobnr   ) / nb_jobs;\
+const int slice_end   = (h * (jobnr+1)) / nb_jobs;\
+
+/* packed, 16-bit */
+static int lut_packed_16bits(AVFilterContext *ctx, void *arg, int jobnr, int 
nb_jobs)
+{
+LOAD_PACKED_COMMON
+
+uint16_t *inrow, *outrow, *inrow0, *outrow0;
+const int in_linesize  =  in->linesize[0] / 2;
+const int out_linesize = out->linesize[0] / 2;
+inrow0  = (uint16_t *)in ->data[0];
+outrow0 = (uint16_t *)out->data[0];
+
+for (i = slice_start; i < slice_end; i++) {
+inrow  = inrow0 + i * in_linesize;
+outrow = outrow0 + i * out_linesize;
+for (j = 0; j < w; j++) {
+
+switch (step) {
+#if HAVE_BIGENDIAN
+case 4:  outrow[3] = av_bswap16(tab[3][av_bswap16(inrow[3])]); // 
Fall-through
+case 3:  outrow[2] = av_bswap16(tab[2][av_bswap16(inrow[2])]); // 
Fall-through
+case 2:  outrow[1] = av_bswap16(tab[1][av_bswap16(inrow[1])]); // 
Fall-through
+default: outrow[0] = av_bswap16(tab[0][av_bswap16(inrow[0])]);
+#else
+case 4:  outrow[3] = tab[3][inrow[3]]; // Fall-through
+case 3:  outrow[2] = tab[2][inrow[2]]; // Fall-through
+case 2:  outrow[1] = tab[1][inrow[1]]; // Fall-through
+default: outrow[0] = tab[0][inrow[0]];
+#endif
+}
+outrow += step;
+inrow  += step;
+}
+}
+
+return 0;
+}
+
+/* packed, 8-bit */
+static int lut_packed_8bits(AVFilterContext *ctx, void *arg, int jobnr, int 
nb_jobs)
+{
+LOAD_PACKED_COMMON
+
+uint8_t *inrow, *outrow, *inrow0, *outrow0;
+const int in_linesize  =  in->linesize[0];
+const int out_linesize = out->linesize[0];
+inrow0  = in ->data[0];
+outrow0 = out->data[0];
+
+for (i = slice_start; i < slice_end; i++) {
+inrow  = inrow0 + i * in_linesize;
+outrow = outrow0 + i * out_linesize;
+for (j = 0; j < w; j++) {
+switch (step) {
+case 4:  outrow[3] = tab[3][inrow[3]]; // Fall-through
+case 3:  outrow[2] = tab[2][inrow[2]]; // Fall-through
+case 2:  outrow[1] = tab[1][inrow[1]]; // Fall-through
+default: outrow[0] = tab[0][inrow[0]];
+}
+outrow += step;
+inrow  += step;
+}
+}
+
+return 0;
+}
+
+#define LOAD_PLANAR_COMMON\
+LutContext *s = ctx->priv;\
+const struct thread_data *td = arg;\
+int i, j, plane;\
+AVFrame *in = td->in;\
+AVFrame *out = td->out;\
+
+#define PLANAR_COMMON\
+int vsub = plane == 1 || plane == 2 ? s->vsub : 0;\
+int hsub = plane == 1 || plane == 2 ? s->hsub : 0;\
+int h = AV_CEIL_RSHIFT(td->h, vsub);\
+int w = AV_CEIL_RSHIFT(td->w, hsub);\
+const uint16_t *tab = s->lut[plane];\
+\
+const int slice_start = (h *  jobnr   ) / nb_jobs;\
+const int slice_end   = (h * (jobnr+1)) / nb_jobs;\
+
+/* planar >8 bit depth */
+static int lut_planar_16bits(AVFilterContext *ctx, void *arg, int jobnr, int 
nb_jobs)
+{
+LOAD_PLANAR_COMMON
+
+uint16_t *inrow, *outrow;
+
+for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; 
plane++) {
+PLANAR_COMMON
+
+const int in_linesize  =  in->linesize[plane] / 2;
+const int out_linesize = out->linesize[plane] / 2;
+
+inrow  = (uint16_t *)(in ->data[plane] + slice_start * in_linesize);
+outrow = (uint16_t *)(out->data[plane] + slice_star
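
As a standalone illustration of the slice partitioning used by
LOAD_PACKED_COMMON above (same formula, but this is not the filter code
itself): each job processes the half-open row range [slice_start, slice_end),
and the integer arithmetic tiles the full height with no gaps or overlaps.

#include <stdio.h>

int main(void)
{
    const int h = 1080, nb_jobs = 8;

    for (int jobnr = 0; jobnr < nb_jobs; jobnr++) {
        const int slice_start = (h *  jobnr     ) / nb_jobs;
        const int slice_end   = (h * (jobnr + 1)) / nb_jobs;
        printf("job %d: rows [%d, %d)\n", jobnr, slice_start, slice_end);
    }
    return 0;
}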

[FFmpeg-cvslog] lavfi/colorlevels: Add slice threading support

2019-06-02 Thread Jun Zhao
ffmpeg | branch: master | Jun Zhao  | Tue May 21 
09:08:21 2019 +0800| [360bee8ca49d94d5cc8b77106887d6d7250440fe] | committer: 
Jun Zhao

lavfi/colorlevels: Add slice threading support

Add slice threading support, tested with a command like:

./ffmpeg -i input -vf colorlevels -f null /dev/null

With a 1080p H.264 clip, the fps went from 39 to 79
locally (Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz).

Reviewed-by: Paul B Mahol 
Signed-off-by: Jun Zhao 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=360bee8ca49d94d5cc8b77106887d6d7250440fe
---

 libavfilter/vf_colorlevels.c | 110 +++
 1 file changed, 91 insertions(+), 19 deletions(-)

diff --git a/libavfilter/vf_colorlevels.c b/libavfilter/vf_colorlevels.c
index 5385a5e754..fadb39e004 100644
--- a/libavfilter/vf_colorlevels.c
+++ b/libavfilter/vf_colorlevels.c
@@ -105,6 +105,68 @@ static int config_input(AVFilterLink *inlink)
 return 0;
 }
 
+struct thread_data {
+const uint8_t *srcrow;
+uint8_t *dstrow;
+int dst_linesize;
+int src_linesize;
+
+double coeff;
+uint8_t offset;
+
+int h;
+
+int imin;
+int omin;
+};
+
+#define LOAD_COMMON\
+ColorLevelsContext *s = ctx->priv;\
+const struct thread_data *td = arg;\
+\
+int process_h = td->h;\
+const int slice_start = (process_h *  jobnr   ) / nb_jobs;\
+const int slice_end   = (process_h * (jobnr+1)) / nb_jobs;\
+int x, y;\
+const uint8_t *srcrow = td->srcrow;\
+uint8_t *dstrow = td->dstrow;\
+const int step = s->step;\
+const uint8_t offset = td->offset;\
+\
+int imin = td->imin;\
+int omin = td->omin;\
+double coeff = td->coeff;\
+
+static int colorlevel_slice_8(AVFilterContext *ctx, void *arg, int jobnr, int 
nb_jobs)
+{
+LOAD_COMMON
+
+for (y = slice_start; y < slice_end; y++) {
+const uint8_t *src = srcrow + y * td->src_linesize;
+uint8_t *dst = dstrow + y * td->dst_linesize;
+
+for (x = 0; x < s->linesize; x += step)
+dst[x + offset] = av_clip_uint8((src[x + offset] - imin) * coeff + 
omin);
+}
+
+return 0;
+}
+
+static int colorlevel_slice_16(AVFilterContext *ctx, void *arg, int jobnr, int 
nb_jobs)
+{
+LOAD_COMMON
+
+for (y = slice_start; y < slice_end; y++) {
+const uint16_t *src = (const uint16_t *)(srcrow + y * 
td->src_linesize);
+uint16_t *dst = (uint16_t *)(dstrow + y * td->dst_linesize);
+
+for (x = 0; x < s->linesize; x += step)
+dst[x + offset] = av_clip_uint16((src[x + offset] - imin) * coeff + omin);
+}
+
+return 0;
+}
+
 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
 AVFilterContext *ctx = inlink->dst;
@@ -137,6 +199,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 int omin = lrint(r->out_min * UINT8_MAX);
 int omax = lrint(r->out_max * UINT8_MAX);
 double coeff;
+struct thread_data td;
 
 if (imin < 0) {
 imin = UINT8_MAX;
@@ -162,15 +225,19 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 
 srcrow = in->data[0];
 coeff = (omax - omin) / (double)(imax - imin);
-for (y = 0; y < inlink->h; y++) {
-const uint8_t *src = srcrow;
-uint8_t *dst = dstrow;
-
-for (x = 0; x < s->linesize; x += step)
-dst[x + offset] = av_clip_uint8((src[x + offset] - imin) * coeff + omin);
-dstrow += out->linesize[0];
-srcrow += in->linesize[0];
-}
+
+td.srcrow= srcrow;
+td.dstrow= dstrow;
+td.dst_linesize  = out->linesize[0];
+td.src_linesize  = in->linesize[0];
+td.coeff = coeff;
+td.offset= offset;
+td.h = inlink->h;
+td.imin  = imin;
+td.omin  = omin;
+
+ctx->internal->execute(ctx, colorlevel_slice_8, &td, NULL,
+   FFMIN(inlink->h, ff_filter_get_nb_threads(ctx)));
 }
 break;
 case 2:
@@ -184,6 +251,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 int omin = lrint(r->out_min * UINT16_MAX);
 int omax = lrint(r->out_max * UINT16_MAX);
 double coeff;
+struct thread_data td;
 
 if (imin < 0) {
 imin = UINT16_MAX;
@@ -209,15 +277,19 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 
 srcrow = in->data[0];
 coeff = (omax - omin) / (double)(imax - imin);
-for (y = 0; y < inlink->h; y++) {
-const uint16_t *src = (const uint16_t*)srcrow;
-uint16_t *dst = (uint16_t *)dstrow;
-
-for (x = 0; x < s->linesize; x += step)
-dst[x + offset] = av_clip_uint16((src[x + offset] - imin) 
* coe

[FFmpeg-cvslog] avfilter/vf_unsharp: enable slice threading

2019-06-02 Thread Ruiling Song
ffmpeg | branch: master | Ruiling Song  | Tue May  7 09:46:33 2019 +0800| [94ceeba9f991ab69b192fa8527be0965de7e254b] | committer: Ruiling Song

avfilter/vf_unsharp: enable slice threading

Benchmarking with a simple command:
ffmpeg -i 1080p.mp4 -vf unsharp=la=3:ca=3 -an -f null /dev/null
with the patch, the fps increases from 50 to 120 on my local machine
(i7-6770HQ). The per-job scratch layout this relies on is sketched
after this entry.

Signed-off-by: Ruiling Song 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=94ceeba9f991ab69b192fa8527be0965de7e254b
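The central change is that the row and column accumulators (sr and sc) move
from per-call storage to one shared allocation carved into disjoint per-job
regions indexed by jobnr, so concurrent slice jobs never touch each other's
state. A minimal sketch of that layout, with made-up names and a made-up size
standing in for MAX_MATRIX_SIZE - 1:

    #include <stdint.h>
    #include <stdlib.h>

    #define SCRATCH_PER_JOB 64  /* stand-in for MAX_MATRIX_SIZE - 1 */

    /* Illustrative only: one allocation, one disjoint region per job. */
    static uint32_t *alloc_job_scratch(int nb_jobs)
    {
        return calloc((size_t)nb_jobs * SCRATCH_PER_JOB, sizeof(uint32_t));
    }

    /* Inside job 'jobnr', touch only this region of the shared buffer. */
    static uint32_t *job_scratch(uint32_t *scratch, int jobnr)
    {
        return scratch + (size_t)jobnr * SCRATCH_PER_JOB;
    }

The patch also starts each slice steps_y rows early, as noted in its comment,
so the result stays smooth across slice boundaries.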
---

 libavfilter/unsharp.h|   4 +-
 libavfilter/vf_unsharp.c | 102 ---
 2 files changed, 81 insertions(+), 25 deletions(-)

diff --git a/libavfilter/unsharp.h b/libavfilter/unsharp.h
index caff986fc1..a60b30f31a 100644
--- a/libavfilter/unsharp.h
+++ b/libavfilter/unsharp.h
@@ -37,7 +37,8 @@ typedef struct UnsharpFilterParam {
 int steps_y; ///< vertical step count
 int scalebits;   ///< bits to shift pixel
 int32_t halfscale;   ///< amount to add to pixel
-uint32_t *sc[MAX_MATRIX_SIZE - 1];   ///< finite state machine storage
+uint32_t *sr;///< finite state machine storage within a row
+uint32_t **sc;   ///< finite state machine storage across rows
 } UnsharpFilterParam;
 
 typedef struct UnsharpContext {
@@ -47,6 +48,7 @@ typedef struct UnsharpContext {
 UnsharpFilterParam luma;   ///< luma parameters (width, height, amount)
 UnsharpFilterParam chroma; ///< chroma parameters (width, height, amount)
 int hsub, vsub;
+int nb_threads;
 int opencl;
 int (* apply_unsharp)(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
 } UnsharpContext;
diff --git a/libavfilter/vf_unsharp.c b/libavfilter/vf_unsharp.c
index 41ccc56942..af05833a5d 100644
--- a/libavfilter/vf_unsharp.c
+++ b/libavfilter/vf_unsharp.c
@@ -47,15 +47,22 @@
 #include "libavutil/pixdesc.h"
 #include "unsharp.h"
 
-static void apply_unsharp(  uint8_t *dst, int dst_stride,
-  const uint8_t *src, int src_stride,
-  int width, int height, UnsharpFilterParam *fp)
+typedef struct TheadData {
+UnsharpFilterParam *fp;
+uint8_t   *dst;
+const uint8_t *src;
+int dst_stride;
+int src_stride;
+int width;
+int height;
+} ThreadData;
+
+static int unsharp_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
 {
+ThreadData *td = arg;
+UnsharpFilterParam *fp = td->fp;
 uint32_t **sc = fp->sc;
-uint32_t sr[MAX_MATRIX_SIZE - 1], tmp1, tmp2;
-
-int32_t res;
-int x, y, z;
+uint32_t *sr = fp->sr;
 const uint8_t *src2 = NULL;  //silence a warning
 const int amount = fp->amount;
 const int steps_x = fp->steps_x;
@@ -63,30 +70,54 @@ static void apply_unsharp(  uint8_t *dst, int dst_stride,
 const int scalebits = fp->scalebits;
 const int32_t halfscale = fp->halfscale;
 
+uint8_t *dst = td->dst;
+const uint8_t *src = td->src;
+const int dst_stride = td->dst_stride;
+const int src_stride = td->src_stride;
+const int width = td->width;
+const int height = td->height;
+const int sc_offset = jobnr * 2 * steps_y;
+const int sr_offset = jobnr * (MAX_MATRIX_SIZE - 1);
+const int slice_start = (height * jobnr) / nb_jobs;
+const int slice_end = (height * (jobnr+1)) / nb_jobs;
+
+int32_t res;
+int x, y, z;
+uint32_t tmp1, tmp2;
+
 if (!amount) {
-av_image_copy_plane(dst, dst_stride, src, src_stride, width, height);
-return;
+av_image_copy_plane(dst + slice_start * dst_stride, dst_stride,
+src + slice_start * src_stride, src_stride,
+width, slice_end - slice_start);
+return 0;
 }
 
 for (y = 0; y < 2 * steps_y; y++)
-memset(sc[y], 0, sizeof(sc[y][0]) * (width + 2 * steps_x));
+memset(sc[sc_offset + y], 0, sizeof(sc[y][0]) * (width + 2 * steps_x));
 
-for (y = -steps_y; y < height + steps_y; y++) {
+// if this is not the first tile, we start from (slice_start - steps_y),
+// so we can get smooth result at slice boundary
+if (slice_start > steps_y) {
+src += (slice_start - steps_y) * src_stride;
+dst += (slice_start - steps_y) * dst_stride;
+}
+
+for (y = -steps_y + slice_start; y < steps_y + slice_end; y++) {
 if (y < height)
 src2 = src;
 
-memset(sr, 0, sizeof(sr[0]) * (2 * steps_x - 1));
+memset(sr + sr_offset, 0, sizeof(sr[0]) * (2 * steps_x - 1));
 for (x = -steps_x; x < width + steps_x; x++) {
 tmp1 = x <= 0 ? src2[0] : x >= width ? src2[width-1] : src2[x];
 for (z = 0; z < steps_x * 2; z += 2) {
-tmp2 = sr[z + 0] + tmp1; sr[z + 0] = tmp1;
-tmp1 = sr[z + 1] + tmp2; sr[z + 1] = tmp2;
+tmp2 = sr[sr_offset + z + 0] + tmp1; sr[sr_offset + z + 0

[FFmpeg-cvslog] lavf/qsv: use av_cold for init/uninit

2019-06-02 Thread Zhong Li
ffmpeg | branch: master | Zhong Li  | Fri May 31 08:44:49 2019 +0800| [165eabf19bf21d235e1b5254314ba2ba5c627454] | committer: Zhong Li

lavf/qsv: use av_cold for init/uninit

Signed-off-by: Zhong Li 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=165eabf19bf21d235e1b5254314ba2ba5c627454
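av_cold comes from libavutil/attributes.h and marks functions that run rarely,
such as one-time init/uninit, so the compiler can favour size over speed for
them and keep them out of hot paths; on GCC and Clang it is expected to map to
the cold function attribute. A rough stand-in, not FFmpeg's actual macro:

    /* Illustrative only: roughly what a "cold" marker looks like;
     * FFmpeg's real definition lives in libavutil/attributes.h. */
    #if defined(__GNUC__) || defined(__clang__)
    #   define my_cold __attribute__((cold))
    #else
    #   define my_cold
    #endif

    static my_cold void example_uninit(void)
    {
        /* rarely executed teardown work goes here */
    }

    int main(void)
    {
        example_uninit();
        return 0;
    }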
---

 libavfilter/vf_deinterlace_qsv.c | 2 +-
 libavfilter/vf_overlay_qsv.c | 2 +-
 libavfilter/vf_scale_qsv.c   | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/libavfilter/vf_deinterlace_qsv.c b/libavfilter/vf_deinterlace_qsv.c
index bee10c220f..80217c8419 100644
--- a/libavfilter/vf_deinterlace_qsv.c
+++ b/libavfilter/vf_deinterlace_qsv.c
@@ -83,7 +83,7 @@ typedef struct QSVDeintContext {
 int mode;
 } QSVDeintContext;
 
-static void qsvdeint_uninit(AVFilterContext *ctx)
+static av_cold void qsvdeint_uninit(AVFilterContext *ctx)
 {
 QSVDeintContext *s = ctx->priv;
 QSVFrame *cur;
diff --git a/libavfilter/vf_overlay_qsv.c b/libavfilter/vf_overlay_qsv.c
index 9aabb594ba..2a4dc5cb58 100644
--- a/libavfilter/vf_overlay_qsv.c
+++ b/libavfilter/vf_overlay_qsv.c
@@ -345,7 +345,7 @@ static int overlay_qsv_init(AVFilterContext *ctx)
 return 0;
 }
 
-static void overlay_qsv_uninit(AVFilterContext *ctx)
+static av_cold void overlay_qsv_uninit(AVFilterContext *ctx)
 {
 QSVOverlayContext *vpp = ctx->priv;
 
diff --git a/libavfilter/vf_scale_qsv.c b/libavfilter/vf_scale_qsv.c
index 7d593b2b21..db7715fc1b 100644
--- a/libavfilter/vf_scale_qsv.c
+++ b/libavfilter/vf_scale_qsv.c
@@ -109,7 +109,7 @@ typedef struct QSVScaleContext {
 char *format_str;
 } QSVScaleContext;
 
-static int qsvscale_init(AVFilterContext *ctx)
+static av_cold int qsvscale_init(AVFilterContext *ctx)
 {
 QSVScaleContext *s = ctx->priv;
 
@@ -126,7 +126,7 @@ static int qsvscale_init(AVFilterContext *ctx)
 return 0;
 }
 
-static void qsvscale_uninit(AVFilterContext *ctx)
+static av_cold void qsvscale_uninit(AVFilterContext *ctx)
 {
 QSVScaleContext *s = ctx->priv;
 


[FFmpeg-cvslog] lavf/sr: Refine the coding style for init

2019-06-02 Thread Jun Zhao
ffmpeg | branch: master | Jun Zhao  | Sat Jun  1 11:28:56 2019 +0800| [0b7bfa8ad79e483fd81fc006b35c4ef86990d46f] | committer: Jun Zhao

lavf/sr: Refine the coding style for init

We prefer a coding style like the following (a concrete sketch follows this entry):

/* some stuff */
if (error) {
/* error handling */
return -(errorcode);
}
/* normal actions */
do_something()

Signed-off-by: Jun Zhao 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=0b7bfa8ad79e483fd81fc006b35c4ef86990d46f
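A small self-contained illustration of the same idea, not taken from the
patch: errors bail out early, and the normal action ends the function without
extra nesting.

    #include <stdio.h>

    /* Illustrative only: early returns instead of nested else blocks. */
    static int open_readable(const char *filename, FILE **out)
    {
        if (!filename) {
            /* error handling */
            return -1;
        }
        *out = fopen(filename, "r");
        if (!*out) {
            /* error handling */
            return -1;
        }
        /* normal action */
        return 0;
    }

    int main(void)
    {
        FILE *f = NULL;
        if (open_readable("/etc/hostname", &f) == 0)
            fclose(f);
        return 0;
    }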
---

 libavfilter/vf_sr.c | 13 ++---
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
index 0be572ff9d..0433246e26 100644
--- a/libavfilter/vf_sr.c
+++ b/libavfilter/vf_sr.c
@@ -72,17 +72,16 @@ static av_cold int init(AVFilterContext *context)
av_log(context, AV_LOG_ERROR, "could not create DNN module for requested backend\n");
 return AVERROR(ENOMEM);
 }
+
 if (!sr_context->model_filename){
av_log(context, AV_LOG_ERROR, "model file for network was not specified\n");
 return AVERROR(EIO);
-} else {
-if (!sr_context->dnn_module->load_model) {
-av_log(context, AV_LOG_ERROR, "load_model for network was not specified\n");
-return AVERROR(EIO);
-} else {
-sr_context->model = (sr_context->dnn_module->load_model)(sr_context->model_filename);
-}
 }
+if (!sr_context->dnn_module->load_model) {
+av_log(context, AV_LOG_ERROR, "load_model for network was not specified\n");
+return AVERROR(EIO);
+}
+sr_context->model = (sr_context->dnn_module->load_model)(sr_context->model_filename);
 if (!sr_context->model){
 av_log(context, AV_LOG_ERROR, "could not load DNN model\n");
 return AVERROR(EIO);


[FFmpeg-cvslog] lavf/sr: Don't need to check NULL before sws_freeContext

2019-06-02 Thread Jun Zhao
ffmpeg | branch: master | Jun Zhao  | Sat Jun  1 11:25:46 2019 +0800| [5c1fbc42397b26fe77abbd7e2ae604d57274ac35] | committer: Jun Zhao

lavf/sr: Don't need to check NULL before sws_freeContext

sws_freeContext() already checks for a NULL pointer, so there is no need to
check for NULL before calling it; the general pattern is sketched after this entry.

Signed-off-by: Jun Zhao 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=5c1fbc42397b26fe77abbd7e2ae604d57274ac35
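This is the same convention as free() and many FFmpeg *_free() helpers: the
cleanup function itself tolerates NULL, so call sites need no guard. A minimal
sketch of the pattern with a made-up type and helper:

    #include <stdlib.h>

    struct widget { int value; };

    /* Illustrative only: accepting NULL here removes the need for an
     * "if (ptr)" check at every call site. */
    static void widget_free(struct widget *w)
    {
        if (!w)
            return;
        free(w);
    }

    int main(void)
    {
        struct widget *w = NULL;
        widget_free(w);   /* safe even though w is NULL */
        return 0;
    }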
---

 libavfilter/vf_sr.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
index a371e443d4..0be572ff9d 100644
--- a/libavfilter/vf_sr.c
+++ b/libavfilter/vf_sr.c
@@ -286,9 +286,7 @@ static av_cold void uninit(AVFilterContext *context)
 }
 
 for (i = 0; i < 3; ++i){
-if (sr_context->sws_contexts[i]){
-sws_freeContext(sr_context->sws_contexts[i]);
-}
+sws_freeContext(sr_context->sws_contexts[i]);
 }
 }
 


[FFmpeg-cvslog] lavf/sr: Dump input pixel format in error message

2019-06-02 Thread Jun Zhao
ffmpeg | branch: master | Jun Zhao  | Sat Jun  1 11:20:33 2019 +0800| [51b0e812161d737802491fa883de8878fc256020] | committer: Jun Zhao

lavf/sr: Dump input pixel format in error message

Dump the input pixel format in the error message; it will help with debugging.
A minimal usage sketch follows this entry.

Signed-off-by: Jun Zhao 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=51b0e812161d737802491fa883de8878fc256020
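For reference, av_get_pix_fmt_name() from libavutil/pixdesc.h maps an
AVPixelFormat value to a printable name, which is what makes the improved log
line useful. A minimal standalone sketch, assuming the program is linked
against libavutil:

    #include <stdio.h>
    #include <libavutil/pixdesc.h>

    int main(void)
    {
        /* Prints "format: yuv420p"; an invalid format would yield NULL. */
        printf("format: %s\n", av_get_pix_fmt_name(AV_PIX_FMT_YUV420P));
        return 0;
    }

Build with something like: gcc demo.c $(pkg-config --cflags --libs libavutil)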
---

 libavfilter/vf_sr.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
index 86dc551553..a371e443d4 100644
--- a/libavfilter/vf_sr.c
+++ b/libavfilter/vf_sr.c
@@ -29,6 +29,7 @@
 #include "formats.h"
 #include "internal.h"
 #include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
 #include "libavformat/avio.h"
 #include "libswscale/swscale.h"
 #include "dnn_interface.h"
@@ -205,7 +206,9 @@ static int config_props(AVFilterLink *inlink)
 sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
 break;
 default:
-av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling for given input pixel format");
+av_log(context, AV_LOG_ERROR,
+   "could not create SwsContext for scaling for given input pixel format: %s\n",
+   av_get_pix_fmt_name(inlink->format));
 return AVERROR(EIO);
 }
sr_context->sws_contexts[0] = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
