Re: [FFmpeg-devel] [PATCH] ffprobe: add decode_error_flags

2021-05-28 Thread Tobias Rapp

On 27.05.2021 20:09, Marton Balint wrote:

[...]

An alternative approach is to print the meaning of the actually used
flags:


  concealment_active="0" decode_slices="0" />



This is the most readable, but maybe too verbose for the default use?


This would match how pixfmt flags are currently printed by ffprobe. I 
agree that this is readable and avoids duplicating the flag constants on 
the parsing side. But as it would be printed for each frame and takes up 
multiple lines for some output formats I think it's quite verbose.


The current output of AVPacket flags takes another approach by printing 
a single-letter character for each flag bit, like 'K' for keyframe and 
'D' for discard. That might be a good compromise, in my opinion.


Regards,
Tobias

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] [PATCH 04/10] lavfi/dnn: Use uint8_t for async and do_ioproc in TaskItems

2021-05-28 Thread Shubhanshu Saxena
These properties have values of either 0 or 1, so using uint8_t
is a better option compared to int.

Signed-off-by: Shubhanshu Saxena 
---
 libavfilter/dnn/dnn_backend_common.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_common.h 
b/libavfilter/dnn/dnn_backend_common.h
index 704cf921f1..d962312c16 100644
--- a/libavfilter/dnn/dnn_backend_common.h
+++ b/libavfilter/dnn/dnn_backend_common.h
@@ -33,8 +33,8 @@ typedef struct TaskItem {
 AVFrame *out_frame;
 const char *input_name;
 const char **output_names;
-int async;
-int do_ioproc;
+uint8_t async;
+uint8_t do_ioproc;
 uint32_t nb_output;
 uint32_t inference_todo;
 uint32_t inference_done;
-- 
2.25.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] [PATCH 05/10] lavfi/dnn: Fill Task using Common Function

2021-05-28 Thread Shubhanshu Saxena
This commit adds a common function for filling the TaskItems
in all three backends.

Signed-off-by: Shubhanshu Saxena 
---
 libavfilter/dnn/dnn_backend_common.c   | 20 
 libavfilter/dnn/dnn_backend_common.h   | 15 +++
 libavfilter/dnn/dnn_backend_openvino.c | 23 +++
 3 files changed, 42 insertions(+), 16 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_common.c 
b/libavfilter/dnn/dnn_backend_common.c
index a522ab5650..4d9d3f79b1 100644
--- a/libavfilter/dnn/dnn_backend_common.c
+++ b/libavfilter/dnn/dnn_backend_common.c
@@ -49,3 +49,23 @@ int ff_check_exec_params(void *ctx, DNNBackendType backend, 
DNNFunctionType func
 
 return 0;
 }
+
+DNNReturnType ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, 
void *backend_model, int async, int do_ioproc) {
+if (task == NULL || exec_params == NULL || backend_model == NULL)
+return DNN_ERROR;
+if (do_ioproc != 0 && do_ioproc != 1)
+return DNN_ERROR;
+if (async != 0 && async != 1)
+return DNN_ERROR;
+
+task->do_ioproc = do_ioproc;
+task->async = async;
+task->input_name = exec_params->input_name;
+task->in_frame = exec_params->in_frame;
+task->out_frame = exec_params->out_frame;
+task->model = backend_model;
+task->nb_output = exec_params->nb_output;
+task->output_names = exec_params->output_names;
+
+return DNN_SUCCESS;
+}
diff --git a/libavfilter/dnn/dnn_backend_common.h 
b/libavfilter/dnn/dnn_backend_common.h
index d962312c16..df59615f40 100644
--- a/libavfilter/dnn/dnn_backend_common.h
+++ b/libavfilter/dnn/dnn_backend_common.h
@@ -48,4 +48,19 @@ typedef struct InferenceItem {
 
 int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType 
func_type, DNNExecBaseParams *exec_params);
 
+/**
+ * Fill the Task for Backend Execution. It should be called after
+ * checking execution parameters using ff_check_exec_params.
+ *
+ * @param task pointer to the allocated task
+ * @param exec_params pointer to execution parameters
+ * @param backend_model void pointer to the backend model
+ * @param async flag for async execution. Must be 0 or 1
+ * @param do_ioproc flag for IO processing. Must be 0 or 1
+ *
+ * @retval DNN_SUCCESS if successful
+ * @retval DNN_ERROR if flags are invalid or any parameter is NULL
+ */
+DNNReturnType ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, 
void *backend_model, int async, int do_ioproc);
+
 #endif
diff --git a/libavfilter/dnn/dnn_backend_openvino.c 
b/libavfilter/dnn/dnn_backend_openvino.c
index c2487c35be..709a772a4d 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -793,14 +793,9 @@ DNNReturnType ff_dnn_execute_model_ov(const DNNModel 
*model, DNNExecBaseParams *
 }
 }
 
-task.do_ioproc = 1;
-task.async = 0;
-task.input_name = exec_params->input_name;
-task.in_frame = exec_params->in_frame;
-task.output_names = &exec_params->output_names[0];
-task.out_frame = exec_params->out_frame ? exec_params->out_frame : 
exec_params->in_frame;
-task.nb_output = exec_params->nb_output;
-task.model = ov_model;
+if (ff_dnn_fill_task(&task, exec_params, ov_model, 0, 1) != DNN_SUCCESS) {
+return DNN_ERROR;
+}
 
 if (extract_inference_from_task(ov_model->model->func_type, &task, 
ov_model->inference_queue, exec_params) != DNN_SUCCESS) {
 av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
@@ -841,14 +836,10 @@ DNNReturnType ff_dnn_execute_model_async_ov(const 
DNNModel *model, DNNExecBasePa
 return DNN_ERROR;
 }
 
-task->do_ioproc = 1;
-task->async = 1;
-task->input_name = exec_params->input_name;
-task->in_frame = exec_params->in_frame;
-task->output_names = &exec_params->output_names[0];
-task->out_frame = exec_params->out_frame ? exec_params->out_frame : 
exec_params->in_frame;
-task->nb_output = exec_params->nb_output;
-task->model = ov_model;
+if (ff_dnn_fill_task(task, exec_params, ov_model, 1, 1) != DNN_SUCCESS) {
+return DNN_ERROR;
+}
+
 if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
 av_freep(&task);
 av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
-- 
2.25.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] [PATCH 06/10] lavfi/dnn_backend_tf: Request-based Execution

2021-05-28 Thread Shubhanshu Saxena
This commit adds RequestItem and rearranges the existing sync
execution mechanism to use request-based execution. It will help
in adding async functionality to the TensorFlow backend later.

Signed-off-by: Shubhanshu Saxena 
---
 libavfilter/dnn/dnn_backend_tf.c | 297 +--
 1 file changed, 206 insertions(+), 91 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 4c16c2bdb0..793b108e55 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -35,10 +35,13 @@
 #include "dnn_backend_native_layer_maximum.h"
 #include "dnn_io_proc.h"
 #include "dnn_backend_common.h"
+#include "safe_queue.h"
+#include "queue.h"
 #include 
 
 typedef struct TFOptions{
 char *sess_config;
+uint32_t nireq;
 } TFOptions;
 
 typedef struct TFContext {
@@ -52,26 +55,79 @@ typedef struct TFModel{
 TF_Graph *graph;
 TF_Session *session;
 TF_Status *status;
+SafeQueue *request_queue;
+Queue *inference_queue;
 } TFModel;
 
+typedef struct tf_infer_request {
+TF_Output *tf_outputs;
+TF_Tensor **output_tensors;
+TF_Output *tf_input;
+TF_Tensor *input_tensor;
+} tf_infer_request;
+
+typedef struct RequestItem {
+tf_infer_request *infer_request;
+InferenceItem *inference;
+// further properties will be added later for async
+} RequestItem;
+
 #define OFFSET(x) offsetof(TFContext, x)
 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
 static const AVOption dnn_tensorflow_options[] = {
 { "sess_config", "config for SessionOptions", OFFSET(options.sess_config), 
AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
+{ "nireq",  "number of request",   OFFSET(options.nireq),   
AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, FLAGS },
 { NULL }
 };
 
 AVFILTER_DEFINE_CLASS(dnn_tensorflow);
 
-static DNNReturnType execute_model_tf(const DNNModel *model, const char 
*input_name, AVFrame *in_frame,
-  const char **output_names, uint32_t 
nb_output, AVFrame *out_frame,
-  int do_ioproc);
+static DNNReturnType execute_model_tf(RequestItem *request, Queue 
*inference_queue);
 
 static void free_buffer(void *data, size_t length)
 {
 av_freep(&data);
 }
 
+static void tf_free_request(tf_infer_request *request)
+{
+if (!request)
+return;
+if (request->input_tensor) {
+TF_DeleteTensor(request->input_tensor);
+request->input_tensor = NULL;
+}
+av_freep(&request->tf_input);
+av_freep(&request->tf_outputs);
+av_freep(&request->output_tensors);
+}
+
+static tf_infer_request* tf_create_inference_request(void)
+{
+tf_infer_request* infer_request = av_malloc(sizeof(tf_infer_request));
+infer_request->tf_outputs = NULL;
+infer_request->tf_input = NULL;
+infer_request->input_tensor = NULL;
+infer_request->output_tensors = NULL;
+return infer_request;
+}
+
+static DNNReturnType extract_inference_from_task(TaskItem *task, Queue 
*inference_queue)
+{
+InferenceItem *inference = av_malloc(sizeof(*inference));
+if (!inference) {
+return DNN_ERROR;
+}
+task->inference_todo = 1;
+task->inference_done = 0;
+inference->task = task;
+if (ff_queue_push_back(inference_queue, inference) < 0) {
+av_freep(&inference);
+return DNN_ERROR;
+}
+return DNN_SUCCESS;
+}
+
 static TF_Buffer *read_graph(const char *model_filename)
 {
 TF_Buffer *graph_buf;
@@ -171,6 +227,8 @@ static DNNReturnType get_output_tf(void *model, const char 
*input_name, int inpu
 TFContext *ctx = &tf_model->ctx;
 AVFrame *in_frame = av_frame_alloc();
 AVFrame *out_frame = NULL;
+TaskItem task;
+RequestItem *request;
 
 if (!in_frame) {
 av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input 
frame\n");
@@ -187,7 +245,27 @@ static DNNReturnType get_output_tf(void *model, const char 
*input_name, int inpu
 in_frame->width = input_width;
 in_frame->height = input_height;
 
-ret = execute_model_tf(tf_model->model, input_name, in_frame, 
&output_name, 1, out_frame, 0);
+task.do_ioproc = 0;
+task.async = 0;
+task.input_name = input_name;
+task.in_frame = in_frame;
+task.output_names = &output_name;
+task.out_frame = out_frame;
+task.model = tf_model;
+task.nb_output = 1;
+
+if (extract_inference_from_task(&task, tf_model->inference_queue) != 
DNN_SUCCESS) {
+av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
+return DNN_ERROR;
+}
+
+request = ff_safe_queue_pop_front(tf_model->request_queue);
+if (!request) {
+av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
+return DNN_ERROR;
+}
+
+ret = execute_model_tf(request, tf_model->inference_queue);
 *output_width = out_frame->width;
 *output_height = out_frame->height;
 
@@ -691,6 +769,7 @@ DNNModel *ff_dnn_load_model_tf(con

[FFmpeg-devel] [PATCH 09/10] lavfi/dnn: Async Support for TensorFlow Backend

2021-05-28 Thread Shubhanshu Saxena
This commit adds functions to execute the inference requests
to TensorFlow Backend asynchronously in detached threads.

Signed-off-by: Shubhanshu Saxena 
---
 libavfilter/dnn/dnn_backend_tf.c | 198 ---
 libavfilter/dnn/dnn_backend_tf.h |   3 +
 libavfilter/dnn/dnn_interface.c  |   3 +
 3 files changed, 187 insertions(+), 17 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 31746deef4..296604461b 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -35,6 +35,7 @@
 #include "dnn_backend_native_layer_maximum.h"
 #include "dnn_io_proc.h"
 #include "dnn_backend_common.h"
+#include "libavutil/thread.h"
 #include "safe_queue.h"
 #include "queue.h"
 #include 
@@ -57,6 +58,7 @@ typedef struct TFModel{
 TF_Status *status;
 SafeQueue *request_queue;
 Queue *inference_queue;
+Queue *task_queue;
 } TFModel;
 
 typedef struct tf_infer_request {
@@ -69,7 +71,10 @@ typedef struct tf_infer_request {
 typedef struct RequestItem {
 tf_infer_request *infer_request;
 InferenceItem *inference;
-// further properties will be added later for async
+#if HAVE_PTHREAD_CANCEL
+pthread_t thread;
+pthread_attr_t thread_attr;
+#endif
 } RequestItem;
 
 #define OFFSET(x) offsetof(TFContext, x)
@@ -83,6 +88,7 @@ static const AVOption dnn_tensorflow_options[] = {
 AVFILTER_DEFINE_CLASS(dnn_tensorflow);
 
 static DNNReturnType execute_model_tf(RequestItem *request, Queue 
*inference_queue);
+static void infer_completion_callback(void *args);
 
 static void free_buffer(void *data, size_t length)
 {
@@ -112,6 +118,59 @@ static tf_infer_request* tf_create_inference_request(void)
 return infer_request;
 }
 
+static void tf_start_inference(RequestItem *request)
+{
+tf_infer_request *infer_request = request->infer_request;
+InferenceItem *inference = request->inference;
+TaskItem *task = inference->task;
+TFModel *tf_model = task->model;
+
+TF_SessionRun(tf_model->session, NULL,
+  infer_request->tf_input, &infer_request->input_tensor, 1,
+  infer_request->tf_outputs, infer_request->output_tensors,
+  task->nb_output, NULL, 0, NULL,
+  tf_model->status);
+}
+
+static void *tf_thread_routine(void *arg)
+{
+RequestItem *request = arg;
+tf_start_inference(request);
+infer_completion_callback(request);
+#if HAVE_PTHREAD_CANCEL
+pthread_exit(0);
+#endif
+}
+
+static DNNReturnType tf_start_inference_async(RequestItem *request)
+{
+InferenceItem *inference = request->inference;
+TaskItem *task = inference->task;
+TFModel *tf_model = task->model;
+TFContext *ctx = &tf_model->ctx;
+int ret;
+
+#if HAVE_PTHREAD_CANCEL
+ret = pthread_create(&request->thread, &request->thread_attr, 
tf_thread_routine, request);
+if (ret != 0)
+{
+av_log(ctx, AV_LOG_ERROR, "unable to start async inference\n");
+return DNN_ERROR;
+}
+return DNN_SUCCESS;
+#else
+av_log(ctx, AV_LOG_WARNING, "pthreads not supported. Roll back to sync\n");
+tf_start_inference(request);
+if (TF_GetCode(tf_model->status) != TF_OK) {
+tf_free_request(request->infer_request);
+av_log(ctx, AV_LOG_ERROR, "Failed to run session when executing 
model\n");
+return DNN_ERROR;
+}
+infer_completion_callback(request);
+return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : 
DNN_ERROR;
+#endif
+}
+
 static DNNReturnType extract_inference_from_task(TaskItem *task, Queue 
*inference_queue)
 {
 TFModel *tf_model = task->model;
@@ -826,7 +885,10 @@ DNNModel *ff_dnn_load_model_tf(const char *model_filename, 
DNNFunctionType func_
 av_freep(&item);
 goto err;
 }
-
+#if HAVE_PTHREAD_CANCEL
+pthread_attr_init(&item->thread_attr);
+pthread_attr_setdetachstate(&item->thread_attr, 
PTHREAD_CREATE_DETACHED);
+#endif
 if (ff_safe_queue_push_back(tf_model->request_queue, item) < 0) {
 av_freep(&item->infer_request);
 av_freep(&item);
@@ -839,6 +901,16 @@ DNNModel *ff_dnn_load_model_tf(const char *model_filename, 
DNNFunctionType func_
 goto err;
 }
 
+tf_model->task_queue = ff_queue_create();
+if (!tf_model->task_queue) {
+goto err;
+}
+
+tf_model->inference_queue = ff_queue_create();
+if (!tf_model->inference_queue) {
+goto err;
+}
+
 model->model = tf_model;
 model->get_input = &get_input_tf;
 model->get_output = &get_output_tf;
@@ -1012,10 +1084,9 @@ final:
 static DNNReturnType execute_model_tf(RequestItem *request, Queue 
*inference_queue)
 {
 TFModel *tf_model;
-TFContext *ctx;
-tf_infer_request *infer_request;
 InferenceItem *inference;
 TaskItem *task;
+TFContext *ctx;
 
 inference = ff_queue_peek_front(inference_queue);
 if (!inference) {
@@ -1026,22 +1097,16 @@ static DNNR

[FFmpeg-devel] [PATCH 07/10] lavfi/dnn_backend_tf: Separate function for filling RequestItem and callback

2021-05-28 Thread Shubhanshu Saxena
This commit rearranges the existing code to create two separate functions
for filling request with execution data and the completion callback.

Signed-off-by: Shubhanshu Saxena 
---
 libavfilter/dnn/dnn_backend_tf.c | 81 ++--
 1 file changed, 57 insertions(+), 24 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 793b108e55..5d34da5db1 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -826,20 +826,16 @@ DNNModel *ff_dnn_load_model_tf(const char 
*model_filename, DNNFunctionType func_
 return model;
 }
 
-static DNNReturnType execute_model_tf(RequestItem *request, Queue 
*inference_queue)
-{
-TFModel *tf_model;
-TFContext *ctx;
-tf_infer_request *infer_request;
+static DNNReturnType fill_model_input_tf(TFModel *tf_model, RequestItem 
*request) {
+DNNData input;
 InferenceItem *inference;
 TaskItem *task;
-DNNData input, *outputs;
+tf_infer_request *infer_request;
+TFContext *ctx = &tf_model->ctx;
 
-inference = ff_queue_pop_front(inference_queue);
+inference = ff_queue_pop_front(tf_model->inference_queue);
 av_assert0(inference);
 task = inference->task;
-tf_model = task->model;
-ctx = &tf_model->ctx;
 request->inference = inference;
 
 if (get_input_tf(tf_model, &input, task->input_name) != DNN_SUCCESS)
@@ -852,7 +848,7 @@ static DNNReturnType execute_model_tf(RequestItem *request, 
Queue *inference_que
 infer_request->tf_input = av_malloc(sizeof(TF_Output));
 infer_request->tf_input->oper = TF_GraphOperationByName(tf_model->graph, 
task->input_name);
 if (!infer_request->tf_input->oper){
-av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", 
input_name);
+av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", 
task->input_name);
 return DNN_ERROR;
 }
 infer_request->tf_input->index = 0;
@@ -902,22 +898,23 @@ static DNNReturnType execute_model_tf(RequestItem 
*request, Queue *inference_que
 infer_request->tf_outputs[i].index = 0;
 }
 
-TF_SessionRun(tf_model->session, NULL,
-infer_request->tf_input, &infer_request->input_tensor, 1,
-infer_request->tf_outputs, infer_request->output_tensors,
-task->nb_output, NULL, 0, NULL,
-tf_model->status);
-if (TF_GetCode(tf_model->status) != TF_OK) {
-tf_free_request(infer_request);
-av_log(ctx, AV_LOG_ERROR, "Failed to run session when executing 
model\n");
-return DNN_ERROR;
-}
+return DNN_SUCCESS;
+}
+
+static void infer_completion_callback(void *args) {
+RequestItem *request = args;
+InferenceItem *inference = request->inference;
+TaskItem *task = inference->task;
+DNNData *outputs;
+tf_infer_request *infer_request = request->infer_request;
+TFModel *tf_model = task->model;
+TFContext *ctx = &tf_model->ctx;
 
 outputs = av_malloc_array(task->nb_output, sizeof(*outputs));
 if (!outputs) {
 tf_free_request(infer_request);
 av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
-return DNN_ERROR;
+return;
 }
 
 for (uint32_t i = 0; i < task->nb_output; ++i) {
@@ -944,7 +941,7 @@ static DNNReturnType execute_model_tf(RequestItem *request, 
Queue *inference_que
 case DFT_ANALYTICS_DETECT:
 if (!tf_model->model->detect_post_proc) {
 av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post 
proc\n");
-return DNN_ERROR;
+return;
 }
 tf_model->model->detect_post_proc(task->out_frame, outputs, 
task->nb_output, tf_model->model->filter_ctx);
 break;
@@ -955,7 +952,7 @@ static DNNReturnType execute_model_tf(RequestItem *request, 
Queue *inference_que
 }
 }
 av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this 
kind of dnn filter now\n");
-return DNN_ERROR;
+return;
 }
 for (uint32_t i = 0; i < task->nb_output; ++i) {
 if (infer_request->output_tensors[i]) {
@@ -966,7 +963,43 @@ static DNNReturnType execute_model_tf(RequestItem 
*request, Queue *inference_que
 tf_free_request(infer_request);
 av_freep(&outputs);
 ff_safe_queue_push_back(tf_model->request_queue, request);
-return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : 
DNN_ERROR;
+}
+
+static DNNReturnType execute_model_tf(RequestItem *request, Queue 
*inference_queue)
+{
+TFModel *tf_model;
+TFContext *ctx;
+tf_infer_request *infer_request;
+InferenceItem *inference;
+TaskItem *task;
+
+inference = ff_queue_peek_front(inference_queue);
+task = inference->task;
+tf_model = task->model;
+ctx = &tf_model->ctx;
+
+if (task->async) {
+avpriv_report_missing_feature(ctx, "Async execution not supported");
+return DNN_ERR

[FFmpeg-devel] [PATCH 10/10] lavfi/dnn_backend_tf.c: Documentation for tf_infer_request functions

2021-05-28 Thread Shubhanshu Saxena
Documentation for functions related to tf_infer_request

Signed-off-by: Shubhanshu Saxena 
---
 libavfilter/dnn/dnn_backend_tf.c | 41 
 1 file changed, 41 insertions(+)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 296604461b..8a74b11cf5 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -95,6 +95,13 @@ static void free_buffer(void *data, size_t length)
 av_freep(&data);
 }
 
+/**
+ * Free the contents of TensorFlow inference request.
+ * It does not free the tf_infer_request instance.
+ *
+ * @param request pointer to tf_infer_request instance.
+ * NULL pointer is allowed.
+ */
 static void tf_free_request(tf_infer_request *request)
 {
 if (!request)
@@ -108,6 +115,12 @@ static void tf_free_request(tf_infer_request *request)
 av_freep(&request->output_tensors);
 }
 
+/**
+ * Create a TensorFlow inference request. All properties
+ * are initially unallocated and set as NULL.
+ *
+ * @return pointer to the allocated tf_infer_request instance.
+ */
 static tf_infer_request* tf_create_inference_request(void)
 {
 tf_infer_request* infer_request = av_malloc(sizeof(tf_infer_request));
@@ -118,8 +131,17 @@ static tf_infer_request* tf_create_inference_request(void)
 return infer_request;
 }
 
+/**
+ * Start synchronous inference for the TensorFlow model.
+ * It does not check for the status of the operation.
+ * Check using tf_model->status.
+ *
+ * @param request pointer to the RequestItem for inference
+ */
 static void tf_start_inference(RequestItem *request)
 {
+if (!request)
+return;
 tf_infer_request *infer_request = request->infer_request;
 InferenceItem *inference = request->inference;
 TaskItem *task = inference->task;
@@ -132,6 +154,12 @@ static void tf_start_inference(RequestItem *request)
   tf_model->status);
 }
 
+/**
+ * Thread routine for async inference. It calls completion
+ * callback on completion of inference.
+ *
+ * @param arg pointer to RequestItem instance for inference
+ */
 static void *tf_thread_routine(void *arg)
 {
 RequestItem *request = arg;
@@ -142,8 +170,21 @@ static void *tf_thread_routine(void *arg)
 #endif
 }
 
+/**
+ * Start asynchronous inference routine for the TensorFlow
+ * model on a detached thread. It calls the completion callback
+ * after the inference completes.
+ * In case pthreads aren't supported, the execution rolls back
+ * to synchronous mode, calling completion callback after inference.
+ *
+ * @param request pointer to the RequestItem for inference
+ * @retval DNN_SUCCESS on the start of async inference.
+ * @retval DNN_ERROR in case async inference cannot be started
+ */
 static DNNReturnType tf_start_inference_async(RequestItem *request)
 {
+if (!request)
+return DNN_ERROR;
 InferenceItem *inference = request->inference;
 TaskItem *task = inference->task;
 TFModel *tf_model = task->model;
-- 
2.25.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] [PATCH 08/10] lavfi/dnn_backend_tf: Error Handling

2021-05-28 Thread Shubhanshu Saxena
This commit adds handling for cases where an error may occur, clearing
the allocated memory resources.

Signed-off-by: Shubhanshu Saxena 
---
 libavfilter/dnn/dnn_backend_tf.c | 100 +++
 1 file changed, 74 insertions(+), 26 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 5d34da5db1..31746deef4 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -114,14 +114,18 @@ static tf_infer_request* tf_create_inference_request(void)
 
 static DNNReturnType extract_inference_from_task(TaskItem *task, Queue 
*inference_queue)
 {
+TFModel *tf_model = task->model;
+TFContext *ctx = &tf_model->ctx;
 InferenceItem *inference = av_malloc(sizeof(*inference));
 if (!inference) {
+av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for 
InferenceItem\n");
 return DNN_ERROR;
 }
 task->inference_todo = 1;
 task->inference_done = 0;
 inference->task = task;
 if (ff_queue_push_back(inference_queue, inference) < 0) {
+av_log(ctx, AV_LOG_ERROR, "Failed to push back inference_queue.\n");
 av_freep(&inference);
 return DNN_ERROR;
 }
@@ -232,14 +236,15 @@ static DNNReturnType get_output_tf(void *model, const 
char *input_name, int inpu
 
 if (!in_frame) {
 av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input 
frame\n");
-return DNN_ERROR;
+ret = DNN_ERROR;
+goto final;
 }
 
 out_frame = av_frame_alloc();
 if (!out_frame) {
 av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output 
frame\n");
-av_frame_free(&in_frame);
-return DNN_ERROR;
+ret = DNN_ERROR;
+goto final;
 }
 
 in_frame->width = input_width;
@@ -256,19 +261,22 @@ static DNNReturnType get_output_tf(void *model, const 
char *input_name, int inpu
 
 if (extract_inference_from_task(&task, tf_model->inference_queue) != 
DNN_SUCCESS) {
 av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
-return DNN_ERROR;
+ret = DNN_ERROR;
+goto final;
 }
 
 request = ff_safe_queue_pop_front(tf_model->request_queue);
 if (!request) {
 av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
-return DNN_ERROR;
+ret = DNN_ERROR;
+goto final;
 }
 
 ret = execute_model_tf(request, tf_model->inference_queue);
 *output_width = out_frame->width;
 *output_height = out_frame->height;
 
+final:
 av_frame_free(&out_frame);
 av_frame_free(&in_frame);
 return ret;
@@ -788,18 +796,13 @@ DNNModel *ff_dnn_load_model_tf(const char 
*model_filename, DNNFunctionType func_
 //parse options
 av_opt_set_defaults(&ctx);
 if (av_opt_set_from_string(&ctx, options, NULL, "=", "&") < 0) {
-av_log(&tf_model->ctx, AV_LOG_ERROR, "Failed to parse options 
\"%s\"\n", options);
-av_freep(&tf_model);
-av_freep(&model);
-return NULL;
+av_log(&ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", 
options);
+goto err;
 }
 
 if (load_tf_model(tf_model, model_filename) != DNN_SUCCESS){
 if (load_native_model(tf_model, model_filename) != DNN_SUCCESS){
-av_freep(&tf_model);
-av_freep(&model);
-
-return NULL;
+goto err;
 }
 }
 
@@ -808,14 +811,34 @@ DNNModel *ff_dnn_load_model_tf(const char 
*model_filename, DNNFunctionType func_
 }
 
 tf_model->request_queue = ff_safe_queue_create();
+if (!tf_model->request_queue) {
+goto err;
+}
 
 for (int i = 0; i < ctx->options.nireq; i++) {
 RequestItem *item = av_mallocz(sizeof(*item));
+if (!item) {
+goto err;
+}
 item->infer_request = tf_create_inference_request();
-ff_safe_queue_push_back(tf_model->request_queue, item);
+if (!item->infer_request) {
+av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for 
TensorFlow inference request\n");
+av_freep(&item);
+goto err;
+}
+
+if (ff_safe_queue_push_back(tf_model->request_queue, item) < 0) {
+av_freep(&item->infer_request);
+av_freep(&item);
+goto err;
+}
 }
 
 tf_model->inference_queue = ff_queue_create();
+if (!tf_model->inference_queue) {
+goto err;
+}
+
 model->model = tf_model;
 model->get_input = &get_input_tf;
 model->get_output = &get_output_tf;
@@ -824,6 +847,9 @@ DNNModel *ff_dnn_load_model_tf(const char *model_filename, 
DNNFunctionType func_
 model->func_type = func_type;
 
 return model;
+err:
+ff_dnn_free_model_tf(&model);
+return NULL;
 }
 
 static DNNReturnType fill_model_input_tf(TFModel *tf_model, RequestItem 
*request) {
@@ -838,24 +864,31 @@ static DNNReturnType fill_model_input_tf(TFModel 
*tf_model, RequestItem *request
 t

[FFmpeg-devel] [PATCH 02/10] lavfi/dnn: Convert output_name to char** in TaskItem

2021-05-28 Thread Shubhanshu Saxena
Convert output_name to char **output_names in TaskItem and use it as
a pointer to array of output names in the DNN backend.

Signed-off-by: Shubhanshu Saxena 
---
 libavfilter/dnn/dnn_backend_common.h   |  2 +-
 libavfilter/dnn/dnn_backend_openvino.c | 10 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_common.h 
b/libavfilter/dnn/dnn_backend_common.h
index 0c043e51f0..f76a05026d 100644
--- a/libavfilter/dnn/dnn_backend_common.h
+++ b/libavfilter/dnn/dnn_backend_common.h
@@ -32,7 +32,7 @@ typedef struct TaskItem {
 AVFrame *in_frame;
 AVFrame *out_frame;
 const char *input_name;
-const char *output_name;
+const char **output_names;
 int async;
 int do_ioproc;
 uint32_t inference_todo;
diff --git a/libavfilter/dnn/dnn_backend_openvino.c 
b/libavfilter/dnn/dnn_backend_openvino.c
index a84370d689..0f3b235820 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -208,7 +208,7 @@ static void infer_completion_callback(void *args)
 DNNData output;
 OVContext *ctx = &ov_model->ctx;
 
-status = ie_infer_request_get_blob(request->infer_request, 
task->output_name, &output_blob);
+status = ie_infer_request_get_blob(request->infer_request, 
task->output_names[0], &output_blob);
 if (status != OK) {
 //incorrect output name
 char *model_output_name = NULL;
@@ -222,7 +222,7 @@ static void infer_completion_callback(void *args)
 }
 av_log(ctx, AV_LOG_ERROR,
"output \"%s\" may not correct, all output(s) are: \"%s\"\n",
-   task->output_name, all_output_names);
+   task->output_names[0], all_output_names);
 return;
 }
 
@@ -676,7 +676,7 @@ static DNNReturnType get_output_ov(void *model, const char 
*input_name, int inpu
 task.async = 0;
 task.input_name = input_name;
 task.in_frame = in_frame;
-task.output_name = output_name;
+task.output_names = &output_name;
 task.out_frame = out_frame;
 task.model = ov_model;
 
@@ -796,7 +796,7 @@ DNNReturnType ff_dnn_execute_model_ov(const DNNModel 
*model, DNNExecBaseParams *
 task.async = 0;
 task.input_name = exec_params->input_name;
 task.in_frame = exec_params->in_frame;
-task.output_name = exec_params->output_names[0];
+task.output_names = &exec_params->output_names[0];
 task.out_frame = exec_params->out_frame ? exec_params->out_frame : 
exec_params->in_frame;
 task.model = ov_model;
 
@@ -843,7 +843,7 @@ DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel 
*model, DNNExecBasePa
 task->async = 1;
 task->input_name = exec_params->input_name;
 task->in_frame = exec_params->in_frame;
-task->output_name = exec_params->output_names[0];
+task->output_names = &exec_params->output_names[0];
 task->out_frame = exec_params->out_frame ? exec_params->out_frame : 
exec_params->in_frame;
 task->model = ov_model;
 if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
-- 
2.25.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] [PATCH 03/10] lavfi/dnn: Add nb_output to TaskItem

2021-05-28 Thread Shubhanshu Saxena
Add nb_output property to TaskItem for use in TensorFlow backend
and Native backend.

Signed-off-by: Shubhanshu Saxena 
---
 libavfilter/dnn/dnn_backend_common.h   | 1 +
 libavfilter/dnn/dnn_backend_openvino.c | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/libavfilter/dnn/dnn_backend_common.h 
b/libavfilter/dnn/dnn_backend_common.h
index f76a05026d..704cf921f1 100644
--- a/libavfilter/dnn/dnn_backend_common.h
+++ b/libavfilter/dnn/dnn_backend_common.h
@@ -35,6 +35,7 @@ typedef struct TaskItem {
 const char **output_names;
 int async;
 int do_ioproc;
+uint32_t nb_output;
 uint32_t inference_todo;
 uint32_t inference_done;
 } TaskItem;
diff --git a/libavfilter/dnn/dnn_backend_openvino.c 
b/libavfilter/dnn/dnn_backend_openvino.c
index 0f3b235820..c2487c35be 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -678,6 +678,7 @@ static DNNReturnType get_output_ov(void *model, const char 
*input_name, int inpu
 task.in_frame = in_frame;
 task.output_names = &output_name;
 task.out_frame = out_frame;
+task.nb_output = 1;
 task.model = ov_model;
 
 if (extract_inference_from_task(ov_model->model->func_type, &task, 
ov_model->inference_queue, NULL) != DNN_SUCCESS) {
@@ -798,6 +799,7 @@ DNNReturnType ff_dnn_execute_model_ov(const DNNModel 
*model, DNNExecBaseParams *
 task.in_frame = exec_params->in_frame;
 task.output_names = &exec_params->output_names[0];
 task.out_frame = exec_params->out_frame ? exec_params->out_frame : 
exec_params->in_frame;
+task.nb_output = exec_params->nb_output;
 task.model = ov_model;
 
 if (extract_inference_from_task(ov_model->model->func_type, &task, 
ov_model->inference_queue, exec_params) != DNN_SUCCESS) {
@@ -845,6 +847,7 @@ DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel 
*model, DNNExecBasePa
 task->in_frame = exec_params->in_frame;
 task->output_names = &exec_params->output_names[0];
 task->out_frame = exec_params->out_frame ? exec_params->out_frame : 
exec_params->in_frame;
+task->nb_output = exec_params->nb_output;
 task->model = ov_model;
 if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
 av_freep(&task);
-- 
2.25.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] [PATCH 01/10] lavfi/dnn: Extract TaskItem and InferenceItem from OpenVino Backend

2021-05-28 Thread Shubhanshu Saxena
Extract TaskItem and InferenceItem from OpenVino backend and convert
ov_model to void in TaskItem.

Signed-off-by: Shubhanshu Saxena 
---
 libavfilter/dnn/dnn_backend_common.h   | 19 +
 libavfilter/dnn/dnn_backend_openvino.c | 58 ++
 2 files changed, 40 insertions(+), 37 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_common.h 
b/libavfilter/dnn/dnn_backend_common.h
index cd9c0f5339..0c043e51f0 100644
--- a/libavfilter/dnn/dnn_backend_common.h
+++ b/libavfilter/dnn/dnn_backend_common.h
@@ -26,6 +26,25 @@
 
 #include "../dnn_interface.h"
 
+// one task for one function call from dnn interface
+typedef struct TaskItem {
+void *model; // model for the backend
+AVFrame *in_frame;
+AVFrame *out_frame;
+const char *input_name;
+const char *output_name;
+int async;
+int do_ioproc;
+uint32_t inference_todo;
+uint32_t inference_done;
+} TaskItem;
+
+// one task might have multiple inferences
+typedef struct InferenceItem {
+TaskItem *task;
+uint32_t bbox_index;
+} InferenceItem;
+
 int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType 
func_type, DNNExecBaseParams *exec_params);
 
 #endif
diff --git a/libavfilter/dnn/dnn_backend_openvino.c 
b/libavfilter/dnn/dnn_backend_openvino.c
index 58c4ec9c9b..a84370d689 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -59,25 +59,6 @@ typedef struct OVModel{
 Queue *inference_queue; // holds InferenceItem
 } OVModel;
 
-// one task for one function call from dnn interface
-typedef struct TaskItem {
-OVModel *ov_model;
-const char *input_name;
-AVFrame *in_frame;
-const char *output_name;
-AVFrame *out_frame;
-int do_ioproc;
-int async;
-uint32_t inference_todo;
-uint32_t inference_done;
-} TaskItem;
-
-// one task might have multiple inferences
-typedef struct InferenceItem {
-TaskItem *task;
-uint32_t bbox_index;
-} InferenceItem;
-
 // one request for one call to openvino
 typedef struct RequestItem {
 ie_infer_request_t *infer_request;
@@ -184,7 +165,7 @@ static DNNReturnType fill_model_input_ov(OVModel *ov_model, 
RequestItem *request
 request->inferences[i] = inference;
 request->inference_count = i + 1;
 task = inference->task;
-switch (task->ov_model->model->func_type) {
+switch (ov_model->model->func_type) {
 case DFT_PROCESS_FRAME:
 if (task->do_ioproc) {
 if (ov_model->model->frame_pre_proc != NULL) {
@@ -220,11 +201,12 @@ static void infer_completion_callback(void *args)
 RequestItem *request = args;
 InferenceItem *inference = request->inferences[0];
 TaskItem *task = inference->task;
-SafeQueue *requestq = task->ov_model->request_queue;
+OVModel *ov_model = task->model;
+SafeQueue *requestq = ov_model->request_queue;
 ie_blob_t *output_blob = NULL;
 ie_blob_buffer_t blob_buffer;
 DNNData output;
-OVContext *ctx = &task->ov_model->ctx;
+OVContext *ctx = &ov_model->ctx;
 
 status = ie_infer_request_get_blob(request->infer_request, 
task->output_name, &output_blob);
 if (status != OK) {
@@ -233,9 +215,9 @@ static void infer_completion_callback(void *args)
 char *all_output_names = NULL;
 size_t model_output_count = 0;
 av_log(ctx, AV_LOG_ERROR, "Failed to get model output data\n");
-status = ie_network_get_outputs_number(task->ov_model->network, 
&model_output_count);
+status = ie_network_get_outputs_number(ov_model->network, 
&model_output_count);
 for (size_t i = 0; i < model_output_count; i++) {
-status = ie_network_get_output_name(task->ov_model->network, i, 
&model_output_name);
+status = ie_network_get_output_name(ov_model->network, i, 
&model_output_name);
 APPEND_STRING(all_output_names, model_output_name)
 }
 av_log(ctx, AV_LOG_ERROR,
@@ -271,11 +253,11 @@ static void infer_completion_callback(void *args)
 task = request->inferences[i]->task;
 task->inference_done++;
 
-switch (task->ov_model->model->func_type) {
+switch (ov_model->model->func_type) {
 case DFT_PROCESS_FRAME:
 if (task->do_ioproc) {
-if (task->ov_model->model->frame_post_proc != NULL) {
-task->ov_model->model->frame_post_proc(task->out_frame, 
&output, task->ov_model->model->filter_ctx);
+if (ov_model->model->frame_post_proc != NULL) {
+ov_model->model->frame_post_proc(task->out_frame, &output, 
ov_model->model->filter_ctx);
 } else {
 ff_proc_from_dnn_to_frame(task->out_frame, &output, ctx);
 }
@@ -285,18 +267,18 @@ static void infer_completion_callback(void *args)
 }
 break;
 case DFT_ANALYTICS_DETECT:
-if (!task->ov_model->model->detect_post

Re: [FFmpeg-devel] [PATCH 2/3] libavcodec/mips: Fix build errors reported by clang

2021-05-28 Thread yinshiyou-hf



> -原始邮件-
> 发件人: "Jin Bo" 
> 发送时间: 2021-05-28 10:04:40 (星期五)
> 收件人: ffmpeg-devel@ffmpeg.org
> 抄送: "Jin Bo" 
> 主题: [FFmpeg-devel] [PATCH 2/3] libavcodec/mips: Fix build errors reported 
by clang
> 
> Clang is more strict on the type of asm operands, float or double
> type variable should use constraint 'f', integer variable should
> use constraint 'r'.
> 
> Signed-off-by: Jin Bo 
> ---
>  libavcodec/mips/constants.c  |  89 +++--
>  libavcodec/mips/constants.h  |  88 +++--
>  libavcodec/mips/h264chroma_mmi.c | 157 +++
>  libavcodec/mips/h264dsp_mmi.c|  20 +--
>  libavcodec/mips/h264pred_mmi.c   |  23 ++--
>  libavcodec/mips/h264qpel_mmi.c   |  34 ++---
>  libavcodec/mips/hevcdsp_mmi.c|  59 +
>  libavcodec/mips/idctdsp_mmi.c|   2 +-
>  libavcodec/mips/mpegvideo_mmi.c  |  20 +--
>  libavcodec/mips/vc1dsp_mmi.c | 176 +-
>  libavcodec/mips/vp8dsp_mmi.c | 263 
+--
>  libavutil/mips/asmdefs.h |   8 ++
>  12 files changed, 536 insertions(+), 403 deletions(-)
> 
> diff --git a/libavcodec/mips/constants.c b/libavcodec/mips/constants.c
> index 8c990b6..6a8f1a5 100644
> --- a/libavcodec/mips/constants.c
> +++ b/libavcodec/mips/constants.c
> @@ -19,50 +19,49 @@
>   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
02110-1301 USA
>   */
>  
> -#include "config.h"
> -#include "libavutil/mem_internal.h"
> +#include "libavutil/intfloat.h"
>  #include "constants.h"
>  
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_1) =   
{0x0001000100010001ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_2) =   
{0x0002000200020002ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_3) =   
{0x0003000300030003ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_4) =   
{0x0004000400040004ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_5) =   
{0x0005000500050005ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_6) =   
{0x0006000600060006ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_8) =   
{0x0008000800080008ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_9) =   
{0x0009000900090009ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_10) =  
{0x000A000A000A000AULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_12) =  
{0x000C000C000C000CULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_15) =  
{0x000F000F000F000FULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_16) =  
{0x0010001000100010ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_17) =  
{0x0011001100110011ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_18) =  
{0x0012001200120012ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_20) =  
{0x0014001400140014ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_22) =  
{0x0016001600160016ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_28) =  
{0x001C001C001C001CULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_32) =  
{0x0020002000200020ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_53) =  
{0x0035003500350035ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_64) =  
{0x0040004000400040ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 
{0x0080008000800080ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_512) = 
{0x0200020002000200ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_m8tom5) =  
{0xFFFBFFFAFFF9FFF8ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_m4tom1) =  
{0xFFFEFFFDFFFCULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_1to4) =
{0x0004000300020001ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_5to8) =
{0x0008000700060005ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_0to3) =
{0x000300020001ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_4to7) =
{0x0007000600050004ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_8tob) =
{0x000b000a00090008ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pw_ctof) =
{0x000f000e000d000cULL};
> -
> -DECLARE_ALIGNED(8, const uint64_t, ff_pb_1) =   
{0x0101010101010101ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pb_3) =   
{0x0303030303030303ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pb_80) =  
{0x8080808080808080ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pb_A1) =  
{0xA1A1A1A1A1A1A1A1ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_pb_FE) =  
{0xFEFEFEFEFEFEFEFEULL};
> -
> -DECLARE_ALIGNED(8, const uint64_t, ff_rnd) =
{0x0004000400040004ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_rnd2) =   
{0x0040004000400040ULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_rnd3) =   
{0x0020002000200020ULL};
> -
> -DECLARE_ALIGNED(8, const uint64_t, ff_wm1010) = 
{0xULL};
> -DECLARE_ALIGNED(8, const uint64_t, ff_d4) = 
{0x0004ULL};
> +union av_intfloat64 ff_pw_1 =  {0x0001000100010001ULL};
> +union av_intfloat64 ff_pw_2 =  {0x0002000200020002ULL};
> +union av_intfloat64 ff_pw_3 =  {0x0003000300030003ULL};
> +union av_intfloat

Re: [FFmpeg-devel] [PATCH] avcodec: Pass the HDR10+ metadata to the packet side data in VP9 encoder

2021-05-28 Thread Michael Niedermayer
On Thu, May 27, 2021 at 09:44:10AM -0700, Mohammad Izadi wrote:
> HDR10+ metadata is stored in the bit stream for HEVC. The story is different 
> for VP9, which cannot store the metadata in the bit stream. HDR10+ should be 
> passed to packet side data and stored in the container (mkv) for VP9.
> 
> This CL takes HDR10+ from the AVFrame side data in libvpxenc and passes 
> it to the AVPacket side data.
> ---
>  doc/APIchanges |  2 +
>  libavcodec/avpacket.c  |  1 +
>  libavcodec/decode.c|  1 +
>  libavcodec/libvpxenc.c | 92 ++
>  libavcodec/packet.h|  8 
>  libavcodec/version.h   |  2 +-
>  6 files changed, 105 insertions(+), 1 deletion(-)
[...]
> @@ -316,6 +323,53 @@ static av_cold void free_frame_list(struct FrameListData 
> *list)
>  }
>  }
>  
> +static av_cold int add_hdr10_plus(AVFifoBuffer *fifo, struct FrameHDR10Plus 
> *data)
> +{
> +int err = av_fifo_grow(fifo, sizeof(FrameHDR10Plus));
> +if (err < 0)
> +return err;
> +av_fifo_generic_write(fifo, data, sizeof(FrameHDR10Plus), NULL);
> +return 0;
> +}
> +
> +static av_cold void free_hdr10_plus(struct FrameHDR10Plus *p)
> +{
> +if (!p)
> +return;
> +av_buffer_unref(&p->hdr10_plus);
> +av_free(p);
> +}
> +
> +static av_cold void free_hdr10_plus_fifo(AVFifoBuffer **fifo)
> +{
> +FrameHDR10Plus *frame_hdr10_plus = NULL;
> +while (av_fifo_generic_read(*fifo, frame_hdr10_plus, 
> sizeof(*frame_hdr10_plus), NULL) > 0)
> +free_hdr10_plus(frame_hdr10_plus);
> +av_fifo_freep(fifo);
> +}

This seems to be crashing

frame=3 fps=0.1 q=0.0 Lsize=  18kB time=00:00:01.03 bitrate= 
145.7kbits/s speed=0.0346x
video:3kB audio:14kB subtitle:0kB other streams:0kB global headers:0kB muxing 
overhead: 7.865490%
==21306== Invalid read of size 8
==21306==at 0x12203B3: av_fifo_generic_read (fifo.c:218)
==21306==by 0x9F5DA3: free_hdr10_plus_fifo (libvpxenc.c:346)
==21306==by 0x9F627A: vpx_free (libvpxenc.c:441)
==21306==by 0x7A1B02: avcodec_close (avcodec.c:472)
==21306==by 0xAE48E0: avcodec_free_context (options.c:163)
==21306==by 0x24AA21: ffmpeg_cleanup (ffmpeg.c:609)
==21306==by 0x24239C: exit_program (cmdutils.c:135)
==21306==by 0x25C3AB: main (ffmpeg.c:5030)
==21306==  Address 0x18 is not stack'd, malloc'd or (recently) free'd

[...]

-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Dictatorship: All citizens are under surveillance, all their steps and
actions recorded, for the politicians to enforce control.
Democracy: All politicians are under surveillance, all their steps and
actions recorded, for the citizens to enforce control.


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] [PATCH] doc/examples/muxing: remove unused arguments of open_video and open_audio

2021-05-28 Thread Steven Liu
Signed-off-by: Steven Liu 
---
 doc/examples/muxing.c | 10 --
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/doc/examples/muxing.c b/doc/examples/muxing.c
index fe1b9ded21..3986561b2f 100644
--- a/doc/examples/muxing.c
+++ b/doc/examples/muxing.c
@@ -242,8 +242,7 @@ static AVFrame *alloc_audio_frame(enum AVSampleFormat 
sample_fmt,
 return frame;
 }
 
-static void open_audio(AVFormatContext *oc, const AVCodec *codec,
-   OutputStream *ost, AVDictionary *opt_arg)
+static void open_audio(const AVCodec *codec, OutputStream *ost, AVDictionary 
*opt_arg)
 {
 AVCodecContext *c;
 int nb_samples;
@@ -406,8 +405,7 @@ static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, 
int width, int height)
 return picture;
 }
 
-static void open_video(AVFormatContext *oc, const AVCodec *codec,
-   OutputStream *ost, AVDictionary *opt_arg)
+static void open_video(const AVCodec *codec, OutputStream *ost, AVDictionary 
*opt_arg)
 {
 int ret;
 AVCodecContext *c = ost->enc;
@@ -592,10 +590,10 @@ int main(int argc, char **argv)
 /* Now that all the parameters are set, we can open the audio and
  * video codecs and allocate the necessary encode buffers. */
 if (have_video)
-open_video(oc, video_codec, &video_st, opt);
+open_video(video_codec, &video_st, opt);
 
 if (have_audio)
-open_audio(oc, audio_codec, &audio_st, opt);
+open_audio(audio_codec, &audio_st, opt);
 
 av_dump_format(oc, 0, filename, 1);
 
-- 
2.25.0



___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


Re: [FFmpeg-devel] [PATCH 1/3] libavcodec/mips: Fix specification of instruction name

2021-05-28 Thread Michael Niedermayer
On Fri, May 28, 2021 at 05:21:39PM +0800, yinshiyou...@loongson.cn wrote:
> 
> 
> 
[...]
> > ___
> > ffmpeg-devel mailing list
> > ffmpeg-devel@ffmpeg.org
> > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> > 
> > To unsubscribe, visit link above, or email
> > ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
> 
> LGTM.

will apply

thx

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

What does censorship reveal? It reveals fear. -- Julian Assange


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] Ping: [PATCH] avfilter/vf_subtitles: allow using embedded fonts

2021-05-28 Thread Oneric
On Sun, May 02, 2021 at 23:02:02 +0200, Oneric wrote:
> ASS subtitles can have encoded fonts embedded into the subtitle file
> itself. Allow libass to load those, to render subs as intended.
> ---
>  libavfilter/vf_subtitles.c | 1 +
>  1 file changed, 1 insertion(+)

another ping
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] [PATCH 3/3] avcodec/lpc: Avoid floating point division by 0 in compute_ref_coefs()

2021-05-28 Thread Michael Niedermayer
Fixes: Ticket7996
Fixes: CVE-2020-20445

Signed-off-by: Michael Niedermayer 
---
 libavcodec/lpc.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libavcodec/lpc.h b/libavcodec/lpc.h
index 88ca247f87..52170fd623 100644
--- a/libavcodec/lpc.h
+++ b/libavcodec/lpc.h
@@ -143,7 +143,7 @@ static inline void compute_ref_coefs(const LPC_TYPE *autoc, 
int max_order,
 gen0[i] = gen1[i] = autoc[i + 1];
 
 err= autoc[0];
-ref[0] = -gen1[0] / err;
+ref[0] = -gen1[0] / ((USE_FIXED || err) ? err : 1);
 err   +=  gen1[0] * ref[0];
 if (error)
 error[0] = err;
@@ -152,7 +152,7 @@ static inline void compute_ref_coefs(const LPC_TYPE *autoc, 
int max_order,
 gen1[j] = gen1[j + 1] + ref[i - 1] * gen0[j];
 gen0[j] = gen1[j + 1] * ref[i - 1] + gen0[j];
 }
-ref[i] = -gen1[0] / err;
+ref[i] = -gen1[0] / ((USE_FIXED || err) ? err : 1);
 err   +=  gen1[0] * ref[i];
 if (error)
 error[i] = err;
-- 
2.17.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] [PATCH 2/3] avcodec/aacpsy: Avoid floating point division by 0 of norm_fac

2021-05-28 Thread Michael Niedermayer
Fixes: Ticket7995
Fixes: CVE-2020-20446

Signed-off-by: Michael Niedermayer 
---
 libavcodec/aacpsy.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavcodec/aacpsy.c b/libavcodec/aacpsy.c
index 482113d427..e51d29750b 100644
--- a/libavcodec/aacpsy.c
+++ b/libavcodec/aacpsy.c
@@ -794,7 +794,7 @@ static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int 
channel,
 
 if (pe < 1.15f * desired_pe) {
 /* 6.6.1.3.6 "Final threshold modification by linearization" */
-norm_fac = 1.0f / norm_fac;
+norm_fac = norm_fac ? 1.0f / norm_fac : 0;
 for (w = 0; w < wi->num_windows*16; w += 16) {
 for (g = 0; g < num_bands; g++) {
 AacPsyBand *band = &pch->band[w+g];
-- 
2.17.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] [PATCH 1/3] avcodec/ratecontrol: Avoid floating point division by 0 of mb_num

2021-05-28 Thread Michael Niedermayer
Fixes: Ticket7990
Fixes: CVE-2020-20448

Signed-off-by: Michael Niedermayer 
---
 libavcodec/ratecontrol.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavcodec/ratecontrol.c b/libavcodec/ratecontrol.c
index 6b77ccd006..aa146730f4 100644
--- a/libavcodec/ratecontrol.c
+++ b/libavcodec/ratecontrol.c
@@ -241,7 +241,7 @@ static double get_qscale(MpegEncContext *s, 
RateControlEntry *rce,
 RateControlContext *rcc = &s->rc_context;
 AVCodecContext *a   = s->avctx;
 const int pict_type = rce->new_pict_type;
-const double mb_num = s->mb_num;
+const double mb_num = s->mb_num ? s->mb_num : 1;
 double q, bits;
 int i;
 
-- 
2.17.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


Re: [FFmpeg-devel] [PATCH 1/3] avcodec/ratecontrol: Avoid floating point division by 0 of mb_num

2021-05-28 Thread Michael Niedermayer
On Fri, May 28, 2021 at 08:48:32PM +0200, Michael Niedermayer wrote:
> Fixes: Ticket7990
> Fixes: CVE-2020-20448
> 
> Signed-off-by: Michael Niedermayer 
> ---
>  libavcodec/ratecontrol.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)

please disregard this patch, its already fixed since 
55279d699fa64d8eb1185d8db04ab4ed92e8dea2

[...]

-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

What's the most stupid thing your enemy could do ? Blow himself up
What's the most stupid thing you could do ? Give up your rights and
freedom because your enemy blew himself up.



signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] [PATCH] avcodec/aacenc: Avoid 0 lambda

2021-05-28 Thread Michael Niedermayer
Fixes: Ticket8003
Fixes: CVE-2020-20453

Signed-off-by: Michael Niedermayer 
---
 libavcodec/aacenc.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/libavcodec/aacenc.c b/libavcodec/aacenc.c
index aa223cf25f..e80591ba86 100644
--- a/libavcodec/aacenc.c
+++ b/libavcodec/aacenc.c
@@ -28,6 +28,7 @@
  *  TODOs:
  * add sane pulse detection
  ***/
+#include 
 
 #include "libavutil/libm.h"
 #include "libavutil/float_dsp.h"
@@ -852,7 +853,7 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket 
*avpkt,
 /* Not so fast though */
 ratio = sqrtf(ratio);
 }
-s->lambda = FFMIN(s->lambda * ratio, 65536.f);
+s->lambda = av_clipf(s->lambda * ratio, FLT_MIN, 65536.f);
 
 /* Keep iterating if we must reduce and lambda is in the sky */
 if (ratio > 0.9f && ratio < 1.1f) {
-- 
2.17.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


Re: [FFmpeg-devel] Ping: [PATCH] avfilter/vf_subtitles: allow using embedded fonts

2021-05-28 Thread Gyan Doshi




On 2021-05-28 23:26, Oneric wrote:

On Sun, May 02, 2021 at 23:02:02 +0200, Oneric wrote:

ASS subtitles can have encoded fonts embedded into the subtitle file
itself. Allow libass to load those, to render subs as intended.
---
  libavfilter/vf_subtitles.c | 1 +
  1 file changed, 1 insertion(+)

another ping


I'll test this and apply.

Gyan
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


Re: [FFmpeg-devel] Ping: [PATCH] avfilter/vf_subtitles: allow using embedded fonts

2021-05-28 Thread Gyan Doshi



On 2021-05-29 11:07, Gyan Doshi wrote:



On 2021-05-28 23:26, Oneric wrote:

On Sun, May 02, 2021 at 23:02:02 +0200, Oneric wrote:

ASS subtitles can have encoded fonts embedded into the subtitle file
itself. Allow libass to load those, to render subs as intended.
---
  libavfilter/vf_subtitles.c | 1 +
  1 file changed, 1 insertion(+)

another ping


I'll test this and apply.


Pushed as 3300625c6f148455b08d641597d54b5be4c0f76a

Gyan
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


Re: [FFmpeg-devel] Ping: [PATCH] avfilter/vf_subtitles: allow using embedded fonts

2021-05-28 Thread Gyan Doshi



On 2021-05-29 11:51, Gyan Doshi wrote:



On 2021-05-29 11:07, Gyan Doshi wrote:



On 2021-05-28 23:26, Oneric wrote:

On Sun, May 02, 2021 at 23:02:02 +0200, Oneric wrote:

ASS subtitles can have encoded fonts embedded into the subtitle file
itself. Allow libass to load those, to render subs as intended.
---
  libavfilter/vf_subtitles.c | 1 +
  1 file changed, 1 insertion(+)

another ping


I'll test this and apply.


Pushed as 3300625c6f148455b08d641597d54b5be4c0f76a


Would it make sense to allow users to not load embedded fonts?

Gyan
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


Re: [FFmpeg-devel] [PATCH v2] ffmpeg: add -fpsmin to clamp output framerate

2021-05-28 Thread Gyan Doshi



On 2021-05-15 18:41, Gyan Doshi wrote:

I'll review this in a few days.


Can you send this again? Patchwork doesn't appear to have picked it up earlier.



On 2021-05-11 21:42, Matthias Neugebauer wrote:

Add -fpsmin analogously to -fpsmax for setting a lower bound to the
auto-set frame rate.

Signed-off-by: Matthias Neugebauer 
---
doc/ffmpeg.texi  |  7 +++
fftools/ffmpeg.c |  8 +++-
fftools/ffmpeg.h |  3 +++
fftools/ffmpeg_opt.c | 26 +++---
4 files changed, 40 insertions(+), 4 deletions(-)

diff --git a/doc/ffmpeg.texi b/doc/ffmpeg.texi
index 9feabe6517..5576508347 100644
--- a/doc/ffmpeg.texi
+++ b/doc/ffmpeg.texi
@@ -862,6 +862,13 @@ Clamps output frame rate when output framerate 
is auto-set and is higher than th
Useful in batch processing or when input framerate is wrongly 
detected as very high.
It cannot be set together with @code{-r}. It is ignored during 
streamcopy.


+@item -fpsmin[:@var{stream_specifier}] @var{fps} 
(@emph{output,per-stream})

+Set minimum frame rate (Hz value, fraction or abbreviation).
+
+Clamps output frame rate when output framerate is auto-set and is 
lower than this value.
+Useful in batch processing or when input framerate is wrongly 
detected as very low.
+It cannot be set together with @code{-r}. It is ignored during 
streamcopy.

+
@item -s[:@var{stream_specifier}] @var{size} 
(@emph{input/output,per-stream})

Set frame size.

diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c
index 3ad11452da..696ba7d730 100644
--- a/fftools/ffmpeg.c
+++ b/fftools/ffmpeg.c
@@ -3390,7 +3390,8 @@ static int 
init_output_stream_encode(OutputStream *ost, AVFrame *frame)

  ost->frame_rate = ist->framerate;
  if (ist && !ost->frame_rate.num)
  ost->frame_rate = ist->st->r_frame_rate;
-    if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
+    if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num 
&& !ost->min_frame_rate.num)

+    {
  ost->frame_rate = (AVRational){25, 1};
  av_log(NULL, AV_LOG_WARNING,
 "No information "
@@ -3400,6 +3401,11 @@ static int 
init_output_stream_encode(OutputStream *ost, AVFrame *frame)

 ost->file_index, ost->index);
  }

+    if (ost->min_frame_rate.num &&
+    (av_q2d(ost->frame_rate) < av_q2d(ost->min_frame_rate) ||
+    !ost->frame_rate.den))
+    ost->frame_rate = ost->min_frame_rate;
+
  if (ost->max_frame_rate.num &&
  (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
  !ost->frame_rate.den))
diff --git a/fftools/ffmpeg.h b/fftools/ffmpeg.h
index 606f2afe0c..515d0c7a46 100644
--- a/fftools/ffmpeg.h
+++ b/fftools/ffmpeg.h
@@ -110,6 +110,8 @@ typedef struct OptionsContext {
  int    nb_frame_rates;
  SpecifierOpt *max_frame_rates;
  int    nb_max_frame_rates;
+    SpecifierOpt *min_frame_rates;
+    int    nb_min_frame_rates;
  SpecifierOpt *frame_sizes;
  int    nb_frame_sizes;
  SpecifierOpt *frame_pix_fmts;
@@ -486,6 +488,7 @@ typedef struct OutputStream {
  /* video only */
  AVRational frame_rate;
  AVRational max_frame_rate;
+    AVRational min_frame_rate;
  int is_cfr;
  int force_fps;
  int top_field_first;
diff --git a/fftools/ffmpeg_opt.c b/fftools/ffmpeg_opt.c
index 849d24b16d..b0d7550ce6 100644
--- a/fftools/ffmpeg_opt.c
+++ b/fftools/ffmpeg_opt.c
@@ -56,6 +56,7 @@ static const char *const 
opt_name_audio_channels[]    = {"ac", NULL};
static const char *const opt_name_audio_sample_rate[] = 
{"ar", NULL};
static const char *const opt_name_frame_rates[]   = {"r", 
NULL};
static const char *const opt_name_max_frame_rates[]   = 
{"fpsmax", NULL};
+static const char *const opt_name_min_frame_rates[]   = 
{"fpsmin", NULL};
static const char *const opt_name_frame_sizes[]   = {"s", 
NULL};
static const char *const opt_name_frame_pix_fmts[]    = 
{"pix_fmt", NULL};
static const char *const opt_name_ts_scale[]  = 
{"itsscale", NULL};
@@ -1694,7 +1695,7 @@ static OutputStream 
*new_video_stream(OptionsContext *o, AVFormatContext *oc, in

  AVStream *st;
  OutputStream *ost;
  AVCodecContext *video_enc;
-    char *frame_rate = NULL, *max_frame_rate = NULL, 
*frame_aspect_ratio = NULL;
+    char *frame_rate = NULL, *max_frame_rate = NULL, *min_frame_rate 
= NULL, *frame_aspect_ratio = NULL;


  ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO, source_index);
  st  = ost->st;
@@ -1712,14 +1713,30 @@ static OutputStream 
*new_video_stream(OptionsContext *o, AVFormatContext *oc, in

  exit_program(1);
  }

+    MATCH_PER_STREAM_OPT(min_frame_rates, str, min_frame_rate, oc, st);
+    if (min_frame_rate && av_parse_video_rate(&ost->min_frame_rate, 
min_frame_rate) < 0) {
+    av_log(NULL, AV_LOG_FATAL, "Invalid minimum framerate value: 
%s\n", mi