On 16.07.2021 18:09, Jai Luthra wrote:
Recent encode API restructure (827d6fe73d) removed some state, which broke
the API for flushing without closing the encoder.

This functionality was originally added in 3ea7057677 and is useful for
segmented video, where we don't want to do expensive re-init of HW sessions
for every segment.
---
v2: Forgot to rebase off master in v1

  libavcodec/nvenc.c | 16 +++++++++++-----
  libavcodec/nvenc.h |  2 ++
  2 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/libavcodec/nvenc.c b/libavcodec/nvenc.c
index ee046b9cef..ee0beb4795 100644
--- a/libavcodec/nvenc.c
+++ b/libavcodec/nvenc.c
@@ -2281,6 +2281,11 @@ static int nvenc_send_frame(AVCodecContext *avctx, const AVFrame *frame)
         return AVERROR(EINVAL);
 
     if (frame && frame->buf[0]) {
+        if (ctx->encoder_flushing) {
+            ctx->encoder_flushing = 0;
+            av_fifo_reset(ctx->timestamp_list);
+        }
+
         in_surf = get_free_frame(ctx);
         if (!in_surf)
             return AVERROR(EAGAIN);
@@ -2335,6 +2340,7 @@ static int nvenc_send_frame(AVCodecContext *avctx, const AVFrame *frame)
         nvenc_codec_specific_pic_params(avctx, &pic_params, ctx->sei_data, sei_count);
     } else {
         pic_params.encodePicFlags = NV_ENC_PIC_FLAG_EOS;
+        ctx->encoder_flushing = 1;
     }
 
     res = nvenc_push_context(avctx);
@@ -2384,8 +2390,11 @@ int ff_nvenc_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
 
     if (!frame->buf[0]) {
         res = ff_encode_get_frame(avctx, frame);
-        if (res < 0 && res != AVERROR_EOF)
+        if (res == AVERROR_EOF || (ctx->encoder_flushing && res == AVERROR(EAGAIN))) {
+            // flushing mode, continue to send packets
+        } else if (res < 0) {
             return res;
+        }
     }
 
     res = nvenc_send_frame(avctx, frame);
@@ -2395,7 +2404,7 @@ int ff_nvenc_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
     } else
         av_frame_unref(frame);
 
-    if (output_ready(avctx, avctx->internal->draining)) {
+    if (output_ready(avctx, ctx->encoder_flushing)) {
         av_fifo_generic_read(ctx->output_surface_ready_queue, &tmp_out_surf, sizeof(tmp_out_surf), NULL);
 
         res = nvenc_push_context(avctx);
@@ -2423,8 +2432,5 @@ int ff_nvenc_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
 
 av_cold void ff_nvenc_encode_flush(AVCodecContext *avctx)
 {
-    NvencContext *ctx = avctx->priv_data;
-
     nvenc_send_frame(avctx, NULL);
-    av_fifo_reset(ctx->timestamp_list);
 }

Is it really correct for this to do absolutely nothing now?

diff --git a/libavcodec/nvenc.h b/libavcodec/nvenc.h
index 85d3a33601..6af3aaf0ce 100644
--- a/libavcodec/nvenc.h
+++ b/libavcodec/nvenc.h
@@ -169,6 +169,8 @@ typedef struct NvencContext
     NV_ENC_SEI_PAYLOAD *sei_data;
     int sei_data_size;
 
+    int encoder_flushing;
+
     struct {
         void *ptr;
         int ptr_index;


Can you explain the logic this follows in a bit more detail? I'm quite confused about what exactly changed, and how it broke this.
How do you trigger a flush, if calling flush does nothing?
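
For reference, the caller-side flow I'd expect for segmented encoding is roughly the following. This is just a sketch of my understanding using the public avcodec API, not part of this patch, and the finish_segment() helper is purely hypothetical:

#include <libavcodec/avcodec.h>

/* Hypothetical helper: drain the encoder at a segment boundary, then
 * reset it and keep feeding frames to the same encoder instance. */
static int finish_segment(AVCodecContext *enc, AVPacket *pkt)
{
    int ret = avcodec_send_frame(enc, NULL);   /* enter draining mode */
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        /* ... write pkt to the current segment ... */
        av_packet_unref(pkt);
    }
    if (ret != AVERROR_EOF)
        return ret;

    /* Reset the draining state without closing the encoder; for nvenc
     * this should end up in ff_nvenc_encode_flush() via the codec's
     * .flush callback. */
    avcodec_flush_buffers(enc);
    return 0;
}

That avcodec_flush_buffers() call is what lands in ff_nvenc_encode_flush(), which is why I'm asking what it is supposed to do now.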
