> On Dec 15, 2024, at 12:14, Scott Theisen <scott.the....@gmail.com> wrote:
>
> Based on patches by Lukas Rusak from
> https://github.com/lrusak/FFmpeg/commits/v4l2-drmprime-v4
>
> libavcodec: v4l2m2m: output AVDRMFrameDescriptor
> https://github.com/lrusak/FFmpeg/commit/2cb8052ac65a56d8a3f347a1e6f12d4449a5a614
>
> libavcodec: v4l2m2m: depends on libdrm
> https://github.com/lrusak/FFmpeg/commit/ab4cf3e6fb37cffdebccca52e36a7b2deb7e729f
>
> libavcodec: v4l2m2m: set format_modifier to DRM_FORMAT_MOD_LINEAR
> https://github.com/lrusak/FFmpeg/commit/9438a6efa29c7c7ec80c39c9b013b9a12d7b5f33
>
> libavcodec: v4l2m2m: only mmap the buffer when it is output type and drm
> prime is used
> https://github.com/lrusak/FFmpeg/commit/093656607863e47630de2d1cfcf0ac8e4b93a69e
>
> libavcodec: v4l2m2m: allow using software pixel formats
> https://github.com/lrusak/FFmpeg/commit/8405b573e83838e6b2fea99825fbef32ee9f7767
>
> libavcodec: v4l2m2m: implement hwcontext
> https://github.com/lrusak/FFmpeg/commit/b2c1f1eb39b54bf034497a7f2a7f23855d0a7cde
>
> libavcodec: v4l2m2m: implement flush
> https://github.com/lrusak/FFmpeg/commit/e793ef82727d6d6f55c40844463d476e7e84efad
>
> Originally added to MythTV in:
> FFmpeg: Patch FFmpeg for V4L2 codecs DRM PRIME support
> https://github.com/MythTV/mythtv/commit/cc7572f9b26189ad5d5d504c05f08e53e4e61b54
>
> FFmpeg: Re-apply v4l2 memory to memory DRM_PRIME support
> https://github.com/MythTV/mythtv/commit/1c942720591b5b7820abe9ed0d805afabbdffe3c
>
> modified in:
> V4L2 Codecs: Fix lockup when seeking
> https://github.com/MythTV/mythtv/commit/fdc0645aba9a9ad373888bd62ebcbc83a3feb7e5
>
> v4l2_buffers: Add some libdrm ifdef's
> https://github.com/MythTV/mythtv/commit/336df1067abfa4fe7cf611541e5b6f3561fc81a2
>
> !!!!
> NB: libavcodec/v4l2_m2m_dec.c: v4l2_decode_init(): I'm returning -1
> since I don't know what error code I should use.
>
> Note also Lukas Rusak's v5 series:
> closer diff to current state, otherwise unchanged
> libavcodec: v4l2m2m: output AVDRMFrameDescriptor
> https://github.com/lrusak/FFmpeg/commit/c6b85ed30f06ea99513b13cc768a922ebe4d68c2
>
> new option:
> libavcodec: v4l2m2m: add option to specify pixel format used by the decoder
> https://github.com/lrusak/FFmpeg/commit/ffc4419f456c00ab71cf93f792b0473c6de14e64
>
> additional code vs v4
> libavcodec: v4l2m2m: implement flush
> https://github.com/lrusak/FFmpeg/commit/8595d06d4909bbec0aa14625fcfc869c6bcef696
> ---
> configure | 1 +
> libavcodec/v4l2_buffers.c | 206 +++++++++++++++++++++++++++++++++++---
> libavcodec/v4l2_buffers.h | 4 +
> libavcodec/v4l2_context.c | 43 +++++++-
> libavcodec/v4l2_context.h | 2 +
> libavcodec/v4l2_m2m.h | 5 +
> libavcodec/v4l2_m2m_dec.c | 62 ++++++++++++
> 7 files changed, 305 insertions(+), 18 deletions(-)
>
> diff --git a/configure b/configure
> index bf55ba67fa..5f02cf3b51 100755
> --- a/configure
> +++ b/configure
> @@ -3770,6 +3770,7 @@ sndio_indev_deps="sndio"
> sndio_outdev_deps="sndio"
> v4l2_indev_deps_any="linux_videodev2_h sys_videoio_h"
> v4l2_indev_suggest="libv4l2"
> +v4l2_outdev_deps="libdrm"
Why v4l2_outdev when the patch is for libavcodec? > v4l2_outdev_deps_any="linux_videodev2_h sys_videoio_h" > v4l2_outdev_suggest="libv4l2" > vfwcap_indev_deps="vfw32 vfwcap_defines" > diff --git a/libavcodec/v4l2_buffers.c b/libavcodec/v4l2_buffers.c > index 56a8f0825c..1a1a29c4e6 100644 > --- a/libavcodec/v4l2_buffers.c > +++ b/libavcodec/v4l2_buffers.c > @@ -21,6 +21,10 @@ > * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 > USA > */ > > +#include "config.h" > +#if CONFIG_LIBDRM > +#include <drm_fourcc.h> > +#endif > #include <linux/videodev2.h> > #include <sys/ioctl.h> > #include <sys/mman.h> > @@ -29,6 +33,8 @@ > #include <poll.h> > #include "libavcodec/avcodec.h" > #include "libavutil/pixdesc.h" > +#include "libavutil/hwcontext.h" > +#include "libavutil/buffer.h" > #include "refstruct.h" > #include "v4l2_context.h" > #include "v4l2_buffers.h" > @@ -247,6 +253,80 @@ FF_ENABLE_DEPRECATION_WARNINGS > } > } > > +#if CONFIG_LIBDRM > +static uint8_t * v4l2_get_drm_frame(V4L2Buffer *avbuf) > +{ > + AVDRMFrameDescriptor *drm_desc = &avbuf->drm_frame; > + AVDRMLayerDescriptor *layer; > + > + /* fill the DRM frame descriptor */ > + drm_desc->nb_objects = avbuf->num_planes; > + drm_desc->nb_layers = 1; > + > + layer = &drm_desc->layers[0]; > + layer->nb_planes = avbuf->num_planes; > + > + for (int i = 0; i < avbuf->num_planes; i++) { > + layer->planes[i].object_index = i; > + layer->planes[i].offset = 0; > + layer->planes[i].pitch = avbuf->plane_info[i].bytesperline; > + } > + > + switch (avbuf->context->av_pix_fmt) { > + case AV_PIX_FMT_YUYV422: > + > + layer->format = DRM_FORMAT_YUYV; > + layer->nb_planes = 1; > + > + break; > + > + case AV_PIX_FMT_NV12: > + case AV_PIX_FMT_NV21: > + > + layer->format = avbuf->context->av_pix_fmt == AV_PIX_FMT_NV12 ? 
> + DRM_FORMAT_NV12 : DRM_FORMAT_NV21; > + > + if (avbuf->num_planes > 1) > + break; > + > + layer->nb_planes = 2; > + > + layer->planes[1].object_index = 0; > + layer->planes[1].offset = avbuf->plane_info[0].bytesperline * > + avbuf->context->format.fmt.pix.height; > + layer->planes[1].pitch = avbuf->plane_info[0].bytesperline; > + break; > + > + case AV_PIX_FMT_YUV420P: > + > + layer->format = DRM_FORMAT_YUV420; > + > + if (avbuf->num_planes > 1) > + break; > + > + layer->nb_planes = 3; > + > + layer->planes[1].object_index = 0; > + layer->planes[1].offset = avbuf->plane_info[0].bytesperline * > + avbuf->context->format.fmt.pix.height; > + layer->planes[1].pitch = avbuf->plane_info[0].bytesperline >> 1; > + > + layer->planes[2].object_index = 0; > + layer->planes[2].offset = layer->planes[1].offset + > + ((avbuf->plane_info[0].bytesperline * > + avbuf->context->format.fmt.pix.height) >> 2); > + layer->planes[2].pitch = avbuf->plane_info[0].bytesperline >> 1; > + break; > + > + default: > + drm_desc->nb_layers = 0; > + break; > + } > + > + return (uint8_t *) drm_desc; > +} > +#endif > + > static void v4l2_free_buffer(void *opaque, uint8_t *unused) > { > V4L2Buffer* avbuf = opaque; > @@ -271,6 +351,40 @@ static void v4l2_free_buffer(void *opaque, uint8_t > *unused) > } > } > > +#if CONFIG_LIBDRM > +static int v4l2_buffer_export_drm(V4L2Buffer* avbuf) > +{ > + struct v4l2_exportbuffer expbuf; > + int i, ret; > + > + for (i = 0; i < avbuf->num_planes; i++) { > + memset(&expbuf, 0, sizeof(expbuf)); > + > + expbuf.index = avbuf->buf.index; > + expbuf.type = avbuf->buf.type; > + expbuf.plane = i; > + > + ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_EXPBUF, &expbuf); > + if (ret < 0) > + return AVERROR(errno); > + > + if (V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type)) { > + /* drm frame */ > + avbuf->drm_frame.objects[i].size = avbuf->buf.m.planes[i].length; > + avbuf->drm_frame.objects[i].fd = expbuf.fd; > + avbuf->drm_frame.objects[i].format_modifier = > DRM_FORMAT_MOD_LINEAR; > + } else { > + /* drm frame */ > + avbuf->drm_frame.objects[0].size = avbuf->buf.length; > + avbuf->drm_frame.objects[0].fd = expbuf.fd; > + avbuf->drm_frame.objects[0].format_modifier = > DRM_FORMAT_MOD_LINEAR; > + } > + } > + > + return 0; > +} > +#endif > + > static int v4l2_buf_increase_ref(V4L2Buffer *in) > { > V4L2m2mContext *s = buf_to_m2mctx(in); > @@ -289,6 +403,26 @@ static int v4l2_buf_increase_ref(V4L2Buffer *in) > return 0; > } > > +#if CONFIG_LIBDRM > +static int v4l2_buf_to_bufref_drm(V4L2Buffer *in, AVBufferRef **buf) > +{ > + int ret; > + > + *buf = av_buffer_create((uint8_t *) &in->drm_frame, > + sizeof(in->drm_frame), > + v4l2_free_buffer, > + in, AV_BUFFER_FLAG_READONLY); > + if (!*buf) > + return AVERROR(ENOMEM); > + > + ret = v4l2_buf_increase_ref(in); > + if (ret) > + av_buffer_unref(buf); > + > + return ret; > +} > +#endif > + > static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf) > { > int ret; > @@ -458,9 +592,22 @@ int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, > V4L2Buffer *avbuf) > av_frame_unref(frame); > > /* 1. get references to the actual data */ > - ret = v4l2_buffer_buf_to_swframe(frame, avbuf); > - if (ret) > - return ret; > + if (buf_to_m2mctx(avbuf)->output_drm) { > +#if CONFIG_LIBDRM > + /* 1. 
get references to the actual data */ > + ret = v4l2_buf_to_bufref_drm(avbuf, &frame->buf[0]); > + if (ret) > + return ret; > + > + frame->data[0] = (uint8_t *) v4l2_get_drm_frame(avbuf); > + frame->format = AV_PIX_FMT_DRM_PRIME; > + frame->hw_frames_ctx = av_buffer_ref(avbuf->context->frames_ref); > +#endif > + } else { > + ret = v4l2_buffer_buf_to_swframe(frame, avbuf); > + if (ret) > + return ret; > + } > > /* 2. get frame information */ > if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME) > @@ -537,6 +684,27 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int > index) > avbuf->buf.type = ctx->type; > avbuf->buf.index = index; > > + if (buf_to_m2mctx(avbuf)->output_drm) { > + AVHWFramesContext *hwframes; > + > + av_buffer_unref(&ctx->frames_ref); > + > + ctx->frames_ref = > av_hwframe_ctx_alloc(buf_to_m2mctx(avbuf)->device_ref); > + if (!ctx->frames_ref) { > + ret = AVERROR(ENOMEM); > + return ret; > + } > + > + hwframes = (AVHWFramesContext*)ctx->frames_ref->data; > + hwframes->format = AV_PIX_FMT_DRM_PRIME; > + hwframes->sw_format = ctx->av_pix_fmt; > + hwframes->width = ctx->width; > + hwframes->height = ctx->height; > + ret = av_hwframe_ctx_init(ctx->frames_ref); > + if (ret < 0) > + return ret; > + } > + > if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) { > avbuf->buf.length = VIDEO_MAX_PLANES; > avbuf->buf.m.planes = avbuf->planes; > @@ -564,14 +732,20 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int > index) > > if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) { > avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length; > - avbuf->plane_info[i].mm_addr = mmap(NULL, > avbuf->buf.m.planes[i].length, > - PROT_READ | PROT_WRITE, > MAP_SHARED, > - buf_to_m2mctx(avbuf)->fd, > avbuf->buf.m.planes[i].m.mem_offset); > + > + if (V4L2_TYPE_IS_OUTPUT(ctx->type) || > !buf_to_m2mctx(avbuf)->output_drm) { > + avbuf->plane_info[i].mm_addr = mmap(NULL, > avbuf->buf.m.planes[i].length, > + PROT_READ | PROT_WRITE, > MAP_SHARED, > + buf_to_m2mctx(avbuf)->fd, > avbuf->buf.m.planes[i].m.mem_offset); > + } > } else { > avbuf->plane_info[i].length = avbuf->buf.length; > - avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length, > - PROT_READ | PROT_WRITE, MAP_SHARED, > - buf_to_m2mctx(avbuf)->fd, > avbuf->buf.m.offset); > + > + if (V4L2_TYPE_IS_OUTPUT(ctx->type) || > !buf_to_m2mctx(avbuf)->output_drm) { > + avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length, > + PROT_READ | PROT_WRITE, > MAP_SHARED, > + buf_to_m2mctx(avbuf)->fd, > avbuf->buf.m.offset); > + } > } > > if (avbuf->plane_info[i].mm_addr == MAP_FAILED) > @@ -580,18 +754,24 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int > index) > > avbuf->status = V4L2BUF_AVAILABLE; > > - if (V4L2_TYPE_IS_OUTPUT(ctx->type)) > - return 0; > - > if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) { > avbuf->buf.m.planes = avbuf->planes; > avbuf->buf.length = avbuf->num_planes; > - > } else { > avbuf->buf.bytesused = avbuf->planes[0].bytesused; > avbuf->buf.length = avbuf->planes[0].length; > } > > + if (V4L2_TYPE_IS_OUTPUT(ctx->type)) > + return 0; > + > +#if CONFIG_LIBDRM > + if (buf_to_m2mctx(avbuf)->output_drm) { > + ret = v4l2_buffer_export_drm(avbuf); > + if (ret) > + return ret; > + } > +#endif > return ff_v4l2_buffer_enqueue(avbuf); > } > > diff --git a/libavcodec/v4l2_buffers.h b/libavcodec/v4l2_buffers.h > index e35b161309..964e037b9d 100644 > --- a/libavcodec/v4l2_buffers.h > +++ b/libavcodec/v4l2_buffers.h > @@ -28,6 +28,7 @@ > #include <stddef.h> > #include <linux/videodev2.h> > > +#include "libavutil/hwcontext_drm.h" > #include 
"libavutil/frame.h" > #include "packet.h" > > @@ -44,6 +45,9 @@ typedef struct V4L2Buffer { > /* each buffer needs to have a reference to its context */ > struct V4L2Context *context; > > + /* DRM descriptor */ > + AVDRMFrameDescriptor drm_frame; > + > /* This object is refcounted per-plane, so we need to keep track > * of how many context-refs we are holding. > * This pointer is a RefStruct reference. */ > diff --git a/libavcodec/v4l2_context.c b/libavcodec/v4l2_context.c > index be1df3785b..e9c37f05cd 100644 > --- a/libavcodec/v4l2_context.c > +++ b/libavcodec/v4l2_context.c > @@ -290,8 +290,8 @@ static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, > int timeout) > "packets/frames.\n"); > } > > - /* if we are draining and there are no more capture buffers queued in > the driver we are done */ > - if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx_to_m2mctx(ctx)->draining) { > + /* if there are no more capture buffers queued in the driver, skip > polling */ > + if (!V4L2_TYPE_IS_OUTPUT(ctx->type)) { > for (i = 0; i < ctx->num_buffers; i++) { > /* capture buffer initialization happens during decode hence > * detection happens at runtime > @@ -302,7 +302,9 @@ static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, > int timeout) > if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER) > goto start; > } > - ctx->done = 1; > + /* if we were waiting to drain, all done! */ > + if (ctx_to_m2mctx(ctx)->draining) > + ctx->done = 1; > return NULL; > } > > @@ -448,20 +450,51 @@ static int v4l2_release_buffers(V4L2Context* ctx) > .type = ctx->type, > .count = 0, /* 0 -> unmaps buffers from the driver */ > }; > - int i, j; > + int ret, i, j; > > for (i = 0; i < ctx->num_buffers; i++) { > V4L2Buffer *buffer = &ctx->buffers[i]; > > for (j = 0; j < buffer->num_planes; j++) { > struct V4L2Plane_info *p = &buffer->plane_info[j]; > + > + if (V4L2_TYPE_IS_OUTPUT(ctx->type)) { > + /* output buffers are not EXPORTED */ > + goto unmap; > + } > + > + if (ctx_to_m2mctx(ctx)->output_drm) { > + /* use the DRM frame to close */ > + if (buffer->drm_frame.objects[j].fd >= 0) { > + if (close(buffer->drm_frame.objects[j].fd) < 0) { > + av_log(logger(ctx), AV_LOG_ERROR, "%s close drm fd " > + "[buffer=%2d, plane=%d, fd=%2d] - %s \n", > + ctx->name, i, j, buffer->drm_frame.objects[j].fd, > + av_err2str(AVERROR(errno))); > + } > + } > + } > +unmap: > if (p->mm_addr && p->length) > if (munmap(p->mm_addr, p->length) < 0) > av_log(logger(ctx), AV_LOG_ERROR, "%s unmap plane > (%s))\n", ctx->name, av_err2str(AVERROR(errno))); > } > } > > - return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req); > + ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req); > + if (ret < 0) { > + av_log(logger(ctx), AV_LOG_ERROR, "release all %s buffers > (%s)\n", > + ctx->name, av_err2str(AVERROR(errno))); > + > + if (ctx_to_m2mctx(ctx)->output_drm) > + av_log(logger(ctx), AV_LOG_ERROR, > + "Make sure the DRM client releases all FB/GEM objects > before closing the codec (ie):\n" > + "for all buffers: \n" > + " 1. drmModeRmFB(..)\n" > + " 2. drmIoctl(.., DRM_IOCTL_GEM_CLOSE,... 
)\n"); > + } > + > + return ret; > } > > static inline int v4l2_try_raw_format(V4L2Context* ctx, enum AVPixelFormat > pixfmt) > diff --git a/libavcodec/v4l2_context.h b/libavcodec/v4l2_context.h > index 6f7460c89a..02fbb6eec3 100644 > --- a/libavcodec/v4l2_context.h > +++ b/libavcodec/v4l2_context.h > @@ -93,6 +93,8 @@ typedef struct V4L2Context { > */ > int done; > > + AVBufferRef *frames_ref; > + > } V4L2Context; > > /** > diff --git a/libavcodec/v4l2_m2m.h b/libavcodec/v4l2_m2m.h > index 4ba33dc335..47dfe0905f 100644 > --- a/libavcodec/v4l2_m2m.h > +++ b/libavcodec/v4l2_m2m.h > @@ -66,6 +66,11 @@ typedef struct V4L2m2mContext { > > /* reference back to V4L2m2mPriv */ > void *priv; > + > + AVBufferRef *device_ref; > + > + /* generate DRM frames */ > + int output_drm; > } V4L2m2mContext; > > typedef struct V4L2m2mPriv { > diff --git a/libavcodec/v4l2_m2m_dec.c b/libavcodec/v4l2_m2m_dec.c > index aa2d759e1e..cfeee7f2fb 100644 > --- a/libavcodec/v4l2_m2m_dec.c > +++ b/libavcodec/v4l2_m2m_dec.c > @@ -23,12 +23,17 @@ > > #include <linux/videodev2.h> > #include <sys/ioctl.h> > + > +#include "libavutil/hwcontext.h" > +#include "libavutil/hwcontext_drm.h" > #include "libavutil/pixfmt.h" > #include "libavutil/pixdesc.h" > #include "libavutil/opt.h" > #include "libavcodec/avcodec.h" > #include "codec_internal.h" > #include "libavcodec/decode.h" > +#include "libavcodec/internal.h" > +#include "libavcodec/hwconfig.h" > > #include "v4l2_context.h" > #include "v4l2_m2m.h" > @@ -205,7 +210,44 @@ static av_cold int v4l2_decode_init(AVCodecContext > *avctx) > capture->av_codec_id = AV_CODEC_ID_RAWVIDEO; > capture->av_pix_fmt = avctx->pix_fmt; > > + /* the client requests the codec to generate DRM frames: > + * - data[0] will therefore point to the returned AVDRMFrameDescriptor > + * check the ff_v4l2_buffer_to_avframe conversion function. > + * - the DRM frame format is passed in the DRM frame descriptor layer. > + * check the v4l2_get_drm_frame function. > + */ > + { > + const enum AVPixelFormat *pix_fmts = NULL; > + int num_pix_fmts = 0; > + ret = avcodec_get_supported_config(avctx, NULL, > AV_CODEC_CONFIG_PIX_FORMAT, > + 0, (const void **) &pix_fmts, > &num_pix_fmts); I don’t think here is the meant use case for avcodec_get_supported_config. Specify available pixel format directly in pix_fmts. > + if (ret < 0) > + return ret; > + if (pix_fmts == NULL || num_pix_fmts < 1) > + return -1; There is no which error code to use issue after remove avcodec_get_supported_config. > + switch (ff_get_format(avctx, pix_fmts)) { > + case AV_PIX_FMT_DRM_PRIME: > + s->output_drm = 1; > + break; > + case AV_PIX_FMT_NONE: > + return 0; > + break; Why return 0 for AV_PIX_FMT_NONE? It’s suspicious. ‘break’ can be removed. > + default: > + break; > + } > + } > + > + s->device_ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_DRM); > + if (!s->device_ref) { > + ret = AVERROR(ENOMEM); > + return ret; > + } I’m not sure whether there is use case to let user specify device? 
> +
> s->avctx = avctx;
> ret = av_hwdevice_ctx_init(s->device_ref);
> if (ret < 0)
> return ret;
> +
> ret = ff_v4l2_m2m_codec_init(priv);
> if (ret) {
> av_log(avctx, AV_LOG_ERROR, "can't configure decoder\n");
> @@ -220,6 +262,16 @@ static av_cold int v4l2_decode_close(AVCodecContext
> *avctx)
> return ff_v4l2_m2m_codec_end(avctx->priv_data);
> }
>
> +static void v4l2_flush(AVCodecContext *avctx)
> +{
> + V4L2m2mPriv *priv = avctx->priv_data;
> + V4L2m2mContext* s = priv->context;
> +
> + /* wait for pending buffer references */
> + if (atomic_load(&s->refcount))
> + while(sem_wait(&s->refsync) == -1 && errno == EINTR);
> +}

Does refcount depend on the user's behavior, such as holding on to a frame? Is it possible to run into a deadlock here, e.g., if the user requests a flush while still holding some frames?

> +
> #define OFFSET(x) offsetof(V4L2m2mPriv, x)
> #define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
>
> @@ -230,6 +282,11 @@ static const AVOption options[] = {
> { NULL},
> };
>
> +static const AVCodecHWConfigInternal *v4l2_m2m_hw_configs[] = {
> + HW_CONFIG_INTERNAL(DRM_PRIME),

This depends on whether the user is allowed to specify the DRM device.

> + NULL
> +};
>
> #define M2MDEC_CLASS(NAME) \
> static const AVClass v4l2_m2m_ ## NAME ## _dec_class = { \
> .class_name = #NAME "_v4l2m2m_decoder", \
> @@ -250,7 +307,12 @@ static const AVOption options[] = {
> .init = v4l2_decode_init, \
> FF_CODEC_RECEIVE_FRAME_CB(v4l2_receive_frame), \
> .close = v4l2_decode_close, \
> + .flush = v4l2_flush, \
> + .p.pix_fmts = (const enum AVPixelFormat[]) {
> AV_PIX_FMT_DRM_PRIME, \
> + AV_PIX_FMT_NV12, \
> + AV_PIX_FMT_NONE}, \
> .bsfs = bsf_name, \
> + .hw_configs = v4l2_m2m_hw_configs, \
> .p.capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY |
> AV_CODEC_CAP_AVOID_PROBING, \
> .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE | \
> FF_CODEC_CAP_INIT_CLEANUP, \
> --
> 2.43.0
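One more note to make the flush question above concrete. This is the kind of caller sequence I am worried about (hypothetical, public API only; whether it actually blocks depends on how refcount and refsync behave while a frame still references an exported buffer):

    avcodec_send_packet(avctx, pkt);
    avcodec_receive_frame(avctx, frame);  /* frame still holds a V4L2 buffer reference */
    avcodec_flush_buffers(avctx);         /* v4l2_flush(): refcount != 0, sem_wait() blocks */
    av_frame_unref(frame);                /* would drop the reference, but is never reached */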