On Thu, 3 Dec 2015 10:55:12 +0100 Hendrik Leppkes <h.lepp...@gmail.com> wrote:
> ---
>  libavcodec/vp9.c | 137 ++++++++++++++++++++++++++++++++++++++++---------------
>  libavcodec/vp9.h |   3 ++
>  2 files changed, 103 insertions(+), 37 deletions(-)
>
> diff --git a/libavcodec/vp9.c b/libavcodec/vp9.c
> index d8888c0..134ee55 100644
> --- a/libavcodec/vp9.c
> +++ b/libavcodec/vp9.c
> @@ -168,6 +168,15 @@ static const uint8_t bwh_tab[2][N_BS_SIZES][2] = {
>      }
>  };
>
> +static void vp9_unref_frame(AVCodecContext *ctx, VP9Frame *f)
> +{
> +    ff_thread_release_buffer(ctx, &f->tf);
> +    av_buffer_unref(&f->extradata);
> +    av_buffer_unref(&f->hwaccel_priv_buf);
> +    f->segmentation_map = NULL;
> +    f->hwaccel_picture_private = NULL;
> +}
> +
>  static int vp9_alloc_frame(AVCodecContext *ctx, VP9Frame *f)
>  {
>      VP9Context *s = ctx->priv_data;
> @@ -177,21 +186,28 @@ static int vp9_alloc_frame(AVCodecContext *ctx, VP9Frame *f)
>          return ret;
>      sz = 64 * s->sb_cols * s->sb_rows;
>      if (!(f->extradata = av_buffer_allocz(sz * (1 + sizeof(struct VP9mvrefPair))))) {
> -        ff_thread_release_buffer(ctx, &f->tf);
> -        return AVERROR(ENOMEM);
> +        goto fail;
>      }
>
>      f->segmentation_map = f->extradata->data;
>      f->mv = (struct VP9mvrefPair *) (f->extradata->data + sz);
>
> +    if (ctx->hwaccel) {
> +        const AVHWAccel *hwaccel = ctx->hwaccel;
> +        av_assert0(!f->hwaccel_picture_private);
> +        if (hwaccel->frame_priv_data_size) {
> +            f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
> +            if (!f->hwaccel_priv_buf)
> +                goto fail;
> +            f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
> +        }
> +    }
> +
>      return 0;
> -}
>
> -static void vp9_unref_frame(AVCodecContext *ctx, VP9Frame *f)
> -{
> -    ff_thread_release_buffer(ctx, &f->tf);
> -    av_buffer_unref(&f->extradata);
> -    f->segmentation_map = NULL;
> +fail:
> +    vp9_unref_frame(ctx, f);
> +    return AVERROR(ENOMEM);
>  }
>
>  static int vp9_ref_frame(AVCodecContext *ctx, VP9Frame *dst, VP9Frame *src)
> @@ -201,19 +217,31 @@ static int vp9_ref_frame(AVCodecContext *ctx, VP9Frame *dst, VP9Frame *src)
>      if ((res = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0) {
>          return res;
>      } else if (!(dst->extradata = av_buffer_ref(src->extradata))) {
> -        vp9_unref_frame(ctx, dst);
> -        return AVERROR(ENOMEM);
> +        goto fail;
>      }
>
>      dst->segmentation_map = src->segmentation_map;
>      dst->mv = src->mv;
>      dst->uses_2pass = src->uses_2pass;
>
> +    if (src->hwaccel_picture_private) {
> +        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
> +        if (!dst->hwaccel_priv_buf)
> +            goto fail;
> +        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
> +    }
> +
>      return 0;
> +
> +fail:
> +    vp9_unref_frame(ctx, dst);
> +    return AVERROR(ENOMEM);
>  }
>
>  static int update_size(AVCodecContext *ctx, int w, int h)
>  {
> +#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + CONFIG_VP9_D3D11VA_HWACCEL)
> +    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
>      VP9Context *s = ctx->priv_data;
>      uint8_t *p;
>      int bytesperpixel = s->bytesperpixel, res;
> @@ -225,7 +253,25 @@ static int update_size(AVCodecContext *ctx, int w, int h)
>
>      if ((res = ff_set_dimensions(ctx, w, h)) < 0)
>          return res;
> -    s->last_fmt = ctx->pix_fmt = s->pix_fmt;
> +    s->last_fmt = s->pix_fmt;
> +
> +    if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
> +#if CONFIG_VP9_DXVA2_HWACCEL
> +        *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
> +#endif
> +#if CONFIG_VP9_D3D11VA_HWACCEL
> +        *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
> +#endif
> +    }
> +
> +    *fmtp++ = s->pix_fmt;
> +    *fmtp = AV_PIX_FMT_NONE;

I wonder if there should be a nicer way to do that, instead of duplicating it across all new codecs.
(Just thinking out loud.)
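For the sake of discussion, something along these lines is what I have in mind. Purely a sketch: ff_get_hwaccel_format() and its signature are made up here and do not exist in lavc; each decoder would still supply its own compile-time list of candidate hwaccel formats for the current software format, but the list termination and the get_format negotiation would live in one place:

/* Hypothetical shared helper, not existing lavc API -- would live
 * somewhere in libavcodec next to ff_thread_get_format(). The caller
 * passes the hwaccel pixel formats its build supports for the current
 * software format, plus that software format as the fallback; the
 * helper terminates the list and runs the get_format negotiation. */
#include "avcodec.h"
#include "thread.h"

static enum AVPixelFormat ff_get_hwaccel_format(AVCodecContext *avctx,
                                                const enum AVPixelFormat *hw_fmts,
                                                int nb_hw_fmts,
                                                enum AVPixelFormat sw_fmt)
{
    enum AVPixelFormat pix_fmts[8], *fmtp = pix_fmts;
    int i;

    /* hwaccel candidates first, software format as the fallback */
    for (i = 0; i < nb_hw_fmts && i < 6; i++)
        *fmtp++ = hw_fmts[i];
    *fmtp++ = sw_fmt;
    *fmtp   = AV_PIX_FMT_NONE;

    return ff_thread_get_format(avctx, pix_fmts);
}

update_size() would then shrink to a small #if CONFIG_VP9_*_HWACCEL table plus one call, and the next decoder that grows hwaccel support could reuse the same helper instead of repeating the dance.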
> +
> +    res = ff_thread_get_format(ctx, pix_fmts);
> +    if (res < 0)
> +        return res;
> +
> +    ctx->pix_fmt = res;
>      s->sb_cols = (w + 63) >> 6;
>      s->sb_rows = (h + 63) >> 6;
>      s->cols = (w + 7) >> 3;
> @@ -573,32 +619,6 @@ static int decode_frame_header(AVCodecContext *ctx,
>                      s->s.h.varcompref[1] = 2;
>                  }
>              }
> -
> -            for (i = 0; i < 3; i++) {
> -                AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
> -                int refw = ref->width, refh = ref->height;
> -
> -                if (ref->format != s->pix_fmt) {
> -                    av_log(ctx, AV_LOG_ERROR,
> -                           "Ref pixfmt (%s) did not match current frame (%s)",
> -                           av_get_pix_fmt_name(ref->format),
> -                           av_get_pix_fmt_name(s->pix_fmt));
> -                    return AVERROR_INVALIDDATA;
> -                } else if (refw == w && refh == h) {
> -                    s->mvscale[i][0] = s->mvscale[i][1] = 0;
> -                } else {
> -                    if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
> -                        av_log(ctx, AV_LOG_ERROR,
> -                               "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
> -                               refw, refh, w, h);
> -                        return AVERROR_INVALIDDATA;
> -                    }
> -                    s->mvscale[i][0] = (refw << 14) / w;
> -                    s->mvscale[i][1] = (refh << 14) / h;
> -                    s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
> -                    s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
> -                }
> -            }
>          }
>      }
>      s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
> @@ -748,6 +768,35 @@ static int decode_frame_header(AVCodecContext *ctx,
>              return AVERROR(ENOMEM);
>          }
>      }
> +
> +    /* check reference frames */
> +    if (!s->s.h.keyframe && !s->s.h.intraonly) {
> +        for (i = 0; i < 3; i++) {
> +            AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
> +            int refw = ref->width, refh = ref->height;
> +
> +            if (ref->format != ctx->pix_fmt) {
> +                av_log(ctx, AV_LOG_ERROR,
> +                       "Ref pixfmt (%s) did not match current frame (%s)",
> +                       av_get_pix_fmt_name(ref->format),
> +                       av_get_pix_fmt_name(ctx->pix_fmt));
> +                return AVERROR_INVALIDDATA;
> +            } else if (refw == w && refh == h) {
> +                s->mvscale[i][0] = s->mvscale[i][1] = 0;
> +            } else {
> +                if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
> +                    av_log(ctx, AV_LOG_ERROR,
> +                           "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
> +                           refw, refh, w, h);
> +                    return AVERROR_INVALIDDATA;
> +                }
> +                s->mvscale[i][0] = (refw << 14) / w;
> +                s->mvscale[i][1] = (refh << 14) / h;
> +                s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
> +                s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
> +            }
> +        }
> +    }
>
>      if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
>          s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
> @@ -3979,6 +4028,19 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
>          return res;
>      }
>
> +    if (ctx->hwaccel) {
> +        res = ctx->hwaccel->start_frame(ctx, NULL, 0);
> +        if (res < 0)
> +            return res;
> +        res = ctx->hwaccel->decode_slice(ctx, pkt->data, pkt->size);
> +        if (res < 0)
> +            return res;
> +        res = ctx->hwaccel->end_frame(ctx);
> +        if (res < 0)
> +            return res;
> +        goto finish;
> +    }
> +
>      // main tile decode loop
>      bytesperpixel = s->bytesperpixel;
>      memset(s->above_partition_ctx, 0, s->cols);
> @@ -4148,6 +4210,7 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
>      } while (s->pass++ == 1);
>      ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
>
> +finish:
>      // ref frame setup
>      for (i = 0; i < 8; i++) {
>          if (s->s.refs[i].f->buf[0])
> diff --git a/libavcodec/vp9.h b/libavcodec/vp9.h
> index 27cdfc6..df5bd4d 100644
> --- a/libavcodec/vp9.h
> +++ b/libavcodec/vp9.h
> @@ -129,6 +129,9 @@ typedef struct VP9Frame {
>      uint8_t *segmentation_map;
>      struct VP9mvrefPair *mv;
>      int uses_2pass;
> +
> +    AVBufferRef *hwaccel_priv_buf;
> +    void *hwaccel_picture_private;
>  } VP9Frame;
>
>  typedef struct VP9BitstreamHeader {

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel