The new_picture field is currently a "Picture", an mpegvideo-specific type that has a lot of baggage, all of which is unnecessary for new_picture, because only its embedded AVFrame is ever used. So just use an ordinary AVFrame.
Signed-off-by: Andreas Rheinhardt <andreas.rheinha...@outlook.com> --- libavcodec/motion_est.c | 12 ++++----- libavcodec/mpeg4videoenc.c | 2 +- libavcodec/mpegvideo.c | 4 +-- libavcodec/mpegvideo.h | 4 +-- libavcodec/mpegvideo_enc.c | 50 +++++++++++++++++++------------------- libavcodec/snowenc.c | 2 +- libavcodec/svq1enc.c | 4 +-- 7 files changed, 39 insertions(+), 39 deletions(-) diff --git a/libavcodec/motion_est.c b/libavcodec/motion_est.c index 3df1e4f40a..69e1455c3e 100644 --- a/libavcodec/motion_est.c +++ b/libavcodec/motion_est.c @@ -681,7 +681,7 @@ static inline int h263_mv4_search(MPVEncContext *s, int mx, int my, int shift) if (s->mecc.me_sub_cmp[0] != s->mecc.mb_cmp[0]) { dmin_sum += s->mecc.mb_cmp[0](s, - s->new_picture.f->data[0] + + s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * 16 * stride, c->scratchpad, stride, 16); } @@ -705,8 +705,8 @@ static inline int h263_mv4_search(MPVEncContext *s, int mx, int my, int shift) s->hdsp.put_pixels_tab [1][dxy](c->scratchpad + 8, s->last_picture.f->data[2] + offset, s->uvlinesize, 8); } - dmin_sum += s->mecc.mb_cmp[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * 8 * s->uvlinesize, c->scratchpad, s->uvlinesize, 8); - dmin_sum += s->mecc.mb_cmp[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * 8 * s->uvlinesize, c->scratchpad + 8, s->uvlinesize, 8); + dmin_sum += s->mecc.mb_cmp[1](s, s->new_picture->data[1] + s->mb_x * 8 + s->mb_y * 8 * s->uvlinesize, c->scratchpad, s->uvlinesize, 8); + dmin_sum += s->mecc.mb_cmp[1](s, s->new_picture->data[2] + s->mb_x * 8 + s->mb_y * 8 * s->uvlinesize, c->scratchpad + 8, s->uvlinesize, 8); } c->pred_x= mx; @@ -895,7 +895,7 @@ void ff_estimate_p_frame_motion(MPVEncContext *s, int mb_type=0; Picture * const pic= &s->current_picture; - init_ref(c, s->new_picture.f->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0); + init_ref(c, s->new_picture->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0); av_assert0(s->quarter_sample==0 || 
s->quarter_sample==1); av_assert0(s->linesize == c->stride); @@ -1066,7 +1066,7 @@ int ff_pre_estimate_p_frame_motion(MPVEncContext *s, int P[10][2]; const int shift= 1+s->quarter_sample; const int xy= mb_x + mb_y*s->mb_stride; - init_ref(c, s->new_picture.f->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0); + init_ref(c, s->new_picture->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0); av_assert0(s->quarter_sample==0 || s->quarter_sample==1); @@ -1495,7 +1495,7 @@ void ff_estimate_b_frame_motion(MPVEncContext *s, int fmin, bmin, dmin, fbmin, bimin, fimin; int type=0; const int xy = mb_y*s->mb_stride + mb_x; - init_ref(c, s->new_picture.f->data, s->last_picture.f->data, + init_ref(c, s->new_picture->data, s->last_picture.f->data, s->next_picture.f->data, 16 * mb_x, 16 * mb_y, 2); get_limits(s, 16*mb_x, 16*mb_y); diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c index 2f78763344..81fbd918ca 100644 --- a/libavcodec/mpeg4videoenc.c +++ b/libavcodec/mpeg4videoenc.c @@ -646,7 +646,7 @@ void ff_mpeg4_encode_mb(MPVEncContext *s, int16_t block[6][64], y = s->mb_y * 16; offset = x + y * s->linesize; - p_pic = s->new_picture.f->data[0] + offset; + p_pic = s->new_picture->data[0] + offset; s->mb_skipped = 1; for (i = 0; i < m->max_b_frames; i++) { diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index 7314f1b39e..d0fbdaf1b4 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -721,7 +721,7 @@ av_cold int ff_mpv_common_init(MPVMainContext *m) if (!(s->next_picture.f = av_frame_alloc()) || !(s->last_picture.f = av_frame_alloc()) || !(s->current_picture.f = av_frame_alloc()) || - !(s->new_picture.f = av_frame_alloc())) + !(s->new_picture = av_frame_alloc())) goto fail_nomem; if ((ret = ff_mpv_init_context_frame(m))) @@ -803,7 +803,7 @@ void ff_mpv_common_end(MPVMainContext *m) ff_mpv_picture_free(s->avctx, &s->last_picture); ff_mpv_picture_free(s->avctx, &s->current_picture); ff_mpv_picture_free(s->avctx, 
&s->next_picture); - ff_mpv_picture_free(s->avctx, &s->new_picture); + av_frame_free(&s->new_picture); m->context_initialized = 0; m->context_reinit = 0; diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h index ba081a9087..2d5ee61a27 100644 --- a/libavcodec/mpegvideo.h +++ b/libavcodec/mpegvideo.h @@ -130,10 +130,10 @@ typedef struct MPVContext { Picture next_picture; /** - * copy of the source picture structure for encoding. + * Reference to the source picture for encoding. * note, linesize & data, might not match the source picture (for field pictures) */ - Picture new_picture; + AVFrame *new_picture; /** * copy of the current picture structure. diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c index 1d1cd6e3d3..ddfd123c46 100644 --- a/libavcodec/mpegvideo_enc.c +++ b/libavcodec/mpegvideo_enc.c @@ -995,7 +995,7 @@ av_cold int ff_mpv_encode_end(AVCodecContext *avctx) for (int i = 0; i < FF_ARRAY_ELEMS(m->tmp_frames); i++) av_frame_free(&m->tmp_frames[i]); - ff_mpv_picture_free(avctx, &s->new_picture); + av_frame_free(&s->new_picture); av_freep(&avctx->stats_out); @@ -1555,15 +1555,15 @@ static int select_input_picture(MPVMainEncContext *m) } } no_output_pic: - ff_mpeg_unref_picture(s->avctx, &s->new_picture); + av_frame_unref(s->new_picture); if (m->reordered_input_picture[0]) { m->reordered_input_picture[0]->reference = m->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_B ? 
3 : 0; - if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, - m->reordered_input_picture[0]))) + if ((ret = av_frame_ref(s->new_picture, + m->reordered_input_picture[0]->f))) return ret; if (m->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) { @@ -1594,8 +1594,8 @@ no_output_pic: // input is not a shared pix -> reuse buffer for current_pix s->current_picture_ptr = m->reordered_input_picture[0]; for (i = 0; i < 4; i++) { - if (s->new_picture.f->data[i]) - s->new_picture.f->data[i] += INPLACE_OFFSET; + if (s->new_picture->data[i]) + s->new_picture->data[i] += INPLACE_OFFSET; } } ff_mpeg_unref_picture(s->avctx, &s->current_picture); @@ -1603,7 +1603,7 @@ no_output_pic: s->current_picture_ptr)) < 0) return ret; - s->picture_number = s->new_picture.f->display_picture_number; + s->picture_number = s->new_picture->display_picture_number; } return 0; } @@ -1761,7 +1761,7 @@ int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, } /* output? */ - if (s->new_picture.f->data[0]) { + if (s->new_picture->data[0]) { int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning; int pkt_size = growing_buffer ? 
FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE : @@ -1785,7 +1785,7 @@ int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, init_put_bits(&m2->thread_context[i]->pb, start, end - start); } - s->pict_type = s->new_picture.f->pict_type; + s->pict_type = s->new_picture->pict_type; //emms_c(); ret = frame_start(m); if (ret < 0) @@ -2151,11 +2151,11 @@ static av_always_inline void encode_mb_internal(MPVEncContext *s, wrap_y = s->linesize; wrap_c = s->uvlinesize; - ptr_y = s->new_picture.f->data[0] + + ptr_y = s->new_picture->data[0] + (mb_y * 16 * wrap_y) + mb_x * 16; - ptr_cb = s->new_picture.f->data[1] + + ptr_cb = s->new_picture->data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width; - ptr_cr = s->new_picture.f->data[2] + + ptr_cr = s->new_picture->data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width; if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){ @@ -2645,18 +2645,18 @@ static int sse_mb(MPVEncContext *s) if(w==16 && h==16) if(s->avctx->mb_cmp == FF_CMP_NSSE){ - return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) + - s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) + - s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8); + return s->mecc.nsse[0](s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) + + s->mecc.nsse[1](s, s->new_picture->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) + + s->mecc.nsse[1](s, s->new_picture->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8); }else{ - return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], 
s->linesize, 16) + - s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) + - s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8); + return s->mecc.sse[0](NULL, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) + + s->mecc.sse[1](NULL, s->new_picture->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) + + s->mecc.sse[1](NULL, s->new_picture->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8); } else - return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize) - +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize) - +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize); + return sse(s, s->new_picture->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize) + +sse(s, s->new_picture->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize) + +sse(s, s->new_picture->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize); } static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){ @@ -2711,7 +2711,7 @@ static int mb_var_thread(AVCodecContext *c, void *arg){ for(mb_x=0; mb_x < s->mb_width; mb_x++) { int xx = mb_x * 16; int yy = mb_y * 16; - uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx; + uint8_t *pix = s->new_picture->data[0] + (yy * s->linesize) + xx; int varc; int sum = s->mpvencdsp.pix_sum(pix, s->linesize); @@ -3433,13 +3433,13 @@ static int encode_thread(AVCodecContext *c, void *arg){ if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16; s->current_picture.encoding_error[0] += sse( - s, s->new_picture.f->data[0] + s->mb_x*16 + 
s->mb_y*s->linesize*16, + s, s->new_picture->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize); s->current_picture.encoding_error[1] += sse( - s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, + s, s->new_picture->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize); s->current_picture.encoding_error[2] += sse( - s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, + s, s->new_picture->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize); } if(s->loop_filter){ diff --git a/libavcodec/snowenc.c b/libavcodec/snowenc.c index 7200d94f65..fa0bb77994 100644 --- a/libavcodec/snowenc.c +++ b/libavcodec/snowenc.c @@ -1653,7 +1653,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, mpv->avctx= s->avctx; mpv-> last_picture.f = s->last_picture[0]; - mpv-> new_picture.f = s->input_picture; + mpv-> new_picture = s->input_picture; mpv-> last_picture_ptr= &mpv-> last_picture; mpv->linesize = stride; mpv->uvlinesize= s->current_picture->linesize[1]; diff --git a/libavcodec/svq1enc.c b/libavcodec/svq1enc.c index 3a84bb6a07..64c856ddb5 100644 --- a/libavcodec/svq1enc.c +++ b/libavcodec/svq1enc.c @@ -278,7 +278,7 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane, mpv->last_picture.f->data[0] = ref_plane; mpv->linesize = mpv->last_picture.f->linesize[0] = - mpv->new_picture.f->linesize[0] = + mpv->new_picture->linesize[0] = mpv->current_picture.f->linesize[0] = stride; mpv->width = width; mpv->height = height; @@ -328,7 +328,7 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane, mpv->me.dia_size = s->avctx->dia_size; mpv->first_slice_line = 1; for (y = 0; y < block_height; y++) { - mpv->new_picture.f->data[0] = src - y * 16 * stride; // ugly + mpv->new_picture->data[0] = src - y * 16 * stride; // ugly mpv->mb_y = y; for (i = 0; i < 16 && i + 16 * y < height; i++) { -- 2.32.0 
_______________________________________________ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".