Le sunnuntaina 5. toukokuuta 2024, 19.45.28 EEST u...@foxmail.com a écrit : > From: sunyuechi <sunyue...@iscas.ac.cn> > > C908: > vp8_put_bilin4_h_c: 373.5 > vp8_put_bilin4_h_rvv_i32: 158.7 > vp8_put_bilin8_h_c: 1437.7 > vp8_put_bilin8_h_rvv_i32: 318.7 > vp8_put_bilin16_h_c: 2845.7 > vp8_put_bilin16_h_rvv_i32: 374.7 > --- > libavcodec/riscv/vp8dsp_init.c | 14 +++++++++++ > libavcodec/riscv/vp8dsp_rvv.S | 45 ++++++++++++++++++++++++++++++++++ > 2 files changed, 59 insertions(+) > > diff --git a/libavcodec/riscv/vp8dsp_init.c b/libavcodec/riscv/vp8dsp_init.c > index fa3feeacf7..778d5ceb29 100644 > --- a/libavcodec/riscv/vp8dsp_init.c > +++ b/libavcodec/riscv/vp8dsp_init.c > @@ -34,6 +34,10 @@ VP8_EPEL(16, rvi); > VP8_EPEL(8, rvi); > VP8_EPEL(4, rvi); > > +VP8_BILIN(16, rvv); > +VP8_BILIN(8, rvv); > +VP8_BILIN(4, rvv); > + > av_cold void ff_vp78dsp_init_riscv(VP8DSPContext *c) > { > #if HAVE_RV > @@ -48,6 +52,16 @@ av_cold void ff_vp78dsp_init_riscv(VP8DSPContext *c) > c->put_vp8_epel_pixels_tab[2][0][0] = ff_put_vp8_pixels4_rvi; > c->put_vp8_bilinear_pixels_tab[2][0][0] = ff_put_vp8_pixels4_rvi; > } > +#if HAVE_RVV > + if (flags & AV_CPU_FLAG_RVV_I32 && ff_get_rv_vlenb() >= 16) { > + c->put_vp8_bilinear_pixels_tab[0][0][1] = ff_put_vp8_bilin16_h_rvv; > + c->put_vp8_bilinear_pixels_tab[0][0][2] = > ff_put_vp8_bilin16_h_rvv; + c->put_vp8_bilinear_pixels_tab[1][0][1] > = ff_put_vp8_bilin8_h_rvv; + c->put_vp8_bilinear_pixels_tab[1][0][2] > = ff_put_vp8_bilin8_h_rvv; + c->put_vp8_bilinear_pixels_tab[2][0][1] > = ff_put_vp8_bilin4_h_rvv; + c->put_vp8_bilinear_pixels_tab[2][0][2] > = ff_put_vp8_bilin4_h_rvv; + } > +#endif > #endif > } > > diff --git a/libavcodec/riscv/vp8dsp_rvv.S b/libavcodec/riscv/vp8dsp_rvv.S > index 8a0773f964..760d9d3871 100644 > --- a/libavcodec/riscv/vp8dsp_rvv.S > +++ b/libavcodec/riscv/vp8dsp_rvv.S > @@ -20,6 +20,18 @@ > > #include "libavutil/riscv/asm.S" > > +.macro vsetvlstatic8 len > +.if \len <= 4 > + vsetivli zero, \len, e8, mf4, ta, ma > 
+.elseif \len <= 8 > + vsetivli zero, \len, e8, mf2, ta, ma > +.elseif \len <= 16 > + vsetivli zero, \len, e8, m1, ta, ma > +.elseif \len <= 31 > + vsetivli zero, \len, e8, m2, ta, ma > +.endif > +.endm > + > .macro vp8_idct_dc_add > vlse32.v v0, (a0), a2 > lh a5, 0(a1) > @@ -71,3 +83,36 @@ func ff_vp8_idct_dc_add4uv_rvv, zve32x > > ret > endfunc > + > +.macro bilin_h_load dst len > + vsetvlstatic8 \len + 1 > + vle8.v \dst, (a2) > + vslide1down.vx v2, \dst, t5 > + vsetvlstatic8 \len
Doesn't this effectively discard the last element, t5? Can't we skip the slide and just load the vector at a2+1? Also then, we can keep VL=len and halve the multiplier. > + vwmulu.vx v28, \dst, t1 > + vwmaccu.vx v28, a5, v2 > + vwaddu.wx v24, v28, t4 > + vnsra.wi \dst, v24, 3 > +.endm > + > +.macro put_vp8_bilin_h len > +func ff_put_vp8_bilin\len\()_h_rvv, zve32x > + li t1, 8 > + li t4, 4 > + li t5, 1 > + sub t1, t1, a5 > +1: > + addi a4, a4, -1 > + bilin_h_load v0, \len > + vse8.v v0, (a0) > + add a2, a2, a3 > + add a0, a0, a1 > + bnez a4, 1b > + > + ret > +endfunc > +.endm > + > +.irp len 16,8,4 > +put_vp8_bilin_h \len > +.endr -- レミ・デニ-クールモン http://www.remlab.net/ _______________________________________________ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".