ffmpeg | branch: master | James Almer <jamr...@gmail.com> | Mon Oct 30 16:03:27 2017 -0300| [f9c3fbc00cd6c8887e0e66cec275ade881130adf] | committer: James Almer
Merge commit '3d69dd65c6771c28d3bf4e8e53a905aa8cd01fd9'

* commit '3d69dd65c6771c28d3bf4e8e53a905aa8cd01fd9':
  hevc: Add support for bitdepth 10 for IDCT DC

Merged-by: James Almer <jamr...@gmail.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=f9c3fbc00cd6c8887e0e66cec275ade881130adf
---
 libavcodec/arm/hevcdsp_idct_neon.S | 40 ++++++++++++++++++++++++++------------
 libavcodec/arm/hevcdsp_init_neon.c |  9 +++++++++
 2 files changed, 37 insertions(+), 12 deletions(-)

diff --git a/libavcodec/arm/hevcdsp_idct_neon.S b/libavcodec/arm/hevcdsp_idct_neon.S
index 8f831a37f2..e8f18d42a7 100644
--- a/libavcodec/arm/hevcdsp_idct_neon.S
+++ b/libavcodec/arm/hevcdsp_idct_neon.S
@@ -159,26 +159,29 @@ function ff_hevc_transform_luma_4x4_neon_8
         bx lr
 endfunc
 
-function ff_hevc_idct_4x4_dc_8_neon, export=1
+.macro idct_4x4_dc bitdepth
+function ff_hevc_idct_4x4_dc_\bitdepth\()_neon, export=1
         ldrsh r1, [r0]
-        ldr r2, =0x20
+        ldr r2, =(1 << (13 - \bitdepth))
         add r1, #1
         asr r1, #1
         add r1, r2
-        asr r1, #6
+        asr r1, #(14 - \bitdepth)
         vdup.16 q0, r1
         vdup.16 q1, r1
         vst1.16 {q0, q1}, [r0, :128]
         bx lr
 endfunc
+.endm
 
-function ff_hevc_idct_8x8_dc_8_neon, export=1
+.macro idct_8x8_dc bitdepth
+function ff_hevc_idct_8x8_dc_\bitdepth\()_neon, export=1
         ldrsh r1, [r0]
-        ldr r2, =0x20
+        ldr r2, =(1 << (13 - \bitdepth))
         add r1, #1
         asr r1, #1
         add r1, r2
-        asr r1, #6
+        asr r1, #(14 - \bitdepth)
         vdup.16 q8, r1
         vdup.16 q9, r1
         vmov.16 q10, q8
@@ -190,14 +193,16 @@ function ff_hevc_idct_8x8_dc_8_neon, export=1
         vstm r0, {q8-q15}
         bx lr
 endfunc
+.endm
 
-function ff_hevc_idct_16x16_dc_8_neon, export=1
+.macro idct_16x16_dc bitdepth
+function ff_hevc_idct_16x16_dc_\bitdepth\()_neon, export=1
         ldrsh r1, [r0]
-        ldr r2, =0x20
+        ldr r2, =(1 << (13 - \bitdepth))
         add r1, #1
         asr r1, #1
         add r1, r2
-        asr r1, #6
+        asr r1, #(14 - \bitdepth)
         vdup.16 q8, r1
         vdup.16 q9, r1
         vmov.16 q10, q8
@@ -212,14 +217,16 @@ function ff_hevc_idct_16x16_dc_8_neon, export=1
         vstm r0, {q8-q15}
         bx lr
 endfunc
+.endm
 
-function ff_hevc_idct_32x32_dc_8_neon, export=1
+.macro idct_32x32_dc bitdepth
+function ff_hevc_idct_32x32_dc_\bitdepth\()_neon, export=1
         ldrsh r1, [r0]
-        ldr r2, =0x20
+        ldr r2, =(1 << (13 - \bitdepth))
         add r1, #1
         asr r1, #1
         add r1, r2
-        asr r1, #6
+        asr r1, #(14 - \bitdepth)
         mov r3, #16
         vdup.16 q8, r1
         vdup.16 q9, r1
@@ -234,6 +241,7 @@ function ff_hevc_idct_32x32_dc_8_neon, export=1
         bne 1b
         bx lr
 endfunc
+.endm
 
 .macro sum_sub out, in, c, op
 .ifc \op, +
@@ -625,8 +633,16 @@ tr_16x4 secondpass_10, 20 - 10
 .ltorg
 
 idct_4x4 8
+idct_4x4_dc 8
 idct_4x4 10
+idct_4x4_dc 10
 idct_8x8 8
+idct_8x8_dc 8
 idct_8x8 10
+idct_8x8_dc 10
 idct_16x16 8
+idct_16x16_dc 8
 idct_16x16 10
+idct_16x16_dc 10
+idct_32x32_dc 8
+idct_32x32_dc 10
diff --git a/libavcodec/arm/hevcdsp_init_neon.c b/libavcodec/arm/hevcdsp_init_neon.c
index 9537561513..740de86bb2 100644
--- a/libavcodec/arm/hevcdsp_init_neon.c
+++ b/libavcodec/arm/hevcdsp_init_neon.c
@@ -31,6 +31,10 @@ void ff_hevc_idct_4x4_dc_8_neon(int16_t *coeffs);
 void ff_hevc_idct_8x8_dc_8_neon(int16_t *coeffs);
 void ff_hevc_idct_16x16_dc_8_neon(int16_t *coeffs);
 void ff_hevc_idct_32x32_dc_8_neon(int16_t *coeffs);
+void ff_hevc_idct_4x4_dc_10_neon(int16_t *coeffs);
+void ff_hevc_idct_8x8_dc_10_neon(int16_t *coeffs);
+void ff_hevc_idct_16x16_dc_10_neon(int16_t *coeffs);
+void ff_hevc_idct_32x32_dc_10_neon(int16_t *coeffs);
 void ff_hevc_idct_4x4_8_neon(int16_t *coeffs, int col_limit);
 void ff_hevc_idct_8x8_8_neon(int16_t *coeffs, int col_limit);
 void ff_hevc_idct_16x16_8_neon(int16_t *coeffs, int col_limit);
@@ -228,6 +232,11 @@ av_cold void ff_hevc_dsp_init_neon(HEVCDSPContext *c, const int bit_depth)
     }
 
     if (bit_depth == 10) {
+        c->idct_dc[0]                  = ff_hevc_idct_4x4_dc_10_neon;
+        c->idct_dc[1]                  = ff_hevc_idct_8x8_dc_10_neon;
+        c->idct_dc[2]                  = ff_hevc_idct_16x16_dc_10_neon;
+        c->idct_dc[3]                  = ff_hevc_idct_32x32_dc_10_neon;
+
         c->idct[0]                     = ff_hevc_idct_4x4_10_neon;
         c->idct[1]                     = ff_hevc_idct_8x8_10_neon;
         c->idct[2]                     = ff_hevc_idct_16x16_10_neon;
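
The four ff_hevc_idct_*_dc_10_neon entry points wired up above all come from the
bitdepth-parameterised macros added to hevcdsp_idct_neon.S: each one reads the single
DC coefficient, rounds it with a bit-depth-dependent constant, and broadcasts the
result over the whole block. The scalar C sketch below models that arithmetic; the
function name, parameters and loop are illustrative only and are not the FFmpeg C
template.

#include <stdint.h>

/* Scalar model of the NEON DC-only IDCT (illustrative, assumed names):
 * compute one rounded DC value and broadcast it across the whole
 * size x size coefficient block. size is 4, 8, 16 or 32. */
static void idct_dc_sketch(int16_t *coeffs, int size, int bitdepth)
{
    int shift = 14 - bitdepth;          /* asr r1, #(14 - \bitdepth) */
    int add   = 1 << (13 - bitdepth);   /* ldr r2, =(1 << (13 - \bitdepth)) */
    int dc    = (((coeffs[0] + 1) >> 1) + add) >> shift;

    for (int y = 0; y < size; y++)
        for (int x = 0; x < size; x++)
            coeffs[y * size + x] = dc;
}

For 8-bit content this evaluates to the constants the pre-merge code hard-wired
(add = 1 << 5 = 0x20, shift = 6); for 10-bit the macro expansions instead load 8 into
r2 and shift right by 4. The rounding itself is unchanged, so the bit depth only
selects those two constants.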
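
The dispatch side needs no change for this: idct_dc[] is indexed 0 to 3 for 4x4
through 32x32, exactly as the assignments above fill it, and whether the 8-bit or the
new 10-bit NEON routine ends up in the table is decided once in
ff_hevc_dsp_init_neon() from bit_depth. A hypothetical call site (helper and variable
names here are illustrative, not taken from hevcdec.c) would look like:

#include <stdint.h>

#include "libavcodec/hevcdsp.h"

/* Illustrative dispatch: run the DC-only IDCT for a transform of size
 * (1 << log2_size), with log2_size in 2..5, i.e. 4x4 up to 32x32. */
static void run_idct_dc(const HEVCDSPContext *c, int log2_size, int16_t *coeffs)
{
    c->idct_dc[log2_size - 2](coeffs);
}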