From: Zhao Zhili <zhiliz...@tencent.com>

Test on Apple M1 with kperf:
abgr_to_uv_8_c: 19.4
abgr_to_uv_8_neon: 29.9
abgr_to_uv_128_c: 146.4
abgr_to_uv_128_neon: 85.1
abgr_to_uv_1080_c: 1162.6
abgr_to_uv_1080_neon: 819.6
abgr_to_uv_1920_c: 2063.6
abgr_to_uv_1920_neon: 1435.1
abgr_to_uv_half_8_c: 16.4
abgr_to_uv_half_8_neon: 35.6
abgr_to_uv_half_128_c: 108.6
abgr_to_uv_half_128_neon: 75.4
abgr_to_uv_half_1080_c: 883.4
abgr_to_uv_half_1080_neon: 460.6
abgr_to_uv_half_1920_c: 1553.6
abgr_to_uv_half_1920_neon: 817.6
abgr_to_y_8_c: 6.1
abgr_to_y_8_neon: 40.6
abgr_to_y_128_c: 99.9
abgr_to_y_128_neon: 67.4
abgr_to_y_1080_c: 735.9
abgr_to_y_1080_neon: 534.6
abgr_to_y_1920_c: 1279.4
abgr_to_y_1920_neon: 932.6

With vectorization disabled in clang:

abgr_to_uv_8_c: 20.9
abgr_to_uv_8_neon: 44.1
abgr_to_uv_128_c: 555.6
abgr_to_uv_128_neon: 105.9
abgr_to_uv_1080_c: 4773.1
abgr_to_uv_1080_neon: 846.1
abgr_to_uv_1920_c: 8499.9
abgr_to_uv_1920_neon: 1436.6
abgr_to_uv_half_8_c: 13.9
abgr_to_uv_half_8_neon: 167.9
abgr_to_uv_half_128_c: 338.9
abgr_to_uv_half_128_neon: 60.6
abgr_to_uv_half_1080_c: 2906.9
abgr_to_uv_half_1080_neon: 466.1
abgr_to_uv_half_1920_c: 5084.4
abgr_to_uv_half_1920_neon: 832.9
abgr_to_y_8_c: 9.6
abgr_to_y_8_neon: 18.6
abgr_to_y_128_c: 372.4
abgr_to_y_128_neon: 61.4
abgr_to_y_1080_c: 3168.9
abgr_to_y_1080_neon: 513.9
abgr_to_y_1920_c: 6187.6
abgr_to_y_1920_neon: 918.9
---
 libswscale/aarch64/input.S   | 104 ++++++++++++++++++++++++++++-------
 libswscale/aarch64/swscale.c |  17 ++++++
 2 files changed, 100 insertions(+), 21 deletions(-)

diff --git a/libswscale/aarch64/input.S b/libswscale/aarch64/input.S
index 37f1158504..b4e827dd50 100644
--- a/libswscale/aarch64/input.S
+++ b/libswscale/aarch64/input.S
@@ -34,6 +34,16 @@
         uxtl2           v24.8h, v18.16b           // v24: b
 .endm
 
+.macro argb_to_yuv_load_rgb src
+        ld4             { v16.16b, v17.16b, v18.16b, v19.16b }, [\src]
+        uxtl            v21.8h, v19.8b            // v21: b
+        uxtl2           v24.8h, v19.16b           // v24: b
+        uxtl            v19.8h, v17.8b            // v19: r
+        uxtl            v20.8h, v18.8b            // v20: g
+        uxtl2           v22.8h, v17.16b           // v22: r
+        uxtl2           v23.8h, v18.16b           // v23: g
+.endm
+
 .macro rgb_to_yuv_product r, g, b, dst1, dst2, dst, coef0, coef1, coef2, right_shift
         mov             \dst1\().16b, v6.16b      // dst1 = const_offset
         mov             \dst2\().16b, v6.16b      // dst2 = const_offset
@@ -47,7 +57,7 @@
         sqshrn2         \dst\().8h, \dst2\().4s, \right_shift // dst_higher_half = dst2 >> right_shift
 .endm
 
-.macro rgbToY bgr, element=3
+.macro rgbToY bgr, element=3, alpha_first=0
         cmp             w4, #0                    // check width > 0
 .if \bgr
         ldr             w12, [x5]                 // w12: ry
@@ -71,7 +81,11 @@
         dup             v2.8h, w12
         b.lt            2f
 1:
+    .if \alpha_first
+        argb_to_yuv_load_rgb x1
+    .else
         rgb_to_yuv_load_rgb x1, \element
+    .endif
         rgb_to_yuv_product v19, v20, v21, v25, v26, v16, v0, v1, v2, #9
         rgb_to_yuv_product v22, v23, v24, v27, v28, v17, v0, v1, v2, #9
         sub             w4, w4, #16               // width -= 16
@@ -85,9 +99,15 @@
         b.ge            1b
         cbz             x4, 3f
 2:
+    .if \alpha_first
+        ldrb            w13, [x1, #1]             // w13: r
+        ldrb            w14, [x1, #2]             // w14: g
+        ldrb            w15, [x1, #3]             // w15: b
+    .else
         ldrb            w13, [x1]                 // w13: r
         ldrb            w14, [x1, #1]             // w14: g
         ldrb            w15, [x1, #2]             // w15: b
+    .endif
 
         smaddl          x13, w13, w10, x9         // x13 = ry * r + const_offset
         smaddl          x13, w14, w11, x13        // x13 += gy * g
@@ -101,6 +121,14 @@
         ret
 .endm
 
+function ff_argb32ToY_neon, export=1
+        rgbToY bgr=0, element=4, alpha_first=1
+endfunc
+
+function ff_abgr32ToY_neon, export=1
+        rgbToY bgr=1, element=4, alpha_first=1
+endfunc
+
 function ff_rgb24ToY_neon, export=1
         rgbToY bgr=0
 endfunc
@@ -146,7 +174,21 @@ endfunc
         dup             v6.4s, w9
 .endm
 
-.macro rgbToUV_half bgr, element=3
+.macro rgb_load_add_half off_r1, off_r2, off_g1, off_g2, off_b1, off_b2
+        ldrb            w2, [x3, \off_r1]         // w2: r1
+        ldrb            w4, [x3, \off_r2]         // w4: r2
+        add             w2, w2, w4                // w2 = r1 + r2
+
+        ldrb            w4, [x3, \off_g1]         // w4: g1
+        ldrb            w7, [x3, \off_g2]         // w7: g2
+        add             w4, w4, w7                // w4 = g1 + g2
+
+        ldrb            w7, [x3, \off_b1]         // w7: b1
+        ldrb            w8, [x3, \off_b2]         // w8: b2
+        add             w7, w7, w8                // w7 = b1 + b2
+.endm
+
+.macro rgbToUV_half bgr, element=3, alpha_first=0
         cmp             w5, #0                    // check width > 0
         b.le            3f
 
@@ -160,9 +202,15 @@ endfunc
 .else
         ld4             { v16.16b, v17.16b, v18.16b, v19.16b }, [x3]
 .endif
+    .if \alpha_first
+        uaddlp          v21.8h, v19.16b
+        uaddlp          v20.8h, v18.16b
+        uaddlp          v19.8h, v17.16b
+    .else
         uaddlp          v19.8h, v16.16b           // v19: r
         uaddlp          v20.8h, v17.16b           // v20: g
         uaddlp          v21.8h, v18.16b           // v21: b
+    .endif
 
         rgb_to_yuv_product v19, v20, v21, v22, v23, v16, v0, v1, v2, #10
         rgb_to_yuv_product v19, v20, v21, v24, v25, v17, v3, v4, v5, #10
@@ -178,27 +226,15 @@ endfunc
         b.ge            1b
         cbz             w5, 3f
 2:
-        ldrb            w2, [x3]                  // w2: r1
-        ldrb            w4, [x3, \element]        // w4: r2
-        add             w2, w2, w4                // w2 = r1 + r2
-
+.if \alpha_first
+        rgb_load_add_half 1, 5, 2, 6, 3, 7
+.else
 .if \element == 3
-        ldrb            w4, [x3, #1]              // w4: g1
-        ldrb            w7, [x3, #4]              // w7: g2
-        add             w4, w4, w7                // w4 = g1 + g2
-
-        ldrb            w7, [x3, #2]              // w7: b1
-        ldrb            w8, [x3, #5]              // w8: b2
-        add             w7, w7, w8                // w7 = b1 + b2
+        rgb_load_add_half 0, 3, 1, 4, 2, 5
 .else
-        ldrb            w4, [x3, #1]              // w4: g1
-        ldrb            w7, [x3, #5]              // w7: g2
-        add             w4, w4, w7                // w4 = g1 + g2
-
-        ldrb            w7, [x3, #2]              // w7: b1
-        ldrb            w8, [x3, #6]              // w8: b2
-        add             w7, w7, w8                // w7 = b1 + b2
+        rgb_load_add_half 0, 4, 1, 5, 2, 6
 .endif
+.endif
 
         smaddl          x8, w2, w10, x9           // dst_u = ru * r + const_offset
         smaddl          x8, w4, w11, x8           // dst_u += gu * g
@@ -223,6 +259,14 @@ endfunc
         ret
 .endm
 
+function ff_argb32ToUV_half_neon, export=1
+        rgbToUV_half bgr=0, element=4, alpha_first=1
+endfunc
+
+function ff_abgr32ToUV_half_neon, export=1
+        rgbToUV_half bgr=1, element=4, alpha_first=1
+endfunc
+
 function ff_rgb24ToUV_half_neon, export=1
         rgbToUV_half bgr=0
 endfunc
@@ -239,7 +283,7 @@ function ff_bgra32ToUV_half_neon, export=1
         rgbToUV_half bgr=1, element=4
 endfunc
 
-.macro rgbToUV bgr, element=3
+.macro rgbToUV bgr, element=3, alpha_first=0
         cmp             w5, #0                    // check width > 0
         b.le            3f
 
@@ -248,7 +292,11 @@ endfunc
         b.lt            2f
         // The following comments assume RGB order. The logic for RGB and BGR is the same.
 1:
+    .if \alpha_first
+        argb_to_yuv_load_rgb x3
+    .else
         rgb_to_yuv_load_rgb x3, \element
+    .endif
         rgb_to_yuv_product v19, v20, v21, v25, v26, v16, v0, v1, v2, #9
         rgb_to_yuv_product v22, v23, v24, v27, v28, v17, v0, v1, v2, #9
         rgb_to_yuv_product v19, v20, v21, v25, v26, v18, v3, v4, v5, #9
@@ -265,9 +313,15 @@ endfunc
         b.ge            1b
         cbz             w5, 3f
 2:
+    .if \alpha_first
+        ldrb            w16, [x3, #1]             // w16: r
+        ldrb            w17, [x3, #2]             // w17: g
+        ldrb            w4, [x3, #3]              // w4: b
+    .else
         ldrb            w16, [x3]                 // w16: r
         ldrb            w17, [x3, #1]             // w17: g
         ldrb            w4, [x3, #2]              // w4: b
+    .endif
 
         smaddl          x8, w16, w10, x9          // x8 = ru * r + const_offset
         smaddl          x8, w17, w11, x8          // x8 += gu * g
@@ -287,6 +341,14 @@ endfunc
         ret
 .endm
 
+function ff_argb32ToUV_neon, export=1
+        rgbToUV bgr=0, element=4, alpha_first=1
+endfunc
+
+function ff_abgr32ToUV_neon, export=1
+        rgbToUV bgr=1, element=4, alpha_first=1
+endfunc
+
 function ff_rgb24ToUV_neon, export=1
         rgbToUV bgr=0
 endfunc
diff --git a/libswscale/aarch64/swscale.c b/libswscale/aarch64/swscale.c
index 8fe9fb11ac..0169193592 100644
--- a/libswscale/aarch64/swscale.c
+++ b/libswscale/aarch64/swscale.c
@@ -211,6 +211,8 @@ void ff_##name##ToUV_half_neon(uint8_t *, uint8_t *, const uint8_t *, \
                                const uint8_t *, const uint8_t *, int w, \
                                uint32_t *coeffs, void *)
 
+NEON_INPUT(abgr32);
+NEON_INPUT(argb32);
 NEON_INPUT(bgr24);
 NEON_INPUT(bgra32);
 NEON_INPUT(rgb24);
@@ -228,6 +230,21 @@ av_cold void ff_sws_init_swscale_aarch64(SwsContext *c)
         c->yuv2planeX = ff_yuv2planeX_8_neon;
     }
     switch (c->srcFormat) {
+    case AV_PIX_FMT_ABGR:
+        c->lumToYV12 = ff_abgr32ToY_neon;
+        if (c->chrSrcHSubSample)
+            c->chrToYV12 = ff_abgr32ToUV_half_neon;
+        else
+            c->chrToYV12 = ff_abgr32ToUV_neon;
+        break;
+
+    case AV_PIX_FMT_ARGB:
+        c->lumToYV12 = ff_argb32ToY_neon;
+        if (c->chrSrcHSubSample)
+            c->chrToYV12 = ff_argb32ToUV_half_neon;
+        else
+            c->chrToYV12 = ff_argb32ToUV_neon;
+        break;
     case AV_PIX_FMT_BGR24:
         c->lumToYV12 = ff_bgr24ToY_neon;
         if (c->chrSrcHSubSample)
-- 
2.42.0
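
[Editorial note] For readers unfamiliar with the swscale input path, here is a
minimal C sketch of the per-pixel math that the alpha_first scalar tail of
rgbToY performs for ARGB input (byte 0 is A, so R/G/B are read from offsets
1/2/3 instead of 0/1/2). The function name, the int16_t output type and the
ry/gy/by, const_offset and shift parameters are illustrative placeholders, not
the actual libswscale interface; the real values arrive through the coeffs
table set up by the caller.

    /* Hypothetical stand-alone model of the alpha_first scalar tail;
     * not the libswscale implementation. */
    #include <stdint.h>

    static void argb_to_y_scalar(int16_t *dst, const uint8_t *src, int width,
                                 int32_t ry, int32_t gy, int32_t by,
                                 int64_t const_offset, int shift)
    {
        for (int i = 0; i < width; i++) {
            /* alpha_first: byte 0 is A, so R/G/B sit at offsets 1/2/3.
             * For ABGR the asm keeps these offsets and instead swaps the
             * coefficients, selected by the bgr macro argument. */
            int r = src[4 * i + 1];
            int g = src[4 * i + 2];
            int b = src[4 * i + 3];
            /* same accumulate-then-shift pattern as the smaddl sequence */
            dst[i] = (int16_t)((ry * r + gy * g + by * b + const_offset) >> shift);
        }
    }

The NEON fast path does the same arithmetic 16 pixels at a time via ld4/uxtl
and rgb_to_yuv_product; only the de-interleave step differs between the
existing element=4 layouts (RGBA/BGRA) and the alpha_first layouts
(ARGB/ABGR), which is what the new argb_to_yuv_load_rgb macro provides.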