On Wed, 28 Aug 2024, Ramiro Polla wrote:

                             A55               A76
rgb24toyv12_16_200_c:     36658.8           17319.2
rgb24toyv12_16_200_neon:  12765.8 ( 2.87x)   6036.0 ( 2.87x)
rgb24toyv12_128_60_c:     83329.5           39901.2
rgb24toyv12_128_60_neon:  28059.8 ( 2.97x)  14288.2 ( 2.79x)
rgb24toyv12_512_16_c:     87874.5           42339.0
rgb24toyv12_512_16_neon:  29673.5 ( 2.96x)  15219.0 ( 2.78x)
rgb24toyv12_1920_4_c:     82323.5           39672.8
rgb24toyv12_1920_4_neon:  27627.5 ( 2.98x)  14267.5 ( 2.78x)
---
libswscale/aarch64/rgb2rgb.c      |   4 +
libswscale/aarch64/rgb2rgb_neon.S | 158 ++++++++++++++++++++++++++++++
2 files changed, 162 insertions(+)

diff --git a/libswscale/aarch64/rgb2rgb.c b/libswscale/aarch64/rgb2rgb.c
index a9bf6ff9e0..c557cf871c 100644
--- a/libswscale/aarch64/rgb2rgb.c
+++ b/libswscale/aarch64/rgb2rgb.c
@@ -27,6 +27,9 @@
#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"

+void ff_rgb24toyv12_neon(const uint8_t *src, uint8_t *ydst, uint8_t *udst,
+                         uint8_t *vdst, int width, int height, int lumStride,
+                         int chromStride, int srcStride, int32_t *rgb2yuv);
void ff_interleave_bytes_neon(const uint8_t *src1, const uint8_t *src2,
                              uint8_t *dest, int width, int height,
                              int src1Stride, int src2Stride, int dstStride);
@@ -36,6 +39,7 @@ av_cold void rgb2rgb_init_aarch64(void)
    int cpu_flags = av_get_cpu_flags();

    if (have_neon(cpu_flags)) {
+        ff_rgb24toyv12  = ff_rgb24toyv12_neon;
        interleaveBytes = ff_interleave_bytes_neon;
    }
}
diff --git a/libswscale/aarch64/rgb2rgb_neon.S b/libswscale/aarch64/rgb2rgb_neon.S
index d81110ec57..23059320b2 100644
--- a/libswscale/aarch64/rgb2rgb_neon.S
+++ b/libswscale/aarch64/rgb2rgb_neon.S
@@ -1,5 +1,6 @@
/*
 * Copyright (c) 2020 Martin Storsjo
+ * Copyright (c) 2024 Ramiro Polla
 *
 * This file is part of FFmpeg.
 *
@@ -20,6 +21,163 @@

#include "libavutil/aarch64/asm.S"

+#define RGB2YUV_COEFFS 16*4+16*32
+#define BY v0.h[0]
+#define GY v0.h[1]
+#define RY v0.h[2]
+#define BU v1.h[0]
+#define GU v1.h[1]
+#define RU v1.h[2]
+#define BV v2.h[0]
+#define GV v2.h[1]
+#define RV v2.h[2]
+#define Y_OFFSET  v22
+#define UV_OFFSET v23
+
+// convert rgb to 16-bit y, u, or v
+// uses v3 and v4
+.macro rgbconv16 dst, b, g, r, bc, gc, rc
+        smull           v3.4s, \b\().4h, \bc
+        smlal           v3.4s, \g\().4h, \gc
+        smlal           v3.4s, \r\().4h, \rc
+        smull2          v4.4s, \b\().8h, \bc
+        smlal2          v4.4s, \g\().8h, \gc
+        smlal2          v4.4s, \r\().8h, \rc        // v3:v4 = b * bc + g * gc + r * rc (32-bit)
+        shrn            \dst\().4h, v3.4s, #7
+        shrn2           \dst\().8h, v4.4s, #7       // dst = b * bc + g * gc + r * rc (16-bit)
+.endm
+
+// void ff_rgb24toyv12_neon(const uint8_t *src, uint8_t *ydst, uint8_t *udst,
+//                          uint8_t *vdst, int width, int height, int lumStride,
+//                          int chromStride, int srcStride, int32_t *rgb2yuv);
+function ff_rgb24toyv12_neon, export=1
+// x0  const uint8_t *src
+// x1  uint8_t *ydst
+// x2  uint8_t *udst
+// x3  uint8_t *vdst
+// w4  int width
+// w5  int height
+// w6  int lumStride
+// w7  int chromStride
+        ldrsw           x14, [sp]
+        ldr             x15, [sp, #8]
+// x14 int srcStride
+// x15 int32_t *rgb2yuv
+
+        // extend width and stride parameters
+        uxtw            x4, w4
+        sxtw            x6, w6
+        sxtw            x7, w7

Just for the record: Yes, we could avoid these sxtw/uxtw instructions by folding them into the uses of w4/w6/w7 below, like "add ..., w6, sxtw". However, register-extending ALU arithmetic performs worse than operations on the full register, so we prefer the explicit instructions here.
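For comparison, the two forms would look roughly like this (taking the ydst/lumStride add from below as the example):

        add             x11, x1,  w6, sxtw          // folded: extended-register ALU op at every use
        // versus
        sxtw            x6,  w6                     // extend once up front
        add             x11, x1,  x6                // plain full-register add from then on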

+
+        // src1 = x0
+        // src2 = x10
+        add             x10, x0,  x14               // x10 = src + srcStride
+        lsl             x14, x14, #1                // srcStride *= 2
+        add             x11, x4,  x4, lsl #1        // x11 = 3 * width
+        sub             x14, x14, x11               // srcPadding = (2 * srcStride) - (3 * width)
+
+        // ydst1 = x1
+        // ydst2 = x11
+        add             x11, x1,  x6                // x11 = ydst + lumStride
+        lsl             x6,  x6,  #1                // lumStride *= 2
+        sub             x6,  x6,  x4                // lumPadding = (2 * lumStride) - width
+
+        sub             x7,  x7,  x4, lsr #1        // chromPadding = chromStride - (width / 2)
+
+        // load rgb2yuv coefficients into v0, v1, and v2
+        add             x15, x15, #RGB2YUV_COEFFS
+        ld1             {v0.8h-v2.8h}, [x15]        // load 24 values
+
+        // load offset constants
+        movi            Y_OFFSET.8h,  #0x10, lsl #8
+        movi            UV_OFFSET.8h, #0x80, lsl #8
+
+1:
+        mov             w15, w4                     // w15 = width
+
+2:
+        // load first line
+        ld3             {v16.8b, v17.8b, v18.8b}, [x0], #24
+        ld3             {v19.8b, v20.8b, v21.8b}, [x0], #24

Hmm, can't we do a single ld3 with .16b registers instead of two separate ones?

If you want to keep the same register layout as now, load into v19-v21, then do "uxtl v16.8h, v19.8b; uxtl2 v19.8h, v19.16b".
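Untested, but keeping the current register layout, the two loads plus the widening below would become something like:

        ld3             {v19.16b, v20.16b, v21.16b}, [x0], #48
        uxtl            v16.8h, v19.8b              // v16 = B11
        uxtl2           v19.8h, v19.16b             // v19 = B12
        uxtl            v17.8h, v20.8b              // v17 = G11
        uxtl2           v20.8h, v20.16b             // v20 = G12
        uxtl            v18.8h, v21.8b              // v18 = R11
        uxtl2           v21.8h, v21.16b             // v21 = R12

The widening still takes six instructions, but the two loads collapse into one.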

+        uxtl            v16.8h, v16.8b              // v16 = B11
+        uxtl            v17.8h, v17.8b              // v17 = G11
+        uxtl            v18.8h, v18.8b              // v18 = R11
+        uxtl            v19.8h, v19.8b              // v19 = B12
+        uxtl            v20.8h, v20.8b              // v20 = G12
+        uxtl            v21.8h, v21.8b              // v21 = R12
+
+        // calculate Y values for first line
+        rgbconv16       v24, v16, v17, v18, BY, GY, RY // v24 = Y11
+        rgbconv16       v25, v19, v20, v21, BY, GY, RY // v25 = Y12
+
+        // pairwise add and save rgb values to calculate average
+        addp            v5.8h, v16.8h, v19.8h
+        addp            v6.8h, v17.8h, v20.8h
+        addp            v7.8h, v18.8h, v21.8h
+
+        // load second line
+        ld3             {v16.8b, v17.8b, v18.8b}, [x10], #24
+        ld3             {v19.8b, v20.8b, v21.8b}, [x10], #24

It's a shame we can't start this load earlier. But as essentially everything up to this point depends on the input sitting in v16-v21, we'd pretty much need to use different registers here to do that.

If you wanted to, you could try loading earlier, into different registers (I think v26-v31 are free at this point?), and then doing the uxtl into the same registers as before, which shouldn't require any further changes.
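Roughly, and untested (assuming v26-v31 really are free at that point):

        // issue the second-line loads early, into scratch registers
        ld3             {v26.8b, v27.8b, v28.8b}, [x10], #24
        ld3             {v29.8b, v30.8b, v31.8b}, [x10], #24
        ...                                         // first-line rgbconv16/addp as before
        // then widen into the same registers as before
        uxtl            v16.8h, v26.8b
        uxtl            v17.8h, v27.8b
        uxtl            v18.8h, v28.8b
        uxtl            v19.8h, v29.8b
        uxtl            v20.8h, v30.8b
        uxtl            v21.8h, v31.8b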

// Martin
