On Wed, Aug 14, 2024 at 7:21 PM Martin Storsjö <mar...@martin.st> wrote:
> On Fri, 9 Aug 2024, Ramiro Polla wrote:
>
> > checkasm --bench for Raspberry Pi 5 Model B Rev 1.0:
> > nv24_yuv420p_128_c:     423.0
> > nv24_yuv420p_128_neon:  115.7
> > nv24_yuv420p_1920_c:   5939.5
> > nv24_yuv420p_1920_neon: 1339.7
> > nv42_yuv420p_128_c:     423.2
> > nv42_yuv420p_128_neon:  115.7
> > nv42_yuv420p_1920_c:   5907.5
> > nv42_yuv420p_1920_neon: 1342.5
> > ---
> >  libswscale/aarch64/Makefile                |  1 +
> >  libswscale/aarch64/swscale_unscaled.c      | 30 +++++++++
> >  libswscale/aarch64/swscale_unscaled_neon.S | 75 ++++++++++++++++++++++
> >  3 files changed, 106 insertions(+)
> >  create mode 100644 libswscale/aarch64/swscale_unscaled_neon.S
> >
> > diff --git a/libswscale/aarch64/swscale_unscaled_neon.S b/libswscale/aarch64/swscale_unscaled_neon.S
> > new file mode 100644
> > index 0000000000..a206fda41f
> > --- /dev/null
> > +++ b/libswscale/aarch64/swscale_unscaled_neon.S
> > @@ -0,0 +1,75 @@
> > +/*
> > + * Copyright (c) 2024 Ramiro Polla
> > + *
> > + * This file is part of FFmpeg.
> > + *
> > + * FFmpeg is free software; you can redistribute it and/or
> > + * modify it under the terms of the GNU Lesser General Public
> > + * License as published by the Free Software Foundation; either
> > + * version 2.1 of the License, or (at your option) any later version.
> > + *
> > + * FFmpeg is distributed in the hope that it will be useful,
> > + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> > + * Lesser General Public License for more details.
> > + *
> > + * You should have received a copy of the GNU Lesser General Public
> > + * License along with FFmpeg; if not, write to the Free Software
> > + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> > + */
> > +
> > +#include "libavutil/aarch64/asm.S"
> > +
> > +function ff_nv24_to_yuv420p_chroma_neon, export=1
> > +// x0  uint8_t *dst1
> > +// x1  int dstStride1
> > +// x2  uint8_t *dst2
> > +// x3  int dstStride2
> > +// x4  const uint8_t *src
> > +// x5  int srcStride
> > +// w6  int w
> > +// w7  int h
> > +
> > +        uxtw            x1, w1
> > +        uxtw            x3, w3
> > +        uxtw            x5, w5
>
> You can often avoid the explicit uxtw instructions, if you can fold an
> uxtw attribute into the cases where the register is used. (If it's used
> often, it may be slightly more performant to do it upfront like this
> though, but often it can be omitted entirely.) And whenever you do an
> operation with a wN register as destination, the upper half of the
> register gets explicitly cleared, so these also may be avoided that way.
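To illustrate the folding (a rough sketch, not assembled or benchmarked;
register numbers are the ones from the patch above): instead of

        uxtw            x1, w1
        ...
        add             x0, x0, x1              // dst1 += dstStride1

the extension can be folded into the instruction that uses the value:

        add             x0, x0, w1, uxtw        // dst1 += dstStride1

Likewise, "lsl w5, w5, #1" already clears the upper half of x5 because it
writes a wN destination, so the upfront "uxtw x5, w5" becomes redundant.
(For stride values that may be negative, sxtw would be the appropriate
extension rather than uxtw.)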
>
> > +
> > +        add             x9, x4, x5              // x9 = src + srcStride
> > +        lsl             w5, w5, #1              // srcStride *= 2
> > +
> > +1:
> > +        mov             w10, w6                 // w10 = w
> > +        mov             x11, x4                 // x11 = src1 (line 1)
> > +        mov             x12, x9                 // x12 = src2 (line 2)
> > +        mov             x13, x0                 // x13 = dst1 (dstU)
> > +        mov             x14, x2                 // x14 = dst2 (dstV)
> > +
> > +2:
> > +        ld2             { v0.16b, v1.16b }, [x11], #32  // v0 = U1, v1 = V1
> > +        ld2             { v2.16b, v3.16b }, [x12], #32  // v2 = U2, v3 = V2
> > +
> > +        uaddlp          v0.8h, v0.16b           // pairwise add U1 into v0
> > +        uaddlp          v1.8h, v1.16b           // pairwise add V1 into v1
> > +        uadalp          v0.8h, v2.16b           // pairwise add U2, accumulate into v0
> > +        uadalp          v1.8h, v3.16b           // pairwise add V2, accumulate into v1
> > +
> > +        shrn            v0.8b, v0.8h, #2        // divide by 4
> > +        shrn            v1.8b, v1.8h, #2        // divide by 4
> > +
> > +        st1             { v0.8b }, [x13], #8    // store U into dst1
> > +        st1             { v1.8b }, [x14], #8    // store V into dst2
> > +
> > +        subs            w10, w10, #8
> > +        b.gt            2b
> > +
> > +        // next row
> > +        add             x4, x4, x5              // src1 += srcStride * 2
> > +        add             x9, x9, x5              // src2 += srcStride * 2
> > +        add             x0, x0, x1              // dst1 += dstStride1
> > +        add             x2, x2, x3              // dst2 += dstStride2
>
> It's often possible to avoid the extra step of moving the pointers back
> into the x11/x12/x13/x14 registers, if you subtract the width from the
> stride at the start of the function. Then you don't need two separate
> registers for each pointer, and it shortens the dependency chain when
> moving on to the next line.
>
> If the width can be any uneven value, but we in practice write in
> increments of 8 pixels, you may need to align the width up to 8 before
> using it to decrement the stride that way though.
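A minimal sketch of that layout (untested; it assumes the width in w6 is
a multiple of the 8-pixel step, and reuses the register assignments from
the patch):

        sub             w1, w1, w6              // dstStride1 -= w, only the row padding remains
1:
        ...                                     // inner loop stores via [x0], #8 post-increments,
        ...                                     // so x0 itself walks the row
        add             x0, x0, w1, sxtw        // dst1 += padding, x0 now points at the next row
        ...
        b.gt            1b

The store pointer advances through the row directly and only the leftover
stride is added per row, so the per-row mov copies into x13/x14 disappear.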
Thank you for the review. New patch attached.
From f6ea1edb0590c14e168fbce2ae42958220b6e778 Mon Sep 17 00:00:00 2001
From: Ramiro Polla <ramiro.po...@gmail.com>
Date: Wed, 7 Aug 2024 18:53:12 +0200
Subject: [PATCH v2 4/4] swscale/aarch64: add nv24/nv42 to yuv420p unscaled
 converter

checkasm --bench for Raspberry Pi 5 Model B Rev 1.0:
nv24_yuv420p_128_c:      1320.2
nv24_yuv420p_128_neon:    709.5
nv24_yuv420p_1920_c:    12448.0
nv24_yuv420p_1920_neon:  2698.0
nv42_yuv420p_128_c:      1329.2
nv42_yuv420p_128_neon:    841.7
nv42_yuv420p_1920_c:    11967.5
nv42_yuv420p_1920_neon:  2866.5
---
 libswscale/aarch64/Makefile                |  1 +
 libswscale/aarch64/swscale_unscaled.c      | 30 ++++++++++
 libswscale/aarch64/swscale_unscaled_neon.S | 70 ++++++++++++++++++++++
 3 files changed, 101 insertions(+)
 create mode 100644 libswscale/aarch64/swscale_unscaled_neon.S

diff --git a/libswscale/aarch64/Makefile b/libswscale/aarch64/Makefile
index 37ad960619..1de8c9c0d6 100644
--- a/libswscale/aarch64/Makefile
+++ b/libswscale/aarch64/Makefile
@@ -7,4 +7,5 @@ NEON-OBJS += aarch64/hscale.o                               \
               aarch64/output.o                                    \
               aarch64/range_convert_neon.o                        \
               aarch64/rgb2rgb_neon.o                              \
+              aarch64/swscale_unscaled_neon.o                     \
               aarch64/yuv2rgb_neon.o                              \
diff --git a/libswscale/aarch64/swscale_unscaled.c b/libswscale/aarch64/swscale_unscaled.c
index b3093bbc9d..87bb011709 100644
--- a/libswscale/aarch64/swscale_unscaled.c
+++ b/libswscale/aarch64/swscale_unscaled.c
@@ -83,6 +83,31 @@ static int ifmt##_to_##ofmt##_neon_wrapper(SwsContext *c, const uint8_t *src[],
                           c->yuv2rgb_y_coeff);                    \
 }                                                                 \
 
+void ff_nv24_to_yuv420p_chroma_neon(uint8_t *dst1, int dstStride1,
+                                    uint8_t *dst2, int dstStride2,
+                                    const uint8_t *src, int srcStride,
+                                    int w, int h);
+
+static int nv24_to_yuv420p_neon_wrapper(SwsContext *c, const uint8_t *src[],
+                                        int srcStride[], int srcSliceY, int srcSliceH,
+                                        uint8_t *dst[], int dstStride[])
+{
+    uint8_t *dst1 = dst[1] + dstStride[1] * srcSliceY / 2;
+    uint8_t *dst2 = dst[2] + dstStride[2] * srcSliceY / 2;
+
+    ff_copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->srcW,
+                 dst[0], dstStride[0]);
+
+    if (c->srcFormat == AV_PIX_FMT_NV24)
+        ff_nv24_to_yuv420p_chroma_neon(dst1, dstStride[1], dst2, dstStride[2],
+                                       src[1], srcStride[1], c->srcW / 2, srcSliceH);
+    else
+        ff_nv24_to_yuv420p_chroma_neon(dst2, dstStride[2], dst1, dstStride[1],
+                                       src[1], srcStride[1], c->srcW / 2, srcSliceH);
+
+    return srcSliceH;
+}
+
 #define DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nvx)                     \
     DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, argb)                       \
     DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, rgba)                       \
@@ -119,6 +144,11 @@ static void get_unscaled_swscale_neon(SwsContext *c) {
     SET_FF_NVX_TO_ALL_RGBX_FUNC(nv21, NV21, accurate_rnd);
     SET_FF_NVX_TO_ALL_RGBX_FUNC(yuv420p, YUV420P, accurate_rnd);
     SET_FF_NVX_TO_ALL_RGBX_FUNC(yuv422p, YUV422P, accurate_rnd);
+
+    if (c->dstFormat == AV_PIX_FMT_YUV420P &&
+        (c->srcFormat == AV_PIX_FMT_NV24 || c->srcFormat == AV_PIX_FMT_NV42) &&
+        !(c->srcH & 1) && !(c->srcW & 15) && !accurate_rnd)
+        c->convert_unscaled = nv24_to_yuv420p_neon_wrapper;
 }
 
 void ff_get_unscaled_swscale_aarch64(SwsContext *c)
diff --git a/libswscale/aarch64/swscale_unscaled_neon.S b/libswscale/aarch64/swscale_unscaled_neon.S
new file mode 100644
index 0000000000..7f1890f58a
--- /dev/null
+++ b/libswscale/aarch64/swscale_unscaled_neon.S
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2024 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+function ff_nv24_to_yuv420p_chroma_neon, export=1
+// x0  uint8_t *dst1
+// x1  int dstStride1
+// x2  uint8_t *dst2
+// x3  int dstStride2
+// x4  const uint8_t *src
+// x5  int srcStride
+// w6  int w
+// w7  int h
+
+        add             x9, x4, w5, sxtw        // x9 = src + srcStride
+        lsl             w5, w5, #1              // srcStride *= 2
+        sub             w5, w5, w6, lsl #2      // srcPadding = (2 * srcStride) - (4 * w)
+        sub             w1, w1, w6              // dstPadding1 = dstStride1 - w
+        sub             w3, w3, w6              // dstPadding2 = dstStride2 - w
+
+1:
+        mov             w10, w6                 // w10 = w
+
+2:
+        ld2             { v0.16b, v1.16b }, [x4], #32   // v0 = U1, v1 = V1
+        ld2             { v2.16b, v3.16b }, [x9], #32   // v2 = U2, v3 = V2
+
+        uaddlp          v0.8h, v0.16b           // pairwise add U1 into v0
+        uaddlp          v1.8h, v1.16b           // pairwise add V1 into v1
+        uadalp          v0.8h, v2.16b           // pairwise add U2, accumulate into v0
+        uadalp          v1.8h, v3.16b           // pairwise add V2, accumulate into v1
+
+        shrn            v0.8b, v0.8h, #2        // divide by 4
+        shrn            v1.8b, v1.8h, #2        // divide by 4
+
+        st1             { v0.8b }, [x0], #8     // store U into dst1
+        st1             { v1.8b }, [x2], #8     // store V into dst2
+
+        subs            w10, w10, #8
+        b.gt            2b
+
+        // next row
+        add             x4, x4, x5, sxtw        // src1 += srcPadding
+        add             x9, x9, x5, sxtw        // src2 += srcPadding
+        add             x0, x0, x1, sxtw        // dst1 += dstPadding1
+        add             x2, x2, x3, sxtw        // dst2 += dstPadding2
+
+        subs            w7, w7, #2
+        b.gt            1b
+
+        ret
+endfunc
-- 
2.39.2
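For reference, each output sample computed by this kernel is a truncating
2x2 box average of the input chroma:

    dstU[x] = (U1[2*x] + U1[2*x+1] + U2[2*x] + U2[2*x+1]) >> 2

uaddlp widens and sums horizontal pairs, uadalp accumulates the matching
pairs from the second row, and shrn #2 performs the divide while
narrowing. Since shrn truncates rather than rounds (the rounding variant
would be rshrn), this is presumably why the wrapper only selects this
path when accurate_rnd is not set.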