On 11/07/15 10:40 AM, James Darnley wrote: > Speed of all modes increased by a factor between 7.4 and 19.8 largely > depending > on whether bytes are unpacked into words. Modes 2, 3, and 4 have been sped-up > by a factor of 43 (thanks quick sort!) > > All modes are available on x86_64 but only modes 1, 10, 11, 12, 13, 14, 19, > 20, > 21, and 22 are available on x86 due to the number of SIMD registers used. > --- > libavfilter/removegrain.h | 40 ++ > libavfilter/vf_removegrain.c | 38 +- > libavfilter/x86/Makefile | 2 + > libavfilter/x86/vf_removegrain.asm | 1215 > +++++++++++++++++++++++++++++++++ > libavfilter/x86/vf_removegrain_init.c | 128 ++++ > 5 files changed, 1404 insertions(+), 19 deletions(-) > create mode 100644 libavfilter/removegrain.h > create mode 100644 libavfilter/x86/vf_removegrain.asm > create mode 100644 libavfilter/x86/vf_removegrain_init.c > > diff --git a/libavfilter/removegrain.h b/libavfilter/removegrain.h > new file mode 100644 > index 0000000..60401fb > --- /dev/null > +++ b/libavfilter/removegrain.h > @@ -0,0 +1,40 @@ > +/* > + * Copyright (c) 2015 Paul B Mahol > + * Copyright (c) 2015 James Darnley > + * > + * This file is part of FFmpeg. > + * > + * FFmpeg is free software; you can redistribute it and/or > + * modify it under the terms of the GNU Lesser General Public > + * License as published by the Free Software Foundation; either > + * version 2.1 of the License, or (at your option) any later version. > + * > + * FFmpeg is distributed in the hope that it will be useful, > + * but WITHOUT ANY WARRANTY; without even the implied warranty of > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU > + * Lesser General Public License for more details. 
> + * > + * You should have received a copy of the GNU Lesser General Public > + * License along with FFmpeg; if not, write to the Free Software > + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 > USA > + */ > + > +#include "avfilter.h" > + > +typedef struct RemoveGrainContext { > + const AVClass *class; > + > + int mode[4]; > + > + int nb_planes; > + int planewidth[4]; > + int planeheight[4]; > + int skip_even; > + int skip_odd; > + > + int (*rg[4])(int c, int a1, int a2, int a3, int a4, int a5, int a6, int > a7, int a8); > + > + void (*fl[4])(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels); > +} RemoveGrainContext; > + > +void ff_removegrain_init_x86(RemoveGrainContext *rg); > diff --git a/libavfilter/vf_removegrain.c b/libavfilter/vf_removegrain.c > index 77b3561..da17f6a 100644 > --- a/libavfilter/vf_removegrain.c > +++ b/libavfilter/vf_removegrain.c > @@ -2,6 +2,7 @@ > * Copyright (c) 2012 Laurent de Soras > * Copyright (c) 2013 Fredrik Mellbin > * Copyright (c) 2015 Paul B Mahol > + * Copyright (c) 2015 James Darnley > * > * This file is part of FFmpeg. 
> * > @@ -20,32 +21,15 @@ > * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 > USA > */ > > -/* > - * TODO: add SIMD > - */ > - > #include "libavutil/imgutils.h" > #include "libavutil/opt.h" > #include "libavutil/pixdesc.h" > #include "avfilter.h" > #include "formats.h" > #include "internal.h" > +#include "removegrain.h" > #include "video.h" > > -typedef struct RemoveGrainContext { > - const AVClass *class; > - > - int mode[4]; > - > - int nb_planes; > - int planewidth[4]; > - int planeheight[4]; > - int skip_even; > - int skip_odd; > - > - int (*rg[4])(int c, int a1, int a2, int a3, int a4, int a5, int a6, int > a7, int a8); > -} RemoveGrainContext; > - > #define OFFSET(x) offsetof(RemoveGrainContext, x) > #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM > > @@ -142,6 +126,7 @@ static int mode05(int c, int a1, int a2, int a3, int a4, > int a5, int a6, int a7, > > const int mindiff = FFMIN(FFMIN(c1, c2), FFMIN(c3, c4)); > > + /* When adding SIMD notice the return order here: 4, 2, 3, 1. */ > if (mindiff == c4) { > return av_clip(c, mi4, ma4); > } else if (mindiff == c2) { > @@ -524,6 +509,9 @@ static int config_input(AVFilterLink *inlink) > } > } > > + if (ARCH_X86) > + ff_removegrain_init_x86(s); > + > return 0; > } > > @@ -566,7 +554,19 @@ static int filter_slice(AVFilterContext *ctx, void *arg, > int jobnr, int nb_jobs) > } > > *dst++ = *src++; > - for (x = 1; x < s->planewidth[i] - 1; x++) { > + > + if (s->fl[i]) { > + int w_asm = (s->planewidth[i] - 2) & ~15;
I don't think this belongs here. If you add an AVX2 version as you intended, wouldn't this need to be 31? A wrapper function to have this code in the x86 folder may be the best option. > + > + s->fl[i](dst, src, in->linesize[i], w_asm); > + > + x = 1 + w_asm; > + dst += w_asm; > + src += w_asm; > + } else > + x = 1; > + > + for (; x < s->planewidth[i] - 1; x++) { > const int a1 = src[-op]; > const int a2 = src[-o0]; > const int a3 = src[-om]; > diff --git a/libavfilter/x86/Makefile b/libavfilter/x86/Makefile > index 49f45b6..32a154d 100644 > --- a/libavfilter/x86/Makefile > +++ b/libavfilter/x86/Makefile > @@ -7,6 +7,7 @@ OBJS-$(CONFIG_INTERLACE_FILTER) += > x86/vf_interlace_init.o > OBJS-$(CONFIG_NOISE_FILTER) += x86/vf_noise.o > OBJS-$(CONFIG_PP7_FILTER) += x86/vf_pp7_init.o > OBJS-$(CONFIG_PULLUP_FILTER) += x86/vf_pullup_init.o > +OBJS-$(CONFIG_REMOVEGRAIN_FILTER) += x86/vf_removegrain_init.o > OBJS-$(CONFIG_SPP_FILTER) += x86/vf_spp.o > OBJS-$(CONFIG_TINTERLACE_FILTER) += x86/vf_tinterlace_init.o > OBJS-$(CONFIG_VOLUME_FILTER) += x86/af_volume_init.o > @@ -19,6 +20,7 @@ YASM-OBJS-$(CONFIG_IDET_FILTER) += > x86/vf_idet.o > YASM-OBJS-$(CONFIG_INTERLACE_FILTER) += x86/vf_interlace.o > YASM-OBJS-$(CONFIG_PP7_FILTER) += x86/vf_pp7.o > YASM-OBJS-$(CONFIG_PULLUP_FILTER) += x86/vf_pullup.o > +YASM-OBJS-$(CONFIG_REMOVEGRAIN_FILTER) += x86/vf_removegrain.o > YASM-OBJS-$(CONFIG_TINTERLACE_FILTER) += x86/vf_interlace.o > YASM-OBJS-$(CONFIG_VOLUME_FILTER) += x86/af_volume.o > YASM-OBJS-$(CONFIG_YADIF_FILTER) += x86/vf_yadif.o > x86/yadif-16.o x86/yadif-10.o > diff --git a/libavfilter/x86/vf_removegrain.asm > b/libavfilter/x86/vf_removegrain.asm > new file mode 100644 > index 0000000..5e1feea > --- /dev/null > +++ b/libavfilter/x86/vf_removegrain.asm > @@ -0,0 +1,1215 @@ > +;***************************************************************************** > +;* x86-optimized functions for yadif filter > +;* > +;* Copyright (C) 2015 James Darnley > +;* > +;* This file is part of 
FFmpeg. > +;* > +;* TODO: gpl text goes here. > +;***************************************************************************** > + > +; column: -1 0 +1 > +; row -1: a1 a2 a3 > +; row 0: a4 c a5 > +; row +1: a6 a7 a8 > + > +%include "libavutil/x86/x86util.asm" > + > +SECTION_RODATA Declare it SECTION_RODATA 32 now so you don't forget when you add AVX2 versions. > + > +pw_4: times 16 dw 4 > +pw_8: times 16 dw 8 > +pw_div9: times 16 dw ((1<<16)+4)/9 > + > +SECTION_TEXT > + > +;*** Preprocessor helpers > + > +%define a1 srcq+stride_n-1 > +%define a2 srcq+stride_n > +%define a3 srcq+stride_n+1 > +%define a4 srcq-1 > +%define c srcq > +%define a5 srcq+1 > +%define a6 srcq+stride_p-1 > +%define a7 srcq+stride_p > +%define a8 srcq+stride_p+1 > + > +; %1 dest simd register > +; %2 source memory location > +; %3 zero location (simd register/memory) > +%macro LOAD 3 > + movh %1, %2 > + punpcklbw %1, %3 > +%endmacro > + > +%macro LOAD_SQUARE 0 > + movu m1, [a1] > + movu m2, [a2] > + movu m3, [a3] > + movu m4, [a4] > + movu m0, [c] > + movu m5, [a5] > + movu m6, [a6] > + movu m7, [a7] > + movu m8, [a8] > +%endmacro > + > +; %1 zero location (simd register/memory) > +%macro LOAD_SQUARE_16 1 > + LOAD m1, [a1], %1 > + LOAD m2, [a2], %1 > + LOAD m3, [a3], %1 > + LOAD m4, [a4], %1 > + LOAD m0, [c], %1 > + LOAD m5, [a5], %1 > + LOAD m6, [a6], %1 > + LOAD m7, [a7], %1 > + LOAD m8, [a8], %1 > +%endmacro > + > +%macro SORT_AXIS 0 > + mova m9, m1 > + pminub m1, m8 > + pmaxub m8, m9 > + mova m10, m2 > + pminub m2, m7 > + pmaxub m7, m10 > + mova m11, m3 > + pminub m3, m6 > + pmaxub m6, m11 > + mova m12, m4 > + pminub m4, m5 > + pmaxub m5, m12 > +%endmacro > + > +%macro SORT_AXIS_16 0 > + mova m9, m1 > + pminsw m1, m8 > + pmaxsw m8, m9 > + mova m10, m2 > + pminsw m2, m7 > + pmaxsw m7, m10 > + mova m11, m3 > + pminsw m3, m6 > + pmaxsw m6, m11 > + mova m12, m4 > + pminsw m4, m5 > + pmaxsw m5, m12 > +%endmacro > + > +; %1 simd register to hold maximums > +; %2 simd register to hold minimums > 
+; %3 temp location (simd register/memory) > +%macro SORT_PAIR 3 > + mova %3, %1 > + pminub %1, %2 > + pmaxub %2, %3 > +%endmacro These three SORT macros could be simplified into something like this %macro SORT_PAIR 4 mova %4, %2 pmin%1 %2, %3 pmax%1 %3, %4 %endmacro %macro SORT_AXIS 0 SORT_PAIR ub, m1, m8, m9 SORT_PAIR ub, m2, m7, m10 SORT_PAIR ub, m3, m6, m11 SORT_PAIR ub, m4, m5, m12 %endmacro %macro SORT_AXIS_16 0 SORT_PAIR sw, m1, m8, m9 SORT_PAIR sw, m2, m7, m10 SORT_PAIR sw, m3, m6, m11 SORT_PAIR sw, m4, m5, m12 %endmacro > + > +; The loop doesn't need to do all the iterations. It could stop when the > right > +; pixels are in the right registers. > +%macro SORT_SQUARE 0 > + %assign k 7 > + %rep 7 > + %assign i 1 > + %assign j 2 > + %rep k > + SORT_PAIR m %+ i , m %+ j , m9 > + %assign i i+1 > + %assign j j+1 > + %endrep > + %assign k k-1 > + %endrep > +%endmacro > + > +; %1 dest simd register > +; %2 source (simd register/memory) > +; %3 temp simd register > +%macro ABS_DIFF 3 > + mova %3, %2 > + psubusb %3, %1 > + psubusb %1, %2 > + por %1, %3 > +%endmacro > + > +; %1 dest simd register > +; %2 source (simd register/memory) > +; %3 temp simd register > +%macro ABS_DIFF_W 3 > + mova %3, %2 > + psubusw %3, %1 > + psubusw %1, %2 > + por %1, %3 > +%endmacro No way to achieve this using the pabs* ssse3 instructions? > + > +; %1 simd register that hold the mask and will hold the result > +; %2 simd register that holds the "true" values > +; %3 location of the "false" values (simd register/memory) > +%macro BLEND 3 ; mask, true, false > + pand %2, %1 > + pandn %1, %3 > + por %1, %2 > +%endmacro This doesn't follow the sse4 blend instructions' semantic, so it will make adding AVX2 versions harder. Try instead %macro BLEND 3 ; false, true, mask pand %2, %3 pandn %3, %1 por %3, %2 SWAP %1, %3 %endmacro Which will let you use "vpblendvb %1, %1, %2, %3" for the avx2 version. 
SSE4's pblendvb is kinda crappy, requiring the mask to be in xmm0, so adding an SSE4 version may not be worth it. [...] > diff --git a/libavfilter/x86/vf_removegrain_init.c > b/libavfilter/x86/vf_removegrain_init.c > new file mode 100644 > index 0000000..450185f > --- /dev/null > +++ b/libavfilter/x86/vf_removegrain_init.c > @@ -0,0 +1,128 @@ > +#include "libavutil/attributes.h" > +#include "libavutil/cpu.h" > +#include "libavutil/x86/cpu.h" > +#include "libavfilter/removegrain.h" > + > +void ff_rg_fl_mode_1_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_10_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_11_12_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, > int pixels); > +void ff_rg_fl_mode_13_14_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, > int pixels); > +void ff_rg_fl_mode_19_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_20_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_21_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_22_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +#if ARCH_X86_64 > +void ff_rg_fl_mode_2_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_3_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_4_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_5_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_6_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_7_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_8_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_9_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void 
ff_rg_fl_mode_15_16_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, > int pixels); > +void ff_rg_fl_mode_17_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_18_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_23_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +void ff_rg_fl_mode_24_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int > pixels); > +#endif > + > +av_cold void ff_removegrain_init_x86(RemoveGrainContext *rg) > +{ > + int cpu_flags = av_get_cpu_flags(); > + int i; > + > + for (i = 0; i < rg->nb_planes; i++) { > + switch (rg->mode[i]) { > + case 1: > + if (EXTERNAL_SSE2(cpu_flags)) I think it will be much cleaner and simpler if you put the switch inside a single EXTERNAL_SSE2 branch, rather than checking it for every mode. > + rg->fl[i] = ff_rg_fl_mode_1_sse2; > + break; > + case 10: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_10_sse2; > + break; > + case 11: > + case 12: /* fall through */ > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_11_12_sse2; > + break; > + case 13: > + case 14: /* fall through */ > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_13_14_sse2; > + break; > + case 19: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_19_sse2; > + break; > + case 20: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_20_sse2; > + break; > + case 21: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_21_sse2; > + break; > + case 22: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_22_sse2; > + break; > +#if ARCH_X86_64 > + case 2: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_2_sse2; > + break; > + case 3: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_3_sse2; > + break; > + case 4: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_4_sse2; > + break; > + case 5: > + if (EXTERNAL_SSE2(cpu_flags)) > + 
rg->fl[i] = ff_rg_fl_mode_5_sse2; > + break; > + case 6: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_6_sse2; > + break; > + case 7: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_7_sse2; > + break; > + case 8: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_8_sse2; > + break; > + case 9: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_9_sse2; > + break; > + case 15: > + case 16: /* fall through */ > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_15_16_sse2; > + break; > + case 17: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_17_sse2; > + break; > + case 18: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_18_sse2; > + break; > + case 23: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_23_sse2; > + break; > + case 24: > + if (EXTERNAL_SSE2(cpu_flags)) > + rg->fl[i] = ff_rg_fl_mode_24_sse2; > + break; > +#endif /* ARCH_x86_64 */ > + } > + } > +} > _______________________________________________ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel