From db2c5213377ecde3e24d2f4a27cb790c158a7ef9 Mon Sep 17 00:00:00 2001
From: Rong Yan <rongyan236@gmail.com>
Date: Thu, 27 Nov 2014 05:51:01 +0000
Subject: [PATCH 3/5] libavcodec/ppc/h264chroma_template.c: fix
 PREFIX_h264_chroma_mc8_altivec(), PREFIX_no_rnd_vc1_chroma_mc8_altivec(),
 CHROMA_MC8_ALTIVEC_CORE_SIMPLE() and CHROMA_MC8_ALTIVEC_CORE(); add
 macros GET_VSRC1() and GET_VSRC() for POWER LE

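AltiVec element order is reversed on little-endian POWER, so the
unaligned-load sequence built from vec_ld()/vec_lvsl()/vec_perm() and
the raw vec_mergeh()/vec_splat() calls no longer feed the chroma MC
core bytes in the order it expects.  Factor all source loads into the
new GET_VSRC1()/GET_VSRC() macros: on big endian they keep the
vec_ld()+vec_perm() realignment path, on little endian they use
vec_vsx_ld(), which supports unaligned addresses directly.  The merge
and splat operations go through the endian-aware VEC_MERGEH() and
VEC_SPLAT16() wrappers from util_altivec.h.

As an illustration only (the real definitions live in
libavutil/ppc/util_altivec.h and may differ in detail), an endian-aware
widening merge can be written by swapping the operands on LE, since the
byte order inside each 16-bit lane flips along with the element order:

    /* sketch, not the exact header contents */
    #if HAVE_BIGENDIAN
    #define VEC_MERGEH(a, b) vec_mergeh(a, b)
    #else
    #define VEC_MERGEH(a, b) vec_mergeh(b, a)
    #endif
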
---
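Note (below the fold, not part of the commit message): GET_VSRC()
encapsulates the two load strategies.  The big-endian path keeps the
classic realignment sequence, roughly:

    vec_u8 hi = vec_ld(off, s);          /* aligned load covering s+off */
    vec_u8 lo = vec_ld(off + b, s);      /* next aligned quadword       */
    vec_u8 v  = vec_perm(hi, lo, perm0); /* perm0 = vec_lvsl(0, s)      */

where b is passed as 16 when the one-byte-shifted second vector needs
byte s + off + 16, and 15 otherwise, preserving the original code's
care not to let the second aligned load reach past the bytes actually
required.  The little-endian path is simply an unaligned VSX load:

    vec_u8 v = vec_vsx_ld(off, s);       /* no realignment needed */
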
 libavcodec/ppc/h264chroma_template.c | 265 ++++++++++++++---------------------
 1 file changed, 109 insertions(+), 156 deletions(-)

diff --git a/libavcodec/ppc/h264chroma_template.c b/libavcodec/ppc/h264chroma_template.c
index 7436e11..590aeec 100644
--- a/libavcodec/ppc/h264chroma_template.c
+++ b/libavcodec/ppc/h264chroma_template.c
@@ -19,57 +19,97 @@
  */
 
 #include "libavutil/mem.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
 
 /* this code assume that stride % 16 == 0 */
 
-#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
-        vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc2uc);\
-        vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc3uc);\
-\
+#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) {\
+        vsrc2ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc2uc);\
+        vsrc3ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc3uc);\
+        \
         psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
         psum = vec_mladd(vB, vsrc1ssH, psum);\
         psum = vec_mladd(vC, vsrc2ssH, psum);\
         psum = vec_mladd(vD, vsrc3ssH, psum);\
         psum = BIAS2(psum);\
         psum = vec_sr(psum, v6us);\
-\
+        \
         vdst = vec_ld(0, dst);\
         ppsum = (vec_u8)vec_pack(psum, psum);\
         vfdst = vec_perm(vdst, ppsum, fperm);\
-\
+        \
         OP_U8_ALTIVEC(fsum, vfdst, vdst);\
-\
+        \
         vec_st(fsum, 0, dst);\
-\
+        \
         vsrc0ssH = vsrc2ssH;\
         vsrc1ssH = vsrc3ssH;\
-\
+        \
         dst += stride;\
-        src += stride;
-
-#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
-\
-        vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);\
-        vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);\
-\
+        src += stride;\
+}
+#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE {\
+        \
+        vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);\
+        vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);\
+        \
         psum = vec_mladd(vA, vsrc0ssH, v32ss);\
         psum = vec_mladd(vE, vsrc1ssH, psum);\
         psum = vec_sr(psum, v6us);\
-\
+        \
         vdst = vec_ld(0, dst);\
         ppsum = (vec_u8)vec_pack(psum, psum);\
         vfdst = vec_perm(vdst, ppsum, fperm);\
-\
+        \
         OP_U8_ALTIVEC(fsum, vfdst, vdst);\
-\
+        \
         vec_st(fsum, 0, dst);\
-\
+        \
         dst += stride;\
-        src += stride;
+        src += stride;\
+}
 
 #define noop(a) a
 #define add28(a) vec_add(v28ss, a)
 
+#if HAVE_BIGENDIAN
+#define GET_VSRC1(vs0, off, b, perm0, s) {\
+    vec_u8 vsrcCuc, vsrcDuc;\
+    vsrcCuc = vec_ld(off, s);\
+    if (loadSecond)\
+        vsrcDuc = vec_ld(off + b, s);\
+    else\
+        vsrcDuc = vsrcCuc;\
+    \
+    vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0);\
+}
+#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s) {\
+    vec_u8 vsrcCuc, vsrcDuc;\
+    vsrcCuc = vec_ld(off, s);\
+    if (loadSecond)\
+        vsrcDuc = vec_ld(off + b, s);\
+    else\
+        vsrcDuc = vsrcCuc;\
+    \
+    vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0);\
+    if (reallyBadAlign)\
+        vs1 = vsrcDuc;\
+    else\
+        vs1 = vec_perm(vsrcCuc, vsrcDuc, perm1);\
+}
+#else /* HAVE_BIGENDIAN */
+
+#define GET_VSRC1(vs0, off, b, perm0, s) {\
+    vs0 = vec_vsx_ld(off, s);\
+}
+#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s) {\
+    vs0 = vec_vsx_ld(off, s);\
+    vs1 = vec_vsx_ld(off + 1, s);\
+}
+#endif /* HAVE_BIGENDIAN */
+
+
 #ifdef PREFIX_h264_chroma_mc8_altivec
 static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                                     int stride, int h, int x, int y) {
@@ -78,25 +118,29 @@ static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                          ((    x) * (8 - y)),
                          ((8 - x) * (    y)),
                          ((    x) * (    y))};
-    register int i;
     vec_u8 fperm;
-    const vec_s32 vABCD = vec_ld(0, ABCD);
-    const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
-    const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
-    const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
-    const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
     LOAD_ZERO;
+    const vec_s32 vABCD = vec_ld(0, ABCD);
+    const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
+    const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
+    const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
+    const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
     const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
     const vec_u16 v6us = vec_splat_u16(6);
-    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
-    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
-
-    vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
+    vec_u8 vsrcperm0, vsrcperm1;
     vec_u8 vsrc0uc, vsrc1uc;
     vec_s16 vsrc0ssH, vsrc1ssH;
-    vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
+    vec_u8 vsrc2uc, vsrc3uc;
     vec_s16 vsrc2ssH, vsrc3ssH, psum;
     vec_u8 vdst, ppsum, vfdst, fsum;
+    register int i;
+#if HAVE_BIGENDIAN
+    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+
+    vsrcperm0 = vec_lvsl(0, src);
+    vsrcperm1 = vec_lvsl(1, src);
+#endif
 
     if (((unsigned long)dst) % 16 == 0) {
         fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
@@ -110,89 +154,28 @@ static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                          0x1C, 0x1D, 0x1E, 0x1F};
     }
 
-    vsrcAuc = vec_ld(0, src);
-
-    if (loadSecond)
-        vsrcBuc = vec_ld(16, src);
-    vsrcperm0 = vec_lvsl(0, src);
-    vsrcperm1 = vec_lvsl(1, src);
+    GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);
 
-    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
-    if (reallyBadAlign)
-        vsrc1uc = vsrcBuc;
-    else
-        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
-
-    vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);
-    vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);
+    vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);
+    vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);
 
     if (ABCD[3]) {
-        if (!loadSecond) {// -> !reallyBadAlign
-            for (i = 0 ; i < h ; i++) {
-                vsrcCuc = vec_ld(stride + 0, src);
-                vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
-                vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
-                CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
-            }
-        } else {
-            vec_u8 vsrcDuc;
-            for (i = 0 ; i < h ; i++) {
-                vsrcCuc = vec_ld(stride + 0, src);
-                vsrcDuc = vec_ld(stride + 16, src);
-                vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
-                if (reallyBadAlign)
-                    vsrc3uc = vsrcDuc;
-                else
-                    vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
-                CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
-            }
+        for (i = 0 ; i < h ; i++) {
+            GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
+            CHROMA_MC8_ALTIVEC_CORE(v32ss, noop);
         }
     } else {
         const vec_s16 vE = vec_add(vB, vC);
         if (ABCD[2]) { // x == 0 B == 0
-            if (!loadSecond) {// -> !reallyBadAlign
-                for (i = 0 ; i < h ; i++) {
-                    vsrcCuc = vec_ld(stride + 0, src);
-                    vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
-                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-
-                    vsrc0uc = vsrc1uc;
-                }
-            } else {
-                vec_u8 vsrcDuc;
-                for (i = 0 ; i < h ; i++) {
-                    vsrcCuc = vec_ld(stride + 0, src);
-                    vsrcDuc = vec_ld(stride + 15, src);
-                    vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
-                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-
-                    vsrc0uc = vsrc1uc;
-                }
+            for (i = 0 ; i < h ; i++) {
+                GET_VSRC1(vsrc1uc, stride, 15, vsrcperm0, src);
+                CHROMA_MC8_ALTIVEC_CORE_SIMPLE;
+                vsrc0uc = vsrc1uc;
             }
         } else { // y == 0 C == 0
-            if (!loadSecond) {// -> !reallyBadAlign
-                for (i = 0 ; i < h ; i++) {
-                    vsrcCuc = vec_ld(0, src);
-                    vsrc0uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
-                    vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
-                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-                }
-            } else {
-                vec_u8 vsrcDuc;
-                for (i = 0 ; i < h ; i++) {
-                    vsrcCuc = vec_ld(0, src);
-                    vsrcDuc = vec_ld(15, src);
-                    vsrc0uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
-                    if (reallyBadAlign)
-                        vsrc1uc = vsrcDuc;
-                    else
-                        vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
-                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-                }
+            for (i = 0 ; i < h ; i++) {
+                GET_VSRC(vsrc0uc, vsrc1uc, 0, 15, vsrcperm0, vsrcperm1, src);
+                CHROMA_MC8_ALTIVEC_CORE_SIMPLE;
             }
         }
     }
@@ -207,25 +190,28 @@ static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, i
                          ((    x) * (8 - y)),
                          ((8 - x) * (    y)),
                          ((    x) * (    y))};
-    register int i;
     vec_u8 fperm;
-    const vec_s32 vABCD = vec_ld(0, ABCD);
-    const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
-    const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
-    const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
-    const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
     LOAD_ZERO;
+    const vec_s32 vABCD = vec_ld(0, ABCD);
+    const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
+    const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
+    const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
+    const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
     const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
     const vec_u16 v6us  = vec_splat_u16(6);
-    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
-    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
-
-    vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
+    vec_u8 vsrcperm0, vsrcperm1;
     vec_u8 vsrc0uc, vsrc1uc;
     vec_s16 vsrc0ssH, vsrc1ssH;
-    vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
+    vec_u8 vsrc2uc, vsrc3uc;
     vec_s16 vsrc2ssH, vsrc3ssH, psum;
     vec_u8 vdst, ppsum, vfdst, fsum;
+    register int i;
+#if HAVE_BIGENDIAN
+    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+    vsrcperm0 = vec_lvsl(0, src);
+    vsrcperm1 = vec_lvsl(1, src);
+#endif
 
     if (((unsigned long)dst) % 16 == 0) {
         fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
@@ -239,47 +225,14 @@ static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, i
                          0x1C, 0x1D, 0x1E, 0x1F};
     }
 
-    vsrcAuc = vec_ld(0, src);
-
-    if (loadSecond)
-        vsrcBuc = vec_ld(16, src);
-    vsrcperm0 = vec_lvsl(0, src);
-    vsrcperm1 = vec_lvsl(1, src);
-
-    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
-    if (reallyBadAlign)
-        vsrc1uc = vsrcBuc;
-    else
-        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
-
-    vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
-    vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);
-
-    if (!loadSecond) {// -> !reallyBadAlign
-        for (i = 0 ; i < h ; i++) {
-
-
-            vsrcCuc = vec_ld(stride + 0, src);
-
-            vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
-            vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
-            CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
-        }
-    } else {
-        vec_u8 vsrcDuc;
-        for (i = 0 ; i < h ; i++) {
-            vsrcCuc = vec_ld(stride + 0, src);
-            vsrcDuc = vec_ld(stride + 16, src);
+    GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);
 
-            vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
-            if (reallyBadAlign)
-                vsrc3uc = vsrcDuc;
-            else
-                vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+    vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v, (vec_u8)vsrc0uc);
+    vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v, (vec_u8)vsrc1uc);
 
-            CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
-        }
+    for (i = 0 ; i < h ; i++) {
+        GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
+        CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28);
     }
 }
 #endif
-- 
1.9.1

