Control: tags 773580 + patch pending

On Sat, 20 Dec 2014 at 11:06:56 +0000, Simon McVittie wrote:
> On 20/12/14 10:57, Ivo De Decker wrote:
> > The latest upload of lzo2 failed on mips, powerpc, s390x (and sparc).
> 
> In other words, on big-endian architectures (where byteswapping to fetch
> a LE value is not just a memcpy).

Fixed, build-tested (including regression tests) on amd64, armel,
powerpc, s390x and sparc, and tested for the original bug on armel.

I attach the diff since the last maintainer upload, the diff since my
previous NMU, and the updated patch. I'll upload the NMU shortly.
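
For anyone not reading the diffs: with LZO_CFG_MODERN_C defined, the
unaligned accessors become plain memcpy()/memset() calls, which gcc
inlines for constant sizes without emitting misaligned loads, while
the little-endian accessors on big-endian machines assemble the value
through char-typed pointers. A minimal sketch of the two shapes
(illustrative only, with made-up names, not the code actually applied):

    #include <string.h>
    #include <stdint.h>

    /* Native-endian load: memcpy with constant n is inlined by gcc
     * and never makes a misaligned access (the original armel bug). */
    static uint16_t get_ne16(const void *ss)
    {
        uint16_t v;
        memcpy(&v, ss, sizeof v);
        return v;
    }

    /* Little-endian load on either endianness: ISO C lets char
     * pointers alias anything, hence the lzo_memops_TU1p typedef
     * that the previous patch dropped. */
    static uint16_t get_le16(const void *ss)
    {
        const unsigned char *s = ss;
        return (uint16_t) (s[0] | ((uint16_t) s[1] << 8));
    }

(The "+0" in "#if LZO_CFG_MODERN_C+0" just keeps the guard well-formed
when the macro is undefined or defined to nothing.)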

    S
diffstat for lzo2-2.08 lzo2-2.08

 changelog                                                               |   13 +
 patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch |   82 ++++++----
 2 files changed, 68 insertions(+), 27 deletions(-)

diff -Nru lzo2-2.08/debian/changelog lzo2-2.08/debian/changelog
--- lzo2-2.08/debian/changelog  2014-12-16 23:35:43.000000000 +0000
+++ lzo2-2.08/debian/changelog  2014-12-20 17:50:47.000000000 +0000
@@ -1,3 +1,16 @@
+lzo2 (2.08-1.2) unstable; urgency=low
+
+  * Non-maintainer upload.
+  * Adjust patch from previous upload so the "modern C" code path still
+    defines some typedefs: lzo_memops_TU1p is a pointer to unsigned byte
+    (used by the byteswapping implementation on non-powerpc big-endian
+    architectures), and lzo_memops_TU2p and lzo_memops_TU4p
+    are pointers to unsigned 2- and 4-byte quantities (needed by the
+    powerpc assembler implementation). Together, these fix FTBFS on
+    big-endian platforms. (Closes: #773580)
+
+ -- Simon McVittie <s...@debian.org>  Sat, 20 Dec 2014 17:50:38 +0000
+
 lzo2 (2.08-1.1) unstable; urgency=low
 
   * Non-maintainer upload.
diff -Nru lzo2-2.08/debian/patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch lzo2-2.08/debian/patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch
--- lzo2-2.08/debian/patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch  2014-12-16 23:35:43.000000000 +0000
+++ lzo2-2.08/debian/patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch  2014-12-20 17:50:47.000000000 +0000
@@ -1,5 +1,5 @@
 From: Simon McVittie <s...@debian.org>
-Date: Tue, 16 Dec 2014 23:35:27 +0000
+Date: Sat, 20 Dec 2014 17:50:27 +0000
 Subject: Conditionally replace reinvention of memcpy() with calls to memcpy()
 
 gcc already knows how to inline memcpy calls with constant n,
@@ -16,16 +16,16 @@
 
 Bug-Debian: https://bugs.debian.org/757037
 ---
- minilzo/minilzo.c | 64 ++++++++++++++++++++++++++++++++++++++++++++-----------
+ minilzo/minilzo.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++---------
  src/lzo_conf.h    |  2 --
- src/lzo_func.h    | 55 ++++++++++++++++++++++++++++++++++++++---------
- 3 files changed, 97 insertions(+), 24 deletions(-)
+ src/lzo_func.h    | 71 +++++++++++++++++++++++++++++++++++++++++++--------
+ 3 files changed, 125 insertions(+), 24 deletions(-)
 
 diff --git a/minilzo/minilzo.c b/minilzo/minilzo.c
-index ab2be5f..7e15646 100644
+index ab2be5f..146b383 100644
 --- a/minilzo/minilzo.c
 +++ b/minilzo/minilzo.c
-@@ -3354,6 +3354,37 @@ lzo_bitops_unused_funcs(void)
+@@ -3354,6 +3354,49 @@ lzo_bitops_unused_funcs(void)
      LZO_UNUSED_FUNC(lzo_bitops_unused_funcs);
  }
  
@@ -38,6 +38,18 @@
 + * (C11 draft 1570, paragraph 6.5.7) so they are safe for this use */
 +typedef unsigned char *lzo_memops_TU1p;
 +
++/* Used by powerpc assembler implementations of byteswapping */
++#if (LZO_OPT_UNALIGNED16)
++typedef lzo_uint16_t __lzo_may_alias lzo_memops_TU2;
++typedef lzo_memops_TU2 *lzo_memops_TU2p;
++#endif
++
++/* Used by powerpc assembler implementations of byteswapping */
++#if (LZO_OPT_UNALIGNED32)
++typedef lzo_uint32_t __lzo_may_alias lzo_memops_TU4;
++typedef lzo_memops_TU4 *lzo_memops_TU4p;
++#endif
++
 +#define LZO_MEMOPS_SET1(dd,cc) memset(dd, cc, 1)
 +#define LZO_MEMOPS_SET2(dd,cc) memset(dd, cc, 2)
 +#define LZO_MEMOPS_SET3(dd,cc) memset(dd, cc, 3)
@@ -63,7 +75,7 @@
  #if defined(__lzo_alignof) && !(LZO_CFG_NO_UNALIGNED)
  #ifndef __lzo_memops_tcheck
 #define __lzo_memops_tcheck(t,a,b) ((void)0, sizeof(t) == (a) && __lzo_alignof(t) == (b))
-@@ -3523,6 +3554,8 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU8p)0)==8)
+@@ -3523,6 +3566,8 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU8p)0)==8)
      if ((void)0, n__n > 0) do { *d__n++ = *s__n++; } while (--n__n > 0); \
      LZO_BLOCK_END
  
@@ -72,7 +84,7 @@
  __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
  {
      lzo_uint16_t v;
-@@ -3539,7 +3572,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
+@@ -3539,7 +3584,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
  #endif
      return v;
  }
@@ -81,7 +93,7 @@
 #define LZO_MEMOPS_GET_LE16(ss)    * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
  #else
  #define LZO_MEMOPS_GET_LE16(ss)    lzo_memops_get_le16(ss)
-@@ -3561,13 +3594,13 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_le32(const lzo_voidp ss)
+@@ -3561,13 +3606,13 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_le32(const lzo_voidp ss)
  #endif
      return v;
  }
@@ -97,7 +109,7 @@
 #define LZO_MEMOPS_GET_LE64(ss)    * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
  #endif
  
-@@ -3577,7 +3610,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_ne16(const lzo_voidp ss)
+@@ -3577,7 +3622,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_ne16(const lzo_voidp ss)
      LZO_MEMOPS_COPY2(&v, ss);
      return v;
  }
@@ -106,7 +118,7 @@
 #define LZO_MEMOPS_GET_NE16(ss)    * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
  #else
  #define LZO_MEMOPS_GET_NE16(ss)    lzo_memops_get_ne16(ss)
-@@ -3589,14 +3622,23 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_ne32(const lzo_voidp ss)
+@@ -3589,14 +3634,23 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_ne32(const lzo_voidp ss)
      LZO_MEMOPS_COPY4(&v, ss);
      return v;
  }
@@ -132,7 +144,7 @@
  #endif
  
 __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
-@@ -3613,7 +3655,7 @@ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
+@@ -3613,7 +3667,7 @@ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
      d[1] = LZO_BYTE((vv >>  8) & 0xff);
  #endif
  }
@@ -141,7 +153,7 @@
 #define LZO_MEMOPS_PUT_LE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
  #else
  #define LZO_MEMOPS_PUT_LE16(dd,vv) lzo_memops_put_le16(dd,vv)
-@@ -3635,7 +3677,7 @@ __lzo_static_forceinline void lzo_memops_put_le32(lzo_voidp dd, lzo_uint32_t vv)
+@@ -3635,7 +3689,7 @@ __lzo_static_forceinline void lzo_memops_put_le32(lzo_voidp dd, lzo_uint32_t vv)
      d[3] = LZO_BYTE((vv >> 24) & 0xff);
  #endif
  }
@@ -150,7 +162,7 @@
 #define LZO_MEMOPS_PUT_LE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
  #else
  #define LZO_MEMOPS_PUT_LE32(dd,vv) lzo_memops_put_le32(dd,vv)
-@@ -3645,7 +3687,7 @@ __lzo_static_forceinline void lzo_memops_put_ne16(lzo_voidp dd, lzo_uint16_t vv)
+@@ -3645,7 +3699,7 @@ __lzo_static_forceinline void lzo_memops_put_ne16(lzo_voidp dd, lzo_uint16_t vv)
  {
      LZO_MEMOPS_COPY2(dd, &vv);
  }
@@ -159,7 +171,7 @@
 #define LZO_MEMOPS_PUT_NE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
  #else
  #define LZO_MEMOPS_PUT_NE16(dd,vv) lzo_memops_put_ne16(dd,vv)
-@@ -3655,7 +3697,7 @@ __lzo_static_forceinline void lzo_memops_put_ne32(lzo_voidp dd, lzo_uint32_t vv)
+@@ -3655,7 +3709,7 @@ __lzo_static_forceinline void lzo_memops_put_ne32(lzo_voidp dd, lzo_uint32_t vv)
  {
      LZO_MEMOPS_COPY4(dd, &vv);
  }
@@ -168,7 +180,7 @@
 #define LZO_MEMOPS_PUT_NE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
  #else
  #define LZO_MEMOPS_PUT_NE32(dd,vv) lzo_memops_put_ne32(dd,vv)
-@@ -3746,11 +3788,9 @@ lzo_memops_unused_funcs(void)
+@@ -3746,11 +3800,9 @@ lzo_memops_unused_funcs(void)
  #ifndef UA_GET_NE32
  #define UA_GET_NE32         LZO_MEMOPS_GET_NE32
  #endif
@@ -197,10 +209,10 @@
  #define UA_PUT_LE16         LZO_MEMOPS_PUT_LE16
  #endif
 diff --git a/src/lzo_func.h b/src/lzo_func.h
-index dfaa676..dd65f27 100644
+index dfaa676..5e3b814 100644
 --- a/src/lzo_func.h
 +++ b/src/lzo_func.h
-@@ -164,6 +164,30 @@ lzo_bitops_unused_funcs(void)
+@@ -164,6 +164,46 @@ lzo_bitops_unused_funcs(void)
  // memops
  ************************************************************************/
  
@@ -209,6 +221,22 @@
 + * unlike the macros below. */
 +#if LZO_CFG_MODERN_C+0
 +
++/* ISO C says char pointers of any signedness can alias anything
++ * (C11 draft 1570, paragraph 6.5.7) so they are safe for this use */
++typedef unsigned char *lzo_memops_TU1p;
++
++/* Used by powerpc assembler implementations of byteswapping */
++#if (LZO_OPT_UNALIGNED16)
++typedef lzo_uint16_t __lzo_may_alias lzo_memops_TU2;
++typedef lzo_memops_TU2 *lzo_memops_TU2p;
++#endif
++
++/* Used by powerpc assembler implementations of byteswapping */
++#if (LZO_OPT_UNALIGNED32)
++typedef lzo_uint32_t __lzo_may_alias lzo_memops_TU4;
++typedef lzo_memops_TU4 *lzo_memops_TU4p;
++#endif
++
 +#define LZO_MEMOPS_SET1(dd,cc) memset(dd, cc, 1)
 +#define LZO_MEMOPS_SET2(dd,cc) memset(dd, cc, 2)
 +#define LZO_MEMOPS_SET3(dd,cc) memset(dd, cc, 3)
@@ -231,7 +259,7 @@
  #if defined(__lzo_alignof) && !(LZO_CFG_NO_UNALIGNED)
  #ifndef __lzo_memops_tcheck
 #define __lzo_memops_tcheck(t,a,b) ((void)0, sizeof(t) == (a) && __lzo_alignof(t) == (b))
-@@ -333,6 +357,8 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU8p)0)==8)
+@@ -333,6 +373,8 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU8p)0)==8)
      if ((void)0, n__n > 0) do { *d__n++ = *s__n++; } while (--n__n > 0); \
      LZO_BLOCK_END
  
@@ -240,7 +268,7 @@
  __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
  {
      lzo_uint16_t v;
-@@ -349,7 +375,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
+@@ -349,7 +391,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
  #endif
      return v;
  }
@@ -249,7 +277,7 @@
 #define LZO_MEMOPS_GET_LE16(ss)    * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
  #else
  #define LZO_MEMOPS_GET_LE16(ss)    lzo_memops_get_le16(ss)
-@@ -371,13 +397,13 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_le32(const lzo_voidp ss)
+@@ -371,13 +413,13 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_le32(const lzo_voidp ss)
  #endif
      return v;
  }
@@ -265,7 +293,7 @@
 #define LZO_MEMOPS_GET_LE64(ss)    * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
  #endif
  
-@@ -387,7 +413,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_ne16(const lzo_voidp ss)
+@@ -387,7 +429,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_ne16(const lzo_voidp ss)
      LZO_MEMOPS_COPY2(&v, ss);
      return v;
  }
@@ -274,7 +302,7 @@
 #define LZO_MEMOPS_GET_NE16(ss)    * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
  #else
  #define LZO_MEMOPS_GET_NE16(ss)    lzo_memops_get_ne16(ss)
-@@ -399,14 +425,23 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_ne32(const lzo_voidp ss)
+@@ -399,14 +441,23 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_ne32(const lzo_voidp ss)
      LZO_MEMOPS_COPY4(&v, ss);
      return v;
  }
@@ -300,7 +328,7 @@
  #endif
  
 __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
-@@ -423,7 +458,7 @@ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
+@@ -423,7 +474,7 @@ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
      d[1] = LZO_BYTE((vv >>  8) & 0xff);
  #endif
  }
@@ -309,7 +337,7 @@
 #define LZO_MEMOPS_PUT_LE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
  #else
  #define LZO_MEMOPS_PUT_LE16(dd,vv) lzo_memops_put_le16(dd,vv)
-@@ -445,7 +480,7 @@ __lzo_static_forceinline void lzo_memops_put_le32(lzo_voidp dd, lzo_uint32_t vv)
+@@ -445,7 +496,7 @@ __lzo_static_forceinline void lzo_memops_put_le32(lzo_voidp dd, lzo_uint32_t vv)
      d[3] = LZO_BYTE((vv >> 24) & 0xff);
  #endif
  }
@@ -318,7 +346,7 @@
 #define LZO_MEMOPS_PUT_LE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
  #else
  #define LZO_MEMOPS_PUT_LE32(dd,vv) lzo_memops_put_le32(dd,vv)
-@@ -455,7 +490,7 @@ __lzo_static_forceinline void lzo_memops_put_ne16(lzo_voidp dd, lzo_uint16_t vv)
+@@ -455,7 +506,7 @@ __lzo_static_forceinline void lzo_memops_put_ne16(lzo_voidp dd, lzo_uint16_t vv)
  {
      LZO_MEMOPS_COPY2(dd, &vv);
  }
@@ -327,7 +355,7 @@
 #define LZO_MEMOPS_PUT_NE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
  #else
  #define LZO_MEMOPS_PUT_NE16(dd,vv) lzo_memops_put_ne16(dd,vv)
-@@ -465,7 +500,7 @@ __lzo_static_forceinline void lzo_memops_put_ne32(lzo_voidp dd, lzo_uint32_t vv)
+@@ -465,7 +516,7 @@ __lzo_static_forceinline void lzo_memops_put_ne32(lzo_voidp dd, lzo_uint32_t vv)
  {
      LZO_MEMOPS_COPY4(dd, &vv);
  }
diffstat for lzo2-2.08 lzo2-2.08

 changelog                                                               |   23 
 patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch |  366 ++++++++++
 patches/series                                                          |    1 
 rules                                                                   |    1 
 4 files changed, 391 insertions(+)

diff -Nru lzo2-2.08/debian/changelog lzo2-2.08/debian/changelog
--- lzo2-2.08/debian/changelog  2014-07-15 02:03:18.000000000 +0100
+++ lzo2-2.08/debian/changelog  2014-12-20 17:50:47.000000000 +0000
@@ -1,3 +1,26 @@
+lzo2 (2.08-1.2) unstable; urgency=low
+
+  * Non-maintainer upload.
+  * Adjust patch from previous upload so the "modern C" code path still
+    defines some typedefs: lzo_memops_TU1p is a pointer to unsigned byte
+    (used by the byteswapping implementation on non-powerpc big-endian
+    architectures), and lzo_memops_TU2p and lzo_memops_TU4p
+    are pointers to unsigned 2- and 4-byte quantities (needed by the
+    powerpc assembler implementation). Together, these fix FTBFS on
+    big-endian platforms. (Closes: #773580)
+
+ -- Simon McVittie <s...@debian.org>  Sat, 20 Dec 2014 17:50:38 +0000
+
+lzo2 (2.08-1.1) unstable; urgency=low
+
+  * Non-maintainer upload.
+  * Replace liblzo's reinvention of memcpy() with calls to memcpy().
+    gcc already knows how to inline memcpy calls with constant n,
+    and also gets the alignment constraints right, avoiding incorrect
+    unaligned accesses on armel (Closes: #757037)
+
+ -- Simon McVittie <s...@debian.org>  Tue, 16 Dec 2014 23:35:36 +0000
+
 lzo2 (2.08-1) unstable; urgency=low
 
   * New upstream release (closes: #752861) (CVE-2014-4607)
diff -Nru lzo2-2.08/debian/patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch lzo2-2.08/debian/patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch
--- lzo2-2.08/debian/patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch  1970-01-01 01:00:00.000000000 +0100
+++ lzo2-2.08/debian/patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch  2014-12-20 17:50:47.000000000 +0000
@@ -0,0 +1,366 @@
+From: Simon McVittie <s...@debian.org>
+Date: Sat, 20 Dec 2014 17:50:27 +0000
+Subject: Conditionally replace reinvention of memcpy() with calls to memcpy()
+
+gcc already knows how to inline memcpy calls with constant n,
+and also gets the alignment constraints right, avoiding incorrect
+unaligned accesses on armel.
+
+Unconditionally define LZO_MEMOPS_GET_NE64 since it's trivial
+to do in terms of LZO_MEMOPS_COPY8.
+
+I've made the "modern C" version conditional since lzo seems to aim
+to be portable to anything and everything, but it would probably
+be better off just requiring a compiler from this century and
+a set of correctly working memwhatever() implementations.
+
+Bug-Debian: https://bugs.debian.org/757037
+---
+ minilzo/minilzo.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++---------
+ src/lzo_conf.h    |  2 --
+ src/lzo_func.h    | 71 +++++++++++++++++++++++++++++++++++++++++++--------
+ 3 files changed, 125 insertions(+), 24 deletions(-)
+
+diff --git a/minilzo/minilzo.c b/minilzo/minilzo.c
+index ab2be5f..146b383 100644
+--- a/minilzo/minilzo.c
++++ b/minilzo/minilzo.c
+@@ -3354,6 +3354,49 @@ lzo_bitops_unused_funcs(void)
+     LZO_UNUSED_FUNC(lzo_bitops_unused_funcs);
+ }
+ 
++/* Modern compilers know that memcpy() and memset() with constant n can be
++ * inlined, and do so without violating alignment constraints on e.g. ARMv5,
++ * unlike the macros below. */
++#if LZO_CFG_MODERN_C+0
++
++/* ISO C says char pointers of any signedness can alias anything
++ * (C11 draft 1570, paragraph 6.5.7) so they are safe for this use */
++typedef unsigned char *lzo_memops_TU1p;
++
++/* Used by powerpc assembler implementations of byteswapping */
++#if (LZO_OPT_UNALIGNED16)
++typedef lzo_uint16_t __lzo_may_alias lzo_memops_TU2;
++typedef lzo_memops_TU2 *lzo_memops_TU2p;
++#endif
++
++/* Used by powerpc assembler implementations of byteswapping */
++#if (LZO_OPT_UNALIGNED32)
++typedef lzo_uint32_t __lzo_may_alias lzo_memops_TU4;
++typedef lzo_memops_TU4 *lzo_memops_TU4p;
++#endif
++
++#define LZO_MEMOPS_SET1(dd,cc) memset(dd, cc, 1)
++#define LZO_MEMOPS_SET2(dd,cc) memset(dd, cc, 2)
++#define LZO_MEMOPS_SET3(dd,cc) memset(dd, cc, 3)
++#define LZO_MEMOPS_SET4(dd,cc) memset(dd, cc, 4)
++/* lzo does not appear to use these macros between overlapping buffers
++ * in practice, so memmove() (which is not inlined by gcc) is unnecessary. */
++#define LZO_MEMOPS_MOVE1(dd,ss) memcpy(dd, ss, 1)
++#define LZO_MEMOPS_MOVE2(dd,ss) memcpy(dd, ss, 2)
++#define LZO_MEMOPS_MOVE3(dd,ss) memcpy(dd, ss, 3)
++#define LZO_MEMOPS_MOVE4(dd,ss) memcpy(dd, ss, 4)
++#define LZO_MEMOPS_MOVE8(dd,ss) memcpy(dd, ss, 8)
++#define LZO_MEMOPS_COPY1(dd,ss) memcpy(dd, ss, 1)
++#define LZO_MEMOPS_COPY2(dd,ss) memcpy(dd, ss, 2)
++#define LZO_MEMOPS_COPY4(dd,ss) memcpy(dd, ss, 4)
++#define LZO_MEMOPS_COPY8(dd,ss) memcpy(dd, ss, 8)
++#define LZO_MEMOPS_COPYN(dd,ss,nn) memcpy(dd, ss, nn)
++
++#else /* !LZO_CFG_MODERN_C */
++
++/* Standard C says a lot of this is undefined behaviour; maybe
++ * you can get away with it in older compilers. */
++
+ #if defined(__lzo_alignof) && !(LZO_CFG_NO_UNALIGNED)
+ #ifndef __lzo_memops_tcheck
+ #define __lzo_memops_tcheck(t,a,b) ((void)0, sizeof(t) == (a) && __lzo_alignof(t) == (b))
+@@ -3523,6 +3566,8 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU8p)0)==8)
+     if ((void)0, n__n > 0) do { *d__n++ = *s__n++; } while (--n__n > 0); \
+     LZO_BLOCK_END
+ 
++#endif /* !LZO_CFG_MODERN_C */
++
+ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
+ {
+     lzo_uint16_t v;
+@@ -3539,7 +3584,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
+ #endif
+     return v;
+ }
+-#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_LE16(ss)    * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_LE16(ss)    lzo_memops_get_le16(ss)
+@@ -3561,13 +3606,13 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_le32(const lzo_voidp ss)
+ #endif
+     return v;
+ }
+-#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_LE32(ss)    * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_LE32(ss)    lzo_memops_get_le32(ss)
+ #endif
+ 
+-#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_LE64(ss)    * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
+ #endif
+ 
+@@ -3577,7 +3622,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_ne16(const lzo_voidp ss)
+     LZO_MEMOPS_COPY2(&v, ss);
+     return v;
+ }
+-#if (LZO_OPT_UNALIGNED16)
++#if (LZO_OPT_UNALIGNED16) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_NE16(ss)    * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_NE16(ss)    lzo_memops_get_ne16(ss)
+@@ -3589,14 +3634,23 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_ne32(const lzo_voidp ss)
+     LZO_MEMOPS_COPY4(&v, ss);
+     return v;
+ }
+-#if (LZO_OPT_UNALIGNED32)
++#if (LZO_OPT_UNALIGNED32) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_NE32(ss)    * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_NE32(ss)    lzo_memops_get_ne32(ss)
+ #endif
+ 
+-#if (LZO_OPT_UNALIGNED64)
++__lzo_static_forceinline lzo_uint64_t lzo_memops_get_ne64(const lzo_voidp ss)
++{
++    lzo_uint64_t v;
++    LZO_MEMOPS_COPY8(&v, ss);
++    return v;
++}
++
++#if (LZO_OPT_UNALIGNED64) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_NE64(ss)    * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
++#else
++#define LZO_MEMOPS_GET_NE64(ss)    lzo_memops_get_ne64(ss)
+ #endif
+ 
+ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
+@@ -3613,7 +3667,7 @@ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
+     d[1] = LZO_BYTE((vv >>  8) & 0xff);
+ #endif
+ }
+-#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_LE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_LE16(dd,vv) lzo_memops_put_le16(dd,vv)
+@@ -3635,7 +3689,7 @@ __lzo_static_forceinline void lzo_memops_put_le32(lzo_voidp dd, lzo_uint32_t vv)
+     d[3] = LZO_BYTE((vv >> 24) & 0xff);
+ #endif
+ }
+-#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_LE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_LE32(dd,vv) lzo_memops_put_le32(dd,vv)
+@@ -3645,7 +3699,7 @@ __lzo_static_forceinline void lzo_memops_put_ne16(lzo_voidp dd, lzo_uint16_t vv)
+ {
+     LZO_MEMOPS_COPY2(dd, &vv);
+ }
+-#if (LZO_OPT_UNALIGNED16)
++#if (LZO_OPT_UNALIGNED16) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_NE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_NE16(dd,vv) lzo_memops_put_ne16(dd,vv)
+@@ -3655,7 +3709,7 @@ __lzo_static_forceinline void lzo_memops_put_ne32(lzo_voidp dd, lzo_uint32_t vv)
+ {
+     LZO_MEMOPS_COPY4(dd, &vv);
+ }
+-#if (LZO_OPT_UNALIGNED32)
++#if (LZO_OPT_UNALIGNED32) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_NE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_NE32(dd,vv) lzo_memops_put_ne32(dd,vv)
+@@ -3746,11 +3800,9 @@ lzo_memops_unused_funcs(void)
+ #ifndef UA_GET_NE32
+ #define UA_GET_NE32         LZO_MEMOPS_GET_NE32
+ #endif
+-#ifdef LZO_MEMOPS_GET_NE64
+ #ifndef UA_GET_NE64
+ #define UA_GET_NE64         LZO_MEMOPS_GET_NE64
+ #endif
+-#endif
+ #ifndef UA_PUT_LE16
+ #define UA_PUT_LE16         LZO_MEMOPS_PUT_LE16
+ #endif
+diff --git a/src/lzo_conf.h b/src/lzo_conf.h
+index cc2e85d..3c77caa 100644
+--- a/src/lzo_conf.h
++++ b/src/lzo_conf.h
+@@ -314,11 +314,9 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_uint64_t) == 8)
+ #ifndef UA_GET_NE32
+ #define UA_GET_NE32         LZO_MEMOPS_GET_NE32
+ #endif
+-#ifdef LZO_MEMOPS_GET_NE64
+ #ifndef UA_GET_NE64
+ #define UA_GET_NE64         LZO_MEMOPS_GET_NE64
+ #endif
+-#endif
+ #ifndef UA_PUT_LE16
+ #define UA_PUT_LE16         LZO_MEMOPS_PUT_LE16
+ #endif
+diff --git a/src/lzo_func.h b/src/lzo_func.h
+index dfaa676..5e3b814 100644
+--- a/src/lzo_func.h
++++ b/src/lzo_func.h
+@@ -164,6 +164,46 @@ lzo_bitops_unused_funcs(void)
+ // memops
+ ************************************************************************/
+ 
++/* Modern compilers know that memcpy() and memset() with constant n can be
++ * inlined, and do so without violating alignment constraints on e.g. ARMv5,
++ * unlike the macros below. */
++#if LZO_CFG_MODERN_C+0
++
++/* ISO C says char pointers of any signedness can alias anything
++ * (C11 draft 1570, paragraph 6.5.7) so they are safe for this use */
++typedef unsigned char *lzo_memops_TU1p;
++
++/* Used by powerpc assembler implementations of byteswapping */
++#if (LZO_OPT_UNALIGNED16)
++typedef lzo_uint16_t __lzo_may_alias lzo_memops_TU2;
++typedef lzo_memops_TU2 *lzo_memops_TU2p;
++#endif
++
++/* Used by powerpc assembler implementations of byteswapping */
++#if (LZO_OPT_UNALIGNED32)
++typedef lzo_uint32_t __lzo_may_alias lzo_memops_TU4;
++typedef lzo_memops_TU4 *lzo_memops_TU4p;
++#endif
++
++#define LZO_MEMOPS_SET1(dd,cc) memset(dd, cc, 1)
++#define LZO_MEMOPS_SET2(dd,cc) memset(dd, cc, 2)
++#define LZO_MEMOPS_SET3(dd,cc) memset(dd, cc, 3)
++#define LZO_MEMOPS_SET4(dd,cc) memset(dd, cc, 4)
++/* lzo does not appear to use these macros between overlapping buffers
++ * in practice, so memmove() (which is not inlined by gcc) is unnecessary. */
++#define LZO_MEMOPS_MOVE1(dd,ss) memcpy(dd, ss, 1)
++#define LZO_MEMOPS_MOVE2(dd,ss) memcpy(dd, ss, 2)
++#define LZO_MEMOPS_MOVE3(dd,ss) memcpy(dd, ss, 3)
++#define LZO_MEMOPS_MOVE4(dd,ss) memcpy(dd, ss, 4)
++#define LZO_MEMOPS_MOVE8(dd,ss) memcpy(dd, ss, 8)
++#define LZO_MEMOPS_COPY1(dd,ss) memcpy(dd, ss, 1)
++#define LZO_MEMOPS_COPY2(dd,ss) memcpy(dd, ss, 2)
++#define LZO_MEMOPS_COPY4(dd,ss) memcpy(dd, ss, 4)
++#define LZO_MEMOPS_COPY8(dd,ss) memcpy(dd, ss, 8)
++#define LZO_MEMOPS_COPYN(dd,ss,nn) memcpy(dd, ss, nn)
++
++#else /* !LZO_CFG_MODERN_C */
++
+ #if defined(__lzo_alignof) && !(LZO_CFG_NO_UNALIGNED)
+ #ifndef __lzo_memops_tcheck
+ #define __lzo_memops_tcheck(t,a,b) ((void)0, sizeof(t) == (a) && __lzo_alignof(t) == (b))
+@@ -333,6 +373,8 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU8p)0)==8)
+     if ((void)0, n__n > 0) do { *d__n++ = *s__n++; } while (--n__n > 0); \
+     LZO_BLOCK_END
+ 
++#endif /* !LZO_CFG_MODERN_C */
++
+ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
+ {
+     lzo_uint16_t v;
+@@ -349,7 +391,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
+ #endif
+     return v;
+ }
+-#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_LE16(ss)    * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_LE16(ss)    lzo_memops_get_le16(ss)
+@@ -371,13 +413,13 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_le32(const lzo_voidp ss)
+ #endif
+     return v;
+ }
+-#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_LE32(ss)    * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_LE32(ss)    lzo_memops_get_le32(ss)
+ #endif
+ 
+-#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_LE64(ss)    * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
+ #endif
+ 
+@@ -387,7 +429,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_ne16(const lzo_voidp ss)
+     LZO_MEMOPS_COPY2(&v, ss);
+     return v;
+ }
+-#if (LZO_OPT_UNALIGNED16)
++#if (LZO_OPT_UNALIGNED16) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_NE16(ss)    * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_NE16(ss)    lzo_memops_get_ne16(ss)
+@@ -399,14 +441,23 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_ne32(const lzo_voidp ss)
+     LZO_MEMOPS_COPY4(&v, ss);
+     return v;
+ }
+-#if (LZO_OPT_UNALIGNED32)
++#if (LZO_OPT_UNALIGNED32) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_NE32(ss)    * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_NE32(ss)    lzo_memops_get_ne32(ss)
+ #endif
+ 
+-#if (LZO_OPT_UNALIGNED64)
++__lzo_static_forceinline lzo_uint64_t lzo_memops_get_ne64(const lzo_voidp ss)
++{
++    lzo_uint64_t v;
++    LZO_MEMOPS_COPY8(&v, ss);
++    return v;
++}
++
++#if (LZO_OPT_UNALIGNED64) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_NE64(ss)    * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
++#else
++#define LZO_MEMOPS_GET_NE64(ss)    lzo_memops_get_ne64(ss)
+ #endif
+ 
+ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
+@@ -423,7 +474,7 @@ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
+     d[1] = LZO_BYTE((vv >>  8) & 0xff);
+ #endif
+ }
+-#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_LE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_LE16(dd,vv) lzo_memops_put_le16(dd,vv)
+@@ -445,7 +496,7 @@ __lzo_static_forceinline void lzo_memops_put_le32(lzo_voidp dd, lzo_uint32_t vv)
+     d[3] = LZO_BYTE((vv >> 24) & 0xff);
+ #endif
+ }
+-#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_LE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_LE32(dd,vv) lzo_memops_put_le32(dd,vv)
+@@ -455,7 +506,7 @@ __lzo_static_forceinline void lzo_memops_put_ne16(lzo_voidp dd, lzo_uint16_t vv)
+ {
+     LZO_MEMOPS_COPY2(dd, &vv);
+ }
+-#if (LZO_OPT_UNALIGNED16)
++#if (LZO_OPT_UNALIGNED16) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_NE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_NE16(dd,vv) lzo_memops_put_ne16(dd,vv)
+@@ -465,7 +516,7 @@ __lzo_static_forceinline void lzo_memops_put_ne32(lzo_voidp dd, lzo_uint32_t vv)
+ {
+     LZO_MEMOPS_COPY4(dd, &vv);
+ }
+-#if (LZO_OPT_UNALIGNED32)
++#if (LZO_OPT_UNALIGNED32) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_NE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_NE32(dd,vv) lzo_memops_put_ne32(dd,vv)
diff -Nru lzo2-2.08/debian/patches/series lzo2-2.08/debian/patches/series
--- lzo2-2.08/debian/patches/series     1970-01-01 01:00:00.000000000 +0100
+++ lzo2-2.08/debian/patches/series     2014-12-20 17:50:47.000000000 +0000
@@ -0,0 +1 @@
+0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch
diff -Nru lzo2-2.08/debian/rules lzo2-2.08/debian/rules
--- lzo2-2.08/debian/rules      2013-08-26 20:24:58.000000000 +0100
+++ lzo2-2.08/debian/rules      2014-12-20 17:50:47.000000000 +0000
@@ -9,6 +9,7 @@
 DEB_INSTALL_DOCS_ALL =
 DEB_MAKE_CHECK_TARGET = check test
 DEB_DH_MAKESHLIBS_ARGS = --add-udeb=liblzo2-2-udeb
+CPPFLAGS += -DLZO_CFG_MODERN_C=1
 
 common-install-impl::
        mkdir -p $(DEB_DESTDIR)/lib/$(DEB_HOST_MULTIARCH)
From: Simon McVittie <s...@debian.org>
Date: Sat, 20 Dec 2014 17:50:27 +0000
Subject: Conditionally replace reinvention of memcpy() with calls to memcpy()

gcc already knows how to inline memcpy calls with constant n,
and also gets the alignment constraints right, avoiding incorrect
unaligned accesses on armel.

Unconditionally define LZO_MEMOPS_GET_NE64 since it's trivial
to do in terms of LZO_MEMOPS_COPY8.

I've made the "modern C" version conditional since lzo seems to aim
to be portable to anything and everything, but it would probably
be better off just requiring a compiler from this century and
a set of correctly working memwhatever() implementations.

Bug-Debian: https://bugs.debian.org/757037
---
 minilzo/minilzo.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++---------
 src/lzo_conf.h    |  2 --
 src/lzo_func.h    | 71 +++++++++++++++++++++++++++++++++++++++++++--------
 3 files changed, 125 insertions(+), 24 deletions(-)

diff --git a/minilzo/minilzo.c b/minilzo/minilzo.c
index ab2be5f..146b383 100644
--- a/minilzo/minilzo.c
+++ b/minilzo/minilzo.c
@@ -3354,6 +3354,49 @@ lzo_bitops_unused_funcs(void)
     LZO_UNUSED_FUNC(lzo_bitops_unused_funcs);
 }
 
+/* Modern compilers know that memcpy() and memset() with constant n can be
+ * inlined, and do so without violating alignment constraints on e.g. ARMv5,
+ * unlike the macros below. */
+#if LZO_CFG_MODERN_C+0
+
+/* ISO C says char pointers of any signedness can alias anything
+ * (C11 draft 1570, paragraph 6.5.7) so they are safe for this use */
+typedef unsigned char *lzo_memops_TU1p;
+
+/* Used by powerpc assembler implementations of byteswapping */
+#if (LZO_OPT_UNALIGNED16)
+typedef lzo_uint16_t __lzo_may_alias lzo_memops_TU2;
+typedef lzo_memops_TU2 *lzo_memops_TU2p;
+#endif
+
+/* Used by powerpc assembler implementations of byteswapping */
+#if (LZO_OPT_UNALIGNED32)
+typedef lzo_uint32_t __lzo_may_alias lzo_memops_TU4;
+typedef lzo_memops_TU4 *lzo_memops_TU4p;
+#endif
+
+#define LZO_MEMOPS_SET1(dd,cc) memset(dd, cc, 1)
+#define LZO_MEMOPS_SET2(dd,cc) memset(dd, cc, 2)
+#define LZO_MEMOPS_SET3(dd,cc) memset(dd, cc, 3)
+#define LZO_MEMOPS_SET4(dd,cc) memset(dd, cc, 4)
+/* lzo does not appear to use these macros between overlapping buffers
+ * in practice, so memmove() (which is not inlined by gcc) is unnecessary. */
+#define LZO_MEMOPS_MOVE1(dd,ss) memcpy(dd, ss, 1)
+#define LZO_MEMOPS_MOVE2(dd,ss) memcpy(dd, ss, 2)
+#define LZO_MEMOPS_MOVE3(dd,ss) memcpy(dd, ss, 3)
+#define LZO_MEMOPS_MOVE4(dd,ss) memcpy(dd, ss, 4)
+#define LZO_MEMOPS_MOVE8(dd,ss) memcpy(dd, ss, 8)
+#define LZO_MEMOPS_COPY1(dd,ss) memcpy(dd, ss, 1)
+#define LZO_MEMOPS_COPY2(dd,ss) memcpy(dd, ss, 2)
+#define LZO_MEMOPS_COPY4(dd,ss) memcpy(dd, ss, 4)
+#define LZO_MEMOPS_COPY8(dd,ss) memcpy(dd, ss, 8)
+#define LZO_MEMOPS_COPYN(dd,ss,nn) memcpy(dd, ss, nn)
+
+#else /* !LZO_CFG_MODERN_C */
+
+/* Standard C says a lot of this is undefined behaviour; maybe
+ * you can get away with it in older compilers. */
+
 #if defined(__lzo_alignof) && !(LZO_CFG_NO_UNALIGNED)
 #ifndef __lzo_memops_tcheck
 #define __lzo_memops_tcheck(t,a,b) ((void)0, sizeof(t) == (a) && __lzo_alignof(t) == (b))
@@ -3523,6 +3566,8 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU8p)0)==8)
     if ((void)0, n__n > 0) do { *d__n++ = *s__n++; } while (--n__n > 0); \
     LZO_BLOCK_END
 
+#endif /* !LZO_CFG_MODERN_C */
+
 __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
 {
     lzo_uint16_t v;
@@ -3539,7 +3584,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
 #endif
     return v;
 }
-#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
+#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_GET_LE16(ss)    * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
 #else
 #define LZO_MEMOPS_GET_LE16(ss)    lzo_memops_get_le16(ss)
@@ -3561,13 +3606,13 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_le32(const lzo_voidp ss)
 #endif
     return v;
 }
-#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
+#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_GET_LE32(ss)    * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
 #else
 #define LZO_MEMOPS_GET_LE32(ss)    lzo_memops_get_le32(ss)
 #endif
 
-#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN)
+#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_GET_LE64(ss)    * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
 #endif
 
@@ -3577,7 +3622,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_ne16(const lzo_voidp ss)
     LZO_MEMOPS_COPY2(&v, ss);
     return v;
 }
-#if (LZO_OPT_UNALIGNED16)
+#if (LZO_OPT_UNALIGNED16) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_GET_NE16(ss)    * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
 #else
 #define LZO_MEMOPS_GET_NE16(ss)    lzo_memops_get_ne16(ss)
@@ -3589,14 +3634,23 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_ne32(const lzo_voidp ss)
     LZO_MEMOPS_COPY4(&v, ss);
     return v;
 }
-#if (LZO_OPT_UNALIGNED32)
+#if (LZO_OPT_UNALIGNED32) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_GET_NE32(ss)    * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
 #else
 #define LZO_MEMOPS_GET_NE32(ss)    lzo_memops_get_ne32(ss)
 #endif
 
-#if (LZO_OPT_UNALIGNED64)
+__lzo_static_forceinline lzo_uint64_t lzo_memops_get_ne64(const lzo_voidp ss)
+{
+    lzo_uint64_t v;
+    LZO_MEMOPS_COPY8(&v, ss);
+    return v;
+}
+
+#if (LZO_OPT_UNALIGNED64) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_GET_NE64(ss)    * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
+#else
+#define LZO_MEMOPS_GET_NE64(ss)    lzo_memops_get_ne64(ss)
 #endif
 
 __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
@@ -3613,7 +3667,7 @@ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
     d[1] = LZO_BYTE((vv >>  8) & 0xff);
 #endif
 }
-#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
+#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_PUT_LE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
 #else
 #define LZO_MEMOPS_PUT_LE16(dd,vv) lzo_memops_put_le16(dd,vv)
@@ -3635,7 +3689,7 @@ __lzo_static_forceinline void lzo_memops_put_le32(lzo_voidp dd, lzo_uint32_t vv)
     d[3] = LZO_BYTE((vv >> 24) & 0xff);
 #endif
 }
-#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
+#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_PUT_LE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
 #else
 #define LZO_MEMOPS_PUT_LE32(dd,vv) lzo_memops_put_le32(dd,vv)
@@ -3645,7 +3699,7 @@ __lzo_static_forceinline void lzo_memops_put_ne16(lzo_voidp dd, lzo_uint16_t vv)
 {
     LZO_MEMOPS_COPY2(dd, &vv);
 }
-#if (LZO_OPT_UNALIGNED16)
+#if (LZO_OPT_UNALIGNED16) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_PUT_NE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
 #else
 #define LZO_MEMOPS_PUT_NE16(dd,vv) lzo_memops_put_ne16(dd,vv)
@@ -3655,7 +3709,7 @@ __lzo_static_forceinline void lzo_memops_put_ne32(lzo_voidp dd, lzo_uint32_t vv)
 {
     LZO_MEMOPS_COPY4(dd, &vv);
 }
-#if (LZO_OPT_UNALIGNED32)
+#if (LZO_OPT_UNALIGNED32) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_PUT_NE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
 #else
 #define LZO_MEMOPS_PUT_NE32(dd,vv) lzo_memops_put_ne32(dd,vv)
@@ -3746,11 +3800,9 @@ lzo_memops_unused_funcs(void)
 #ifndef UA_GET_NE32
 #define UA_GET_NE32         LZO_MEMOPS_GET_NE32
 #endif
-#ifdef LZO_MEMOPS_GET_NE64
 #ifndef UA_GET_NE64
 #define UA_GET_NE64         LZO_MEMOPS_GET_NE64
 #endif
-#endif
 #ifndef UA_PUT_LE16
 #define UA_PUT_LE16         LZO_MEMOPS_PUT_LE16
 #endif
diff --git a/src/lzo_conf.h b/src/lzo_conf.h
index cc2e85d..3c77caa 100644
--- a/src/lzo_conf.h
+++ b/src/lzo_conf.h
@@ -314,11 +314,9 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_uint64_t) == 8)
 #ifndef UA_GET_NE32
 #define UA_GET_NE32         LZO_MEMOPS_GET_NE32
 #endif
-#ifdef LZO_MEMOPS_GET_NE64
 #ifndef UA_GET_NE64
 #define UA_GET_NE64         LZO_MEMOPS_GET_NE64
 #endif
-#endif
 #ifndef UA_PUT_LE16
 #define UA_PUT_LE16         LZO_MEMOPS_PUT_LE16
 #endif
diff --git a/src/lzo_func.h b/src/lzo_func.h
index dfaa676..5e3b814 100644
--- a/src/lzo_func.h
+++ b/src/lzo_func.h
@@ -164,6 +164,46 @@ lzo_bitops_unused_funcs(void)
 // memops
 ************************************************************************/
 
+/* Modern compilers know that memcpy() and memset() with constant n can be
+ * inlined, and do so without violating alignment constraints on e.g. ARMv5,
+ * unlike the macros below. */
+#if LZO_CFG_MODERN_C+0
+
+/* ISO C says char pointers of any signedness can alias anything
+ * (C11 draft 1570, paragraph 6.5.7) so they are safe for this use */
+typedef unsigned char *lzo_memops_TU1p;
+
+/* Used by powerpc assembler implementations of byteswapping */
+#if (LZO_OPT_UNALIGNED16)
+typedef lzo_uint16_t __lzo_may_alias lzo_memops_TU2;
+typedef lzo_memops_TU2 *lzo_memops_TU2p;
+#endif
+
+/* Used by powerpc assembler implementations of byteswapping */
+#if (LZO_OPT_UNALIGNED32)
+typedef lzo_uint32_t __lzo_may_alias lzo_memops_TU4;
+typedef lzo_memops_TU4 *lzo_memops_TU4p;
+#endif
+
+#define LZO_MEMOPS_SET1(dd,cc) memset(dd, cc, 1)
+#define LZO_MEMOPS_SET2(dd,cc) memset(dd, cc, 2)
+#define LZO_MEMOPS_SET3(dd,cc) memset(dd, cc, 3)
+#define LZO_MEMOPS_SET4(dd,cc) memset(dd, cc, 4)
+/* lzo does not appear to use these macros between overlapping buffers
+ * in practice, so memmove() (which is not inlined by gcc) is unnecessary. */
+#define LZO_MEMOPS_MOVE1(dd,ss) memcpy(dd, ss, 1)
+#define LZO_MEMOPS_MOVE2(dd,ss) memcpy(dd, ss, 2)
+#define LZO_MEMOPS_MOVE3(dd,ss) memcpy(dd, ss, 3)
+#define LZO_MEMOPS_MOVE4(dd,ss) memcpy(dd, ss, 4)
+#define LZO_MEMOPS_MOVE8(dd,ss) memcpy(dd, ss, 8)
+#define LZO_MEMOPS_COPY1(dd,ss) memcpy(dd, ss, 1)
+#define LZO_MEMOPS_COPY2(dd,ss) memcpy(dd, ss, 2)
+#define LZO_MEMOPS_COPY4(dd,ss) memcpy(dd, ss, 4)
+#define LZO_MEMOPS_COPY8(dd,ss) memcpy(dd, ss, 8)
+#define LZO_MEMOPS_COPYN(dd,ss,nn) memcpy(dd, ss, nn)
+
+#else /* !LZO_CFG_MODERN_C */
+
 #if defined(__lzo_alignof) && !(LZO_CFG_NO_UNALIGNED)
 #ifndef __lzo_memops_tcheck
 #define __lzo_memops_tcheck(t,a,b) ((void)0, sizeof(t) == (a) && __lzo_alignof(t) == (b))
@@ -333,6 +373,8 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU8p)0)==8)
     if ((void)0, n__n > 0) do { *d__n++ = *s__n++; } while (--n__n > 0); \
     LZO_BLOCK_END
 
+#endif /* !LZO_CFG_MODERN_C */
+
 __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
 {
     lzo_uint16_t v;
@@ -349,7 +391,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
 #endif
     return v;
 }
-#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
+#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_GET_LE16(ss)    * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
 #else
 #define LZO_MEMOPS_GET_LE16(ss)    lzo_memops_get_le16(ss)
@@ -371,13 +413,13 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_le32(const lzo_voidp ss)
 #endif
     return v;
 }
-#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
+#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_GET_LE32(ss)    * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
 #else
 #define LZO_MEMOPS_GET_LE32(ss)    lzo_memops_get_le32(ss)
 #endif
 
-#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN)
+#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_GET_LE64(ss)    * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
 #endif
 
@@ -387,7 +429,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_ne16(const lzo_voidp ss)
     LZO_MEMOPS_COPY2(&v, ss);
     return v;
 }
-#if (LZO_OPT_UNALIGNED16)
+#if (LZO_OPT_UNALIGNED16) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_GET_NE16(ss)    * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
 #else
 #define LZO_MEMOPS_GET_NE16(ss)    lzo_memops_get_ne16(ss)
@@ -399,14 +441,23 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_ne32(const lzo_voidp ss)
     LZO_MEMOPS_COPY4(&v, ss);
     return v;
 }
-#if (LZO_OPT_UNALIGNED32)
+#if (LZO_OPT_UNALIGNED32) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_GET_NE32(ss)    * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
 #else
 #define LZO_MEMOPS_GET_NE32(ss)    lzo_memops_get_ne32(ss)
 #endif
 
-#if (LZO_OPT_UNALIGNED64)
+__lzo_static_forceinline lzo_uint64_t lzo_memops_get_ne64(const lzo_voidp ss)
+{
+    lzo_uint64_t v;
+    LZO_MEMOPS_COPY8(&v, ss);
+    return v;
+}
+
+#if (LZO_OPT_UNALIGNED64) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_GET_NE64(ss)    * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
+#else
+#define LZO_MEMOPS_GET_NE64(ss)    lzo_memops_get_ne64(ss)
 #endif
 
 __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
@@ -423,7 +474,7 @@ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
     d[1] = LZO_BYTE((vv >>  8) & 0xff);
 #endif
 }
-#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
+#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_PUT_LE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
 #else
 #define LZO_MEMOPS_PUT_LE16(dd,vv) lzo_memops_put_le16(dd,vv)
@@ -445,7 +496,7 @@ __lzo_static_forceinline void lzo_memops_put_le32(lzo_voidp dd, lzo_uint32_t vv)
     d[3] = LZO_BYTE((vv >> 24) & 0xff);
 #endif
 }
-#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
+#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_PUT_LE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
 #else
 #define LZO_MEMOPS_PUT_LE32(dd,vv) lzo_memops_put_le32(dd,vv)
@@ -455,7 +506,7 @@ __lzo_static_forceinline void lzo_memops_put_ne16(lzo_voidp dd, lzo_uint16_t vv)
 {
     LZO_MEMOPS_COPY2(dd, &vv);
 }
-#if (LZO_OPT_UNALIGNED16)
+#if (LZO_OPT_UNALIGNED16) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_PUT_NE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
 #else
 #define LZO_MEMOPS_PUT_NE16(dd,vv) lzo_memops_put_ne16(dd,vv)
@@ -465,7 +516,7 @@ __lzo_static_forceinline void lzo_memops_put_ne32(lzo_voidp dd, lzo_uint32_t vv)
 {
     LZO_MEMOPS_COPY4(dd, &vv);
 }
-#if (LZO_OPT_UNALIGNED32)
+#if (LZO_OPT_UNALIGNED32) && !(LZO_CFG_MODERN_C+0)
 #define LZO_MEMOPS_PUT_NE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
 #else
 #define LZO_MEMOPS_PUT_NE32(dd,vv) lzo_memops_put_ne32(dd,vv)
