Hi!

While looking for bugs using the
https://sourceware.org/ml/binutils/2016-05/msg00328.html
hacks, in order to achieve more testing I've also turned all dg-do compile
tests into dg-do assemble (as sketched below), so that they would be
assembled and I could watch for diagnostics.  A couple of tests use complete
garbage in inline asm, which is fine, but in the following ones the invalid
assembly looks unintended.
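
To illustrate the kind of change involved (just a sketch, not part of the
patch), the directive in each affected test header goes from

  /* { dg-do compile } */

to

  /* { dg-do assemble } */

so that the compiler output is additionally fed through the assembler.
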
In the first one, for -m64 it would be enough to use "r" (1LL) or "r" (2LL),
but for -m32 kmovq only supports loading the mask from m64, so I've switched
to an "m" constraint (see the sketch after this paragraph).
The second one only has -mavx512f enabled, so I've replaced the avx512dq
instruction used in there with an avx512f one.
And the third one contains an asm template that is only valid for 32-bit
code, so for x86_64 the template is now empty.
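
For reference, a minimal sketch of the two options for the kmovq case (not
part of the patch):

  __mmask64 k;
  long long one = 1;
  /* Memory source: valid for both -m32 and -m64.  */
  __asm__ ("kmovq %1, %0" : "=k" (k) : "m" (one));
  /* GPR source: needs a 64-bit register, so -m64 only.  */
  /* __asm__ ("kmovq %1, %0" : "=k" (k) : "r" (1LL)); */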

Tested on x86_64-linux and i686-linux, ok for trunk?

2016-05-21  Jakub Jelinek  <ja...@redhat.com>

        * gcc.target/i386/avx512bw-kunpckdq-1.c (avx512bw_test): Use "m"
        constraint instead of "r".
        * gcc.target/i386/avx512f-additional-reg-names.c (foo): Use vpxord
        insn instead of vxorpd.
        * gcc.target/i386/strinline.c (__mempcpy_by2): Use empty asm template
        string for x86_64.

--- gcc/testsuite/gcc.target/i386/avx512bw-kunpckdq-1.c.jj      2014-12-03 15:06:06.469866209 +0100
+++ gcc/testsuite/gcc.target/i386/avx512bw-kunpckdq-1.c 2016-05-21 18:35:34.269533825 +0200
@@ -8,9 +8,10 @@ void
 avx512bw_test () {
   __mmask64 k1, k2, k3;
   volatile __m512i x;
+  long long one = 1, two = 2;
 
-  __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
-  __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2) );
+  __asm__( "kmovq %1, %0" : "=k" (k1) : "m" (one) );
+  __asm__( "kmovq %1, %0" : "=k" (k2) : "m" (two) );
 
   k3 = _mm512_kunpackd (k1, k2);
   x = _mm512_mask_avg_epu8 (x, k3, x, x);
--- gcc/testsuite/gcc.target/i386/avx512f-additional-reg-names.c.jj     2014-10-01 16:27:25.838134349 +0200
+++ gcc/testsuite/gcc.target/i386/avx512f-additional-reg-names.c        2016-05-21 18:37:46.505781090 +0200
@@ -5,5 +5,5 @@ void foo ()
 {
   register int zmm_var asm ("zmm6") __attribute__((unused));
 
-  __asm__ __volatile__("vxorpd %%zmm0, %%zmm0, %%zmm7\n" : : : "zmm7" );
+  __asm__ __volatile__("vpxord %%zmm0, %%zmm0, %%zmm7\n" : : : "zmm7" );
 }
--- gcc/testsuite/gcc.target/i386/strinline.c.jj        2014-09-25 15:02:06.703336175 +0200
+++ gcc/testsuite/gcc.target/i386/strinline.c   2016-05-21 18:37:07.454298661 +0200
@@ -8,7 +8,11 @@ __mempcpy_by2 (char *__dest, __const cha
   register char *__tmp = __dest;
   register unsigned long int __d0, __d1;
   __asm__ __volatile__
-    ("shrl      $1,%3\n\t"
+    (
+#ifdef __x86_64__
+     ""
+#else
+     "shrl      $1,%3\n\t"
      "jz        2f\n"
      "1:\n\t"
      "movl      (%2),%0\n\t"
@@ -20,6 +24,7 @@ __mempcpy_by2 (char *__dest, __const cha
      "2:\n\t"
      "movw      (%2),%w0\n\t"
      "movw      %w0,(%1)"
+#endif
      : "=&q" (__d0), "=r" (__tmp), "=&r" (__src), "=&r" (__d1),
        "=m" ( *(struct { __extension__ char __x[__srclen]; } *)__dest)
      : "1" (__tmp), "2" (__src), "3" (__srclen / 2),

        Jakub
