Hi,

this patch fixes the spurious error issued for asm statements of the form:

  error: 'asm' operand requires impossible reload

present for IA-64 on mainline and 4.7 branch.  As diagnosed by Ulrich, the code 
responsible for the error implicitly assumes that constraints accepting memory 
operands also accept pseudo-registers during reload.  That isn't true for the 
Q constraint of IA-64.  The patch also fixes an access to MEM_VOLATILE_P 
that is not properly guarded by a MEM_P test in the constraint's expression.
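
For reference, this is roughly what the constraint would look like as a 
define_memory_constraint, in which case reload would accept the 
pseudo-registers by itself.  It is only a sketch, and precisely what Q 
cannot be, since a volatile MEM cannot be made acceptable by reloading 
its address:

;; Sketch only, not part of the patch: a define_memory_constraint
;; promises that any invalid operand can be fixed by reloading its
;; address, so reload passes pseudo-registers to it, knowing that
;; they can always be spilled to memory.  Q cannot make that promise
;; because no address reload turns a volatile MEM into a valid operand.
(define_memory_constraint "Q"
  "Memory for FP_REG loads/stores"
  (match_operand 0 "memory_operand"))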

Bootstrapped/regtested on IA-64/Linux, OK for mainline and 4.7 branch?


2012-04-11  Eric Botcazou  <ebotca...@adacore.com>

        PR target/48496
        * config/ia64/constraints.md (Q): Only accept non-volatile MEMs and
        also pseudo-registers during reload.


2012-04-11  Eric Botcazou  <ebotca...@adacore.com>

        * gcc.target/ia64/pr48496.c: New test.
        * gcc.target/ia64/pr52657.c: Likewise.


-- 
Eric Botcazou
Index: config/ia64/constraints.md
===================================================================
--- config/ia64/constraints.md	(revision 186272)
+++ config/ia64/constraints.md	(working copy)
@@ -111,11 +111,16 @@ (define_constraint "H"
 
 ;; Note that while this accepts mem, it only accepts non-volatile mem,
 ;; and so cannot be "fixed" by adjusting the address.  Thus it cannot
-;; and does not use define_memory_constraint.
+;; and does not use define_memory_constraint.  But it needs to accept
+;; pseudo-registers during reload like a define_memory_constraint.
 (define_constraint "Q"
   "Non-volatile memory for FP_REG loads/stores"
-  (and (match_operand 0 "memory_operand")
-       (match_test "!MEM_VOLATILE_P (op)")))
+  (ior (and (match_code "mem")
+	    (match_test "!MEM_VOLATILE_P (op)")
+	    (match_operand 0 "memory_operand"))
+       (and (match_code "reg")
+	    (match_test "!HARD_REGISTER_P (op)")
+	    (match_test "reload_in_progress"))))
 
 (define_constraint "R"
   "1..4 for shladd arguments"
Index: gcc.target/ia64/pr48496.c
===================================================================
/* { dg-do compile } */
/* { dg-options "-O2" } */

typedef unsigned int UINT64 __attribute__((__mode__(__DI__)));

typedef struct
{
  UINT64 x[2] __attribute__((aligned(16)));
} fpreg;

struct ia64_args
{
  fpreg fp_regs[8];
  UINT64 gp_regs[8];
};

void
ffi_call (long i, long gpcount, long fpcount, void **avalue)
{
  struct ia64_args *stack;
  stack = __builtin_alloca (64);
  asm ("stf.spill %0 = %1%P0" : "=m" (*&stack->fp_regs[fpcount++])
                              : "f"(*(double *)avalue[i]));
  stack->gp_regs[gpcount++] = *(UINT64 *)avalue[i];
}
Index: gcc.target/ia64/pr52657.c
===================================================================
/* { dg-do compile } */
/* { dg-options "-O" } */

typedef unsigned long int mp_limb_t;

typedef struct
{
  int _mp_alloc;
  int _mp_size;
  mp_limb_t *_mp_d;
} __mpz_struct;

typedef __mpz_struct mpz_t[1];
typedef mp_limb_t * mp_ptr;
typedef const mp_limb_t * mp_srcptr;
typedef long int mp_size_t;

extern mp_limb_t __gmpn_addmul_2 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);

void
__gmpn_redc_2 (mp_ptr rp, mp_ptr up, mp_srcptr mp, mp_size_t n, mp_srcptr mip)
{
  mp_limb_t q[2];
  mp_size_t j;
  mp_limb_t upn;

  for (j = n - 2; j >= 0; j -= 2)
    {
      mp_limb_t _ph, _pl;
      __asm__ ("xma.hu %0 = %3, %5, f0\n\t"
               "xma.l %1 = %3, %5, f0\n\t"
               ";;\n\t"
               "xma.l %0 = %3, %4, %0\n\t"
               ";;\n\t"
               "xma.l %0 = %2, %5, %0"
               : "=&f" (q[1]), "=&f" (q[0])
               : "f" (mip[1]), "f" (mip[0]), "f" (up[1]), "f" (up[0]));
      upn = up[n];
      up[1] = __gmpn_addmul_2 (up, mp, n, q);
      up[0] = up[n];
      up[n] = upn;
      up += 2;
    }
}
