Hello,

(Having a great time in Ottawa :)

move_invariant_reg has this code:

      /* Replace the uses we know to be dominated.  It saves work for copy
         propagation, and also it is necessary so that dependent invariants
         are computed right.  */
      if (inv->def)
        {
          for (use = inv->def->uses; use; use = use->next)
            {
              *use->pos = reg;
              df_insn_rescan (use->insn);
            }
        }

and we're observing a case where this updates only one of two
match_dup-related operands. This is on i386-mingwin, and the insn in
question comes from this pattern:

   (define_insn "allocate_stack_worker_32"
     [(set (match_operand:SI 0 "register_operand" "+a")
           (unspec_volatile:SI [(match_dup 0)] UNSPECV_STACK_PROBE))
      (set (reg:SI SP_REG) (minus:SI (reg:SI SP_REG) (match_dup 0)))
      (clobber (reg:CC FLAGS_REG))]
     "!TARGET_64BIT && TARGET_STACK_PROBE"
     "call\t___chkstk"

I think the intent is to convey that __chkstk takes eax as an
argument and clobbers it.
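
In C terms, I read the parallel as something like the toy model below
(illustrative only: the names and the exact clobber value are mine, not
from any documentation of the convention):

    /* Toy C model of what allocate_stack_worker_32 expresses.  */
    struct cpu { unsigned int eax, esp; };

    static void chkstk_model (struct cpu *regs)
    {
      unsigned int size = regs->eax; /* (match_dup 0): size comes in eax      */
      regs->esp -= size;             /* (set (reg SP) (minus (reg SP) ...))   */
      regs->eax = 0xdeadbeef;        /* operand 0 ("+a") comes back clobbered */
    }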

Is this a valid pattern?


FWIW, we're observing the anomaly on the Ada testcase below, which
produces 

   .152r.loop2_done
   ...
   (insn 46 109 48 4 p.adb:10 (parallel [
               (set (reg:SI 90)
                   (unspec_volatile:SI [
                           (reg:SI 105)
                       ] 1))
   ...

when compiled by mainline with -O1. Note that the destination of the
first set has become (reg:SI 90) while the match_dup copy inside the
unspec_volatile still reads (reg:SI 105): the two operands the pattern
requires to be identical now disagree.
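
To make the failure mode concrete, here is a toy model of the
move_invariant_reg walk quoted above. The structures are stand-ins I
made up, not the real df ones; the point is just that a slot the use
chain does not record never gets rewritten:

    #include <stdio.h>

    /* Stand-ins for the real structures: "pos" points at the slot in
       the insn where the register reference lives, as in the GCC loop.  */
    struct use_t { const char **pos; struct use_t *next; };

    int main (void)
    {
      const char *old_reg = "(reg:SI 105)";
      const char *new_reg = "(reg:SI 90)";

      /* The two slots the match_dup operand occupies in the parallel.  */
      const char *dest_slot   = old_reg; /* operand 0 of the first set      */
      const char *unspec_slot = old_reg; /* (match_dup 0) inside the unspec */

      /* Model the anomaly: the use chain records only one of the slots.  */
      struct use_t u = { &dest_slot, NULL };

      for (struct use_t *use = &u; use; use = use->next)
        *use->pos = new_reg;             /* the "*use->pos = reg" step      */

      /* The match_dup copy was never rewritten: the insn now names two
         different registers where the pattern requires a single one.  */
      printf ("dest:   %s\nunspec: %s\n", dest_slot, unspec_slot);
      return 0;
    }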

Later on, we're observing bogus code calling __chkstk without reloading
eax with the proper value at a loop branch point:

    .004t.gimple
      ...
      D.1376 = __builtin_alloca (D.1363);
      ...
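
(For reference, the shape at play is roughly the following C analog --
my own reconstruction with made-up names, not the actual Ada testcase:
an alloca whose size is loop invariant, inside a loop, which is what
gives loop invariant motion a register to move.)

    #include <string.h>

    /* Illustrative analog only: an invariant-sized alloca in a loop.  */
    void f (char *blob, int n, int l, unsigned int len)
    {
      for (int j = 0; j < n; j++)
        {
          char *a = __builtin_alloca (len); /* D.1376 = __builtin_alloca (D.1363) */
          memset (a, 0, l);                 /* the zero-fill loop around L4 */
          memcpy (blob + j * len, a, len);  /* the call to _memcpy */
        }
    }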

    .s
        ...
        movl    -28(%ebp), %eax  # D.1363, tmp102
        addl    $30, %eax        #, tmp102
        shrl    $4, %eax         #, tmp103
        sall    $4, %eax         #, tmp104
        movl    %eax, -32(%ebp)  # tmp104,                 <=== compute arg in eax
L5:                                                              
        movl    %esi, %edi       # ivtmp.34, J8b.7
        movl    %esp, -16(%ebp)  # saved_stack.8, saved_stack.8
        call    ___chkstk                                  <==== allocate
        leal    27(%esp), %eax   #, tmp91
        movl    %eax, %edx       # tmp91, A14b.4
        andl    $-16, %edx       #, A14b.4
        testl   %ebx, %ebx       # l.0
        jle     L3       #,
        movb    $0, (%edx)       #,* A14b.4
        cmpl    $1, %ebx         #, l.0
        je      L3       #,
        movl    $1, %eax         #, J15b.6
L4:
        addl    $1, %eax         #, J15b.6
        movb    $0, -1(%edx,%eax)        #,
        cmpl    %eax, %ebx       # J15b.6, l.0
        jne     L4       #,
L3:
        leal    -1(%edi), %eax   #, tmp94
        imull   -28(%ebp), %eax  # D.1363, tmp95
        addl    -20(%ebp), %eax  # blob.2, tmp96              <==== clobber eax
        movl    -28(%ebp), %ecx  # D.1363,
        movl    %ecx, 8(%esp)    #,
        movl    %edx, 4(%esp)    # A14b.4,
        movl    %eax, (%esp)     # tmp96,* saved_stack.8
        call    _memcpy  #
        movl    -16(%ebp), %esp  # saved_stack.8, saved_stack.8
        cmpl    -24(%ebp), %edi  # D.1366, J8b.7
        je      L2       #,
        addl    $1, %esi         #, ivtmp.34
        jmp     L5       #                                    <=== branch back here,
                                                                   eax clobbered
