Ahmad has helped run some Atom performance testing (ChromeOS
benchmarks) with this patch. In summary, no statistically
significant regression was seen. There is one improvement of about
+1.9% (the v8 benchmark) which looks real.

David

On Wed, Dec 12, 2012 at 9:24 AM, Xinliang David Li <davi...@google.com> wrote:
> On Wed, Dec 12, 2012 at 8:37 AM, Jan Hubicka <hubi...@ucw.cz> wrote:
>>> I noticed that in the prologue/epilogue, GCC prefers MOVs followed by
>>> an SP adjustment over a sequence of pushes/pops. The preference for
>>> MOVs is good for old CPU micro-architectures (before Pentium 4 and
>>> K10), because it breaks the data dependency on the stack pointer.  In
>>> modern micro-architectures, push/pop is implemented using a mechanism
>>> called the stack engine: the data dependency is removed by the
>>> hardware, and push/pop becomes very cheap (1 uop, 1 cycle latency) as
>>> well as smaller. There is no longer a need to avoid them.  This is
>>> also what ICC does; the two styles are sketched below.
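>>>
>>> As a rough sketch (registers and frame size made up, not from the
>>> patch), the two prologue styles for a function saving two registers
>>> and needing 8 bytes of locals are:
>>>
>>>     # push-based: smaller, cheap on stack-engine CPUs
>>>     pushq  %rbx
>>>     pushq  %r12
>>>     subq   $8, %rsp
>>>
>>>     # move-based (X86_TUNE_PROLOGUE_USING_MOVE): avoids the serial
>>>     # %rsp dependency chain on pre-stack-engine cores
>>>     subq   $24, %rsp
>>>     movq   %rbx, 16(%rsp)
>>>     movq   %r12, 8(%rsp)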
>>>
>>> The following patch fixes the problem. It passes the bootstrap and
>>> regression tests. OK to install?
>>>
>>> thanks,
>>>
>>> David
>>>
>>> Index: config/i386/i386.c
>>> ===================================================================
>>> --- config/i386/i386.c (revision 194324)
>>> +++ config/i386/i386.c (working copy)
>>> @@ -1919,10 +1919,10 @@ static unsigned int initial_ix86_tune_fe
>>>    m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
>>>
>>>    /* X86_TUNE_PROLOGUE_USING_MOVE */
>>> -  m_PPRO | m_CORE2I7 | m_ATOM | m_ATHLON_K8 | m_GENERIC,
>>> +  m_PPRO | m_ATHLON_K8,
>>>
>>>    /* X86_TUNE_EPILOGUE_USING_MOVE */
>>> -  m_PPRO | m_CORE2I7 | m_ATOM | m_ATHLON_K8 | m_GENERIC,
>>> +  m_PPRO | m_ATHLON_K8,
>>
>> Push/pop versus moves was always difficult to tune on old CPUs, so I am
>> happy it is gone from generic (in fact I had a similar patch pending).
>> Are you sure about Atom having a stack engine, too?
>>
>
> Good question. The instruction latency table
> (http://www.agner.org/optimize/instruction_tables.pdf) shows that for
> Atom, push r is 1 uop with 1 cycle latency. However, the instruction is
> not pairable, which will affect ILP. The microarchitecture guide at
> http://www.agner.org/optimize/microarchitecture.pdf does not mention a
> stack engine for Atom either.
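>
> (If push is indeed not pairable there, the worry, sketched with
> made-up registers, is that on the in-order, two-wide Atom a sequence
> like
>
>     push %ebx
>     mov  %eax, %ecx    # independent, but cannot issue with the push
>
> loses a dual-issue slot that the MOV-based prologue would keep. That
> is my reading of the tables, not measured behavior.)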
>
> I will help collect some performance data on Atom.
>
>
> thanks,
>
> David
>
>
>> A related thing is accumulate_outgoing_args. Igor is testing it on Core,
>> and I will give it a try on K10.
>>
>> Honza
>>
>> I am attaching the changes for core costs I made, in case someone is
>> interested in testing them.  If we can declare the P4/PPro and maybe K8
>> chips obsolete for generic, there is room for improvement in generic,
>> too, like using inc/dec again (a minimal sketch below).
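>>
>> (A minimal sketch of the inc/dec point: for a counter in %eax,
>>
>>     addl $1, %eax    # what -mtune=generic emits today
>>     incl %eax        # shorter encoding
>>
>> inc/dec leave CF untouched, so P4-era cores stalled merging the
>> partial flags result; once P4 is out of generic, the shorter form
>> should be safe again.)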
>>
>> Honza
>>
>> Index: config/i386/i386.c
>> ===================================================================
>> --- config/i386/i386.c  (revision 194452)
>> +++ config/i386/i386.c  (working copy)
>> @@ -1620,14 +1620,14 @@ struct processor_costs core_cost = {
>>    COSTS_N_INSNS (8),                   /* cost of FABS instruction.  */
>>    COSTS_N_INSNS (8),                   /* cost of FCHS instruction.  */
>>    COSTS_N_INSNS (40),                  /* cost of FSQRT instruction.  */
>> -  {{libcall, {{1024, rep_prefix_4_byte, true}, {-1, libcall, false}}},
>> -   {libcall, {{24, loop, true}, {128, rep_prefix_8_byte, true},
>> +  {{libcall, {{8192, rep_prefix_4_byte, true}, {-1, libcall, false}}},
>> +   {libcall, {{24, loop, true}, {8192, rep_prefix_8_byte, true},
>>                {-1, libcall, false}}}},
>>    {{libcall, {{6, loop_1_byte, true},
>>                {24, loop, true},
>>                {8192, rep_prefix_4_byte, true},
>>                {-1, libcall, false}}},
>> -   {libcall, {{24, loop, true}, {512, rep_prefix_8_byte, true},
>> +   {libcall, {{24, loop, true}, {8192, rep_prefix_8_byte, true},
>>                {-1, libcall, false}}}},
>>    1,                                   /* scalar_stmt_cost.  */
>>    1,                                   /* scalar load_cost.  */
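>>
>> (To read the stringop tables above: each {max, alg, noalign} triple
>> means "use ALG for known block sizes up to MAX bytes", with -1 as the
>> catch-all and the leading libcall covering unknown sizes. So after
>> the change, roughly:
>>
>>   memcpy (dst, src, 4096);   /* inlined as rep movsd */
>>   memcpy (dst, src, 16384);  /* stays a library call */
>>
>> That is my paraphrase of the table semantics, not part of the patch.)
>>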
>> @@ -1806,7 +1806,7 @@ static unsigned int initial_ix86_tune_fe
>>    m_PPRO,
>>
>>    /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
>> -  m_CORE2I7 | m_GENERIC,
>> +  m_GENERIC | m_CORE2,
>>
>>    /* X86_TUNE_LCP_STALL: Avoid an expensive length-changing prefix stall
>>     * on 16-bit immediate moves into memory on Core2 and Corei7.  */
>> @@ -1822,7 +1822,7 @@ static unsigned int initial_ix86_tune_fe
>>    m_K6,
>>
>>    /* X86_TUNE_USE_CLTD */
>> -  ~(m_PENT | m_ATOM | m_K6),
>> +  ~(m_PENT | m_ATOM | m_K6 | m_GENERIC),
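>>
>> (For reference, X86_TUNE_USE_CLTD selects the sign-extension form
>> used before a 32-bit idiv:
>>
>>     cltd             # sign-extend %eax into %edx:%eax, 1 byte
>>     idivl %ecx
>>
>> versus, with the flag off:
>>
>>     movl %eax, %edx
>>     sarl $31, %edx
>>     idivl %ecx
>>
>> the mov/sar pair is longer; my understanding is it was preferable on
>> the excluded cores, though I have not verified the latencies.)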
>>
>>    /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */
>>    m_PENT4,
>> @@ -1901,7 +1901,7 @@ static unsigned int initial_ix86_tune_fe
>>    m_COREI7 | m_BDVER,
>>
>>    /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
>> -  m_BDVER ,
>> +  m_BDVER,
>>
>>    /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
>>       are resolved on SSE register parts instead of whole registers, so we may
>> @@ -1910,10 +1910,10 @@ static unsigned int initial_ix86_tune_fe
>>    m_ATHLON_K8,
>>
>>    /* X86_TUNE_SSE_TYPELESS_STORES */
>> -  m_AMD_MULTIPLE,
>> +  m_AMD_MULTIPLE | m_CORE2I7, /*????*/
>>
>>    /* X86_TUNE_SSE_LOAD0_BY_PXOR */
>> -  m_PPRO | m_P4_NOCONA,
>> +  m_PPRO | m_P4_NOCONA | m_CORE2I7, /*????*/
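>>
>> (The LOAD0 flag picks the zeroing idiom, e.g.
>>
>>     pxor %xmm0, %xmm0        # instead of: movaps .LC0(%rip), %xmm0
>>
>> which breaks dependencies and avoids a constant-pool load; the
>> /*????*/ presumably marks that the Core benefit is untested.)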
>>
>>    /* X86_TUNE_MEMORY_MISMATCH_STALL */
>>    m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
>> @@ -1938,7 +1938,7 @@ static unsigned int initial_ix86_tune_fe
>>
>>    /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
>>       than 4 branch instructions in the 16 byte window.  */
>> -  m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
>> +  m_PPRO | m_P4_NOCONA | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
>>
>>    /* X86_TUNE_SCHEDULE */
>>    m_PENT | m_PPRO | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
>> @@ -1947,10 +1947,10 @@ static unsigned int initial_ix86_tune_fe
>>    m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
>>
>>    /* X86_TUNE_USE_INCDEC */
>> -  ~(m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GENERIC),
>> +  ~(m_P4_NOCONA | m_ATOM | m_GENERIC),
>>
>>    /* X86_TUNE_PAD_RETURNS */
>> -  m_CORE2I7 | m_AMD_MULTIPLE | m_GENERIC,
>> +  m_AMD_MULTIPLE | m_GENERIC,
>>
>>    /* X86_TUNE_PAD_SHORT_FUNCTION: Pad short funtion.  */
>>    m_ATOM,
>> @@ -1959,7 +1959,7 @@ static unsigned int initial_ix86_tune_fe
>>    m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_ATHLON_K8 | m_GENERIC,
>>
>>    /* X86_TUNE_AVOID_VECTOR_DECODE */
>> -  m_CORE2I7 | m_K8 | m_GENERIC64,
>> +  m_K8 | m_GENERIC64,
>>
>>    /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have same latency for HImode
>>       and SImode multiply, but 386 and 486 do HImode multiply faster.  */
>> @@ -1967,11 +1967,11 @@ static unsigned int initial_ix86_tune_fe
>>
>>    /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
>>       vector path on AMD machines.  */
>> -  m_CORE2I7 | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER | m_GENERIC64,
>> +  m_CORE2I7 | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER,
>>
>>    /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
>>       machines.  */
>> -  m_CORE2I7 | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER | m_GENERIC64,
>> +  m_CORE2I7 | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER,
>>
>>    /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
>>       than a MOV.  */
>> @@ -1988,7 +1988,7 @@ static unsigned int initial_ix86_tune_fe
>>
>>    /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
>>       from FP to FP. */
>> -  m_CORE2I7 | m_AMDFAM10 | m_GENERIC,
>> +  m_AMDFAM10 | m_GENERIC,
>>
>>    /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
>>       from integer to FP. */
>> @@ -1997,7 +1997,7 @@ static unsigned int initial_ix86_tune_fe
>>    /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
>>       with a subsequent conditional jump instruction into a single
>>       compare-and-branch uop.  */
>> -  m_BDVER,
>> +  m_BDVER | m_CORE2I7,
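>>
>> (Macro-fusion needs the compare and the branch adjacent, e.g.
>>
>>     cmpl %eax, %ebx
>>     jne  .L3           # cmp+jne can decode as one fused uop
>>
>> scheduling anything between the two defeats it, which is what this
>> flag tells the scheduler to avoid.)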
>>
>>    /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
>>       will impact LEA instruction selection. */
>> @@ -2052,7 +2052,7 @@ static unsigned int initial_ix86_arch_fe
>>  };
>>
>>  static const unsigned int x86_accumulate_outgoing_args
>> -  = m_PPRO | m_P4_NOCONA | m_ATOM | m_CORE2I7 | m_AMD_MULTIPLE | m_GENERIC;
>> +  = m_PPRO | m_P4_NOCONA | m_ATOM | m_AMD_MULTIPLE | m_GENERIC;
>>
>>  static const unsigned int x86_arch_always_fancy_math_387
>>    m_PENT | m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC;
>> Index: config/i386/i386.md
>> ===================================================================
>> --- config/i386/i386.md (revision 194452)
>> +++ config/i386/i386.md (working copy)
>> @@ -965,7 +965,7 @@
>>         (compare:CC (match_operand:SDWIM 1 "nonimmediate_operand")
>>                     (match_operand:SDWIM 2 "<general_operand>")))
>>     (set (pc) (if_then_else
>> -              (match_operator 0 "ordered_comparison_operator"
>> +              (match_operator 0 "comparison_operator"
>>                 [(reg:CC FLAGS_REG) (const_int 0)])
>>                (label_ref (match_operand 3))
>>                (pc)))]
>> @@ -983,7 +983,7 @@
>>         (compare:CC (match_operand:SWIM 2 "nonimmediate_operand")
>>                     (match_operand:SWIM 3 "<general_operand>")))
>>     (set (match_operand:QI 0 "register_operand")
>> -       (match_operator 1 "ordered_comparison_operator"
>> +       (match_operator 1 "comparison_operator"
>>           [(reg:CC FLAGS_REG) (const_int 0)]))]
>>    ""
>>  {
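>>
>> (On the .md hunks: in GCC's predicates,
>>
>>   ordered_comparison_operator matches eq, ne, lt, ltu, le, leu,
>>                               gt, gtu, ge, geu;
>>   comparison_operator         also matches unordered, ordered,
>>                               uneq, unlt, unle, ungt, unge, ltgt;
>>
>> so these hunks let the cbranch/cstore expanders accept the unordered
>> codes on FLAGS_REG as well; the motivation is not stated in this
>> excerpt.)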
