The separate push{lq}_cfi and pop{lq}_cfi macros aren't needed.  Push
and pop only come in one size per architecture, so the trailing 'q' or
'l' characters are redundant, and awkward to use in arch-independent
code.

Replace the push/pop CFI macros with architecture-independent versions:
push_cfi, pop_cfi, etc.

This change is purely cosmetic, with no resulting object code changes.

Suggested-by: "H. Peter Anvin" <h...@zytor.com>
Signed-off-by: Josh Poimboeuf <jpoim...@redhat.com>
---
 arch/x86/ia32/ia32entry.S      |  60 ++++++------
 arch/x86/include/asm/calling.h |  28 +++---
 arch/x86/include/asm/dwarf2.h  |  92 ++++++------------
 arch/x86/include/asm/frame.h   |   4 +-
 arch/x86/kernel/entry_32.S     | 214 ++++++++++++++++++++---------------------
 arch/x86/kernel/entry_64.S     |  96 +++++++++---------
 arch/x86/lib/atomic64_386_32.S |   4 +-
 arch/x86/lib/atomic64_cx8_32.S |  40 ++++----
 arch/x86/lib/checksum_32.S     |  42 ++++----
 arch/x86/lib/cmpxchg16b_emu.S  |   6 +-
 arch/x86/lib/cmpxchg8b_emu.S   |   6 +-
 arch/x86/lib/msr-reg.S         |  34 +++----
 arch/x86/lib/rwsem.S           |  40 ++++----
 arch/x86/lib/thunk_32.S        |  12 +--
 arch/x86/lib/thunk_64.S        |  36 +++----
 15 files changed, 343 insertions(+), 371 deletions(-)

diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 63450a5..1664d12 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -124,19 +124,19 @@ ENTRY(ia32_sysenter_target)
        CFI_REGISTER rip,r10
 
        /* Construct struct pt_regs on stack */
-       pushq_cfi       $__USER32_DS            /* pt_regs->ss */
-       pushq_cfi       %rbp                    /* pt_regs->sp */
+       push_cfi        $__USER32_DS            /* pt_regs->ss */
+       push_cfi        %rbp                    /* pt_regs->sp */
        CFI_REL_OFFSET  rsp,0
-       pushfq_cfi                              /* pt_regs->flags */
-       pushq_cfi       $__USER32_CS            /* pt_regs->cs */
-       pushq_cfi       %r10 /* pt_regs->ip = thread_info->sysenter_return */
+       pushf_cfi                               /* pt_regs->flags */
+       push_cfi        $__USER32_CS            /* pt_regs->cs */
+       push_cfi        %r10 /* pt_regs->ip = thread_info->sysenter_return */
        CFI_REL_OFFSET  rip,0
-       pushq_cfi_reg   rax                     /* pt_regs->orig_ax */
-       pushq_cfi_reg   rdi                     /* pt_regs->di */
-       pushq_cfi_reg   rsi                     /* pt_regs->si */
-       pushq_cfi_reg   rdx                     /* pt_regs->dx */
-       pushq_cfi_reg   rcx                     /* pt_regs->cx */
-       pushq_cfi       $-ENOSYS                /* pt_regs->ax */
+       push_cfi_reg    rax                     /* pt_regs->orig_ax */
+       push_cfi_reg    rdi                     /* pt_regs->di */
+       push_cfi_reg    rsi                     /* pt_regs->si */
+       push_cfi_reg    rdx                     /* pt_regs->dx */
+       push_cfi_reg    rcx                     /* pt_regs->cx */
+       push_cfi        $-ENOSYS                /* pt_regs->ax */
        cld
        sub     $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
        CFI_ADJUST_CFA_OFFSET 10*8
@@ -282,8 +282,8 @@ sysexit_audit:
 #endif
 
 sysenter_fix_flags:
-       pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
-       popfq_cfi
+       push_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
+       popf_cfi
        jmp sysenter_flags_fixed
 
 sysenter_tracesys:
@@ -353,20 +353,20 @@ ENTRY(ia32_cstar_target)
        movl    %eax,%eax
 
        /* Construct struct pt_regs on stack */
-       pushq_cfi       $__USER32_DS            /* pt_regs->ss */
-       pushq_cfi       %r8                     /* pt_regs->sp */
+       push_cfi        $__USER32_DS            /* pt_regs->ss */
+       push_cfi        %r8                     /* pt_regs->sp */
        CFI_REL_OFFSET rsp,0
-       pushq_cfi       %r11                    /* pt_regs->flags */
-       pushq_cfi       $__USER32_CS            /* pt_regs->cs */
-       pushq_cfi       %rcx                    /* pt_regs->ip */
+       push_cfi        %r11                    /* pt_regs->flags */
+       push_cfi        $__USER32_CS            /* pt_regs->cs */
+       push_cfi        %rcx                    /* pt_regs->ip */
        CFI_REL_OFFSET rip,0
-       pushq_cfi_reg   rax                     /* pt_regs->orig_ax */
-       pushq_cfi_reg   rdi                     /* pt_regs->di */
-       pushq_cfi_reg   rsi                     /* pt_regs->si */
-       pushq_cfi_reg   rdx                     /* pt_regs->dx */
-       pushq_cfi_reg   rbp                     /* pt_regs->cx */
+       push_cfi_reg    rax                     /* pt_regs->orig_ax */
+       push_cfi_reg    rdi                     /* pt_regs->di */
+       push_cfi_reg    rsi                     /* pt_regs->si */
+       push_cfi_reg    rdx                     /* pt_regs->dx */
+       push_cfi_reg    rbp                     /* pt_regs->cx */
        movl    %ebp,%ecx
-       pushq_cfi       $-ENOSYS                /* pt_regs->ax */
+       push_cfi        $-ENOSYS                /* pt_regs->ax */
        sub     $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
        CFI_ADJUST_CFA_OFFSET 10*8
 
@@ -506,12 +506,12 @@ ENTRY(ia32_syscall)
        movl    %eax,%eax
 
        /* Construct struct pt_regs on stack (iret frame is already on stack) */
-       pushq_cfi_reg   rax                     /* pt_regs->orig_ax */
-       pushq_cfi_reg   rdi                     /* pt_regs->di */
-       pushq_cfi_reg   rsi                     /* pt_regs->si */
-       pushq_cfi_reg   rdx                     /* pt_regs->dx */
-       pushq_cfi_reg   rcx                     /* pt_regs->cx */
-       pushq_cfi       $-ENOSYS                /* pt_regs->ax */
+       push_cfi_reg    rax                     /* pt_regs->orig_ax */
+       push_cfi_reg    rdi                     /* pt_regs->di */
+       push_cfi_reg    rsi                     /* pt_regs->si */
+       push_cfi_reg    rdx                     /* pt_regs->dx */
+       push_cfi_reg    rcx                     /* pt_regs->cx */
+       push_cfi        $-ENOSYS                /* pt_regs->ax */
        cld
        sub     $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
        CFI_ADJUST_CFA_OFFSET 10*8
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 1c8b50e..4abc60f 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -224,23 +224,23 @@ For 32-bit we have the following conventions - kernel is 
built with
  */
 
        .macro SAVE_ALL
-       pushl_cfi_reg eax
-       pushl_cfi_reg ebp
-       pushl_cfi_reg edi
-       pushl_cfi_reg esi
-       pushl_cfi_reg edx
-       pushl_cfi_reg ecx
-       pushl_cfi_reg ebx
+       push_cfi_reg eax
+       push_cfi_reg ebp
+       push_cfi_reg edi
+       push_cfi_reg esi
+       push_cfi_reg edx
+       push_cfi_reg ecx
+       push_cfi_reg ebx
        .endm
 
        .macro RESTORE_ALL
-       popl_cfi_reg ebx
-       popl_cfi_reg ecx
-       popl_cfi_reg edx
-       popl_cfi_reg esi
-       popl_cfi_reg edi
-       popl_cfi_reg ebp
-       popl_cfi_reg eax
+       pop_cfi_reg ebx
+       pop_cfi_reg ecx
+       pop_cfi_reg edx
+       pop_cfi_reg esi
+       pop_cfi_reg edi
+       pop_cfi_reg ebp
+       pop_cfi_reg eax
        .endm
 
 #endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index de1cdaf..5af7e15 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -5,6 +5,8 @@
 #warning "asm/dwarf2.h should be only included in pure assembly files"
 #endif
 
+#include <asm/asm.h>
+
 /*
  * Macros for dwarf2 CFI unwind table entries.
  * See "as.info" for details on these pseudo ops. Unfortunately
@@ -80,79 +82,39 @@
  * what you're doing if you use them.
  */
 #ifdef __ASSEMBLY__
-#ifdef CONFIG_X86_64
-       .macro pushq_cfi reg
-       pushq \reg
-       CFI_ADJUST_CFA_OFFSET 8
-       .endm
-
-       .macro pushq_cfi_reg reg
-       pushq %\reg
-       CFI_ADJUST_CFA_OFFSET 8
-       CFI_REL_OFFSET \reg, 0
-       .endm
 
-       .macro popq_cfi reg
-       popq \reg
-       CFI_ADJUST_CFA_OFFSET -8
-       .endm
-
-       .macro popq_cfi_reg reg
-       popq %\reg
-       CFI_ADJUST_CFA_OFFSET -8
-       CFI_RESTORE \reg
-       .endm
+#define STACK_WORD_SIZE __ASM_SEL(4,8)
 
-       .macro pushfq_cfi
-       pushfq
-       CFI_ADJUST_CFA_OFFSET 8
+       .macro push_cfi reg
+       push \reg
+       CFI_ADJUST_CFA_OFFSET STACK_WORD_SIZE
        .endm
 
-       .macro popfq_cfi
-       popfq
-       CFI_ADJUST_CFA_OFFSET -8
-       .endm
-
-       .macro movq_cfi reg offset=0
-       movq %\reg, \offset(%rsp)
-       CFI_REL_OFFSET \reg, \offset
-       .endm
-
-       .macro movq_cfi_restore offset reg
-       movq \offset(%rsp), %\reg
-       CFI_RESTORE \reg
-       .endm
-#else /*!CONFIG_X86_64*/
-       .macro pushl_cfi reg
-       pushl \reg
-       CFI_ADJUST_CFA_OFFSET 4
-       .endm
-
-       .macro pushl_cfi_reg reg
-       pushl %\reg
-       CFI_ADJUST_CFA_OFFSET 4
+       .macro push_cfi_reg reg
+       push %\reg
+       CFI_ADJUST_CFA_OFFSET STACK_WORD_SIZE
        CFI_REL_OFFSET \reg, 0
        .endm
 
-       .macro popl_cfi reg
-       popl \reg
-       CFI_ADJUST_CFA_OFFSET -4
+       .macro pop_cfi reg
+       pop \reg
+       CFI_ADJUST_CFA_OFFSET -STACK_WORD_SIZE
        .endm
 
-       .macro popl_cfi_reg reg
-       popl %\reg
-       CFI_ADJUST_CFA_OFFSET -4
+       .macro pop_cfi_reg reg
+       pop %\reg
+       CFI_ADJUST_CFA_OFFSET -STACK_WORD_SIZE
        CFI_RESTORE \reg
        .endm
 
-       .macro pushfl_cfi
-       pushfl
-       CFI_ADJUST_CFA_OFFSET 4
+       .macro pushf_cfi
+       pushf
+       CFI_ADJUST_CFA_OFFSET STACK_WORD_SIZE
        .endm
 
-       .macro popfl_cfi
-       popfl
-       CFI_ADJUST_CFA_OFFSET -4
+       .macro popf_cfi
+       popf
+       CFI_ADJUST_CFA_OFFSET -STACK_WORD_SIZE
        .endm
 
        .macro movl_cfi reg offset=0
@@ -164,7 +126,17 @@
        movl \offset(%esp), %\reg
        CFI_RESTORE \reg
        .endm
-#endif /*!CONFIG_X86_64*/
+
+       .macro movq_cfi reg offset=0
+       movq %\reg, \offset(%rsp)
+       CFI_REL_OFFSET \reg, \offset
+       .endm
+
+       .macro movq_cfi_restore offset reg
+       movq \offset(%rsp), %\reg
+       CFI_RESTORE \reg
+       .endm
+
 #endif /*__ASSEMBLY__*/
 
 #endif /* _ASM_X86_DWARF2_H */
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 3b629f4..325e4e8 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -8,12 +8,12 @@
    frame pointer later */
 #ifdef CONFIG_FRAME_POINTER
        .macro FRAME
-       __ASM_SIZE(push,_cfi)   %__ASM_REG(bp)
+       push_cfi                %__ASM_REG(bp)
        CFI_REL_OFFSET          __ASM_REG(bp), 0
        __ASM_SIZE(mov)         %__ASM_REG(sp), %__ASM_REG(bp)
        .endm
        .macro ENDFRAME
-       __ASM_SIZE(pop,_cfi)    %__ASM_REG(bp)
+       pop_cfi                 %__ASM_REG(bp)
        CFI_RESTORE             __ASM_REG(bp)
        .endm
 #else
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 1c30976..7e88181 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -113,7 +113,7 @@
 
  /* unfortunately push/pop can't be no-op */
 .macro PUSH_GS
-       pushl_cfi $0
+       push_cfi $0
 .endm
 .macro POP_GS pop=0
        addl $(4 + \pop), %esp
@@ -137,12 +137,12 @@
 #else  /* CONFIG_X86_32_LAZY_GS */
 
 .macro PUSH_GS
-       pushl_cfi %gs
+       push_cfi %gs
        /*CFI_REL_OFFSET gs, 0*/
 .endm
 
 .macro POP_GS pop=0
-98:    popl_cfi %gs
+98:    pop_cfi %gs
        /*CFI_RESTORE gs*/
   .if \pop <> 0
        add $\pop, %esp
@@ -186,25 +186,25 @@
 .macro SAVE_ALL
        cld
        PUSH_GS
-       pushl_cfi %fs
+       push_cfi %fs
        /*CFI_REL_OFFSET fs, 0;*/
-       pushl_cfi %es
+       push_cfi %es
        /*CFI_REL_OFFSET es, 0;*/
-       pushl_cfi %ds
+       push_cfi %ds
        /*CFI_REL_OFFSET ds, 0;*/
-       pushl_cfi %eax
+       push_cfi %eax
        CFI_REL_OFFSET eax, 0
-       pushl_cfi %ebp
+       push_cfi %ebp
        CFI_REL_OFFSET ebp, 0
-       pushl_cfi %edi
+       push_cfi %edi
        CFI_REL_OFFSET edi, 0
-       pushl_cfi %esi
+       push_cfi %esi
        CFI_REL_OFFSET esi, 0
-       pushl_cfi %edx
+       push_cfi %edx
        CFI_REL_OFFSET edx, 0
-       pushl_cfi %ecx
+       push_cfi %ecx
        CFI_REL_OFFSET ecx, 0
-       pushl_cfi %ebx
+       push_cfi %ebx
        CFI_REL_OFFSET ebx, 0
        movl $(__USER_DS), %edx
        movl %edx, %ds
@@ -215,29 +215,29 @@
 .endm
 
 .macro RESTORE_INT_REGS
-       popl_cfi %ebx
+       pop_cfi %ebx
        CFI_RESTORE ebx
-       popl_cfi %ecx
+       pop_cfi %ecx
        CFI_RESTORE ecx
-       popl_cfi %edx
+       pop_cfi %edx
        CFI_RESTORE edx
-       popl_cfi %esi
+       pop_cfi %esi
        CFI_RESTORE esi
-       popl_cfi %edi
+       pop_cfi %edi
        CFI_RESTORE edi
-       popl_cfi %ebp
+       pop_cfi %ebp
        CFI_RESTORE ebp
-       popl_cfi %eax
+       pop_cfi %eax
        CFI_RESTORE eax
 .endm
 
 .macro RESTORE_REGS pop=0
        RESTORE_INT_REGS
-1:     popl_cfi %ds
+1:     pop_cfi %ds
        /*CFI_RESTORE ds;*/
-2:     popl_cfi %es
+2:     pop_cfi %es
        /*CFI_RESTORE es;*/
-3:     popl_cfi %fs
+3:     pop_cfi %fs
        /*CFI_RESTORE fs;*/
        POP_GS \pop
 .pushsection .fixup, "ax"
@@ -289,24 +289,24 @@
 
 ENTRY(ret_from_fork)
        CFI_STARTPROC
-       pushl_cfi %eax
+       push_cfi %eax
        call schedule_tail
        GET_THREAD_INFO(%ebp)
-       popl_cfi %eax
-       pushl_cfi $0x0202               # Reset kernel eflags
-       popfl_cfi
+       pop_cfi %eax
+       push_cfi $0x0202                # Reset kernel eflags
+       popf_cfi
        jmp syscall_exit
        CFI_ENDPROC
 END(ret_from_fork)
 
 ENTRY(ret_from_kernel_thread)
        CFI_STARTPROC
-       pushl_cfi %eax
+       push_cfi %eax
        call schedule_tail
        GET_THREAD_INFO(%ebp)
-       popl_cfi %eax
-       pushl_cfi $0x0202               # Reset kernel eflags
-       popfl_cfi
+       pop_cfi %eax
+       push_cfi $0x0202                # Reset kernel eflags
+       popf_cfi
        movl PT_EBP(%esp),%eax
        call *PT_EBX(%esp)
        movl $0,PT_EAX(%esp)
@@ -385,13 +385,13 @@ sysenter_past_esp:
         * enough kernel state to call TRACE_IRQS_OFF can be called - but
         * we immediately enable interrupts at that point anyway.
         */
-       pushl_cfi $__USER_DS
+       push_cfi $__USER_DS
        /*CFI_REL_OFFSET ss, 0*/
-       pushl_cfi %ebp
+       push_cfi %ebp
        CFI_REL_OFFSET esp, 0
-       pushfl_cfi
+       pushf_cfi
        orl $X86_EFLAGS_IF, (%esp)
-       pushl_cfi $__USER_CS
+       push_cfi $__USER_CS
        /*CFI_REL_OFFSET cs, 0*/
        /*
         * Push current_thread_info()->sysenter_return to the stack.
@@ -401,10 +401,10 @@ sysenter_past_esp:
         * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
         * and THREAD_SIZE takes us to the bottom.
         */
-       pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + 
TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
+       push_cfi ((TI_sysenter_return) - THREAD_SIZE + 
TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
        CFI_REL_OFFSET eip, 0
 
-       pushl_cfi %eax
+       push_cfi %eax
        SAVE_ALL
        ENABLE_INTERRUPTS(CLBR_NONE)
 
@@ -453,11 +453,11 @@ sysenter_audit:
        /* movl PT_EAX(%esp), %eax      already set, syscall number: 1st arg to 
audit */
        movl PT_EBX(%esp), %edx         /* ebx/a0: 2nd arg to audit */
        /* movl PT_ECX(%esp), %ecx      already set, a1: 3nd arg to audit */
-       pushl_cfi PT_ESI(%esp)          /* a3: 5th arg */
-       pushl_cfi PT_EDX+4(%esp)        /* a2: 4th arg */
+       push_cfi PT_ESI(%esp)           /* a3: 5th arg */
+       push_cfi PT_EDX+4(%esp) /* a2: 4th arg */
        call __audit_syscall_entry
-       popl_cfi %ecx /* get that remapped edx off the stack */
-       popl_cfi %ecx /* get that remapped esi off the stack */
+       pop_cfi %ecx /* get that remapped edx off the stack */
+       pop_cfi %ecx /* get that remapped esi off the stack */
        movl PT_EAX(%esp),%eax          /* reload syscall number */
        jmp sysenter_do_call
 
@@ -493,7 +493,7 @@ ENDPROC(ia32_sysenter_target)
 ENTRY(system_call)
        RING0_INT_FRAME                 # can't unwind into user space anyway
        ASM_CLAC
-       pushl_cfi %eax                  # save orig_eax
+       push_cfi %eax                   # save orig_eax
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
                                        # system call tracing in operation / 
emulation
@@ -577,8 +577,8 @@ ldt_ss:
        shr $16, %edx
        mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
        mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
-       pushl_cfi $__ESPFIX_SS
-       pushl_cfi %eax                  /* new kernel esp */
+       push_cfi $__ESPFIX_SS
+       push_cfi %eax                   /* new kernel esp */
        /* Disable interrupts, but do not irqtrace this section: we
         * will soon execute iret and the tracer was already set to
         * the irqstate after the iret */
@@ -634,9 +634,9 @@ work_notifysig:                             # deal with 
pending signals and
 #ifdef CONFIG_VM86
        ALIGN
 work_notifysig_v86:
-       pushl_cfi %ecx                  # save ti_flags for do_notify_resume
+       push_cfi %ecx                   # save ti_flags for do_notify_resume
        call save_v86_state             # %eax contains pt_regs pointer
-       popl_cfi %ecx
+       pop_cfi %ecx
        movl %eax, %esp
        jmp 1b
 #endif
@@ -701,8 +701,8 @@ END(sysenter_badsys)
        mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
        shl $16, %eax
        addl %esp, %eax                 /* the adjusted stack pointer */
-       pushl_cfi $__KERNEL_DS
-       pushl_cfi %eax
+       push_cfi $__KERNEL_DS
+       push_cfi %eax
        lss (%esp), %esp                /* switch to the normal stack segment */
        CFI_ADJUST_CFA_OFFSET -8
 #endif
@@ -731,7 +731,7 @@ ENTRY(irq_entries_start)
        RING0_INT_FRAME
     vector=FIRST_EXTERNAL_VECTOR
     .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-       pushl_cfi $(~vector+0x80)       /* Note: always in signed byte range */
+       push_cfi $(~vector+0x80)        /* Note: always in signed byte range */
     vector=vector+1
        jmp     common_interrupt
        CFI_ADJUST_CFA_OFFSET -4
@@ -759,7 +759,7 @@ ENDPROC(common_interrupt)
 ENTRY(name)                            \
        RING0_INT_FRAME;                \
        ASM_CLAC;                       \
-       pushl_cfi $~(nr);               \
+       push_cfi $~(nr);                \
        SAVE_ALL;                       \
        TRACE_IRQS_OFF                  \
        movl %esp,%eax;                 \
@@ -786,8 +786,8 @@ ENDPROC(name)
 ENTRY(coprocessor_error)
        RING0_INT_FRAME
        ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi $do_coprocessor_error
+       push_cfi $0
+       push_cfi $do_coprocessor_error
        jmp error_code
        CFI_ENDPROC
 END(coprocessor_error)
@@ -795,14 +795,14 @@ END(coprocessor_error)
 ENTRY(simd_coprocessor_error)
        RING0_INT_FRAME
        ASM_CLAC
-       pushl_cfi $0
+       push_cfi $0
 #ifdef CONFIG_X86_INVD_BUG
        /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
-       ALTERNATIVE "pushl_cfi $do_general_protection", \
+       ALTERNATIVE "push_cfi $do_general_protection",  \
                    "pushl $do_simd_coprocessor_error", \
                    X86_FEATURE_XMM
 #else
-       pushl_cfi $do_simd_coprocessor_error
+       push_cfi $do_simd_coprocessor_error
 #endif
        jmp error_code
        CFI_ENDPROC
@@ -811,8 +811,8 @@ END(simd_coprocessor_error)
 ENTRY(device_not_available)
        RING0_INT_FRAME
        ASM_CLAC
-       pushl_cfi $-1                   # mark this as an int
-       pushl_cfi $do_device_not_available
+       push_cfi $-1                    # mark this as an int
+       push_cfi $do_device_not_available
        jmp error_code
        CFI_ENDPROC
 END(device_not_available)
@@ -832,8 +832,8 @@ END(native_irq_enable_sysexit)
 ENTRY(overflow)
        RING0_INT_FRAME
        ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi $do_overflow
+       push_cfi $0
+       push_cfi $do_overflow
        jmp error_code
        CFI_ENDPROC
 END(overflow)
@@ -841,8 +841,8 @@ END(overflow)
 ENTRY(bounds)
        RING0_INT_FRAME
        ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi $do_bounds
+       push_cfi $0
+       push_cfi $do_bounds
        jmp error_code
        CFI_ENDPROC
 END(bounds)
@@ -850,8 +850,8 @@ END(bounds)
 ENTRY(invalid_op)
        RING0_INT_FRAME
        ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi $do_invalid_op
+       push_cfi $0
+       push_cfi $do_invalid_op
        jmp error_code
        CFI_ENDPROC
 END(invalid_op)
@@ -859,8 +859,8 @@ END(invalid_op)
 ENTRY(coprocessor_segment_overrun)
        RING0_INT_FRAME
        ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi $do_coprocessor_segment_overrun
+       push_cfi $0
+       push_cfi $do_coprocessor_segment_overrun
        jmp error_code
        CFI_ENDPROC
 END(coprocessor_segment_overrun)
@@ -868,7 +868,7 @@ END(coprocessor_segment_overrun)
 ENTRY(invalid_TSS)
        RING0_EC_FRAME
        ASM_CLAC
-       pushl_cfi $do_invalid_TSS
+       push_cfi $do_invalid_TSS
        jmp error_code
        CFI_ENDPROC
 END(invalid_TSS)
@@ -876,7 +876,7 @@ END(invalid_TSS)
 ENTRY(segment_not_present)
        RING0_EC_FRAME
        ASM_CLAC
-       pushl_cfi $do_segment_not_present
+       push_cfi $do_segment_not_present
        jmp error_code
        CFI_ENDPROC
 END(segment_not_present)
@@ -884,7 +884,7 @@ END(segment_not_present)
 ENTRY(stack_segment)
        RING0_EC_FRAME
        ASM_CLAC
-       pushl_cfi $do_stack_segment
+       push_cfi $do_stack_segment
        jmp error_code
        CFI_ENDPROC
 END(stack_segment)
@@ -892,7 +892,7 @@ END(stack_segment)
 ENTRY(alignment_check)
        RING0_EC_FRAME
        ASM_CLAC
-       pushl_cfi $do_alignment_check
+       push_cfi $do_alignment_check
        jmp error_code
        CFI_ENDPROC
 END(alignment_check)
@@ -900,8 +900,8 @@ END(alignment_check)
 ENTRY(divide_error)
        RING0_INT_FRAME
        ASM_CLAC
-       pushl_cfi $0                    # no error code
-       pushl_cfi $do_divide_error
+       push_cfi $0                     # no error code
+       push_cfi $do_divide_error
        jmp error_code
        CFI_ENDPROC
 END(divide_error)
@@ -910,8 +910,8 @@ END(divide_error)
 ENTRY(machine_check)
        RING0_INT_FRAME
        ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi machine_check_vector
+       push_cfi $0
+       push_cfi machine_check_vector
        jmp error_code
        CFI_ENDPROC
 END(machine_check)
@@ -920,8 +920,8 @@ END(machine_check)
 ENTRY(spurious_interrupt_bug)
        RING0_INT_FRAME
        ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi $do_spurious_interrupt_bug
+       push_cfi $0
+       push_cfi $do_spurious_interrupt_bug
        jmp error_code
        CFI_ENDPROC
 END(spurious_interrupt_bug)
@@ -938,7 +938,7 @@ ENTRY(xen_sysenter_target)
 
 ENTRY(xen_hypervisor_callback)
        CFI_STARTPROC
-       pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+       push_cfi $-1 /* orig_ax = -1 => not a system call */
        SAVE_ALL
        TRACE_IRQS_OFF
 
@@ -977,7 +977,7 @@ ENDPROC(xen_hypervisor_callback)
 # We distinguish between categories by maintaining a status value in EAX.
 ENTRY(xen_failsafe_callback)
        CFI_STARTPROC
-       pushl_cfi %eax
+       push_cfi %eax
        movl $1,%eax
 1:     mov 4(%esp),%ds
 2:     mov 8(%esp),%es
@@ -986,12 +986,12 @@ ENTRY(xen_failsafe_callback)
        /* EAX == 0 => Category 1 (Bad segment)
           EAX != 0 => Category 2 (Bad IRET) */
        testl %eax,%eax
-       popl_cfi %eax
+       pop_cfi %eax
        lea 16(%esp),%esp
        CFI_ADJUST_CFA_OFFSET -16
        jz 5f
        jmp iret_exc
-5:     pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+5:     push_cfi $-1 /* orig_ax = -1 => not a system call */
        SAVE_ALL
        jmp ret_from_exception
        CFI_ENDPROC
@@ -1197,7 +1197,7 @@ return_to_handler:
 ENTRY(trace_page_fault)
        RING0_EC_FRAME
        ASM_CLAC
-       pushl_cfi $trace_do_page_fault
+       push_cfi $trace_do_page_fault
        jmp error_code
        CFI_ENDPROC
 END(trace_page_fault)
@@ -1206,23 +1206,23 @@ END(trace_page_fault)
 ENTRY(page_fault)
        RING0_EC_FRAME
        ASM_CLAC
-       pushl_cfi $do_page_fault
+       push_cfi $do_page_fault
        ALIGN
 error_code:
        /* the function address is in %gs's slot on the stack */
-       pushl_cfi %fs
+       push_cfi %fs
        /*CFI_REL_OFFSET fs, 0*/
-       pushl_cfi %es
+       push_cfi %es
        /*CFI_REL_OFFSET es, 0*/
-       pushl_cfi %ds
+       push_cfi %ds
        /*CFI_REL_OFFSET ds, 0*/
-       pushl_cfi_reg eax
-       pushl_cfi_reg ebp
-       pushl_cfi_reg edi
-       pushl_cfi_reg esi
-       pushl_cfi_reg edx
-       pushl_cfi_reg ecx
-       pushl_cfi_reg ebx
+       push_cfi_reg eax
+       push_cfi_reg ebp
+       push_cfi_reg edi
+       push_cfi_reg esi
+       push_cfi_reg edx
+       push_cfi_reg ecx
+       push_cfi_reg ebx
        cld
        movl $(__KERNEL_PERCPU), %ecx
        movl %ecx, %fs
@@ -1263,9 +1263,9 @@ END(page_fault)
        movl TSS_sysenter_sp0 + \offset(%esp), %esp
        CFI_DEF_CFA esp, 0
        CFI_UNDEFINED eip
-       pushfl_cfi
-       pushl_cfi $__KERNEL_CS
-       pushl_cfi $sysenter_past_esp
+       pushf_cfi
+       push_cfi $__KERNEL_CS
+       push_cfi $sysenter_past_esp
        CFI_REL_OFFSET eip, 0
 .endm
 
@@ -1276,7 +1276,7 @@ ENTRY(debug)
        jne debug_stack_correct
        FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
 debug_stack_correct:
-       pushl_cfi $-1                   # mark this as an int
+       push_cfi $-1                    # mark this as an int
        SAVE_ALL
        TRACE_IRQS_OFF
        xorl %edx,%edx                  # error code 0
@@ -1298,28 +1298,28 @@ ENTRY(nmi)
        RING0_INT_FRAME
        ASM_CLAC
 #ifdef CONFIG_X86_ESPFIX32
-       pushl_cfi %eax
+       push_cfi %eax
        movl %ss, %eax
        cmpw $__ESPFIX_SS, %ax
-       popl_cfi %eax
+       pop_cfi %eax
        je nmi_espfix_stack
 #endif
        cmpl $ia32_sysenter_target,(%esp)
        je nmi_stack_fixup
-       pushl_cfi %eax
+       push_cfi %eax
        movl %esp,%eax
        /* Do not access memory above the end of our stack page,
         * it might not exist.
         */
        andl $(THREAD_SIZE-1),%eax
        cmpl $(THREAD_SIZE-20),%eax
-       popl_cfi %eax
+       pop_cfi %eax
        jae nmi_stack_correct
        cmpl $ia32_sysenter_target,12(%esp)
        je nmi_debug_stack_check
 nmi_stack_correct:
        /* We have a RING0_INT_FRAME here */
-       pushl_cfi %eax
+       push_cfi %eax
        SAVE_ALL
        xorl %edx,%edx          # zero error code
        movl %esp,%eax          # pt_regs pointer
@@ -1349,14 +1349,14 @@ nmi_espfix_stack:
         *
         * create the pointer to lss back
         */
-       pushl_cfi %ss
-       pushl_cfi %esp
+       push_cfi %ss
+       push_cfi %esp
        addl $4, (%esp)
        /* copy the iret frame of 12 bytes */
        .rept 3
-       pushl_cfi 16(%esp)
+       push_cfi 16(%esp)
        .endr
-       pushl_cfi %eax
+       push_cfi %eax
        SAVE_ALL
        FIXUP_ESPFIX_STACK              # %eax == %esp
        xorl %edx,%edx                  # zero error code
@@ -1372,7 +1372,7 @@ END(nmi)
 ENTRY(int3)
        RING0_INT_FRAME
        ASM_CLAC
-       pushl_cfi $-1                   # mark this as an int
+       push_cfi $-1                    # mark this as an int
        SAVE_ALL
        TRACE_IRQS_OFF
        xorl %edx,%edx          # zero error code
@@ -1384,7 +1384,7 @@ END(int3)
 
 ENTRY(general_protection)
        RING0_EC_FRAME
-       pushl_cfi $do_general_protection
+       push_cfi $do_general_protection
        jmp error_code
        CFI_ENDPROC
 END(general_protection)
@@ -1393,7 +1393,7 @@ END(general_protection)
 ENTRY(async_page_fault)
        RING0_EC_FRAME
        ASM_CLAC
-       pushl_cfi $do_async_page_fault
+       push_cfi $do_async_page_fault
        jmp error_code
        CFI_ENDPROC
 END(async_page_fault)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 09c3f9e..de7b5ff 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -219,8 +219,8 @@ GLOBAL(system_call_after_swapgs)
        movq    PER_CPU_VAR(cpu_current_top_of_stack),%rsp
 
        /* Construct struct pt_regs on stack */
-       pushq_cfi $__USER_DS                    /* pt_regs->ss */
-       pushq_cfi PER_CPU_VAR(rsp_scratch)      /* pt_regs->sp */
+       push_cfi $__USER_DS                     /* pt_regs->ss */
+       push_cfi PER_CPU_VAR(rsp_scratch)       /* pt_regs->sp */
        /*
         * Re-enable interrupts.
         * We use 'rsp_scratch' as a scratch space, hence irq-off block above
@@ -229,20 +229,20 @@ GLOBAL(system_call_after_swapgs)
         * with using rsp_scratch:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
-       pushq_cfi       %r11                    /* pt_regs->flags */
-       pushq_cfi       $__USER_CS              /* pt_regs->cs */
-       pushq_cfi       %rcx                    /* pt_regs->ip */
+       push_cfi        %r11                    /* pt_regs->flags */
+       push_cfi        $__USER_CS              /* pt_regs->cs */
+       push_cfi        %rcx                    /* pt_regs->ip */
        CFI_REL_OFFSET rip,0
-       pushq_cfi_reg   rax                     /* pt_regs->orig_ax */
-       pushq_cfi_reg   rdi                     /* pt_regs->di */
-       pushq_cfi_reg   rsi                     /* pt_regs->si */
-       pushq_cfi_reg   rdx                     /* pt_regs->dx */
-       pushq_cfi_reg   rcx                     /* pt_regs->cx */
-       pushq_cfi       $-ENOSYS                /* pt_regs->ax */
-       pushq_cfi_reg   r8                      /* pt_regs->r8 */
-       pushq_cfi_reg   r9                      /* pt_regs->r9 */
-       pushq_cfi_reg   r10                     /* pt_regs->r10 */
-       pushq_cfi_reg   r11                     /* pt_regs->r11 */
+       push_cfi_reg    rax                     /* pt_regs->orig_ax */
+       push_cfi_reg    rdi                     /* pt_regs->di */
+       push_cfi_reg    rsi                     /* pt_regs->si */
+       push_cfi_reg    rdx                     /* pt_regs->dx */
+       push_cfi_reg    rcx                     /* pt_regs->cx */
+       push_cfi        $-ENOSYS                /* pt_regs->ax */
+       push_cfi_reg    r8                      /* pt_regs->r8 */
+       push_cfi_reg    r9                      /* pt_regs->r9 */
+       push_cfi_reg    r10                     /* pt_regs->r10 */
+       push_cfi_reg    r11                     /* pt_regs->r11 */
        sub     $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
        CFI_ADJUST_CFA_OFFSET 6*8
 
@@ -374,9 +374,9 @@ int_careful:
        jnc  int_very_careful
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
-       pushq_cfi %rdi
+       push_cfi %rdi
        SCHEDULE_USER
-       popq_cfi %rdi
+       pop_cfi %rdi
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check
@@ -389,10 +389,10 @@ int_very_careful:
        /* Check for syscall exit trace */
        testl $_TIF_WORK_SYSCALL_EXIT,%edx
        jz int_signal
-       pushq_cfi %rdi
+       push_cfi %rdi
        leaq 8(%rsp),%rdi       # &ptregs -> arg1
        call syscall_trace_leave
-       popq_cfi %rdi
+       pop_cfi %rdi
        andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
        jmp int_restore_rest
 
@@ -603,8 +603,8 @@ ENTRY(ret_from_fork)
 
        LOCK ; btr $TIF_FORK,TI_flags(%r8)
 
-       pushq_cfi $0x0002
-       popfq_cfi                               # reset kernel eflags
+       push_cfi $0x0002
+       popf_cfi                                # reset kernel eflags
 
        call schedule_tail                      # rdi: 'prev' task parameter
 
@@ -640,7 +640,7 @@ ENTRY(irq_entries_start)
        INTR_FRAME
     vector=FIRST_EXTERNAL_VECTOR
     .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-       pushq_cfi $(~vector+0x80)       /* Note: always in signed byte range */
+       push_cfi $(~vector+0x80)        /* Note: always in signed byte range */
     vector=vector+1
        jmp     common_interrupt
        CFI_ADJUST_CFA_OFFSET -8
@@ -807,8 +807,8 @@ native_irq_return_iret:
 
 #ifdef CONFIG_X86_ESPFIX64
 native_irq_return_ldt:
-       pushq_cfi %rax
-       pushq_cfi %rdi
+       push_cfi %rax
+       push_cfi %rdi
        SWAPGS
        movq PER_CPU_VAR(espfix_waddr),%rdi
        movq %rax,(0*8)(%rdi)   /* RAX */
@@ -823,11 +823,11 @@ native_irq_return_ldt:
        movq (5*8)(%rsp),%rax   /* RSP */
        movq %rax,(4*8)(%rdi)
        andl $0xffff0000,%eax
-       popq_cfi %rdi
+       pop_cfi %rdi
        orq PER_CPU_VAR(espfix_stack),%rax
        SWAPGS
        movq %rax,%rsp
-       popq_cfi %rax
+       pop_cfi %rax
        jmp native_irq_return_iret
 #endif
 
@@ -838,9 +838,9 @@ retint_careful:
        jnc   retint_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
-       pushq_cfi %rdi
+       push_cfi %rdi
        SCHEDULE_USER
-       popq_cfi %rdi
+       pop_cfi %rdi
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
@@ -872,7 +872,7 @@ END(common_interrupt)
 ENTRY(\sym)
        INTR_FRAME
        ASM_CLAC
-       pushq_cfi $~(\num)
+       push_cfi $~(\num)
 .Lcommon_\sym:
        interrupt \do_sym
        jmp ret_from_intr
@@ -969,7 +969,7 @@ ENTRY(\sym)
        PARAVIRT_ADJUST_EXCEPTION_FRAME
 
        .ifeq \has_error_code
-       pushq_cfi $-1                   /* ORIG_RAX: no syscall to restart */
+       push_cfi $-1                    /* ORIG_RAX: no syscall to restart */
        .endif
 
        ALLOC_PT_GPREGS_ON_STACK
@@ -1086,14 +1086,14 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
        /* edi:  new selector */
 ENTRY(native_load_gs_index)
        CFI_STARTPROC
-       pushfq_cfi
+       pushf_cfi
        DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
        SWAPGS
 gs_change:
        movl %edi,%gs
 2:     mfence          /* workaround */
        SWAPGS
-       popfq_cfi
+       popf_cfi
        ret
        CFI_ENDPROC
 END(native_load_gs_index)
@@ -1111,7 +1111,7 @@ bad_gs:
 /* Call softirq on interrupt stack. Interrupts are off. */
 ENTRY(do_softirq_own_stack)
        CFI_STARTPROC
-       pushq_cfi %rbp
+       push_cfi %rbp
        CFI_REL_OFFSET rbp,0
        mov  %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
@@ -1210,9 +1210,9 @@ ENTRY(xen_failsafe_callback)
        CFI_RESTORE r11
        addq $0x30,%rsp
        CFI_ADJUST_CFA_OFFSET -0x30
-       pushq_cfi $0    /* RIP */
-       pushq_cfi %r11
-       pushq_cfi %rcx
+       push_cfi $0     /* RIP */
+       push_cfi %r11
+       push_cfi %rcx
        jmp general_protection
        CFI_RESTORE_STATE
 1:     /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
@@ -1222,7 +1222,7 @@ ENTRY(xen_failsafe_callback)
        CFI_RESTORE r11
        addq $0x30,%rsp
        CFI_ADJUST_CFA_OFFSET -0x30
-       pushq_cfi $-1 /* orig_ax = -1 => not a system call */
+       push_cfi $-1 /* orig_ax = -1 => not a system call */
        ALLOC_PT_GPREGS_ON_STACK
        SAVE_C_REGS
        SAVE_EXTRA_REGS
@@ -1417,7 +1417,7 @@ ENTRY(nmi)
         */
 
        /* Use %rdx as our temp variable throughout */
-       pushq_cfi %rdx
+       push_cfi %rdx
        CFI_REL_OFFSET rdx, 0
 
        /*
@@ -1473,18 +1473,18 @@ nested_nmi:
        movq %rdx, %rsp
        CFI_ADJUST_CFA_OFFSET 1*8
        leaq -10*8(%rsp), %rdx
-       pushq_cfi $__KERNEL_DS
-       pushq_cfi %rdx
-       pushfq_cfi
-       pushq_cfi $__KERNEL_CS
-       pushq_cfi $repeat_nmi
+       push_cfi $__KERNEL_DS
+       push_cfi %rdx
+       pushf_cfi
+       push_cfi $__KERNEL_CS
+       push_cfi $repeat_nmi
 
        /* Put stack back */
        addq $(6*8), %rsp
        CFI_ADJUST_CFA_OFFSET -6*8
 
 nested_nmi_out:
-       popq_cfi %rdx
+       pop_cfi %rdx
        CFI_RESTORE rdx
 
        /* No need to check faults here */
@@ -1532,7 +1532,7 @@ first_nmi:
        CFI_RESTORE rdx
 
        /* Set the NMI executing variable on the stack. */
-       pushq_cfi $1
+       push_cfi $1
 
        /*
         * Leave room for the "copied" frame
@@ -1542,7 +1542,7 @@ first_nmi:
 
        /* Copy the stack frame to the Saved frame */
        .rept 5
-       pushq_cfi 11*8(%rsp)
+       push_cfi 11*8(%rsp)
        .endr
        CFI_DEF_CFA_OFFSET 5*8
 
@@ -1569,7 +1569,7 @@ repeat_nmi:
        addq $(10*8), %rsp
        CFI_ADJUST_CFA_OFFSET -10*8
        .rept 5
-       pushq_cfi -6*8(%rsp)
+       push_cfi -6*8(%rsp)
        .endr
        subq $(5*8), %rsp
        CFI_DEF_CFA_OFFSET 5*8
@@ -1580,7 +1580,7 @@ end_repeat_nmi:
         * NMI if the first NMI took an exception and reset our iret stack
         * so that we repeat another NMI.
         */
-       pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
+       push_cfi $-1            /* ORIG_RAX: no syscall to restart */
        ALLOC_PT_GPREGS_ON_STACK
 
        /*
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
index 00933d5..aa17c69 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -15,12 +15,12 @@
 
 /* if you want SMP support, implement these with real spinlocks */
 .macro LOCK reg
-       pushfl_cfi
+       pushf_cfi
        cli
 .endm
 
 .macro UNLOCK reg
-       popfl_cfi
+       popf_cfi
 .endm
 
 #define BEGIN(op) \
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index 082a851..c5dd086 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -57,10 +57,10 @@ ENDPROC(atomic64_xchg_cx8)
 .macro addsub_return func ins insc
 ENTRY(atomic64_\func\()_return_cx8)
        CFI_STARTPROC
-       pushl_cfi_reg ebp
-       pushl_cfi_reg ebx
-       pushl_cfi_reg esi
-       pushl_cfi_reg edi
+       push_cfi_reg ebp
+       push_cfi_reg ebx
+       push_cfi_reg esi
+       push_cfi_reg edi
 
        movl %eax, %esi
        movl %edx, %edi
@@ -79,10 +79,10 @@ ENTRY(atomic64_\func\()_return_cx8)
 10:
        movl %ebx, %eax
        movl %ecx, %edx
-       popl_cfi_reg edi
-       popl_cfi_reg esi
-       popl_cfi_reg ebx
-       popl_cfi_reg ebp
+       pop_cfi_reg edi
+       pop_cfi_reg esi
+       pop_cfi_reg ebx
+       pop_cfi_reg ebp
        ret
        CFI_ENDPROC
 ENDPROC(atomic64_\func\()_return_cx8)
@@ -94,7 +94,7 @@ addsub_return sub sub sbb
 .macro incdec_return func ins insc
 ENTRY(atomic64_\func\()_return_cx8)
        CFI_STARTPROC
-       pushl_cfi_reg ebx
+       push_cfi_reg ebx
 
        read64 %esi
 1:
@@ -109,7 +109,7 @@ ENTRY(atomic64_\func\()_return_cx8)
 10:
        movl %ebx, %eax
        movl %ecx, %edx
-       popl_cfi_reg ebx
+       pop_cfi_reg ebx
        ret
        CFI_ENDPROC
 ENDPROC(atomic64_\func\()_return_cx8)
@@ -120,7 +120,7 @@ incdec_return dec sub sbb
 
 ENTRY(atomic64_dec_if_positive_cx8)
        CFI_STARTPROC
-       pushl_cfi_reg ebx
+       push_cfi_reg ebx
 
        read64 %esi
 1:
@@ -136,18 +136,18 @@ ENTRY(atomic64_dec_if_positive_cx8)
 2:
        movl %ebx, %eax
        movl %ecx, %edx
-       popl_cfi_reg ebx
+       pop_cfi_reg ebx
        ret
        CFI_ENDPROC
 ENDPROC(atomic64_dec_if_positive_cx8)
 
 ENTRY(atomic64_add_unless_cx8)
        CFI_STARTPROC
-       pushl_cfi_reg ebp
-       pushl_cfi_reg ebx
+       push_cfi_reg ebp
+       push_cfi_reg ebx
 /* these just push these two parameters on the stack */
-       pushl_cfi_reg edi
-       pushl_cfi_reg ecx
+       push_cfi_reg edi
+       push_cfi_reg ecx
 
        movl %eax, %ebp
        movl %edx, %edi
@@ -169,8 +169,8 @@ ENTRY(atomic64_add_unless_cx8)
 3:
        addl $8, %esp
        CFI_ADJUST_CFA_OFFSET -8
-       popl_cfi_reg ebx
-       popl_cfi_reg ebp
+       pop_cfi_reg ebx
+       pop_cfi_reg ebp
        ret
 4:
        cmpl %edx, 4(%esp)
@@ -182,7 +182,7 @@ ENDPROC(atomic64_add_unless_cx8)
 
 ENTRY(atomic64_inc_not_zero_cx8)
        CFI_STARTPROC
-       pushl_cfi_reg ebx
+       push_cfi_reg ebx
 
        read64 %esi
 1:
@@ -199,7 +199,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
 
        movl $1, %eax
 3:
-       popl_cfi_reg ebx
+       pop_cfi_reg ebx
        ret
        CFI_ENDPROC
 ENDPROC(atomic64_inc_not_zero_cx8)
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 9bc944a..42c1f9f 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -51,8 +51,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
           */           
 ENTRY(csum_partial)
        CFI_STARTPROC
-       pushl_cfi_reg esi
-       pushl_cfi_reg ebx
+       push_cfi_reg esi
+       push_cfi_reg ebx
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
        movl 12(%esp),%esi      # Function arg: unsigned char *buff
@@ -129,8 +129,8 @@ ENTRY(csum_partial)
        jz 8f
        roll $8, %eax
 8:
-       popl_cfi_reg ebx
-       popl_cfi_reg esi
+       pop_cfi_reg ebx
+       pop_cfi_reg esi
        ret
        CFI_ENDPROC
 ENDPROC(csum_partial)
@@ -141,8 +141,8 @@ ENDPROC(csum_partial)
 
 ENTRY(csum_partial)
        CFI_STARTPROC
-       pushl_cfi_reg esi
-       pushl_cfi_reg ebx
+       push_cfi_reg esi
+       push_cfi_reg ebx
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
        movl 12(%esp),%esi      # Function arg: const unsigned char *buf
@@ -249,8 +249,8 @@ ENTRY(csum_partial)
        jz 90f
        roll $8, %eax
 90: 
-       popl_cfi_reg ebx
-       popl_cfi_reg esi
+       pop_cfi_reg ebx
+       pop_cfi_reg esi
        ret
        CFI_ENDPROC
 ENDPROC(csum_partial)
@@ -290,9 +290,9 @@ ENTRY(csum_partial_copy_generic)
        CFI_STARTPROC
        subl  $4,%esp   
        CFI_ADJUST_CFA_OFFSET 4
-       pushl_cfi_reg edi
-       pushl_cfi_reg esi
-       pushl_cfi_reg ebx
+       push_cfi_reg edi
+       push_cfi_reg esi
+       push_cfi_reg ebx
        movl ARGBASE+16(%esp),%eax      # sum
        movl ARGBASE+12(%esp),%ecx      # len
        movl ARGBASE+4(%esp),%esi       # src
@@ -401,10 +401,10 @@ DST(      movb %cl, (%edi)        )
 
 .previous
 
-       popl_cfi_reg ebx
-       popl_cfi_reg esi
-       popl_cfi_reg edi
-       popl_cfi %ecx                   # equivalent to addl $4,%esp
+       pop_cfi_reg ebx
+       pop_cfi_reg esi
+       pop_cfi_reg edi
+       pop_cfi %ecx                    # equivalent to addl $4,%esp
        ret     
        CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
@@ -427,9 +427,9 @@ ENDPROC(csum_partial_copy_generic)
                
 ENTRY(csum_partial_copy_generic)
        CFI_STARTPROC
-       pushl_cfi_reg ebx
-       pushl_cfi_reg edi
-       pushl_cfi_reg esi
+       push_cfi_reg ebx
+       push_cfi_reg edi
+       push_cfi_reg esi
        movl ARGBASE+4(%esp),%esi       #src
        movl ARGBASE+8(%esp),%edi       #dst    
        movl ARGBASE+12(%esp),%ecx      #len
@@ -489,9 +489,9 @@ DST(        movb %dl, (%edi)         )
        jmp  7b                 
 .previous                              
 
-       popl_cfi_reg esi
-       popl_cfi_reg edi
-       popl_cfi_reg ebx
+       pop_cfi_reg esi
+       pop_cfi_reg edi
+       pop_cfi_reg ebx
        ret
        CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
index 40a1725..b18f317 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -32,7 +32,7 @@ CFI_STARTPROC
 # *atomic* on a single cpu (as provided by the this_cpu_xx class of
 # macros).
 #
-       pushfq_cfi
+       pushf_cfi
        cli
 
        cmpq PER_CPU_VAR((%rsi)), %rax
@@ -44,13 +44,13 @@ CFI_STARTPROC
        movq %rcx, PER_CPU_VAR(8(%rsi))
 
        CFI_REMEMBER_STATE
-       popfq_cfi
+       popf_cfi
        mov $1, %al
        ret
 
        CFI_RESTORE_STATE
 .Lnot_same:
-       popfq_cfi
+       popf_cfi
        xor %al,%al
        ret
 
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index b4807fce..a4862d0 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -27,7 +27,7 @@ CFI_STARTPROC
 # set the whole ZF thing (caller will just compare
 # eax:edx with the expected value)
 #
-       pushfl_cfi
+       pushf_cfi
        cli
 
        cmpl  (%esi), %eax
@@ -39,7 +39,7 @@ CFI_STARTPROC
        movl %ecx, 4(%esi)
 
        CFI_REMEMBER_STATE
-       popfl_cfi
+       popf_cfi
        ret
 
        CFI_RESTORE_STATE
@@ -48,7 +48,7 @@ CFI_STARTPROC
 .Lhalf_same:
        movl 4(%esi), %edx
 
-       popfl_cfi
+       popf_cfi
        ret
 
 CFI_ENDPROC
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index 3ca5218..046a560 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -14,8 +14,8 @@
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
        CFI_STARTPROC
-       pushq_cfi_reg rbx
-       pushq_cfi_reg rbp
+       push_cfi_reg rbx
+       push_cfi_reg rbp
        movq    %rdi, %r10      /* Save pointer */
        xorl    %r11d, %r11d    /* Return value */
        movl    (%rdi), %eax
@@ -35,8 +35,8 @@ ENTRY(\op\()_safe_regs)
        movl    %ebp, 20(%r10)
        movl    %esi, 24(%r10)
        movl    %edi, 28(%r10)
-       popq_cfi_reg rbp
-       popq_cfi_reg rbx
+       pop_cfi_reg rbp
+       pop_cfi_reg rbx
        ret
 3:
        CFI_RESTORE_STATE
@@ -53,12 +53,12 @@ ENDPROC(\op\()_safe_regs)
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
        CFI_STARTPROC
-       pushl_cfi_reg ebx
-       pushl_cfi_reg ebp
-       pushl_cfi_reg esi
-       pushl_cfi_reg edi
-       pushl_cfi $0              /* Return value */
-       pushl_cfi %eax
+       push_cfi_reg ebx
+       push_cfi_reg ebp
+       push_cfi_reg esi
+       push_cfi_reg edi
+       push_cfi $0              /* Return value */
+       push_cfi %eax
        movl    4(%eax), %ecx
        movl    8(%eax), %edx
        movl    12(%eax), %ebx
@@ -68,9 +68,9 @@ ENTRY(\op\()_safe_regs)
        movl    (%eax), %eax
        CFI_REMEMBER_STATE
 1:     \op
-2:     pushl_cfi %eax
+2:     push_cfi %eax
        movl    4(%esp), %eax
-       popl_cfi (%eax)
+       pop_cfi (%eax)
        addl    $4, %esp
        CFI_ADJUST_CFA_OFFSET -4
        movl    %ecx, 4(%eax)
@@ -79,11 +79,11 @@ ENTRY(\op\()_safe_regs)
        movl    %ebp, 20(%eax)
        movl    %esi, 24(%eax)
        movl    %edi, 28(%eax)
-       popl_cfi %eax
-       popl_cfi_reg edi
-       popl_cfi_reg esi
-       popl_cfi_reg ebp
-       popl_cfi_reg ebx
+       pop_cfi %eax
+       pop_cfi_reg edi
+       pop_cfi_reg esi
+       pop_cfi_reg ebp
+       pop_cfi_reg ebx
        ret
 3:
        CFI_RESTORE_STATE
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
index 2322abe..c630a80 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -34,10 +34,10 @@
  */
 
 #define save_common_regs \
-       pushl_cfi_reg ecx
+       push_cfi_reg ecx
 
 #define restore_common_regs \
-       popl_cfi_reg ecx
+       pop_cfi_reg ecx
 
        /* Avoid uglifying the argument copying x86-64 needs to do. */
        .macro movq src, dst
@@ -64,22 +64,22 @@
  */
 
 #define save_common_regs \
-       pushq_cfi_reg rdi; \
-       pushq_cfi_reg rsi; \
-       pushq_cfi_reg rcx; \
-       pushq_cfi_reg r8;  \
-       pushq_cfi_reg r9;  \
-       pushq_cfi_reg r10; \
-       pushq_cfi_reg r11
+       push_cfi_reg rdi; \
+       push_cfi_reg rsi; \
+       push_cfi_reg rcx; \
+       push_cfi_reg r8;  \
+       push_cfi_reg r9;  \
+       push_cfi_reg r10; \
+       push_cfi_reg r11
 
 #define restore_common_regs \
-       popq_cfi_reg r11; \
-       popq_cfi_reg r10; \
-       popq_cfi_reg r9; \
-       popq_cfi_reg r8; \
-       popq_cfi_reg rcx; \
-       popq_cfi_reg rsi; \
-       popq_cfi_reg rdi
+       pop_cfi_reg r11; \
+       pop_cfi_reg r10; \
+       pop_cfi_reg r9; \
+       pop_cfi_reg r8; \
+       pop_cfi_reg rcx; \
+       pop_cfi_reg rsi; \
+       pop_cfi_reg rdi
 
 #endif
 
@@ -87,10 +87,10 @@
 ENTRY(call_rwsem_down_read_failed)
        CFI_STARTPROC
        save_common_regs
-       __ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+       push_cfi_reg __ASM_REG(dx)
        movq %rax,%rdi
        call rwsem_down_read_failed
-       __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+       pop_cfi_reg __ASM_REG(dx)
        restore_common_regs
        ret
        CFI_ENDPROC
@@ -122,10 +122,10 @@ ENDPROC(call_rwsem_wake)
 ENTRY(call_rwsem_downgrade_wake)
        CFI_STARTPROC
        save_common_regs
-       __ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+       push_cfi_reg __ASM_REG(dx)
        movq %rax,%rdi
        call rwsem_downgrade_wake
-       __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+       pop_cfi_reg __ASM_REG(dx)
        restore_common_regs
        ret
        CFI_ENDPROC
diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S
index 5eb7150..bb370de 100644
--- a/arch/x86/lib/thunk_32.S
+++ b/arch/x86/lib/thunk_32.S
@@ -13,9 +13,9 @@
        .globl \name
 \name:
        CFI_STARTPROC
-       pushl_cfi_reg eax
-       pushl_cfi_reg ecx
-       pushl_cfi_reg edx
+       push_cfi_reg eax
+       push_cfi_reg ecx
+       push_cfi_reg edx
 
        .if \put_ret_addr_in_eax
        /* Place EIP in the arg1 */
@@ -23,9 +23,9 @@
        .endif
 
        call \func
-       popl_cfi_reg edx
-       popl_cfi_reg ecx
-       popl_cfi_reg eax
+       pop_cfi_reg edx
+       pop_cfi_reg ecx
+       pop_cfi_reg eax
        ret
        CFI_ENDPROC
        _ASM_NOKPROBE(\name)
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index f89ba4e9..39ad268 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -17,15 +17,15 @@
        CFI_STARTPROC
 
        /* this one pushes 9 elems, the next one would be %rIP */
-       pushq_cfi_reg rdi
-       pushq_cfi_reg rsi
-       pushq_cfi_reg rdx
-       pushq_cfi_reg rcx
-       pushq_cfi_reg rax
-       pushq_cfi_reg r8
-       pushq_cfi_reg r9
-       pushq_cfi_reg r10
-       pushq_cfi_reg r11
+       push_cfi_reg rdi
+       push_cfi_reg rsi
+       push_cfi_reg rdx
+       push_cfi_reg rcx
+       push_cfi_reg rax
+       push_cfi_reg r8
+       push_cfi_reg r9
+       push_cfi_reg r10
+       push_cfi_reg r11
 
        .if \put_ret_addr_in_rdi
        /* 9*8(%rsp) is return addr on stack */
@@ -60,15 +60,15 @@
        CFI_STARTPROC
        CFI_ADJUST_CFA_OFFSET 9*8
 restore:
-       popq_cfi_reg r11
-       popq_cfi_reg r10
-       popq_cfi_reg r9
-       popq_cfi_reg r8
-       popq_cfi_reg rax
-       popq_cfi_reg rcx
-       popq_cfi_reg rdx
-       popq_cfi_reg rsi
-       popq_cfi_reg rdi
+       pop_cfi_reg r11
+       pop_cfi_reg r10
+       pop_cfi_reg r9
+       pop_cfi_reg r8
+       pop_cfi_reg rax
+       pop_cfi_reg rcx
+       pop_cfi_reg rdx
+       pop_cfi_reg rsi
+       pop_cfi_reg rdi
        ret
        CFI_ENDPROC
        _ASM_NOKPROBE(restore)
-- 
2.1.0

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to