The PER_CPU_VAR macro is intended to be applied to a symbol and should
not be used with general operands. Introduce a new PER_CPU_ARG macro
and use it in cmpxchg{8,16}b_emu.S instead.
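To illustrate the intended distinction (example only, not part of the
patch; this_cpu_off is an existing x86 per-CPU variable, used here
purely for demonstration):

	movq	PER_CPU_VAR(this_cpu_off), %rax	/* symbol operand */
	cmpq	PER_CPU_ARG(0(%rsi)), %rax	/* general (register-indirect) operand */

With CONFIG_SMP=y both expand to a %__percpu_seg-prefixed operand;
without SMP they expand to the bare operand.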
The PER_CPU_VAR macro will be repurposed for %rip-relative addressing.

Also add a missing function comment to this_cpu_cmpxchg8b_emu.

No functional changes intended.

Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Signed-off-by: Uros Bizjak <ubiz...@gmail.com>
--
v2: Introduce PER_CPU_ARG macro to conditionally enable segment
    registers in cmpxchg{8,16}b_emu.S for CONFIG_SMP.
---
 arch/x86/include/asm/percpu.h |  2 ++
 arch/x86/lib/cmpxchg16b_emu.S | 12 ++++++------
 arch/x86/lib/cmpxchg8b_emu.S  | 24 ++++++++++++++++++------
 3 files changed, 26 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 34734d730463..83e6a4bcea38 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -11,8 +11,10 @@
 #ifdef __ASSEMBLY__
 
 #ifdef CONFIG_SMP
+#define PER_CPU_ARG(arg)	%__percpu_seg:arg
 #define PER_CPU_VAR(var)	%__percpu_seg:var
 #else /* ! SMP */
+#define PER_CPU_ARG(arg)	arg
 #define PER_CPU_VAR(var)	var
 #endif	/* SMP */
 
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
index 6962df315793..b6b942d07a00 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -23,14 +23,14 @@ SYM_FUNC_START(this_cpu_cmpxchg16b_emu)
 	cli
 
 	/* if (*ptr == old) */
-	cmpq	PER_CPU_VAR(0(%rsi)), %rax
+	cmpq	PER_CPU_ARG(0(%rsi)), %rax
 	jne	.Lnot_same
-	cmpq	PER_CPU_VAR(8(%rsi)), %rdx
+	cmpq	PER_CPU_ARG(8(%rsi)), %rdx
 	jne	.Lnot_same
 
 	/* *ptr = new */
-	movq	%rbx, PER_CPU_VAR(0(%rsi))
-	movq	%rcx, PER_CPU_VAR(8(%rsi))
+	movq	%rbx, PER_CPU_ARG(0(%rsi))
+	movq	%rcx, PER_CPU_ARG(8(%rsi))
 
 	/* set ZF in EFLAGS to indicate success */
 	orl	$X86_EFLAGS_ZF, (%rsp)
@@ -42,8 +42,8 @@ SYM_FUNC_START(this_cpu_cmpxchg16b_emu)
 	/* *ptr != old */
 
 	/* old = *ptr */
-	movq	PER_CPU_VAR(0(%rsi)), %rax
-	movq	PER_CPU_VAR(8(%rsi)), %rdx
+	movq	PER_CPU_ARG(0(%rsi)), %rax
+	movq	PER_CPU_ARG(8(%rsi)), %rdx
 
 	/* clear ZF in EFLAGS to indicate failure */
 	andl	$(~X86_EFLAGS_ZF), (%rsp)
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index 49805257b125..9a0a7feeaf7c 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -53,18 +53,30 @@ EXPORT_SYMBOL(cmpxchg8b_emu)
 
 #ifndef CONFIG_UML
 
+/*
+ * Emulate 'cmpxchg8b %fs:(%rsi)'
+ *
+ * Inputs:
+ * %esi : memory location to compare
+ * %eax : low 32 bits of old value
+ * %edx : high 32 bits of old value
+ * %ebx : low 32 bits of new value
+ * %ecx : high 32 bits of new value
+ *
+ * Notably this is not LOCK prefixed and is not safe against NMIs
+ */
 SYM_FUNC_START(this_cpu_cmpxchg8b_emu)
 
 	pushfl
 	cli
 
-	cmpl	PER_CPU_VAR(0(%esi)), %eax
+	cmpl	PER_CPU_ARG(0(%esi)), %eax
 	jne	.Lnot_same2
-	cmpl	PER_CPU_VAR(4(%esi)), %edx
+	cmpl	PER_CPU_ARG(4(%esi)), %edx
 	jne	.Lnot_same2
 
-	movl	%ebx, PER_CPU_VAR(0(%esi))
-	movl	%ecx, PER_CPU_VAR(4(%esi))
+	movl	%ebx, PER_CPU_ARG(0(%esi))
+	movl	%ecx, PER_CPU_ARG(4(%esi))
 
 	orl	$X86_EFLAGS_ZF, (%esp)
 
@@ -72,8 +84,8 @@ SYM_FUNC_START(this_cpu_cmpxchg8b_emu)
 	RET
 
 .Lnot_same2:
-	movl	PER_CPU_VAR(0(%esi)), %eax
-	movl	PER_CPU_VAR(4(%esi)), %edx
+	movl	PER_CPU_ARG(0(%esi)), %eax
+	movl	PER_CPU_ARG(4(%esi)), %edx
 
 	andl	$(~X86_EFLAGS_ZF), (%esp)
 
--
2.41.0