[Xen-devel] [PATCH v5 24/27] x86_32: assembly, add ENDs to some functions and relabel with SYM_CODE_*

2017-11-30 Thread Jiri Slaby
All these are functions which are invoked from elsewhere, but they are
not typical C functions. So we annotate them (as global) using the new
SYM_CODE_START. None of them was balanced with any END, so mark their
ends with SYM_CODE_END, appropriately.
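
For illustration, the conversion pattern looks roughly like this (a minimal
sketch with a made-up symbol name, not a hunk from this patch):

	/* before: opened by ENTRY, never closed */
	ENTRY(my_entry_stub)
		jmp	somewhere_else

	/* after: annotated as non-C code and properly closed */
	SYM_CODE_START(my_entry_stub)
		jmp	somewhere_else
	SYM_CODE_END(my_entry_stub)

The added SYM_CODE_END is what gives the symbol a size in the resulting
ELF object; an unbalanced ENTRY never did.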

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Reviewed-by: Rafael J. Wysocki  [hibernate]
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/entry/entry_32.S| 3 ++-
 arch/x86/kernel/acpi/wakeup_32.S | 7 ---
 arch/x86/kernel/ftrace_32.S  | 3 ++-
 arch/x86/kernel/head_32.S| 3 ++-
 arch/x86/power/hibernate_asm_32.S| 6 --
 arch/x86/realmode/rm/trampoline_32.S | 6 --
 arch/x86/xen/xen-asm_32.S| 7 ---
 7 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 4cef47b8027f..f705f082b3c6 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -361,9 +361,10 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
  * Xen doesn't set %esp to be precisely what the normal SYSENTER
  * entry point expects, so fix it up before using the normal path.
  */
-ENTRY(xen_sysenter_target)
+SYM_CODE_START(xen_sysenter_target)
addl$5*4, %esp  /* remove xen-provided frame */
jmp .Lsysenter_past_esp
+SYM_CODE_END(xen_sysenter_target)
 #endif
 
 /*
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index feac1e5ecba0..71a05a6cc36a 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -8,8 +8,7 @@
.code32
ALIGN
 
-ENTRY(wakeup_pmode_return)
-wakeup_pmode_return:
+SYM_CODE_START(wakeup_pmode_return)
movw$__KERNEL_DS, %ax
movw%ax, %ss
movw%ax, %fs
@@ -38,6 +37,7 @@ wakeup_pmode_return:
# jump to place where we left off
movlsaved_eip, %eax
jmp *%eax
+SYM_CODE_END(wakeup_pmode_return)
 
 bogus_magic:
jmp bogus_magic
@@ -71,7 +71,7 @@ restore_registers:
popfl
ret
 
-ENTRY(do_suspend_lowlevel)
+SYM_CODE_START(do_suspend_lowlevel)
callsave_processor_state
callsave_registers
pushl   $3
@@ -86,6 +86,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_suspend_lowlevel)
 
 .data
 ALIGN
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index b9253728defd..5785a289a8ac 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -101,7 +101,7 @@ WEAK(ftrace_stub)
ret
 END(ftrace_caller)
 
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
/*
 * i386 does not save SS and ESP when coming from kernel.
 * Instead, to get sp, &regs->sp is used (see ptrace.h).
@@ -169,6 +169,7 @@ SYM_CODE_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */
 
jmp .Lftrace_ret
+SYM_CODE_END(ftrace_regs_caller)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(function_hook)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index a2bc3e35c2c6..0cf8ed275272 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  * can.
  */
 __HEAD
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
movl pa(initial_stack),%ecx

/* test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
 #else
jmp .Ldefault_entry
 #endif /* CONFIG_PARAVIRT */
+SYM_CODE_END(startup_32)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 6e56815e13a0..3cd15e34aa87 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -15,7 +15,7 @@
 
 .text
 
-ENTRY(swsusp_arch_suspend)
+SYM_CODE_START(swsusp_arch_suspend)
movl %esp, saved_context_esp
movl %ebx, saved_context_ebx
movl %ebp, saved_context_ebp
@@ -26,8 +26,9 @@ ENTRY(swsusp_arch_suspend)
 
call swsusp_save
ret
+SYM_CODE_END(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
movlmmu_cr4_features, %ecx
movlresume_pg_dir, %eax
subl$__PAGE_OFFSET, %eax
@@ -83,3 +84,4 @@ done:
xorl%eax, %eax
 
ret
+SYM_CODE_END(restore_image)
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
index e96efcd60bf7..a3b047a44c5c 100644
--- a/arch/x86/realmode/rm/trampoline_32.S
+++ b/arch/x86/realmode

[Xen-devel] [PATCH v5 19/27] x86: assembly, make some functions local

2017-11-30 Thread Jiri Slaby
There are a couple of assembly functions which are invoked only locally
in the file they are defined in. In C, we would mark them "static". In
assembly, annotate them using SYM_{FUNC,CODE}_START_LOCAL (and switch
their ENDPROC to SYM_{FUNC,CODE}_END too). Whether FUNC or CODE is used
depends on whether ENDPROC or END closed a particular function before
(i.e. whether it is a C-like function or not).
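
The FUNC-vs-CODE rule follows from what the old macros expand to; a
simplified sketch based on the pre-existing include/linux/linkage.h
definitions (architectures may override the details):

	/* ENTRY(name)   ~  .globl name ; ALIGN ; name:       */
	/* END(name)     ~  .size name, . - name              */
	/* ENDPROC(name) ~  .type name, @function ; END(name) */

So a body closed by ENDPROC was typed STT_FUNC (a C-like function) and
becomes SYM_FUNC_*, while a body closed by bare END stays untyped and
becomes SYM_CODE_*; the _LOCAL variants additionally leave out the
.globl.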

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S |  8 
 arch/x86/entry/entry_64.S   | 25 +
 arch/x86/lib/copy_page_64.S |  4 ++--
 arch/x86/lib/memcpy_64.S| 12 ++--
 arch/x86/lib/memset_64.S|  8 
 arch/x86/platform/efi/efi_thunk_64.S| 12 ++--
 arch/x86/xen/xen-pvh.S  |  4 ++--
 7 files changed, 37 insertions(+), 36 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
index d66000d23921..31312070db22 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -99,12 +99,12 @@ ENTRY(efi64_thunk)
ret
 ENDPROC(efi64_thunk)
 
-ENTRY(efi_exit32)
+SYM_FUNC_START_LOCAL(efi_exit32)
movqfunc_rt_ptr(%rip), %rax
push%rax
mov %rdi, %rax
ret
-ENDPROC(efi_exit32)
+SYM_FUNC_END(efi_exit32)
 
.code32
 /*
@@ -112,7 +112,7 @@ ENDPROC(efi_exit32)
  *
  * The stack should represent the 32-bit calling convention.
  */
-ENTRY(efi_enter32)
+SYM_FUNC_START_LOCAL(efi_enter32)
movl$__KERNEL_DS, %eax
movl%eax, %ds
movl%eax, %es
@@ -172,7 +172,7 @@ ENTRY(efi_enter32)
btsl$X86_CR0_PG_BIT, %eax
movl%eax, %cr0
lret
-ENDPROC(efi_enter32)
+SYM_FUNC_END(efi_enter32)
 
.data
.balign 8
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index c3426184d3c6..3543ee220ab3 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -411,7 +411,7 @@ syscall_return_via_sysret:
USERGS_SYSRET64
 END(entry_SYSCALL_64)
 
-ENTRY(stub_ptregs_64)
+SYM_CODE_START_LOCAL(stub_ptregs_64)
/*
 * Syscalls marked as needing ptregs land here.
 * If we are on the fast path, we need to save the extra regs,
@@ -436,7 +436,7 @@ ENTRY(stub_ptregs_64)
 
 1:
jmp *%rax   /* Called from C */
-END(stub_ptregs_64)
+SYM_CODE_END(stub_ptregs_64)
 
 .macro ptregs_stub func
 ENTRY(ptregs_\func)
@@ -1139,7 +1139,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
  * existing activation in its critical region -- if so, we pop the current
  * activation and restart the handler using the previous one.
  */
-ENTRY(xen_do_hypervisor_callback)	/* do_hypervisor_callback(struct *pt_regs) */
+/* do_hypervisor_callback(struct *pt_regs) */
+SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
 
 /*
  * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
@@ -1157,7 +1158,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
callxen_maybe_preempt_hcall
 #endif
jmp error_exit
-END(xen_do_hypervisor_callback)
+SYM_CODE_END(xen_do_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1242,7 +1243,7 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vec
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
-ENTRY(paranoid_entry)
+SYM_CODE_START_LOCAL(paranoid_entry)
UNWIND_HINT_FUNC
cld
SAVE_C_REGS 8
@@ -1260,7 +1261,7 @@ ENTRY(paranoid_entry)
SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=ax save_reg=%r14
 
ret
-END(paranoid_entry)
+SYM_CODE_END(paranoid_entry)
 
 /*
  * "Paranoid" exit path from exception stack.  This is invoked
@@ -1274,7 +1275,7 @@ END(paranoid_entry)
  *
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
-ENTRY(paranoid_exit)
+SYM_CODE_START_LOCAL(paranoid_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
@@ -1288,13 +1289,13 @@ ENTRY(paranoid_exit)
TRACE_IRQS_IRETQ_DEBUG
 .Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
-END(paranoid_exit)
+SYM_CODE_END(paranoid_exit)
 
 /*
  * Save all registers in pt_regs, and switch gs if needed.
  * Return: EBX=0: came from user mode; EBX=1: otherwise
  */
-ENTRY(error_entry)
+SYM_CODE_START_LOCAL(error_entry)
UNWIND_HINT_FUNC
cld
SAVE_C_REGS 8
@@ -1383,7 +1384,7 @@ ENTRY(error_entry)
mov %rax, %rsp
decl%ebx
 

[Xen-devel] [PATCH v5 01/27] linkage: new macros for assembler symbols

2017-11-30 Thread Jiri Slaby
Introduce new C macros for annotations of functions and data in
assembly. There is a long-standing mess in macros like ENTRY, END,
ENDPROC and similar. They are used in different manners and sometimes
incorrectly.

So introduce macros with clear use to annotate assembly as follows:

a) Support macros for the ones below
   SYM_T_FUNC -- type used by assembler to mark functions
   SYM_T_OBJECT -- type used by assembler to mark data
   SYM_T_NONE -- type used by assembler to mark entries of unknown type

   They are defined as STT_FUNC, STT_OBJECT, and STT_NOTYPE
   respectively. According to the gas manual, this is the most portable
   way. I am not sure about other assemblers, so we can switch this back
   to %function and %object if this turns into a problem. Architectures
   can also override them by something like ", @function" if they need.

   SYM_A_ALIGN, SYM_A_NONE -- align the symbol?
   SYM_L_GLOBAL, SYM_L_WEAK, SYM_L_LOCAL -- linkage of symbols

b) Mostly internal annotations, used by the ones below
   SYM_ENTRY -- use only if you have to (for non-paired symbols)
   SYM_START -- use only if you have to (for paired symbols)
   SYM_END -- use only if you have to (for paired symbols)

c) Annotations for code
   SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for
one function
   SYM_FUNC_START_ALIAS -- use where there are two global names for one
function
   SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function

   SYM_FUNC_START -- use for global functions
   SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment
   SYM_FUNC_START_LOCAL -- use for local functions
   SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o
alignment
   SYM_FUNC_START_WEAK -- use for weak functions
   SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment
   SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
SYM_FUNC_START_WEAK, ...

   SYM_FUNC_INNER_LABEL_ALIGN -- only for labels in the middle of
functions, w/ alignment
   SYM_FUNC_INNER_LABEL -- only for labels in the middle of functions,
w/o alignment

   For functions with special (non-C) calling conventions:
   SYM_CODE_START -- use for non-C (special) functions
   SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o
alignment
   SYM_CODE_START_LOCAL -- use for local non-C (special) functions
   SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special)
functions, w/o alignment
   SYM_CODE_END -- the end of SYM_CODE_START_LOCAL or SYM_CODE_START

   SYM_CODE_INNER_LABEL_ALIGN -- only for labels in the middle of code
   SYM_CODE_INNER_LABEL -- only for labels in the middle of code

d) For data
   SYM_DATA_START -- global data symbol
   SYM_DATA_START_LOCAL -- local data symbol
   SYM_DATA_END -- the end of the SYM_DATA_START symbol
   SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol
   SYM_DATA -- start+end wrapper around simple global data
   SYM_DATA_LOCAL -- start+end wrapper around simple local data

==

The macros make it possible to pair starts and ends of functions and to
mark functions correctly in the output ELF objects.
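
A short usage sketch with hypothetical symbols (assuming the semantics
listed above; SYM_DATA is assumed to take the symbol name plus its
initializer):

	SYM_FUNC_START(copy_thing)	/* global, aligned, typed STT_FUNC */
		movq	%rdi, %rax
		ret
	SYM_FUNC_END(copy_thing)	/* emits .size copy_thing, . - copy_thing */

	SYM_DATA(magic_value, .quad 42)	/* start+end wrapper for simple data */

The explicit START/END pairing is what lets tools verify that no
annotation is left unbalanced and that every symbol gets a correct type
and size.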

All users of the old macros in x86 are converted to use these in further
patches.

[v2]
* use SYM_ prefix and sane names
* add SYM_START and SYM_END and parametrize all the macros

[v3]
* add SYM_DATA, SYM_DATA_LOCAL, and SYM_DATA_END_LABEL

[v4]
* add _NOALIGN versions of some macros
* add _CODE_ derivatives of _FUNC_ macros

[v5]
* drop "SIMPLE" from data annotations
* switch NOALIGN and ALIGN variants of inner labels
* s/visibility/linkage/; s@SYM_V_@SYM_L_@
* add Documentation

Signed-off-by: Jiri Slaby 
Cc: Andrew Morton 
Cc: Boris Ostrovsky 
Cc: h...@zytor.com
Cc: Ingo Molnar 
Cc: jpoim...@redhat.com
Cc: Juergen Gross 
Cc: Len Brown 
Cc: Linus Torvalds 
Cc: linux-ker...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: mi...@redhat.com
Cc: Pavel Machek 
Cc: Peter Zijlstra 
Cc: "Rafael J. Wysocki" 
Cc: Thomas Gleixner 
Cc: xen-devel@lists.xenproject.org
Cc: x...@kernel.org
---
 Documentation/asm-annotations.rst | 218 
 arch/x86/include/asm/linkage.h|  10 +-
 include/linux/linkage.h   | 257 --
 3 files changed, 475 insertions(+), 10 deletions(-)
 create mode 100644 Documentation/asm-annotations.rst

diff --git a/Documentation/asm-annotations.rst b/Documentation/asm-annotations.rst
new file mode 100644
index ..3e9b426347f0
--- /dev/null
+++ b/Documentation/asm-annotations.rst
@@ -0,0 +1,218 @@
+Assembler Annotations
+=====================
+
+Copyright (c) 2017 Jiri Slaby
+
+This document describes the new macros for annotation of data and code in
+assembler. In particular, it contains information about ``SYM_FUNC_START``,
+``SYM_FUNC_END``, ``SYM_CODE_START``, and similar.
+
+Rationale
+---------
+Some code like entries, trampolines, or boot code needs to be written in
+assembly.

[Xen-devel] [PATCH v5 08/27] x86: assembly, annotate aliases

2017-11-30 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.
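
The alias annotations nest around the primary name, roughly like this
(illustrative symbols, mirroring the memcpy/__memcpy hunk below):

	SYM_FUNC_START_ALIAS(__do_thing)	/* the aliasing name */
	SYM_FUNC_START(do_thing)		/* the primary name */
		ret
	SYM_FUNC_END(do_thing)
	SYM_FUNC_END_ALIAS(__do_thing)		/* both names get a proper size */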

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 1d34d5a14682..426a22192d69 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1873,8 +1873,7 @@ ENDPROC(aesni_gcm_enc)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
@@ -1885,8 +1884,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..4911b1c61aa8 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -26,7 +26,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -40,7 +40,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 8a10c9a9e2b5..0820db159053 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -150,13 +150,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.15.0



[Xen-devel] [PATCH v5 13/27] x86: xen-pvh, annotate data appropriately

2017-11-30 Thread Jiri Slaby
Use the new SYM_DATA_START_LOCAL and SYM_DATA_END* macros to get:
   0000     8 OBJECT  LOCAL  DEFAULT  6 gdt
   0008    32 OBJECT  LOCAL  DEFAULT  6 gdt_start
   0028     0 OBJECT  LOCAL  DEFAULT  6 gdt_end
   0028   256 OBJECT  LOCAL  DEFAULT  6 early_stack
   0128     0 OBJECT  LOCAL  DEFAULT  6 early_stack_end
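
The last two rows come from SYM_DATA_END_LABEL, which closes the object
and defines the end marker as a zero-sized object of the given linkage;
a sketch with the symbols from this patch:

	SYM_DATA_START_LOCAL(early_stack)
		.fill 256, 1, 0
	SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
	/* early_stack: LOCAL OBJECT, size 256 */
	/* early_stack_end: LOCAL OBJECT, size 0, placed right after it */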

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/xen/xen-pvh.S | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index e1a5fbeae08d..52b28793a625 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -137,11 +137,12 @@ END(pvh_start_xen)
 
.section ".init.data","aw"
.balign 8
-gdt:
+SYM_DATA_START_LOCAL(gdt)
.word gdt_end - gdt_start
.long _pa(gdt_start)
.word 0
-gdt_start:
+SYM_DATA_END(gdt)
+SYM_DATA_START_LOCAL(gdt_start)
	.quad 0x0000000000000000	/* NULL descriptor */
	.quad 0x0000000000000000	/* reserved */
 #ifdef CONFIG_X86_64
@@ -150,12 +151,12 @@ gdt_start:
	.quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* __KERNEL_CS */
 #endif
	.quad GDT_ENTRY(0xc092, 0, 0xfffff) /* __KERNEL_DS */
-gdt_end:
+SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
 
.balign 4
-early_stack:
+SYM_DATA_START_LOCAL(early_stack)
.fill 256, 1, 0
-early_stack_end:
+SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
 
ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
 _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
-- 
2.15.0



[Xen-devel] [PATCH v5 23/27] x86_64: assembly, change all ENTRY+ENDPROC to SYM_FUNC_*

2017-11-30 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START, and their ENDPROCs with
SYM_FUNC_END.

Also make sure ENTRY/ENDPROC are no longer defined on X86_64, given
these were the last users.
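
The mechanical change is one-to-one, e.g. (a sketch with a hypothetical
symbol, not a hunk from this patch):

	-ENTRY(my_helper)
	+SYM_FUNC_START(my_helper)
	 	ret
	-ENDPROC(my_helper)
	+SYM_FUNC_END(my_helper)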

Signed-off-by: Jiri Slaby 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-cry...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S|  4 +-
 arch/x86/boot/compressed/head_64.S | 16 
 arch/x86/boot/compressed/mem_encrypt.S |  8 ++--
 arch/x86/crypto/aes-i586-asm_32.S  |  8 ++--
 arch/x86/crypto/aes-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S| 12 +++---
 arch/x86/crypto/aesni-intel_asm.S  | 44 +++---
 arch/x86/crypto/aesni-intel_avx-x86_64.S   | 24 ++--
 arch/x86/crypto/blowfish-x86_64-asm_64.S   | 16 
 arch/x86/crypto/camellia-aesni-avx-asm_64.S| 24 ++--
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S   | 24 ++--
 arch/x86/crypto/camellia-x86_64-asm_64.S   | 16 
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S  | 16 
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S  | 24 ++--
 arch/x86/crypto/chacha20-avx2-x86_64.S |  4 +-
 arch/x86/crypto/chacha20-ssse3-x86_64.S|  8 ++--
 arch/x86/crypto/crc32-pclmul_asm.S |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S  |  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S  |  8 ++--
 arch/x86/crypto/ghash-clmulni-intel_asm.S  |  8 ++--
 arch/x86/crypto/poly1305-avx2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S |  8 ++--
 arch/x86/crypto/salsa20-x86_64-asm_64.S| 12 +++---
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S| 24 ++--
 arch/x86/crypto/serpent-avx2-asm_64.S  | 24 ++--
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S   |  8 ++--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S   |  8 ++--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S  |  4 +-
 arch/x86/crypto/sha1-mb/sha1_x8_avx2.S |  4 +-
 arch/x86/crypto/sha1_avx2_x86_64_asm.S |  4 +-
 arch/x86/crypto/sha1_ni_asm.S  |  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S   |  4 +-
 arch/x86/crypto/sha256-avx-asm.S   |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S  |  4 +-
 .../crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S|  8 ++--
 .../crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha256-mb/sha256_x8_avx2.S |  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S |  4 +-
 arch/x86/crypto/sha256_ni_asm.S|  4 +-
 arch/x86/crypto/sha512-avx-asm.S   |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S  |  4 +-
 .../crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S|  8 ++--
 .../crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha512-mb/sha512_x4_avx2.S |  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S| 24 ++--
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S   |  8 ++--
 arch/x86/crypto/twofish-x86_64-asm_64.S|  8 ++--
 arch/x86/entry/entry_64.S  | 10 ++---
 arch/x86/entry/entry_64_compat.S   |  8 ++--
 arch/x86/kernel/acpi/wakeup_64.S   |  8 ++--
 arch/x86/kernel/head_64.S  | 12 +++---
 arch/x86/lib/checksum_32.S |  8 ++--
 arch/x86/lib/clear_page_64.S   | 12 +++---
 arch/x86/lib/cmpxchg16b_emu.S  |  4 +-
 arch/x86/lib/cmpxchg8b_emu.S   |  4 +-
 arch/x86/lib/copy_page_64.S|  4 +-
 arch/x86/lib/copy_user_64.S| 16 
 arch/x86/lib/csum-copy_64.S|  4 +-
 arch/x86/lib/getuser.S | 16 
 arch/x86/lib/hweight.S |  8 ++--
 arch/x86/lib/iomap_copy_64.S   |  4 +-
 arch/x86/lib/memcpy_64.S   |  4 +-
 arch/x86/lib/memmove_64.S  |  4 +-
 arch/x86/lib/memset_64.S   |  4 +-
 arch/x86/lib/msr-reg.S |  8 ++--
 arch/x86/lib/putuser

[Xen-devel] [PATCH v5 21/27] x86_64: assembly, add ENDs to some functions and relabel with SYM_CODE_*

2017-11-30 Thread Jiri Slaby
All these are functions which are invoked from elsewhere, but they are
not typical C functions. So we annotate them (as global) using the new
SYM_CODE_START. None of them was balanced with any END, so mark their
ends with SYM_CODE_END, appropriately.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/head_64.S   |  3 ++-
 arch/x86/platform/olpc/xo1-wakeup.S  |  3 ++-
 arch/x86/power/hibernate_asm_64.S|  6 --
 arch/x86/realmode/rm/reboot.S|  3 ++-
 arch/x86/realmode/rm/trampoline_64.S | 10 +++---
 arch/x86/realmode/rm/wakeup_asm.S|  3 ++-
 arch/x86/xen/xen-asm_64.S|  6 --
 7 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 108dbd71ae0b..647be777b5cc 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -249,7 +249,7 @@ ENDPROC(efi32_stub_entry)
 
.code64
.org 0x200
-ENTRY(startup_64)
+SYM_CODE_START(startup_64)
/*
 * 64bit entry is 0x200 and it is ABI so immutable!
 * We come here either from startup_32 or directly from a
@@ -369,6 +369,7 @@ lvl5:
  */
leaqrelocated(%rbx), %rax
jmp *%rax
+SYM_CODE_END(startup_64)
 
 #ifdef CONFIG_EFI_STUB
 
diff --git a/arch/x86/platform/olpc/xo1-wakeup.S b/arch/x86/platform/olpc/xo1-wakeup.S
index 5fee3a2c2fd4..75f4faff8468 100644
--- a/arch/x86/platform/olpc/xo1-wakeup.S
+++ b/arch/x86/platform/olpc/xo1-wakeup.S
@@ -90,7 +90,7 @@ restore_registers:
 
ret
 
-ENTRY(do_olpc_suspend_lowlevel)
+SYM_CODE_START(do_olpc_suspend_lowlevel)
callsave_processor_state
callsave_registers
 
@@ -110,6 +110,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_olpc_suspend_lowlevel)
 
 .data
 saved_gdt: .long   0,0
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index ce8da3a0412c..44755a847856 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -53,7 +53,7 @@ ENTRY(swsusp_arch_suspend)
ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movqrestore_jump_address(%rip), %r8
movqrestore_cr3(%rip), %r9
@@ -68,9 +68,10 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movqrelocated_restore_code(%rip), %rcx
jmpq*%rcx
+SYM_CODE_END(restore_image)
 
/* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
/* switch to temporary page tables */
movq%rax, %cr3
/* flush TLB */
@@ -98,6 +99,7 @@ ENTRY(core_restore_code)
 .Ldone:
/* jump to the restore_registers address from the image header */
jmpq*%r8
+SYM_CODE_END(core_restore_code)
 
 /* code below belongs to the image kernel */
.align PAGE_SIZE
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index 6e38c13c1873..7db1459d363f 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -19,7 +19,7 @@
  */
.section ".text32", "ax"
.code32
-ENTRY(machine_real_restart_asm)
+SYM_CODE_START(machine_real_restart_asm)
 
 #ifdef CONFIG_X86_64
/* Switch to trampoline GDT as it is guaranteed < 4 GiB */
@@ -63,6 +63,7 @@ SYM_CODE_INNER_LABEL(machine_real_restart_paging_off, SYM_L_GLOBAL)
movl%ecx, %gs
movl%ecx, %ss
ljmpw   $8, $1f
+SYM_CODE_END(machine_real_restart_asm)
 
 /*
  * This is 16-bit protected mode code to disable paging and the cache,
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index 41d4806d14a5..49f27822c9c1 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -38,7 +38,7 @@
.code16
 
.balign PAGE_SIZE
-ENTRY(trampoline_start)
+SYM_CODE_START(trampoline_start)
cli # We should be safe anyway
wbinvd
 
@@ -81,12 +81,14 @@ ENTRY(trampoline_start)
 no_longmode:
hlt
jmp no_longmode
+SYM_CODE_END(trampoline_start)
+
 #include "../kernel/verify_cpu.S"
 
.section ".text32","ax"
.code32
.balign 4
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
movl%edx, %ss
addl$pa_real_mode_base, %esp
movl%edx, %ds
@@ -140,13 +142,15 @@ ENTRY(startup_32)
 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
 */
ljmpl   $__KERNEL_CS, $pa_startup_64
+SYM_CODE_END(startup_32)
 

[Xen-devel] [PATCH v5 22/27] x86_64: assembly, change all ENTRY+END to SYM_CODE_*

2017-11-30 Thread Jiri Slaby
Here, we change all code which is not marked as a function. In other
words, this code has been using END, not ENDPROC. So switch all of it to
the appropriate new markings SYM_CODE_START and SYM_CODE_END.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/entry/entry_64.S| 56 
 arch/x86/entry/entry_64_compat.S |  8 +++---
 arch/x86/kernel/ftrace_64.S  | 16 ++--
 arch/x86/xen/xen-asm_64.S|  4 +--
 arch/x86/xen/xen-head.S  |  8 +++---
 5 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 3543ee220ab3..be1d53f3ac5f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -44,11 +44,11 @@
 .section .entry.text, "ax"
 
 #ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
+SYM_CODE_START(native_usergs_sysret64)
UNWIND_HINT_EMPTY
swapgs
sysretq
-END(native_usergs_sysret64)
+SYM_CODE_END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 .macro TRACE_IRQS_FLAGS flags:req
@@ -161,7 +161,7 @@ END(native_usergs_sysret64)
 #define RSP_SCRATCH	CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + \
SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA
 
-ENTRY(entry_SYSCALL_64_trampoline)
+SYM_CODE_START(entry_SYSCALL_64_trampoline)
UNWIND_HINT_EMPTY
swapgs
 
@@ -187,11 +187,11 @@ ENTRY(entry_SYSCALL_64_trampoline)
 */
pushq   $entry_SYSCALL_64_after_hwframe
retq
-END(entry_SYSCALL_64_trampoline)
+SYM_CODE_END(entry_SYSCALL_64_trampoline)
 
.popsection
 
-ENTRY(entry_SYSCALL_64)
+SYM_CODE_START(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
/*
 * Interrupts are off on entry.
@@ -409,7 +409,7 @@ syscall_return_via_sysret:
popq%rdi
popq%rsp
USERGS_SYSRET64
-END(entry_SYSCALL_64)
+SYM_CODE_END(entry_SYSCALL_64)
 
 SYM_CODE_START_LOCAL(stub_ptregs_64)
/*
@@ -439,11 +439,11 @@ SYM_CODE_START_LOCAL(stub_ptregs_64)
 SYM_CODE_END(stub_ptregs_64)
 
 .macro ptregs_stub func
-ENTRY(ptregs_\func)
+SYM_CODE_START(ptregs_\func)
UNWIND_HINT_FUNC
leaq\func(%rip), %rax
jmp stub_ptregs_64
-END(ptregs_\func)
+SYM_CODE_END(ptregs_\func)
 .endm
 
 /* Instantiate ptregs_stub for each ptregs-using syscall */
@@ -456,7 +456,7 @@ END(ptregs_\func)
  * %rdi: prev task
  * %rsi: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
UNWIND_HINT_FUNC
/*
 * Save callee-saved registers
@@ -487,7 +487,7 @@ ENTRY(__switch_to_asm)
popq%rbp
 
jmp __switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
 
 /*
  * A newly forked process directly context switches into this address.
@@ -496,7 +496,7 @@ END(__switch_to_asm)
  * rbx: kernel thread func (NULL for user thread)
  * r12: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
UNWIND_HINT_EMPTY
movq%rax, %rdi
callschedule_tail   /* rdi: 'prev' task parameter */
@@ -522,14 +522,14 @@ ENTRY(ret_from_fork)
 */
movq$0, RAX(%rsp)
jmp 2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Build the entry stubs with some assembler magic.
  * We pack 1 stub into every 8-byte block.
  */
.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
 vector=FIRST_EXTERNAL_VECTOR
 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
UNWIND_HINT_IRET_REGS
@@ -538,7 +538,7 @@ ENTRY(irq_entries_start)
.align  8
vector=vector+1
 .endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
@@ -874,14 +874,14 @@ SYM_CODE_END(common_interrupt)
  * APIC interrupts.
  */
 .macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET_REGS
ASM_CLAC
pushq   $~(\num)
 .Lcommon_\sym:
interrupt \do_sym
jmp ret_from_intr
-END(\sym)
+SYM_CODE_END(\sym)
 .endm
 
 /* Make sure APIC interrupt handlers end up in the irqentry section: */
@@ -947,7 +947,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
  * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
  * space has not been allocated for them.)
  */
-ENTRY(switch_to_thread_stack)
+SYM_CODE_START(switch_to_thread_stack)
UNWIND_HINT_FUNC
 
pushq   %rdi
@@ -968,10 +968,10 @@ ENTRY(switch_to_thread_stack)
 
movq(%rdi), %rdi
ret
-END(switch_to_thread_stack)
+SYM_CODE_END(switch_to_thread_stack)
 
 .macro idtentry sym do_sym has_error_code:req paran

[Xen-devel] [PATCH v7 20/28] x86/asm: make some functions local

2019-01-30 Thread Jiri Slaby
There are a couple of assembly functions which are invoked only locally
in the file they are defined in. In C, we would mark them "static". In
assembly, annotate them using SYM_{FUNC,CODE}_START_LOCAL (and switch
their ENDPROC to SYM_{FUNC,CODE}_END too). Whether we use FUNC or CODE
depends on whether ENDPROC or END was used for a particular function
before.
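
What changes in the ELF object is the symbol binding; a sketch with a
hypothetical symbol:

	SYM_FUNC_START_LOCAL(helper)	/* no .globl emitted -> STB_LOCAL,   */
		ret			/* the assembly analogue of C static */
	SYM_FUNC_END(helper)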

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S |  8 
 arch/x86/entry/entry_64.S   | 21 +++--
 arch/x86/lib/copy_page_64.S |  4 ++--
 arch/x86/lib/memcpy_64.S| 12 ++--
 arch/x86/lib/memset_64.S|  8 
 arch/x86/platform/efi/efi_thunk_64.S| 12 ++--
 arch/x86/platform/pvh/head.S|  4 ++--
 7 files changed, 35 insertions(+), 34 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
index d66000d23921..31312070db22 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -99,12 +99,12 @@ ENTRY(efi64_thunk)
ret
 ENDPROC(efi64_thunk)
 
-ENTRY(efi_exit32)
+SYM_FUNC_START_LOCAL(efi_exit32)
movqfunc_rt_ptr(%rip), %rax
push%rax
mov %rdi, %rax
ret
-ENDPROC(efi_exit32)
+SYM_FUNC_END(efi_exit32)
 
.code32
 /*
@@ -112,7 +112,7 @@ ENDPROC(efi_exit32)
  *
  * The stack should represent the 32-bit calling convention.
  */
-ENTRY(efi_enter32)
+SYM_FUNC_START_LOCAL(efi_enter32)
movl$__KERNEL_DS, %eax
movl%eax, %ds
movl%eax, %es
@@ -172,7 +172,7 @@ ENTRY(efi_enter32)
btsl$X86_CR0_PG_BIT, %eax
movl%eax, %cr0
lret
-ENDPROC(efi_enter32)
+SYM_FUNC_END(efi_enter32)
 
.data
.balign 8
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 6b60e5e6531c..2fe59c693732 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1046,7 +1046,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
  * existing activation in its critical region -- if so, we pop the current
  * activation and restart the handler using the previous one.
  */
-ENTRY(xen_do_hypervisor_callback)	/* do_hypervisor_callback(struct *pt_regs) */
+/* do_hypervisor_callback(struct *pt_regs) */
+SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
 
 /*
  * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
@@ -1064,7 +1065,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
callxen_maybe_preempt_hcall
 #endif
jmp error_exit
-END(xen_do_hypervisor_callback)
+SYM_CODE_END(xen_do_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1155,7 +1156,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
-ENTRY(paranoid_entry)
+SYM_CODE_START_LOCAL(paranoid_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1182,7 +1183,7 @@ ENTRY(paranoid_entry)
SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 
ret
-END(paranoid_entry)
+SYM_CODE_END(paranoid_entry)
 
 /*
  * "Paranoid" exit path from exception stack.  This is invoked
@@ -1196,7 +1197,7 @@ END(paranoid_entry)
  *
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
-ENTRY(paranoid_exit)
+SYM_CODE_START_LOCAL(paranoid_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
@@ -1213,12 +1214,12 @@ ENTRY(paranoid_exit)
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
 .Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
-END(paranoid_exit)
+SYM_CODE_END(paranoid_exit)
 
 /*
  * Save all registers in pt_regs, and switch GS if needed.
  */
-ENTRY(error_entry)
+SYM_CODE_START_LOCAL(error_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1302,16 +1303,16 @@ ENTRY(error_entry)
callfixup_bad_iret
mov %rax, %rsp
jmp .Lerror_entry_from_usermode_after_swapgs
-END(error_entry)
+SYM_CODE_END(error_entry)
 
-ENTRY(error_exit)
+SYM_CODE_START_LOCAL(error_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
testb   $3, CS(%rsp)
jz  retint_kernel
jmp retint_user
-END(error_exit)
+SYM_CODE_END(error_exit)
 
 /*
  * Runs on exception stack.  Xen PV does not go through this path at all,
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/

[Xen-devel] [PATCH v7 23/28] x86_64/asm: change all ENTRY+END to SYM_CODE_*

2019-01-30 Thread Jiri Slaby
Here, we change all assembly code which is marked using END (and not
ENDPROC). We switch all of it to the appropriate new markings
SYM_CODE_START and SYM_CODE_END.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/entry/entry_64.S| 48 
 arch/x86/entry/entry_64_compat.S |  8 +++---
 arch/x86/kernel/ftrace_64.S  |  4 +--
 arch/x86/xen/xen-asm_64.S|  8 +++---
 arch/x86/xen/xen-head.S  |  8 +++---
 5 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 2fe59c693732..88e865ec9695 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -46,11 +46,11 @@
 .section .entry.text, "ax"
 
 #ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
+SYM_CODE_START(native_usergs_sysret64)
UNWIND_HINT_EMPTY
swapgs
sysretq
-END(native_usergs_sysret64)
+SYM_CODE_END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 .macro TRACE_IRQS_FLAGS flags:req
@@ -142,7 +142,7 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
-ENTRY(entry_SYSCALL_64)
+SYM_CODE_START(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
/*
 * Interrupts are off on entry.
@@ -273,13 +273,13 @@ syscall_return_via_sysret:
popq%rdi
popq%rsp
USERGS_SYSRET64
-END(entry_SYSCALL_64)
+SYM_CODE_END(entry_SYSCALL_64)
 
 /*
  * %rdi: prev task
  * %rsi: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
UNWIND_HINT_FUNC
/*
 * Save callee-saved registers
@@ -321,7 +321,7 @@ ENTRY(__switch_to_asm)
popq%rbp
 
jmp __switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
 
 /*
  * A newly forked process directly context switches into this address.
@@ -330,7 +330,7 @@ END(__switch_to_asm)
  * rbx: kernel thread func (NULL for user thread)
  * r12: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
UNWIND_HINT_EMPTY
movq%rax, %rdi
callschedule_tail   /* rdi: 'prev' task parameter */
@@ -357,14 +357,14 @@ ENTRY(ret_from_fork)
 */
movq$0, RAX(%rsp)
jmp 2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Build the entry stubs with some assembler magic.
  * We pack 1 stub into every 8-byte block.
  */
.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
 vector=FIRST_EXTERNAL_VECTOR
 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
UNWIND_HINT_IRET_REGS
@@ -373,7 +373,7 @@ ENTRY(irq_entries_start)
.align  8
vector=vector+1
 .endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
@@ -499,7 +499,7 @@ END(irq_entries_start)
  * | return address|
  * ++
  */
-ENTRY(interrupt_entry)
+SYM_CODE_START(interrupt_entry)
UNWIND_HINT_FUNC
ASM_CLAC
cld
@@ -565,7 +565,7 @@ ENTRY(interrupt_entry)
TRACE_IRQS_OFF
 
ret
-END(interrupt_entry)
+SYM_CODE_END(interrupt_entry)
 _ASM_NOKPROBE(interrupt_entry)
 
 
@@ -772,7 +772,7 @@ _ASM_NOKPROBE(common_interrupt)
  * APIC interrupts.
  */
 .macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET_REGS
pushq   $~(\num)
 .Lcommon_\sym:
@@ -780,7 +780,7 @@ ENTRY(\sym)
UNWIND_HINT_REGS indirect=1
call\do_sym /* rdi points to pt_regs */
jmp ret_from_intr
-END(\sym)
+SYM_CODE_END(\sym)
 _ASM_NOKPROBE(\sym)
 .endm
 
@@ -879,7 +879,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
  * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
  */
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
 
/* Sanity check */
@@ -963,7 +963,7 @@ ENTRY(\sym)
jmp error_exit
.endif
 _ASM_NOKPROBE(\sym)
-END(\sym)
+SYM_CODE_END(\sym)
 .endm
 
 idtentry divide_error			do_divide_error			has_error_code=0
@@ -1080,7 +1080,7 @@ SYM_CODE_END(xen_do_hypervisor_callback)
  * We distinguish between categories by comparing each saved segment register
  * with its current contents: any discrepancy means we in category 1.
  */
-ENTRY(xen_failsafe_callback)
+SYM_CODE_START(xen_failsafe_callback)
UNWIND_HINT_EMPTY
movl%ds, %ecx
cmpw%cx, 0x10(%rsp)
@@ -1110,7 +1110,7 @@ ENTRY(xen_failsafe_ca

[Xen-devel] [PATCH v7 09/28] x86/asm: annotate aliases

2019-01-30 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index 6c349e844581..19effbf9ce35 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1761,8 +1761,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
@@ -1773,8 +1772,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 3b24dc05251c..68fcd8f9a48b 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -27,7 +27,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -41,7 +41,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 1e9ef0ba30a5..30dcc311f566 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -168,13 +168,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.20.1



[Xen-devel] [PATCH v7 01/28] linkage: new macros for assembler symbols

2019-01-30 Thread Jiri Slaby
Introduce new C macros for annotations of functions and data in
assembly. There is a long-standing mess in macros like ENTRY, END,
ENDPROC and similar. They are used in different manners and sometimes
incorrectly.

So introduce macros with clear use to annotate assembly as follows:

a) Support macros for the ones below
   SYM_T_FUNC -- type used by assembler to mark functions
   SYM_T_OBJECT -- type used by assembler to mark data
   SYM_T_NONE -- type used by assembler to mark entries of unknown type

   They are defined as STT_FUNC, STT_OBJECT, and STT_NOTYPE
   respectively. According to the gas manual, this is the most portable
   way. I am not sure about other assemblers, so we can switch this back
   to %function and %object if this turns into a problem. Architectures
   can also override them by something like ", @function" if they need.

   SYM_A_ALIGN, SYM_A_NONE -- align the symbol?
   SYM_L_GLOBAL, SYM_L_WEAK, SYM_L_LOCAL -- linkage of symbols

b) Mostly internal annotations, used by the ones below
   SYM_ENTRY -- use only if you have to (for non-paired symbols)
   SYM_START -- use only if you have to (for paired symbols)
   SYM_END -- use only if you have to (for paired symbols)

c) Annotations for code
   SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code
   SYM_INNER_LABEL -- only for labels in the middle of code

   SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for
one function
   SYM_FUNC_START_ALIAS -- use where there are two global names for one
function
   SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function

   SYM_FUNC_START -- use for global functions
   SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment
   SYM_FUNC_START_LOCAL -- use for local functions
   SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o
alignment
   SYM_FUNC_START_WEAK -- use for weak functions
   SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment
   SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
SYM_FUNC_START_WEAK, ...

   For functions with special (non-C) calling conventions:
   SYM_CODE_START -- use for non-C (special) functions
   SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o
alignment
   SYM_CODE_START_LOCAL -- use for local non-C (special) functions
   SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special)
functions, w/o alignment
   SYM_CODE_END -- the end of SYM_CODE_START_LOCAL or SYM_CODE_START

d) For data
   SYM_DATA_START -- global data symbol
   SYM_DATA_START_LOCAL -- local data symbol
   SYM_DATA_END -- the end of the SYM_DATA_START symbol
   SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol
   SYM_DATA -- start+end wrapper around simple global data
   SYM_DATA_LOCAL -- start+end wrapper around simple local data

==

The macros make it possible to pair starts and ends of functions and to
mark functions correctly in the output ELF objects.
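
For jump targets in the middle of an annotated region, the inner-label
macros keep the START/END pairing intact; a sketch with hypothetical
symbols (the same SYM_INNER_LABEL usage appears in the x86 conversions
later in this series):

	SYM_CODE_START(entry_thing)
		/* ... */
	SYM_INNER_LABEL(entry_thing_common, SYM_L_GLOBAL)
		/* ... */
	SYM_CODE_END(entry_thing)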

All users of the old macros in x86 are converted to use these in further
patches.

[v2]
* use SYM_ prefix and sane names
* add SYM_START and SYM_END and parametrize all the macros

[v3]
* add SYM_DATA, SYM_DATA_LOCAL, and SYM_DATA_END_LABEL

[v4]
* add _NOALIGN versions of some macros
* add _CODE_ derivatives of _FUNC_ macros

[v5]
* drop "SIMPLE" from data annotations
* switch NOALIGN and ALIGN variants of inner labels
* s/visibility/linkage/; s@SYM_V_@SYM_L_@
* add Documentation

[v6]
* fixed typos found by Randy Dunlap
* remove doubled INNER_LABEL macros, one pair was unused

[v7]
* rebased on the top of current code

Signed-off-by: Jiri Slaby 
Cc: Andrew Morton 
Cc: Boris Ostrovsky 
Cc: h...@zytor.com
Cc: Ingo Molnar 
Cc: jpoim...@redhat.com
Cc: Juergen Gross 
Cc: Len Brown 
Cc: Linus Torvalds 
Cc: linux-ker...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: mi...@redhat.com
Cc: Pavel Machek 
Cc: Peter Zijlstra 
Cc: "Rafael J. Wysocki" 
Cc: Thomas Gleixner 
Cc: xen-devel@lists.xenproject.org
Cc: x...@kernel.org
---
 Documentation/asm-annotations.rst | 217 ++
 arch/x86/include/asm/linkage.h|  10 +-
 include/linux/linkage.h   | 245 +-
 3 files changed, 461 insertions(+), 11 deletions(-)
 create mode 100644 Documentation/asm-annotations.rst

diff --git a/Documentation/asm-annotations.rst b/Documentation/asm-annotations.rst
new file mode 100644
index ..265d64a1fc0b
--- /dev/null
+++ b/Documentation/asm-annotations.rst
@@ -0,0 +1,217 @@
+Assembler Annotations
+=====================
+
+Copyright (c) 2017 Jiri Slaby
+
+This document describes the new macros for annotation of data and code in
+assembler. In particular, it contains information about ``SYM_FUNC_START``,
+``SYM_FUNC_END``, ``SYM_CODE_START``, and similar.
+
+Rationale
+---------
+Some code like entries, trampolines, or boot code needs to be written in
+assembly. The same as in C, we group such code into functions

[Xen-devel] [PATCH v7 14/28] xen/pvh: annotate data appropriately

2019-01-30 Thread Jiri Slaby
Use the new SYM_DATA_START_LOCAL and SYM_DATA_END* macros to get:
   0000     8 OBJECT  LOCAL  DEFAULT  6 gdt
   0008    32 OBJECT  LOCAL  DEFAULT  6 gdt_start
   0028     0 OBJECT  LOCAL  DEFAULT  6 gdt_end
   0028   256 OBJECT  LOCAL  DEFAULT  6 early_stack
   0128     0 OBJECT  LOCAL  DEFAULT  6 early_stack_end

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/platform/pvh/head.S | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index 1f8825bbaffb..4e63480bb223 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -150,11 +150,12 @@ END(pvh_start_xen)
 
.section ".init.data","aw"
.balign 8
-gdt:
+SYM_DATA_START_LOCAL(gdt)
.word gdt_end - gdt_start
.long _pa(gdt_start)
.word 0
-gdt_start:
+SYM_DATA_END(gdt)
+SYM_DATA_START_LOCAL(gdt_start)
	.quad 0x0000000000000000	/* NULL descriptor */
 #ifdef CONFIG_X86_64
	.quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */
@@ -163,15 +164,14 @@ gdt_start:
 #endif
	.quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
.quad GDT_ENTRY(0x4090, 0, 0x18)/* PVH_CANARY_SEL */
-gdt_end:
+SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
 
.balign 16
-canary:
-   .fill 48, 1, 0
+SYM_DATA_LOCAL(canary, .fill 48, 1, 0)
 
-early_stack:
+SYM_DATA_START_LOCAL(early_stack)
.fill BOOT_STACK_SIZE, 1, 0
-early_stack_end:
+SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
 
ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
 _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
-- 
2.20.1



[Xen-devel] [PATCH v7 22/28] x86_64/asm: add ENDs to some functions and relabel with SYM_CODE_*

2019-01-30 Thread Jiri Slaby
All these are functions which are invoked from elsewhere, but they are
not typical C functions. So we annotate them using the new
SYM_CODE_START. None of them was balanced with any END, so mark their
ends with SYM_CODE_END appropriately too.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/head_64.S   |  6 --
 arch/x86/platform/olpc/xo1-wakeup.S  |  3 ++-
 arch/x86/power/hibernate_asm_64.S|  6 --
 arch/x86/realmode/rm/reboot.S|  3 ++-
 arch/x86/realmode/rm/trampoline_64.S | 10 +++---
 arch/x86/realmode/rm/wakeup_asm.S|  3 ++-
 arch/x86/xen/xen-asm_64.S|  6 --
 7 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 8f5a58c28dd4..157b0cbc77ca 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -250,7 +250,7 @@ ENDPROC(efi32_stub_entry)
 
.code64
.org 0x200
-ENTRY(startup_64)
+SYM_CODE_START(startup_64)
/*
 * 64bit entry is 0x200 and it is ABI so immutable!
 * We come here either from startup_32 or directly from a
@@ -439,6 +439,7 @@ trampoline_return:
  */
leaqrelocated(%rbx), %rax
jmp *%rax
+SYM_CODE_END(startup_64)
 
 #ifdef CONFIG_EFI_STUB
 
@@ -568,7 +569,7 @@ adjust_got:
  * ECX contains the base address of the trampoline memory.
  * Non zero RDX on return means we need to enable 5-level paging.
  */
-ENTRY(trampoline_32bit_src)
+SYM_CODE_START(trampoline_32bit_src)
/* Set up data and stack segments */
movl$__KERNEL_DS, %eax
movl%eax, %ds
@@ -621,6 +622,7 @@ ENTRY(trampoline_32bit_src)
movl%eax, %cr0
 
lret
+SYM_CODE_END(trampoline_32bit_src)
 
.code64
 SYM_FUNC_START_LOCAL_NOALIGN(paging_enabled)
diff --git a/arch/x86/platform/olpc/xo1-wakeup.S b/arch/x86/platform/olpc/xo1-wakeup.S
index 5fee3a2c2fd4..75f4faff8468 100644
--- a/arch/x86/platform/olpc/xo1-wakeup.S
+++ b/arch/x86/platform/olpc/xo1-wakeup.S
@@ -90,7 +90,7 @@ restore_registers:
 
ret
 
-ENTRY(do_olpc_suspend_lowlevel)
+SYM_CODE_START(do_olpc_suspend_lowlevel)
callsave_processor_state
callsave_registers
 
@@ -110,6 +110,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_olpc_suspend_lowlevel)
 
 .data
 saved_gdt: .long   0,0
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 3008baa2fa95..e9983385c8b7 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -53,7 +53,7 @@ ENTRY(swsusp_arch_suspend)
ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movqrestore_jump_address(%rip), %r8
movqrestore_cr3(%rip), %r9
@@ -68,9 +68,10 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movqrelocated_restore_code(%rip), %rcx
jmpq*%rcx
+SYM_CODE_END(restore_image)
 
/* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
/* switch to temporary page tables */
movq%rax, %cr3
/* flush TLB */
@@ -98,6 +99,7 @@ ENTRY(core_restore_code)
 .Ldone:
/* jump to the restore_registers address from the image header */
jmpq*%r8
+SYM_CODE_END(core_restore_code)
 
 /* code below belongs to the image kernel */
.align PAGE_SIZE
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index 424826afb501..f10515b10e0a 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -19,7 +19,7 @@
  */
.section ".text32", "ax"
.code32
-ENTRY(machine_real_restart_asm)
+SYM_CODE_START(machine_real_restart_asm)
 
 #ifdef CONFIG_X86_64
/* Switch to trampoline GDT as it is guaranteed < 4 GiB */
@@ -63,6 +63,7 @@ SYM_INNER_LABEL(machine_real_restart_paging_off, SYM_L_GLOBAL)
movl%ecx, %gs
movl%ecx, %ss
ljmpw   $8, $1f
+SYM_CODE_END(machine_real_restart_asm)
 
 /*
  * This is 16-bit protected mode code to disable paging and the cache,
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index 9e5f9ade43c8..408f81710ccd 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -38,7 +38,7 @@
.code16
 
.balign PAGE_SIZE
-ENTRY(trampoline_start)
+SYM_CODE_START(trampoline_start)
cli # We should be safe anyway
wbinvd
 
@@ -81,12 +81,14 @@ ENTRY(trampoline_start)
 no_longmode:
  

[Xen-devel] [PATCH v7 25/28] x86_32/asm: add ENDs to some functions and relabel with SYM_CODE_*

2019-01-30 Thread Jiri Slaby
All these are functions which are invoked from elsewhere, but they are
not typical C functions. So we annotate them using the new
SYM_CODE_START. All these were not balanced with any END, so mark their
ends by SYM_CODE_END, appropriately.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Reviewed-by: Rafael J. Wysocki  [hibernate]
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/entry/entry_32.S| 3 ++-
 arch/x86/kernel/acpi/wakeup_32.S | 7 ---
 arch/x86/kernel/ftrace_32.S  | 3 ++-
 arch/x86/kernel/head_32.S| 3 ++-
 arch/x86/power/hibernate_asm_32.S| 6 --
 arch/x86/realmode/rm/trampoline_32.S | 6 --
 arch/x86/xen/xen-asm_32.S| 7 ---
 7 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 07029b98111d..f7190d5da9f1 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -790,9 +790,10 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, 
SYM_L_GLOBAL, SYM_A_NONE)
  * Xen doesn't set %esp to be precisely what the normal SYSENTER
  * entry point expects, so fix it up before using the normal path.
  */
-ENTRY(xen_sysenter_target)
+SYM_CODE_START(xen_sysenter_target)
addl$5*4, %esp  /* remove xen-provided frame */
jmp .Lsysenter_past_esp
+SYM_CODE_END(xen_sysenter_target)
 #endif
 
 /*
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index feac1e5ecba0..71a05a6cc36a 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -8,8 +8,7 @@
.code32
ALIGN
 
-ENTRY(wakeup_pmode_return)
-wakeup_pmode_return:
+SYM_CODE_START(wakeup_pmode_return)
movw$__KERNEL_DS, %ax
movw%ax, %ss
movw%ax, %fs
@@ -38,6 +37,7 @@ wakeup_pmode_return:
# jump to place where we left off
movlsaved_eip, %eax
jmp *%eax
+SYM_CODE_END(wakeup_pmode_return)
 
 bogus_magic:
jmp bogus_magic
@@ -71,7 +71,7 @@ restore_registers:
popfl
ret
 
-ENTRY(do_suspend_lowlevel)
+SYM_CODE_START(do_suspend_lowlevel)
callsave_processor_state
callsave_registers
pushl   $3
@@ -86,6 +86,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_suspend_lowlevel)
 
 .data
 ALIGN
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index b855dc10daeb..f4dca7df8ad6 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -102,7 +102,7 @@ WEAK(ftrace_stub)
ret
 END(ftrace_caller)
 
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
/*
 * i386 does not save SS and ESP when coming from kernel.
 * Instead, to get sp, &regs->sp is used (see ptrace.h).
@@ -170,6 +170,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */
 
jmp .Lftrace_ret
+SYM_CODE_END(ftrace_regs_caller)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(function_hook)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 2d5390d84467..bfd713034e9b 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  * can.
  */
 __HEAD
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
movl pa(initial_stack),%ecx

/* test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
 #else
jmp .Ldefault_entry
 #endif /* CONFIG_PARAVIRT */
+SYM_CODE_END(startup_32)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
diff --git a/arch/x86/power/hibernate_asm_32.S 
b/arch/x86/power/hibernate_asm_32.S
index 6fe383002125..a19ed3d23185 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend)
ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movlrestore_jump_address, %ebx
movlrestore_cr3, %ebp
@@ -45,9 +45,10 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movlrelocated_restore_code, %eax
jmpl*%eax
+SYM_CODE_END(restore_image)
 
 /* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
movltemp_pgt, %eax
movl%eax, %cr3
 
@@ -77,6 +78,7 @@ copy_loop:
 
 done:
jmpl*%ebx
+SYM_CODE_END(core_restore_code)
 
/* code below belongs to the image kernel */
.align PAGE_SIZE
di

Re: [Xen-devel] [PATCH v7 01/28] linkage: new macros for assembler symbols

2019-02-01 Thread Jiri Slaby
On 31. 01. 19, 17:00, Borislav Petkov wrote:
>>  Documentation/asm-annotations.rst | 217 ++
> 
> I guess you wanna integrate that into the doc hierarchy. Hunk ontop:
> 
> ---
> diff --git a/Documentation/index.rst b/Documentation/index.rst
> index c858c2e66e36..754055d9565c 100644
> --- a/Documentation/index.rst
> +++ b/Documentation/index.rst
> @@ -91,6 +91,14 @@ needed).
> vm/index
> bpf/index
>  
> +Architecture-agnostic documentation
> +-----------------------------------
> +
> +.. toctree::
> +   :maxdepth: 2
> +
> +   asm-annotations
> +
>  Architecture-specific documentation
>  -----------------------------------

Thanks, all comments applied.

I will wait for a couple of days for more feedback, if any, and respin.
Perhaps for the last time, as these patches are slowly starting to bother
me after two years of inability to get them upstream (without having a
NACK or serious objections either) and the constant need for rebasing
(the spectre/meltdown changes were the ugliest for this series).

thanks,
-- 
js
suse labs


[Xen-devel] [PATCH v8 25/28] x86_32/asm: add ENDs to some functions and relabel with SYM_CODE_*

2019-08-08 Thread Jiri Slaby
All these are functions which are invoked from elsewhere, but they are
not typical C functions. So we annotate them using the new
SYM_CODE_START. All these were not balanced with any END, so mark their
ends by SYM_CODE_END, appropriately.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Reviewed-by: Rafael J. Wysocki  [hibernate]
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/entry/entry_32.S| 3 ++-
 arch/x86/kernel/acpi/wakeup_32.S | 7 ---
 arch/x86/kernel/ftrace_32.S  | 3 ++-
 arch/x86/kernel/head_32.S| 3 ++-
 arch/x86/power/hibernate_asm_32.S| 6 --
 arch/x86/realmode/rm/trampoline_32.S | 6 --
 arch/x86/xen/xen-asm_32.S| 7 ---
 7 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 4900a6a5e125..64fe7aa50ad2 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -847,9 +847,10 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, 
SYM_L_GLOBAL, SYM_A_NONE)
  * Xen doesn't set %esp to be precisely what the normal SYSENTER
  * entry point expects, so fix it up before using the normal path.
  */
-ENTRY(xen_sysenter_target)
+SYM_CODE_START(xen_sysenter_target)
addl$5*4, %esp  /* remove xen-provided frame */
jmp .Lsysenter_past_esp
+SYM_CODE_END(xen_sysenter_target)
 #endif
 
 /*
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 427249292aef..daf88f8143c5 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -9,8 +9,7 @@
.code32
ALIGN
 
-ENTRY(wakeup_pmode_return)
-wakeup_pmode_return:
+SYM_CODE_START(wakeup_pmode_return)
movw$__KERNEL_DS, %ax
movw%ax, %ss
movw%ax, %fs
@@ -39,6 +38,7 @@ wakeup_pmode_return:
# jump to place where we left off
movlsaved_eip, %eax
jmp *%eax
+SYM_CODE_END(wakeup_pmode_return)
 
 bogus_magic:
jmp bogus_magic
@@ -72,7 +72,7 @@ restore_registers:
popfl
ret
 
-ENTRY(do_suspend_lowlevel)
+SYM_CODE_START(do_suspend_lowlevel)
callsave_processor_state
callsave_registers
pushl   $3
@@ -87,6 +87,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_suspend_lowlevel)
 
 .data
 ALIGN
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 219be1309c37..a43ed4c0402d 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -89,7 +89,7 @@ WEAK(ftrace_stub)
ret
 END(ftrace_caller)
 
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
/*
 * We're here from an mcount/fentry CALL, and the stack frame looks 
like:
 *
@@ -163,6 +163,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
popl%eax
 
jmp .Lftrace_ret
+SYM_CODE_END(ftrace_regs_caller)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 2d5390d84467..bfd713034e9b 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  * can.
  */
 __HEAD
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
movl pa(initial_stack),%ecx

/* test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
 #else
jmp .Ldefault_entry
 #endif /* CONFIG_PARAVIRT */
+SYM_CODE_END(startup_32)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
diff --git a/arch/x86/power/hibernate_asm_32.S 
b/arch/x86/power/hibernate_asm_32.S
index 6fe383002125..a19ed3d23185 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend)
ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movlrestore_jump_address, %ebx
movlrestore_cr3, %ebp
@@ -45,9 +45,10 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movlrelocated_restore_code, %eax
jmpl*%eax
+SYM_CODE_END(restore_image)
 
 /* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
movltemp_pgt, %eax
movl%eax, %cr3
 
@@ -77,6 +78,7 @@ copy_loop:
 
 done:
jmpl*%ebx
+SYM_CODE_END(core_restore_code)
 
/* code below belongs to the image kernel */
.align PAGE_SIZE
diff --git a/arch/x86/realmode/rm/trampoline_32.S 
b/arch/x86/realmode/rm/tram

[Xen-devel] [PATCH v8 23/28] x86_64/asm: change all ENTRY+END to SYM_CODE_*

2019-08-08 Thread Jiri Slaby
Here, we change all assembly code which is marked using END (and not
ENDPROC). We switch all these to appropriate new markings SYM_CODE_START
and SYM_CODE_END.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/entry/entry_64.S| 52 
 arch/x86/entry/entry_64_compat.S |  8 ++---
 arch/x86/kernel/ftrace_64.S  |  4 +--
 arch/x86/kernel/head_64.S| 12 
 arch/x86/xen/xen-asm_64.S|  8 ++---
 arch/x86/xen/xen-head.S  |  8 ++---
 6 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 6d6cb84952ff..bd04476d432d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -46,11 +46,11 @@
 .section .entry.text, "ax"
 
 #ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
+SYM_CODE_START(native_usergs_sysret64)
UNWIND_HINT_EMPTY
swapgs
sysretq
-END(native_usergs_sysret64)
+SYM_CODE_END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 .macro TRACE_IRQS_FLAGS flags:req
@@ -142,7 +142,7 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
-ENTRY(entry_SYSCALL_64)
+SYM_CODE_START(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
/*
 * Interrupts are off on entry.
@@ -273,13 +273,13 @@ syscall_return_via_sysret:
popq%rdi
popq%rsp
USERGS_SYSRET64
-END(entry_SYSCALL_64)
+SYM_CODE_END(entry_SYSCALL_64)
 
 /*
  * %rdi: prev task
  * %rsi: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
UNWIND_HINT_FUNC
/*
 * Save callee-saved registers
@@ -321,7 +321,7 @@ ENTRY(__switch_to_asm)
popq%rbp
 
jmp __switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
 
 /*
  * A newly forked process directly context switches into this address.
@@ -330,7 +330,7 @@ END(__switch_to_asm)
  * rbx: kernel thread func (NULL for user thread)
  * r12: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
UNWIND_HINT_EMPTY
movq%rax, %rdi
callschedule_tail   /* rdi: 'prev' task parameter */
@@ -357,14 +357,14 @@ ENTRY(ret_from_fork)
 */
movq$0, RAX(%rsp)
jmp 2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Build the entry stubs with some assembler magic.
  * We pack 1 stub into every 8-byte block.
  */
.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
 vector=FIRST_EXTERNAL_VECTOR
 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
UNWIND_HINT_IRET_REGS
@@ -373,10 +373,10 @@ ENTRY(irq_entries_start)
.align  8
vector=vector+1
 .endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
 
.align 8
-ENTRY(spurious_entries_start)
+SYM_CODE_START(spurious_entries_start)
 vector=FIRST_SYSTEM_VECTOR
 .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
UNWIND_HINT_IRET_REGS
@@ -385,7 +385,7 @@ ENTRY(spurious_entries_start)
.align  8
vector=vector+1
 .endr
-END(spurious_entries_start)
+SYM_CODE_END(spurious_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
@@ -511,7 +511,7 @@ END(spurious_entries_start)
  * | return address|
  * ++
  */
-ENTRY(interrupt_entry)
+SYM_CODE_START(interrupt_entry)
UNWIND_HINT_FUNC
ASM_CLAC
cld
@@ -577,7 +577,7 @@ ENTRY(interrupt_entry)
TRACE_IRQS_OFF
 
ret
-END(interrupt_entry)
+SYM_CODE_END(interrupt_entry)
 _ASM_NOKPROBE(interrupt_entry)
 
 
@@ -793,7 +793,7 @@ _ASM_NOKPROBE(common_interrupt)
  * APIC interrupts.
  */
 .macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET_REGS
pushq   $~(\num)
 .Lcommon_\sym:
@@ -801,7 +801,7 @@ ENTRY(\sym)
UNWIND_HINT_REGS indirect=1
call\do_sym /* rdi points to pt_regs */
jmp ret_from_intr
-END(\sym)
+SYM_CODE_END(\sym)
 _ASM_NOKPROBE(\sym)
 .endm
 
@@ -966,7 +966,7 @@ apicinterrupt IRQ_WORK_VECTOR   
irq_work_interrupt  smp_irq_work_interrupt
  * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
  */
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 
ist_offset=0 create_gap=0 read_cr2=0
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
 
/* Sanity check */
@@ -1016,7 +1016,7 @@ ENTRY(\sym)
.endif
 
 _ASM_NOKPROBE(\sym)
-END(\sym)
+SYM_CODE_END(\sym)
 .endm
 
 idtentry divide_error  do_divide_error 
has_error_code=0

[Xen-devel] [PATCH v8 09/28] x86/asm: annotate aliases

2019-08-08 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.
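
Distilled, the aliasing pattern is the following sketch (the names are
placeholders, not symbols from the tree); both names delimit the same
body, with the alias markers placed outermost:

	SYM_FUNC_START_ALIAS(alias_name)
	SYM_FUNC_START(real_name)
		ret
	SYM_FUNC_END(real_name)
	SYM_FUNC_END_ALIAS(alias_name)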

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index e0a5fb462a0a..9d8d5f2296e1 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1757,8 +1757,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
	pshufd $0b11111111, %xmm1, %xmm1
	shufps $0b00010000, %xmm0, %xmm4
@@ -1769,8 +1768,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 92748660ba51..57a64266ba69 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -28,7 +28,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -42,7 +42,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index ebf610b49c06..45c1249f370d 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -167,13 +167,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.22.0



[Xen-devel] [PATCH v8 01/28] linkage: new macros for assembler symbols

2019-08-08 Thread Jiri Slaby
Introduce new C macros for annotations of functions and data in
assembly. There is a long-standing mess in macros like ENTRY, END,
ENDPROC and similar. They are used in different manners and sometimes
incorrectly.

So introduce macros with clear use to annotate assembly as follows:

a) Support macros for the ones below
   SYM_T_FUNC -- type used by assembler to mark functions
   SYM_T_OBJECT -- type used by assembler to mark data
   SYM_T_NONE -- type used by assembler to mark entries of unknown type

   They are defined as STT_FUNC, STT_OBJECT, and STT_NOTYPE
   respectively. According to the gas manual, this is the most portable
   way. I am not sure about other assemblers, so we can switch this back
   to %function and %object if this turns into a problem. Architectures
   can also override them by something like ", @function" if they need.

   SYM_A_ALIGN, SYM_A_NONE -- align the symbol?
   SYM_L_GLOBAL, SYM_L_WEAK, SYM_L_LOCAL -- linkage of symbols

b) Mostly internal annotations, used by the ones below
   SYM_ENTRY -- use only if you have to (for non-paired symbols)
   SYM_START -- use only if you have to (for paired symbols)
   SYM_END -- use only if you have to (for paired symbols)

c) Annotations for code
   SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code
   SYM_INNER_LABEL -- only for labels in the middle of code

   SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for
one function
   SYM_FUNC_START_ALIAS -- use where there are two global names for one
function
   SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function

   SYM_FUNC_START -- use for global functions
   SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment
   SYM_FUNC_START_LOCAL -- use for local functions
   SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o
alignment
   SYM_FUNC_START_WEAK -- use for weak functions
   SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment
   SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
SYM_FUNC_START_WEAK, ...

   For functions with special (non-C) calling conventions:
   SYM_CODE_START -- use for non-C (special) functions
   SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o
alignment
   SYM_CODE_START_LOCAL -- use for local non-C (special) functions
   SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special)
functions, w/o alignment
   SYM_CODE_END -- the end of SYM_CODE_START_LOCAL or SYM_CODE_START

d) For data
   SYM_DATA_START -- global data symbol
   SYM_DATA_START_LOCAL -- local data symbol
   SYM_DATA_END -- the end of the SYM_DATA_START symbol
   SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol
   SYM_DATA -- start+end wrapper around simple global data
   SYM_DATA_LOCAL -- start+end wrapper around simple local data

==

The macros make it possible to pair the starts and ends of functions and to
mark functions correctly in the output ELF objects.
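
For illustration, a minimal sketch of how the pairing is meant to be used
(the symbol names below are made up, not taken from the tree):

	SYM_FUNC_START(my_func)		/* global, C-callable function */
		ret
	SYM_FUNC_END(my_func)

	SYM_CODE_START(my_entry)	/* special calling convention */
		jmp	my_func
	SYM_CODE_END(my_entry)

	SYM_DATA(my_data, .long 42)	/* simple global data */

Every *_START is closed by the matching *_END, which lets the tools
compute correct sizes and types for the symbols in the output ELF.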

All users of the old macros in x86 are converted to use these in further
patches.

[v2]
* use SYM_ prefix and sane names
* add SYM_START and SYM_END and parametrize all the macros

[v3]
* add SYM_DATA, SYM_DATA_LOCAL, and SYM_DATA_END_LABEL

[v4]
* add _NOALIGN versions of some macros
* add _CODE_ derivates of _FUNC_ macros

[v5]
* drop "SIMPLE" from data annotations
* switch NOALIGN and ALIGN variants of inner labels
* s/visibility/linkage/; s@SYM_V_@SYM_L_@
* add Documentation

[v6]
* fixed typos found by Randy Dunlap
* remove doubled INNER_LABEL macros, one pair was unused

[v8]
* use lkml.kernel.org for links
* link the docs from index.rst (by bpetkov)
* fixed typos on the docs

Signed-off-by: Jiri Slaby 
Cc: Andrew Morton 
Cc: Boris Ostrovsky 
Cc: h...@zytor.com
Cc: Ingo Molnar 
Cc: jpoim...@redhat.com
Cc: Juergen Gross 
Cc: Len Brown 
Cc: Linus Torvalds 
Cc: linux-ker...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: mi...@redhat.com
Cc: Pavel Machek 
Cc: Peter Zijlstra 
Cc: "Rafael J. Wysocki" 
Cc: Thomas Gleixner 
Cc: xen-devel@lists.xenproject.org
Cc: x...@kernel.org
---
 Documentation/asm-annotations.rst | 217 ++
 Documentation/index.rst   |   8 +
 arch/x86/include/asm/linkage.h|  10 +-
 include/linux/linkage.h   | 245 +-
 4 files changed, 469 insertions(+), 11 deletions(-)
 create mode 100644 Documentation/asm-annotations.rst

diff --git a/Documentation/asm-annotations.rst 
b/Documentation/asm-annotations.rst
new file mode 100644
index ..a967648e4325
--- /dev/null
+++ b/Documentation/asm-annotations.rst
@@ -0,0 +1,217 @@
+Assembler Annotations
+=====================
+
+Copyright (c) 2017-2019 Jiri Slaby
+
+This document describes the new macros for annotation of data and code in
+assembly. In particular, it contains information about ``SYM_FUNC_START``,
+``SYM_FUNC_END``, ``SYM_CODE_START``, and similar.
+
+Rationale
+---------
+Some code like entr

[Xen-devel] [PATCH v8 22/28] x86_64/asm: add ENDs to some functions and relabel with SYM_CODE_*

2019-08-08 Thread Jiri Slaby
All these are functions which are invoked from elsewhere, but they are
not typical C functions. So we annotate them using the new
SYM_CODE_START. All these were not balanced with any END, so mark their
ends by SYM_CODE_END appropriately too.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/head_64.S   |  6 --
 arch/x86/platform/olpc/xo1-wakeup.S  |  3 ++-
 arch/x86/power/hibernate_asm_64.S|  6 --
 arch/x86/realmode/rm/reboot.S|  3 ++-
 arch/x86/realmode/rm/trampoline_64.S | 10 +++---
 arch/x86/realmode/rm/wakeup_asm.S|  3 ++-
 arch/x86/xen/xen-asm_64.S|  6 --
 7 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/arch/x86/boot/compressed/head_64.S 
b/arch/x86/boot/compressed/head_64.S
index 70a25e0fa71e..362bd01d3bfb 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -250,7 +250,7 @@ ENDPROC(efi32_stub_entry)
 
.code64
.org 0x200
-ENTRY(startup_64)
+SYM_CODE_START(startup_64)
/*
 * 64bit entry is 0x200 and it is ABI so immutable!
 * We come here either from startup_32 or directly from a
@@ -442,6 +442,7 @@ trampoline_return:
  */
leaqrelocated(%rbx), %rax
jmp *%rax
+SYM_CODE_END(startup_64)
 
 #ifdef CONFIG_EFI_STUB
 
@@ -571,7 +572,7 @@ adjust_got:
  * ECX contains the base address of the trampoline memory.
  * Non zero RDX means trampoline needs to enable 5-level paging.
  */
-ENTRY(trampoline_32bit_src)
+SYM_CODE_START(trampoline_32bit_src)
/* Set up data and stack segments */
movl$__KERNEL_DS, %eax
movl%eax, %ds
@@ -634,6 +635,7 @@ ENTRY(trampoline_32bit_src)
movl%eax, %cr0
 
lret
+SYM_CODE_END(trampoline_32bit_src)
 
.code64
 SYM_FUNC_START_LOCAL_NOALIGN(paging_enabled)
diff --git a/arch/x86/platform/olpc/xo1-wakeup.S 
b/arch/x86/platform/olpc/xo1-wakeup.S
index 5fee3a2c2fd4..75f4faff8468 100644
--- a/arch/x86/platform/olpc/xo1-wakeup.S
+++ b/arch/x86/platform/olpc/xo1-wakeup.S
@@ -90,7 +90,7 @@ restore_registers:
 
ret
 
-ENTRY(do_olpc_suspend_lowlevel)
+SYM_CODE_START(do_olpc_suspend_lowlevel)
callsave_processor_state
callsave_registers
 
@@ -110,6 +110,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_olpc_suspend_lowlevel)
 
 .data
 saved_gdt: .long   0,0
diff --git a/arch/x86/power/hibernate_asm_64.S 
b/arch/x86/power/hibernate_asm_64.S
index a4d5eb0a7ece..4057cd5af7e2 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -52,7 +52,7 @@ ENTRY(swsusp_arch_suspend)
ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movqrestore_jump_address(%rip), %r8
movqrestore_cr3(%rip), %r9
@@ -67,9 +67,10 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movqrelocated_restore_code(%rip), %rcx
jmpq*%rcx
+SYM_CODE_END(restore_image)
 
/* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
/* switch to temporary page tables */
movq%rax, %cr3
/* flush TLB */
@@ -97,6 +98,7 @@ ENTRY(core_restore_code)
 .Ldone:
/* jump to the restore_registers address from the image header */
jmpq*%r8
+SYM_CODE_END(core_restore_code)
 
 /* code below belongs to the image kernel */
.align PAGE_SIZE
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index 424826afb501..f10515b10e0a 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -19,7 +19,7 @@
  */
.section ".text32", "ax"
.code32
-ENTRY(machine_real_restart_asm)
+SYM_CODE_START(machine_real_restart_asm)
 
 #ifdef CONFIG_X86_64
/* Switch to trampoline GDT as it is guaranteed < 4 GiB */
@@ -63,6 +63,7 @@ SYM_INNER_LABEL(machine_real_restart_paging_off, SYM_L_GLOBAL)
movl%ecx, %gs
movl%ecx, %ss
ljmpw   $8, $1f
+SYM_CODE_END(machine_real_restart_asm)
 
 /*
  * This is 16-bit protected mode code to disable paging and the cache,
diff --git a/arch/x86/realmode/rm/trampoline_64.S 
b/arch/x86/realmode/rm/trampoline_64.S
index c1aeab1dae25..251758ed7443 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -38,7 +38,7 @@
.code16
 
.balign PAGE_SIZE
-ENTRY(trampoline_start)
+SYM_CODE_START(trampoline_start)
cli # We should be safe anyway
wbinvd
 
@@ -78,12 +78,14 @@ ENTRY(trampoline_start)
 no_longmode:
  

[Xen-devel] [PATCH v8 20/28] x86/asm: make some functions local

2019-08-08 Thread Jiri Slaby
There are a couple of assembly functions which are invoked only locally,
in the file they are defined in. In C, we mark them "static". In assembly,
annotate them using SYM_{FUNC,CODE}_START_LOCAL (and switch their
ENDPROC to SYM_{FUNC,CODE}_END too). Whether we use FUNC or CODE
depends on whether ENDPROC or END was used for a particular function
before.
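
As a sketch of the analogy (the symbol name below is made up): what
"static" is to a C function, SYM_FUNC_START_LOCAL is to an assembly one:

	SYM_FUNC_START_LOCAL(helper)	/* not visible outside this file */
		ret
	SYM_FUNC_END(helper)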

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S |  8 
 arch/x86/entry/entry_64.S   | 21 +++--
 arch/x86/lib/copy_page_64.S |  4 ++--
 arch/x86/lib/memcpy_64.S| 12 ++--
 arch/x86/lib/memset_64.S|  8 
 arch/x86/platform/efi/efi_thunk_64.S| 12 ++--
 arch/x86/platform/pvh/head.S|  4 ++--
 7 files changed, 35 insertions(+), 34 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_thunk_64.S 
b/arch/x86/boot/compressed/efi_thunk_64.S
index d66000d23921..31312070db22 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -99,12 +99,12 @@ ENTRY(efi64_thunk)
ret
 ENDPROC(efi64_thunk)
 
-ENTRY(efi_exit32)
+SYM_FUNC_START_LOCAL(efi_exit32)
movqfunc_rt_ptr(%rip), %rax
push%rax
mov %rdi, %rax
ret
-ENDPROC(efi_exit32)
+SYM_FUNC_END(efi_exit32)
 
.code32
 /*
@@ -112,7 +112,7 @@ ENDPROC(efi_exit32)
  *
  * The stack should represent the 32-bit calling convention.
  */
-ENTRY(efi_enter32)
+SYM_FUNC_START_LOCAL(efi_enter32)
movl$__KERNEL_DS, %eax
movl%eax, %ds
movl%eax, %es
@@ -172,7 +172,7 @@ ENTRY(efi_enter32)
btsl$X86_CR0_PG_BIT, %eax
movl%eax, %cr0
lret
-ENDPROC(efi_enter32)
+SYM_FUNC_END(efi_enter32)
 
.data
.balign 8
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 50c19edbf639..6d6cb84952ff 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1099,7 +1099,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback 
has_error_code=0
  * existing activation in its critical region -- if so, we pop the current
  * activation and restart the handler using the previous one.
  */
-ENTRY(xen_do_hypervisor_callback)  /* 
do_hypervisor_callback(struct *pt_regs) */
+/* do_hypervisor_callback(struct *pt_regs) */
+SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
 
 /*
  * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
@@ -1117,7 +1118,7 @@ ENTRY(xen_do_hypervisor_callback) /* 
do_hypervisor_callback(struct *pt_regs) */
callxen_maybe_preempt_hcall
 #endif
jmp error_exit
-END(xen_do_hypervisor_callback)
+SYM_CODE_END(xen_do_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1212,7 +1213,7 @@ idtentry machine_checkdo_mce  
has_error_code=0paranoid=1
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
-ENTRY(paranoid_entry)
+SYM_CODE_START_LOCAL(paranoid_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1239,7 +1240,7 @@ ENTRY(paranoid_entry)
SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 
ret
-END(paranoid_entry)
+SYM_CODE_END(paranoid_entry)
 
 /*
  * "Paranoid" exit path from exception stack.  This is invoked
@@ -1253,7 +1254,7 @@ END(paranoid_entry)
  *
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
-ENTRY(paranoid_exit)
+SYM_CODE_START_LOCAL(paranoid_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
@@ -1270,12 +1271,12 @@ ENTRY(paranoid_exit)
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
 .Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
-END(paranoid_exit)
+SYM_CODE_END(paranoid_exit)
 
 /*
  * Save all registers in pt_regs, and switch GS if needed.
  */
-ENTRY(error_entry)
+SYM_CODE_START_LOCAL(error_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1350,16 +1351,16 @@ ENTRY(error_entry)
callfixup_bad_iret
mov %rax, %rsp
jmp .Lerror_entry_from_usermode_after_swapgs
-END(error_entry)
+SYM_CODE_END(error_entry)
 
-ENTRY(error_exit)
+SYM_CODE_START_LOCAL(error_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
testb   $3, CS(%rsp)
jz  retint_kernel
jmp retint_user
-END(error_exit)
+SYM_CODE_END(error_exit)
 
 /*
  * Runs on exception stack.  Xen PV does not go through this path at all,
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/

[Xen-devel] [PATCH v8 14/28] xen/pvh: annotate data appropriatelly

2019-08-08 Thread Jiri Slaby
Use the new SYM_DATA_START_LOCAL, and SYM_DATA_END* macros to have:
   0000000000000000     8 OBJECT  LOCAL  DEFAULT    6 gdt
   0000000000000008    32 OBJECT  LOCAL  DEFAULT    6 gdt_start
   0000000000000028     0 OBJECT  LOCAL  DEFAULT    6 gdt_end
   0000000000000028   256 OBJECT  LOCAL  DEFAULT    6 early_stack
   0000000000000128     0 OBJECT  LOCAL  DEFAULT    6 early_stack_end

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/platform/pvh/head.S | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index 1f8825bbaffb..4e63480bb223 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -150,11 +150,12 @@ END(pvh_start_xen)
 
.section ".init.data","aw"
.balign 8
-gdt:
+SYM_DATA_START_LOCAL(gdt)
.word gdt_end - gdt_start
.long _pa(gdt_start)
.word 0
-gdt_start:
+SYM_DATA_END(gdt)
+SYM_DATA_START_LOCAL(gdt_start)
	.quad 0x0000000000000000	/* NULL descriptor */
 #ifdef CONFIG_X86_64
	.quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */
@@ -163,15 +164,14 @@ gdt_start:
 #endif
	.quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
.quad GDT_ENTRY(0x4090, 0, 0x18)/* PVH_CANARY_SEL */
-gdt_end:
+SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
 
.balign 16
-canary:
-   .fill 48, 1, 0
+SYM_DATA_LOCAL(canary, .fill 48, 1, 0)
 
-early_stack:
+SYM_DATA_START_LOCAL(early_stack)
.fill BOOT_STACK_SIZE, 1, 0
-early_stack_end:
+SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
 
ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
 _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
-- 
2.22.0



[Xen-devel] [PATCH v8 24/28] x86_64/asm: change all ENTRY+ENDPROC to SYM_FUNC_*

2019-08-08 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START. And their ENDPROC's by
SYM_FUNC_END.

And make sure ENTRY/ENDPROC is not defined on X86_64, given these were
the last users.

Signed-off-by: Jiri Slaby 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-cry...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S  |  4 +-
 arch/x86/boot/compressed/head_64.S   | 16 +++---
 arch/x86/boot/compressed/mem_encrypt.S   |  8 +--
 arch/x86/crypto/aegis128-aesni-asm.S | 28 -
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S  | 12 ++--
 arch/x86/crypto/aesni-intel_asm.S| 60 ++--
 arch/x86/crypto/aesni-intel_avx-x86_64.S | 32 +--
 arch/x86/crypto/blowfish-x86_64-asm_64.S | 16 +++---
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 24 
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 24 
 arch/x86/crypto/camellia-x86_64-asm_64.S | 16 +++---
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S| 16 +++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S| 24 
 arch/x86/crypto/chacha-avx2-x86_64.S | 12 ++--
 arch/x86/crypto/chacha-avx512vl-x86_64.S | 12 ++--
 arch/x86/crypto/chacha-ssse3-x86_64.S| 12 ++--
 arch/x86/crypto/crc32-pclmul_asm.S   |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S|  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S   |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S|  8 +--
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  8 +--
 arch/x86/crypto/nh-avx2-x86_64.S |  4 +-
 arch/x86/crypto/nh-sse2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-avx2-x86_64.S   |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S   |  8 +--
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  | 24 
 arch/x86/crypto/serpent-avx2-asm_64.S| 24 
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S |  8 +--
 arch/x86/crypto/sha1_avx2_x86_64_asm.S   |  4 +-
 arch/x86/crypto/sha1_ni_asm.S|  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S |  4 +-
 arch/x86/crypto/sha256-avx-asm.S |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S|  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S   |  4 +-
 arch/x86/crypto/sha256_ni_asm.S  |  4 +-
 arch/x86/crypto/sha512-avx-asm.S |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S|  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S   |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  | 24 
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S |  8 +--
 arch/x86/crypto/twofish-x86_64-asm_64.S  |  8 +--
 arch/x86/entry/entry_64.S| 10 ++--
 arch/x86/entry/entry_64_compat.S |  4 +-
 arch/x86/kernel/acpi/wakeup_64.S |  8 +--
 arch/x86/kernel/ftrace_64.S  | 20 +++
 arch/x86/kernel/irqflags.S   |  8 +--
 arch/x86/kvm/vmx/vmenter.S   | 12 ++--
 arch/x86/lib/checksum_32.S   |  8 +--
 arch/x86/lib/clear_page_64.S | 12 ++--
 arch/x86/lib/cmpxchg16b_emu.S|  4 +-
 arch/x86/lib/cmpxchg8b_emu.S |  4 +-
 arch/x86/lib/copy_page_64.S  |  4 +-
 arch/x86/lib/copy_user_64.S  | 16 +++---
 arch/x86/lib/csum-copy_64.S  |  4 +-
 arch/x86/lib/getuser.S   | 16 +++---
 arch/x86/lib/hweight.S   |  8 +--
 arch/x86/lib/iomap_copy_64.S |  4 +-
 arch/x86/lib/memcpy_64.S |  4 +-
 arch/x86/lib/memmove_64.S|  4 +-
 arch/x86/lib/memset_64.S |  4 +-
 arch/x86/lib/msr-reg.S   |  8 +--
 arch/x86/lib/putuser.S   | 16 +++---
 arch/x86/lib/retpoline.S |  4 +-
 arch/x86/mm/mem_encrypt_boot.S   |  8 +--
 arch/x86/platform/efi/efi_stub_64.S  |  4 +-
 arch/x86/platform/efi/efi_thunk_64.S |  4 +-
 arch/x86/power/hibernate_asm_64.S|  8 +--
 arch/x86/xen/xen-asm.S   | 28 -
 arch/x86/xen/xen-asm_64.S| 16 +++---
 include/linux/linkage.h  |  4 ++
 70 files changed, 379 insertions(+), 375 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_thunk_64.S 
b/arch/x86/boot/compressed/efi_thunk_64.S
index 31312070db22..593913692d16 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -23,7 +23,7

Re: [Xen-devel] [PATCH v8 01/28] linkage: new macros for assembler symbols

2019-08-29 Thread Jiri Slaby
On 12. 08. 19, 19:06, Borislav Petkov wrote:
>> +  Again, every ``SYM_CODE_START*`` **shall** be coupled by ``SYM_CODE_END``.
> 
> Btw, this coupling: I haven't gone through the whole patchset but do we
> have automatic checking which makes sure a starting macro is coupled
> with the correct ending macro?

I do, but it's not part of this series. It's a pinch of link-time magic,
but it works reliably (see e.g. 1cbec37b3f9c). I will post it if/after
this gets merged. There were other approaches proposed in the past too
-- using objtool for that (not implemented).

>> +Overriding Macros
>> +~~~~~~~~~~~~~~~~~
>> +Architecture can also override any of the macros in their own
> 
> "Other architectures... "

Not only "other", x86 can override the types if need be too.

>> +``asm/linkage.h``, including macros specifying the type of a symbol
>> +(``SYM_T_FUNC``, ``SYM_T_OBJECT``, and ``SYM_T_NONE``).  As every macro
>> +described in this file is surrounded by ``#ifdef`` + ``#endif``, it is 
>> enough
>> +to define the macros differently in the aforementioned 
>> architecture-dependent
>> +header.
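
(As a sketch only, not part of the patch: such an override could look
like the following in an architecture's asm/linkage.h, using the
assembler-specific syntax mentioned in the commit message.)

	/* illustrative override; the defaults use STT_FUNC etc. */
	#define SYM_T_FUNC	, @function
	#define SYM_T_OBJECT	, @object
	#define SYM_T_NONE	, @notype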

thanks,
-- 
js
suse labs


[Xen-devel] [PATCH v9 23/28] x86_64/asm: Change all ENTRY+END to SYM_CODE_*

2019-10-11 Thread Jiri Slaby
Here, all assembly code which is marked using END (and not ENDPROC) is
changed. All these are switched to appropriate new markings
SYM_CODE_START and SYM_CODE_END.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Borislav Petkov 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/entry/entry_64.S| 52 
 arch/x86/entry/entry_64_compat.S |  8 ++---
 arch/x86/kernel/ftrace_64.S  |  4 +--
 arch/x86/kernel/head_64.S| 12 
 arch/x86/xen/xen-asm_64.S|  8 ++---
 arch/x86/xen/xen-head.S  |  8 ++---
 6 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1568da63bf16..13e4fe378e5a 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -46,11 +46,11 @@
 .section .entry.text, "ax"
 
 #ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
+SYM_CODE_START(native_usergs_sysret64)
UNWIND_HINT_EMPTY
swapgs
sysretq
-END(native_usergs_sysret64)
+SYM_CODE_END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 .macro TRACE_IRQS_FLAGS flags:req
@@ -142,7 +142,7 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
-ENTRY(entry_SYSCALL_64)
+SYM_CODE_START(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
/*
 * Interrupts are off on entry.
@@ -273,13 +273,13 @@ syscall_return_via_sysret:
popq%rdi
popq%rsp
USERGS_SYSRET64
-END(entry_SYSCALL_64)
+SYM_CODE_END(entry_SYSCALL_64)
 
 /*
  * %rdi: prev task
  * %rsi: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
UNWIND_HINT_FUNC
/*
 * Save callee-saved registers
@@ -321,7 +321,7 @@ ENTRY(__switch_to_asm)
popq%rbp
 
jmp __switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
 
 /*
  * A newly forked process directly context switches into this address.
@@ -330,7 +330,7 @@ END(__switch_to_asm)
  * rbx: kernel thread func (NULL for user thread)
  * r12: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
UNWIND_HINT_EMPTY
movq%rax, %rdi
callschedule_tail   /* rdi: 'prev' task parameter */
@@ -357,14 +357,14 @@ ENTRY(ret_from_fork)
 */
movq$0, RAX(%rsp)
jmp 2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Build the entry stubs with some assembler magic.
  * We pack 1 stub into every 8-byte block.
  */
.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
 vector=FIRST_EXTERNAL_VECTOR
 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
UNWIND_HINT_IRET_REGS
@@ -373,10 +373,10 @@ ENTRY(irq_entries_start)
.align  8
vector=vector+1
 .endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
 
.align 8
-ENTRY(spurious_entries_start)
+SYM_CODE_START(spurious_entries_start)
 vector=FIRST_SYSTEM_VECTOR
 .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
UNWIND_HINT_IRET_REGS
@@ -385,7 +385,7 @@ ENTRY(spurious_entries_start)
.align  8
vector=vector+1
 .endr
-END(spurious_entries_start)
+SYM_CODE_END(spurious_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
@@ -511,7 +511,7 @@ END(spurious_entries_start)
  * | return address|
  * ++
  */
-ENTRY(interrupt_entry)
+SYM_CODE_START(interrupt_entry)
UNWIND_HINT_FUNC
ASM_CLAC
cld
@@ -579,7 +579,7 @@ ENTRY(interrupt_entry)
TRACE_IRQS_OFF
 
ret
-END(interrupt_entry)
+SYM_CODE_END(interrupt_entry)
 _ASM_NOKPROBE(interrupt_entry)
 
 
@@ -795,7 +795,7 @@ _ASM_NOKPROBE(common_interrupt)
  * APIC interrupts.
  */
 .macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET_REGS
pushq   $~(\num)
 .Lcommon_\sym:
@@ -803,7 +803,7 @@ ENTRY(\sym)
UNWIND_HINT_REGS indirect=1
call\do_sym /* rdi points to pt_regs */
jmp ret_from_intr
-END(\sym)
+SYM_CODE_END(\sym)
 _ASM_NOKPROBE(\sym)
 .endm
 
@@ -968,7 +968,7 @@ apicinterrupt IRQ_WORK_VECTOR   
irq_work_interrupt  smp_irq_work_interrupt
  * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
  */
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 
ist_offset=0 create_gap=0 read_cr2=0
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
 
/* Sanity check */
@@ -1018,7 +1018,7 @@ ENTRY(\sym)
.endif
 
 _ASM_NOKPROBE(\sym)
-END(\sym)
+SYM_CODE_END(\sym)
 .endm
 
 idtentry divide_error  do_divide_error  

[Xen-devel] [PATCH v9 14/28] xen/pvh: Annotate data appropriately

2019-10-11 Thread Jiri Slaby
Use the new SYM_DATA_START_LOCAL, and SYM_DATA_END* macros to have:
   0000000000000000     8 OBJECT  LOCAL  DEFAULT    6 gdt
   0000000000000008    32 OBJECT  LOCAL  DEFAULT    6 gdt_start
   0000000000000028     0 OBJECT  LOCAL  DEFAULT    6 gdt_end
   0000000000000028   256 OBJECT  LOCAL  DEFAULT    6 early_stack
   0000000000000128     0 OBJECT  LOCAL  DEFAULT    6 early_stack_end

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: Borislav Petkov 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/platform/pvh/head.S | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index 1f8825bbaffb..4e63480bb223 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -150,11 +150,12 @@ END(pvh_start_xen)
 
.section ".init.data","aw"
.balign 8
-gdt:
+SYM_DATA_START_LOCAL(gdt)
.word gdt_end - gdt_start
.long _pa(gdt_start)
.word 0
-gdt_start:
+SYM_DATA_END(gdt)
+SYM_DATA_START_LOCAL(gdt_start)
	.quad 0x0000000000000000	/* NULL descriptor */
 #ifdef CONFIG_X86_64
	.quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */
@@ -163,15 +164,14 @@ gdt_start:
 #endif
	.quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
.quad GDT_ENTRY(0x4090, 0, 0x18)/* PVH_CANARY_SEL */
-gdt_end:
+SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
 
.balign 16
-canary:
-   .fill 48, 1, 0
+SYM_DATA_LOCAL(canary, .fill 48, 1, 0)
 
-early_stack:
+SYM_DATA_START_LOCAL(early_stack)
.fill BOOT_STACK_SIZE, 1, 0
-early_stack_end:
+SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
 
ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
 _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
-- 
2.23.0



[Xen-devel] [PATCH v9 24/28] x86_64/asm: Change all ENTRY+ENDPROC to SYM_FUNC_*

2019-10-11 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so annotate
them as global using the new SYM_FUNC_START. And their ENDPROC's by
SYM_FUNC_END.

And make sure ENTRY/ENDPROC is not defined on X86_64, given these were
the last users.

Signed-off-by: Jiri Slaby 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: "H. Peter Anvin" 
Cc: Borislav Petkov 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-cry...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S  |  4 +-
 arch/x86/boot/compressed/head_64.S   | 16 +++---
 arch/x86/boot/compressed/mem_encrypt.S   |  8 +--
 arch/x86/crypto/aegis128-aesni-asm.S | 28 -
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S  | 12 ++--
 arch/x86/crypto/aesni-intel_asm.S| 60 ++--
 arch/x86/crypto/aesni-intel_avx-x86_64.S | 32 +--
 arch/x86/crypto/blowfish-x86_64-asm_64.S | 16 +++---
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 24 
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 24 
 arch/x86/crypto/camellia-x86_64-asm_64.S | 16 +++---
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S| 16 +++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S| 24 
 arch/x86/crypto/chacha-avx2-x86_64.S | 12 ++--
 arch/x86/crypto/chacha-avx512vl-x86_64.S | 12 ++--
 arch/x86/crypto/chacha-ssse3-x86_64.S| 12 ++--
 arch/x86/crypto/crc32-pclmul_asm.S   |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S|  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S   |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S|  8 +--
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  8 +--
 arch/x86/crypto/nh-avx2-x86_64.S |  4 +-
 arch/x86/crypto/nh-sse2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-avx2-x86_64.S   |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S   |  8 +--
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  | 24 
 arch/x86/crypto/serpent-avx2-asm_64.S| 24 
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S |  8 +--
 arch/x86/crypto/sha1_avx2_x86_64_asm.S   |  4 +-
 arch/x86/crypto/sha1_ni_asm.S|  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S |  4 +-
 arch/x86/crypto/sha256-avx-asm.S |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S|  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S   |  4 +-
 arch/x86/crypto/sha256_ni_asm.S  |  4 +-
 arch/x86/crypto/sha512-avx-asm.S |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S|  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S   |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  | 24 
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S |  8 +--
 arch/x86/crypto/twofish-x86_64-asm_64.S  |  8 +--
 arch/x86/entry/entry_64.S| 10 ++--
 arch/x86/entry/entry_64_compat.S |  4 +-
 arch/x86/kernel/acpi/wakeup_64.S |  8 +--
 arch/x86/kernel/ftrace_64.S  | 20 +++
 arch/x86/kernel/irqflags.S   |  8 +--
 arch/x86/kvm/vmx/vmenter.S   | 12 ++--
 arch/x86/lib/checksum_32.S   |  8 +--
 arch/x86/lib/clear_page_64.S | 12 ++--
 arch/x86/lib/cmpxchg16b_emu.S|  4 +-
 arch/x86/lib/cmpxchg8b_emu.S |  4 +-
 arch/x86/lib/copy_page_64.S  |  4 +-
 arch/x86/lib/copy_user_64.S  | 16 +++---
 arch/x86/lib/csum-copy_64.S  |  4 +-
 arch/x86/lib/getuser.S   | 16 +++---
 arch/x86/lib/hweight.S   |  8 +--
 arch/x86/lib/iomap_copy_64.S |  4 +-
 arch/x86/lib/memcpy_64.S |  4 +-
 arch/x86/lib/memmove_64.S|  4 +-
 arch/x86/lib/memset_64.S |  4 +-
 arch/x86/lib/msr-reg.S   |  8 +--
 arch/x86/lib/putuser.S   | 16 +++---
 arch/x86/lib/retpoline.S |  4 +-
 arch/x86/mm/mem_encrypt_boot.S   |  8 +--
 arch/x86/platform/efi/efi_stub_64.S  |  4 +-
 arch/x86/platform/efi/efi_thunk_64.S |  4 +-
 arch/x86/power/hibernate_asm_64.S|  8 +--
 arch/x86/xen/xen-asm.S   | 28 -
 arch/x86/xen/xen-asm_64.S| 16 +++---
 include/linux/linkage.h  |  4 ++
 70 files changed, 379 insertions(+), 375 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_thunk_64.S 
b/arch/x86/boot/compressed/efi_thunk_64.S
index 31312070db22..593913692d16 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compr

[Xen-devel] [PATCH v9 09/28] x86/asm: Annotate aliases

2019-10-11 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy as it avoids nesting and double symbols.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Borislav Petkov 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index e0a5fb462a0a..9d8d5f2296e1 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1757,8 +1757,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
	pshufd $0b11111111, %xmm1, %xmm1
	shufps $0b00010000, %xmm0, %xmm4
@@ -1769,8 +1768,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 92748660ba51..57a64266ba69 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -28,7 +28,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -42,7 +42,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index ebf610b49c06..45c1249f370d 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -167,13 +167,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.23.0



[Xen-devel] [PATCH v9 22/28] x86_64/asm: Add ENDs to some functions and relabel with SYM_CODE_*

2019-10-11 Thread Jiri Slaby
All these are functions which are invoked from elsewhere, but they are
not typical C functions. So they are annotated using the new
SYM_CODE_START. All these were not balanced with any END, so mark their
ends by SYM_CODE_END appropriately too.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Borislav Petkov 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/head_64.S   |  6 --
 arch/x86/platform/olpc/xo1-wakeup.S  |  3 ++-
 arch/x86/power/hibernate_asm_64.S|  6 --
 arch/x86/realmode/rm/reboot.S|  3 ++-
 arch/x86/realmode/rm/trampoline_64.S | 10 +++---
 arch/x86/realmode/rm/wakeup_asm.S|  3 ++-
 arch/x86/xen/xen-asm_64.S|  6 --
 7 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/arch/x86/boot/compressed/head_64.S 
b/arch/x86/boot/compressed/head_64.S
index ca762ea6a681..55800467ce5c 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -250,7 +250,7 @@ ENDPROC(efi32_stub_entry)
 
.code64
.org 0x200
-ENTRY(startup_64)
+SYM_CODE_START(startup_64)
/*
 * 64bit entry is 0x200 and it is ABI so immutable!
 * We come here either from startup_32 or directly from a
@@ -442,6 +442,7 @@ trampoline_return:
  */
leaq.Lrelocated(%rbx), %rax
jmp *%rax
+SYM_CODE_END(startup_64)
 
 #ifdef CONFIG_EFI_STUB
 
@@ -571,7 +572,7 @@ SYM_FUNC_END(.Lrelocated)
  * ECX contains the base address of the trampoline memory.
  * Non zero RDX means trampoline needs to enable 5-level paging.
  */
-ENTRY(trampoline_32bit_src)
+SYM_CODE_START(trampoline_32bit_src)
/* Set up data and stack segments */
movl$__KERNEL_DS, %eax
movl%eax, %ds
@@ -634,6 +635,7 @@ ENTRY(trampoline_32bit_src)
movl%eax, %cr0
 
lret
+SYM_CODE_END(trampoline_32bit_src)
 
.code64
 SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
diff --git a/arch/x86/platform/olpc/xo1-wakeup.S 
b/arch/x86/platform/olpc/xo1-wakeup.S
index 5fee3a2c2fd4..75f4faff8468 100644
--- a/arch/x86/platform/olpc/xo1-wakeup.S
+++ b/arch/x86/platform/olpc/xo1-wakeup.S
@@ -90,7 +90,7 @@ restore_registers:
 
ret
 
-ENTRY(do_olpc_suspend_lowlevel)
+SYM_CODE_START(do_olpc_suspend_lowlevel)
callsave_processor_state
callsave_registers
 
@@ -110,6 +110,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_olpc_suspend_lowlevel)
 
 .data
 saved_gdt: .long   0,0
diff --git a/arch/x86/power/hibernate_asm_64.S 
b/arch/x86/power/hibernate_asm_64.S
index a4d5eb0a7ece..4057cd5af7e2 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -52,7 +52,7 @@ ENTRY(swsusp_arch_suspend)
ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movqrestore_jump_address(%rip), %r8
movqrestore_cr3(%rip), %r9
@@ -67,9 +67,10 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movqrelocated_restore_code(%rip), %rcx
jmpq*%rcx
+SYM_CODE_END(restore_image)
 
/* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
/* switch to temporary page tables */
movq%rax, %cr3
/* flush TLB */
@@ -97,6 +98,7 @@ ENTRY(core_restore_code)
 .Ldone:
/* jump to the restore_registers address from the image header */
jmpq*%r8
+SYM_CODE_END(core_restore_code)
 
 /* code below belongs to the image kernel */
.align PAGE_SIZE
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index 424826afb501..f10515b10e0a 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -19,7 +19,7 @@
  */
.section ".text32", "ax"
.code32
-ENTRY(machine_real_restart_asm)
+SYM_CODE_START(machine_real_restart_asm)
 
 #ifdef CONFIG_X86_64
/* Switch to trampoline GDT as it is guaranteed < 4 GiB */
@@ -63,6 +63,7 @@ SYM_INNER_LABEL(machine_real_restart_paging_off, SYM_L_GLOBAL)
movl%ecx, %gs
movl%ecx, %ss
ljmpw   $8, $1f
+SYM_CODE_END(machine_real_restart_asm)
 
 /*
  * This is 16-bit protected mode code to disable paging and the cache,
diff --git a/arch/x86/realmode/rm/trampoline_64.S 
b/arch/x86/realmode/rm/trampoline_64.S
index c1aeab1dae25..251758ed7443 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -38,7 +38,7 @@
.code16
 
.balign PAGE_SIZE
-ENTRY(trampoline_start)
+SYM_CODE_START(trampoline_start)
cli # We should be safe anyway
wbinvd
 
@@ -78,12

[Xen-devel] [PATCH v9 25/28] x86_32/asm: Add ENDs to some functions and relabel with SYM_CODE_*

2019-10-11 Thread Jiri Slaby
All these are functions which are invoked from elsewhere, but they are
not typical C functions. So they are annotated using the new
SYM_CODE_START. None of them was balanced with an END, so mark their
ends with SYM_CODE_END appropriately.
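
The ftrace_regs_caller hunk below also keeps a globally visible label in
the middle of the annotated block; such jump targets use SYM_INNER_LABEL
instead of a second START/END pair. A rough sketch of the shape used
below:

SYM_CODE_START(ftrace_regs_caller)
	...
SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
	...
SYM_CODE_END(ftrace_regs_caller)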

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Reviewed-by: Rafael J. Wysocki  [hibernate]
Cc: Borislav Petkov 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/entry/entry_32.S| 3 ++-
 arch/x86/kernel/acpi/wakeup_32.S | 7 ---
 arch/x86/kernel/ftrace_32.S  | 3 ++-
 arch/x86/kernel/head_32.S| 3 ++-
 arch/x86/power/hibernate_asm_32.S| 6 --
 arch/x86/realmode/rm/trampoline_32.S | 6 --
 arch/x86/xen/xen-asm_32.S| 7 ---
 7 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 4900a6a5e125..64fe7aa50ad2 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -847,9 +847,10 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, 
SYM_L_GLOBAL, SYM_A_NONE)
  * Xen doesn't set %esp to be precisely what the normal SYSENTER
  * entry point expects, so fix it up before using the normal path.
  */
-ENTRY(xen_sysenter_target)
+SYM_CODE_START(xen_sysenter_target)
addl$5*4, %esp  /* remove xen-provided frame */
jmp .Lsysenter_past_esp
+SYM_CODE_END(xen_sysenter_target)
 #endif
 
 /*
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 427249292aef..daf88f8143c5 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -9,8 +9,7 @@
.code32
ALIGN
 
-ENTRY(wakeup_pmode_return)
-wakeup_pmode_return:
+SYM_CODE_START(wakeup_pmode_return)
movw$__KERNEL_DS, %ax
movw%ax, %ss
movw%ax, %fs
@@ -39,6 +38,7 @@ wakeup_pmode_return:
# jump to place where we left off
movlsaved_eip, %eax
jmp *%eax
+SYM_CODE_END(wakeup_pmode_return)
 
 bogus_magic:
jmp bogus_magic
@@ -72,7 +72,7 @@ restore_registers:
popfl
ret
 
-ENTRY(do_suspend_lowlevel)
+SYM_CODE_START(do_suspend_lowlevel)
callsave_processor_state
callsave_registers
pushl   $3
@@ -87,6 +87,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_suspend_lowlevel)
 
 .data
 ALIGN
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 219be1309c37..a43ed4c0402d 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -89,7 +89,7 @@ WEAK(ftrace_stub)
ret
 END(ftrace_caller)
 
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
/*
 * We're here from an mcount/fentry CALL, and the stack frame looks 
like:
 *
@@ -163,6 +163,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
popl%eax
 
jmp .Lftrace_ret
+SYM_CODE_END(ftrace_regs_caller)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index e2b3e6cf86ca..7029bbaccc41 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  * can.
  */
 __HEAD
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
movl pa(initial_stack),%ecx

/* test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
 #else
jmp .Ldefault_entry
 #endif /* CONFIG_PARAVIRT */
+SYM_CODE_END(startup_32)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
diff --git a/arch/x86/power/hibernate_asm_32.S 
b/arch/x86/power/hibernate_asm_32.S
index 6fe383002125..a19ed3d23185 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend)
ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movlrestore_jump_address, %ebx
movlrestore_cr3, %ebp
@@ -45,9 +45,10 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movlrelocated_restore_code, %eax
jmpl*%eax
+SYM_CODE_END(restore_image)
 
 /* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
movltemp_pgt, %eax
movl%eax, %cr3
 
@@ -77,6 +78,7 @@ copy_loop:
 
 done:
jmpl*%ebx
+SYM_CODE_END(core_restore_code)
 
/* code below belongs to the image kernel */
.align PAGE_SIZE
diff --git a/arch/x86/realmode/rm/trampoline_32.S 
b/arch/x86/rea

[Xen-devel] [PATCH v9 20/28] x86/asm: Make some functions local

2019-10-11 Thread Jiri Slaby
There are a couple of assembly functions which are invoked only locally
in the file they are defined in. In C, they are marked "static". In
assembly, annotate them using SYM_{FUNC,CODE}_START_LOCAL (and switch
their ENDPROC to SYM_{FUNC,CODE}_END too). Whether FUNC or CODE is used
depends on whether ENDPROC or END was used for a particular function
before.
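
A typical conversion then looks like this (sketched on a made-up local
helper, not a symbol from the hunks below):

-ENTRY(helper)
+SYM_FUNC_START_LOCAL(helper)
	...
	ret
-ENDPROC(helper)
+SYM_FUNC_END(helper)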

Signed-off-by: Jiri Slaby 
Cc: Borislav Petkov 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S |  8 
 arch/x86/entry/entry_64.S   | 21 +++--
 arch/x86/lib/copy_page_64.S |  4 ++--
 arch/x86/lib/memcpy_64.S| 12 ++--
 arch/x86/lib/memset_64.S|  8 
 arch/x86/platform/efi/efi_thunk_64.S| 12 ++--
 arch/x86/platform/pvh/head.S|  4 ++--
 7 files changed, 35 insertions(+), 34 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_thunk_64.S 
b/arch/x86/boot/compressed/efi_thunk_64.S
index d66000d23921..31312070db22 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -99,12 +99,12 @@ ENTRY(efi64_thunk)
ret
 ENDPROC(efi64_thunk)
 
-ENTRY(efi_exit32)
+SYM_FUNC_START_LOCAL(efi_exit32)
movqfunc_rt_ptr(%rip), %rax
push%rax
mov %rdi, %rax
ret
-ENDPROC(efi_exit32)
+SYM_FUNC_END(efi_exit32)
 
.code32
 /*
@@ -112,7 +112,7 @@ ENDPROC(efi_exit32)
  *
  * The stack should represent the 32-bit calling convention.
  */
-ENTRY(efi_enter32)
+SYM_FUNC_START_LOCAL(efi_enter32)
movl$__KERNEL_DS, %eax
movl%eax, %ds
movl%eax, %es
@@ -172,7 +172,7 @@ ENTRY(efi_enter32)
btsl$X86_CR0_PG_BIT, %eax
movl%eax, %cr0
lret
-ENDPROC(efi_enter32)
+SYM_FUNC_END(efi_enter32)
 
.data
.balign 8
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 57d246048ac6..1568da63bf16 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1101,7 +1101,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback 
has_error_code=0
  * existing activation in its critical region -- if so, we pop the current
  * activation and restart the handler using the previous one.
  */
-ENTRY(xen_do_hypervisor_callback)  /* 
do_hypervisor_callback(struct *pt_regs) */
+/* do_hypervisor_callback(struct *pt_regs) */
+SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
 
 /*
  * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
@@ -1119,7 +1120,7 @@ ENTRY(xen_do_hypervisor_callback) /* 
do_hypervisor_callback(struct *pt_regs) */
callxen_maybe_preempt_hcall
 #endif
jmp error_exit
-END(xen_do_hypervisor_callback)
+SYM_CODE_END(xen_do_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1214,7 +1215,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
-ENTRY(paranoid_entry)
+SYM_CODE_START_LOCAL(paranoid_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1248,7 +1249,7 @@ ENTRY(paranoid_entry)
FENCE_SWAPGS_KERNEL_ENTRY
 
ret
-END(paranoid_entry)
+SYM_CODE_END(paranoid_entry)
 
 /*
  * "Paranoid" exit path from exception stack.  This is invoked
@@ -1262,7 +1263,7 @@ END(paranoid_entry)
  *
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
-ENTRY(paranoid_exit)
+SYM_CODE_START_LOCAL(paranoid_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
@@ -1279,12 +1280,12 @@ ENTRY(paranoid_exit)
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
 .Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
-END(paranoid_exit)
+SYM_CODE_END(paranoid_exit)
 
 /*
  * Save all registers in pt_regs, and switch GS if needed.
  */
-ENTRY(error_entry)
+SYM_CODE_START_LOCAL(error_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1364,16 +1365,16 @@ ENTRY(error_entry)
callfixup_bad_iret
mov %rax, %rsp
jmp .Lerror_entry_from_usermode_after_swapgs
-END(error_entry)
+SYM_CODE_END(error_entry)
 
-ENTRY(error_exit)
+SYM_CODE_START_LOCAL(error_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
testb   $3, CS(%rsp)
jz  retint_kernel
jmp .Lretint_user
-END(error_exit)
+SYM_CODE_END(error_exit)
 
 /*
  * Runs on exception stack.  Xen PV does not go through this path at all,
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page

[Xen-devel] [PATCH v9 01/28] linkage: Introduce new macros for assembler symbols

2019-10-11 Thread Jiri Slaby
Introduce new C macros for annotations of functions and data in
assembly. There is a long-standing mess in macros like ENTRY, END,
ENDPROC and similar. They are used in different manners and sometimes
incorrectly.

So introduce macros with clear use to annotate assembly as follows:

a) Support macros for the ones below
   SYM_T_FUNC -- type used by assembler to mark functions
   SYM_T_OBJECT -- type used by assembler to mark data
   SYM_T_NONE -- type used by assembler to mark entries of unknown type

   They are defined as STT_FUNC, STT_OBJECT, and STT_NOTYPE
   respectively. According to the gas manual, this is the most portable
   way. I am not sure about other assemblers, so this can be switched
   back to %function and %object if this turns into a problem.
   Architectures can also override them by something like ", @function"
   if they need.

   SYM_A_ALIGN, SYM_A_NONE -- align the symbol?
   SYM_L_GLOBAL, SYM_L_WEAK, SYM_L_LOCAL -- linkage of symbols

b) Mostly internal annotations, used by the ones below
   SYM_ENTRY -- use only if you have to (for non-paired symbols)
   SYM_START -- use only if you have to (for paired symbols)
   SYM_END -- use only if you have to (for paired symbols)

c) Annotations for code
   SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code
   SYM_INNER_LABEL -- only for labels in the middle of code

   SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for
one function
   SYM_FUNC_START_ALIAS -- use where there are two global names for one
function
   SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function

   SYM_FUNC_START -- use for global functions
   SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment
   SYM_FUNC_START_LOCAL -- use for local functions
   SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o
alignment
   SYM_FUNC_START_WEAK -- use for weak functions
   SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment
   SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
SYM_FUNC_START_WEAK, ...

   For functions with special (non-C) calling conventions:
   SYM_CODE_START -- use for non-C (special) functions
   SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o
alignment
   SYM_CODE_START_LOCAL -- use for local non-C (special) functions
   SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special)
functions, w/o alignment
   SYM_CODE_END -- the end of SYM_CODE_START_LOCAL or SYM_CODE_START

d) For data
   SYM_DATA_START -- global data symbol
   SYM_DATA_START_LOCAL -- local data symbol
   SYM_DATA_END -- the end of the SYM_DATA_START symbol
   SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol
   SYM_DATA -- start+end wrapper around simple global data
   SYM_DATA_LOCAL -- start+end wrapper around simple local data

==

The macros make it possible to pair starts and ends of functions and to
mark functions correctly in the output ELF objects.

All users of the old macros in x86 are converted to use these in further
patches.
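
As a quick illustration (made-up symbols, assuming the macros above), a
global function and a simple local datum would be annotated as:

SYM_FUNC_START(do_thing)
	...
	ret
SYM_FUNC_END(do_thing)

SYM_DATA_START_LOCAL(thing_table)
	.quad	0
SYM_DATA_END(thing_table)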

Signed-off-by: Jiri Slaby 
Cc: Andrew Morton 
Cc: Boris Ostrovsky 
Cc: h...@zytor.com
Cc: Ingo Molnar 
Cc: jpoim...@redhat.com
Cc: Juergen Gross 
Cc: Len Brown 
Cc: Linus Torvalds 
Cc: linux-ker...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: Borislav Petkov 
Cc: mi...@redhat.com
Cc: Pavel Machek 
Cc: Peter Zijlstra 
Cc: "Rafael J. Wysocki" 
Cc: Thomas Gleixner 
Cc: xen-devel@lists.xenproject.org
Cc: x...@kernel.org
---

Notes:
[v2]
* use SYM_ prefix and sane names
* add SYM_START and SYM_END and parametrize all the macros

[v3]
* add SYM_DATA, SYM_DATA_LOCAL, and SYM_DATA_END_LABEL

[v4]
* add _NOALIGN versions of some macros
* add _CODE_ derivates of _FUNC_ macros

[v5]
* drop "SIMPLE" from data annotations
* switch NOALIGN and ALIGN variants of inner labels
* s/visibility/linkage/; s@SYM_V_@SYM_L_@
* add Documentation

[v6]
* fixed typos found by Randy Dunlap
* remove doubled INNER_LABEL macros, one pair was unused

[v8]
* use lkml.kernel.org for links
* link the docs from index.rst (by Boris)
* fixed typos on the docs

[v9]
* updated the docs as requested by Boris

 Documentation/asm-annotations.rst | 216 ++
 Documentation/index.rst   |   8 +
 arch/x86/include/asm/linkage.h|  10 +-
 include/linux/linkage.h   | 245 +-
 4 files changed, 468 insertions(+), 11 deletions(-)
 create mode 100644 Documentation/asm-annotations.rst

diff --git a/Documentation/asm-annotations.rst 
b/Documentation/asm-annotations.rst
new file mode 100644
index ..29ccd6e61fe5
--- /dev/null
+++ b/Documentation/asm-annotations.rst
@@ -0,0 +1,216 @@
+Assembler Annotations
+=====================
+
+Copyright (c) 2017-2019 Jiri Slaby
+
+This document describes the new macros for annotation of 

[Xen-devel] [PATCH -resend 13/27] x86: xen-pvh, annotate data appropriately

2018-05-10 Thread Jiri Slaby
Use the new SYM_DATA_START_LOCAL and SYM_DATA_END* macros:
   0000     8 OBJECT  LOCAL  DEFAULT   6 gdt
   0008    32 OBJECT  LOCAL  DEFAULT   6 gdt_start
   0028     0 OBJECT  LOCAL  DEFAULT   6 gdt_end
   0028   256 OBJECT  LOCAL  DEFAULT   6 early_stack
   0128     0 OBJECT  LOCAL  DEFAULT   6 early_stack_end
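
The listing above is the object's symbol table; assuming the usual
binutils and build layout, something like

	readelf -s arch/x86/xen/xen-pvh.o | grep -E 'gdt|early_stack'

produces it.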

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/xen/xen-pvh.S | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index e1a5fbeae08d..52b28793a625 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -137,11 +137,12 @@ END(pvh_start_xen)
 
.section ".init.data","aw"
.balign 8
-gdt:
+SYM_DATA_START_LOCAL(gdt)
.word gdt_end - gdt_start
.long _pa(gdt_start)
.word 0
-gdt_start:
+SYM_DATA_END(gdt)
+SYM_DATA_START_LOCAL(gdt_start)
.quad 0x/* NULL descriptor */
.quad 0x/* reserved */
 #ifdef CONFIG_X86_64
@@ -150,12 +151,12 @@ gdt_start:
.quad GDT_ENTRY(0xc09a, 0, 0xf) /* __KERNEL_CS */
 #endif
.quad GDT_ENTRY(0xc092, 0, 0xf) /* __KERNEL_DS */
-gdt_end:
+SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
 
.balign 4
-early_stack:
+SYM_DATA_START_LOCAL(early_stack)
.fill 256, 1, 0
-early_stack_end:
+SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
 
ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
 _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
-- 
2.16.3



[Xen-devel] [PATCH -resend 19/27] x86: assembly, make some functions local

2018-05-10 Thread Jiri Slaby
There are a couple of assembly functions which are invoked only locally
in the file they are defined in. In C, we mark them "static". In
assembly, annotate them using SYM_{FUNC,CODE}_START_LOCAL (and switch
their ENDPROC to SYM_{FUNC,CODE}_END too). Whether FUNC or CODE is used
depends on whether ENDPROC or END was used for a particular function
(C or non-C).

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S |  8 
 arch/x86/entry/entry_64.S   | 21 +++--
 arch/x86/lib/copy_page_64.S |  4 ++--
 arch/x86/lib/memcpy_64.S| 12 ++--
 arch/x86/lib/memset_64.S|  8 
 arch/x86/platform/efi/efi_thunk_64.S| 12 ++--
 arch/x86/xen/xen-pvh.S  |  4 ++--
 7 files changed, 35 insertions(+), 34 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_thunk_64.S 
b/arch/x86/boot/compressed/efi_thunk_64.S
index d66000d23921..31312070db22 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -99,12 +99,12 @@ ENTRY(efi64_thunk)
ret
 ENDPROC(efi64_thunk)
 
-ENTRY(efi_exit32)
+SYM_FUNC_START_LOCAL(efi_exit32)
movqfunc_rt_ptr(%rip), %rax
push%rax
mov %rdi, %rax
ret
-ENDPROC(efi_exit32)
+SYM_FUNC_END(efi_exit32)
 
.code32
 /*
@@ -112,7 +112,7 @@ ENDPROC(efi_exit32)
  *
  * The stack should represent the 32-bit calling convention.
  */
-ENTRY(efi_enter32)
+SYM_FUNC_START_LOCAL(efi_enter32)
movl$__KERNEL_DS, %eax
movl%eax, %ds
movl%eax, %es
@@ -172,7 +172,7 @@ ENTRY(efi_enter32)
btsl$X86_CR0_PG_BIT, %eax
movl%eax, %cr0
lret
-ENDPROC(efi_enter32)
+SYM_FUNC_END(efi_enter32)
 
.data
.balign 8
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index fda55310de2a..c6841c038170 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1068,7 +1068,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback 
has_error_code=0
  * existing activation in its critical region -- if so, we pop the current
  * activation and restart the handler using the previous one.
  */
-ENTRY(xen_do_hypervisor_callback)  /* 
do_hypervisor_callback(struct *pt_regs) */
+/* do_hypervisor_callback(struct *pt_regs) */
+SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
 
 /*
  * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
@@ -1086,7 +1087,7 @@ ENTRY(xen_do_hypervisor_callback) /* 
do_hypervisor_callback(struct *pt_regs) */
callxen_maybe_preempt_hcall
 #endif
jmp error_exit
-END(xen_do_hypervisor_callback)
+SYM_CODE_END(xen_do_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1175,7 +1176,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
-ENTRY(paranoid_entry)
+SYM_CODE_START_LOCAL(paranoid_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1192,7 +1193,7 @@ ENTRY(paranoid_entry)
SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 
ret
-END(paranoid_entry)
+SYM_CODE_END(paranoid_entry)
 
 /*
  * "Paranoid" exit path from exception stack.  This is invoked
@@ -1206,7 +1207,7 @@ END(paranoid_entry)
  *
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
-ENTRY(paranoid_exit)
+SYM_CODE_START_LOCAL(paranoid_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
@@ -1221,13 +1222,13 @@ ENTRY(paranoid_exit)
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
 .Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
-END(paranoid_exit)
+SYM_CODE_END(paranoid_exit)
 
 /*
  * Save all registers in pt_regs, and switch GS if needed.
  * Return: EBX=0: came from user mode; EBX=1: otherwise
  */
-ENTRY(error_entry)
+SYM_CODE_START_LOCAL(error_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1314,7 +1315,7 @@ ENTRY(error_entry)
mov %rax, %rsp
decl%ebx
jmp .Lerror_entry_from_usermode_after_swapgs
-END(error_entry)
+SYM_CODE_END(error_entry)
 
 
 /*
@@ -1322,14 +1323,14 @@ END(error_entry)
  *   1: already in kernel mode, don't need SWAPGS
  *   0: user gsbase is loaded, we need SWAPGS and standard preparation for 
return to usermode
  */
-ENTRY(error_exit)
+SYM_CODE_START_LOCAL(error_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
testl   %ebx, %ebx

[Xen-devel] [PATCH -resend 22/27] x86_64: assembly, change all ENTRY+END to SYM_CODE_*

2018-05-10 Thread Jiri Slaby
Here, we change all code which is not marked as functions. In other
words, this code has been using END, not ENDPROC. So switch all of it
to the appropriate new markings, SYM_CODE_START and SYM_CODE_END.
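
Roughly, per the series' linkage.h (a simplified sketch, not the exact
definitions):

#define SYM_CODE_START(name)				\
	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)

#define SYM_CODE_END(name)				\
	SYM_END(name, SYM_T_NONE)

i.e. an aligned global label paired with an end marker that records the
symbol's size and its non-function type.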

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/entry/entry_64.S| 56 
 arch/x86/entry/entry_64_compat.S |  8 +++---
 arch/x86/kernel/ftrace_64.S  |  4 +--
 arch/x86/xen/xen-asm_64.S|  8 +++---
 arch/x86/xen/xen-head.S  |  8 +++---
 5 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index c6841c038170..1b0631971dde 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -46,11 +46,11 @@
 .section .entry.text, "ax"
 
 #ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
+SYM_CODE_START(native_usergs_sysret64)
UNWIND_HINT_EMPTY
swapgs
sysretq
-END(native_usergs_sysret64)
+SYM_CODE_END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 .macro TRACE_IRQS_FLAGS flags:req
@@ -163,7 +163,7 @@ END(native_usergs_sysret64)
 #define RSP_SCRATCH	CPU_ENTRY_AREA_entry_stack + \
	SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
 
-ENTRY(entry_SYSCALL_64_trampoline)
+SYM_CODE_START(entry_SYSCALL_64_trampoline)
UNWIND_HINT_EMPTY
swapgs
 
@@ -193,17 +193,17 @@ ENTRY(entry_SYSCALL_64_trampoline)
pushq   %rdi
movq$entry_SYSCALL_64_stage2, %rdi
JMP_NOSPEC %rdi
-END(entry_SYSCALL_64_trampoline)
+SYM_CODE_END(entry_SYSCALL_64_trampoline)
 
.popsection
 
-ENTRY(entry_SYSCALL_64_stage2)
+SYM_CODE_START(entry_SYSCALL_64_stage2)
UNWIND_HINT_EMPTY
popq%rdi
jmp entry_SYSCALL_64_after_hwframe
-END(entry_SYSCALL_64_stage2)
+SYM_CODE_END(entry_SYSCALL_64_stage2)
 
-ENTRY(entry_SYSCALL_64)
+SYM_CODE_START(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
/*
 * Interrupts are off on entry.
@@ -336,13 +336,13 @@ syscall_return_via_sysret:
popq%rdi
popq%rsp
USERGS_SYSRET64
-END(entry_SYSCALL_64)
+SYM_CODE_END(entry_SYSCALL_64)
 
 /*
  * %rdi: prev task
  * %rsi: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
UNWIND_HINT_FUNC
/*
 * Save callee-saved registers
@@ -384,7 +384,7 @@ ENTRY(__switch_to_asm)
popq%rbp
 
jmp __switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
 
 /*
  * A newly forked process directly context switches into this address.
@@ -393,7 +393,7 @@ END(__switch_to_asm)
  * rbx: kernel thread func (NULL for user thread)
  * r12: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
UNWIND_HINT_EMPTY
movq%rax, %rdi
callschedule_tail   /* rdi: 'prev' task parameter */
@@ -419,14 +419,14 @@ ENTRY(ret_from_fork)
 */
movq$0, RAX(%rsp)
jmp 2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Build the entry stubs with some assembler magic.
  * We pack 1 stub into every 8-byte block.
  */
.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
 vector=FIRST_EXTERNAL_VECTOR
 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
UNWIND_HINT_IRET_REGS
@@ -435,7 +435,7 @@ ENTRY(irq_entries_start)
.align  8
vector=vector+1
 .endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
@@ -561,7 +561,7 @@ END(irq_entries_start)
  * | return address|
  * ++
  */
-ENTRY(interrupt_entry)
+SYM_CODE_START(interrupt_entry)
UNWIND_HINT_FUNC
ASM_CLAC
cld
@@ -627,7 +627,7 @@ ENTRY(interrupt_entry)
TRACE_IRQS_OFF
 
ret
-END(interrupt_entry)
+SYM_CODE_END(interrupt_entry)
 
 
 /* Interrupt entry/exit. */
@@ -832,7 +832,7 @@ SYM_CODE_END(common_interrupt)
  * APIC interrupts.
  */
 .macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET_REGS
pushq   $~(\num)
 .Lcommon_\sym:
@@ -840,7 +840,7 @@ ENTRY(\sym)
UNWIND_HINT_REGS indirect=1
call\do_sym /* rdi points to pt_regs */
jmp ret_from_intr
-END(\sym)
+SYM_CODE_END(\sym)
 .endm
 
 /* Make sure APIC interrupt handlers end up in the irqentry section: */
@@ -902,7 +902,7 @@ apicinterrupt IRQ_WORK_VECTOR   
irq_work_interrupt  smp_irq_work_interrupt
 #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
 
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
-EN

[Xen-devel] [PATCH -resend 24/27] x86_32: assembly, add ENDs to some functions and relabel with SYM_CODE_*

2018-05-10 Thread Jiri Slaby
All these are functions which are invoked from elsewhere, but they are
not typical C functions. So we annotate them (as global) using the new
SYM_CODE_START. None of them was balanced with an END, so mark their
ends with SYM_CODE_END, appropriately.
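
Note that the wakeup_pmode_return case below also carried a duplicated
plain label next to its ENTRY; the conversion collapses both lines into
a single annotation:

-ENTRY(wakeup_pmode_return)
-wakeup_pmode_return:
+SYM_CODE_START(wakeup_pmode_return)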

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Reviewed-by: Rafael J. Wysocki  [hibernate]
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/entry/entry_32.S| 3 ++-
 arch/x86/kernel/acpi/wakeup_32.S | 7 ---
 arch/x86/kernel/ftrace_32.S  | 3 ++-
 arch/x86/kernel/head_32.S| 3 ++-
 arch/x86/power/hibernate_asm_32.S| 6 --
 arch/x86/realmode/rm/trampoline_32.S | 6 --
 arch/x86/xen/xen-asm_32.S| 7 ---
 7 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 66c688c15501..7742435271c1 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -376,9 +376,10 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, 
SYM_L_GLOBAL, SYM_A_NONE)
  * Xen doesn't set %esp to be precisely what the normal SYSENTER
  * entry point expects, so fix it up before using the normal path.
  */
-ENTRY(xen_sysenter_target)
+SYM_CODE_START(xen_sysenter_target)
addl$5*4, %esp  /* remove xen-provided frame */
jmp .Lsysenter_past_esp
+SYM_CODE_END(xen_sysenter_target)
 #endif
 
 /*
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index feac1e5ecba0..71a05a6cc36a 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -8,8 +8,7 @@
.code32
ALIGN
 
-ENTRY(wakeup_pmode_return)
-wakeup_pmode_return:
+SYM_CODE_START(wakeup_pmode_return)
movw$__KERNEL_DS, %ax
movw%ax, %ss
movw%ax, %fs
@@ -38,6 +37,7 @@ wakeup_pmode_return:
# jump to place where we left off
movlsaved_eip, %eax
jmp *%eax
+SYM_CODE_END(wakeup_pmode_return)
 
 bogus_magic:
jmp bogus_magic
@@ -71,7 +71,7 @@ restore_registers:
popfl
ret
 
-ENTRY(do_suspend_lowlevel)
+SYM_CODE_START(do_suspend_lowlevel)
callsave_processor_state
callsave_registers
pushl   $3
@@ -86,6 +86,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_suspend_lowlevel)
 
 .data
 ALIGN
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 36ed448934ec..e2463acbd2f9 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -102,7 +102,7 @@ WEAK(ftrace_stub)
ret
 END(ftrace_caller)
 
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
/*
 * i386 does not save SS and ESP when coming from kernel.
 * Instead, to get sp, &regs->sp is used (see ptrace.h).
@@ -170,6 +170,7 @@ SYM_CODE_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */
 
jmp .Lftrace_ret
+SYM_CODE_END(ftrace_regs_caller)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(function_hook)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 1a6a6b4e4b4c..ba9df7cc545d 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  * can.
  */
 __HEAD
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
movl pa(initial_stack),%ecx

/* test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
 #else
jmp .Ldefault_entry
 #endif /* CONFIG_PARAVIRT */
+SYM_CODE_END(startup_32)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
diff --git a/arch/x86/power/hibernate_asm_32.S 
b/arch/x86/power/hibernate_asm_32.S
index 6e56815e13a0..3cd15e34aa87 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -15,7 +15,7 @@
 
 .text
 
-ENTRY(swsusp_arch_suspend)
+SYM_CODE_START(swsusp_arch_suspend)
movl %esp, saved_context_esp
movl %ebx, saved_context_ebx
movl %ebp, saved_context_ebp
@@ -26,8 +26,9 @@ ENTRY(swsusp_arch_suspend)
 
call swsusp_save
ret
+SYM_CODE_END(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
movlmmu_cr4_features, %ecx
movlresume_pg_dir, %eax
subl$__PAGE_OFFSET, %eax
@@ -83,3 +84,4 @@ done:
xorl%eax, %eax
 
ret
+SYM_CODE_END(restore_image)
diff --git a/arch/x86/realmode/rm/trampoline_32.S 
b/arch/x86/realmode/rm/trampoline_32.S
index e96efcd60bf7..a3b047a44c5c 100644
--- a/arch/x86/realmode/rm/trampoline_32.S
+++ b/arch/x86/realmode

[Xen-devel] [PATCH -resend 21/27] x86_64: assembly, add ENDs to some functions and relabel with SYM_CODE_*

2018-05-10 Thread Jiri Slaby
All these are functions which are invoked from elsewhere, but they are
not typical C functions. So we annotate them (as global) using the new
SYM_CODE_START. None of them was balanced with an END, so mark their
ends with SYM_CODE_END, appropriately.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/head_64.S   |  6 --
 arch/x86/platform/olpc/xo1-wakeup.S  |  3 ++-
 arch/x86/power/hibernate_asm_64.S|  6 --
 arch/x86/realmode/rm/reboot.S|  3 ++-
 arch/x86/realmode/rm/trampoline_64.S | 10 +++---
 arch/x86/realmode/rm/wakeup_asm.S|  3 ++-
 arch/x86/xen/xen-asm_64.S|  6 --
 7 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/arch/x86/boot/compressed/head_64.S 
b/arch/x86/boot/compressed/head_64.S
index a1a92f6fc8e4..d056c789f90d 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -250,7 +250,7 @@ ENDPROC(efi32_stub_entry)
 
.code64
.org 0x200
-ENTRY(startup_64)
+SYM_CODE_START(startup_64)
/*
 * 64bit entry is 0x200 and it is ABI so immutable!
 * We come here either from startup_32 or directly from a
@@ -400,6 +400,7 @@ trampoline_return:
  */
leaqrelocated(%rbx), %rax
jmp *%rax
+SYM_CODE_END(startup_64)
 
 #ifdef CONFIG_EFI_STUB
 
@@ -521,7 +522,7 @@ SYM_FUNC_END(relocated)
  * ECX contains the base address of the trampoline memory.
  * Non zero RDX on return means we need to enable 5-level paging.
  */
-ENTRY(trampoline_32bit_src)
+SYM_CODE_START(trampoline_32bit_src)
/* Set up data and stack segments */
movl$__KERNEL_DS, %eax
movl%eax, %ds
@@ -574,6 +575,7 @@ ENTRY(trampoline_32bit_src)
movl%eax, %cr0
 
lret
+SYM_CODE_END(trampoline_32bit_src)
 
.code64
 SYM_FUNC_START_LOCAL_NOALIGN(paging_enabled)
diff --git a/arch/x86/platform/olpc/xo1-wakeup.S 
b/arch/x86/platform/olpc/xo1-wakeup.S
index 5fee3a2c2fd4..75f4faff8468 100644
--- a/arch/x86/platform/olpc/xo1-wakeup.S
+++ b/arch/x86/platform/olpc/xo1-wakeup.S
@@ -90,7 +90,7 @@ restore_registers:
 
ret
 
-ENTRY(do_olpc_suspend_lowlevel)
+SYM_CODE_START(do_olpc_suspend_lowlevel)
callsave_processor_state
callsave_registers
 
@@ -110,6 +110,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_olpc_suspend_lowlevel)
 
 .data
 saved_gdt: .long   0,0
diff --git a/arch/x86/power/hibernate_asm_64.S 
b/arch/x86/power/hibernate_asm_64.S
index ce8da3a0412c..44755a847856 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -53,7 +53,7 @@ ENTRY(swsusp_arch_suspend)
ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movqrestore_jump_address(%rip), %r8
movqrestore_cr3(%rip), %r9
@@ -68,9 +68,10 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movqrelocated_restore_code(%rip), %rcx
jmpq*%rcx
+SYM_CODE_END(restore_image)
 
/* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
/* switch to temporary page tables */
movq%rax, %cr3
/* flush TLB */
@@ -98,6 +99,7 @@ ENTRY(core_restore_code)
 .Ldone:
/* jump to the restore_registers address from the image header */
jmpq*%r8
+SYM_CODE_END(core_restore_code)
 
 /* code below belongs to the image kernel */
.align PAGE_SIZE
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index 6e38c13c1873..7db1459d363f 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -19,7 +19,7 @@
  */
.section ".text32", "ax"
.code32
-ENTRY(machine_real_restart_asm)
+SYM_CODE_START(machine_real_restart_asm)
 
 #ifdef CONFIG_X86_64
/* Switch to trampoline GDT as it is guaranteed < 4 GiB */
@@ -63,6 +63,7 @@ SYM_CODE_INNER_LABEL(machine_real_restart_paging_off, 
SYM_L_GLOBAL)
movl%ecx, %gs
movl%ecx, %ss
ljmpw   $8, $1f
+SYM_CODE_END(machine_real_restart_asm)
 
 /*
  * This is 16-bit protected mode code to disable paging and the cache,
diff --git a/arch/x86/realmode/rm/trampoline_64.S 
b/arch/x86/realmode/rm/trampoline_64.S
index 9e5f9ade43c8..408f81710ccd 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -38,7 +38,7 @@
.code16
 
.balign PAGE_SIZE
-ENTRY(trampoline_start)
+SYM_CODE_START(trampoline_start)
cli # We should be safe anyway
wbinvd
 
@@ -81,12 +81,14 @@ ENT

[Xen-devel] [PATCH -resend 08/27] x86: assembly, annotate aliases

2018-05-10 Thread Jiri Slaby
_key_expansion_128 is an alias of _key_expansion_256a, __memcpy of
memcpy, xen_syscall32_target of xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools that generate debuginfo
happy.
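
The resulting pattern nests the alias around the real symbol, with the
ends in reverse order of the starts (alias_name/real_name stand in for
pairs like memset/__memset below):

SYM_FUNC_START_ALIAS(alias_name)
ENTRY(real_name)
	...
	ret
ENDPROC(real_name)
SYM_FUNC_END_ALIAS(alias_name)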

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index b482ac1a1fb3..c85ecb163c78 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1761,8 +1761,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
@@ -1773,8 +1772,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..4911b1c61aa8 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -26,7 +26,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -40,7 +40,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 417b339e5c8e..e8f6f482bb20 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -164,13 +164,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.16.3



[Xen-devel] [PATCH -resend 01/27] linkage: new macros for assembler symbols

2018-05-10 Thread Jiri Slaby
Introduce new C macros for annotations of functions and data in
assembly. There is a long-standing mess in macros like ENTRY, END,
ENDPROC and similar. They are used in different manners and sometimes
incorrectly.

So introduce macros with clear use to annotate assembly as follows:

a) Support macros for the ones below
   SYM_T_FUNC -- type used by assembler to mark functions
   SYM_T_OBJECT -- type used by assembler to mark data
   SYM_T_NONE -- type used by assembler to mark entries of unknown type

   They are defined as STT_FUNC, STT_OBJECT, and STT_NOTYPE
   respectively. According to the gas manual, this is the most portable
   way. I am not sure about other assemblers, so we can switch this back
   to %function and %object if this turns into a problem. Architectures
   can also override them by something like ", @function" if they need.

   SYM_A_ALIGN, SYM_A_NONE -- align the symbol?
   SYM_L_GLOBAL, SYM_L_WEAK, SYM_L_LOCAL -- linkage of symbols

b) Mostly internal annotations, used by the ones below
   SYM_ENTRY -- use only if you have to (for non-paired symbols)
   SYM_START -- use only if you have to (for paired symbols)
   SYM_END -- use only if you have to (for paired symbols)

c) Annotations for code
   SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for
one function
   SYM_FUNC_START_ALIAS -- use where there are two global names for one
function
   SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function

   SYM_FUNC_START -- use for global functions
   SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment
   SYM_FUNC_START_LOCAL -- use for local functions
   SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o
alignment
   SYM_FUNC_START_WEAK -- use for weak functions
   SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment
   SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
SYM_FUNC_START_WEAK, ...

   SYM_FUNC_INNER_LABEL_ALIGN -- only for labels in the middle of
functions, w/ alignment
   SYM_FUNC_INNER_LABEL -- only for labels in the middle of functions,
w/o alignment

   For functions with special (non-C) calling conventions:
   SYM_CODE_START -- use for non-C (special) functions
   SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o
alignment
   SYM_CODE_START_LOCAL -- use for local non-C (special) functions
   SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special)
functions, w/o alignment
   SYM_CODE_END -- the end of SYM_CODE_START_LOCAL or SYM_CODE_START

   SYM_CODE_INNER_LABEL_ALIGN -- only for labels in the middle of code
   SYM_CODE_INNER_LABEL -- only for labels in the middle of code

d) For data
   SYM_DATA_START -- global data symbol
   SYM_DATA_START_LOCAL -- local data symbol
   SYM_DATA_END -- the end of the SYM_DATA_START symbol
   SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol
   SYM_DATA -- start+end wrapper around simple global data
   SYM_DATA_LOCAL -- start+end wrapper around simple local data

==

The macros make it possible to pair starts and ends of functions and to
mark functions correctly in the output ELF objects.

All users of the old macros in x86 are converted to use these in further
patches.
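
For instance, the start+end data wrappers let a trivial datum be written
in one line (hypothetical symbol):

SYM_DATA(magic, .long 0x12345678)

which is shorthand for a SYM_DATA_START(magic)/SYM_DATA_END(magic) pair
around the .long.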

[v2]
* use SYM_ prefix and sane names
* add SYM_START and SYM_END and parametrize all the macros

[v3]
* add SYM_DATA, SYM_DATA_LOCAL, and SYM_DATA_END_LABEL

[v4]
* add _NOALIGN versions of some macros
* add _CODE_ derivates of _FUNC_ macros

[v5]
* drop "SIMPLE" from data annotations
* switch NOALIGN and ALIGN variants of inner labels
* s/visibility/linkage/; s@SYM_V_@SYM_L_@
* add Documentation

Signed-off-by: Jiri Slaby 
Cc: Andrew Morton 
Cc: Boris Ostrovsky 
Cc: h...@zytor.com
Cc: Ingo Molnar 
Cc: jpoim...@redhat.com
Cc: Juergen Gross 
Cc: Len Brown 
Cc: Linus Torvalds 
Cc: linux-ker...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: mi...@redhat.com
Cc: Pavel Machek 
Cc: Peter Zijlstra 
Cc: "Rafael J. Wysocki" 
Cc: Thomas Gleixner 
Cc: xen-devel@lists.xenproject.org
Cc: x...@kernel.org
---
 Documentation/asm-annotations.rst | 218 
 arch/x86/include/asm/linkage.h|  10 +-
 include/linux/linkage.h   | 257 --
 3 files changed, 475 insertions(+), 10 deletions(-)
 create mode 100644 Documentation/asm-annotations.rst

diff --git a/Documentation/asm-annotations.rst 
b/Documentation/asm-annotations.rst
new file mode 100644
index ..3e9b426347f0
--- /dev/null
+++ b/Documentation/asm-annotations.rst
@@ -0,0 +1,218 @@
+Assembler Annotations
+=====================
+
+Copyright (c) 2017 Jiri Slaby
+
+This document describes the new macros for annotation of data and code in
+assembler. In particular, it contains information about ``SYM_FUNC_START``,
+``SYM_FUNC_END``, ``SYM_CODE_START``, and similar.
+
+Rationale
+---------
+Some code like entries, trampolines, or boot code needs t

[Xen-devel] [PATCH -resend 23/27] x86_64: assembly, change all ENTRY+ENDPROC to SYM_FUNC_*

2018-05-10 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START, and their ENDPROCs with
SYM_FUNC_END.

And make sure ENTRY/ENDPROC is not defined on X86_64, given these were
the last users.
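
The mechanical pattern is (sketched on a made-up symbol); unlike the
CODE variants, SYM_FUNC_END types the symbol as a function per the macro
descriptions:

-ENTRY(do_thing)
+SYM_FUNC_START(do_thing)
	...
	ret
-ENDPROC(do_thing)
+SYM_FUNC_END(do_thing)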

Signed-off-by: Jiri Slaby 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-cry...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S|  4 +-
 arch/x86/boot/compressed/head_64.S | 16 +++---
 arch/x86/boot/compressed/mem_encrypt.S |  8 +--
 arch/x86/crypto/aes-i586-asm_32.S  |  8 +--
 arch/x86/crypto/aes-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S| 12 ++---
 arch/x86/crypto/aesni-intel_asm.S  | 60 +++---
 arch/x86/crypto/aesni-intel_avx-x86_64.S   | 24 -
 arch/x86/crypto/blowfish-x86_64-asm_64.S   | 16 +++---
 arch/x86/crypto/camellia-aesni-avx-asm_64.S| 24 -
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S   | 24 -
 arch/x86/crypto/camellia-x86_64-asm_64.S   | 16 +++---
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S  | 16 +++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S  | 24 -
 arch/x86/crypto/chacha20-avx2-x86_64.S |  4 +-
 arch/x86/crypto/chacha20-ssse3-x86_64.S|  8 +--
 arch/x86/crypto/crc32-pclmul_asm.S |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S  |  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S  |  8 +--
 arch/x86/crypto/ghash-clmulni-intel_asm.S  |  8 +--
 arch/x86/crypto/poly1305-avx2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S |  8 +--
 arch/x86/crypto/salsa20-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S| 24 -
 arch/x86/crypto/serpent-avx2-asm_64.S  | 24 -
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S   |  8 +--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S   |  8 +--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S  |  4 +-
 arch/x86/crypto/sha1-mb/sha1_x8_avx2.S |  4 +-
 arch/x86/crypto/sha1_avx2_x86_64_asm.S |  4 +-
 arch/x86/crypto/sha1_ni_asm.S  |  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S   |  4 +-
 arch/x86/crypto/sha256-avx-asm.S   |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S  |  4 +-
 .../crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S|  8 +--
 .../crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha256-mb/sha256_x8_avx2.S |  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S |  4 +-
 arch/x86/crypto/sha256_ni_asm.S|  4 +-
 arch/x86/crypto/sha512-avx-asm.S   |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S  |  4 +-
 .../crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S|  8 +--
 .../crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha512-mb/sha512_x4_avx2.S |  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S| 24 -
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S   |  8 +--
 arch/x86/crypto/twofish-x86_64-asm_64.S|  8 +--
 arch/x86/entry/entry_64.S  | 10 ++--
 arch/x86/entry/entry_64_compat.S   |  4 +-
 arch/x86/kernel/acpi/wakeup_64.S   |  8 +--
 arch/x86/kernel/ftrace_64.S| 20 
 arch/x86/kernel/head_64.S  | 12 ++---
 arch/x86/lib/checksum_32.S |  8 +--
 arch/x86/lib/clear_page_64.S   | 12 ++---
 arch/x86/lib/cmpxchg16b_emu.S  |  4 +-
 arch/x86/lib/cmpxchg8b_emu.S   |  4 +-
 arch/x86/lib/copy_page_64.S|  4 +-
 arch/x86/lib/copy_user_64.S| 16 +++---
 arch/x86/lib/csum-copy_64.S|  4 +-
 arch/x86/lib/getuser.S | 16 +++---
 arch/x86/lib/hweight.S |  8 +--
 arch/x86/lib/iomap_copy_64.S   |  4 +-
 arch/x86/lib/memcpy_64.S   |  4 +-
 arch/x86/lib/memmove_64.S  |  4 +-
 arch/x86/lib/memset_64.S   |  4 +-
 arch/x86/lib/msr-reg.S  

Re: [Xen-devel] [PATCH -resend 01/27] linkage: new macros for assembler symbols

2018-05-14 Thread Jiri Slaby
On 05/14/2018, 05:04 AM, Randy Dunlap wrote:
> HTH.

Definitely, thanks for proof-reading.

-- 
js
suse labs


[Xen-devel] [PATCH v6 14/28] xen/pvh: annotate data appropriately

2018-05-18 Thread Jiri Slaby
Use the new SYM_DATA_START_LOCAL and SYM_DATA_END* macros to have:
   0000     8 OBJECT  LOCAL  DEFAULT   6 gdt
   0008    32 OBJECT  LOCAL  DEFAULT   6 gdt_start
   0028     0 OBJECT  LOCAL  DEFAULT   6 gdt_end
   0028   256 OBJECT  LOCAL  DEFAULT   6 early_stack
   0128     0 OBJECT  LOCAL  DEFAULT   6 early_stack_end

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/xen/xen-pvh.S | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index e1a5fbeae08d..52b28793a625 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -137,11 +137,12 @@ END(pvh_start_xen)
 
.section ".init.data","aw"
.balign 8
-gdt:
+SYM_DATA_START_LOCAL(gdt)
.word gdt_end - gdt_start
.long _pa(gdt_start)
.word 0
-gdt_start:
+SYM_DATA_END(gdt)
+SYM_DATA_START_LOCAL(gdt_start)
.quad 0x/* NULL descriptor */
.quad 0x/* reserved */
 #ifdef CONFIG_X86_64
@@ -150,12 +151,12 @@ gdt_start:
.quad GDT_ENTRY(0xc09a, 0, 0xf) /* __KERNEL_CS */
 #endif
.quad GDT_ENTRY(0xc092, 0, 0xf) /* __KERNEL_DS */
-gdt_end:
+SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
 
.balign 4
-early_stack:
+SYM_DATA_START_LOCAL(early_stack)
.fill 256, 1, 0
-early_stack_end:
+SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
 
ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
 _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
-- 
2.16.3



[Xen-devel] [PATCH v6 01/28] linkage: new macros for assembler symbols

2018-05-18 Thread Jiri Slaby
Introduce new C macros for annotations of functions and data in
assembly. There is a long-standing mess in macros like ENTRY, END,
ENDPROC and similar. They are used in different manners and sometimes
incorrectly.

So introduce macros with clear use to annotate assembly as follows:

a) Support macros for the ones below
   SYM_T_FUNC -- type used by assembler to mark functions
   SYM_T_OBJECT -- type used by assembler to mark data
   SYM_T_NONE -- type used by assembler to mark entries of unknown type

   They are defined as STT_FUNC, STT_OBJECT, and STT_NOTYPE
   respectively. According to the gas manual, this is the most portable
   way. I am not sure about other assemblers, so we can switch this back
   to %function and %object if this turns into a problem. Architectures
   can also override them by something like ", @function" if they need.

   SYM_A_ALIGN, SYM_A_NONE -- align the symbol?
   SYM_L_GLOBAL, SYM_L_WEAK, SYM_L_LOCAL -- linkage of symbols

b) Mostly internal annotations, used by the ones below
   SYM_ENTRY -- use only if you have to (for non-paired symbols)
   SYM_START -- use only if you have to (for paired symbols)
   SYM_END -- use only if you have to (for paired symbols)

c) Annotations for code
   SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code
   SYM_INNER_LABEL -- only for labels in the middle of code

   SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for
one function
   SYM_FUNC_START_ALIAS -- use where there are two global names for one
function
   SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function

   SYM_FUNC_START -- use for global functions
   SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment
   SYM_FUNC_START_LOCAL -- use for local functions
   SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o
alignment
   SYM_FUNC_START_WEAK -- use for weak functions
   SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment
   SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
SYM_FUNC_START_WEAK, ...

   For functions with special (non-C) calling conventions:
   SYM_CODE_START -- use for non-C (special) functions
   SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o
alignment
   SYM_CODE_START_LOCAL -- use for local non-C (special) functions
   SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special)
functions, w/o alignment
   SYM_CODE_END -- the end of SYM_CODE_START_LOCAL or SYM_CODE_START

d) For data
   SYM_DATA_START -- global data symbol
   SYM_DATA_START_LOCAL -- local data symbol
   SYM_DATA_END -- the end of the SYM_DATA_START symbol
   SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol
   SYM_DATA -- start+end wrapper around simple global data
   SYM_DATA_LOCAL -- start+end wrapper around simple local data

==

The macros make it possible to pair starts and ends of functions and to
mark functions correctly in the output ELF objects.

All users of the old macros in x86 are converted to use these in further
patches.
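
The weak variants follow the same shape; a weak default implementation
would look like this (made-up example):

SYM_FUNC_START_WEAK(platform_quirk)
	ret
SYM_FUNC_END(platform_quirk)

and can be overridden by a non-weak definition elsewhere.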

[v2]
* use SYM_ prefix and sane names
* add SYM_START and SYM_END and parametrize all the macros

[v3]
* add SYM_DATA, SYM_DATA_LOCAL, and SYM_DATA_END_LABEL

[v4]
* add _NOALIGN versions of some macros
* add _CODE_ derivates of _FUNC_ macros

[v5]
* drop "SIMPLE" from data annotations
* switch NOALIGN and ALIGN variants of inner labels
* s/visibility/linkage/; s@SYM_V_@SYM_L_@
* add Documentation

[v6]
* fixed typos found by Randy Dunlap
* remove doubled INNER_LABEL macros, one pair was unused

Signed-off-by: Jiri Slaby 
Cc: Andrew Morton 
Cc: Boris Ostrovsky 
Cc: h...@zytor.com
Cc: Ingo Molnar 
Cc: jpoim...@redhat.com
Cc: Juergen Gross 
Cc: Len Brown 
Cc: Linus Torvalds 
Cc: linux-ker...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: mi...@redhat.com
Cc: Pavel Machek 
Cc: Peter Zijlstra 
Cc: "Rafael J. Wysocki" 
Cc: Thomas Gleixner 
Cc: xen-devel@lists.xenproject.org
Cc: x...@kernel.org
---
 Documentation/asm-annotations.rst | 217 ++
 arch/x86/include/asm/linkage.h|  10 +-
 include/linux/linkage.h   | 243 --
 3 files changed, 460 insertions(+), 10 deletions(-)
 create mode 100644 Documentation/asm-annotations.rst

diff --git a/Documentation/asm-annotations.rst 
b/Documentation/asm-annotations.rst
new file mode 100644
index ..265d64a1fc0b
--- /dev/null
+++ b/Documentation/asm-annotations.rst
@@ -0,0 +1,217 @@
+Assembler Annotations
+=====================
+
+Copyright (c) 2017 Jiri Slaby
+
+This document describes the new macros for annotation of data and code in
+assembler. In particular, it contains information about ``SYM_FUNC_START``,
+``SYM_FUNC_END``, ``SYM_CODE_START``, and similar.
+
+Rationale
+---------
+Some code like entries, trampolines, or boot code needs to be written in
+assembly. The same as in C, we group such code into functions and accompany
+them wit

[Xen-devel] [PATCH v6 22/28] x86_64/asm: add ENDs to some functions and relabel with SYM_CODE_*

2018-05-18 Thread Jiri Slaby
All these are functions which are invoked from elsewhere, but they are
not typical C functions. So we annotate them using the new
SYM_CODE_START. None of them was balanced with an END, so mark their
ends with SYM_CODE_END appropriately too.
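
Balancing the END is not only cosmetic: SYM_CODE_END records the
symbol's extent, roughly emitting

	.type	startup_64, STT_NOTYPE
	.size	startup_64, .-startup_64

so the resulting ELF objects carry proper sizes for these entry points.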

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/head_64.S   |  6 --
 arch/x86/platform/olpc/xo1-wakeup.S  |  3 ++-
 arch/x86/power/hibernate_asm_64.S|  6 --
 arch/x86/realmode/rm/reboot.S|  3 ++-
 arch/x86/realmode/rm/trampoline_64.S | 10 +++---
 arch/x86/realmode/rm/wakeup_asm.S|  3 ++-
 arch/x86/xen/xen-asm_64.S|  6 --
 7 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/arch/x86/boot/compressed/head_64.S 
b/arch/x86/boot/compressed/head_64.S
index a1a92f6fc8e4..d056c789f90d 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -250,7 +250,7 @@ ENDPROC(efi32_stub_entry)
 
.code64
.org 0x200
-ENTRY(startup_64)
+SYM_CODE_START(startup_64)
/*
 * 64bit entry is 0x200 and it is ABI so immutable!
 * We come here either from startup_32 or directly from a
@@ -400,6 +400,7 @@ trampoline_return:
  */
leaqrelocated(%rbx), %rax
jmp *%rax
+SYM_CODE_END(startup_64)
 
 #ifdef CONFIG_EFI_STUB
 
@@ -521,7 +522,7 @@ SYM_FUNC_END(relocated)
  * ECX contains the base address of the trampoline memory.
  * Non zero RDX on return means we need to enable 5-level paging.
  */
-ENTRY(trampoline_32bit_src)
+SYM_CODE_START(trampoline_32bit_src)
/* Set up data and stack segments */
movl$__KERNEL_DS, %eax
movl%eax, %ds
@@ -574,6 +575,7 @@ ENTRY(trampoline_32bit_src)
movl%eax, %cr0
 
lret
+SYM_CODE_END(trampoline_32bit_src)
 
.code64
 SYM_FUNC_START_LOCAL_NOALIGN(paging_enabled)
diff --git a/arch/x86/platform/olpc/xo1-wakeup.S 
b/arch/x86/platform/olpc/xo1-wakeup.S
index 5fee3a2c2fd4..75f4faff8468 100644
--- a/arch/x86/platform/olpc/xo1-wakeup.S
+++ b/arch/x86/platform/olpc/xo1-wakeup.S
@@ -90,7 +90,7 @@ restore_registers:
 
ret
 
-ENTRY(do_olpc_suspend_lowlevel)
+SYM_CODE_START(do_olpc_suspend_lowlevel)
callsave_processor_state
callsave_registers
 
@@ -110,6 +110,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_olpc_suspend_lowlevel)
 
 .data
 saved_gdt: .long   0,0
diff --git a/arch/x86/power/hibernate_asm_64.S 
b/arch/x86/power/hibernate_asm_64.S
index ce8da3a0412c..44755a847856 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -53,7 +53,7 @@ ENTRY(swsusp_arch_suspend)
ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movqrestore_jump_address(%rip), %r8
movqrestore_cr3(%rip), %r9
@@ -68,9 +68,10 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movqrelocated_restore_code(%rip), %rcx
jmpq*%rcx
+SYM_CODE_END(restore_image)
 
/* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
/* switch to temporary page tables */
movq%rax, %cr3
/* flush TLB */
@@ -98,6 +99,7 @@ ENTRY(core_restore_code)
 .Ldone:
/* jump to the restore_registers address from the image header */
jmpq*%r8
+SYM_CODE_END(core_restore_code)
 
 /* code below belongs to the image kernel */
.align PAGE_SIZE
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index 424826afb501..f10515b10e0a 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -19,7 +19,7 @@
  */
.section ".text32", "ax"
.code32
-ENTRY(machine_real_restart_asm)
+SYM_CODE_START(machine_real_restart_asm)
 
 #ifdef CONFIG_X86_64
/* Switch to trampoline GDT as it is guaranteed < 4 GiB */
@@ -63,6 +63,7 @@ SYM_INNER_LABEL(machine_real_restart_paging_off, SYM_L_GLOBAL)
movl%ecx, %gs
movl%ecx, %ss
ljmpw   $8, $1f
+SYM_CODE_END(machine_real_restart_asm)
 
 /*
  * This is 16-bit protected mode code to disable paging and the cache,
diff --git a/arch/x86/realmode/rm/trampoline_64.S 
b/arch/x86/realmode/rm/trampoline_64.S
index 9e5f9ade43c8..408f81710ccd 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -38,7 +38,7 @@
.code16
 
.balign PAGE_SIZE
-ENTRY(trampoline_start)
+SYM_CODE_START(trampoline_start)
cli # We should be safe anyway
wbinvd
 
@@ -81,12 +81,14 @@ ENTRY(trampoline_start)
 no_

[Xen-devel] [PATCH v6 20/28] x86/asm: make some functions local

2018-05-18 Thread Jiri Slaby
There are a couple of assembly functions which are invoked only locally,
in the file they are defined in. In C, we would mark them "static". In
assembly, annotate them using SYM_{FUNC,CODE}_START_LOCAL (and switch
their ENDPROC to SYM_{FUNC,CODE}_END too). Whether we use FUNC or CODE
depends on whether ENDPROC or END was used for a particular function
before.
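
A minimal sketch of one such conversion, with a made-up local helper:

   -ENTRY(helper)                       /* was global */
   +SYM_FUNC_START_LOCAL(helper)        /* no .globl is emitted */
        ...
        ret
   -ENDPROC(helper)
   +SYM_FUNC_END(helper)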

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S |  8 
 arch/x86/entry/entry_64.S   | 21 +++--
 arch/x86/lib/copy_page_64.S |  4 ++--
 arch/x86/lib/memcpy_64.S| 12 ++--
 arch/x86/lib/memset_64.S|  8 
 arch/x86/platform/efi/efi_thunk_64.S| 12 ++--
 arch/x86/xen/xen-pvh.S  |  4 ++--
 7 files changed, 35 insertions(+), 34 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_thunk_64.S 
b/arch/x86/boot/compressed/efi_thunk_64.S
index d66000d23921..31312070db22 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -99,12 +99,12 @@ ENTRY(efi64_thunk)
ret
 ENDPROC(efi64_thunk)
 
-ENTRY(efi_exit32)
+SYM_FUNC_START_LOCAL(efi_exit32)
movqfunc_rt_ptr(%rip), %rax
push%rax
mov %rdi, %rax
ret
-ENDPROC(efi_exit32)
+SYM_FUNC_END(efi_exit32)
 
.code32
 /*
@@ -112,7 +112,7 @@ ENDPROC(efi_exit32)
  *
  * The stack should represent the 32-bit calling convention.
  */
-ENTRY(efi_enter32)
+SYM_FUNC_START_LOCAL(efi_enter32)
movl$__KERNEL_DS, %eax
movl%eax, %ds
movl%eax, %es
@@ -172,7 +172,7 @@ ENTRY(efi_enter32)
btsl$X86_CR0_PG_BIT, %eax
movl%eax, %cr0
lret
-ENDPROC(efi_enter32)
+SYM_FUNC_END(efi_enter32)
 
.data
.balign 8
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d55228508e80..d2e2de100b16 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1068,7 +1068,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback 
has_error_code=0
  * existing activation in its critical region -- if so, we pop the current
  * activation and restart the handler using the previous one.
  */
-ENTRY(xen_do_hypervisor_callback)  /* 
do_hypervisor_callback(struct *pt_regs) */
+/* do_hypervisor_callback(struct *pt_regs) */
+SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
 
 /*
  * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
@@ -1086,7 +1087,7 @@ ENTRY(xen_do_hypervisor_callback) /* 
do_hypervisor_callback(struct *pt_regs) */
callxen_maybe_preempt_hcall
 #endif
jmp error_exit
-END(xen_do_hypervisor_callback)
+SYM_CODE_END(xen_do_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1175,7 +1176,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
-ENTRY(paranoid_entry)
+SYM_CODE_START_LOCAL(paranoid_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1192,7 +1193,7 @@ ENTRY(paranoid_entry)
SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 
ret
-END(paranoid_entry)
+SYM_CODE_END(paranoid_entry)
 
 /*
  * "Paranoid" exit path from exception stack.  This is invoked
@@ -1206,7 +1207,7 @@ END(paranoid_entry)
  *
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
-ENTRY(paranoid_exit)
+SYM_CODE_START_LOCAL(paranoid_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
@@ -1221,13 +1222,13 @@ ENTRY(paranoid_exit)
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
 .Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
-END(paranoid_exit)
+SYM_CODE_END(paranoid_exit)
 
 /*
  * Save all registers in pt_regs, and switch GS if needed.
  * Return: EBX=0: came from user mode; EBX=1: otherwise
  */
-ENTRY(error_entry)
+SYM_CODE_START_LOCAL(error_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1314,7 +1315,7 @@ ENTRY(error_entry)
mov %rax, %rsp
decl%ebx
jmp .Lerror_entry_from_usermode_after_swapgs
-END(error_entry)
+SYM_CODE_END(error_entry)
 
 
 /*
@@ -1322,14 +1323,14 @@ END(error_entry)
  *   1: already in kernel mode, don't need SWAPGS
  *   0: user gsbase is loaded, we need SWAPGS and standard preparation for 
return to usermode
  */
-ENTRY(error_exit)
+SYM_CODE_START_LOCAL(error_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
testl   %eb

[Xen-devel] [PATCH v6 23/28] x86_64/asm: change all ENTRY+END to SYM_CODE_*

2018-05-18 Thread Jiri Slaby
Here, we change all assembly code which is marked using END (and not
ENDPROC). We switch all these to the appropriate new markings SYM_CODE_START
and SYM_CODE_END.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/entry/entry_64.S| 56 
 arch/x86/entry/entry_64_compat.S |  8 +++---
 arch/x86/kernel/ftrace_64.S  |  4 +--
 arch/x86/xen/xen-asm_64.S|  8 +++---
 arch/x86/xen/xen-head.S  |  8 +++---
 5 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d2e2de100b16..6f85f43a4877 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -46,11 +46,11 @@
 .section .entry.text, "ax"
 
 #ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
+SYM_CODE_START(native_usergs_sysret64)
UNWIND_HINT_EMPTY
swapgs
sysretq
-END(native_usergs_sysret64)
+SYM_CODE_END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 .macro TRACE_IRQS_FLAGS flags:req
@@ -163,7 +163,7 @@ END(native_usergs_sysret64)
#define RSP_SCRATCH CPU_ENTRY_AREA_entry_stack + \
SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
 
-ENTRY(entry_SYSCALL_64_trampoline)
+SYM_CODE_START(entry_SYSCALL_64_trampoline)
UNWIND_HINT_EMPTY
swapgs
 
@@ -193,17 +193,17 @@ ENTRY(entry_SYSCALL_64_trampoline)
pushq   %rdi
movq$entry_SYSCALL_64_stage2, %rdi
JMP_NOSPEC %rdi
-END(entry_SYSCALL_64_trampoline)
+SYM_CODE_END(entry_SYSCALL_64_trampoline)
 
.popsection
 
-ENTRY(entry_SYSCALL_64_stage2)
+SYM_CODE_START(entry_SYSCALL_64_stage2)
UNWIND_HINT_EMPTY
popq%rdi
jmp entry_SYSCALL_64_after_hwframe
-END(entry_SYSCALL_64_stage2)
+SYM_CODE_END(entry_SYSCALL_64_stage2)
 
-ENTRY(entry_SYSCALL_64)
+SYM_CODE_START(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
/*
 * Interrupts are off on entry.
@@ -336,13 +336,13 @@ syscall_return_via_sysret:
popq%rdi
popq%rsp
USERGS_SYSRET64
-END(entry_SYSCALL_64)
+SYM_CODE_END(entry_SYSCALL_64)
 
 /*
  * %rdi: prev task
  * %rsi: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
UNWIND_HINT_FUNC
/*
 * Save callee-saved registers
@@ -384,7 +384,7 @@ ENTRY(__switch_to_asm)
popq%rbp
 
jmp __switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
 
 /*
  * A newly forked process directly context switches into this address.
@@ -393,7 +393,7 @@ END(__switch_to_asm)
  * rbx: kernel thread func (NULL for user thread)
  * r12: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
UNWIND_HINT_EMPTY
movq%rax, %rdi
callschedule_tail   /* rdi: 'prev' task parameter */
@@ -419,14 +419,14 @@ ENTRY(ret_from_fork)
 */
movq$0, RAX(%rsp)
jmp 2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Build the entry stubs with some assembler magic.
  * We pack 1 stub into every 8-byte block.
  */
.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
 vector=FIRST_EXTERNAL_VECTOR
 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
UNWIND_HINT_IRET_REGS
@@ -435,7 +435,7 @@ ENTRY(irq_entries_start)
.align  8
vector=vector+1
 .endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
@@ -561,7 +561,7 @@ END(irq_entries_start)
  * | return address|
  * ++
  */
-ENTRY(interrupt_entry)
+SYM_CODE_START(interrupt_entry)
UNWIND_HINT_FUNC
ASM_CLAC
cld
@@ -627,7 +627,7 @@ ENTRY(interrupt_entry)
TRACE_IRQS_OFF
 
ret
-END(interrupt_entry)
+SYM_CODE_END(interrupt_entry)
 
 
 /* Interrupt entry/exit. */
@@ -832,7 +832,7 @@ SYM_CODE_END(common_interrupt)
  * APIC interrupts.
  */
 .macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET_REGS
pushq   $~(\num)
 .Lcommon_\sym:
@@ -840,7 +840,7 @@ ENTRY(\sym)
UNWIND_HINT_REGS indirect=1
call\do_sym /* rdi points to pt_regs */
jmp ret_from_intr
-END(\sym)
+SYM_CODE_END(\sym)
 .endm
 
 /* Make sure APIC interrupt handlers end up in the irqentry section: */
@@ -902,7 +902,7 @@ apicinterrupt IRQ_WORK_VECTOR   
irq_work_interrupt  smp_irq_work_interrupt
 #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
 
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET

[Xen-devel] [PATCH v6 24/28] x86_64/asm: change all ENTRY+ENDPROC to SYM_FUNC_*

2018-05-18 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START, and their ENDPROCs by
SYM_FUNC_END.

And make sure ENTRY/ENDPROC is not defined on X86_64, given these were
the last users.
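
The conversion is mechanical; every hunk follows this sketch (hypothetical
name):

   -ENTRY(do_stuff)
   +SYM_FUNC_START(do_stuff)
        ...
        ret
   -ENDPROC(do_stuff)
   +SYM_FUNC_END(do_stuff)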

Signed-off-by: Jiri Slaby 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-cry...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S|  4 +-
 arch/x86/boot/compressed/head_64.S | 16 +++---
 arch/x86/boot/compressed/mem_encrypt.S |  8 +--
 arch/x86/crypto/aes-i586-asm_32.S  |  8 +--
 arch/x86/crypto/aes-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S| 12 ++---
 arch/x86/crypto/aesni-intel_asm.S  | 60 +++---
 arch/x86/crypto/aesni-intel_avx-x86_64.S   | 24 -
 arch/x86/crypto/blowfish-x86_64-asm_64.S   | 16 +++---
 arch/x86/crypto/camellia-aesni-avx-asm_64.S| 24 -
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S   | 24 -
 arch/x86/crypto/camellia-x86_64-asm_64.S   | 16 +++---
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S  | 16 +++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S  | 24 -
 arch/x86/crypto/chacha20-avx2-x86_64.S |  4 +-
 arch/x86/crypto/chacha20-ssse3-x86_64.S|  8 +--
 arch/x86/crypto/crc32-pclmul_asm.S |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S  |  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S  |  8 +--
 arch/x86/crypto/ghash-clmulni-intel_asm.S  |  8 +--
 arch/x86/crypto/poly1305-avx2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S |  8 +--
 arch/x86/crypto/salsa20-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S| 24 -
 arch/x86/crypto/serpent-avx2-asm_64.S  | 24 -
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S   |  8 +--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S   |  8 +--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S  |  4 +-
 arch/x86/crypto/sha1-mb/sha1_x8_avx2.S |  4 +-
 arch/x86/crypto/sha1_avx2_x86_64_asm.S |  4 +-
 arch/x86/crypto/sha1_ni_asm.S  |  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S   |  4 +-
 arch/x86/crypto/sha256-avx-asm.S   |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S  |  4 +-
 .../crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S|  8 +--
 .../crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha256-mb/sha256_x8_avx2.S |  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S |  4 +-
 arch/x86/crypto/sha256_ni_asm.S|  4 +-
 arch/x86/crypto/sha512-avx-asm.S   |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S  |  4 +-
 .../crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S|  8 +--
 .../crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha512-mb/sha512_x4_avx2.S |  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S| 24 -
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S   |  8 +--
 arch/x86/crypto/twofish-x86_64-asm_64.S|  8 +--
 arch/x86/entry/entry_64.S  | 10 ++--
 arch/x86/entry/entry_64_compat.S   |  4 +-
 arch/x86/kernel/acpi/wakeup_64.S   |  8 +--
 arch/x86/kernel/ftrace_64.S| 20 
 arch/x86/kernel/head_64.S  | 12 ++---
 arch/x86/lib/checksum_32.S |  8 +--
 arch/x86/lib/clear_page_64.S   | 12 ++---
 arch/x86/lib/cmpxchg16b_emu.S  |  4 +-
 arch/x86/lib/cmpxchg8b_emu.S   |  4 +-
 arch/x86/lib/copy_page_64.S|  4 +-
 arch/x86/lib/copy_user_64.S| 16 +++---
 arch/x86/lib/csum-copy_64.S|  4 +-
 arch/x86/lib/getuser.S | 16 +++---
 arch/x86/lib/hweight.S |  8 +--
 arch/x86/lib/iomap_copy_64.S   |  4 +-
 arch/x86/lib/memcpy_64.S   |  4 +-
 arch/x86/lib/memmove_64.S  |  4 +-
 arch/x86/lib/memset_64.S   |  4 +-
 arch/x86/lib/msr-reg.S  

[Xen-devel] [PATCH v6 25/28] x86_32/asm: add ENDs to some functions and relabel with SYM_CODE_*

2018-05-18 Thread Jiri Slaby
All these are functions which are invoked from elsewhere, but they are
not typical C functions. So we annotate them using the new
SYM_CODE_START. All these were not balanced with any END, so mark their
ends with SYM_CODE_END, appropriately.

Signed-off-by: Jiri Slaby 
Reviewed-by: Boris Ostrovsky  [xen bits]
Reviewed-by: Rafael J. Wysocki  [hibernate]
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux...@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---
 arch/x86/entry/entry_32.S| 3 ++-
 arch/x86/kernel/acpi/wakeup_32.S | 7 ---
 arch/x86/kernel/ftrace_32.S  | 3 ++-
 arch/x86/kernel/head_32.S| 3 ++-
 arch/x86/power/hibernate_asm_32.S| 6 --
 arch/x86/realmode/rm/trampoline_32.S | 6 --
 arch/x86/xen/xen-asm_32.S| 7 ---
 7 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index f701541ecf10..75d9670bffd8 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -376,9 +376,10 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, 
SYM_L_GLOBAL, SYM_A_NONE)
  * Xen doesn't set %esp to be precisely what the normal SYSENTER
  * entry point expects, so fix it up before using the normal path.
  */
-ENTRY(xen_sysenter_target)
+SYM_CODE_START(xen_sysenter_target)
addl$5*4, %esp  /* remove xen-provided frame */
jmp .Lsysenter_past_esp
+SYM_CODE_END(xen_sysenter_target)
 #endif
 
 /*
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index feac1e5ecba0..71a05a6cc36a 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -8,8 +8,7 @@
.code32
ALIGN
 
-ENTRY(wakeup_pmode_return)
-wakeup_pmode_return:
+SYM_CODE_START(wakeup_pmode_return)
movw$__KERNEL_DS, %ax
movw%ax, %ss
movw%ax, %fs
@@ -38,6 +37,7 @@ wakeup_pmode_return:
# jump to place where we left off
movlsaved_eip, %eax
jmp *%eax
+SYM_CODE_END(wakeup_pmode_return)
 
 bogus_magic:
jmp bogus_magic
@@ -71,7 +71,7 @@ restore_registers:
popfl
ret
 
-ENTRY(do_suspend_lowlevel)
+SYM_CODE_START(do_suspend_lowlevel)
callsave_processor_state
callsave_registers
pushl   $3
@@ -86,6 +86,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_suspend_lowlevel)
 
 .data
 ALIGN
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index b855dc10daeb..f4dca7df8ad6 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -102,7 +102,7 @@ WEAK(ftrace_stub)
ret
 END(ftrace_caller)
 
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
/*
 * i386 does not save SS and ESP when coming from kernel.
 * Instead, to get sp, &regs->sp is used (see ptrace.h).
@@ -170,6 +170,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */
 
jmp .Lftrace_ret
+SYM_CODE_END(ftrace_regs_caller)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(function_hook)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 1a6a6b4e4b4c..ba9df7cc545d 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  * can.
  */
 __HEAD
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
movl pa(initial_stack),%ecx

/* test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
 #else
jmp .Ldefault_entry
 #endif /* CONFIG_PARAVIRT */
+SYM_CODE_END(startup_32)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
diff --git a/arch/x86/power/hibernate_asm_32.S 
b/arch/x86/power/hibernate_asm_32.S
index 6e56815e13a0..3cd15e34aa87 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -15,7 +15,7 @@
 
 .text
 
-ENTRY(swsusp_arch_suspend)
+SYM_CODE_START(swsusp_arch_suspend)
movl %esp, saved_context_esp
movl %ebx, saved_context_ebx
movl %ebp, saved_context_ebp
@@ -26,8 +26,9 @@ ENTRY(swsusp_arch_suspend)
 
call swsusp_save
ret
+SYM_CODE_END(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
movlmmu_cr4_features, %ecx
movlresume_pg_dir, %eax
subl$__PAGE_OFFSET, %eax
@@ -83,3 +84,4 @@ done:
xorl%eax, %eax
 
ret
+SYM_CODE_END(restore_image)
diff --git a/arch/x86/realmode/rm/trampoline_32.S 
b/arch/x86/realmode/rm/trampoline_32.S
index e96efcd60bf7..a3b047a44c5c 100644
--- a/arch/x86/realmode/rm/trampoline_32.S
+++ b/arch/x86/realmode/rm/tr

[Xen-devel] [PATCH v6 09/28] x86/asm: annotate aliases

2018-05-18 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.
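
The aliasing pattern is always the same, sketched here with hypothetical
names. The alias' START comes first and its END comes last, so both symbols
cover the very same code without nesting:

   SYM_FUNC_START_ALIAS(second_name)
   SYM_FUNC_START(first_name)
       ...
       ret
   SYM_FUNC_END(first_name)
   SYM_FUNC_END_ALIAS(second_name)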

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index b482ac1a1fb3..c85ecb163c78 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1761,8 +1761,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
@@ -1773,8 +1772,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..4911b1c61aa8 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -26,7 +26,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -40,7 +40,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 417b339e5c8e..e8f6f482bb20 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -164,13 +164,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.16.3


___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH] xen-netfront: wait xenbus state change when load module manually

2018-08-24 Thread Jiri Slaby
On 07/30/2018, 10:18 AM, Xiao Liang wrote:
> On 07/29/2018 11:30 PM, David Miller wrote:
>> From: Xiao Liang 
>> Date: Fri, 27 Jul 2018 17:56:08 +0800
>>
>>> @@ -1330,6 +1331,11 @@ static struct net_device
>>> *xennet_create_dev(struct xenbus_device *dev)
>>>   netif_carrier_off(netdev);
>>>     xenbus_switch_state(dev, XenbusStateInitialising);
>>> +    wait_event(module_load_q,
>>> +   xenbus_read_driver_state(dev->otherend) !=
>>> +   XenbusStateClosed &&
>>> +   xenbus_read_driver_state(dev->otherend) !=
>>> +   XenbusStateUnknown);
>>>   return netdev;
>>>      exit:
>> What performs the wakeups that will trigger for this sleep site?
> In my understanding, backend leaving closed/unknown state can trigger the
> wakeups. I mean to make sure both sides are ready for creating connection.

While backporting this to 4.12, I was surprised by the commit the same
as Boris and David.

So I assume the explanation is that wake_up_all of module_unload_q in
netback_changed also wakes all the processes waiting on module_load_q?
If so, what makes sure that module_unload_q is queued and the process is
the same as for module_load_q?

To me, it looks rather error-prone, if not erroneous now, then at least
for future changes. Wouldn't it make sense to wake up module_load_q
along with module_unload_q in netback_changed? Or drop module_load_q
completely and use only module_unload_q (i.e. in xennet_create_dev too)?
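
I.e. roughly this in xennet_create_dev (an untested sketch only, reusing the
existing module_unload_q):

	xenbus_switch_state(dev, XenbusStateInitialising);
	wait_event(module_unload_q,
		   xenbus_read_driver_state(dev->otherend) !=
				XenbusStateClosed &&
		   xenbus_read_driver_state(dev->otherend) !=
				XenbusStateUnknown);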

thanks,
-- 
js
suse labs

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH] xen-netfront: wait xenbus state change when load module manually

2018-09-07 Thread Jiri Slaby
On 08/24/2018, 04:26 PM, Boris Ostrovsky wrote:
> On 08/24/2018 07:26 AM, Juergen Gross wrote:
>> On 24/08/18 13:12, Jiri Slaby wrote:
>>> On 07/30/2018, 10:18 AM, Xiao Liang wrote:
>>>> On 07/29/2018 11:30 PM, David Miller wrote:
>>>>> From: Xiao Liang 
>>>>> Date: Fri, 27 Jul 2018 17:56:08 +0800
>>>>>
>>>>>> @@ -1330,6 +1331,11 @@ static struct net_device
>>>>>> *xennet_create_dev(struct xenbus_device *dev)
>>>>>>   netif_carrier_off(netdev);
>>>>>>     xenbus_switch_state(dev, XenbusStateInitialising);
>>>>>> +    wait_event(module_load_q,
>>>>>> +   xenbus_read_driver_state(dev->otherend) !=
>>>>>> +   XenbusStateClosed &&
>>>>>> +   xenbus_read_driver_state(dev->otherend) !=
>>>>>> +   XenbusStateUnknown);
>>>>>>   return netdev;
>>>>>>      exit:
>>>>> What performs the wakeups that will trigger for this sleep site?
>>>> In my understanding, backend leaving closed/unknown state can trigger the
>>>> wakeups. I mean to make sure both sides are ready for creating connection.
>>> While backporting this to 4.12, I was surprised by the commit the same
>>> as Boris and David.
>>>
>>> So I assume the explanation is that wake_up_all of module_unload_q in
>>> netback_changed also wakes all the processes waiting on module_load_q?
>>> If so, what makes sure that module_unload_q is queued and the process is
>>> the same as for module_load_q?
>> How could it? Either the thread is waiting on module_unload_q _or_ on
>> module_load_q. It can't wait on two queues at the same time.
>>
>>> To me, it looks rather error-prone, if not erroneous now, then at least
>>> for future changes. Wouldn't it make sense to wake up module_load_q
>>> along with module_unload_q in netback_changed? Or drop module_load_q
>>> completely and use only module_unload_q (i.e. in xennet_create_dev too)?
>> To me this looks just wrong. A thread waiting on module_load_q won't be
>> woken up again.
>>
>> I'd drop module_load_q in favor of module_unload_q.
> 
> 
> Yes, use single queue, but rename it to something more neutral. module_wq?

Can somebody who is actually using the module fix this, please?

I could fix it, but untested changes are "a bit" worse than tested changes.

thanks,
-- 
js
suse labs

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [PATCH v2] xen/hvc: replace BUG_ON() with negative return value

2021-07-07 Thread Jiri Slaby

On 07. 07. 21, 12:40, Juergen Gross wrote:

And btw, since I was puzzled by linuxppc-dev@ in the recipients
list, I looked up the relevant entries in ./MAINTAINERS. Shouldn't the
file be part of "XEN HYPERVISOR INTERFACE"?


I wouldn't mind. Greg, Jiri, what do you think?


/me concurs.

thanks,
--
js
suse labs



Re: [PATCH 08/11] sysctl: Add size to register_sysctl_init

2023-06-21 Thread Jiri Slaby

On 21. 06. 23, 11:09, Joel Granados wrote:

In order to remove the end element from the ctl_table struct arrays, we
explicitly define the size when registering the targets. We add a size
argument to the register_sysctl_init call and pass an ARRAY_SIZE for all
the callers.


Hi, I am missing here (or in 00/00) _why_ you are doing that. Is it by
any chance those saved 9k? I hope not.


thanks,
--
js
suse labs




Re: [PATCH 08/11] sysctl: Add size to register_sysctl_init

2023-06-21 Thread Jiri Slaby

On 21. 06. 23, 15:15, Joel Granados wrote:

On Wed, Jun 21, 2023 at 12:47:58PM +0200, Greg Kroah-Hartman wrote:

On Wed, Jun 21, 2023 at 11:09:57AM +0200, Joel Granados wrote:

  static int __init random_sysctls_init(void)
  {
-   register_sysctl_init("kernel/random", random_table);
+   register_sysctl_init("kernel/random", random_table,
+ARRAY_SIZE(random_table));


As mentioned before, why not just do:

#define register_sysctl_init(string, table) \
__register_sysctl_init(string, table, ARRAY_SIZE(table))

Answered you in the original mail where you suggested it.


I am curious what that was, do you have a link?

--
js
suse labs




Re: [PATCH 08/11] sysctl: Add size to register_sysctl_init

2023-06-21 Thread Jiri Slaby

On 21. 06. 23, 15:11, Joel Granados wrote:

On Wed, Jun 21, 2023 at 11:56:03AM +0200, Jiri Slaby wrote:

On 21. 06. 23, 11:09, Joel Granados wrote:

In order to remove the end element from the ctl_table struct arrays, we
explicitly define the size when registering the targes. We add a size
argument to the register_sysctl_init call and pass an ARRAY_SIZE for all
the callers.


Hi, I am missing here (or in 00/00) _why_ you are doing that. Is it by any

Not sure what happened. I used the kernel's get_maintainer.pl script
together with git-send-email. These are my settings:

"
tocmd ="`pwd`/scripts/get_maintainer.pl --nogit --nogit-fallback --norolestats --m 
--nol --nor"
cccmd ="`pwd`/scripts/get_maintainer.pl --nogit --nogit-fallback --norolestats --l 
--r --nom"
"

Could it be that there is an error in MAINTAINERS?


Sorry, I don't see what you are asking about. I was asking about the
motivation behind the series. That is a must in commit logs.


thanks,
--
js
suse labs




Re: [PATCH 04/15] tty: Remove now superfluous sentinel element from ctl_table array

2023-10-02 Thread Jiri Slaby

On 28. 09. 23, 15:21, Joel Granados via B4 Relay wrote:

From: Joel Granados 

This commit comes at the tail end of a greater effort to remove the
empty elements at the end of the ctl_table arrays (sentinels) which
will reduce the overall build time size of the kernel and run time
memory bloat by ~64 bytes per sentinel (further information Link :
https://lore.kernel.org/all/zo5yx5jfoggi%2f...@bombadil.infradead.org/)

Remove sentinel from tty_table

Signed-off-by: Joel Granados 
---
  drivers/tty/tty_io.c | 3 +--
  1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8a94e5a43c6d..2f925dc54a20 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -3607,8 +3607,7 @@ static struct ctl_table tty_table[] = {
.proc_handler   = proc_dointvec,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
-   },
-   { }
+   }


Why remove the comma? One would need to add it back when adding a new entry.

thanks,
--
js
suse labs




Re: [PATCH v2 04/15] tty: Remove now superfluous sentinel element from ctl_table array

2023-10-02 Thread Jiri Slaby

On 02. 10. 23, 10:55, Joel Granados via B4 Relay wrote:

From: Joel Granados 

This commit comes at the tail end of a greater effort to remove the
empty elements at the end of the ctl_table arrays (sentinels) which
will reduce the overall build time size of the kernel and run time
memory bloat by ~64 bytes per sentinel (further information Link :
https://lore.kernel.org/all/zo5yx5jfoggi%2f...@bombadil.infradead.org/)

Remove sentinel from tty_table

Signed-off-by: Joel Granados 


Reviewed-by: Jiri Slaby 

thanks,
--
js
suse labs




Re: [PATCH] xen/pciback: use sysfs_emit_at() instead of scnprintf()

2025-03-17 Thread Jiri Slaby

On 17. 03. 25, 8:16, tang.dongx...@zte.com.cn wrote:

From: TangDongxing 

Follow the advice in Documentation/filesystems/sysfs.rst:
show() should only use sysfs_emit() or sysfs_emit_at() when formatting
the value to be returned to user space.

Signed-off-by: TangDongxing 
---
  drivers/xen/xen-pciback/pci_stub.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/xen/xen-pciback/pci_stub.c 
b/drivers/xen/xen-pciback/pci_stub.c
index b616b7768c3b..a0782a74ed34 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -1281,7 +1281,7 @@ static ssize_t slots_show(struct device_driver *drv, char 
*buf)
if (count >= PAGE_SIZE)
break;

-   count += scnprintf(buf + count, PAGE_SIZE - count,
+   count += sysfs_emit_at(buf, count,


This is wrong too [1].

[1] 
https://lore.kernel.org/all/9c552d9a-2d46-4069-a9c4-35fab857b...@kernel.org/


regards,
--
js
suse labs




[PATCH 15/46] x86/xen, lto: Mark xen_vcpu_stolen() as __visible

2022-11-14 Thread Jiri Slaby (SUSE)
From: Andi Kleen 

Symbols referenced from assembler (either directly or e.g. from
DEFINE_STATIC_KEY()) need to be global and visible in gcc LTO because
they could end up in a different object file than the assembler. This
can lead to linker errors without this patch.

So mark xen_vcpu_stolen() as __visible.

Cc: Juergen Gross 
Cc: Stefano Stabellini 
Cc: Oleksandr Tyshchenko 
Cc: 
Signed-off-by: Andi Kleen 
Signed-off-by: Martin Liska 
Signed-off-by: Jiri Slaby 
---
 drivers/xen/time.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/xen/time.c b/drivers/xen/time.c
index 152dd33bb223..006a04592c8f 100644
--- a/drivers/xen/time.c
+++ b/drivers/xen/time.c
@@ -145,7 +145,7 @@ void xen_get_runstate_snapshot(struct vcpu_runstate_info 
*res)
 }
 
 /* return true when a vcpu could run but has no real cpu to run on */
-bool xen_vcpu_stolen(int vcpu)
+__visible bool xen_vcpu_stolen(int vcpu)
 {
return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
 }
-- 
2.38.1




[Xen-devel] [tip: x86/asm] x86/asm: Make some functions local

2019-10-18 Thread tip-bot2 for Jiri Slaby
The following commit has been merged into the x86/asm branch of tip:

Commit-ID: ef1e03152cb027d5925646d4d1772ced7595292f
Gitweb:
https://git.kernel.org/tip/ef1e03152cb027d5925646d4d1772ced7595292f
Author: Jiri Slaby
AuthorDate: Fri, 11 Oct 2019 13:51:00 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 18 Oct 2019 11:34:39 +02:00

x86/asm: Make some functions local

There are a couple of assembly functions which are invoked only locally
in the file they are defined in. In C, they are marked "static". In
assembly, annotate them using SYM_{FUNC,CODE}_START_LOCAL (and switch
their ENDPROC to SYM_{FUNC,CODE}_END too). Whether FUNC or CODE is used
depends on whether ENDPROC or END was used for a particular function
before.

Signed-off-by: Jiri Slaby 
Signed-off-by: Borislav Petkov 
Cc: Andy Lutomirski 
Cc: Andy Shevchenko 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Darren Hart 
Cc: Greg Kroah-Hartman 
Cc: "H. Peter Anvin" 
Cc: Ingo Molnar 
Cc: Juergen Gross 
Cc: linux-a...@vger.kernel.org
Cc: linux-efi 
Cc: linux-...@vger.kernel.org
Cc: Matt Fleming 
Cc: Peter Zijlstra 
Cc: platform-driver-...@vger.kernel.org
Cc: Stefano Stabellini 
Cc: Thomas Gleixner 
Cc: x86-ml 
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/2019105108.12392-21-jsl...@suse.cz
---
 arch/x86/boot/compressed/efi_thunk_64.S |  8 
 arch/x86/entry/entry_64.S   | 21 +++--
 arch/x86/lib/copy_page_64.S |  4 ++--
 arch/x86/lib/memcpy_64.S| 12 ++--
 arch/x86/lib/memset_64.S|  8 
 arch/x86/platform/efi/efi_thunk_64.S| 12 ++--
 arch/x86/platform/pvh/head.S|  4 ++--
 7 files changed, 35 insertions(+), 34 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_thunk_64.S 
b/arch/x86/boot/compressed/efi_thunk_64.S
index d66000d..3131207 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -99,12 +99,12 @@ ENTRY(efi64_thunk)
ret
 ENDPROC(efi64_thunk)
 
-ENTRY(efi_exit32)
+SYM_FUNC_START_LOCAL(efi_exit32)
movqfunc_rt_ptr(%rip), %rax
push%rax
mov %rdi, %rax
ret
-ENDPROC(efi_exit32)
+SYM_FUNC_END(efi_exit32)
 
.code32
 /*
@@ -112,7 +112,7 @@ ENDPROC(efi_exit32)
  *
  * The stack should represent the 32-bit calling convention.
  */
-ENTRY(efi_enter32)
+SYM_FUNC_START_LOCAL(efi_enter32)
movl$__KERNEL_DS, %eax
movl%eax, %ds
movl%eax, %es
@@ -172,7 +172,7 @@ ENTRY(efi_enter32)
btsl$X86_CR0_PG_BIT, %eax
movl%eax, %cr0
lret
-ENDPROC(efi_enter32)
+SYM_FUNC_END(efi_enter32)
 
.data
.balign 8
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 57d2460..1568da6 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1101,7 +1101,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback 
has_error_code=0
  * existing activation in its critical region -- if so, we pop the current
  * activation and restart the handler using the previous one.
  */
-ENTRY(xen_do_hypervisor_callback)  /* 
do_hypervisor_callback(struct *pt_regs) */
+/* do_hypervisor_callback(struct *pt_regs) */
+SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
 
 /*
  * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
@@ -1119,7 +1120,7 @@ ENTRY(xen_do_hypervisor_callback) /* 
do_hypervisor_callback(struct *pt_regs) */
callxen_maybe_preempt_hcall
 #endif
jmp error_exit
-END(xen_do_hypervisor_callback)
+SYM_CODE_END(xen_do_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1214,7 +1215,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
-ENTRY(paranoid_entry)
+SYM_CODE_START_LOCAL(paranoid_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1248,7 +1249,7 @@ ENTRY(paranoid_entry)
FENCE_SWAPGS_KERNEL_ENTRY
 
ret
-END(paranoid_entry)
+SYM_CODE_END(paranoid_entry)
 
 /*
  * "Paranoid" exit path from exception stack.  This is invoked
@@ -1262,7 +1263,7 @@ END(paranoid_entry)
  *
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
-ENTRY(paranoid_exit)
+SYM_CODE_START_LOCAL(paranoid_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
@@ -1279,12 +1280,12 @@ ENTRY(paranoid_exit)
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
 .Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
-END(paranoid_exit)
+SYM_CODE_END(paranoid_exit)
 
 /*
  * Save all registers in pt_regs, and switch GS if needed.
  */
-ENTRY(error_entry)
+SYM_CODE_S

[Xen-devel] [tip: x86/asm] x86/asm/32: Add ENDs to some functions and relabel with SYM_CODE_*

2019-10-18 Thread tip-bot2 for Jiri Slaby
The following commit has been merged into the x86/asm branch of tip:

Commit-ID: 78762b0e79bc1dd01347be061abdf505202152c9
Gitweb:
https://git.kernel.org/tip/78762b0e79bc1dd01347be061abdf505202152c9
Author: Jiri Slaby
AuthorDate: Fri, 11 Oct 2019 13:51:05 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 18 Oct 2019 11:58:33 +02:00

x86/asm/32: Add ENDs to some functions and relabel with SYM_CODE_*

All these are functions which are invoked from elsewhere but they are
not typical C functions. So annotate them using the new SYM_CODE_START.
All these were not balanced with any END, so mark their ends by
SYM_CODE_END, appropriately.

Signed-off-by: Jiri Slaby 
Signed-off-by: Borislav Petkov 
Reviewed-by: Boris Ostrovsky  [xen bits]
Reviewed-by: Rafael J. Wysocki  [hibernate]
Cc: Andy Lutomirski 
Cc: "H. Peter Anvin" 
Cc: Ingo Molnar 
Cc: Josh Poimboeuf 
Cc: Juergen Gross 
Cc: Len Brown 
Cc: linux-a...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: Pavel Machek 
Cc: Peter Zijlstra 
Cc: Pingfan Liu 
Cc: Stefano Stabellini 
Cc: "Steven Rostedt (VMware)" 
Cc: Thomas Gleixner 
Cc: x86-ml 
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/2019105108.12392-26-jsl...@suse.cz
---
 arch/x86/entry/entry_32.S| 3 ++-
 arch/x86/kernel/acpi/wakeup_32.S | 7 ---
 arch/x86/kernel/ftrace_32.S  | 3 ++-
 arch/x86/kernel/head_32.S| 3 ++-
 arch/x86/power/hibernate_asm_32.S| 6 --
 arch/x86/realmode/rm/trampoline_32.S | 6 --
 arch/x86/xen/xen-asm_32.S| 7 ---
 7 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 4900a6a..64fe7aa 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -847,9 +847,10 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, 
SYM_L_GLOBAL, SYM_A_NONE)
  * Xen doesn't set %esp to be precisely what the normal SYSENTER
  * entry point expects, so fix it up before using the normal path.
  */
-ENTRY(xen_sysenter_target)
+SYM_CODE_START(xen_sysenter_target)
addl$5*4, %esp  /* remove xen-provided frame */
jmp .Lsysenter_past_esp
+SYM_CODE_END(xen_sysenter_target)
 #endif
 
 /*
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 4272492..daf88f8 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -9,8 +9,7 @@
.code32
ALIGN
 
-ENTRY(wakeup_pmode_return)
-wakeup_pmode_return:
+SYM_CODE_START(wakeup_pmode_return)
movw$__KERNEL_DS, %ax
movw%ax, %ss
movw%ax, %fs
@@ -39,6 +38,7 @@ wakeup_pmode_return:
# jump to place where we left off
movlsaved_eip, %eax
jmp *%eax
+SYM_CODE_END(wakeup_pmode_return)
 
 bogus_magic:
jmp bogus_magic
@@ -72,7 +72,7 @@ restore_registers:
popfl
ret
 
-ENTRY(do_suspend_lowlevel)
+SYM_CODE_START(do_suspend_lowlevel)
callsave_processor_state
callsave_registers
pushl   $3
@@ -87,6 +87,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_suspend_lowlevel)
 
 .data
 ALIGN
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 219be13..a43ed4c 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -89,7 +89,7 @@ WEAK(ftrace_stub)
ret
 END(ftrace_caller)
 
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
/*
 * We're here from an mcount/fentry CALL, and the stack frame looks 
like:
 *
@@ -163,6 +163,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
popl%eax
 
jmp .Lftrace_ret
+SYM_CODE_END(ftrace_regs_caller)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index e2b3e6c..7029bba 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  * can.
  */
 __HEAD
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
movl pa(initial_stack),%ecx

/* test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
 #else
jmp .Ldefault_entry
 #endif /* CONFIG_PARAVIRT */
+SYM_CODE_END(startup_32)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
diff --git a/arch/x86/power/hibernate_asm_32.S 
b/arch/x86/power/hibernate_asm_32.S
index 6fe3830..a19ed3d 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend)
ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movlrestore_jump_address, %ebx
movlrestore_cr3, %ebp
@@ -45,9

[Xen-devel] [tip: x86/asm] linkage: Introduce new macros for assembler symbols

2019-10-18 Thread tip-bot2 for Jiri Slaby
The following commit has been merged into the x86/asm branch of tip:

Commit-ID: ffedeeb780dc554eff3d3b16e6a462a26a41d7ec
Gitweb:
https://git.kernel.org/tip/ffedeeb780dc554eff3d3b16e6a462a26a41d7ec
Author: Jiri Slaby
AuthorDate: Fri, 11 Oct 2019 13:50:41 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 18 Oct 2019 09:48:11 +02:00

linkage: Introduce new macros for assembler symbols

Introduce new C macros for annotations of functions and data in
assembly. There is a long-standing mess in macros like ENTRY, END,
ENDPROC and similar. They are used in different manners and sometimes
incorrectly.

So introduce macros with clear use to annotate assembly as follows:

a) Support macros for the ones below
   SYM_T_FUNC -- type used by assembler to mark functions
   SYM_T_OBJECT -- type used by assembler to mark data
   SYM_T_NONE -- type used by assembler to mark entries of unknown type

   They are defined as STT_FUNC, STT_OBJECT, and STT_NOTYPE
   respectively. According to the gas manual, this is the most portable
   way. I am not sure about other assemblers, so this can be switched
   back to %function and %object if this turns into a problem.
   Architectures can also override them by something like ", @function"
   if they need.

   SYM_A_ALIGN, SYM_A_NONE -- align the symbol?
   SYM_L_GLOBAL, SYM_L_WEAK, SYM_L_LOCAL -- linkage of symbols

b) Mostly internal annotations, used by the ones below
   SYM_ENTRY -- use only if you have to (for non-paired symbols)
   SYM_START -- use only if you have to (for paired symbols)
   SYM_END -- use only if you have to (for paired symbols)

c) Annotations for code
   SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code
   SYM_INNER_LABEL -- only for labels in the middle of code

   SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for
one function
   SYM_FUNC_START_ALIAS -- use where there are two global names for one
function
   SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function

   SYM_FUNC_START -- use for global functions
   SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment
   SYM_FUNC_START_LOCAL -- use for local functions
   SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o
alignment
   SYM_FUNC_START_WEAK -- use for weak functions
   SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment
   SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
SYM_FUNC_START_WEAK, ...

   For functions with special (non-C) calling conventions:
   SYM_CODE_START -- use for non-C (special) functions
   SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o
alignment
   SYM_CODE_START_LOCAL -- use for local non-C (special) functions
   SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special)
functions, w/o alignment
   SYM_CODE_END -- the end of SYM_CODE_START_LOCAL or SYM_CODE_START

d) For data
   SYM_DATA_START -- global data symbol
   SYM_DATA_START_LOCAL -- local data symbol
   SYM_DATA_END -- the end of the SYM_DATA_START symbol
   SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol
   SYM_DATA -- start+end wrapper around simple global data
   SYM_DATA_LOCAL -- start+end wrapper around simple local data

==

The macros allow pairing the starts and ends of functions and mark functions
correctly in the output ELF objects.
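
For instance, a non-C entry point with a label in the middle of its body now
pairs up as follows (a sketch with hypothetical names):

   SYM_CODE_START(entry_point)
       ...
   SYM_INNER_LABEL(entry_point_continue, SYM_L_GLOBAL)
       ...
   SYM_CODE_END(entry_point)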

All users of the old macros in x86 are converted to use these in further
patches.

Signed-off-by: Jiri Slaby 
Signed-off-by: Borislav Petkov 
Acked-by: Rafael J. Wysocki 
Cc: Andrew Morton 
Cc: Andrey Ryabinin 
Cc: Boris Ostrovsky 
Cc: "H. Peter Anvin" 
Cc: Ingo Molnar 
Cc: Jonathan Corbet 
Cc: Josh Poimboeuf 
Cc: Juergen Gross 
Cc: Len Brown 
Cc: Linus Torvalds 
Cc: linux-a...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: linux-ker...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: Mark Rutland 
Cc: Pavel Machek 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Will Deacon 
Cc: x86-ml 
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/2019105108.12392-2-jsl...@suse.cz
---
 Documentation/asm-annotations.rst | 216 ++-
 Documentation/index.rst   |   8 +-
 arch/x86/include/asm/linkage.h|  10 +-
 include/linux/linkage.h   | 245 -
 4 files changed, 468 insertions(+), 11 deletions(-)
 create mode 100644 Documentation/asm-annotations.rst

diff --git a/Documentation/asm-annotations.rst 
b/Documentation/asm-annotations.rst
new file mode 100644
index 000..29ccd6e
--- /dev/null
+++ b/Documentation/asm-annotations.rst
@@ -0,0 +1,216 @@
+Assembler Annotations
+=====================
+
+Copyright (c) 2017-2019 Jiri Slaby
+
+This document describes the new macros for annotation of data and code in
+assembly. In particular, it contains information about ``SYM_FUNC_START``,
+``SYM_FUNC_END``, ``SYM_CODE_START``, and similar.
+
+Rationale
+---------
+So

[Xen-devel] [tip: x86/asm] x86/asm: Annotate aliases

2019-10-18 Thread tip-bot2 for Jiri Slaby
The following commit has been merged into the x86/asm branch of tip:

Commit-ID: e9b9d020c4873d5e90d9986cfd137afbafbc5bfa
Gitweb:
https://git.kernel.org/tip/e9b9d020c4873d5e90d9986cfd137afbafbc5bfa
Author: Jiri Slaby
AuthorDate: Fri, 11 Oct 2019 13:50:49 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 18 Oct 2019 10:38:23 +02:00

x86/asm: Annotate aliases

_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy as it avoids nesting and double symbols.

Signed-off-by: Jiri Slaby 
Signed-off-by: Borislav Petkov 
Reviewed-by: Juergen Gross  [xen parts]
Cc: Boris Ostrovsky 
Cc: "David S. Miller" 
Cc: Greg Kroah-Hartman 
Cc: Herbert Xu 
Cc: "H. Peter Anvin" 
Cc: Ingo Molnar 
Cc: linux-a...@vger.kernel.org
Cc: linux-cry...@vger.kernel.org
Cc: Peter Zijlstra 
Cc: Stefano Stabellini 
Cc: Thomas Gleixner 
Cc: x86-ml 
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/2019105108.12392-10-jsl...@suse.cz
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index e0a5fb4..9d8d5f2 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1757,8 +1757,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
@@ -1769,8 +1768,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9274866..57a6426 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -28,7 +28,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -42,7 +42,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d..50c1648 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c..927ac44 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index ebf610b..45c1249 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -167,13 +167,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [tip: x86/asm] x86/asm: Change all ENTRY+ENDPROC to SYM_FUNC_*

2019-10-18 Thread tip-bot2 for Jiri Slaby
The following commit has been merged into the x86/asm branch of tip:

Commit-ID: 6dcc5627f6aec4cb1d1494d06a48d8061db06a04
Gitweb:
https://git.kernel.org/tip/6dcc5627f6aec4cb1d1494d06a48d8061db06a04
Author: Jiri Slaby
AuthorDate: Fri, 11 Oct 2019 13:51:04 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 18 Oct 2019 11:58:33 +02:00

x86/asm: Change all ENTRY+ENDPROC to SYM_FUNC_*

These are all functions which are invoked from elsewhere, so annotate
them as global using the new SYM_FUNC_START and their ENDPROCs by
SYM_FUNC_END.

Make sure ENTRY/ENDPROC is not defined on X86_64, given these were the
last users.

Signed-off-by: Jiri Slaby 
Signed-off-by: Borislav Petkov 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Acked-by: Herbert Xu  [crypto]
Cc: Allison Randal 
Cc: Andrey Ryabinin 
Cc: Andy Lutomirski 
Cc: Andy Shevchenko 
Cc: Ard Biesheuvel 
Cc: Armijn Hemel 
Cc: Cao jin 
Cc: Darren Hart 
Cc: Dave Hansen 
Cc: "David S. Miller" 
Cc: Enrico Weigelt 
Cc: Greg Kroah-Hartman 
Cc: Herbert Xu 
Cc: "H. Peter Anvin" 
Cc: Ingo Molnar 
Cc: Jim Mattson 
Cc: Joerg Roedel 
Cc: Josh Poimboeuf 
Cc: Juergen Gross 
Cc: Kate Stewart 
Cc: "Kirill A. Shutemov" 
Cc: kvm ML 
Cc: Len Brown 
Cc: linux-a...@vger.kernel.org
Cc: linux-cry...@vger.kernel.org
Cc: linux-efi 
Cc: linux-...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: Mark Rutland 
Cc: Matt Fleming 
Cc: Paolo Bonzini 
Cc: Pavel Machek 
Cc: Peter Zijlstra 
Cc: platform-driver-...@vger.kernel.org
Cc: "Radim Krčmář" 
Cc: Sean Christopherson 
Cc: Stefano Stabellini 
Cc: "Steven Rostedt (VMware)" 
Cc: Thomas Gleixner 
Cc: Vitaly Kuznetsov 
Cc: Wanpeng Li 
Cc: Wei Huang 
Cc: x86-ml 
Cc: xen-devel@lists.xenproject.org
Cc: Xiaoyao Li 
Link: https://lkml.kernel.org/r/2019105108.12392-25-jsl...@suse.cz
---
 arch/x86/boot/compressed/efi_thunk_64.S  |  4 +-
 arch/x86/boot/compressed/head_64.S   | 16 ++---
 arch/x86/boot/compressed/mem_encrypt.S   |  8 +--
 arch/x86/crypto/aegis128-aesni-asm.S | 28 -
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S  | 12 ++--
 arch/x86/crypto/aesni-intel_asm.S| 60 +--
 arch/x86/crypto/aesni-intel_avx-x86_64.S | 32 +-
 arch/x86/crypto/blowfish-x86_64-asm_64.S | 16 ++---
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 24 
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 24 
 arch/x86/crypto/camellia-x86_64-asm_64.S | 16 ++---
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S| 16 ++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S| 24 
 arch/x86/crypto/chacha-avx2-x86_64.S | 12 ++--
 arch/x86/crypto/chacha-avx512vl-x86_64.S | 12 ++--
 arch/x86/crypto/chacha-ssse3-x86_64.S| 12 ++--
 arch/x86/crypto/crc32-pclmul_asm.S   |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S|  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S   |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S|  8 +--
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  8 +--
 arch/x86/crypto/nh-avx2-x86_64.S |  4 +-
 arch/x86/crypto/nh-sse2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-avx2-x86_64.S   |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S   |  8 +--
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  | 24 
 arch/x86/crypto/serpent-avx2-asm_64.S| 24 
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S |  8 +--
 arch/x86/crypto/sha1_avx2_x86_64_asm.S   |  4 +-
 arch/x86/crypto/sha1_ni_asm.S|  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S |  4 +-
 arch/x86/crypto/sha256-avx-asm.S |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S|  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S   |  4 +-
 arch/x86/crypto/sha256_ni_asm.S  |  4 +-
 arch/x86/crypto/sha512-avx-asm.S |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S|  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S   |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  | 24 
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S |  8 +--
 arch/x86/crypto/twofish-x86_64-asm_64.S  |  8 +--
 arch/x86/entry/entry_64.S| 10 +--
 arch/x86/entry/entry_64_compat.S |  4 +-
 arch/x86/kernel/acpi/wakeup_64.S |  8 +--
 arch/x86/kernel/ftrace_64.S  | 20 +++---
 arch/x86/kernel/irqflags.S   |  8 +--
 arch/x86/kvm/vmx/vmenter.S   | 12 ++--
 arch/x86/lib/checksum_32.S   |  8 +--
 arch/x86/lib/clear_page_64.S | 12 ++--
 arch/x86/lib/cmpxchg16b_emu.S|  4 +-
 arch/x86/lib/cmpxchg8b_emu.S |  4 +-
 arch/x86/lib/copy_page_64.S  |  4 +-
 arch/x86/lib/copy_user_64.S  | 16 ++---
 arch/x86/lib/csum-copy_64.S  |  4 +-
 arch/x86/lib/getuser.S   | 16 ++---
 arch/x86/lib

[Xen-devel] [tip: x86/asm] x86/asm/64: Change all ENTRY+END to SYM_CODE_*

2019-10-18 Thread tip-bot2 for Jiri Slaby
The following commit has been merged into the x86/asm branch of tip:

Commit-ID: bc7b11c04ee9c9b0451ebf85bf64e0de69fdbb17
Gitweb: https://git.kernel.org/tip/bc7b11c04ee9c9b0451ebf85bf64e0de69fdbb17
Author: Jiri Slaby 
AuthorDate: Fri, 11 Oct 2019 13:51:03 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 18 Oct 2019 11:58:26 +02:00

x86/asm/64: Change all ENTRY+END to SYM_CODE_*

Change all assembly code which is marked using END (and not ENDPROC),
switching it to the appropriate new annotations, SYM_CODE_START and
SYM_CODE_END.
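
A minimal sketch of one such site (names hypothetical). Unlike
SYM_FUNC_*, the SYM_CODE_* pair is meant for code that does not follow
the C ABI (entry stubs, interrupt trampolines); per the series'
linkage.h, SYM_CODE_END still emits a .size for the symbol but leaves
its type as STT_NOTYPE rather than STT_FUNC:

	/* before: END, not ENDPROC */
ENTRY(my_stub)
	jmp	common_handler		/* common_handler is hypothetical */
END(my_stub)

	/* after */
SYM_CODE_START(my_stub)
	jmp	common_handler
SYM_CODE_END(my_stub)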

Signed-off-by: Jiri Slaby 
Signed-off-by: Borislav Petkov 
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: Andy Lutomirski 
Cc: Cao jin 
Cc: "H. Peter Anvin" 
Cc: Ingo Molnar 
Cc: Jiri Kosina 
Cc: Josh Poimboeuf 
Cc: Juergen Gross 
Cc: linux-a...@vger.kernel.org
Cc: Maran Wilson 
Cc: Peter Zijlstra 
Cc: Stefano Stabellini 
Cc: "Steven Rostedt (VMware)" 
Cc: Thomas Gleixner 
Cc: x86-ml 
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/2019105108.12392-24-jsl...@suse.cz
---
 arch/x86/entry/entry_64.S| 52 +++
 arch/x86/entry/entry_64_compat.S |  8 ++---
 arch/x86/kernel/ftrace_64.S  |  4 +-
 arch/x86/kernel/head_64.S| 12 +++
 arch/x86/xen/xen-asm_64.S|  8 ++---
 arch/x86/xen/xen-head.S  |  8 ++---
 6 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1568da6..13e4fe3 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -46,11 +46,11 @@
 .section .entry.text, "ax"
 
 #ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
+SYM_CODE_START(native_usergs_sysret64)
UNWIND_HINT_EMPTY
swapgs
sysretq
-END(native_usergs_sysret64)
+SYM_CODE_END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 .macro TRACE_IRQS_FLAGS flags:req
@@ -142,7 +142,7 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
-ENTRY(entry_SYSCALL_64)
+SYM_CODE_START(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
/*
 * Interrupts are off on entry.
@@ -273,13 +273,13 @@ syscall_return_via_sysret:
popq%rdi
popq%rsp
USERGS_SYSRET64
-END(entry_SYSCALL_64)
+SYM_CODE_END(entry_SYSCALL_64)
 
 /*
  * %rdi: prev task
  * %rsi: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
UNWIND_HINT_FUNC
/*
 * Save callee-saved registers
@@ -321,7 +321,7 @@ ENTRY(__switch_to_asm)
popq%rbp
 
jmp __switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
 
 /*
  * A newly forked process directly context switches into this address.
@@ -330,7 +330,7 @@ END(__switch_to_asm)
  * rbx: kernel thread func (NULL for user thread)
  * r12: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
UNWIND_HINT_EMPTY
movq%rax, %rdi
callschedule_tail   /* rdi: 'prev' task parameter */
@@ -357,14 +357,14 @@ ENTRY(ret_from_fork)
 */
movq$0, RAX(%rsp)
jmp 2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Build the entry stubs with some assembler magic.
  * We pack 1 stub into every 8-byte block.
  */
.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
 vector=FIRST_EXTERNAL_VECTOR
 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
UNWIND_HINT_IRET_REGS
@@ -373,10 +373,10 @@ ENTRY(irq_entries_start)
.align  8
vector=vector+1
 .endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
 
.align 8
-ENTRY(spurious_entries_start)
+SYM_CODE_START(spurious_entries_start)
 vector=FIRST_SYSTEM_VECTOR
 .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
UNWIND_HINT_IRET_REGS
@@ -385,7 +385,7 @@ ENTRY(spurious_entries_start)
.align  8
vector=vector+1
 .endr
-END(spurious_entries_start)
+SYM_CODE_END(spurious_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
@@ -511,7 +511,7 @@ END(spurious_entries_start)
  * | return address|
  * ++
  */
-ENTRY(interrupt_entry)
+SYM_CODE_START(interrupt_entry)
UNWIND_HINT_FUNC
ASM_CLAC
cld
@@ -579,7 +579,7 @@ ENTRY(interrupt_entry)
TRACE_IRQS_OFF
 
ret
-END(interrupt_entry)
+SYM_CODE_END(interrupt_entry)
 _ASM_NOKPROBE(interrupt_entry)
 
 
@@ -795,7 +795,7 @@ _ASM_NOKPROBE(common_interrupt)
  * APIC interrupts.
  */
 .macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET_REGS
pushq   $~(\num)
 .Lcommon_\sym:
@@ -803,7 +803,7 @@ ENTRY(\sym)
UNWIND_HINT_REGS indirect=1
call\do_sym /* rdi points to pt_regs */
jmp ret_from_intr
-END(\sym)
+SYM_CODE_END(\sym)
 _ASM_

[Xen-devel] [tip: x86/asm] xen/pvh: Annotate data appropriately

2019-10-18 Thread tip-bot2 for Jiri Slaby
The following commit has been merged into the x86/asm branch of tip:

Commit-ID: 1de5bdce0c3f8294d0aabc48fb5497814589422f
Gitweb: https://git.kernel.org/tip/1de5bdce0c3f8294d0aabc48fb5497814589422f
Author: Jiri Slaby 
AuthorDate: Fri, 11 Oct 2019 13:50:54 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 18 Oct 2019 10:46:06 +02:00

xen/pvh: Annotate data appropriately

Use the new SYM_DATA_START_LOCAL and SYM_DATA_END* macros to get:

  0000     8 OBJECT  LOCAL  DEFAULT    6 gdt
  0008    32 OBJECT  LOCAL  DEFAULT    6 gdt_start
  0028     0 OBJECT  LOCAL  DEFAULT    6 gdt_end
  0028   256 OBJECT  LOCAL  DEFAULT    6 early_stack
  0128     0 OBJECT  LOCAL  DEFAULT    6 early_stack_end
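
Roughly, the data macros behave as below (paraphrased semantics, not
the verbatim linkage.h definitions); this is where the sizes and the
zero-size end labels in the table above come from:

SYM_DATA_START_LOCAL(name)	/* starts a local data symbol: "name:" */
SYM_DATA_END(name)		/* emits .type name, STT_OBJECT and
				   .size name, . - name */
SYM_DATA_END_LABEL(name, SYM_L_LOCAL, label)
				/* like SYM_DATA_END(name), plus a
				   zero-size local OBJECT symbol "label"
				   at the end of the data */
SYM_DATA_LOCAL(name, data)	/* one-liner: START_LOCAL + data + END */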

Signed-off-by: Jiri Slaby 
Signed-off-by: Borislav Petkov 
Reviewed-by: Boris Ostrovsky 
Cc: Andy Shevchenko 
Cc: Darren Hart 
Cc: "H. Peter Anvin" 
Cc: Ingo Molnar 
Cc: Juergen Gross 
Cc: linux-a...@vger.kernel.org
Cc: platform-driver-...@vger.kernel.org
Cc: Stefano Stabellini 
Cc: Thomas Gleixner 
Cc: x86-ml 
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/2019105108.12392-15-jsl...@suse.cz
---
 arch/x86/platform/pvh/head.S | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index 1f8825b..4e63480 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -150,11 +150,12 @@ END(pvh_start_xen)
 
.section ".init.data","aw"
.balign 8
-gdt:
+SYM_DATA_START_LOCAL(gdt)
.word gdt_end - gdt_start
.long _pa(gdt_start)
.word 0
-gdt_start:
+SYM_DATA_END(gdt)
+SYM_DATA_START_LOCAL(gdt_start)
	.quad 0x0000000000000000	/* NULL descriptor */
 #ifdef CONFIG_X86_64
.quad GDT_ENTRY(0xa09a, 0, 0xf) /* PVH_CS_SEL */
@@ -163,15 +164,14 @@ gdt_start:
 #endif
.quad GDT_ENTRY(0xc092, 0, 0xf) /* PVH_DS_SEL */
.quad GDT_ENTRY(0x4090, 0, 0x18)/* PVH_CANARY_SEL */
-gdt_end:
+SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
 
.balign 16
-canary:
-   .fill 48, 1, 0
+SYM_DATA_LOCAL(canary, .fill 48, 1, 0)
 
-early_stack:
+SYM_DATA_START_LOCAL(early_stack)
.fill BOOT_STACK_SIZE, 1, 0
-early_stack_end:
+SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
 
ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
 _ASM_PTR (pvh_start_xen - __START_KERNEL_map))

[Xen-devel] [tip: x86/asm] x86/asm/64: Add ENDs to some functions and relabel with SYM_CODE_*

2019-10-18 Thread tip-bot2 for Jiri Slaby
The following commit has been merged into the x86/asm branch of tip:

Commit-ID: 4aec216b93dd8e3597124f41369ec835ff18dbd0
Gitweb: https://git.kernel.org/tip/4aec216b93dd8e3597124f41369ec835ff18dbd0
Author: Jiri Slaby 
AuthorDate: Fri, 11 Oct 2019 13:51:02 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 18 Oct 2019 11:58:16 +02:00

x86/asm/64: Add ENDs to some functions and relabel with SYM_CODE_*

All these are functions which are invoked from elsewhere, but they are
not typical C functions, so annotate them using the new SYM_CODE_START.
None of them were balanced with an END, so mark their ends with
SYM_CODE_END as well.
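
In sketch form (names hypothetical), each site goes from an
unterminated ENTRY to a balanced pair, so the symbol finally gets a
proper .size:

	/* before: no closing annotation at all */
ENTRY(startup_stub)
	jmp	.Lsomewhere		/* .Lsomewhere is hypothetical */

	/* after */
SYM_CODE_START(startup_stub)
	jmp	.Lsomewhere
SYM_CODE_END(startup_stub)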

Signed-off-by: Jiri Slaby 
Signed-off-by: Borislav Petkov 
Reviewed-by: Boris Ostrovsky  [xen bits]
Acked-by: Rafael J. Wysocki  [power mgmt]
Cc: Andy Shevchenko 
Cc: Cao jin 
Cc: Darren Hart 
Cc: "H. Peter Anvin" 
Cc: Ingo Molnar 
Cc: Juergen Gross 
Cc: "Kirill A. Shutemov" 
Cc: linux-a...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: Pavel Machek 
Cc: Pingfan Liu 
Cc: platform-driver-...@vger.kernel.org
Cc: "Rafael J. Wysocki" 
Cc: Stefano Stabellini 
Cc: Thomas Gleixner 
Cc: Wei Huang 
Cc: x86-ml 
Cc: xen-devel@lists.xenproject.org
Cc: Xiaoyao Li 
Link: https://lkml.kernel.org/r/2019105108.12392-23-jsl...@suse.cz
---
 arch/x86/boot/compressed/head_64.S   |  6 --
 arch/x86/platform/olpc/xo1-wakeup.S  |  3 ++-
 arch/x86/power/hibernate_asm_64.S|  6 --
 arch/x86/realmode/rm/reboot.S|  3 ++-
 arch/x86/realmode/rm/trampoline_64.S | 10 +++---
 arch/x86/realmode/rm/wakeup_asm.S|  3 ++-
 arch/x86/xen/xen-asm_64.S|  6 --
 7 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index ca762ea..5580046 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -250,7 +250,7 @@ ENDPROC(efi32_stub_entry)
 
.code64
.org 0x200
-ENTRY(startup_64)
+SYM_CODE_START(startup_64)
/*
 * 64bit entry is 0x200 and it is ABI so immutable!
 * We come here either from startup_32 or directly from a
@@ -442,6 +442,7 @@ trampoline_return:
  */
leaq.Lrelocated(%rbx), %rax
jmp *%rax
+SYM_CODE_END(startup_64)
 
 #ifdef CONFIG_EFI_STUB
 
@@ -571,7 +572,7 @@ SYM_FUNC_END(.Lrelocated)
  * ECX contains the base address of the trampoline memory.
  * Non zero RDX means trampoline needs to enable 5-level paging.
  */
-ENTRY(trampoline_32bit_src)
+SYM_CODE_START(trampoline_32bit_src)
/* Set up data and stack segments */
movl$__KERNEL_DS, %eax
movl%eax, %ds
@@ -634,6 +635,7 @@ ENTRY(trampoline_32bit_src)
movl%eax, %cr0
 
lret
+SYM_CODE_END(trampoline_32bit_src)
 
.code64
 SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
diff --git a/arch/x86/platform/olpc/xo1-wakeup.S b/arch/x86/platform/olpc/xo1-wakeup.S
index 5fee3a2..75f4faf 100644
--- a/arch/x86/platform/olpc/xo1-wakeup.S
+++ b/arch/x86/platform/olpc/xo1-wakeup.S
@@ -90,7 +90,7 @@ restore_registers:
 
ret
 
-ENTRY(do_olpc_suspend_lowlevel)
+SYM_CODE_START(do_olpc_suspend_lowlevel)
callsave_processor_state
callsave_registers
 
@@ -110,6 +110,7 @@ ret_point:
callrestore_registers
callrestore_processor_state
ret
+SYM_CODE_END(do_olpc_suspend_lowlevel)
 
 .data
 saved_gdt: .long   0,0
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index a4d5eb0..4057cd5 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -52,7 +52,7 @@ ENTRY(swsusp_arch_suspend)
ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movqrestore_jump_address(%rip), %r8
movqrestore_cr3(%rip), %r9
@@ -67,9 +67,10 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movqrelocated_restore_code(%rip), %rcx
jmpq*%rcx
+SYM_CODE_END(restore_image)
 
/* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
/* switch to temporary page tables */
movq%rax, %cr3
/* flush TLB */
@@ -97,6 +98,7 @@ ENTRY(core_restore_code)
 .Ldone:
/* jump to the restore_registers address from the image header */
jmpq*%r8
+SYM_CODE_END(core_restore_code)
 
 /* code below belongs to the image kernel */
.align PAGE_SIZE
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index 424826a..f10515b 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -19,7 +19,7 @@
  */
.section ".text32", "ax"
.code32
-ENTRY(machine_real_restart_asm)
+SYM_CODE_START(machine_real_restart_asm)
 
 #ifdef