... and zero it more efficiently. A diff of the resulting binaries shows that the size of the BSS doesn't actually change from the extra ALIGN()s, but they do guarantee that it is safe to clear in a more efficient manner than 1 byte at a time.
Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com> CC: Keir Fraser <k...@xen.org> CC: Jan Beulich <jbeul...@suse.com> --- xen/arch/x86/boot/head.S | 3 ++- xen/arch/x86/xen.lds.S | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S index 2d0e56c..b5a1d45 100644 --- a/xen/arch/x86/boot/head.S +++ b/xen/arch/x86/boot/head.S @@ -127,7 +127,8 @@ __start: mov $sym_phys(__bss_end),%ecx sub %edi,%ecx xor %eax,%eax - rep stosb + shr $2,%ecx + rep stosl /* Interrogate CPU extended features via CPUID. */ mov $0x80000000,%eax diff --git a/xen/arch/x86/xen.lds.S b/xen/arch/x86/xen.lds.S index 4699a04..b1926e3 100644 --- a/xen/arch/x86/xen.lds.S +++ b/xen/arch/x86/xen.lds.S @@ -163,6 +163,7 @@ SECTIONS __init_end = .; .bss : { /* BSS */ + . = ALIGN(8); __bss_start = .; *(.bss.stack_aligned) . = ALIGN(PAGE_SIZE); @@ -175,6 +176,7 @@ SECTIONS *(.bss.percpu.read_mostly) . = ALIGN(SMP_CACHE_BYTES); __per_cpu_data_end = .; + . = ALIGN(8); __bss_end = .; } :text _end = . ; -- 1.7.10.4 _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel