Hi Stefano,
On 25/05/18 20:18, Stefano Stabellini wrote:
On Tue, 22 May 2018, Julien Grall wrote:
The function ARM_SMCCC_ARCH_WORKAROUND_2 will be called by the guest for
enabling/disabling the ssbd mitigation. So we want the handling to
be as fast as possible.
The new sequence will forward guest's ARCH_WORKAROUND_2 call to EL3 and
also track the state of the workaround per-vCPU.
Note that since we need to execute branches, this always executes after
the spectre-v2 mitigation.
This code is based on KVM counterpart "arm64: KVM: Handle guest's
ARCH_WORKAROUND_2 requests" written by Marc Zyngier.
This is part of XSA-263.
Signed-off-by: Julien Grall <julien.gr...@arm.com>
I think the patch works as intended.
---
xen/arch/arm/arm64/asm-offsets.c | 2 ++
xen/arch/arm/arm64/entry.S | 43 +++++++++++++++++++++++++++++++++++++++-
xen/arch/arm/cpuerrata.c | 18 +++++++++++++++++
3 files changed, 62 insertions(+), 1 deletion(-)
diff --git a/xen/arch/arm/arm64/asm-offsets.c b/xen/arch/arm/arm64/asm-offsets.c
index ce24e44473..f5c696d092 100644
--- a/xen/arch/arm/arm64/asm-offsets.c
+++ b/xen/arch/arm/arm64/asm-offsets.c
@@ -22,6 +22,7 @@
void __dummy__(void)
{
OFFSET(UREGS_X0, struct cpu_user_regs, x0);
+ OFFSET(UREGS_X1, struct cpu_user_regs, x1);
OFFSET(UREGS_LR, struct cpu_user_regs, lr);
OFFSET(UREGS_SP, struct cpu_user_regs, sp);
@@ -45,6 +46,7 @@ void __dummy__(void)
BLANK();
DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
+ OFFSET(CPUINFO_flags, struct cpu_info, flags);
OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index e2344e565f..8e25ff3997 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -1,4 +1,6 @@
#include <asm/asm_defns.h>
+#include <asm/current.h>
+#include <asm/macros.h>
#include <asm/regs.h>
#include <asm/alternative.h>
#include <asm/smccc.h>
@@ -241,7 +243,7 @@ guest_sync:
* be encoded as an immediate for cmp.
*/
eor w0, w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
- cbnz w0, guest_sync_slowpath
+ cbnz w0, check_wa2
/*
* Clobber both x0 and x1 to prevent leakage. Note that thanks
@@ -250,6 +252,45 @@ guest_sync:
mov x1, xzr
eret
+check_wa2:
+ /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
+ eor w0, w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
We come to check_wa2 after checking on #ARM_SMCCC_ARCH_WORKAROUND_1_FID,
so maybe we can skip this?
This is necessary. w0 contains "guest x0" xor
"ARM_SMCCC_ARCH_WORKAROUND_1_FID". So we first need to undo that
xor to recover "guest x0".
Note, it would be possible to combine the 2 xor. Something like:
eor w0, w0, #(ARM_SMCCC_ARCH_WORKAROUND_1_FID ^
ARM_SMCCC_ARCH_WORKAROUND_2_FID).
Which version do you prefer?
+ eor w0, w0, #ARM_SMCCC_ARCH_WORKAROUND_2_FID
+ cbnz w0, guest_sync_slowpath
+#ifdef CONFIG_ARM_SSBD
+alternative_cb arm_enable_wa2_handling
+ b wa2_end
+alternative_cb_end
+ /* Sanitize the argument */
+ mov x0, #-(UREGS_kernel_sizeof - UREGS_X1) /* x0 := offset of
guest's x1 on the stack */
+ ldr x1, [sp, x0] /* Load guest's x1 */
+ cmp w1, wzr
+ cset x1, ne
+
+ /*
+ * Update the guest flag. At this stage sp point after the field
+ * guest_cpu_user_regs in cpu_info.
+ */
+ adr_cpu_info x2
+ ldr x0, [x2, #CPUINFO_flags]
+ bfi x0, x1, #CPUINFO_WORKAROUND_2_FLAG_SHIFT, #1
+ str x0, [x2, #CPUINFO_flags]
+
+ /* Check that we actually need to perform the call */
+ ldr_this_cpu x0, ssbd_callback_required, x2
+ cbz x0, wa2_end
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2_FID
+ smc #0
Shouldn't we make the call only if get_cpu_info()->flags changed?
There is no harm in calling ARCH_WORKAROUND_2 if the flag didn't change.
However, the guest should already avoid making the call when it is not
necessary. So that's not a common case we need to care about.
Cheers,
--
Julien Grall
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel