From: Will Deacon <w...@kernel.org>

We will soon need to synchronise multiple CPUs in the hyp text at EL2.
The qspinlock-based locking used by the host is overkill for this purpose
and requires a working "percpu" implementation for the MCS nodes.

Implement a simple ticket locking scheme based heavily on the code removed
by c11090474d70 ("arm64: locking: Replace ticket lock implementation with
qspinlock").

[ qperret: removed the __KVM_NVHE_HYPERVISOR__ build-time check from
  spinlock.h ]
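
Once this lands, EL2 code can take the lock either directly through the hyp_spinlock_t API or via the spin_lock() aliases in nvhe/util.h; a hypothetical call site might look like this (the state being protected is made up purely for illustration):

  #include <nvhe/spinlock.h>

  /* Hypothetical EL2-private state guarded by the new lock. */
  static hyp_spinlock_t hyp_state_lock;
  static unsigned long hyp_state_counter;

  static void hyp_state_init(void)
  {
  	hyp_spin_lock_init(&hyp_state_lock);
  }

  static void hyp_state_bump(void)
  {
  	hyp_spin_lock(&hyp_state_lock);
  	hyp_state_counter++;		/* critical section */
  	hyp_spin_unlock(&hyp_state_lock);
  }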

Signed-off-by: Will Deacon <w...@kernel.org>
Signed-off-by: Quentin Perret <qper...@google.com>
---
 arch/arm64/kvm/hyp/include/nvhe/spinlock.h | 95 ++++++++++++++++++++++
 arch/arm64/kvm/hyp/include/nvhe/util.h     | 25 ++++++
 2 files changed, 120 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/include/nvhe/spinlock.h
 create mode 100644 arch/arm64/kvm/hyp/include/nvhe/util.h

diff --git a/arch/arm64/kvm/hyp/include/nvhe/spinlock.h b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
new file mode 100644
index 000000000000..bbfe2cbd9f62
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * A stand-alone ticket spinlock implementation, primarily for use by the
+ * non-VHE hypervisor code running at EL2.
+ *
+ * Copyright (C) 2020 Google LLC
+ * Author: Will Deacon <w...@kernel.org>
+ *
+ * Heavily based on the implementation removed by c11090474d70 which was:
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __ARM64_KVM_HYP_SPINLOCK_H__
+#define __ARM64_KVM_HYP_SPINLOCK_H__
+
+#include <asm/alternative.h>
+
+typedef union hyp_spinlock {
+       u32     __val;
+       struct {
+#ifdef __AARCH64EB__
+               u16 next, owner;
+#else
+               u16 owner, next;
+#endif
+       };
+} hyp_spinlock_t;
+
+#define hyp_spin_lock_init(l)                                          \
+do {                                                                   \
+       *(l) = (hyp_spinlock_t){ .__val = 0 };                          \
+} while (0)
+
+static inline void hyp_spin_lock(hyp_spinlock_t *lock)
+{
+       u32 tmp;
+       hyp_spinlock_t lockval, newval;
+
+       asm volatile(
+       /* Atomically increment the next ticket. */
+       ALTERNATIVE(
+       /* LL/SC */
+"      prfm    pstl1strm, %3\n"
+"1:    ldaxr   %w0, %3\n"
+"      add     %w1, %w0, #(1 << 16)\n"
+"      stxr    %w2, %w1, %3\n"
+"      cbnz    %w2, 1b\n",
+       /* LSE atomics */
+"      .arch_extension lse\n"
+"      mov     %w2, #(1 << 16)\n"
+"      ldadda  %w2, %w0, %3\n"
+       __nops(3),
+       ARM64_HAS_LSE_ATOMICS)
+
+       /* Did we get the lock? */
+"      eor     %w1, %w0, %w0, ror #16\n"
+"      cbz     %w1, 3f\n"
+       /*
+        * No: spin on the owner. Send a local event to avoid missing an
+        * unlock before the exclusive load.
+        */
+"      sevl\n"
+"2:    wfe\n"
+"      ldaxrh  %w2, %4\n"
+"      eor     %w1, %w2, %w0, lsr #16\n"
+"      cbnz    %w1, 2b\n"
+       /* We got the lock. Critical section starts here. */
+"3:"
+       : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
+       : "Q" (lock->owner)
+       : "memory");
+}
+
+static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
+{
+       u64 tmp;
+
+       asm volatile(
+       ALTERNATIVE(
+       /* LL/SC */
+       "       ldrh    %w1, %0\n"
+       "       add     %w1, %w1, #1\n"
+       "       stlrh   %w1, %0",
+       /* LSE atomics */
+       "       .arch_extension lse\n"
+       "       mov     %w1, #1\n"
+       "       staddlh %w1, %0\n"
+       __nops(1),
+       ARM64_HAS_LSE_ATOMICS)
+       : "=Q" (lock->owner), "=&r" (tmp)
+       :
+       : "memory");
+}
+
+#endif /* __ARM64_KVM_HYP_SPINLOCK_H__ */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/util.h b/arch/arm64/kvm/hyp/include/nvhe/util.h
new file mode 100644
index 000000000000..9c58cc436a83
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/util.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Standalone re-implementations of kernel interfaces for use at EL2.
+ * Copyright (C) 2020 Google LLC
+ * Author: Will Deacon <w...@kernel.org>
+ */
+
+#ifndef __KVM_NVHE_HYPERVISOR__
+#error "Attempt to include nVHE code outside of EL2 object"
+#endif
+
+#ifndef __ARM64_KVM_NVHE_UTIL_H__
+#define __ARM64_KVM_NVHE_UTIL_H__
+
+/* Locking (hyp_spinlock_t) */
+#include <nvhe/spinlock.h>
+
+#undef spin_lock_init
+#define spin_lock_init                         hyp_spin_lock_init
+#undef spin_lock
+#define spin_lock                              hyp_spin_lock
+#undef spin_unlock
+#define spin_unlock                            hyp_spin_unlock
+
+#endif /* __ARM64_KVM_NVHE_UTIL_H__ */
-- 
2.29.2.299.gdc1121823c-goog
