From: Sean Christopherson <sea...@google.com>

A VMM may send a signal to its threads while they've entered KVM_RUN. If
that thread happens to be the one creating the NX huge page recovery
vhost task, vhost_task_create() will fail with -ERESTARTNOINTR. The
creation needs to be retried in that case, so make call_once() mark the
once as completed only if the callback succeeds, and return the
callback's error to the caller otherwise.

Based-on-a-patch-by: Sean Christopherson <sea...@google.com>
[implemented caller side, commit log]
Signed-off-by: Keith Busch <kbu...@kernel.org>
---
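
Note for reviewers, not part of the change: a minimal sketch of the
caller-side contract this enables, using hypothetical example_* names
(initialization of the once via the header's init macro is omitted).
A failing callback leaves the state at ONCE_NOT_STARTED, so the caller
can simply propagate the error and a later call retries the callback.

#include <linux/call_once.h>
#include <linux/errno.h>

struct example_vm {
        struct once once;
};

static int example_start(struct once *once)
{
        /* e.g. create and start a vhost task; may fail with -ERESTARTNOINTR */
        return 0;
}

static int example_post_init(struct example_vm *vm)
{
        /*
         * Propagate the callback's error; if the ioctl is retried,
         * call_once() will invoke example_start() again because the
         * state was reset to ONCE_NOT_STARTED on failure.
         */
        return call_once(&vm->once, example_start);
}
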
 arch/x86/kvm/mmu/mmu.c    | 10 ++++------
 include/linux/call_once.h | 16 +++++++++++-----
 2 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 18ca1ea6dc240..8160870398b90 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7460,7 +7460,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
        return true;
 }
 
-static void kvm_mmu_start_lpage_recovery(struct once *once)
+static int kvm_mmu_start_lpage_recovery(struct once *once)
 {
        struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
        struct kvm *kvm = container_of(ka, struct kvm, arch);
@@ -7472,12 +7472,13 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
                                      kvm, "kvm-nx-lpage-recovery");
 
        if (IS_ERR(nx_thread))
-               return;
+               return PTR_ERR(nx_thread);
 
        vhost_task_start(nx_thread);
 
        /* Make the task visible only once it is fully started. */
        WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
+       return 0;
 }
 
 int kvm_mmu_post_init_vm(struct kvm *kvm)
@@ -7485,10 +7486,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
        if (nx_hugepage_mitigation_hard_disabled)
                return 0;
 
-       call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
-       if (!kvm->arch.nx_huge_page_recovery_thread)
-               return -ENOMEM;
-       return 0;
+       return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
 }
 
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
diff --git a/include/linux/call_once.h b/include/linux/call_once.h
index 6261aa0b3fb00..ddcfd91493eaa 100644
--- a/include/linux/call_once.h
+++ b/include/linux/call_once.h
@@ -26,20 +26,26 @@ do {                                                        \
        __once_init((once), #once, &__key);                             \
 } while (0)
 
-static inline void call_once(struct once *once, void (*cb)(struct once *))
+static inline int call_once(struct once *once, int (*cb)(struct once *))
 {
+       int r;
+
         /* Pairs with atomic_set_release() below.  */
         if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
-                return;
+               return 0;
 
         guard(mutex)(&once->lock);
         WARN_ON(atomic_read(&once->state) == ONCE_RUNNING);
         if (atomic_read(&once->state) != ONCE_NOT_STARTED)
-                return;
+                return -EINVAL;
 
         atomic_set(&once->state, ONCE_RUNNING);
-        cb(once);
-        atomic_set_release(&once->state, ONCE_COMPLETED);
+       r = cb(once);
+       if (r)
+               atomic_set(&once->state, ONCE_NOT_STARTED);
+       else
+               atomic_set_release(&once->state, ONCE_COMPLETED);
+       return r;
 }
 
 #endif /* _LINUX_CALL_ONCE_H */
-- 
2.43.5

