On 7/19/23 01:44, Sean Christopherson wrote:
From: Chao Peng <chao.p.p...@linux.intel.com>

Currently in the mmu_notifier invalidation path, the hva range is recorded
and then checked against by mmu_invalidate_retry_hva() in the page fault
handling path. However, for the soon-to-be-introduced private memory, a page
fault may not have an associated hva, so checking the gfn (gpa) makes more
sense.

For existing hva-based shared memory, checking the gfn is expected to work
just as well. The only downside is that when multiple gfns alias a single
hva, the current algorithm of checking multiple ranges could result in a
much larger range being rejected. Such aliasing should be uncommon, so the
impact is expected to be small.
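
To illustrate the consumer side (a sketch only, not part of the patch), the
retry pattern with the gfn-based helper looks roughly like the below. The pfn
helpers and RET_PF_* return codes are existing KVM pieces pulled in purely for
illustration; the real user after this change is is_page_fault_stale() in the
diff:

static int example_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq;
	kvm_pfn_t pfn;

	/* Snapshot the invalidation sequence before the (sleepable) pfn lookup. */
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
		/* The gfn raced with an invalidation; drop the pfn and retry. */
		write_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return RET_PF_RETRY;
	}

	/* ... install the gfn -> pfn mapping here ... */
	write_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return RET_PF_FIXED;
}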

Reviewed-by: Paolo Bonzini <pbonz...@redhat.com>

Suggested-by: Sean Christopherson <sea...@google.com>
Signed-off-by: Chao Peng <chao.p.p...@linux.intel.com>
Reviewed-by: Fuad Tabba <ta...@google.com>
Tested-by: Fuad Tabba <ta...@google.com>
[sean: convert vmx_set_apic_access_page_addr() to gfn-based API]
Signed-off-by: Sean Christopherson <sea...@google.com>
---
  arch/x86/kvm/mmu/mmu.c   | 10 ++++++----
  arch/x86/kvm/vmx/vmx.c   | 11 +++++------
  include/linux/kvm_host.h | 33 +++++++++++++++++++++------------
  virt/kvm/kvm_main.c      | 40 +++++++++++++++++++++++++++++++---------
  4 files changed, 63 insertions(+), 31 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d72f2b20f430..b034727c4cf9 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3087,7 +3087,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
   *
   * There are several ways to safely use this helper:
   *
- * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
+ * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
   *   consuming it.  In this case, mmu_lock doesn't need to be held during the
   *   lookup, but it does need to be held while checking the MMU notifier.
   *
@@ -4400,7 +4400,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
 		return true;
 
 	return fault->slot &&
-              mmu_invalidate_retry_hva(vcpu->kvm, fault->mmu_seq, fault->hva);
+              mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn);
  }
static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
@@ -6301,7 +6301,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 
 	write_lock(&kvm->mmu_lock);
 
-	kvm_mmu_invalidate_begin(kvm, 0, -1ul);
+	kvm_mmu_invalidate_begin(kvm);
+
+	kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end);
 
 	flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
 
@@ -6314,7 +6316,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 	if (flush)
 		kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
 
-	kvm_mmu_invalidate_end(kvm, 0, -1ul);
+	kvm_mmu_invalidate_end(kvm);
 
 	write_unlock(&kvm->mmu_lock);
 }
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0ecf4be2c6af..946380b53cf5 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6729,10 +6729,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
                return;
/*
-        * Grab the memslot so that the hva lookup for the mmu_notifier retry
-        * is guaranteed to use the same memslot as the pfn lookup, i.e. rely
-        * on the pfn lookup's validation of the memslot to ensure a valid hva
-        * is used for the retry check.
+        * Explicitly grab the memslot using KVM's internal slot ID to ensure
+        * KVM doesn't unintentionally grab a userspace memslot.  It _should_
+        * be impossible for userspace to create a memslot for the APIC when
+        * APICv is enabled, but paranoia won't hurt in this case.
         */
        slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT);
        if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
@@ -6757,8 +6757,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
                return;
read_lock(&vcpu->kvm->mmu_lock);
-       if (mmu_invalidate_retry_hva(kvm, mmu_seq,
-                                    gfn_to_hva_memslot(slot, gfn))) {
+       if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
                kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
                read_unlock(&vcpu->kvm->mmu_lock);
                goto out;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b901571ab61e..90a0be261a5c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -788,8 +788,8 @@ struct kvm {
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_invalidate_seq;
        long mmu_invalidate_in_progress;
-       unsigned long mmu_invalidate_range_start;
-       unsigned long mmu_invalidate_range_end;
+       gfn_t mmu_invalidate_range_start;
+       gfn_t mmu_invalidate_range_end;
  #endif
        struct list_head devices;
        u64 manual_dirty_log_protect;
@@ -1371,10 +1371,9 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
  void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
  #endif
-void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
-                             unsigned long end);
-void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
-                           unsigned long end);
+void kvm_mmu_invalidate_begin(struct kvm *kvm);
+void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
+void kvm_mmu_invalidate_end(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
@@ -1940,9 +1939,9 @@ static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
        return 0;
  }
-static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
                                           unsigned long mmu_seq,
-                                          unsigned long hva)
+                                          gfn_t gfn)
  {
        lockdep_assert_held(&kvm->mmu_lock);
        /*
@@ -1951,10 +1950,20 @@ static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
         * that might be being invalidated. Note that it may include some false
         * positives, due to shortcuts when handing concurrent invalidations.
         */
-       if (unlikely(kvm->mmu_invalidate_in_progress) &&
-           hva >= kvm->mmu_invalidate_range_start &&
-           hva < kvm->mmu_invalidate_range_end)
-               return 1;
+       if (unlikely(kvm->mmu_invalidate_in_progress)) {
+               /*
+                * Dropping mmu_lock after bumping mmu_invalidate_in_progress
+                * but before updating the range is a KVM bug.
+                */
+		if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
+				 kvm->mmu_invalidate_range_end == INVALID_GPA))
+                       return 1;
+
+               if (gfn >= kvm->mmu_invalidate_range_start &&
+                   gfn < kvm->mmu_invalidate_range_end)
+                       return 1;
+       }
+
        if (kvm->mmu_invalidate_seq != mmu_seq)
                return 1;
        return 0;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 50aea855eeae..8101b11a13ba 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -518,9 +518,7 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 
 typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 
-typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
-			     unsigned long end);
-
+typedef void (*on_lock_fn_t)(struct kvm *kvm);
 typedef void (*on_unlock_fn_t)(struct kvm *kvm);
struct kvm_mmu_notifier_range {
@@ -617,7 +615,8 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
                                locked = true;
                                KVM_MMU_LOCK(kvm);
                                if (!IS_KVM_NULL_FN(range->on_lock))
-					range->on_lock(kvm, range->start, range->end);
+                                       range->on_lock(kvm);
+
                                if (IS_KVM_NULL_FN(range->handler))
                                        break;
                        }
@@ -721,15 +720,26 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_change_spte_gfn);
 }
 
-void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
-                             unsigned long end)
+void kvm_mmu_invalidate_begin(struct kvm *kvm)
  {
+       lockdep_assert_held_write(&kvm->mmu_lock);
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_invalidate_in_progress++;
+
+       if (likely(kvm->mmu_invalidate_in_progress == 1))
+               kvm->mmu_invalidate_range_start = INVALID_GPA;
+}
+
+void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
+{
+       lockdep_assert_held_write(&kvm->mmu_lock);
+
+       WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
+
        if (likely(kvm->mmu_invalidate_in_progress == 1)) {
                kvm->mmu_invalidate_range_start = start;
                kvm->mmu_invalidate_range_end = end;
@@ -750,6 +760,12 @@ void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
        }
  }
+static bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+       kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
+       return kvm_unmap_gfn_range(kvm, range);
+}
+
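
For completeness, here is a rough sketch (not taken from the patch) of how the
new begin/add/end helpers pair up on an invalidation path; "zap_range" is a
hypothetical stand-in for whatever actually removes the mappings, e.g.
kvm_rmap_zap_gfn_range() in the kvm_zap_gfn_range() hunk above:

static void example_invalidate(struct kvm *kvm, gfn_t start, gfn_t end,
			       void (*zap_range)(struct kvm *kvm, gfn_t start, gfn_t end))
{
	write_lock(&kvm->mmu_lock);

	kvm_mmu_invalidate_begin(kvm);			/* bump in_progress, reset the range */
	kvm_mmu_invalidate_range_add(kvm, start, end);	/* faults on [start, end) now retry */

	zap_range(kvm, start, end);

	kvm_mmu_invalidate_end(kvm);			/* bump mmu_invalidate_seq, drop in_progress */

	write_unlock(&kvm->mmu_lock);
}

Splitting the range out of begin() is what lets the mmu_notifier path add each
overlapping memslot's gfn range (via kvm_mmu_unmap_gfn_range() above) under a
single in-progress count.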
