On Wed, Oct 26, 2022 at 01:46:20PM -0700, Isaku Yamahata wrote:
> On Tue, Oct 25, 2022 at 11:13:42PM +0800,
> Chao Peng <chao.p.p...@linux.intel.com> wrote:
> 
> > When private/shared memory is mixed within a large page, the lpage_info
> > may not be accurate and should be updated with this mixed info. A large
> > page that has mixed pages can't really be mapped as a large page, since
> > its private/shared pages come from different physical memory.
> > 
> > Update lpage_info when the private/shared memory attribute is changed.
> > If both private and shared pages are present within a large page region,
> > it can't be mapped as a large page. It's a bit challenging to track the
> > mixed info in a 'count'-like variable, so this patch instead reserves a
> > bit in 'disallow_lpage' to indicate that a large page has mixed
> > private/shared pages.
> > 
> > Signed-off-by: Chao Peng <chao.p.p...@linux.intel.com>
> > ---
> >  arch/x86/include/asm/kvm_host.h |   8 +++
> >  arch/x86/kvm/mmu/mmu.c          | 112 +++++++++++++++++++++++++++++++-
> >  arch/x86/kvm/x86.c              |   2 +
> >  include/linux/kvm_host.h        |  19 ++++++
> >  virt/kvm/kvm_main.c             |  16 +++--
> >  5 files changed, 152 insertions(+), 5 deletions(-)
> > 
> ...
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index 33b1aec44fb8..67a9823a8c35 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> ...
> > @@ -6910,3 +6915,108 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
> >     if (kvm->arch.nx_lpage_recovery_thread)
> >             kthread_stop(kvm->arch.nx_lpage_recovery_thread);
> >  }
> > +
> > +static inline bool linfo_is_mixed(struct kvm_lpage_info *linfo)
> > +{
> > +   return linfo->disallow_lpage & KVM_LPAGE_PRIVATE_SHARED_MIXED;
> > +}
> > +
> > +static inline void linfo_update_mixed(struct kvm_lpage_info *linfo, bool mixed)
> > +{
> > +   if (mixed)
> > +           linfo->disallow_lpage |= KVM_LPAGE_PRIVATE_SHARED_MIXED;
> > +   else
> > +           linfo->disallow_lpage &= ~KVM_LPAGE_PRIVATE_SHARED_MIXED;
> > +}
> > +
> > +static bool mem_attr_is_mixed_2m(struct kvm *kvm, unsigned int attr,
> > +                            gfn_t start, gfn_t end)
> > +{
> > +   XA_STATE(xas, &kvm->mem_attr_array, start);
> > +   gfn_t gfn = start;
> > +   void *entry;
> > +   bool shared = attr == KVM_MEM_ATTR_SHARED;
> > +   bool mixed = false;
> > +
> > +   rcu_read_lock();
> > +   entry = xas_load(&xas);
> > +   while (gfn < end) {
> > +           if (xas_retry(&xas, entry))
> > +                   continue;
> > +
> > +           KVM_BUG_ON(gfn != xas.xa_index, kvm);
> > +
> > +           if ((entry && !shared) || (!entry && shared)) {
> > +                   mixed = true;
> > +                   goto out;
> 
> nitpick: goto isn't needed. break should work.

Thanks.
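
Something like this, then (a minimal, untested sketch; with 'break' the
'out' label goes away and the unlock/return simply follow the loop):

	rcu_read_lock();
	entry = xas_load(&xas);
	while (gfn < end) {
		if (xas_retry(&xas, entry))
			continue;

		KVM_BUG_ON(gfn != xas.xa_index, kvm);

		/* An entry state disagreeing with the requested
		 * attribute means this 2M range is mixed. */
		if ((entry && !shared) || (!entry && shared)) {
			mixed = true;
			break;
		}

		entry = xas_next(&xas);
		gfn++;
	}
	rcu_read_unlock();
	return mixed;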

> 
> > +           }
> > +
> > +           entry = xas_next(&xas);
> > +           gfn++;
> > +   }
> > +out:
> > +   rcu_read_unlock();
> > +   return mixed;
> > +}
> > +
> > +static bool mem_attr_is_mixed(struct kvm *kvm, struct kvm_memory_slot *slot,
> > +                         int level, unsigned int attr,
> > +                         gfn_t start, gfn_t end)
> > +{
> > +   unsigned long gfn;
> > +   void *entry;
> > +
> > +   if (level == PG_LEVEL_2M)
> > +           return mem_attr_is_mixed_2m(kvm, attr, start, end);
> > +
> > +   entry = xa_load(&kvm->mem_attr_array, start);
> > +   for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
> > +           if (linfo_is_mixed(lpage_info_slot(gfn, slot, level - 1)))
> > +                   return true;
> > +           if (xa_load(&kvm->mem_attr_array, gfn) != entry)
> > +                   return true;
> > +   }
> > +   return false;
> > +}
> > +
> > +void kvm_arch_update_mem_attr(struct kvm *kvm, struct kvm_memory_slot *slot,
> > +                         unsigned int attr, gfn_t start, gfn_t end)
> > +{
> > +   unsigned long lpage_start, lpage_end;
> > +   unsigned long gfn, pages, mask;
> > +   int level;
> > +
> > +   WARN_ONCE(!(attr & (KVM_MEM_ATTR_PRIVATE | KVM_MEM_ATTR_SHARED)),
> > +                   "Unsupported mem attribute.\n");
> > +
> > +   /*
> > +    * The sequence matters here: we update the higher level based on
> > +    * the lower level's scanning result.
> > +    */
> > +   for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
> > +           pages = KVM_PAGES_PER_HPAGE(level);
> > +           mask = ~(pages - 1);
> 
> nitpick: KVM_HPAGE_MASK(level).  Maybe matter of preference.

Yes, I hadn't noticed that a KVM_HPAGE_MASK is already defined. I have no
strong preference here; since I already have KVM_PAGES_PER_HPAGE(level),
getting the mask from it is straightforward.

A plain KVM_HPAGE_MASK(level) will not give me what I need anyway, since
what gets masked here is a gfn; KVM_HPAGE_MASK(level) >> PAGE_SHIFT would
be the right equivalent.
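
As a quick sanity check (a standalone demo, with the relevant kvm_host.h
macro definitions copied in; the two masks differ only in bits above the
maximum possible gfn, which a valid gfn never has set):

	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define PAGE_SIZE		(1UL << PAGE_SHIFT)
	#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
	#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
	#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
	#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
	#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

	int main(void)
	{
		int level = 2;	/* PG_LEVEL_2M */

		/* What the patch computes from the page count... */
		unsigned long a = ~(KVM_PAGES_PER_HPAGE(level) - 1);
		/* ...and KVM_HPAGE_MASK() shifted into gfn space. */
		unsigned long b = KVM_HPAGE_MASK(level) >> PAGE_SHIFT;

		/* Prints fffffffffffffe00 and ffffffffffe00: the two
		 * agree in every bit a valid gfn can have set. */
		printf("%lx\n%lx\n", a, b);
		return 0;
	}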

Chao
> 
> 
> > +           lpage_start = max(start & mask, slot->base_gfn);
> > +           lpage_end = (end - 1) & mask;
> > +
> > +           /*
> > +            * We only need to scan the head and tail pages; for the middle
> > +            * pages we know they are not mixed.
> > +            */
> > +           linfo_update_mixed(lpage_info_slot(lpage_start, slot, level),
> > +                              mem_attr_is_mixed(kvm, slot, level, attr,
> > +                                                lpage_start, start));
> > +
> > +           if (lpage_start == lpage_end)
> > +                   return;
> > +
> > +           for (gfn = lpage_start + pages; gfn < lpage_end; gfn += pages)
> > +                   linfo_update_mixed(lpage_info_slot(gfn, slot, level),
> > +                                      false);
> > +
> > +           linfo_update_mixed(lpage_info_slot(lpage_end, slot, level),
> > +                              mem_attr_is_mixed(kvm, slot, level, attr,
> > +                                                end, lpage_end + pages));
> > +   }
> > +}
> 
> -- 
> Isaku Yamahata <isaku.yamah...@gmail.com>
