On 24/02/2016 10:51, Xiao Guangrong wrote:
> Non-leaf shadow pages are always write-protected, so they can be
> users of page track.
> 
> Signed-off-by: Xiao Guangrong <guangrong.x...@linux.intel.com>
> ---
>  arch/x86/kvm/mmu.c | 26 +++++++++++++++++++++-----
>  1 file changed, 21 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 58c067d..74684b2 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -806,11 +806,17 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
>       struct kvm_memory_slot *slot;
>       gfn_t gfn;
>  
> +     kvm->arch.indirect_shadow_pages++;
>       gfn = sp->gfn;
>       slots = kvm_memslots_for_spte_role(kvm, sp->role);
>       slot = __gfn_to_memslot(slots, gfn);
> +
> +     /* the non-leaf shadow pages are kept read-only. */
> +     if (sp->role.level > PT_PAGE_TABLE_LEVEL)
> +             return kvm_slot_page_track_add_page(kvm, slot, gfn,
> +                                                 KVM_PAGE_TRACK_WRITE);
> +
>       kvm_mmu_gfn_disallow_lpage(slot, gfn);
> -     kvm->arch.indirect_shadow_pages++;
>  }
>  
>  static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
> @@ -819,11 +825,15 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
>       struct kvm_memory_slot *slot;
>       gfn_t gfn;
>  
> +     kvm->arch.indirect_shadow_pages--;
>       gfn = sp->gfn;
>       slots = kvm_memslots_for_spte_role(kvm, sp->role);
>       slot = __gfn_to_memslot(slots, gfn);
> +     if (sp->role.level > PT_PAGE_TABLE_LEVEL)
> +             return kvm_slot_page_track_remove_page(kvm, slot, gfn,
> +                                                    KVM_PAGE_TRACK_WRITE);
> +
>       kvm_mmu_gfn_allow_lpage(slot, gfn);
> -     kvm->arch.indirect_shadow_pages--;
>  }
>  
>  static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
> @@ -2132,12 +2142,18 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
>       hlist_add_head(&sp->hash_link,
>               &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
>       if (!direct) {
> -             if (rmap_write_protect(vcpu, gfn))
> +             /*
> +              * We should do write protection before syncing pages,
> +              * otherwise the content of the synced shadow page may
> +              * be inconsistent with the guest page table.
> +              */
> +             account_shadowed(vcpu->kvm, sp);
> +
> +             if (level == PT_PAGE_TABLE_LEVEL &&
> +                   rmap_write_protect(vcpu, gfn))
>                       kvm_flush_remote_tlbs(vcpu->kvm);
>               if (level > PT_PAGE_TABLE_LEVEL && need_sync)
>                       kvm_sync_pages(vcpu, gfn);
> -
> -             account_shadowed(vcpu->kvm, sp);
>       }
>       sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
>       clear_page(sp->spt);
> 
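For reference, the first hunk above leaves account_shadowed() reading
roughly as follows; this is a sketch reconstructed from the diff (the
kvm_memslots declaration sits above the quoted context and is assumed
here):

        static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
        {
                struct kvm_memslots *slots;
                struct kvm_memory_slot *slot;
                gfn_t gfn;

                kvm->arch.indirect_shadow_pages++;
                gfn = sp->gfn;
                slots = kvm_memslots_for_spte_role(kvm, sp->role);
                slot = __gfn_to_memslot(slots, gfn);

                /*
                 * Non-leaf shadow pages are kept read-only, so register
                 * them with the write-tracking machinery instead of
                 * disallowing large pages for the gfn.
                 */
                if (sp->role.level > PT_PAGE_TABLE_LEVEL)
                        return kvm_slot_page_track_add_page(kvm, slot, gfn,
                                                            KVM_PAGE_TRACK_WRITE);

                kvm_mmu_gfn_disallow_lpage(slot, gfn);
        }

unaccount_shadowed() mirrors this, calling
kvm_slot_page_track_remove_page() for non-leaf pages and
kvm_mmu_gfn_allow_lpage() otherwise.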

Hi,

I'm applying this series with just a very simple change on top of this patch:

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 74684b2b7e69..fe03d2a1d4d3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2148,10 +2148,10 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                 * be inconsistent with the guest page table.
                 */
                account_shadowed(vcpu->kvm, sp);
-
                if (level == PT_PAGE_TABLE_LEVEL &&
                      rmap_write_protect(vcpu, gfn))
                        kvm_flush_remote_tlbs(vcpu->kvm);
+
                if (level > PT_PAGE_TABLE_LEVEL && need_sync)
                        kvm_sync_pages(vcpu, gfn);
        }

Yes, it's just moving around an empty line. :)
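
With both the patch and this fixup applied, the !direct branch of
kvm_mmu_get_page() ends up reading roughly like this (a sketch
assembled from the hunks above, not the exact tree):

        if (!direct) {
                /*
                 * Write protection must be done before syncing pages,
                 * otherwise the content of a synced shadow page may be
                 * inconsistent with the guest page table.
                 */
                account_shadowed(vcpu->kvm, sp);
                if (level == PT_PAGE_TABLE_LEVEL &&
                    rmap_write_protect(vcpu, gfn))
                        kvm_flush_remote_tlbs(vcpu->kvm);

                if (level > PT_PAGE_TABLE_LEVEL && need_sync)
                        kvm_sync_pages(vcpu, gfn);
        }

That is, rmap-based write protection and the TLB flush now apply only
to last-level pages, since higher-level pages are already kept
write-protected by the page-track hook in account_shadowed().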

Paolo
