On Wed, Sep 23, 2009 at 09:47:18PM +0300, Izik Eidus wrote:
> this is needed for kvm if it wants ksm to directly map pages into its
> shadow page tables.
> 
> Signed-off-by: Izik Eidus <[email protected]>
> ---
>  arch/x86/include/asm/kvm_host.h |    1 +
>  arch/x86/kvm/mmu.c              |   62 +++++++++++++++++++++++++++++++++-----
>  virt/kvm/kvm_main.c             |   14 +++++++++
>  3 files changed, 68 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 3be0004..d838922 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -796,6 +796,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
>  #define KVM_ARCH_WANT_MMU_NOTIFIER
>  int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
>  int kvm_age_hva(struct kvm *kvm, unsigned long hva);
> +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
>  int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
>  int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
>  int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 5cd8b4e..ceec065 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -748,7 +748,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
>       return write_protected;
>  }
>  
> -static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
> +static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
>  {
>       u64 *spte;
>       int need_tlb_flush = 0;
> @@ -763,8 +763,45 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long *rmapp)
>       return need_tlb_flush;
>  }
>  
> -static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
> -                       int (*handler)(struct kvm *kvm, unsigned long *rmapp))
> +static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
> +{
> +     int need_flush = 0;
> +     u64 *spte, new_spte;
> +     pte_t *ptep = (pte_t *)data;
> +     pfn_t new_pfn;
> +
> +     WARN_ON(pte_huge(*ptep));
> +     new_pfn = pte_pfn(*ptep);
> +     spte = rmap_next(kvm, rmapp, NULL);
> +     while (spte) {
> +             BUG_ON(!is_shadow_present_pte(*spte));
> +             rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
> +             need_flush = 1;
> +             if (pte_write(*ptep)) {
> +                     rmap_remove(kvm, spte);
> +                     __set_spte(spte, shadow_trap_nonpresent_pte);
> +                     spte = rmap_next(kvm, rmapp, NULL);
> +             } else {
> +                     new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
> +                     new_spte |= new_pfn << PAGE_SHIFT;

                        new_spte |= (u64)new_pfn << PAGE_SHIFT;

Without the cast, the shift is performed in the width of pfn_t, so on
32-bit hosts the high bits of the physical address can be truncated
before the result is widened to u64.

Otherwise looks good to me.

> +                     new_spte &= ~PT_WRITABLE_MASK;
> +                     new_spte &= ~SPTE_HOST_WRITEABLE;
> +                     if (is_writeble_pte(*spte))
> +                             kvm_set_pfn_dirty(spte_to_pfn(*spte));
> +                     __set_spte(spte, new_spte);
> +                     spte = rmap_next(kvm, rmapp, spte);
> +             }
> +     }
> +     if (need_flush)
> +             kvm_flush_remote_tlbs(kvm);
> +
> +     return 0;
> +}
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to