On 02/10/2012 02:29 PM, Takuya Yoshikawa wrote:

> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 1561028..69d06f5 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -682,6 +682,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t 
> gva)
>       mmu_topup_memory_caches(vcpu);
> 
>       spin_lock(&vcpu->kvm->mmu_lock);
> +
>       for_each_shadow_entry(vcpu, gva, iterator) {
>               level = iterator.level;
>               sptep = iterator.sptep;
> @@ -697,8 +698,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t 
> gva)
>                       pte_gpa = FNAME(get_level1_sp_gpa)(sp);
>                       pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
> 
> -                     if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
> -                             kvm_flush_remote_tlbs(vcpu->kvm);
> +                     mmu_page_zap_pte(vcpu->kvm, sp, sptep);
> 
>                       if (!rmap_can_add(vcpu))
>                               break;
> @@ -713,6 +713,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t 
> gva)
>               if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
>                       break;
>       }
> +
> +     kvm_flush_remote_tlbs(vcpu->kvm);
>       spin_unlock(&vcpu->kvm->mmu_lock);


It is obviously wrong; I do not think all TLBs always need to be flushed...

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to