Nicholas Piggin <npig...@gmail.com> writes:

> This removes the fixed-sized kvm->arch.nested_guests array.
>
> Signed-off-by: Nicholas Piggin <npig...@gmail.com>
> ---

Reviewed-by: Fabiano Rosas <faro...@linux.ibm.com>
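
For anyone skimming: the old scheme was a fixed
nested_guests[KVM_MAX_NESTED_GUESTS] array plus a max_nested_lpid
watermark; this swaps it for an IDR keyed by lpid, so sparse L1 lpids
no longer cost a full array. A minimal sketch of the structure being
adopted, using hypothetical foo_* names rather than the patch's code:

    #include <linux/idr.h>

    struct foo;                          /* stand-in for kvm_nested_guest */
    static DEFINE_IDR(foo_idr);          /* maps int id -> struct foo * */

    /* Request exactly 'id': the range [id, id + 1) admits one value.
     * Returns id on success, -ENOSPC if taken, -ENOMEM on failure. */
    static int foo_install(int id, struct foo *p)
    {
            return idr_alloc(&foo_idr, p, id, id + 1, GFP_KERNEL);
    }

    /* Radix-tree lookup: no fixed bound on id, NULL when absent. */
    static struct foo *foo_lookup(int id)
    {
            return idr_find(&foo_idr, id);
    }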

>  arch/powerpc/include/asm/kvm_host.h |   3 +-
>  arch/powerpc/kvm/book3s_hv_nested.c | 110 +++++++++++++++-------------
>  2 files changed, 59 insertions(+), 54 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
> index d9bf60bf0816..5fd0564e5c94 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -326,8 +326,7 @@ struct kvm_arch {
>       struct list_head uvmem_pfns;
>       struct mutex mmu_setup_lock;    /* nests inside vcpu mutexes */
>       u64 l1_ptcr;
> -     int max_nested_lpid;
> -     struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
> +     struct idr kvm_nested_guest_idr;
>       /* This array can grow quite large, keep it at the end */
>       struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
>  #endif
> diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
> index 9d373f8963ee..1eff969b095c 100644
> --- a/arch/powerpc/kvm/book3s_hv_nested.c
> +++ b/arch/powerpc/kvm/book3s_hv_nested.c
> @@ -521,11 +521,6 @@ static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
>       kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
>  }
>
> -void kvmhv_vm_nested_init(struct kvm *kvm)
> -{
> -     kvm->arch.max_nested_lpid = -1;
> -}
> -
>  /*
>   * Handle the H_SET_PARTITION_TABLE hcall.
>   * r4 = guest real address of partition table + log_2(size) - 12
> @@ -660,6 +655,35 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
>       kvmhv_set_nested_ptbl(gp);
>  }
>
> +void kvmhv_vm_nested_init(struct kvm *kvm)
> +{
> +     idr_init(&kvm->arch.kvm_nested_guest_idr);
> +}
> +
> +static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
> +{
> +     return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
> +}
> +
> +static bool __prealloc_nested(struct kvm *kvm, int lpid)
> +{
> +     if (idr_alloc(&kvm->arch.kvm_nested_guest_idr,
> +                             NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
> +             return false;
> +     return true;
> +}
> +
> +static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
> +{
> +     if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
> +             WARN_ON(1);
> +}
> +
> +static void __remove_nested(struct kvm *kvm, int lpid)
> +{
> +     idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
> +}
> +
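
A note on why insertion is split across two helpers: idr_alloc() with
GFP_KERNEL may sleep, so it cannot run under kvm->mmu_lock, whereas
idr_replace() never allocates and is safe under a spinlock. Hence the
slot is reserved with a NULL pointer outside the lock and the real
pointer is published under it. A hedged sketch of that idiom, assuming
hypothetical my_idr/my_lock names (the pattern, not the patch):

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(my_idr);
    static DEFINE_SPINLOCK(my_lock);

    static int install_two_phase(int id, void *new)
    {
            void *old;

            /* Phase 1, no lock held: reserve the slot with NULL.
             * GFP_KERNEL may sleep while growing the radix tree. */
            if (idr_alloc(&my_idr, NULL, id, id + 1, GFP_KERNEL) != id)
                    return -EBUSY;

            /* Phase 2, under the lock: publish the real pointer.
             * idr_replace() cannot fail with -ENOMEM here. */
            spin_lock(&my_lock);
            old = idr_replace(&my_idr, new, id);
            spin_unlock(&my_lock);

            WARN_ON(old != NULL);   /* reservation should be intact */
            return 0;
    }

idr_replace() returns the old pointer (the NULL reservation here) or an
ERR_PTR if the id was never allocated, both of which the WARN_ON(1) in
__add_nested() above catches.
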
>  static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
>  {
>       struct kvm_nested_guest *gp;
> @@ -720,13 +744,8 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
>       long ref;
>
>       spin_lock(&kvm->mmu_lock);
> -     if (gp == kvm->arch.nested_guests[lpid]) {
> -             kvm->arch.nested_guests[lpid] = NULL;
> -             if (lpid == kvm->arch.max_nested_lpid) {
> -                     while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
> -                             ;
> -                     kvm->arch.max_nested_lpid = lpid;
> -             }
> +     if (gp == __find_nested(kvm, lpid)) {
> +             __remove_nested(kvm, lpid);
>               --gp->refcnt;
>       }
>       ref = gp->refcnt;
> @@ -743,24 +762,22 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
>   */
>  void kvmhv_release_all_nested(struct kvm *kvm)
>  {
> -     int i;
> +     int lpid;
>       struct kvm_nested_guest *gp;
>       struct kvm_nested_guest *freelist = NULL;
>       struct kvm_memory_slot *memslot;
>       int srcu_idx, bkt;
>
>       spin_lock(&kvm->mmu_lock);
> -     for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
> -             gp = kvm->arch.nested_guests[i];
> -             if (!gp)
> -                     continue;
> -             kvm->arch.nested_guests[i] = NULL;
> +     idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
> +             __remove_nested(kvm, lpid);
>               if (--gp->refcnt == 0) {
>                       gp->next = freelist;
>                       freelist = gp;
>               }
>       }
> -     kvm->arch.max_nested_lpid = -1;
> +     idr_destroy(&kvm->arch.kvm_nested_guest_idr);
> +     /* idr is empty and may be reused at this point */
>       spin_unlock(&kvm->mmu_lock);
>       while ((gp = freelist) != NULL) {
>               freelist = gp->next;
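
On this teardown loop: idr_for_each_entry() advances via
idr_get_next(), so removing the entry currently being visited is fine,
and idr_destroy() afterwards frees the IDR's internal radix-tree nodes,
leaving it empty and reusable, which matches the added comment. A
fragment of the drain-and-destroy pattern, reusing the hypothetical
foo_idr from above:

            struct foo *p;
            int id;

            /* Removing the current entry mid-iteration is safe. */
            idr_for_each_entry(&foo_idr, p, id) {
                    idr_remove(&foo_idr, id);
                    kfree(p);       /* needs <linux/slab.h> */
            }

            /* Free interior nodes; the IDR may be reused afterwards. */
            idr_destroy(&foo_idr);
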
> @@ -797,7 +814,7 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
>               return NULL;
>
>       spin_lock(&kvm->mmu_lock);
> -     gp = kvm->arch.nested_guests[l1_lpid];
> +     gp = __find_nested(kvm, l1_lpid);
>       if (gp)
>               ++gp->refcnt;
>       spin_unlock(&kvm->mmu_lock);
> @@ -808,17 +825,19 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
>       newgp = kvmhv_alloc_nested(kvm, l1_lpid);
>       if (!newgp)
>               return NULL;
> +
> +     if (!__prealloc_nested(kvm, l1_lpid)) {
> +             kvmhv_release_nested(newgp);
> +             return NULL;
> +     }
> +
>       spin_lock(&kvm->mmu_lock);
> -     if (kvm->arch.nested_guests[l1_lpid]) {
> -             /* someone else beat us to it */
> -             gp = kvm->arch.nested_guests[l1_lpid];
> -     } else {
> -             kvm->arch.nested_guests[l1_lpid] = newgp;
> +     gp = __find_nested(kvm, l1_lpid);
> +     if (!gp) {
> +             __add_nested(kvm, l1_lpid, newgp);
>               ++newgp->refcnt;
>               gp = newgp;
>               newgp = NULL;
> -             if (l1_lpid > kvm->arch.max_nested_lpid)
> -                     kvm->arch.max_nested_lpid = l1_lpid;
>       }
>       ++gp->refcnt;
>       spin_unlock(&kvm->mmu_lock);
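
Worth spelling out why the __find_nested() recheck under mmu_lock still
works after a successful preallocation: a slot reserved with a NULL
pointer is invisible to lookups, because idr_find() returns NULL for it
exactly as for an absent id. Concurrent readers therefore never see a
half-constructed guest. A small illustration, again with the assumed
foo_idr:

    static void reservation_is_invisible(struct foo *p)
    {
            /* Reserve id 5 with a NULL pointer. */
            WARN_ON(idr_alloc(&foo_idr, NULL, 5, 6, GFP_KERNEL) != 5);

            /* The reservation reads back as "no entry". */
            WARN_ON(idr_find(&foo_idr, 5) != NULL);

            /* Only idr_replace() makes the pointer visible. */
            idr_replace(&foo_idr, p, 5);
            WARN_ON(idr_find(&foo_idr, 5) != p);
    }
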
> @@ -841,20 +860,13 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp)
>               kvmhv_release_nested(gp);
>  }
>
> -static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
> -{
> -     if (lpid > kvm->arch.max_nested_lpid)
> -             return NULL;
> -     return kvm->arch.nested_guests[lpid];
> -}
> -
>  pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
>                                unsigned long ea, unsigned *hshift)
>  {
>       struct kvm_nested_guest *gp;
>       pte_t *pte;
>
> -     gp = kvmhv_find_nested(kvm, lpid);
> +     gp = __find_nested(kvm, lpid);
>       if (!gp)
>               return NULL;
>
> @@ -960,7 +972,7 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
>
>       gpa = n_rmap & RMAP_NESTED_GPA_MASK;
>       lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
> -     gp = kvmhv_find_nested(kvm, lpid);
> +     gp = __find_nested(kvm, lpid);
>       if (!gp)
>               return;
>
> @@ -1152,16 +1164,13 @@ static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
>  {
>       struct kvm *kvm = vcpu->kvm;
>       struct kvm_nested_guest *gp;
> -     int i;
> +     int lpid;
>
>       spin_lock(&kvm->mmu_lock);
> -     for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
> -             gp = kvm->arch.nested_guests[i];
> -             if (gp) {
> -                     spin_unlock(&kvm->mmu_lock);
> -                     kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
> -                     spin_lock(&kvm->mmu_lock);
> -             }
> +     idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
> +             spin_unlock(&kvm->mmu_lock);
> +             kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
> +             spin_lock(&kvm->mmu_lock);
>       }
>       spin_unlock(&kvm->mmu_lock);
>  }
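
Dropping and retaking mmu_lock inside the loop still works with
idr_for_each_entry() because the macro re-walks the tree on every step;
paraphrasing include/linux/idr.h, it expands to roughly:

    for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)

so entries added or removed while the lock was dropped are simply
picked up or skipped by the next idr_get_next() call. The entry being
processed must stay alive across the unlock, but that requirement is
unchanged from the array version, which did the same unlock/relock
dance.
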
> @@ -1313,7 +1322,7 @@ long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
>        * H_ENTER_NESTED call. Since we can't differentiate this case from
>        * the invalid case, we ignore such flush requests and return success.
>        */
> -     if (!kvmhv_find_nested(vcpu->kvm, lpid))
> +     if (!__find_nested(vcpu->kvm, lpid))
>               return H_SUCCESS;
>
>       /*
> @@ -1657,15 +1666,12 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
>
>  int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
>  {
> -     int ret = -1;
> +     int ret = lpid + 1;
>
>       spin_lock(&kvm->mmu_lock);
> -     while (++lpid <= kvm->arch.max_nested_lpid) {
> -             if (kvm->arch.nested_guests[lpid]) {
> -                     ret = lpid;
> -                     break;
> -             }
> -     }
> +     if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret))
> +             ret = -1;
>       spin_unlock(&kvm->mmu_lock);
> +
>       return ret;
>  }
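
The kvmhv_nested_next_lpid() conversion also reads right:
idr_get_next() finds the first populated entry with id >= *nextid and
writes that id back through the pointer, reproducing the old "next
allocated lpid" loop without needing the max_nested_lpid bound. The
contract in miniature, with the assumed foo_idr one more time:

    static int next_id_after(int id)
    {
            int next = id + 1;

            /* Writes the found id back through &next; returns NULL
             * when no populated entry with id >= next remains. */
            if (!idr_get_next(&foo_idr, &next))
                    return -1;
            return next;
    }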
