When installing a new leaf pte onto an invalid ptep, we need to take a reference on the page that contains the ptep (get_page(virt_to_page(ptep))). When just updating an already valid leaf ptep, we should not take another reference. An incorrect page_count on the translation table pages may lead to a memory leak when unmapping a stage 2 memory range.
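For clarity, a minimal sketch of the refcounting rule the fix enforces is shown below. This is not the code touched by the patch: stage2_replace_leaf_sketch() is a hypothetical helper, and the break-before-make steps (invalidation and TLB flush) are elided for brevity. The invariant is "one page reference per valid entry in a table page", so replacing an already valid entry pairs a put_page() for the old mapping with the get_page() taken for the new one, instead of taking a second reference.

	/* Illustrative sketch only, not the kernel code. */
	static void stage2_replace_leaf_sketch(kvm_pte_t *ptep, kvm_pte_t old,
					       kvm_pte_t new)
	{
		if (kvm_pte_valid(old))
			put_page(virt_to_page(ptep));	/* ref of the old mapping */

		smp_store_release(ptep, new);
		get_page(virt_to_page(ptep));		/* ref of the new mapping */
	}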
Signed-off-by: Yanan Wang <wangyana...@huawei.com>
---
 arch/arm64/kvm/hyp/pgtable.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 0271b4a3b9fe..696b6aa83faf 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -186,6 +186,7 @@ static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr,
 		return old == pte;
 
 	smp_store_release(ptep, pte);
+	get_page(virt_to_page(ptep));
 	return true;
 }
 
@@ -476,6 +477,7 @@ static bool stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
 	/* There's an existing valid leaf entry, so perform break-before-make */
 	kvm_set_invalid_pte(ptep);
 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
+	put_page(virt_to_page(ptep));
 	kvm_set_valid_leaf_pte(ptep, phys, data->attr, level);
 out:
 	data->phys += granule;
@@ -512,7 +514,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	}
 
 	if (stage2_map_walker_try_leaf(addr, end, level, ptep, data))
-		goto out_get_page;
+		return 0;
 
 	if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
 		return -EINVAL;
@@ -536,9 +538,8 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	}
 
 	kvm_set_table_pte(ptep, childp);
-
-out_get_page:
 	get_page(page);
+
 	return 0;
 }
-- 
2.19.1