On Fri, Apr 30, 2021 at 11:36:08AM -0300, Leonardo Bras wrote:
> Every time a memory hotplug happens, and the memory limit crosses a 2^n
> value, it may be necessary to perform HPT resizing-up, which can take
> some time (over 100ms in my tests).
> 
> It usually is not an issue, but it can take some time if a lot of memory
> is added to a guest with little starting memory:
> Adding 256GB to a 2GB guest, for example, will require 8 HPT resizes.
> 
> Perform an HPT resize before memory hotplug, updating HPT to its
> final size (considering a successful hotplug), reducing the number of
> HPT resizes to at most one per memory hotplug action.
> 
> Signed-off-by: Leonardo Bras <leobra...@gmail.com>

Reviewed-by: David Gibson <da...@gibson.dropbear.id.au>

> ---
>  arch/powerpc/include/asm/book3s/64/hash.h     |  2 ++
>  arch/powerpc/mm/book3s64/hash_utils.c         | 20 +++++++++++++++++++
>  .../platforms/pseries/hotplug-memory.c        |  9 +++++++++
>  3 files changed, 31 insertions(+)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h 
> b/arch/powerpc/include/asm/book3s/64/hash.h
> index d959b0195ad9..fad4af8b8543 100644
> --- a/arch/powerpc/include/asm/book3s/64/hash.h
> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
> @@ -255,6 +255,8 @@ int hash__create_section_mapping(unsigned long start, 
> unsigned long end,
>                                int nid, pgprot_t prot);
>  int hash__remove_section_mapping(unsigned long start, unsigned long end);
>  
> +void hash_batch_expand_prepare(unsigned long newsize);
> +
>  #endif /* !__ASSEMBLY__ */
>  #endif /* __KERNEL__ */
>  #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
> diff --git a/arch/powerpc/mm/book3s64/hash_utils.c 
> b/arch/powerpc/mm/book3s64/hash_utils.c
> index 608e4ed397a9..3fa395b3fe57 100644
> --- a/arch/powerpc/mm/book3s64/hash_utils.c
> +++ b/arch/powerpc/mm/book3s64/hash_utils.c
> @@ -859,6 +859,26 @@ int hash__remove_section_mapping(unsigned long start, 
> unsigned long end)
>  
>       return rc;
>  }
> +
> +void hash_batch_expand_prepare(unsigned long newsize)
> +{
> +     const u64 starting_size = ppc64_pft_size;
> +
> +     /*
> +      * Resizing-up HPT should never fail, but there are some cases where 
> the system starts with a higher
> +      * SHIFT than required, and we go through the funny case of resizing 
> HPT down while
> +      * adding memory.
> +      */
> +
> +     while (resize_hpt_for_hotplug(newsize, false) == -ENOSPC) {
> +             newsize *= 2;
> +             pr_warn("Hash collision while resizing HPT\n");
> +
> +             /* Do not try to resize to the starting size, or bigger value */
> +             if (htab_shift_for_mem_size(newsize) >= starting_size)
> +                     break;
> +     }
> +}
>  #endif /* CONFIG_MEMORY_HOTPLUG */
>  
>  static void __init hash_init_partition_table(phys_addr_t hash_table,
> diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c 
> b/arch/powerpc/platforms/pseries/hotplug-memory.c
> index 8377f1f7c78e..48b2cfe4ce69 100644
> --- a/arch/powerpc/platforms/pseries/hotplug-memory.c
> +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
> @@ -13,6 +13,7 @@
>  #include <linux/memory.h>
>  #include <linux/memory_hotplug.h>
>  #include <linux/slab.h>
> +#include <linux/pgtable.h>
>  
>  #include <asm/firmware.h>
>  #include <asm/machdep.h>
> @@ -671,6 +672,10 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
>       if (lmbs_available < lmbs_to_add)
>               return -EINVAL;
>  
> +     if (!radix_enabled())
> +             hash_batch_expand_prepare(memblock_phys_mem_size() +
> +                                              lmbs_to_add * 
> drmem_lmb_size());
> +
>       for_each_drmem_lmb(lmb) {
>               if (lmb->flags & DRCONF_MEM_ASSIGNED)
>                       continue;
> @@ -788,6 +793,10 @@ static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 
> drc_index)
>       if (lmbs_available < lmbs_to_add)
>               return -EINVAL;
>  
> +     if (!radix_enabled())
> +             hash_batch_expand_prepare(memblock_phys_mem_size() +
> +                                       lmbs_to_add * drmem_lmb_size());
> +
>       for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
>               if (lmb->flags & DRCONF_MEM_ASSIGNED)
>                       continue;

-- 
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson

Attachment: signature.asc
Description: PGP signature

Reply via email to