Setting /sys/kernel/mm/transparent_hugepage/enabled to "always" allows
applications to benefit from THPs without having to madvise. However,
the page fault handler applies very few checks when deciding whether to
actually use a THP, which can lead to a lot of wasted memory. khugepaged
only operates on memory that was either allocated with enabled=always or
marked with MADV_HUGEPAGE.
Introduce the ability to set enabled=defer, which prevents THPs from
being allocated by the page fault handler unless MADV_HUGEPAGE has been
set, leaving it up to khugepaged to decide which regions are collapsed
into THPs. This should allow applications to benefit from THPs while
curbing some of the memory waste.
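With this patch applied, the policy would be selected with
"echo defer > /sys/kernel/mm/transparent_hugepage/enabled", or with
transparent_hugepage=defer on the kernel command line. As an
illustration only (not part of this patch), below is a minimal
userspace sketch of how an application that still wants fault-time THPs
for a hot region under enabled=defer would opt back in with
madvise(MADV_HUGEPAGE); the mapping size and the final touch are
arbitrary for the example:

/* thp_defer_optin.c: illustrative sketch only, not part of this patch. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define REGION_SZ (16UL << 20)	/* 16 MiB, i.e. several PMD-sized units */

int main(void)
{
	void *buf = mmap(NULL, REGION_SZ, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * Under enabled=defer, ordinary anonymous faults get base pages
	 * and are left for khugepaged to collapse later; MADV_HUGEPAGE
	 * opts this range back into THP allocation at fault time.
	 */
	if (madvise(buf, REGION_SZ, MADV_HUGEPAGE))
		perror("madvise(MADV_HUGEPAGE)");

	memset(buf, 1, REGION_SZ);	/* touch the range to fault it in */

	munmap(buf, REGION_SZ);
	return 0;
}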
Co-developed-by: Rafael Aquini <raqu...@redhat.com>
Signed-off-by: Rafael Aquini <raqu...@redhat.com>
Signed-off-by: Nico Pache <npa...@redhat.com>
---
 include/linux/huge_mm.h | 15 +++++++++++++--
 mm/huge_memory.c        | 31 +++++++++++++++++++++++++++----
 2 files changed, 40 insertions(+), 6 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 95be9d266eff..b154bd11bd93 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -48,6 +48,7 @@ enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
 	TRANSPARENT_HUGEPAGE_FLAG,
 	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+	TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG,
 	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
 	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
 	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
@@ -186,6 +187,7 @@ static inline bool hugepage_global_enabled(void)
 {
 	return transparent_hugepage_flags &
 			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
+			(1<<TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG) |
 			(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
 }
 
@@ -195,6 +197,12 @@ static inline bool hugepage_global_always(void)
 			(1<<TRANSPARENT_HUGEPAGE_FLAG);
 }
 
+static inline bool hugepage_global_defer(void)
+{
+	return transparent_hugepage_flags &
+			(1<<TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG);
+}
+
 static inline int highest_order(unsigned long orders)
 {
 	return fls_long(orders) - 1;
@@ -291,13 +299,16 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 					 unsigned long tva_flags,
 					 unsigned long orders)
 {
+	if ((tva_flags & TVA_IN_PF) && hugepage_global_defer() &&
+	    !(vm_flags & VM_HUGEPAGE))
+		return 0;
+
 	/* Optimization to check if required orders are enabled early. */
 	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
 		unsigned long mask = READ_ONCE(huge_anon_orders_always);
-
 		if (vm_flags & VM_HUGEPAGE)
 			mask |= READ_ONCE(huge_anon_orders_madvise);
-		if (hugepage_global_always() ||
+		if (hugepage_global_always() || hugepage_global_defer() ||
 		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
 			mask |= READ_ONCE(huge_anon_orders_inherit);
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 35515bff392b..4eee0001e0fa 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -297,12 +297,15 @@ static ssize_t enabled_show(struct kobject *kobj,
 	const char *output;
 
 	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
-		output = "[always] madvise never";
+		output = "[always] madvise defer never";
 	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 			  &transparent_hugepage_flags))
-		output = "always [madvise] never";
+		output = "always [madvise] defer never";
+	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG,
+			  &transparent_hugepage_flags))
+		output = "always madvise [defer] never";
 	else
-		output = "always madvise [never]";
+		output = "always madvise defer [never]";
 
 	return sysfs_emit(buf, "%s\n", output);
 }
@@ -315,13 +318,20 @@ static ssize_t enabled_store(struct kobject *kobj,
 
 	if (sysfs_streq(buf, "always")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+	} else if (sysfs_streq(buf, "defer")) {
+		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG, &transparent_hugepage_flags);
 	} else if (sysfs_streq(buf, "madvise")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
 	} else if (sysfs_streq(buf, "never")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG, &transparent_hugepage_flags);
 	} else
 		ret = -EINVAL;
 
@@ -954,18 +964,31 @@ static int __init setup_transparent_hugepage(char *str)
 			&transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 			  &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG,
+			  &transparent_hugepage_flags);
 		ret = 1;
+	} else if (!strcmp(str, "defer")) {
+		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
+			  &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+			  &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG,
+			&transparent_hugepage_flags);
 	} else if (!strcmp(str, "madvise")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
 			  &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG,
+			  &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
-			&transparent_hugepage_flags);
+			  &transparent_hugepage_flags);
 		ret = 1;
 	} else if (!strcmp(str, "never")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
 			  &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 			  &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG,
+			  &transparent_hugepage_flags);
 		ret = 1;
 	}
 out:
-- 
2.48.1