From: Lance Yang <[email protected]>

Enable the optimization introduced in the previous patch for x86.

Add pv_ops.mmu.flush_tlb_multi_implies_ipi_broadcast to track whether
flush_tlb_multi() sends real IPIs. Initialize it once in
native_pv_tlb_init() during boot.

On CONFIG_PARAVIRT systems, tlb_table_flush_implies_ipi_broadcast() reads
the pv_ops property. On non-PARAVIRT, it directly checks for INVLPGB.

PV backends (KVM, Xen, Hyper-V) typically have their own implementations
and don't call native_flush_tlb_multi() directly, so they cannot be trusted
to provide the IPI guarantees we need. They keep the property false.

Two-step plan as David suggested[1]:

Step 1 (this patch): Skip redundant sync when we're 100% certain the TLB
flush sent IPIs. INVLPGB is excluded because when supported, we cannot
guarantee IPIs were sent, keeping it clean and simple.

Step 2 (future work): Send targeted IPIs only to CPUs actually doing
software/lockless page table walks, benefiting all architectures.

Regarding Step 2, it obviously only applies to setups where Step 1 does
not apply, such as x86 with INVLPGB or arm64.

[1] https://lore.kernel.org/linux-mm/[email protected]/

Suggested-by: David Hildenbrand (Arm) <[email protected]>
Signed-off-by: Lance Yang <[email protected]>
---
 arch/x86/include/asm/paravirt_types.h |  5 +++++
 arch/x86/include/asm/smp.h            |  7 +++++++
 arch/x86/include/asm/tlb.h            | 20 +++++++++++++++++++-
 arch/x86/kernel/paravirt.c            | 16 ++++++++++++++++
 arch/x86/kernel/smpboot.c             |  1 +
 5 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 9bcf6bce88f6..ec01268f2e3e 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -112,6 +112,11 @@ struct pv_mmu_ops {
        void (*flush_tlb_multi)(const struct cpumask *cpus,
                                const struct flush_tlb_info *info);
 
+       /*
+        * True if flush_tlb_multi() sends real IPIs to all target CPUs.
+        */
+       bool flush_tlb_multi_implies_ipi_broadcast;
+
        /* Hook for intercepting the destruction of an mm_struct. */
        void (*exit_mmap)(struct mm_struct *mm);
        void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 84951572ab81..4ac175414ac1 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -105,6 +105,13 @@ void native_smp_prepare_boot_cpu(void);
 void smp_prepare_cpus_common(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
+
+#ifdef CONFIG_PARAVIRT
+void __init native_pv_tlb_init(void);
+#else
+static inline void native_pv_tlb_init(void) { }
+#endif
+
 int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_kick_ap(unsigned int cpu, struct task_struct *tidle);
 int native_cpu_disable(void);
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 866ea78ba156..87ef7147eac8 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -5,10 +5,23 @@
 #define tlb_flush tlb_flush
 static inline void tlb_flush(struct mmu_gather *tlb);
 
+#define tlb_table_flush_implies_ipi_broadcast tlb_table_flush_implies_ipi_broadcast
+static inline bool tlb_table_flush_implies_ipi_broadcast(void);
+
 #include <asm-generic/tlb.h>
 #include <linux/kernel.h>
 #include <vdso/bits.h>
 #include <vdso/page.h>
+#include <asm/paravirt.h>
+
+static inline bool tlb_table_flush_implies_ipi_broadcast(void)
+{
+#ifdef CONFIG_PARAVIRT
+       return pv_ops.mmu.flush_tlb_multi_implies_ipi_broadcast;
+#else
+       return !cpu_feature_enabled(X86_FEATURE_INVLPGB);
+#endif
+}
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
@@ -20,7 +33,12 @@ static inline void tlb_flush(struct mmu_gather *tlb)
                end = tlb->end;
        }
 
-       flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
+       /*
+        * Pass both freed_tables and unshared_tables so that lazy-TLB CPUs
+        * also receive IPIs during unsharing page tables.
+        */
+       flush_tlb_mm_range(tlb->mm, start, end, stride_shift,
+                          tlb->freed_tables || tlb->unshared_tables);
 }
 
 static inline void invlpg(unsigned long addr)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index a6ed52cae003..b681b8319295 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -154,6 +154,7 @@ struct paravirt_patch_template pv_ops = {
        .mmu.flush_tlb_kernel   = native_flush_tlb_global,
        .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
        .mmu.flush_tlb_multi    = native_flush_tlb_multi,
+       .mmu.flush_tlb_multi_implies_ipi_broadcast = false,
 
        .mmu.exit_mmap          = paravirt_nop,
        .mmu.notify_page_enc_status_changed     = paravirt_nop,
@@ -221,3 +222,18 @@ NOKPROBE_SYMBOL(native_load_idt);
 
 EXPORT_SYMBOL(pv_ops);
 EXPORT_SYMBOL_GPL(pv_info);
+
+void __init native_pv_tlb_init(void)
+{
+       /*
+        * If PV backend already set the property, respect it.
+        * Otherwise, check if native TLB flush sends real IPIs to all target
+        * CPUs (i.e., not using INVLPGB broadcast invalidation).
+        */
+       if (pv_ops.mmu.flush_tlb_multi_implies_ipi_broadcast)
+               return;
+
+       if (pv_ops.mmu.flush_tlb_multi == native_flush_tlb_multi &&
+           !cpu_feature_enabled(X86_FEATURE_INVLPGB))
+               pv_ops.mmu.flush_tlb_multi_implies_ipi_broadcast = true;
+}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5cd6950ab672..3cdb04162843 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1167,6 +1167,7 @@ void __init native_smp_prepare_boot_cpu(void)
                switch_gdt_and_percpu_base(me);
 
        native_pv_lock_init();
+       native_pv_tlb_init();
 }
 
 void __init native_smp_cpus_done(unsigned int max_cpus)
-- 
2.49.0


Reply via email to