From: Nadav Amit <na...@vmware.com>

Reduce the chances that inc/dec_tlb_flush_pending() will be abused by
moving them into mmu_gather.c, which is a more natural place for them.
This also reduces the clutter in mm_types.h.
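
For context, the ordering protocol documented in the comment being moved
boils down to the caller pattern below. This is only an illustrative
sketch derived from that comment, not code touched by this patch; mm,
vma, ptl, ptep, pte, addr, start, end and pending are placeholder names.

        /* writer side: change PTEs, then flush, with the count elevated */
        inc_tlb_flush_pending(mm);

        spin_lock(ptl);
        set_pte_at(mm, addr, ptep, pte);
        spin_unlock(ptl);

        flush_tlb_range(vma, start, end);
        dec_tlb_flush_pending(mm);

        /* reader side: under the same PTL, check for a pending flush */
        spin_lock(ptl);
        pending = mm_tlb_flush_pending(mm);  /* true until the flush above completes */
        spin_unlock(ptl);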

Signed-off-by: Nadav Amit <na...@vmware.com>
Cc: Andrea Arcangeli <aarca...@redhat.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Andy Lutomirski <l...@kernel.org>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Will Deacon <w...@kernel.org>
Cc: Yu Zhao <yuz...@google.com>
Cc: Nick Piggin <npig...@gmail.com>
Cc: x...@kernel.org
---
 include/linux/mm_types.h | 54 ----------------------------------------
 mm/mmu_gather.c          | 54 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 54 insertions(+), 54 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 812ee0fd4c35..676795dfd5d4 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -615,60 +615,6 @@ static inline void init_tlb_flush_pending(struct mm_struct *mm)
        atomic_set(&mm->tlb_flush_pending, 0);
 }
 
-static inline void inc_tlb_flush_pending(struct mm_struct *mm)
-{
-       atomic_inc(&mm->tlb_flush_pending);
-       /*
-        * The only time this value is relevant is when there are indeed pages
-        * to flush. And we'll only flush pages after changing them, which
-        * requires the PTL.
-        *
-        * So the ordering here is:
-        *
-        *      atomic_inc(&mm->tlb_flush_pending);
-        *      spin_lock(&ptl);
-        *      ...
-        *      set_pte_at();
-        *      spin_unlock(&ptl);
-        *
-        *                              spin_lock(&ptl)
-        *                              mm_tlb_flush_pending();
-        *                              ....
-        *                              spin_unlock(&ptl);
-        *
-        *      flush_tlb_range();
-        *      atomic_dec(&mm->tlb_flush_pending);
-        *
-        * Where the increment is constrained by the PTL unlock, it thus
-        * ensures that the increment is visible if the PTE modification is
-        * visible. After all, if there is no PTE modification, nobody cares
-        * about TLB flushes either.
-        *
-        * This very much relies on users (mm_tlb_flush_pending() and
-        * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
-        * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
-        * locks (PPC) the unlock of one doesn't order against the lock of
-        * another PTL.
-        *
-        * The decrement is ordered by the flush_tlb_range(), such that
-        * mm_tlb_flush_pending() will not return false unless all flushes have
-        * completed.
-        */
-}
-
-static inline void dec_tlb_flush_pending(struct mm_struct *mm)
-{
-       /*
-        * See inc_tlb_flush_pending().
-        *
-        * This cannot be smp_mb__before_atomic() because smp_mb() simply does
-        * not order against TLB invalidate completion, which is what we need.
-        *
-        * Therefore we must rely on tlb_flush_*() to guarantee order.
-        */
-       atomic_dec(&mm->tlb_flush_pending);
-}
-
 static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
 {
        /*
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 5a659d4e59eb..13338c096cc6 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -249,6 +249,60 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_flush_mmu_free(tlb);
 }
 
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
+{
+       atomic_inc(&mm->tlb_flush_pending);
+       /*
+        * The only time this value is relevant is when there are indeed pages
+        * to flush. And we'll only flush pages after changing them, which
+        * requires the PTL.
+        *
+        * So the ordering here is:
+        *
+        *      atomic_inc(&mm->tlb_flush_pending);
+        *      spin_lock(&ptl);
+        *      ...
+        *      set_pte_at();
+        *      spin_unlock(&ptl);
+        *
+        *                              spin_lock(&ptl)
+        *                              mm_tlb_flush_pending();
+        *                              ....
+        *                              spin_unlock(&ptl);
+        *
+        *      flush_tlb_range();
+        *      atomic_dec(&mm->tlb_flush_pending);
+        *
+        * Where the increment is constrained by the PTL unlock, it thus
+        * ensures that the increment is visible if the PTE modification is
+        * visible. After all, if there is no PTE modification, nobody cares
+        * about TLB flushes either.
+        *
+        * This very much relies on users (mm_tlb_flush_pending() and
+        * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
+        * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
+        * locks (PPC) the unlock of one doesn't order against the lock of
+        * another PTL.
+        *
+        * The decrement is ordered by the flush_tlb_range(), such that
+        * mm_tlb_flush_pending() will not return false unless all flushes have
+        * completed.
+        */
+}
+
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
+{
+       /*
+        * See inc_tlb_flush_pending().
+        *
+        * This cannot be smp_mb__before_atomic() because smp_mb() simply does
+        * not order against TLB invalidate completion, which is what we need.
+        *
+        * Therefore we must rely on tlb_flush_*() to guarantee order.
+        */
+       atomic_dec(&mm->tlb_flush_pending);
+}
+
 /**
  * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
  * @tlb: the mmu_gather structure to initialize
-- 
2.25.1
