Hi Ben,

Le 24/07/2017 à 06:28, Benjamin Herrenschmidt a écrit :
Instead of comparing the whole CPU mask every time, let's
keep a counter of how many bits are set in the mask. Thus
testing for a local mm only requires testing if that counter
is 1 and the current CPU bit is set in the mask.


I'm trying to see whether we could merge this patch with what I'm doing to mark a context as requiring global TLBIs.
In http://patchwork.ozlabs.org/patch/796775/
I'm introducing a per-memory-context 'flags' field, using one bit to indicate that the context needs global TLBIs. The two approaches could co-exist; I'm just checking: do you plan to use the actual active_cpus count down the road, or is it only a matter of knowing whether more than one CPU is active?

Thanks,

  Fred



Signed-off-by: Benjamin Herrenschmidt <b...@kernel.crashing.org>
---
  arch/powerpc/include/asm/book3s/64/mmu.h |  3 +++
  arch/powerpc/include/asm/mmu_context.h   |  9 +++++++++
  arch/powerpc/include/asm/tlb.h           | 11 ++++++++++-
  arch/powerpc/mm/mmu_context_book3s64.c   |  2 ++
  4 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h 
b/arch/powerpc/include/asm/book3s/64/mmu.h
index 1a220cdff923..c3b00e8ff791 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -83,6 +83,9 @@ typedef struct {
        mm_context_id_t id;
        u16 user_psize;         /* page size index */

+       /* Number of bits in the mm_cpumask */
+       atomic_t active_cpus;
+
        /* NPU NMMU context */
        struct npu_context *npu_context;

diff --git a/arch/powerpc/include/asm/mmu_context.h 
b/arch/powerpc/include/asm/mmu_context.h
index ff1aeb2cd19f..cf8f50cd4030 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -96,6 +96,14 @@ static inline void switch_mm_pgdir(struct task_struct *tsk,
                                   struct mm_struct *mm) { }
  #endif

+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void inc_mm_active_cpus(struct mm_struct *mm)
+{
+       atomic_inc(&mm->context.active_cpus);
+}
+#else
+static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
+#endif

  /*
   * switch_mm is the entry point called from the architecture independent
@@ -110,6 +118,7 @@ static inline void switch_mm_irqs_off(struct mm_struct 
*prev,
        /* Mark this context has been used on the new CPU */
        if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
                cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+               inc_mm_active_cpus(next);
                smp_mb();
                new_on_cpu = true;
        }
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 609557569f65..a7eabff27a0f 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -69,13 +69,22 @@ static inline int mm_is_core_local(struct mm_struct *mm)
                              topology_sibling_cpumask(smp_processor_id()));
  }

+#ifdef CONFIG_PPC_BOOK3S_64
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+       if (atomic_read(&mm->context.active_cpus) > 1)
+               return false;
+       return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+#else /* CONFIG_PPC_BOOK3S_64 */
  static inline int mm_is_thread_local(struct mm_struct *mm)
  {
        return cpumask_equal(mm_cpumask(mm),
                              cpumask_of(smp_processor_id()));
  }
+#endif /* !CONFIG_PPC_BOOK3S_64 */

-#else
+#else /* CONFIG_SMP */
  static inline int mm_is_core_local(struct mm_struct *mm)
  {
        return 1;
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c 
b/arch/powerpc/mm/mmu_context_book3s64.c
index 8159f5219137..de17d3e714aa 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -174,6 +174,8 @@ int init_new_context(struct task_struct *tsk, struct 
mm_struct *mm)
  #ifdef CONFIG_SPAPR_TCE_IOMMU
        mm_iommu_init(mm);
  #endif
+       atomic_set(&mm->context.active_cpus, 0);
+
        return 0;
  }


Reply via email to