On 17/04/2019 at 15:03, Aneesh Kumar K.V wrote:
Currently, our mm_context_t on book3s64 includes all hash-specific
context details such as the slice mask and subpage protection details. We
can skip allocating these with radix translation, which saves
8K per mm_context when radix translation is in use.

With the patch applied we have

sizeof(mm_context_t)  = 136
sizeof(struct hash_mm_context)  = 8288
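To make the saving concrete, here is a rough sketch of the split this patch
aims for (illustrative only: apart from hash_context and slb_addr_limit,
which appear in the diff below, the field names are placeholders rather
than the exact upstream layout):

/* Hash-only state (~8K); allocated only when a task uses hash translation */
struct hash_mm_context {
        unsigned long slb_addr_limit;
        /* slice masks, subpage protection table, ... */
};

typedef struct {
        unsigned int id;
        /* ... small fields shared by hash and radix ... */
        struct hash_mm_context *hash_context;   /* NULL with radix translation */
} mm_context_t;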

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
  arch/powerpc/include/asm/book3s/64/mmu-hash.h | 33 ++++++++++++-
  arch/powerpc/include/asm/book3s/64/mmu.h      | 49 +++++--------------
  arch/powerpc/kernel/setup-common.c            |  6 +++
  arch/powerpc/mm/hash_utils_64.c               |  4 +-
  arch/powerpc/mm/mmu_context_book3s64.c        | 16 +++++-
  5 files changed, 68 insertions(+), 40 deletions(-)


[...]

diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index a07de8608484..21b1ce200b22 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -947,6 +947,12 @@ void __init setup_arch(char **cmdline_p)
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = klimit;
+#ifdef CONFIG_PPC_MM_SLICES
+#if defined(CONFIG_PPC_8xx)
+       init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW;
+#endif
+#endif
+

In the previous patch, you moved the above into early_init_mmu(). Why bring it back here?

Christophe

  #ifdef CONFIG_SPAPR_TCE_IOMMU
        mm_iommu_init(&init_mm);
  #endif
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2cb3a456f5b5..04ac7c36d380 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -968,6 +968,7 @@ void __init hash__early_init_devtree(void)
        htab_scan_page_sizes();
  }
+struct hash_mm_context init_hash_mm_context;
  void __init hash__early_init_mmu(void)
  {
  #ifndef CONFIG_PPC_64K_PAGES
@@ -1041,7 +1042,8 @@ void __init hash__early_init_mmu(void)
         */
        htab_initialize();
-       init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
+       init_mm.context.hash_context = &init_hash_mm_context;
+       init_mm.context.hash_context->slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
        pr_info("Initializing hash mmu with SLB\n");
        /* Initialize SLB management */
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index f720c5cc0b5e..6eef5a36b2e9 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -63,6 +63,12 @@ static int hash__init_new_context(struct mm_struct *mm)
        if (index < 0)
                return index;
+       mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), GFP_KERNEL);
+       if (!mm->context.hash_context) {
+               ida_free(&mmu_context_ida, index);
+               return -ENOMEM;
+       }
+
        /*
         * The old code would re-promote on fork, we don't do that when using
         * slices as it could cause problem promoting slices that have been
@@ -77,8 +83,14 @@ static int hash__init_new_context(struct mm_struct *mm)
         * We should not be calling init_new_context() on init_mm. Hence a
         * check against 0 is OK.
         */
-       if (mm->context.id == 0)
+       if (mm->context.id == 0) {
+               memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
                slice_init_new_context_exec(mm);
+       } else {
+               /* This is fork. Copy hash_context details from current->mm */
+               memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
+
+       }
        subpage_prot_init_new_context(mm);
@@ -118,6 +130,7 @@ static int radix__init_new_context(struct mm_struct *mm)
        asm volatile("ptesync;isync" : : : "memory");
        mm->context.npu_context = NULL;
+       mm->context.hash_context = NULL;
        return index;
  }
@@ -162,6 +175,7 @@ static void destroy_contexts(mm_context_t *ctx)
                if (context_id)
                        ida_free(&mmu_context_ida, context_id);
        }
+       kfree(ctx->hash_context);
  }
static void pmd_frag_destroy(void *pmd_frag)
