Most of the code needed to initialize the arm64 and compat vDSOs will be
shared, hence the current code requires refactoring to avoid duplication
and to simplify maintenance.

Refactor vdso.c to prepare for the arm64 compat vDSO (which will be
introduced by a future patch): move the per-ABI state into a
struct __vdso_abi lookup table indexed by enum arch_vdso_type, and
factor the mapping logic into the common helpers __vdso_remap(),
__vdso_init() and __setup_additional_pages().
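
As an illustration of where this is heading (a sketch only, not part of
this patch), the compat support should reduce to a second lookup-table
entry plus calls to the common helpers. The ARM64_VDSO32 index and the
vdso32_start/vdso32_end symbols below are assumed names, not
definitions from this series:

  enum arch_vdso_type {
          ARM64_VDSO = 0,
          ARM64_VDSO32 = 1,                       /* assumed */
  };
  #define VDSO_TYPES (ARM64_VDSO32 + 1)

  static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = {
          [ARM64_VDSO] = {
                  .name = "vdso",
                  .vdso_code_start = vdso_start,
                  .vdso_code_end = vdso_end,
          },
          [ARM64_VDSO32] = {
                  .name = "vdso32",                /* assumed */
                  .vdso_code_start = vdso32_start, /* assumed symbol */
                  .vdso_code_end = vdso32_end,     /* assumed symbol */
          },
  };

  /* The compat setup path would then be a single call: */
  ret = __setup_additional_pages(ARM64_VDSO32, mm, bprm, uses_interp);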

Cc: Catalin Marinas <catalin.mari...@arm.com>
Cc: Will Deacon <will.dea...@arm.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frasc...@arm.com>
Tested-by: Shijith Thotton <sthot...@marvell.com>
Tested-by: Andre Przywara <andre.przyw...@arm.com>
---
 arch/arm64/kernel/vdso.c | 215 ++++++++++++++++++++++++++-------------
 1 file changed, 144 insertions(+), 71 deletions(-)

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 23c38303a52a..aa1fb25a9fe4 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -40,7 +40,31 @@
 #include <asm/vdso.h>
 
 extern char vdso_start[], vdso_end[];
-static unsigned long vdso_pages __ro_after_init;
+
+/* Index into vdso_lookup[] below */
+enum arch_vdso_type {
+       ARM64_VDSO = 0,
+};
+#define VDSO_TYPES             (ARM64_VDSO + 1)
+
+struct __vdso_abi {
+       const char *name;
+       const char *vdso_code_start;
+       const char *vdso_code_end;
+       unsigned long vdso_pages;
+       /* Data Mapping */
+       struct vm_special_mapping *dm;
+       /* Code Mapping */
+       struct vm_special_mapping *cm;
+};
+
+static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = {
+       {
+               .name = "vdso",
+               .vdso_code_start = vdso_start,
+               .vdso_code_end = vdso_end,
+       },
+};
 
 /*
  * The vDSO data page.
@@ -51,10 +75,110 @@ static union {
 } vdso_data_store __page_aligned_data;
 struct vdso_data *vdso_data = vdso_data_store.data;
 
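+/*
+ * The vDSO can only be mremap()'d as a whole: resizing is rejected,
+ * while a same-size move just updates the cached base address.
+ */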
+static int __vdso_remap(enum arch_vdso_type arch_index,
+                       const struct vm_special_mapping *sm,
+                       struct vm_area_struct *new_vma)
+{
+       unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+       unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
+                                 vdso_lookup[arch_index].vdso_code_start;
+
+       if (vdso_size != new_size)
+               return -EINVAL;
+
+       current->mm->context.vdso = (void *)new_vma->vm_start;
+
+       return 0;
+}
+
+static int __vdso_init(enum arch_vdso_type arch_index)
+{
+       int i;
+       struct page **vdso_pagelist;
+       unsigned long pfn;
+
+       if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
+               pr_err("vDSO is not a valid ELF object!\n");
+               return -EINVAL;
+       }
+
+       vdso_lookup[arch_index].vdso_pages = (
+                       vdso_lookup[arch_index].vdso_code_end -
+                       vdso_lookup[arch_index].vdso_code_start) >>
+                       PAGE_SHIFT;
+
+       /* Allocate the vDSO pagelist, plus a page for the data. */
+       vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
+                               sizeof(struct page *),
+                               GFP_KERNEL);
+       if (vdso_pagelist == NULL)
+               return -ENOMEM;
+
+       /* Grab the vDSO data page. */
+       vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
+
+       /* Grab the vDSO code pages. */
+       pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);
+
+       for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
+               vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
+
+       vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
+       vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];
+
+       return 0;
+}
+
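+/*
+ * Lay the mapping out as one read-only data page (the vvar page)
+ * followed by the vDSO code pages; mm->context.vdso points at the
+ * start of the code.
+ */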
+static int __setup_additional_pages(enum arch_vdso_type arch_index,
+                                   struct mm_struct *mm,
+                                   struct linux_binprm *bprm,
+                                   int uses_interp)
+{
+       unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+       void *ret;
+
+       vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
+       /* Be sure to map the data page */
+       vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+
+       vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+       if (IS_ERR_VALUE(vdso_base)) {
+               ret = ERR_PTR(vdso_base);
+               goto up_fail;
+       }
+
+       ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+                                      VM_READ|VM_MAYREAD,
+                                      vdso_lookup[arch_index].dm);
+       if (IS_ERR(ret))
+               goto up_fail;
+
+       vdso_base += PAGE_SIZE;
+       mm->context.vdso = (void *)vdso_base;
+       ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+                                      VM_READ|VM_EXEC|
+                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+                                      vdso_lookup[arch_index].cm);
+       if (IS_ERR(ret))
+               goto up_fail;
+
+       return 0;
+
+up_fail:
+       mm->context.vdso = NULL;
+       return PTR_ERR(ret);
+}
+
 #ifdef CONFIG_COMPAT
 /*
  * Create and map the vectors page for AArch32 tasks.
  */
+/*
+ * aarch32_vdso_pages:
+ * 0 - kuser helpers
+ * 1 - sigreturn code
+ */
 #define C_VECTORS      0
 #define C_SIGPAGE      1
 #define C_PAGES                (C_SIGPAGE + 1)
@@ -183,18 +307,18 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 static int vdso_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
 {
-       unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
-       unsigned long vdso_size = vdso_end - vdso_start;
-
-       if (vdso_size != new_size)
-               return -EINVAL;
-
-       current->mm->context.vdso = (void *)new_vma->vm_start;
-
-       return 0;
+       return __vdso_remap(ARM64_VDSO, sm, new_vma);
 }
 
-static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
+/*
+ * aarch64_vdso_pages:
+ * 0 - vvar
+ * 1 - vdso
+ */
+#define A_VVAR         0
+#define A_VDSO         1
+#define A_PAGES                (A_VDSO + 1)
+static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
        {
                .name   = "[vvar]",
        },
@@ -206,37 +330,10 @@ static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
 
 static int __init vdso_init(void)
 {
-       int i;
-       struct page **vdso_pagelist;
-       unsigned long pfn;
-
-       if (memcmp(vdso_start, "\177ELF", 4)) {
-               pr_err("vDSO is not a valid ELF object!\n");
-               return -EINVAL;
-       }
-
-       vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
-
-       /* Allocate the vDSO pagelist, plus a page for the data. */
-       vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
-                               GFP_KERNEL);
-       if (vdso_pagelist == NULL)
-               return -ENOMEM;
-
-       /* Grab the vDSO data page. */
-       vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
-
+       vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR];
+       vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO];
 
-       /* Grab the vDSO code pages. */
-       pfn = sym_to_pfn(vdso_start);
-
-       for (i = 0; i < vdso_pages; i++)
-               vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
-
-       vdso_spec[0].pages = &vdso_pagelist[0];
-       vdso_spec[1].pages = &vdso_pagelist[1];
-
-       return 0;
+       return __vdso_init(ARM64_VDSO);
 }
 arch_initcall(vdso_init);
 
@@ -244,41 +341,17 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
                                int uses_interp)
 {
        struct mm_struct *mm = current->mm;
-       unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
-       void *ret;
-
-       vdso_text_len = vdso_pages << PAGE_SHIFT;
-       /* Be sure to map the data page */
-       vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+       int ret;
 
        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
-       vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
-       if (IS_ERR_VALUE(vdso_base)) {
-               ret = ERR_PTR(vdso_base);
-               goto up_fail;
-       }
-       ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
-                                      VM_READ|VM_MAYREAD,
-                                      &vdso_spec[0]);
-       if (IS_ERR(ret))
-               goto up_fail;
-
-       vdso_base += PAGE_SIZE;
-       mm->context.vdso = (void *)vdso_base;
-       ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
-                                      VM_READ|VM_EXEC|
-                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-                                      &vdso_spec[1]);
-       if (IS_ERR(ret))
-               goto up_fail;
 
+       ret = __setup_additional_pages(ARM64_VDSO,
+                                      mm,
+                                      bprm,
+                                      uses_interp);
 
        up_write(&mm->mmap_sem);
-       return 0;
 
-up_fail:
-       mm->context.vdso = NULL;
-       up_write(&mm->mmap_sem);
-       return PTR_ERR(ret);
+       return ret;
 }
-- 
2.21.0
