In !MMU mode, the vDSO page is directly accessible from userspace.  This
commit implements the vDSO entry point by pointing userspace at the
address of that page.

This commit also configures the memory permissions of the vDSO page so
that it is executable.

Signed-off-by: Hajime Tazaki <thehaj...@gmail.com>
Signed-off-by: Ricardo Koller <ricar...@google.com>
---
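Notes (not part of the commit message):

The sketch below illustrates how a userspace consumer would reach the
vDSO page that this patch exposes in !MMU mode.  It assumes the !MMU
build still advertises the page through AT_SYSINFO_EHDR, as the MMU
build does via um_vdso_addr; it only validates the ELF header, whereas
a real consumer (e.g. the C library) would additionally walk the
dynamic symbol table to resolve __vdso_clock_gettime and call it in
place.

  #include <elf.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/auxv.h>

  int main(void)
  {
          /* Address the kernel advertises for the vDSO ELF image, if any. */
          unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

          if (!vdso) {
                  puts("no vDSO advertised in the auxiliary vector");
                  return 1;
          }

          /*
           * With !MMU there is a single shared address space, so the page
           * can be read (and, with the permission change in this patch,
           * executed) directly at this address.
           */
          if (memcmp((void *)vdso, ELFMAG, SELFMAG) == 0)
                  printf("vDSO ELF header found at %#lx\n", vdso);

          return 0;
  }
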
 arch/x86/um/vdso/um_vdso.c | 41 +++++++++++++++++++++++++-------------
 arch/x86/um/vdso/vma.c     | 14 +++++++++++++
 2 files changed, 41 insertions(+), 14 deletions(-)
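
For reviewers less familiar with the UML os layer: os_protect_memory()
in the vma.c hunk is the host-side permission change that makes the
vDSO page executable.  Roughly, the (addr, len, 1, 0, 1) call asks the
host for read + execute, no write, along the lines of the sketch below
(an approximation for illustration, not the exact arch/um/os-Linux
implementation):

  #include <errno.h>
  #include <sys/mman.h>

  /* Roughly what os_protect_memory(addr, len, 1, 0, 1) requests from the host. */
  static int protect_vdso_page(void *addr, unsigned long len)
  {
          if (mprotect(addr, len, PROT_READ | PROT_EXEC) < 0)
                  return -errno;

          return 0;
  }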

diff --git a/arch/x86/um/vdso/um_vdso.c b/arch/x86/um/vdso/um_vdso.c
index cbae2584124f..a78d095655f1 100644
--- a/arch/x86/um/vdso/um_vdso.c
+++ b/arch/x86/um/vdso/um_vdso.c
@@ -19,15 +19,35 @@ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
 __kernel_old_time_t __vdso_time(__kernel_old_time_t *t);
 long __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused);
 
+#ifdef CONFIG_MMU
+#define __VDSO_SYSCALL1(sysnr, ret, a0)                 \
+       asm("syscall"                                    \
+           : "=a" (ret)                                 \
+           : "0" (sysnr), "D" (a0)                      \
+           : "rcx", "r11", "memory")
+#define __VDSO_SYSCALL2(sysnr, ret, a0, a1)             \
+       asm("syscall"                                    \
+           : "=a" (ret)                                 \
+           : "0" (sysnr), "D" (a0), "S" (a1)            \
+           : "rcx", "r11", "memory")
+#else
+#define __VDSO_SYSCALL1(sysnr, ret, a0)                 \
+       asm("call *%%rax"                                \
+           : "=a" (ret)                                 \
+           : "a" (sysnr), "D" (a0)                      \
+           : "rcx", "r11", "memory")
+#define __VDSO_SYSCALL2(sysnr, ret, a0, a1)             \
+       asm("call *%%rax"                                \
+           : "=a" (ret)                                 \
+           : "a" (sysnr), "D" (a0), "S" (a1)            \
+           : "rcx", "r11", "memory")
+#endif
+
 int __vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts)
 {
        long ret;
 
-       asm("syscall"
-               : "=a" (ret)
-               : "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
-               : "rcx", "r11", "memory");
-
+       __VDSO_SYSCALL2(__NR_clock_gettime, ret, clock, ts);
        return ret;
 }
 int clock_gettime(clockid_t, struct __kernel_old_timespec *)
@@ -37,11 +57,7 @@ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
 {
        long ret;
 
-       asm("syscall"
-               : "=a" (ret)
-               : "0" (__NR_gettimeofday), "D" (tv), "S" (tz)
-               : "rcx", "r11", "memory");
-
+       __VDSO_SYSCALL2(__NR_gettimeofday, ret, tv, tz);
        return ret;
 }
 int gettimeofday(struct __kernel_old_timeval *, struct timezone *)
@@ -51,10 +67,7 @@ __kernel_old_time_t __vdso_time(__kernel_old_time_t *t)
 {
        long secs;
 
-       asm volatile("syscall"
-               : "=a" (secs)
-               : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
-
+       __VDSO_SYSCALL1(__NR_time, secs, t);
        return secs;
 }
 __kernel_old_time_t time(__kernel_old_time_t *t) __attribute__((weak, alias("__vdso_time")));
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index f238f7b33cdd..093fed27ad49 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -9,6 +9,7 @@
 #include <asm/page.h>
 #include <asm/elf.h>
 #include <linux/init.h>
+#include <os.h>
 
 static unsigned int __read_mostly vdso_enabled = 1;
 unsigned long um_vdso_addr;
@@ -24,7 +25,9 @@ static int __init init_vdso(void)
 
        BUG_ON(vdso_end - vdso_start > PAGE_SIZE);
 
+#ifdef CONFIG_MMU
        um_vdso_addr = task_size - PAGE_SIZE;
+#endif
 
        vdsop = kmalloc(sizeof(struct page *), GFP_KERNEL);
        if (!vdsop)
@@ -40,6 +43,15 @@ static int __init init_vdso(void)
        copy_page(page_address(um_vdso), vdso_start);
        *vdsop = um_vdso;
 
+#ifndef CONFIG_MMU
+       /* in !MMU mode the vDSO page is directly accessible from userspace */
+       um_vdso_addr = (unsigned long)page_address(um_vdso);
+       os_protect_memory((void *)um_vdso_addr, vdso_end - vdso_start, 1, 0, 1);
+       pr_info("vdso_start=%lx um_vdso_addr=%lx pg_um_vdso=%lx\n",
+              (unsigned long)vdso_start, um_vdso_addr,
+              (unsigned long)page_address(um_vdso));
+#endif
+
        return 0;
 
 oom:
@@ -50,6 +62,7 @@ static int __init init_vdso(void)
 }
 subsys_initcall(init_vdso);
 
+#ifdef CONFIG_MMU
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
        struct vm_area_struct *vma;
@@ -74,3 +87,4 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 
        return IS_ERR(vma) ? PTR_ERR(vma) : 0;
 }
+#endif
-- 
2.43.0

