Author: andrew
Date: Thu Apr 13 13:46:01 2017
New Revision: 316756
URL: https://svnweb.freebsd.org/changeset/base/316756

Log:
  In ARMv8.1, ARM added a process state (PSTATE) bit, PAN, that disables
  access to userspace from the kernel. Make use of this to restrict
  userspace access to just the functions that explicitly handle crossing
  the user/kernel boundary.
  
  Reported by:  kib
  Sponsored by: DARPA, AFRL
  Differential Revision:        https://reviews.freebsd.org/D10371

Modified:
  head/sys/arm64/arm64/copyinout.S
  head/sys/arm64/arm64/machdep.c
  head/sys/arm64/arm64/mp_machdep.c
  head/sys/arm64/arm64/support.S
  head/sys/arm64/include/armreg.h
  head/sys/arm64/include/asm.h
  head/sys/arm64/include/cpufunc.h
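
As the log notes, PAN makes direct kernel dereferences of user pointers fault, so user memory has to be reached through the copy routines, which drop PAN only around the access itself. A minimal C sketch of the practical effect (not part of this commit; the helper name is hypothetical):

#include <sys/param.h>
#include <sys/systm.h>

/*
 * Hypothetical helper: fetch an int from userspace.  With PAN enabled,
 * "*kval = *uaddr" in kernel context would take a fault; copyin() clears
 * PAN for the duration of the copy and sets it again on return.
 */
static int
read_user_int(const int *uaddr, int *kval)
{

        return (copyin(uaddr, kval, sizeof(*kval)));
}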

Modified: head/sys/arm64/arm64/copyinout.S
==============================================================================
--- head/sys/arm64/arm64/copyinout.S    Thu Apr 13 11:56:27 2017        (r316755)
+++ head/sys/arm64/arm64/copyinout.S    Thu Apr 13 13:46:01 2017        (r316756)
@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
  */
 ENTRY(copyio_fault)
        SET_FAULT_HANDLER(xzr, x1) /* Clear the handler */
+       EXIT_USER_ACCESS_CHECK(w0, x1)
 copyio_fault_nopcb:
        mov     x0, #EFAULT
        ret
@@ -99,6 +100,7 @@ ENTRY(copyinstr)
 
        adr     x6, copyio_fault /* Get the handler address */
        SET_FAULT_HANDLER(x6, x7) /* Set the handler */
+       ENTER_USER_ACCESS(w6, x7)
 
        ldr     x7, =VM_MAXUSER_ADDRESS
 1:     cmp     x0, x7
@@ -111,7 +113,9 @@ ENTRY(copyinstr)
        sub     x2, x2, #1      /* len-- */
        cbnz    x2, 1b
 
-2:     SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */
+2:     EXIT_USER_ACCESS(w6)
+       SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */
+
 
 3:     cbz     x3, 4f          /* Check if done != NULL */
        str     x5, [x3]        /* done = count */
@@ -145,7 +149,7 @@ END(copyinstr)
 copycommon:
        adr     x6, copyio_fault /* Get the handler address */
        SET_FAULT_HANDLER(x6, x7) /* Set the handler */
-
+       ENTER_USER_ACCESS(w6, x7)
 
        /* Check alignment */
        orr     x3, x0, x1
@@ -214,6 +218,7 @@ last_byte:
        strb    w3, [x1]
 
 ending:
+       EXIT_USER_ACCESS_CHECK(w6, x7)
        SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */
 
        mov     x0, xzr         /* return 0 */

Modified: head/sys/arm64/arm64/machdep.c
==============================================================================
--- head/sys/arm64/arm64/machdep.c      Thu Apr 13 11:56:27 2017        (r316755)
+++ head/sys/arm64/arm64/machdep.c      Thu Apr 13 13:46:01 2017        (r316756)
@@ -118,6 +118,7 @@ int64_t dcache_line_size;   /* The minimum
 int64_t icache_line_size;      /* The minimum I cache line size */
 int64_t idcache_line_size;     /* The minimum cache line size */
 int64_t dczva_line_size;       /* The size of cache line the dc zva zeroes */
+int has_pan;
 
 /* pagezero_* implementations are provided in support.S */
 void pagezero_simple(void *);
@@ -127,6 +128,37 @@ void pagezero_cache(void *);
 void (*pagezero)(void *p) = pagezero_simple;
 
 static void
+pan_setup(void)
+{
+       uint64_t id_aa64mfr1;
+
+       id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
+       if (ID_AA64MMFR1_PAN(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
+               has_pan = 1;
+}
+
+void
+pan_enable(void)
+{
+
+       /*
+        * The LLVM integrated assembler doesn't understand the PAN
+        * PSTATE field. Because of this we need to manually create
+        * the instruction in an asm block. This is equivalent to:
+        * msr pan, #1
+        *
+        * This sets the PAN bit, stopping the kernel from accessing
+        * memory when userspace can also access it unless the kernel
+        * uses the userspace load/store instructions.
+        */
+       if (has_pan) {
+               WRITE_SPECIALREG(sctlr_el1,
+                   READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
+               __asm __volatile(".inst 0xd500409f | (0x1 << 8)");
+       }
+}
+
+static void
 cpu_startup(void *dummy)
 {
 
@@ -997,6 +1029,7 @@ initarm(struct arm64_bootparams *abp)
        init_param1();
 
        cache_setup();
+       pan_setup();
 
        /* Bootstrap enough of pmap  to enter the kernel proper */
        pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
@@ -1019,6 +1052,7 @@ initarm(struct arm64_bootparams *abp)
 
        dbg_monitor_init();
        kdb_init();
+       pan_enable();
 
        early_boot = 0;
 }
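
The .inst constant in pan_enable() is the MSR (immediate) encoding for the PAN PSTATE field: the four-bit immediate is carried in CRm, bits 11:8 of the instruction word, so OR-ing (imm << 8) into 0xd500409f yields "msr pan, #imm". A small stand-alone sketch of that arithmetic (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* 0xd500409f is "msr pan, #0"; the immediate lives in CRm (bits 11:8). */
static uint32_t
msr_pan_imm(uint32_t imm)
{

        return (0xd500409fU | ((imm & 0x1U) << 8));
}

int
main(void)
{

        printf("msr pan, #0 -> .inst 0x%08x\n", msr_pan_imm(0));
        printf("msr pan, #1 -> .inst 0x%08x\n", msr_pan_imm(1)); /* 0xd500419f */
        return (0);
}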

Modified: head/sys/arm64/arm64/mp_machdep.c
==============================================================================
--- head/sys/arm64/arm64/mp_machdep.c   Thu Apr 13 11:56:27 2017        (r316755)
+++ head/sys/arm64/arm64/mp_machdep.c   Thu Apr 13 13:46:01 2017        (r316756)
@@ -272,6 +272,7 @@ init_secondary(uint64_t cpu)
 #endif
 
        dbg_monitor_init();
+       pan_enable();
 
        /* Enable interrupts */
        intr_enable();

Modified: head/sys/arm64/arm64/support.S
==============================================================================
--- head/sys/arm64/arm64/support.S      Thu Apr 13 11:56:27 2017        (r316755)
+++ head/sys/arm64/arm64/support.S      Thu Apr 13 13:46:01 2017        (r316756)
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
  */
 ENTRY(fsu_fault)
        SET_FAULT_HANDLER(xzr, x1)      /* Reset the handler function */
+       EXIT_USER_ACCESS_CHECK(w0, x1)
 fsu_fault_nopcb:
        mov     x0, #-1
        ret
@@ -57,11 +58,13 @@ ENTRY(casueword32)
        b.cs    fsu_fault_nopcb
        adr     x6, fsu_fault           /* Load the fault handler */
        SET_FAULT_HANDLER(x6, x4)       /* And set it */
+       ENTER_USER_ACCESS(w6, x4)
 1:     ldxr    w4, [x0]                /* Load-exclusive the data */
        cmp     w4, w1                  /* Compare */
        b.ne    2f                      /* Not equal, exit */
        stxr    w5, w3, [x0]            /* Store the new data */
        cbnz    w5, 1b                  /* Retry on failure */
+       EXIT_USER_ACCESS(w6)
 2:     SET_FAULT_HANDLER(xzr, x5)      /* Reset the fault handler */
        str     w4, [x2]                /* Store the read data */
        mov     x0, #0                  /* Success */
@@ -77,11 +80,13 @@ ENTRY(casueword)
        b.cs    fsu_fault_nopcb
        adr     x6, fsu_fault           /* Load the fault handler */
        SET_FAULT_HANDLER(x6, x4)       /* And set it */
+       ENTER_USER_ACCESS(w6, x4)
 1:     ldxr    x4, [x0]                /* Load-exclusive the data */
        cmp     x4, x1                  /* Compare */
        b.ne    2f                      /* Not equal, exit */
        stxr    w5, x3, [x0]            /* Store the new data */
        cbnz    w5, 1b                  /* Retry on failure */
+       EXIT_USER_ACCESS(w6)
 2:     SET_FAULT_HANDLER(xzr, x5)      /* Reset the fault handler */
        str     x4, [x2]                /* Store the read data */
        mov     x0, #0                  /* Success */
@@ -224,6 +229,7 @@ END(suword)
  */
 ENTRY(fsu_intr_fault)
        SET_FAULT_HANDLER(xzr, x1)      /* Reset the handler function */
+       EXIT_USER_ACCESS_CHECK(w0, x1)
        mov     x0, #-1
        ret
 END(fsu_fault)

Modified: head/sys/arm64/include/armreg.h
==============================================================================
--- head/sys/arm64/include/armreg.h     Thu Apr 13 11:56:27 2017        (r316755)
+++ head/sys/arm64/include/armreg.h     Thu Apr 13 13:46:01 2017        (r316756)
@@ -312,6 +312,7 @@
 #define        ID_AA64MMFR1_PAN(x)             ((x) & ID_AA64MMFR1_PAN_MASK)
 #define         ID_AA64MMFR1_PAN_NONE          (0x0 << ID_AA64MMFR1_PAN_SHIFT)
 #define         ID_AA64MMFR1_PAN_IMPL          (0x1 << ID_AA64MMFR1_PAN_SHIFT)
+#define         ID_AA64MMFR1_PAN_ATS1E1        (0x2 << ID_AA64MMFR1_PAN_SHIFT)
 
 /* ID_AA64PFR0_EL1 */
 #define        ID_AA64PFR0_MASK                0x0fffffff
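
The PAN field occupies bits 23:20 of ID_AA64MMFR1_EL1; a value of 1 means PAN is implemented, and the new ID_AA64MMFR1_PAN_ATS1E1 value (2) additionally indicates the PAN-aware AT S1E1RP/S1E1WP address translation instructions. A hedged C sketch of the decode pan_setup() performs (the shift value below is assumed from the architecture description, not copied from armreg.h):

#include <stdint.h>

#define PAN_SHIFT       20                      /* assumed: PAN field is bits 23:20 */
#define PAN_MASK        (UINT64_C(0xf) << PAN_SHIFT)

/* Returns nonzero when the PAN field reports the feature as implemented. */
static int
cpu_has_pan(uint64_t id_aa64mmfr1)
{

        return (((id_aa64mmfr1 & PAN_MASK) >> PAN_SHIFT) != 0);
}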

Modified: head/sys/arm64/include/asm.h
==============================================================================
--- head/sys/arm64/include/asm.h        Thu Apr 13 11:56:27 2017        (r316755)
+++ head/sys/arm64/include/asm.h        Thu Apr 13 13:46:01 2017        (r316756)
@@ -71,4 +71,23 @@
        ldr     tmp, [tmp, #TD_PCB];            /* Load the pcb */      \
        str     handler, [tmp, #PCB_ONFAULT]    /* Set the handler */
 
+#define        ENTER_USER_ACCESS(reg, tmp)                                     \
+       ldr     tmp, =has_pan;                  /* Get the addr of has_pan */ \
+       ldr     reg, [tmp];                     /* Read it */           \
+       cbz     reg, 997f;                      /* If no PAN skip */    \
+       .inst   0xd500409f | (0 << 8);          /* Clear PAN */         \
+       997:
+
+#define        EXIT_USER_ACCESS(reg)                                           \
+       cbz     reg, 998f;                      /* If no PAN skip */    \
+       .inst   0xd500409f | (1 << 8);          /* Set PAN */           \
+       998:
+
+#define        EXIT_USER_ACCESS_CHECK(reg, tmp)                                \
+       ldr     tmp, =has_pan;                  /* Get the addr of has_pan */ \
+       ldr     reg, [tmp];                     /* Read it */           \
+       cbz     reg, 999f;                      /* If no PAN skip */    \
+       .inst   0xd500409f | (1 << 8);          /* Set PAN */           \
+       999:
+
 #endif /* _MACHINE_ASM_H_ */
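
For readers who find C easier to follow than assembler macros, a rough C equivalent of the ENTER/EXIT_USER_ACCESS pattern above (the inline function names are hypothetical; the kernel uses the asm macros, not these):

extern int has_pan;             /* set by pan_setup() when PAN is implemented */

/* Clear PSTATE.PAN so the kernel may temporarily touch user memory. */
static __inline void
enter_user_access(void)
{

        if (has_pan)
                __asm __volatile(".inst 0xd500409f | (0 << 8)");  /* msr pan, #0 */
}

/* Set PSTATE.PAN again once the user access is finished. */
static __inline void
exit_user_access(void)
{

        if (has_pan)
                __asm __volatile(".inst 0xd500409f | (1 << 8)");  /* msr pan, #1 */
}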

Modified: head/sys/arm64/include/cpufunc.h
==============================================================================
--- head/sys/arm64/include/cpufunc.h    Thu Apr 13 11:56:27 2017        (r316755)
+++ head/sys/arm64/include/cpufunc.h    Thu Apr 13 13:46:01 2017        (r316756)
@@ -33,6 +33,8 @@
 
 #include <machine/armreg.h>
 
+void pan_enable(void);
+
 static __inline void
 breakpoint(void)
 {