Author: andrew
Date: Wed Apr 12 16:28:40 2017
New Revision: 316734
URL: https://svnweb.freebsd.org/changeset/base/316734

Log:
  Start to use the User and Privileged execute-never bits in the arm64
  page tables. Both bits are now set when entering a mapping that we know
  should not be executed.
  
  I expect we could mark all userspace pages as Privileged execute-never to
  ensure the kernel doesn't branch to one of these addresses.
  
  While here, add the ARMv8.1 upper attribute definitions.
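
  For illustration, a minimal sketch of how the attribute bits are derived
  from the requested protection (not the committed code; the helper name
  xn_attrs_for_prot is hypothetical, ATTR_* come from the pte.h change
  below and VM_PROT_* from vm/vm.h):

	static pt_entry_t
	xn_attrs_for_prot(vm_prot_t prot)
	{
		pt_entry_t attr = 0;

		if ((prot & VM_PROT_WRITE) == 0)
			attr |= ATTR_AP(ATTR_AP_RO);	/* map read-only */
		if ((prot & VM_PROT_EXECUTE) == 0)
			attr |= ATTR_XN;	/* ATTR_UXN | ATTR_PXN */
		return (attr);
	}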
  
  Reviewed by:  alc, kib (previous version)
  MFC after:    1 week
  Sponsored by: DARPA, AFRL
  Differential Revision:        https://reviews.freebsd.org/D10360

Modified:
  head/sys/arm64/arm64/pmap.c
  head/sys/arm64/include/pte.h

Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c Wed Apr 12 16:21:55 2017        (r316733)
+++ head/sys/arm64/arm64/pmap.c Wed Apr 12 16:28:40 2017        (r316734)
@@ -606,7 +606,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1,
                l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
 
                pmap_load_store(&pagetable_dmap[l1_slot],
-                   (pa & ~L1_OFFSET) | ATTR_DEFAULT |
+                   (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN |
                    ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
        }
 
@@ -2428,14 +2428,16 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
 {
        vm_offset_t va, va_next;
        pd_entry_t *l0, *l1, *l2;
-       pt_entry_t *l3p, l3;
+       pt_entry_t *l3p, l3, nbits;
 
-       if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+       KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
+       if (prot == VM_PROT_NONE) {
                pmap_remove(pmap, sva, eva);
                return;
        }
 
-       if ((prot & VM_PROT_WRITE) == VM_PROT_WRITE)
+       if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
+           (VM_PROT_WRITE | VM_PROT_EXECUTE))
                return;
 
        PMAP_LOCK(pmap);
@@ -2480,17 +2482,25 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
                for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
                    sva += L3_SIZE) {
                        l3 = pmap_load(l3p);
-                       if (pmap_l3_valid(l3)) {
+                       if (!pmap_l3_valid(l3))
+                               continue;
+
+                       nbits = 0;
+                       if ((prot & VM_PROT_WRITE) == 0) {
                                if ((l3 & ATTR_SW_MANAGED) &&
                                    pmap_page_dirty(l3)) {
                                        vm_page_dirty(PHYS_TO_VM_PAGE(l3 &
                                            ~ATTR_MASK));
                                }
-                               pmap_set(l3p, ATTR_AP(ATTR_AP_RO));
-                               PTE_SYNC(l3p);
-                               /* XXX: Use pmap_invalidate_range */
-                               pmap_invalidate_page(pmap, va);
+                               nbits |= ATTR_AP(ATTR_AP_RO);
                        }
+                       if ((prot & VM_PROT_EXECUTE) == 0)
+                               nbits |= ATTR_XN;
+
+                       pmap_set(l3p, nbits);
+                       PTE_SYNC(l3p);
+                       /* XXX: Use pmap_invalidate_range */
+                       pmap_invalidate_page(pmap, va);
                }
        }
        PMAP_UNLOCK(pmap);
@@ -2709,6 +2719,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
            L3_PAGE);
        if ((prot & VM_PROT_WRITE) == 0)
                new_l3 |= ATTR_AP(ATTR_AP_RO);
+       if ((prot & VM_PROT_EXECUTE) == 0)
+               new_l3 |= ATTR_XN;
        if ((flags & PMAP_ENTER_WIRED) != 0)
                new_l3 |= ATTR_SW_WIRED;
        if ((va >> 63) == 0)
@@ -3115,6 +3127,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 
        pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
            ATTR_AP(ATTR_AP_RO) | L3_PAGE;
+       if ((prot & VM_PROT_EXECUTE) == 0)
+               pa |= ATTR_XN;
 
        /*
         * Now validate mapping with RO protection
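
For readability, the reworked pmap_protect() level-3 loop (condensed from
the hunk above, identifiers as in pmap.c) accumulates the new attribute
bits in nbits and applies them with one pmap_set()/PTE_SYNC()/invalidate
per page table entry:

	for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
	    sva += L3_SIZE) {
		l3 = pmap_load(l3p);
		if (!pmap_l3_valid(l3))
			continue;

		nbits = 0;
		if ((prot & VM_PROT_WRITE) == 0) {
			/* Record the dirty state before revoking write access. */
			if ((l3 & ATTR_SW_MANAGED) && pmap_page_dirty(l3))
				vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
			nbits |= ATTR_AP(ATTR_AP_RO);
		}
		if ((prot & VM_PROT_EXECUTE) == 0)
			nbits |= ATTR_XN;

		pmap_set(l3p, nbits);
		PTE_SYNC(l3p);
		pmap_invalidate_page(pmap, va);
	}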

Modified: head/sys/arm64/include/pte.h
==============================================================================
--- head/sys/arm64/include/pte.h        Wed Apr 12 16:21:55 2017        (r316733)
+++ head/sys/arm64/include/pte.h        Wed Apr 12 16:28:40 2017        (r316734)
@@ -46,6 +46,11 @@ typedef      uint64_t        pt_entry_t;             /* page ta
 /* Bits 58:55 are reserved for software */
 #define        ATTR_SW_MANAGED (1UL << 56)
 #define        ATTR_SW_WIRED   (1UL << 55)
+#define        ATTR_UXN        (1UL << 54)
+#define        ATTR_PXN        (1UL << 53)
+#define        ATTR_XN         (ATTR_PXN | ATTR_UXN)
+#define        ATTR_CONTIGUOUS (1UL << 52)
+#define        ATTR_DBM        (1UL << 51)
 #define        ATTR_nG         (1 << 11)
 #define        ATTR_AF         (1 << 10)
 #define        ATTR_SH(x)      ((x) << 8)
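
For reference, a self-contained sketch of the new upper-attribute bit
positions and a hypothetical helper (pte_is_noexec is not part of this
commit) that tests ATTR_XN on a loaded descriptor:

	#include <stdbool.h>
	#include <stdint.h>

	/* Upper attributes of an arm64 stage 1 block/page descriptor. */
	#define	ATTR_UXN	(1UL << 54)	/* Unprivileged (EL0) execute-never */
	#define	ATTR_PXN	(1UL << 53)	/* Privileged (EL1) execute-never */
	#define	ATTR_XN		(ATTR_PXN | ATTR_UXN)
	#define	ATTR_CONTIGUOUS	(1UL << 52)	/* contiguous-range hint */
	#define	ATTR_DBM	(1UL << 51)	/* ARMv8.1 dirty bit modifier */

	/* True when the entry forbids instruction fetch at both EL0 and EL1. */
	static inline bool
	pte_is_noexec(uint64_t pte)
	{
		return ((pte & ATTR_XN) == ATTR_XN);
	}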