Author: nwhitehorn
Date: Mon Jan 29 04:33:41 2018
New Revision: 328530
URL: https://svnweb.freebsd.org/changeset/base/328530

Log:
  Remove hard-coded trap-handling logic involving the segmented memory model
  used with hashed page tables on AIM and place it into a new, modular pmap
  function called pmap_decode_kernel_ptr(). This function is the inverse
  of pmap_map_user_ptr(). With POWER9 radix tables, the choice of mapping
  becomes more complex than a simple AIM/BOOKE distinction, so it is best
  to keep this logic in the same place as pmap_map_user_ptr().
  
  Reviewed by:  jhibbits
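
  For context, a minimal sketch of how the caller side is intended to use
  the new function (mirroring the trap_pfault() change in the diff below);
  the helper name and its surrounding declarations are illustrative only
  and are not part of this commit:

  /*
   * Illustrative sketch: resolve a kernel-mode faulting address before
   * the VM fault lookup. pmap_decode_kernel_ptr() reports whether the
   * kernel-visible address is really a remapped user pointer (e.g. one
   * reached through the USER_ADDR segment on AIM) and, if so, rewrites
   * it into the user address space.
   */
  static int
  sketch_resolve_fault_addr(struct trapframe *frame, struct proc *p,
      vm_map_t *mapp, vm_offset_t *vap)
  {
          vm_offset_t eva = frame->dar;   /* faulting data address */
          int is_user;

          if (pmap_decode_kernel_ptr(eva, &is_user, &eva) != 0)
                  return (SIGSEGV);       /* address could not be decoded */

          /* Fault against the user map for remapped user accesses. */
          *mapp = is_user ? &p->p_vmspace->vm_map : kernel_map;
          *vap = trunc_page(eva);
          return (0);
  }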

Modified:
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/include/pmap.h
  head/sys/powerpc/powerpc/mmu_if.m
  head/sys/powerpc/powerpc/pmap_dispatch.c
  head/sys/powerpc/powerpc/trap.c

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c      Mon Jan 29 04:04:52 2018        (r328529)
+++ head/sys/powerpc/aim/mmu_oea.c      Mon Jan 29 04:33:41 2018        (r328530)
@@ -322,6 +322,8 @@ vm_offset_t moea_quick_enter_page(mmu_t mmu, vm_page_t
 void moea_quick_remove_page(mmu_t mmu, vm_offset_t addr);
 static int moea_map_user_ptr(mmu_t mmu, pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+static int moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+    int *is_user, vm_offset_t *decoded_addr);
 
 
 static mmu_method_t moea_methods[] = {
@@ -374,6 +376,7 @@ static mmu_method_t moea_methods[] = {
        MMUMETHOD(mmu_scan_init,        moea_scan_init),
        MMUMETHOD(mmu_dumpsys_map,      moea_dumpsys_map),
        MMUMETHOD(mmu_map_user_ptr,     moea_map_user_ptr),
+       MMUMETHOD(mmu_decode_kernel_ptr, moea_decode_kernel_ptr),
 
        { 0, 0 }
 };
@@ -1583,6 +1586,31 @@ moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const
            (uintptr_t)uaddr >> ADDR_SR_SHFT;
        curthread->td_pcb->pcb_cpu.aim.usr_vsid = vsid;
        __asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(vsid));
+
+       return (0);
+}
+
+/*
+ * Figure out where a given kernel pointer (usually in a fault) points
+ * to from the VM's perspective, potentially remapping into userland's
+ * address space.
+ */
+static int
+moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+    vm_offset_t *decoded_addr)
+{
+       vm_offset_t user_sr;
+
+       if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
+               user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
+               addr &= ADDR_PIDX | ADDR_POFF;
+               addr |= user_sr << ADDR_SR_SHFT;
+               *decoded_addr = addr;
+               *is_user = 1;
+       } else {
+               *decoded_addr = addr;
+               *is_user = 0;
+       }
 
        return (0);
 }

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Mon Jan 29 04:04:52 2018        (r328529)
+++ head/sys/powerpc/aim/mmu_oea64.c    Mon Jan 29 04:33:41 2018        (r328530)
@@ -288,6 +288,8 @@ vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page
 void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);
 static int moea64_map_user_ptr(mmu_t mmu, pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+static int moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+    int *is_user, vm_offset_t *decoded_addr);
 
 
 static mmu_method_t moea64_methods[] = {
@@ -339,6 +341,7 @@ static mmu_method_t moea64_methods[] = {
        MMUMETHOD(mmu_scan_init,        moea64_scan_init),
        MMUMETHOD(mmu_dumpsys_map,      moea64_dumpsys_map),
        MMUMETHOD(mmu_map_user_ptr,     moea64_map_user_ptr),
+       MMUMETHOD(mmu_decode_kernel_ptr, moea64_decode_kernel_ptr),
 
        { 0, 0 }
 };
@@ -1905,6 +1908,31 @@ moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile con
 #else
        __asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(slbv));
 #endif
+
+       return (0);
+}
+
+/*
+ * Figure out where a given kernel pointer (usually in a fault) points
+ * to from the VM's perspective, potentially remapping into userland's
+ * address space.
+ */
+static int
+moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+    vm_offset_t *decoded_addr)
+{
+       vm_offset_t user_sr;
+
+       if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
+               user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
+               addr &= ADDR_PIDX | ADDR_POFF;
+               addr |= user_sr << ADDR_SR_SHFT;
+               *decoded_addr = addr;
+               *is_user = 1;
+       } else {
+               *decoded_addr = addr;
+               *is_user = 0;
+       }
 
        return (0);
 }

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c       Mon Jan 29 04:04:52 2018        (r328529)
+++ head/sys/powerpc/booke/pmap.c       Mon Jan 29 04:33:41 2018        (r328530)
@@ -382,6 +382,8 @@ static int          mmu_booke_change_attr(mmu_t mmu, vm_offset
     vm_size_t sz, vm_memattr_t mode);
 static int             mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+static int             mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+    int *is_user, vm_offset_t *decoded_addr);
 
 
 static mmu_method_t mmu_booke_methods[] = {
@@ -436,6 +438,7 @@ static mmu_method_t mmu_booke_methods[] = {
        MMUMETHOD(mmu_unmapdev,         mmu_booke_unmapdev),
        MMUMETHOD(mmu_change_attr,      mmu_booke_change_attr),
        MMUMETHOD(mmu_map_user_ptr,     mmu_booke_map_user_ptr),
+       MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr),
 
        /* dumpsys() support */
        MMUMETHOD(mmu_dumpsys_map,      mmu_booke_dumpsys_map),
@@ -2288,6 +2291,25 @@ mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile 
        if (klen)
                *klen = ulen;
 
+       return (0);
+}
+
+/*
+ * Figure out where a given kernel pointer (usually in a fault) points
+ * to from the VM's perspective, potentially remapping into userland's
+ * address space.
+ */
+static int
+mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+    vm_offset_t *decoded_addr)
+{
+
+       if (addr < VM_MAXUSER_ADDRESS)
+               *is_user = 1;
+       else
+               *is_user = 0;
+
+       *decoded_addr = addr;
        return (0);
 }
 

Modified: head/sys/powerpc/include/pmap.h
==============================================================================
--- head/sys/powerpc/include/pmap.h     Mon Jan 29 04:04:52 2018        (r328529)
+++ head/sys/powerpc/include/pmap.h     Mon Jan 29 04:33:41 2018        (r328530)
@@ -262,6 +262,8 @@ void                pmap_page_set_memattr(vm_page_t, vm_memattr_t);
 int            pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
 int            pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr,
                    void **kaddr, size_t ulen, size_t *klen);
+int            pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user,
+                   vm_offset_t *decoded_addr);
 void           pmap_deactivate(struct thread *);
 vm_paddr_t     pmap_kextract(vm_offset_t);
 int            pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);

Modified: head/sys/powerpc/powerpc/mmu_if.m
==============================================================================
--- head/sys/powerpc/powerpc/mmu_if.m   Mon Jan 29 04:04:52 2018        (r328529)
+++ head/sys/powerpc/powerpc/mmu_if.m   Mon Jan 29 04:33:41 2018        (r328530)
@@ -840,6 +840,21 @@ METHOD int map_user_ptr {
 };
 
 /**
+ * @brief Decode a kernel pointer, as visible to the current thread,
+ * by setting whether it corresponds to a user or kernel address and
+ * the address in the respective memory maps to which the address as
+ * seen in the kernel corresponds. This is essentially the inverse of
+ * MMU_MAP_USER_PTR() above and is used in kernel-space fault handling.
+ * Returns 0 on success or EFAULT if the address could not be mapped. 
+ */
+METHOD int decode_kernel_ptr {
+       mmu_t           _mmu;
+       vm_offset_t     addr;
+       int             *is_user;
+       vm_offset_t     *decoded_addr;
+};
+
+/**
  * @brief Reverse-map a kernel virtual address
  *
  * @param _va          kernel virtual address to reverse-map
@@ -998,3 +1013,4 @@ METHOD int change_attr {
        vm_size_t       _sz;
        vm_memattr_t    _mode;
 } DEFAULT mmu_null_change_attr;
+
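
For reference, a minimal skeleton of the contract the new method is expected
to satisfy, in the spirit of the Book-E version in this commit (user pointers
are not remapped, so the address passes through unchanged); the function name
here is hypothetical and only illustrates the interface:

/*
 * Illustrative skeleton: set *is_user from the address range, hand the
 * address back unmodified in *decoded_addr, and return 0 on success
 * (or EFAULT if the address cannot be decoded).
 */
static int
example_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
    vm_offset_t *decoded_addr)
{

        *is_user = (addr < VM_MAXUSER_ADDRESS);
        *decoded_addr = addr;
        return (0);
}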

Modified: head/sys/powerpc/powerpc/pmap_dispatch.c
==============================================================================
--- head/sys/powerpc/powerpc/pmap_dispatch.c    Mon Jan 29 04:04:52 2018        (r328529)
+++ head/sys/powerpc/powerpc/pmap_dispatch.c    Mon Jan 29 04:33:41 2018        (r328530)
@@ -520,6 +520,14 @@ pmap_map_user_ptr(pmap_t pm, volatile const void *uadd
        return (MMU_MAP_USER_PTR(mmu_obj, pm, uaddr, kaddr, ulen, klen));
 }
 
+int
+pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user, vm_offset_t *decoded)
+{
+
+       CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
+       return (MMU_DECODE_KERNEL_PTR(mmu_obj, addr, is_user, decoded));
+}
+
 boolean_t
 pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
 {

Modified: head/sys/powerpc/powerpc/trap.c
==============================================================================
--- head/sys/powerpc/powerpc/trap.c     Mon Jan 29 04:04:52 2018        (r328529)
+++ head/sys/powerpc/powerpc/trap.c     Mon Jan 29 04:33:41 2018        (r328530)
@@ -393,7 +393,8 @@ trap(struct trapframe *frame)
                        break;
 #if defined(__powerpc64__) && defined(AIM)
                case EXC_DSE:
-                       if ((frame->dar & SEGMENT_MASK) == USER_ADDR) {
+                       if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0 &&
+                           (frame->dar & SEGMENT_MASK) == USER_ADDR) {
                                __asm __volatile ("slbmte %0, %1" ::
                                        "r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
                                        "r"(USER_SLB_SLBE));
@@ -731,10 +732,7 @@ trap_pfault(struct trapframe *frame, int user)
        struct          proc *p;
        vm_map_t        map;
        vm_prot_t       ftype;
-       int             rv;
-#ifdef AIM
-       register_t      user_sr;
-#endif
+       int             rv, is_user;
 
        td = curthread;
        p = td->td_proc;
@@ -759,21 +757,14 @@ trap_pfault(struct trapframe *frame, int user)
                KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace  NULL"));
                map = &p->p_vmspace->vm_map;
        } else {
-#ifdef BOOKE
-               if (eva < VM_MAXUSER_ADDRESS) {
-#else
-               if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
-#endif
-                       map = &p->p_vmspace->vm_map;
+               rv = pmap_decode_kernel_ptr(eva, &is_user, &eva);
+               if (rv != 0)
+                       return (SIGSEGV);
 
-#ifdef AIM
-                       user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
-                       eva &= ADDR_PIDX | ADDR_POFF;
-                       eva |= user_sr << ADDR_SR_SHFT;
-#endif
-               } else {
+               if (is_user)
+                       map = &p->p_vmspace->vm_map;
+               else
                        map = kernel_map;
-               }
        }
        va = trunc_page(eva);
 