Author: nwhitehorn
Date: Sun Oct 17 15:22:59 2010
New Revision: 213979
URL: http://svn.freebsd.org/changeset/base/213979

Log:
  MFC r213307,213335:
  
  Add support for memory attributes (pmap_mapdev_attr() and friends) on
  PowerPC/AIM. This is currently stubbed out on Book-E, since I have no
  idea how to implement it there.
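  
  For illustration, a hypothetical consumer of the new interface might
  look like the sketch below. The physical addresses and sizes are
  invented; pmap_mapdev_attr() and the VM_MEMATTR_* constants are what
  this change adds, while pmap_unmapdev() is pre-existing:
  
  	/* Map one page of device registers cache-inhibited and guarded. */
  	volatile uint32_t *regs;
  	regs = pmap_mapdev_attr(0xc0000000, PAGE_SIZE,
  	    VM_MEMATTR_UNCACHEABLE);
  
  	/* Map a frame buffer with the prefetchable attribute. */
  	void *fb;
  	fb = pmap_mapdev_attr(0xc8000000, 16 * PAGE_SIZE,
  	    VM_MEMATTR_PREFETCHABLE);
  
  	/* ... device I/O through regs and fb ... */
  
  	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
  	pmap_unmapdev((vm_offset_t)fb, 16 * PAGE_SIZE);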

Modified:
  stable/8/sys/powerpc/aim/mmu_oea.c
  stable/8/sys/powerpc/aim/mmu_oea64.c
  stable/8/sys/powerpc/include/pmap.h
  stable/8/sys/powerpc/include/vm.h
  stable/8/sys/powerpc/powerpc/bus_machdep.c
  stable/8/sys/powerpc/powerpc/mmu_if.m
  stable/8/sys/powerpc/powerpc/pmap_dispatch.c
Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/amd64/include/xen/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)
  stable/8/sys/contrib/dev/acpica/   (props changed)
  stable/8/sys/contrib/pf/   (props changed)
  stable/8/sys/dev/xen/xenpci/   (props changed)

Modified: stable/8/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- stable/8/sys/powerpc/aim/mmu_oea.c  Sun Oct 17 14:48:53 2010        (r213978)
+++ stable/8/sys/powerpc/aim/mmu_oea.c  Sun Oct 17 15:22:59 2010        (r213979)
@@ -221,8 +221,6 @@ u_int               moea_pteg_mask;
 struct pvo_head *moea_pvo_table;               /* pvo entries by pteg index */
 struct pvo_head moea_pvo_kunmanaged =
     LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);        /* list of unmanaged pages */
-struct pvo_head moea_pvo_unmanaged =
-    LIST_HEAD_INITIALIZER(moea_pvo_unmanaged); /* list of unmanaged pages */
 
 uma_zone_t     moea_upvo_zone; /* zone for pvo entries for unmanaged pages */
 uma_zone_t     moea_mpvo_zone; /* zone for pvo entries for managed pages */
@@ -327,9 +325,12 @@ void moea_deactivate(mmu_t, struct threa
 void moea_cpu_bootstrap(mmu_t, int);
 void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
 void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t);
+void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
 void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
 vm_offset_t moea_kextract(mmu_t, vm_offset_t);
+void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t);
 void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
+void moea_page_set_memattr(mmu_t, vm_page_t, vm_memattr_t);
 boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
 static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
 
@@ -364,14 +365,17 @@ static mmu_method_t moea_methods[] = {
        MMUMETHOD(mmu_zero_page_idle,   moea_zero_page_idle),
        MMUMETHOD(mmu_activate,         moea_activate),
        MMUMETHOD(mmu_deactivate,       moea_deactivate),
+       MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr),
 
        /* Internal interfaces */
        MMUMETHOD(mmu_bootstrap,        moea_bootstrap),
        MMUMETHOD(mmu_cpu_bootstrap,    moea_cpu_bootstrap),
+       MMUMETHOD(mmu_mapdev_attr,      moea_mapdev_attr),
        MMUMETHOD(mmu_mapdev,           moea_mapdev),
        MMUMETHOD(mmu_unmapdev,         moea_unmapdev),
        MMUMETHOD(mmu_kextract,         moea_kextract),
        MMUMETHOD(mmu_kenter,           moea_kenter),
+       MMUMETHOD(mmu_kenter_attr,      moea_kenter_attr),
        MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
 
        { 0, 0 }
@@ -384,6 +388,41 @@ static mmu_def_t oea_mmu = {
 };
 MMU_DEF(oea_mmu);
 
+static __inline uint32_t
+moea_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
+{
+       uint32_t pte_lo;
+       int i;
+
+       if (ma != VM_MEMATTR_DEFAULT) {
+               switch (ma) {
+               case VM_MEMATTR_UNCACHEABLE:
+                       return (PTE_I | PTE_G);
+               case VM_MEMATTR_WRITE_COMBINING:
+               case VM_MEMATTR_WRITE_BACK:
+               case VM_MEMATTR_PREFETCHABLE:
+                       return (PTE_I);
+               case VM_MEMATTR_WRITE_THROUGH:
+                       return (PTE_W | PTE_M);
+               }
+       }
+
+       /*
+        * Assume the page is cache inhibited and access is guarded unless
+        * it's in our available memory array.
+        */
+       pte_lo = PTE_I | PTE_G;
+       for (i = 0; i < pregions_sz; i++) {
+               if ((pa >= pregions[i].mr_start) &&
+                   (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
+                       pte_lo = PTE_M;
+                       break;
+               }
+       }
+
+       return (pte_lo);
+}
+
 static void
 tlbie(vm_offset_t va)
 {
@@ -425,22 +464,6 @@ va_to_pteg(u_int sr, vm_offset_t addr)
 }
 
 static __inline struct pvo_head *
-pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
-{
-       struct  vm_page *pg;
-
-       pg = PHYS_TO_VM_PAGE(pa);
-
-       if (pg_p != NULL)
-               *pg_p = pg;
-
-       if (pg == NULL)
-               return (&moea_pvo_unmanaged);
-
-       return (&pg->md.mdpg_pvoh);
-}
-
-static __inline struct pvo_head *
 vm_page_to_pvoh(vm_page_t m)
 {
 
@@ -881,6 +904,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t k
                        struct  vm_page m;
 
                        m.phys_addr = translations[i].om_pa + off;
+                       m.md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
                        PMAP_LOCK(&ofw_pmap);
                        moea_enter_locked(&ofw_pmap,
                                   translations[i].om_va + off, &m,
@@ -1087,7 +1111,7 @@ moea_enter_locked(pmap_t pmap, vm_offset
        struct          pvo_head *pvo_head;
        uma_zone_t      zone;
        vm_page_t       pg;
-       u_int           pte_lo, pvo_flags, was_exec, i;
+       u_int           pte_lo, pvo_flags, was_exec;
        int             error;
 
        if (!moea_initialized) {
@@ -1126,19 +1150,7 @@ moea_enter_locked(pmap_t pmap, vm_offset
                }
        }
 
-       /*
-        * Assume the page is cache inhibited and access is guarded unless
-        * it's in our available memory array.
-        */
-       pte_lo = PTE_I | PTE_G;
-       for (i = 0; i < pregions_sz; i++) {
-               if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
-                   (VM_PAGE_TO_PHYS(m) < 
-                       (pregions[i].mr_start + pregions[i].mr_size))) {
-                       pte_lo = PTE_M;
-                       break;
-               }
-       }
+       pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
 
        if (prot & VM_PROT_WRITE) {
                pte_lo |= PTE_BW;
@@ -1371,14 +1383,60 @@ moea_ts_referenced(mmu_t mmu, vm_page_t 
 }
 
 /*
+ * Modify the WIMG settings of all mappings for a page.
+ */
+void
+moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
+{
+       struct  pvo_entry *pvo;
+       struct  pvo_head *pvo_head;
+       struct  pte *pt;
+       pmap_t  pmap;
+       u_int   lo;
+
+       if (m->flags & PG_FICTITIOUS) {
+               m->md.mdpg_cache_attrs = ma;
+               return;
+       }
+
+       vm_page_lock_queues();
+       pvo_head = vm_page_to_pvoh(m);
+       lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
+
+       LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
+               pmap = pvo->pvo_pmap;
+               PMAP_LOCK(pmap);
+               pt = moea_pvo_to_pte(pvo, -1);
+               pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG;
+               pvo->pvo_pte.pte.pte_lo |= lo;
+               if (pt != NULL) {
+                       moea_pte_change(pt, &pvo->pvo_pte.pte,
+                           pvo->pvo_vaddr);
+                       mtx_unlock(&moea_table_mutex);
+                       if (pvo->pvo_pmap == kernel_pmap)
+                               isync();
+               }
+               PMAP_UNLOCK(pmap);
+       }
+       m->md.mdpg_cache_attrs = ma;
+       vm_page_unlock_queues();
+}
+
+/*
  * Map a wired page into kernel virtual address space.
  */
 void
 moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
 {
+
+       moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
+}
+
+void
+moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
+{
        u_int           pte_lo;
        int             error;  
-       int             i;
 
 #if 0
        if (va < VM_MIN_KERNEL_ADDRESS)
@@ -1386,14 +1444,7 @@ moea_kenter(mmu_t mmu, vm_offset_t va, v
                    va);
 #endif
 
-       pte_lo = PTE_I | PTE_G;
-       for (i = 0; i < pregions_sz; i++) {
-               if ((pa >= pregions[i].mr_start) &&
-                   (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
-                       pte_lo = PTE_M;
-                       break;
-               }
-       }       
+       pte_lo = moea_calc_wimg(pa, ma);
 
        PMAP_LOCK(kernel_pmap);
        error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
@@ -2382,6 +2433,13 @@ moea_dev_direct_mapped(mmu_t mmu, vm_off
 void *
 moea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
 {
+
+       return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
+}
+
+void *
+moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
+{
        vm_offset_t va, tmpva, ppa, offset;
        int i;
 
@@ -2404,7 +2462,7 @@ moea_mapdev(mmu_t mmu, vm_offset_t pa, v
                panic("moea_mapdev: Couldn't alloc kernel virtual memory");
 
        for (tmpva = va; size > 0;) {
-               moea_kenter(mmu, tmpva, ppa);
+               moea_kenter_attr(mmu, tmpva, ppa, ma);
                tlbie(tmpva);
                size -= PAGE_SIZE;
                tmpva += PAGE_SIZE;

Modified: stable/8/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- stable/8/sys/powerpc/aim/mmu_oea64.c        Sun Oct 17 14:48:53 2010        (r213978)
+++ stable/8/sys/powerpc/aim/mmu_oea64.c        Sun Oct 17 15:22:59 2010        (r213979)
@@ -290,11 +290,8 @@ u_int              moea64_pteg_mask;
  * PVO data.
  */
 struct pvo_head *moea64_pvo_table;             /* pvo entries by pteg index */
-/* lists of unmanaged pages */
-struct pvo_head moea64_pvo_kunmanaged =
+struct pvo_head moea64_pvo_kunmanaged =        /* list of unmanaged pages */
     LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);
-struct pvo_head moea64_pvo_unmanaged =
-    LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged);
 
 uma_zone_t     moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
 uma_zone_t     moea64_mpvo_zone; /* zone for pvo entries for managed pages */
@@ -399,8 +396,11 @@ void moea64_zero_page_idle(mmu_t, vm_pag
 void moea64_activate(mmu_t, struct thread *);
 void moea64_deactivate(mmu_t, struct thread *);
 void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
+void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
 void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
 vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
+void moea64_page_set_memattr(mmu_t, vm_page_t, vm_memattr_t);
+void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t);
 void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
 boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
 static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
@@ -436,14 +436,17 @@ static mmu_method_t moea64_bridge_method
        MMUMETHOD(mmu_zero_page_idle,   moea64_zero_page_idle),
        MMUMETHOD(mmu_activate,         moea64_activate),
        MMUMETHOD(mmu_deactivate,       moea64_deactivate),
+       MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr),
 
        /* Internal interfaces */
        MMUMETHOD(mmu_bootstrap,        moea64_bridge_bootstrap),
        MMUMETHOD(mmu_cpu_bootstrap,    moea64_bridge_cpu_bootstrap),
        MMUMETHOD(mmu_mapdev,           moea64_mapdev),
+       MMUMETHOD(mmu_mapdev_attr,      moea64_mapdev_attr),
        MMUMETHOD(mmu_unmapdev,         moea64_unmapdev),
        MMUMETHOD(mmu_kextract,         moea64_kextract),
        MMUMETHOD(mmu_kenter,           moea64_kenter),
+       MMUMETHOD(mmu_kenter_attr,      moea64_kenter_attr),
        MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
 
        { 0, 0 }
@@ -467,22 +470,6 @@ va_to_pteg(uint64_t vsid, vm_offset_t ad
 }
 
 static __inline struct pvo_head *
-pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
-{
-       struct  vm_page *pg;
-
-       pg = PHYS_TO_VM_PAGE(pa);
-
-       if (pg_p != NULL)
-               *pg_p = pg;
-
-       if (pg == NULL)
-               return (&moea64_pvo_unmanaged);
-
-       return (&pg->md.mdpg_pvoh);
-}
-
-static __inline struct pvo_head *
 vm_page_to_pvoh(vm_page_t m)
 {
 
@@ -608,11 +595,24 @@ moea64_pte_change(struct lpte *pt, struc
 }
 
 static __inline uint64_t
-moea64_calc_wimg(vm_offset_t pa)
+moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
 {
        uint64_t pte_lo;
        int i;
 
+       if (ma != VM_MEMATTR_DEFAULT) {
+               switch (ma) {
+               case VM_MEMATTR_UNCACHEABLE:
+                       return (LPTE_I | LPTE_G);
+               case VM_MEMATTR_WRITE_COMBINING:
+               case VM_MEMATTR_WRITE_BACK:
+               case VM_MEMATTR_PREFETCHABLE:
+                       return (LPTE_I);
+               case VM_MEMATTR_WRITE_THROUGH:
+                       return (LPTE_W | LPTE_M);
+               }
+       }
+
        /*
         * Assume the page is cache inhibited and access is guarded unless
         * it's in our available memory array.
@@ -1112,7 +1112,7 @@ void moea64_set_scratchpage_pa(int which
        moea64_scratchpage_pte[which]->pte_lo &= 
            ~(LPTE_WIMG | LPTE_RPGN);
        moea64_scratchpage_pte[which]->pte_lo |=
-           moea64_calc_wimg(pa) | (uint64_t)pa;
+           moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
        EIEIO();
 
        moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID;
@@ -1243,7 +1243,7 @@ moea64_enter_locked(pmap_t pmap, vm_offs
                zone = moea64_upvo_zone;
        }
 
-       pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m));
+       pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
 
        if (prot & VM_PROT_WRITE) {
                pte_lo |= LPTE_BW;
@@ -1560,22 +1560,55 @@ moea64_ts_referenced(mmu_t mmu, vm_page_
 }
 
 /*
+ * Modify the WIMG settings of all mappings for a page.
+ */
+void
+moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
+{
+       struct  pvo_entry *pvo;
+       struct  pvo_head *pvo_head;
+       struct  lpte *pt;
+       pmap_t  pmap;
+       uint64_t lo;
+
+       if (m->flags & PG_FICTITIOUS) {
+               m->md.mdpg_cache_attrs = ma;
+               return;
+       }
+
+       vm_page_lock_queues();
+       pvo_head = vm_page_to_pvoh(m);
+       lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
+       LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
+               pmap = pvo->pvo_pmap;
+               PMAP_LOCK(pmap);
+               LOCK_TABLE();
+               pt = moea64_pvo_to_pte(pvo, -1);
+               pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
+               pvo->pvo_pte.lpte.pte_lo |= lo;
+               if (pt != NULL) {
+                       moea64_pte_change(pt, &pvo->pvo_pte.lpte,
+                           pvo->pvo_pmap, PVO_VADDR(pvo));
+                       if (pvo->pvo_pmap == kernel_pmap)
+                               isync();
+               }
+               UNLOCK_TABLE();
+               PMAP_UNLOCK(pmap);
+       }
+       m->md.mdpg_cache_attrs = ma;
+       vm_page_unlock_queues();
+}
+
+/*
  * Map a wired page into kernel virtual address space.
  */
 void
-moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
+moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
 {
        uint64_t        pte_lo;
        int             error;  
 
-#if 0
-       if (!pmap_bootstrapped) {
-               if (va >= VM_MIN_KERNEL_ADDRESS && va < virtual_end)
-                       panic("Trying to enter an address in KVA -- %#x!\n",pa);
-       }
-#endif
-
-       pte_lo = moea64_calc_wimg(pa);
+       pte_lo = moea64_calc_wimg(pa, ma);
 
        PMAP_LOCK(kernel_pmap);
        error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
@@ -1595,6 +1628,13 @@ moea64_kenter(mmu_t mmu, vm_offset_t va,
        PMAP_UNLOCK(kernel_pmap);
 }
 
+void
+moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
+{
+
+       moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
+}
+
 /*
  * Extract the physical page address associated with the given kernel virtual
  * address.
@@ -2477,7 +2517,7 @@ moea64_dev_direct_mapped(mmu_t mmu, vm_o
  * NOT real memory.
  */
 void *
-moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
+moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
 {
        vm_offset_t va, tmpva, ppa, offset;
 
@@ -2491,7 +2531,7 @@ moea64_mapdev(mmu_t mmu, vm_offset_t pa,
                panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
 
        for (tmpva = va; size > 0;) {
-               moea64_kenter(mmu, tmpva, ppa);
+               moea64_kenter_attr(mmu, tmpva, ppa, ma);
                size -= PAGE_SIZE;
                tmpva += PAGE_SIZE;
                ppa += PAGE_SIZE;
@@ -2500,6 +2540,13 @@ moea64_mapdev(mmu_t mmu, vm_offset_t pa,
        return ((void *)(va + offset));
 }
 
+void *
+moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
+{
+
+       return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
+}
+
 void
 moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
 {

Modified: stable/8/sys/powerpc/include/pmap.h
==============================================================================
--- stable/8/sys/powerpc/include/pmap.h Sun Oct 17 14:48:53 2010        (r213978)
+++ stable/8/sys/powerpc/include/pmap.h Sun Oct 17 15:22:59 2010        (r213979)
@@ -109,13 +109,13 @@ struct pvo_entry {
 LIST_HEAD(pvo_head, pvo_entry);
 
 struct md_page {
-       u_int64_t mdpg_attrs;
+       u_int64_t        mdpg_attrs;
+       vm_memattr_t     mdpg_cache_attrs;
        struct  pvo_head mdpg_pvoh;
 };
 
-#define        pmap_page_get_memattr(m)        VM_MEMATTR_DEFAULT
+#define        pmap_page_get_memattr(m)        ((m)->md.mdpg_cache_attrs)
 #define        pmap_page_is_mapped(m)  (!LIST_EMPTY(&(m)->md.mdpg_pvoh))
-#define        pmap_page_set_memattr(m, ma)    (void)0
 
 #else
 
@@ -147,7 +147,6 @@ struct md_page {
 
 #define        pmap_page_get_memattr(m)        VM_MEMATTR_DEFAULT
 #define        pmap_page_is_mapped(m)  (!TAILQ_EMPTY(&(m)->md.pv_list))
-#define        pmap_page_set_memattr(m, ma)    (void)0
 
 #endif /* AIM */
 
@@ -169,9 +168,12 @@ extern     struct pmap kernel_pmap_store;
 
 void           pmap_bootstrap(vm_offset_t, vm_offset_t);
 void           pmap_kenter(vm_offset_t va, vm_offset_t pa);
+void           pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t);
 void           pmap_kremove(vm_offset_t);
 void           *pmap_mapdev(vm_offset_t, vm_size_t);
+void           *pmap_mapdev_attr(vm_offset_t, vm_size_t, vm_memattr_t);
 void           pmap_unmapdev(vm_offset_t, vm_size_t);
+void           pmap_page_set_memattr(vm_page_t, vm_memattr_t);
 void           pmap_deactivate(struct thread *);
 vm_offset_t    pmap_kextract(vm_offset_t);
 int            pmap_dev_direct_mapped(vm_offset_t, vm_size_t);

Modified: stable/8/sys/powerpc/include/vm.h
==============================================================================
--- stable/8/sys/powerpc/include/vm.h   Sun Oct 17 14:48:53 2010        (r213978)
+++ stable/8/sys/powerpc/include/vm.h   Sun Oct 17 15:22:59 2010        (r213979)
@@ -32,11 +32,13 @@
 #include <machine/pte.h>
 
 /* Memory attributes. */
-#define        VM_MEMATTR_CACHING_INHIBIT      ((vm_memattr_t)PTE_I)
-#define        VM_MEMATTR_GUARD                ((vm_memattr_t)PTE_G)
-#define        VM_MEMATTR_MEMORY_COHERENCE     ((vm_memattr_t)PTE_M)
-#define        VM_MEMATTR_WRITE_THROUGH        ((vm_memattr_t)PTE_W)
-
 #define        VM_MEMATTR_DEFAULT              0
+#define        VM_MEMATTR_UNCACHEABLE          0x01
+#define        VM_MEMATTR_UNCACHED             VM_MEMATTR_UNCACHEABLE
+#define        VM_MEMATTR_CACHEABLE            0x02
+#define        VM_MEMATTR_WRITE_COMBINING      0x04
+#define        VM_MEMATTR_WRITE_BACK           0x08
+#define        VM_MEMATTR_WRITE_THROUGH        0x10
+#define        VM_MEMATTR_PREFETCHABLE         0x20
 
 #endif /* !_MACHINE_VM_H_ */
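
For reference, the moea_calc_wimg() added above translates these values
to the PowerPC WIMG storage-control bits as follows (the 64-bit pmap is
identical, using the LPTE_* constants); this is only a restatement of
the switch statement:

	VM_MEMATTR_UNCACHEABLE                   -> PTE_I | PTE_G
	VM_MEMATTR_WRITE_COMBINING, _WRITE_BACK,
	    _PREFETCHABLE                        -> PTE_I
	VM_MEMATTR_WRITE_THROUGH                 -> PTE_W | PTE_M
	VM_MEMATTR_DEFAULT, and any value the
	    switch does not handle (including
	    VM_MEMATTR_CACHEABLE)                -> PTE_M if the address falls
	                                            in the pregions memory
	                                            array, else PTE_I | PTE_G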

Modified: stable/8/sys/powerpc/powerpc/bus_machdep.c
==============================================================================
--- stable/8/sys/powerpc/powerpc/bus_machdep.c  Sun Oct 17 14:48:53 2010        (r213978)
+++ stable/8/sys/powerpc/powerpc/bus_machdep.c  Sun Oct 17 15:22:59 2010        (r213979)
@@ -60,6 +60,7 @@ __FBSDID("$FreeBSD$");
 static struct {
        bus_addr_t addr;
        bus_size_t size;
+       int flags;
 } earlyboot_mappings[MAX_EARLYBOOT_MAPPINGS];
 static int earlyboot_map_idx = 0;
 
@@ -72,9 +73,11 @@ __ppc_ba(bus_space_handle_t bsh, bus_siz
 }
 
 static int
-bs_gen_map(bus_addr_t addr, bus_size_t size __unused, int flags __unused,
+bs_gen_map(bus_addr_t addr, bus_size_t size, int flags,
     bus_space_handle_t *bshp)
 {
+       vm_memattr_t ma;
+
        /*
         * Record what we did if we haven't enabled the MMU yet. We
         * will need to remap it as soon as the MMU comes up.
@@ -84,10 +87,20 @@ bs_gen_map(bus_addr_t addr, bus_size_t s
                    ("%s: too many early boot mapping requests", __func__));
                earlyboot_mappings[earlyboot_map_idx].addr = addr;
                earlyboot_mappings[earlyboot_map_idx].size = size;
+               earlyboot_mappings[earlyboot_map_idx].flags = flags;
                earlyboot_map_idx++;
                *bshp = addr;
        } else {
-               *bshp = (bus_space_handle_t)pmap_mapdev(addr,size);
+               ma = VM_MEMATTR_DEFAULT;
+               switch (flags) {
+                       case BUS_SPACE_MAP_CACHEABLE:
+                               ma = VM_MEMATTR_CACHEABLE;
+                               break;
+                       case BUS_SPACE_MAP_PREFETCHABLE:
+                               ma = VM_MEMATTR_PREFETCHABLE;
+                               break;
+               }
+               *bshp = (bus_space_handle_t)pmap_mapdev_attr(addr, size, ma);
        }
 
        return (0);
@@ -98,6 +111,7 @@ bs_remap_earlyboot(void)
 {
        int i;
        vm_offset_t pa, spa;
+       vm_memattr_t ma;
 
        if (hw_direct_map)
                return;
@@ -105,9 +119,19 @@ bs_remap_earlyboot(void)
        for (i = 0; i < earlyboot_map_idx; i++) {
                spa = earlyboot_mappings[i].addr;
 
+               ma = VM_MEMATTR_DEFAULT;
+               switch (earlyboot_mappings[i].flags) {
+                       case BUS_SPACE_MAP_CACHEABLE:
+                               ma = VM_MEMATTR_CACHEABLE;
+                               break;
+                       case BUS_SPACE_MAP_PREFETCHABLE:
+                               ma = VM_MEMATTR_PREFETCHABLE;
+                               break;
+               }
+
                pa = trunc_page(spa);
                while (pa < spa + earlyboot_mappings[i].size) {
-                       pmap_kenter(pa,pa);
+                       pmap_kenter_attr(pa, pa, ma);
                        pa += PAGE_SIZE;
                }
        }
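
A hypothetical driver-side view of the bs_gen_map() path above (the
tag, BAR address, and size are invented for illustration; the flag is
translated to VM_MEMATTR_PREFETCHABLE and passed to pmap_mapdev_attr()):

	bus_space_handle_t bsh;

	/* Request a prefetchable mapping of a made-up BAR. */
	if (bus_space_map(tag, bar_addr, bar_size,
	    BUS_SPACE_MAP_PREFETCHABLE, &bsh) != 0)
		return (ENXIO);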

Modified: stable/8/sys/powerpc/powerpc/mmu_if.m
==============================================================================
--- stable/8/sys/powerpc/powerpc/mmu_if.m       Sun Oct 17 14:48:53 2010        (r213978)
+++ stable/8/sys/powerpc/powerpc/mmu_if.m       Sun Oct 17 15:22:59 2010        (r213979)
@@ -110,6 +110,24 @@ CODE {
        {
                return (NULL);
        }
+
+       static void *mmu_null_mapdev_attr(mmu_t mmu, vm_offset_t pa,
+           vm_size_t size, vm_memattr_t ma)
+       {
+               return (MMU_MAPDEV(mmu, pa, size));
+       }
+
+       static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
+           vm_offset_t pa, vm_memattr_t ma)
+       {
+               MMU_KENTER(mmu, va, pa);
+       }
+
+       static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
+           vm_memattr_t ma)
+       {
+               return;
+       }
 };
 
 
@@ -732,6 +750,37 @@ METHOD void * mapdev {
        vm_size_t       _size;
 };
 
+/**
+ * @brief Create a kernel mapping for a given physical address range.
+ * Called by bus code on behalf of device drivers. The mapping does not
+ * have to be a virtual address: it can be a direct-mapped physical address
+ * if that is supported by the MMU.
+ *
+ * @param _pa          start physical address
+ * @param _size                size in bytes of mapping
+ * @param _attr                cache attributes
+ *
+ * @retval addr                address of mapping.
+ */
+METHOD void * mapdev_attr {
+       mmu_t           _mmu;
+       vm_offset_t     _pa;
+       vm_size_t       _size;
+       vm_memattr_t    _attr;
+} DEFAULT mmu_null_mapdev_attr;
+
+/**
+ * @brief Change cache control attributes for a page. Should modify all
+ * mappings for that page.
+ *
+ * @param _pg          page to modify
+ * @param _ma          new cache control attributes
+ */
+METHOD void page_set_memattr {
+       mmu_t           _mmu;
+       vm_page_t       _pg;
+       vm_memattr_t    _ma;
+} DEFAULT mmu_null_page_set_memattr;
 
 /**
  * @brief Remove the mapping created by mapdev. Called when a driver
@@ -772,6 +821,19 @@ METHOD void kenter {
        vm_offset_t     _pa;
 };
 
+/**
+ * @brief Map a wired page into kernel virtual address space
+ *
+ * @param _va          mapping virtual address
+ * @param _pa          mapping physical address
+ * @param _ma          mapping cache control attributes
+ */
+METHOD void kenter_attr {
+       mmu_t           _mmu;
+       vm_offset_t     _va;
+       vm_offset_t     _pa;
+       vm_memattr_t    _ma;
+} DEFAULT mmu_null_kenter_attr;
 
 /**
  * @brief Determine if the given physical address range has been direct-mapped.

Modified: stable/8/sys/powerpc/powerpc/pmap_dispatch.c
==============================================================================
--- stable/8/sys/powerpc/powerpc/pmap_dispatch.c        Sun Oct 17 14:48:53 2010        (r213978)
+++ stable/8/sys/powerpc/powerpc/pmap_dispatch.c        Sun Oct 17 15:22:59 2010        (r213979)
@@ -425,6 +425,22 @@ pmap_mapdev(vm_offset_t pa, vm_size_t si
        return (MMU_MAPDEV(mmu_obj, pa, size));
 }
 
+void *
+pmap_mapdev_attr(vm_offset_t pa, vm_size_t size, vm_memattr_t attr)
+{
+
+       CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
+       return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
+}
+
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+
+       CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
+       MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma);
+}
+
 void
 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 {
@@ -449,6 +465,14 @@ pmap_kenter(vm_offset_t va, vm_offset_t 
        MMU_KENTER(mmu_obj, va, pa);
 }
 
+void
+pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
+{
+
+       CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
+       MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
+}
+
 boolean_t
 pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
 {