Author: marcel
Date: Wed Mar 19 21:30:10 2014
New Revision: 263380
URL: http://svnweb.freebsd.org/changeset/base/263380

Log:
  Add KTR events for the PMAP interface functions:
  1.  move unmapped_buf_allowed to machdep.c.
  2.  map both pmap_mapbios() and pmap_mapdev() to pmap_mapdev_attr()
      and have the actual work done by pmap_mapdev_priv() (renamed from
      pmap_mapdev()). Use pmap_mapdev_priv() to map the I/O port space
      because we can't use CTR() that early.
  3.  add pmap_pinit_common() that's used by both pmap_pinit0() and
      pmap_pinit(). Previously pmap_pinit0() would call pmap_pinit(),
      but that would create 2 KTR events. While here, use pmap_t instead
      of "struct pmap *".
  4.  fix pmap_kenter() to use vm_paddr_t as the type for the physical address.
  5.  various white-space adjustments for consistency.
  6.  use C99 and KNF for function definitions where appropriate.
  7.  slightly re-order prototypes and defines in <machine/pmap.h>.
  
  No functional change (other than the creation of KTR_PMAP events).
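
  For reference, the instrumentation added throughout pmap.c follows the
  standard ktr(9) pattern: include <sys/ktr.h> and emit a CTRn() event in
  the KTR_PMAP class on function entry. A minimal, self-contained sketch of
  that pattern (the function and its argument are hypothetical, not part of
  this commit):

      #include <sys/param.h>
      #include <sys/ktr.h>

      static void
      example_func(void *arg)
      {

              /* Record the call in the KTR ring buffer under KTR_PMAP. */
              CTR2(KTR_PMAP, "%s(%p)", __func__, arg);
      }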

Modified:
  head/sys/ia64/ia64/machdep.c
  head/sys/ia64/ia64/pmap.c
  head/sys/ia64/include/pmap.h

Modified: head/sys/ia64/ia64/machdep.c
==============================================================================
--- head/sys/ia64/ia64/machdep.c        Wed Mar 19 21:03:04 2014        (r263379)
+++ head/sys/ia64/ia64/machdep.c        Wed Mar 19 21:30:10 2014        (r263380)
@@ -133,6 +133,7 @@ SYSCTL_UINT(_hw_freq, OID_AUTO, itc, CTL
     "ITC frequency");
 
 int cold = 1;
+int unmapped_buf_allowed = 0;
 
 struct bootinfo *bootinfo;
 
@@ -746,8 +747,8 @@ ia64_init(void)
                mdlen = md->md_pages * EFI_PAGE_SIZE;
                switch (md->md_type) {
                case EFI_MD_TYPE_IOPORT:
-                       ia64_port_base = (uintptr_t)pmap_mapdev(md->md_phys,
-                           mdlen);
+                       ia64_port_base = pmap_mapdev_priv(md->md_phys,
+                           mdlen, VM_MEMATTR_UNCACHEABLE);
                        break;
                case EFI_MD_TYPE_PALCODE:
                        ia64_pal_base = md->md_phys;

Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c   Wed Mar 19 21:03:04 2014        (r263379)
+++ head/sys/ia64/ia64/pmap.c   Wed Mar 19 21:30:10 2014        (r263380)
@@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
+#include <sys/ktr.h>
 #include <sys/lock.h>
 #include <sys/mman.h>
 #include <sys/mutex.h>
@@ -484,6 +485,8 @@ void
 pmap_page_init(vm_page_t m)
 {
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, m);
+
        TAILQ_INIT(&m->md.pv_list);
        m->md.memattr = VM_MEMATTR_DEFAULT;
 }
@@ -497,6 +500,8 @@ void
 pmap_init(void)
 {
 
+       CTR1(KTR_PMAP, "%s()", __func__);
+
        ptezone = uma_zcreate("PT ENTRY", sizeof (struct ia64_lpte), 
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
 }
@@ -604,13 +609,25 @@ pmap_free_rid(uint32_t rid)
  * Page table page management routines.....
  ***************************************************/
 
+static void
+pmap_pinit_common(pmap_t pmap)
+{
+       int i;
+
+       for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
+               pmap->pm_rid[i] = pmap_allocate_rid();
+       TAILQ_INIT(&pmap->pm_pvchunk);
+       bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+}
+
 void
-pmap_pinit0(struct pmap *pmap)
+pmap_pinit0(pmap_t pmap)
 {
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
+
        PMAP_LOCK_INIT(pmap);
-       /* kernel_pmap is the same as any other pmap. */
-       pmap_pinit(pmap);
+       pmap_pinit_common(pmap);
 }
 
 /*
@@ -618,14 +635,12 @@ pmap_pinit0(struct pmap *pmap)
  * such as one in a vmspace structure.
  */
 int
-pmap_pinit(struct pmap *pmap)
+pmap_pinit(pmap_t pmap)
 {
-       int i;
 
-       for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
-               pmap->pm_rid[i] = pmap_allocate_rid();
-       TAILQ_INIT(&pmap->pm_pvchunk);
-       bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+       CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
+
+       pmap_pinit_common(pmap);
        return (1);
 }
 
@@ -643,6 +658,8 @@ pmap_release(pmap_t pmap)
 {
        int i;
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
+
        for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
                if (pmap->pm_rid[i])
                        pmap_free_rid(pmap->pm_rid[i]);
@@ -658,6 +675,8 @@ pmap_growkernel(vm_offset_t addr)
        struct ia64_lpte *leaf;
        vm_page_t nkpg;
 
+       CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
+
        while (kernel_vm_end <= addr) {
                if (nkpt == PAGE_SIZE/8 + PAGE_SIZE*PAGE_SIZE/64)
                        panic("%s: out of kernel address space", __func__);
@@ -1152,6 +1171,8 @@ pmap_extract(pmap_t pmap, vm_offset_t va
        pmap_t oldpmap;
        vm_paddr_t pa;
 
+       CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
+
        pa = 0;
        PMAP_LOCK(pmap);
        oldpmap = pmap_switch(pmap);
@@ -1178,6 +1199,8 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
        vm_page_t m;
        vm_paddr_t pa;
 
+       CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
+
        pa = 0;
        m = NULL;
        PMAP_LOCK(pmap);
@@ -1359,6 +1382,8 @@ pmap_kextract(vm_offset_t va)
        vm_paddr_t pa;
        u_int idx;
 
+       CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
+
        KASSERT(va >= VM_MAXUSER_ADDRESS, ("Must be kernel VA"));
 
        /* Regions 6 and 7 are direct mapped. */
@@ -1419,6 +1444,8 @@ pmap_qenter(vm_offset_t va, vm_page_t *m
        struct ia64_lpte *pte;
        int i;
 
+       CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, va, m, count);
+
        for (i = 0; i < count; i++) {
                pte = pmap_find_kpte(va);
                if (pmap_present(pte))
@@ -1442,6 +1469,8 @@ pmap_qremove(vm_offset_t va, int count)
        struct ia64_lpte *pte;
        int i;
 
+       CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, va, count);
+
        for (i = 0; i < count; i++) {
                pte = pmap_find_kpte(va);
                if (pmap_present(pte)) {
@@ -1458,10 +1487,12 @@ pmap_qremove(vm_offset_t va, int count)
  * to not have the PTE reflect that, nor update statistics.
  */
 void 
-pmap_kenter(vm_offset_t va, vm_offset_t pa)
+pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 {
        struct ia64_lpte *pte;
 
+       CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
+
        pte = pmap_find_kpte(va);
        if (pmap_present(pte))
                pmap_invalidate_page(va);
@@ -1480,6 +1511,8 @@ pmap_kremove(vm_offset_t va)
 {
        struct ia64_lpte *pte;
 
+       CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
+
        pte = pmap_find_kpte(va);
        if (pmap_present(pte)) {
                pmap_remove_vhpt(va);
@@ -1503,6 +1536,10 @@ pmap_kremove(vm_offset_t va)
 vm_offset_t
 pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 {
+
+       CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
+           prot);
+
        return IA64_PHYS_TO_RR7(start);
 }
 
@@ -1522,6 +1559,8 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
        vm_offset_t va;
        struct ia64_lpte *pte;
 
+       CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
+
        /*
         * Perform an unsynchronized read.  This is, however, safe.
         */
@@ -1553,13 +1592,14 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
  *             inefficient because they iteratively called
  *             pmap_remove (slow...)
  */
-
 void
 pmap_remove_all(vm_page_t m)
 {
        pmap_t oldpmap;
        pv_entry_t pv;
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, m);
+
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_remove_all: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
@@ -1592,6 +1632,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
        pmap_t oldpmap;
        struct ia64_lpte *pte;
 
+       CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva,
+           prot);
+
        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                pmap_remove(pmap, sva, eva);
                return;
@@ -1658,6 +1701,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
        struct ia64_lpte *pte;
        boolean_t icache_inval, managed;
 
+       CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %#x, %p, %#x, %u)", pmap, va,
+           access, m, prot, wired);
+
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        oldpmap = pmap_switch(pmap);
@@ -1789,6 +1835,9 @@ pmap_enter_object(pmap_t pmap, vm_offset
        vm_page_t m;
        vm_pindex_t diff, psize;
 
+       CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
+           end, m_start, prot);
+
        VM_OBJECT_ASSERT_LOCKED(m_start->object);
 
        psize = atop(end - start);
@@ -1813,12 +1862,13 @@ pmap_enter_object(pmap_t pmap, vm_offset
  * 4. No page table pages.
  * but is *MUCH* faster than pmap_enter...
  */
-
 void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
        pmap_t oldpmap;
 
+       CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
+
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        oldpmap = pmap_switch(pmap);
@@ -1876,11 +1926,13 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
  * faults on process startup and immediately after an mmap.
  */
 void
-pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
-                   vm_object_t object, vm_pindex_t pindex,
-                   vm_size_t size)
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+    vm_pindex_t pindex, vm_size_t size)
 {
 
+       CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
+           object, pindex, size);
+
        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
            ("pmap_object_init_pt: non-device object"));
@@ -1894,14 +1946,13 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
  *                     The mapping must already exist in the pmap.
  */
 void
-pmap_change_wiring(pmap, va, wired)
-       register pmap_t pmap;
-       vm_offset_t va;
-       boolean_t wired;
+pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 {
        pmap_t oldpmap;
        struct ia64_lpte *pte;
 
+       CTR4(KTR_PMAP, "%s(%p, %#x, %u)", __func__, pmap, va, wired);
+
        PMAP_LOCK(pmap);
        oldpmap = pmap_switch(pmap);
 
@@ -1919,8 +1970,6 @@ pmap_change_wiring(pmap, va, wired)
        PMAP_UNLOCK(pmap);
 }
 
-
-
 /*
  *     Copy the range specified by src_addr/len
  *     from the source map to the range dst_addr/len
@@ -1928,30 +1977,31 @@ pmap_change_wiring(pmap, va, wired)
  *
  *     This routine is only advisory and need not do anything.
  */
-
 void
-pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
-         vm_offset_t src_addr)
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_va, vm_size_t len,
+    vm_offset_t src_va)
 {
-}      
 
+       CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
+           src_pmap, dst_va, len, src_va);
+}
 
 /*
  *     pmap_zero_page zeros the specified hardware page by
  *     mapping it into virtual memory and using bzero to clear
  *     its contents.
  */
-
 void
 pmap_zero_page(vm_page_t m)
 {
        void *p;
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, m);
+
        p = (void *)pmap_page_to_va(m);
        bzero(p, PAGE_SIZE);
 }
 
-
 /*
  *     pmap_zero_page_area zeros the specified hardware page by
  *     mapping it into virtual memory and using bzero to clear
@@ -1959,33 +2009,33 @@ pmap_zero_page(vm_page_t m)
  *
  *     off and size must reside within a single page.
  */
-
 void
 pmap_zero_page_area(vm_page_t m, int off, int size)
 {
        char *p;
 
+       CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
+
        p = (void *)pmap_page_to_va(m);
        bzero(p + off, size);
 }
 
-
 /*
  *     pmap_zero_page_idle zeros the specified hardware page by
  *     mapping it into virtual memory and using bzero to clear
  *     its contents.  This is for the vm_idlezero process.
  */
-
 void
 pmap_zero_page_idle(vm_page_t m)
 {
        void *p;
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, m);
+
        p = (void *)pmap_page_to_va(m);
        bzero(p, PAGE_SIZE);
 }
 
-
 /*
  *     pmap_copy_page copies the specified (machine independent)
  *     page by mapping the page into virtual memory and using
@@ -1997,13 +2047,13 @@ pmap_copy_page(vm_page_t msrc, vm_page_t
 {
        void *dst, *src;
 
+       CTR3(KTR_PMAP, "%s(%p, %p)", __func__, msrc, mdst);
+
        src = (void *)pmap_page_to_va(msrc);
        dst = (void *)pmap_page_to_va(mdst);
        bcopy(src, dst, PAGE_SIZE);
 }
 
-int unmapped_buf_allowed;
-
 void
 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
     vm_offset_t b_offset, int xfersize)
@@ -2012,6 +2062,9 @@ pmap_copy_pages(vm_page_t ma[], vm_offse
        vm_offset_t a_pg_offset, b_pg_offset;
        int cnt;
 
+       CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
+           a_offset, mb, b_offset, xfersize);
+
        while (xfersize > 0) {
                a_pg_offset = a_offset & PAGE_MASK;
                cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
@@ -2042,6 +2095,8 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
        int loops = 0;
        boolean_t rv;
 
+       CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
+
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_page_exists_quick: page %p is not managed", m));
        rv = FALSE;
@@ -2073,6 +2128,8 @@ pmap_page_wired_mappings(vm_page_t m)
        pv_entry_t pv;
        int count;
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, m);
+
        count = 0;
        if ((m->oflags & VPO_UNMANAGED) != 0)
                return (count);
@@ -2112,6 +2169,8 @@ pmap_remove_pages(pmap_t pmap)
        u_long inuse, bitmask;
        int allfree, bit, field, idx;
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
+
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        oldpmap = pmap_switch(pmap);
@@ -2179,6 +2238,8 @@ pmap_ts_referenced(vm_page_t m)
        pv_entry_t pv;
        int count = 0;
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, m);
+
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_ts_referenced: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
@@ -2214,6 +2275,8 @@ pmap_is_modified(vm_page_t m)
        pv_entry_t pv;
        boolean_t rv;
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, m);
+
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_is_modified: page %p is not managed", m));
        rv = FALSE;
@@ -2254,6 +2317,8 @@ pmap_is_prefaultable(pmap_t pmap, vm_off
 {
        struct ia64_lpte *pte;
 
+       CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
+
        pte = pmap_find_vhpt(addr);
        if (pte != NULL && pmap_present(pte))
                return (FALSE);
@@ -2274,6 +2339,8 @@ pmap_is_referenced(vm_page_t m)
        pv_entry_t pv;
        boolean_t rv;
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, m);
+
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_is_referenced: page %p is not managed", m));
        rv = FALSE;
@@ -2306,6 +2373,9 @@ pmap_advise(pmap_t pmap, vm_offset_t sva
        pmap_t oldpmap;
        vm_page_t m;
 
+       CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %d)", __func__, pmap, sva, eva,
+           advice);
+
        PMAP_LOCK(pmap);
        oldpmap = pmap_switch(pmap);
        for (; sva < eva; sva += PAGE_SIZE) {
@@ -2348,6 +2418,8 @@ pmap_clear_modify(vm_page_t m)
        pmap_t oldpmap, pmap;
        pv_entry_t pv;
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, m);
+
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_modify: page %p is not managed", m));
        VM_OBJECT_ASSERT_WLOCKED(m->object);
@@ -2389,6 +2461,8 @@ pmap_remove_write(vm_page_t m)
        pv_entry_t pv;
        vm_prot_t prot;
 
+       CTR2(KTR_PMAP, "%s(%p)", __func__, m);
+
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_remove_write: page %p is not managed", m));
 
@@ -2425,20 +2499,13 @@ pmap_remove_write(vm_page_t m)
        rw_wunlock(&pvh_global_lock);
 }
 
-/*
- * Map a set of physical memory pages into the kernel virtual
- * address space. Return a pointer to where it is mapped. This
- * routine is intended to be used for mapping device memory,
- * NOT real memory.
- */
-void *
-pmap_mapdev(vm_paddr_t pa, vm_size_t sz)
+vm_offset_t
+pmap_mapdev_priv(vm_paddr_t pa, vm_size_t sz, vm_memattr_t attr)
 {
-       static void *last_va = NULL;
-       static vm_paddr_t last_pa = 0;
+       static vm_offset_t last_va = 0;
+       static vm_paddr_t last_pa = ~0UL;
        static vm_size_t last_sz = 0;
        struct efi_md *md;
-       vm_offset_t va;
 
        if (pa == last_pa && sz == last_sz)
                return (last_va);
@@ -2447,30 +2514,47 @@ pmap_mapdev(vm_paddr_t pa, vm_size_t sz)
        if (md == NULL) {
                printf("%s: [%#lx..%#lx] not covered by memory descriptor\n",
                    __func__, pa, pa + sz - 1);
-               return ((void *)IA64_PHYS_TO_RR6(pa));
+               return (IA64_PHYS_TO_RR6(pa));
        }
 
        if (md->md_type == EFI_MD_TYPE_FREE) {
                printf("%s: [%#lx..%#lx] is in DRAM\n", __func__, pa,
                    pa + sz - 1);
-                return (NULL);
+               return (0);
        }
 
-       va = (md->md_attr & EFI_MD_ATTR_WB) ? IA64_PHYS_TO_RR7(pa) :
+       last_va = (md->md_attr & EFI_MD_ATTR_WB) ? IA64_PHYS_TO_RR7(pa) :
            IA64_PHYS_TO_RR6(pa);
-
-       last_va = (void *)va;
        last_pa = pa;
        last_sz = sz;
        return (last_va);
 }
 
 /*
- * 'Unmap' a range mapped by pmap_mapdev().
+ * Map a set of physical memory pages into the kernel virtual
+ * address space. Return a pointer to where it is mapped. This
+ * routine is intended to be used for mapping device memory,
+ * NOT real memory.
+ */
+void *
+pmap_mapdev_attr(vm_paddr_t pa, vm_size_t sz, vm_memattr_t attr)
+{
+       vm_offset_t va;
+
+       CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, sz, attr);
+
+       va = pmap_mapdev_priv(pa, sz, attr);
+       return ((void *)(uintptr_t)va);
+}
+
+/*
+ * 'Unmap' a range mapped by pmap_mapdev_attr().
  */
 void
 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 {
+
+       CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
 }
 
 /*
@@ -2496,6 +2580,8 @@ pmap_page_set_memattr(vm_page_t m, vm_me
        pv_entry_t pv;
        void *va;
 
+       CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
+
        rw_wlock(&pvh_global_lock);
        m->md.memattr = ma;
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2542,6 +2628,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t ad
        vm_paddr_t pa;
        int val;
 
+       CTR4(KTR_PMAP, "%s(%p, %#x, %p)", __func__, pmap, addr, locked_pa);
+
        PMAP_LOCK(pmap);
 retry:
        oldpmap = pmap_switch(pmap);
@@ -2574,9 +2662,15 @@ out:
        return (val);
 }
 
+/*
+ *
+ */
 void
 pmap_activate(struct thread *td)
 {
+
+       CTR2(KTR_PMAP, "%s(%p)", __func__, td);
+
        pmap_switch(vmspace_pmap(td->td_proc->p_vmspace));
 }
 
@@ -2609,6 +2703,9 @@ out:
        return (prevpm);
 }
 
+/*
+ *
+ */
 void
 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
@@ -2617,6 +2714,8 @@ pmap_sync_icache(pmap_t pm, vm_offset_t 
        vm_offset_t lim;
        vm_size_t len;
 
+       CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
+
        sz += va & 31;
        va &= ~31;
        sz = (sz + 31) & ~31;
@@ -2644,6 +2743,9 @@ void
 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
     vm_offset_t *addr, vm_size_t size)
 {
+
+       CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
+           size);
 }
 
 #include "opt_ddb.h"

Modified: head/sys/ia64/include/pmap.h
==============================================================================
--- head/sys/ia64/include/pmap.h        Wed Mar 19 21:03:04 2014        (r263379)
+++ head/sys/ia64/include/pmap.h        Wed Mar 19 21:30:10 2014        (r263380)
@@ -114,24 +114,29 @@ extern vm_offset_t virtual_end;
 extern uint64_t pmap_vhpt_base[];
 extern int pmap_vhpt_log2size;
 
+#define        pmap_mapbios(pa,sz)     pmap_mapdev_attr(pa,sz,VM_MEMATTR_UNCACHEABLE)
+#define        pmap_mapdev(pa,sz)      pmap_mapdev_attr(pa,sz,VM_MEMATTR_UNCACHEABLE)
+#define        pmap_unmapbios(va,sz)   pmap_unmapdev(va,sz)
+
 #define        pmap_page_get_memattr(m)        ((m)->md.memattr)
-#define        pmap_page_is_mapped(m)  (!TAILQ_EMPTY(&(m)->md.pv_list))
+#define        pmap_page_is_mapped(m)          (!TAILQ_EMPTY(&(m)->md.pv_list))
 #define        pmap_page_is_write_mapped(m)    (((m)->aflags & PGA_WRITEABLE) != 0)
-#define        pmap_mapbios(pa, sz)    pmap_mapdev(pa, sz)
-#define        pmap_unmapbios(va, sz)  pmap_unmapdev(va, sz)
 
-vm_offset_t pmap_alloc_vhpt(void);
-void   pmap_bootstrap(void);
-void   pmap_invalidate_all(void);
-void   pmap_kenter(vm_offset_t va, vm_offset_t pa);
+void   pmap_kenter(vm_offset_t va, vm_paddr_t pa);
 vm_paddr_t pmap_kextract(vm_offset_t va);
 void   pmap_kremove(vm_offset_t);
-void   *pmap_mapdev(vm_paddr_t, vm_size_t);
+void   *pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
 void   pmap_page_set_memattr(vm_page_t, vm_memattr_t);
+void   pmap_unmapdev(vm_offset_t, vm_size_t);
+
+/* Machine-architecture private */
+vm_offset_t pmap_alloc_vhpt(void);
+void   pmap_bootstrap(void);
+void   pmap_invalidate_all(void);
+vm_offset_t pmap_mapdev_priv(vm_paddr_t, vm_size_t, vm_memattr_t);
 vm_offset_t pmap_page_to_va(vm_page_t);
 vm_offset_t pmap_steal_memory(vm_size_t);
 struct pmap *pmap_switch(struct pmap *pmap);
-void   pmap_unmapdev(vm_offset_t, vm_size_t);
 
 #endif /* _KERNEL */
 
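
To record and inspect these events, KTR support has to be compiled into the
kernel. One way to do that (a sketch only, assuming a custom kernel
configuration and the stock ktrdump(8) utility; the entry count is an
arbitrary power of two):

    options KTR
    options KTR_ENTRIES=16384
    options KTR_COMPILE=(KTR_PMAP)
    options KTR_MASK=(KTR_PMAP)

At run time the active class mask can be adjusted through the debug.ktr.mask
sysctl, and the accumulated trace can be dumped with, for example,
"ktrdump -c -t" to include CPU numbers and timestamps.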