Author: jhibbits
Date: Tue Mar 10 03:30:11 2020
New Revision: 358828
URL: https://svnweb.freebsd.org/changeset/base/358828

Log:
  powerpc/booke: Split out 32- and 64-bit pmap details from main body
  
  Summary:
  This is largely a straightforward cleave of the 32-bit and 64-bit page
  table specifics, along with the mmu_booke_*() functions that are wholly
  different between the two implementations.
  
  The ultimate goal of this is to make it easier to reason about and
  update a specific implementation without wading through the details of
  the other.  This is in support of further changes to the 64-bit pmap.
  
  Reviewed by: bdragon
  Differential Revision: https://reviews.freebsd.org/D23983
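
  The mechanism of the split, visible in the pmap.c hunks below, is a
  conditional #include of the width-specific .c file into the shared body.
  A minimal sketch of the pattern (shared_counter and shared_helper() are
  illustrative names, not taken from the commit):

      /* pmap.c -- shared body: common statics stay here. */
      static int shared_counter;      /* visible to the included half */
      static void shared_helper(void);

      #ifdef __powerpc64__
      #include "pmap_64.c"            /* 64-bit page table specifics */
      #else
      #include "pmap_32.c"            /* 32-bit page table specifics */
      #endif

  Because the per-width file is textually included rather than compiled
  separately, it still sees every file-static function and variable in
  pmap.c, so nothing has to be promoted to an extern interface for the
  split.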

Added:
  head/sys/powerpc/booke/pmap_32.c   (contents, props changed)
  head/sys/powerpc/booke/pmap_64.c   (contents, props changed)
Modified:
  head/sys/powerpc/booke/pmap.c

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c       Mon Mar  9 23:40:25 2020        (r358827)
+++ head/sys/powerpc/booke/pmap.c       Tue Mar 10 03:30:11 2020        (r358828)
@@ -221,25 +221,7 @@ uint32_t tlb1_entries;
 
 #define TLB1_ENTRIES (tlb1_entries)
 
-/*
- * Base of the pmap_mapdev() region.  On 32-bit it immediately follows the
- * userspace address range.  On 64-bit it's far above, at (1 << 63), and
- * ranges up to the DMAP, giving 62 bits of PA allowed.  This is far larger than
- * the widest Book-E address bus; the e6500 has a 40-bit PA space.  This allows
- * us to map akin to the DMAP, with addresses identical to the PA, offset by the
- * base.
- */
-#ifdef __powerpc64__
-#define        VM_MAPDEV_BASE          0x8000000000000000
-#define        VM_MAPDEV_PA_MAX        0x4000000000000000 /* Don't encroach on DMAP */
-#else
-#define        VM_MAPDEV_BASE  ((vm_offset_t)VM_MAXUSER_ADDRESS + PAGE_SIZE)
-#endif
-
-static vm_offset_t tlb1_map_base = VM_MAPDEV_BASE;
-
 static tlbtid_t tid_alloc(struct pmap *);
-static void tid_flush(tlbtid_t tid);
 
 #ifdef DDB
 #ifdef __powerpc64__
@@ -254,6 +236,8 @@ static void tlb1_write_entry(tlb_entry_t *, unsigned i
 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t, int);
 
+static __inline uint32_t tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma);
+
 static vm_size_t tsize2size(unsigned int);
 static unsigned int size2tsize(vm_size_t);
 static unsigned long ilog2(unsigned long);
@@ -279,26 +263,6 @@ static int pv_entry_count = 0, pv_entry_max = 0, pv_en
 #define PMAP_SHPGPERPROC       200
 #endif
 
-#ifdef __powerpc64__
-#define PMAP_ROOT_SIZE (sizeof(pte_t***) * PP2D_NENTRIES)
-static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
-                        unsigned int, boolean_t);
-static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int, vm_page_t);
-static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
-static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
-#else
-#define PMAP_ROOT_SIZE (sizeof(pte_t**) * PDIR_NENTRIES)
-static void ptbl_init(void);
-static struct ptbl_buf *ptbl_buf_alloc(void);
-static void ptbl_buf_free(struct ptbl_buf *);
-static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
-
-static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
-static void ptbl_free(mmu_t, pmap_t, unsigned int);
-static void ptbl_hold(mmu_t, pmap_t, unsigned int);
-static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
-#endif
-
 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
@@ -312,26 +276,9 @@ static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
 
 static void booke_pmap_init_qpages(void);
 
-struct ptbl_buf {
-       TAILQ_ENTRY(ptbl_buf) link;     /* list link */
-       vm_offset_t kva;                /* va of mapping */
-};
+static inline void tlb_miss_lock(void);
+static inline void tlb_miss_unlock(void);
 
-#ifndef __powerpc64__
-/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
-#define PTBL_BUFS              (128 * 16)
-
-/* ptbl free list and a lock used for access synchronization. */
-static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
-static struct mtx ptbl_buf_freelist_lock;
-
-/* Base address of kva space allocated for ptbl bufs. */
-static vm_offset_t ptbl_buf_pool_vabase;
-
-/* Pointer to ptbl_buf structures. */
-static struct ptbl_buf *ptbl_bufs;
-#endif
-
 #ifdef SMP
 extern tlb_entry_t __boot_tlb1[];
 void pmap_bootstrap_ap(volatile uint32_t *);
@@ -476,6 +423,14 @@ static mmu_method_t mmu_booke_methods[] = {
 
 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
 
+#ifdef __powerpc64__
+#include "pmap_64.c"
+#else
+#include "pmap_32.c"
+#endif
+
+static vm_offset_t tlb1_map_base = VM_MAPDEV_BASE;
+
 static __inline uint32_t
 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
 {
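
Note the re-added tlb1_map_base initializer above: VM_MAPDEV_BASE itself
was removed from this file in the first hunk, so it must now come from
the included per-width file.  Per the removed block comment, the 64-bit
layout maps a device at a VA identical to its PA, offset by the base,
akin to the DMAP.  A hedged sketch of that computation (mapdev_va() is
an illustrative helper, not code from this commit; the constants are the
ones removed above):

    #define VM_MAPDEV_BASE      0x8000000000000000      /* 1 << 63 */
    #define VM_MAPDEV_PA_MAX    0x4000000000000000      /* 1 << 62: stay below the DMAP */

    /* Illustrative only: mapdev VAs mirror the PA at a fixed base. */
    static vm_offset_t
    mapdev_va(vm_paddr_t pa)
    {
            KASSERT(pa < VM_MAPDEV_PA_MAX, ("mapdev PA encroaches on DMAP"));
            return ((vm_offset_t)VM_MAPDEV_BASE + pa);
    }
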
@@ -588,495 +543,6 @@ tlb1_get_tlbconf(void)
 /* Page table related */
 /**************************************************************************/
 
-#ifdef __powerpc64__
-/* Initialize pool of kva ptbl buffers. */
-static void
-ptbl_init(void)
-{
-}
-
-/* Get a pointer to a PTE in a page table. */
-static __inline pte_t *
-pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
-{
-       pte_t         **pdir;
-       pte_t          *ptbl;
-
-       KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
-
-       pdir = pmap->pm_pp2d[PP2D_IDX(va)];
-       if (!pdir)
-               return NULL;
-       ptbl = pdir[PDIR_IDX(va)];
-       return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
-}
-
-/*
- * allocate a page of pointers to page directories, do not preallocate the
- * page tables
- */
-static pte_t  **
-pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
-{
-       vm_page_t       m;
-       pte_t          **pdir;
-       int             req;
-
-       req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
-       while ((m = vm_page_alloc(NULL, pp2d_idx, req)) == NULL) {
-               PMAP_UNLOCK(pmap);
-               if (nosleep) {
-                       return (NULL);
-               }
-               vm_wait(NULL);
-               PMAP_LOCK(pmap);
-       }
-
-       /* Zero whole pdir. */
-       pdir = (pte_t **)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-       mmu_booke_zero_page(mmu, m);
-
-       return (pdir);
-}
-
-/* Free pdir pages and invalidate pdir entry. */
-static void
-pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, vm_page_t m)
-{
-       pte_t         **pdir;
-
-       pdir = pmap->pm_pp2d[pp2d_idx];
-
-       KASSERT((pdir != NULL), ("pdir_free: null pdir"));
-
-       pmap->pm_pp2d[pp2d_idx] = NULL;
-
-       vm_wire_sub(1);
-       vm_page_free_zero(m);
-}
-
-/*
- * Decrement pdir pages hold count and attempt to free pdir pages. Called
- * when removing directory entry from pdir.
- * 
- * Return 1 if pdir pages were freed.
- */
-static int
-pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
-{
-       pte_t         **pdir;
-       vm_paddr_t      pa;
-       vm_page_t       m;
-
-       KASSERT((pmap != kernel_pmap),
-               ("pdir_unhold: unholding kernel pdir!"));
-
-       pdir = pmap->pm_pp2d[pp2d_idx];
-
-       /* decrement hold count */
-       pa = DMAP_TO_PHYS((vm_offset_t) pdir);
-       m = PHYS_TO_VM_PAGE(pa);
-
-       /*
-        * Free pdir page if there are no dir entries in this pdir.
-        */
-       m->ref_count--;
-       if (m->ref_count == 0) {
-               pdir_free(mmu, pmap, pp2d_idx, m);
-               return (1);
-       }
-       return (0);
-}
-
-/*
- * Increment hold count for pdir pages. This routine is used when a new ptbl
- * entry is being inserted into the pdir.
- */
-static void
-pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
-{
-       vm_page_t       m;
-
-       KASSERT((pmap != kernel_pmap),
-               ("pdir_hold: holding kernel pdir!"));
-
-       KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
-
-       m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pdir));
-       m->ref_count++;
-}
-
-/* Allocate page table. */
-static pte_t   *
-ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
-    boolean_t nosleep)
-{
-       vm_page_t       m;
-       pte_t          *ptbl;
-       int             req;
-
-       KASSERT((pdir[pdir_idx] == NULL),
-               ("%s: valid ptbl entry exists!", __func__));
-
-       req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
-       while ((m = vm_page_alloc(NULL, pdir_idx, req)) == NULL) {
-               if (nosleep)
-                       return (NULL);
-               PMAP_UNLOCK(pmap);
-               rw_wunlock(&pvh_global_lock);
-               vm_wait(NULL);
-               rw_wlock(&pvh_global_lock);
-               PMAP_LOCK(pmap);
-       }
-
-       /* Zero whole ptbl. */
-       ptbl = (pte_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-       mmu_booke_zero_page(mmu, m);
-
-       return (ptbl);
-}
-
-/* Free ptbl pages and invalidate pdir entry. */
-static void
-ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx, vm_page_t m)
-{
-       pte_t          *ptbl;
-
-       ptbl = pdir[pdir_idx];
-
-       KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
-
-       pdir[pdir_idx] = NULL;
-
-       vm_wire_sub(1);
-       vm_page_free_zero(m);
-}
-
-/*
- * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
- * when removing pte entry from ptbl.
- * 
- * Return 1 if ptbl pages were freed.
- */
-static int
-ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
-{
-       pte_t          *ptbl;
-       vm_page_t       m;
-       u_int           pp2d_idx;
-       pte_t         **pdir;
-       u_int           pdir_idx;
-
-       pp2d_idx = PP2D_IDX(va);
-       pdir_idx = PDIR_IDX(va);
-
-       KASSERT((pmap != kernel_pmap),
-               ("ptbl_unhold: unholding kernel ptbl!"));
-
-       pdir = pmap->pm_pp2d[pp2d_idx];
-       ptbl = pdir[pdir_idx];
-
-       /* decrement hold count */
-       m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
-
-       /*
-        * Free ptbl pages if there are no pte entries in this ptbl.
-        * ref_count has the same value for all ptbl pages, so check the
-        * last page.
-        */
-       m->ref_count--;
-       if (m->ref_count == 0) {
-               ptbl_free(mmu, pmap, pdir, pdir_idx, m);
-               pdir_unhold(mmu, pmap, pp2d_idx);
-               return (1);
-       }
-       return (0);
-}
-
-/*
- * Increment hold count for ptbl pages. This routine is used when a new pte
- * entry is being inserted into the ptbl.
- */
-static void
-ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
-{
-       pte_t          *ptbl;
-       vm_page_t       m;
-
-       KASSERT((pmap != kernel_pmap),
-               ("ptbl_hold: holding kernel ptbl!"));
-
-       ptbl = pdir[pdir_idx];
-
-       KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
-
-       m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
-       m->ref_count++;
-}
-#else
-
-/* Initialize pool of kva ptbl buffers. */
-static void
-ptbl_init(void)
-{
-       int i;
-
-       CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
-           (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
-       CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
-           __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
-
-       mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
-       TAILQ_INIT(&ptbl_buf_freelist);
-
-       for (i = 0; i < PTBL_BUFS; i++) {
-               ptbl_bufs[i].kva =
-                   ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
-               TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
-       }
-}
-
-/* Get a ptbl_buf from the freelist. */
-static struct ptbl_buf *
-ptbl_buf_alloc(void)
-{
-       struct ptbl_buf *buf;
-
-       mtx_lock(&ptbl_buf_freelist_lock);
-       buf = TAILQ_FIRST(&ptbl_buf_freelist);
-       if (buf != NULL)
-               TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
-       mtx_unlock(&ptbl_buf_freelist_lock);
-
-       CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
-
-       return (buf);
-}
-
-/* Return ptbl buff to free pool. */
-static void
-ptbl_buf_free(struct ptbl_buf *buf)
-{
-
-       CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
-
-       mtx_lock(&ptbl_buf_freelist_lock);
-       TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
-       mtx_unlock(&ptbl_buf_freelist_lock);
-}
-
-/*
- * Search the pmap's list of allocated ptbl bufs for the buf mapping this ptbl
- */
-static void
-ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
-{
-       struct ptbl_buf *pbuf;
-
-       CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
-
-       PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-
-       TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
-               if (pbuf->kva == (vm_offset_t)ptbl) {
-                       /* Remove from pmap ptbl buf list. */
-                       TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
-
-                       /* Free corresponding ptbl buf. */
-                       ptbl_buf_free(pbuf);
-                       break;
-               }
-}
-
-/* Allocate page table. */
-static pte_t *
-ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
-{
-       vm_page_t mtbl[PTBL_PAGES];
-       vm_page_t m;
-       struct ptbl_buf *pbuf;
-       unsigned int pidx;
-       pte_t *ptbl;
-       int i, j;
-
-       CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
-           (pmap == kernel_pmap), pdir_idx);
-
-       KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
-           ("ptbl_alloc: invalid pdir_idx"));
-       KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
-           ("pte_alloc: valid ptbl entry exists!"));
-
-       pbuf = ptbl_buf_alloc();
-       if (pbuf == NULL)
-               panic("pte_alloc: couldn't alloc kernel virtual memory");
-               
-       ptbl = (pte_t *)pbuf->kva;
-
-       CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
-
-       for (i = 0; i < PTBL_PAGES; i++) {
-               pidx = (PTBL_PAGES * pdir_idx) + i;
-               while ((m = vm_page_alloc(NULL, pidx,
-                   VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
-                       if (nosleep) {
-                               ptbl_free_pmap_ptbl(pmap, ptbl);
-                               for (j = 0; j < i; j++)
-                                       vm_page_free(mtbl[j]);
-                               vm_wire_sub(i);
-                               return (NULL);
-                       }
-                       PMAP_UNLOCK(pmap);
-                       rw_wunlock(&pvh_global_lock);
-                       vm_wait(NULL);
-                       rw_wlock(&pvh_global_lock);
-                       PMAP_LOCK(pmap);
-               }
-               mtbl[i] = m;
-       }
-
-       /* Map allocated pages into kernel_pmap. */
-       mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
-
-       /* Zero whole ptbl. */
-       bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
-
-       /* Add pbuf to the pmap ptbl bufs list. */
-       TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
-
-       return (ptbl);
-}
-
-/* Free ptbl pages and invalidate pdir entry. */
-static void
-ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
-{
-       pte_t *ptbl;
-       vm_paddr_t pa;
-       vm_offset_t va;
-       vm_page_t m;
-       int i;
-
-       CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
-           (pmap == kernel_pmap), pdir_idx);
-
-       KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
-           ("ptbl_free: invalid pdir_idx"));
-
-       ptbl = pmap->pm_pdir[pdir_idx];
-
-       CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
-
-       KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
-
-       /*
-        * Invalidate the pdir entry as soon as possible, so that other CPUs
-        * don't attempt to look up the page tables we are releasing.
-        */
-       mtx_lock_spin(&tlbivax_mutex);
-       tlb_miss_lock();
-       
-       pmap->pm_pdir[pdir_idx] = NULL;
-
-       tlb_miss_unlock();
-       mtx_unlock_spin(&tlbivax_mutex);
-
-       for (i = 0; i < PTBL_PAGES; i++) {
-               va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
-               pa = pte_vatopa(mmu, kernel_pmap, va);
-               m = PHYS_TO_VM_PAGE(pa);
-               vm_page_free_zero(m);
-               vm_wire_sub(1);
-               mmu_booke_kremove(mmu, va);
-       }
-
-       ptbl_free_pmap_ptbl(pmap, ptbl);
-}
-
-/*
- * Decrement ptbl pages hold count and attempt to free ptbl pages.
- * Called when removing pte entry from ptbl.
- *
- * Return 1 if ptbl pages were freed.
- */
-static int
-ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
-{
-       pte_t *ptbl;
-       vm_paddr_t pa;
-       vm_page_t m;
-       int i;
-
-       CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
-           (pmap == kernel_pmap), pdir_idx);
-
-       KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
-           ("ptbl_unhold: invalid pdir_idx"));
-       KASSERT((pmap != kernel_pmap),
-           ("ptbl_unhold: unholding kernel ptbl!"));
-
-       ptbl = pmap->pm_pdir[pdir_idx];
-
-       //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
-       KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
-           ("ptbl_unhold: non kva ptbl"));
-
-       /* decrement hold count */
-       for (i = 0; i < PTBL_PAGES; i++) {
-               pa = pte_vatopa(mmu, kernel_pmap,
-                   (vm_offset_t)ptbl + (i * PAGE_SIZE));
-               m = PHYS_TO_VM_PAGE(pa);
-               m->ref_count--;
-       }
-
-       /*
-        * Free ptbl pages if there are no pte entries in this ptbl.
-        * ref_count has the same value for all ptbl pages, so check the last
-        * page.
-        */
-       if (m->ref_count == 0) {
-               ptbl_free(mmu, pmap, pdir_idx);
-
-               //debugf("ptbl_unhold: e (freed ptbl)\n");
-               return (1);
-       }
-
-       return (0);
-}
-
-/*
- * Increment hold count for ptbl pages. This routine is used when a new pte
- * entry is being inserted into the ptbl.
- */
-static void
-ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
-{
-       vm_paddr_t pa;
-       pte_t *ptbl;
-       vm_page_t m;
-       int i;
-
-       CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
-           pdir_idx);
-
-       KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
-           ("ptbl_hold: invalid pdir_idx"));
-       KASSERT((pmap != kernel_pmap),
-           ("ptbl_hold: holding kernel ptbl!"));
-
-       ptbl = pmap->pm_pdir[pdir_idx];
-
-       KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
-
-       for (i = 0; i < PTBL_PAGES; i++) {
-               pa = pte_vatopa(mmu, kernel_pmap,
-                   (vm_offset_t)ptbl + (i * PAGE_SIZE));
-               m = PHYS_TO_VM_PAGE(pa);
-               m->ref_count++;
-       }
-}
-#endif
-
 /* Allocate pv_entry structure. */
 pv_entry_t
 pv_alloc(void)
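
The hunk below removes the per-width pte_enter()/pte_remove()/pte_vatopa()
bodies.  For orientation, the 64-bit implementation walks a three-level
tree rooted at pm_pp2d; condensed from the pte_find() removed in the
previous hunk, where NULL at any level means no mapping:

    pte_t **pdir = pmap->pm_pp2d[PP2D_IDX(va)];                 /* level 1 */
    pte_t  *ptbl = (pdir != NULL) ? pdir[PDIR_IDX(va)] : NULL;  /* level 2 */
    pte_t  *pte  = (ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL; /* level 3 */

The 32-bit variant (also removed below) skips the top level and indexes
pm_pdir with PDIR_IDX(va) directly.
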
@@ -1156,397 +622,6 @@ pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
        //debugf("pv_remove: e\n");
 }
 
-#ifdef __powerpc64__
-/*
- * Clean pte entry, try to free page table page if requested.
- * 
- * Return 1 if ptbl pages were freed, otherwise return 0.
- */
-static int
-pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
-{
-       vm_page_t       m;
-       pte_t          *pte;
-
-       pte = pte_find(mmu, pmap, va);
-       KASSERT(pte != NULL, ("%s: NULL pte", __func__));
-
-       if (!PTE_ISVALID(pte))
-               return (0);
-
-       /* Get vm_page_t for mapped pte. */
-       m = PHYS_TO_VM_PAGE(PTE_PA(pte));
-
-       if (PTE_ISWIRED(pte))
-               pmap->pm_stats.wired_count--;
-
-       /* Handle managed entry. */
-       if (PTE_ISMANAGED(pte)) {
-
-               /* Handle modified pages. */
-               if (PTE_ISMODIFIED(pte))
-                       vm_page_dirty(m);
-
-               /* Referenced pages. */
-               if (PTE_ISREFERENCED(pte))
-                       vm_page_aflag_set(m, PGA_REFERENCED);
-
-               /* Remove pv_entry from pv_list. */
-               pv_remove(pmap, va, m);
-       } else if (pmap == kernel_pmap && m && m->md.pv_tracked) {
-               pv_remove(pmap, va, m);
-               if (TAILQ_EMPTY(&m->md.pv_list))
-                       m->md.pv_tracked = false;
-       }
-       mtx_lock_spin(&tlbivax_mutex);
-       tlb_miss_lock();
-
-       tlb0_flush_entry(va);
-       *pte = 0;
-
-       tlb_miss_unlock();
-       mtx_unlock_spin(&tlbivax_mutex);
-
-       pmap->pm_stats.resident_count--;
-
-       if (flags & PTBL_UNHOLD) {
-               return (ptbl_unhold(mmu, pmap, va));
-       }
-       return (0);
-}
-
-/*
- * Insert PTE for a given page and virtual address.
- */
-static int
-pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
-    boolean_t nosleep)
-{
-       unsigned int    pp2d_idx = PP2D_IDX(va);
-       unsigned int    pdir_idx = PDIR_IDX(va);
-       unsigned int    ptbl_idx = PTBL_IDX(va);
-       pte_t          *ptbl, *pte, pte_tmp;
-       pte_t         **pdir;
-
-       /* Get the page directory pointer. */
-       pdir = pmap->pm_pp2d[pp2d_idx];
-       if (pdir == NULL)
-               pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);
-
-       /* Get the page table pointer. */
-       ptbl = pdir[pdir_idx];
-
-       if (ptbl == NULL) {
-               /* Allocate page table pages. */
-               ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
-               if (ptbl == NULL) {
-                       KASSERT(nosleep, ("nosleep and NULL ptbl"));
-                       return (ENOMEM);
-               }
-               pte = &ptbl[ptbl_idx];
-       } else {
-               /*
-                * Check if there is valid mapping for requested va, if there
-                * is, remove it.
-                */
-               pte = &ptbl[ptbl_idx];
-               if (PTE_ISVALID(pte)) {
-                       pte_remove(mmu, pmap, va, PTBL_HOLD);
-               } else {
-                       /*
-                        * pte is not used, increment hold count for ptbl
-                        * pages.
-                        */
-                       if (pmap != kernel_pmap)
-                               ptbl_hold(mmu, pmap, pdir, pdir_idx);
-               }
-       }
-
-       if (pdir[pdir_idx] == NULL) {
-               if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
-                       pdir_hold(mmu, pmap, pdir);
-               pdir[pdir_idx] = ptbl;
-       }
-       if (pmap->pm_pp2d[pp2d_idx] == NULL)
-               pmap->pm_pp2d[pp2d_idx] = pdir;
-
-       /*
-        * Insert pv_entry into pv_list for mapped page if part of managed
-        * memory.
-        */
-       if ((m->oflags & VPO_UNMANAGED) == 0) {
-               flags |= PTE_MANAGED;
-
-               /* Create and insert pv entry. */
-               pv_insert(pmap, va, m);
-       }
-
-       pmap->pm_stats.resident_count++;
-
-       pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
-       pte_tmp |= (PTE_VALID | flags);
-
-       mtx_lock_spin(&tlbivax_mutex);
-       tlb_miss_lock();
-
-       tlb0_flush_entry(va);
-       *pte = pte_tmp;
-
-       tlb_miss_unlock();
-       mtx_unlock_spin(&tlbivax_mutex);
-
-       return (0);
-}
-
-/* Return the pa for the given pmap/va. */
-static vm_paddr_t
-pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
-{
-       vm_paddr_t      pa = 0;
-       pte_t          *pte;
-
-       pte = pte_find(mmu, pmap, va);
-       if ((pte != NULL) && PTE_ISVALID(pte))
-               pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
-       return (pa);
-}
-
-
-/* Set up kernel page tables. */
-static void
-kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
-{
-       int             i, j;
-       vm_offset_t     va;
-       pte_t           *pte;
-
-       va = addr;
-       /* Initialize kernel pdir */
-       for (i = 0; i < kernel_pdirs; i++) {
-               kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
-                   (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
-               for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
-                   j < PDIR_NENTRIES; j++) {
-                       kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
-                           (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE) +
-                            (((i * PDIR_NENTRIES) + j) * PAGE_SIZE));
-               }
-       }
-
-       /*
-        * Fill in PTEs covering kernel code and data. They are not required
-        * for address translation, as this area is covered by static TLB1
-        * entries, but for pte_vatopa() to work correctly with kernel area
-        * addresses.
-        */
-       for (va = addr; va < data_end; va += PAGE_SIZE) {
-               pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
-               *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
-               *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
-                   PTE_VALID | PTE_PS_4KB;
-       }
-}
-#else
-/*
- * Clean pte entry, try to free page table page if requested.
- *
- * Return 1 if ptbl pages were freed, otherwise return 0.
- */
-static int
-pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
-{
-       unsigned int pdir_idx = PDIR_IDX(va);
-       unsigned int ptbl_idx = PTBL_IDX(va);
-       vm_page_t m;
-       pte_t *ptbl;
-       pte_t *pte;
-
-       //int su = (pmap == kernel_pmap);
-       //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = 
%d)\n",
-       //              su, (u_int32_t)pmap, va, flags);
-
-       ptbl = pmap->pm_pdir[pdir_idx];
-       KASSERT(ptbl, ("pte_remove: null ptbl"));
-
-       pte = &ptbl[ptbl_idx];
-
-       if (pte == NULL || !PTE_ISVALID(pte))
-               return (0);
-
-       if (PTE_ISWIRED(pte))
-               pmap->pm_stats.wired_count--;
-
-       /* Get vm_page_t for mapped pte. */
-       m = PHYS_TO_VM_PAGE(PTE_PA(pte));
-
-       /* Handle managed entry. */
-       if (PTE_ISMANAGED(pte)) {
-
-               if (PTE_ISMODIFIED(pte))
-                       vm_page_dirty(m);
-
-               if (PTE_ISREFERENCED(pte))
-                       vm_page_aflag_set(m, PGA_REFERENCED);
-
-               pv_remove(pmap, va, m);
-       } else if (pmap == kernel_pmap && m && m->md.pv_tracked) {
-               /*
-                * Always pv_insert()/pv_remove() on MPC85XX, in case DPAA is
-                * used.  This is needed by the NCSW support code for fast
-                * VA<->PA translation.
-                */
-               pv_remove(pmap, va, m);
-               if (TAILQ_EMPTY(&m->md.pv_list))
-                       m->md.pv_tracked = false;
-       }
-
-       mtx_lock_spin(&tlbivax_mutex);
-       tlb_miss_lock();
-
-       tlb0_flush_entry(va);
-       *pte = 0;
-
-       tlb_miss_unlock();
-       mtx_unlock_spin(&tlbivax_mutex);
-
-       pmap->pm_stats.resident_count--;
-
-       if (flags & PTBL_UNHOLD) {
-               //debugf("pte_remove: e (unhold)\n");
-               return (ptbl_unhold(mmu, pmap, pdir_idx));
-       }
-
-       //debugf("pte_remove: e\n");
-       return (0);
-}
-
-/*
- * Insert PTE for a given page and virtual address.
- */
-static int
-pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
-    boolean_t nosleep)
-{
-       unsigned int pdir_idx = PDIR_IDX(va);
-       unsigned int ptbl_idx = PTBL_IDX(va);
-       pte_t *ptbl, *pte, pte_tmp;
-
-       CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
-           pmap == kernel_pmap, pmap, va);
-
-       /* Get the page table pointer. */
-       ptbl = pmap->pm_pdir[pdir_idx];
-
-       if (ptbl == NULL) {
-               /* Allocate page table pages. */
-               ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
-               if (ptbl == NULL) {
-                       KASSERT(nosleep, ("nosleep and NULL ptbl"));
-                       return (ENOMEM);
-               }
-               pmap->pm_pdir[pdir_idx] = ptbl;
-               pte = &ptbl[ptbl_idx];
-       } else {
-               /*
-                * Check if there is valid mapping for requested
-                * va, if there is, remove it.
-                */
-               pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
-               if (PTE_ISVALID(pte)) {
-                       pte_remove(mmu, pmap, va, PTBL_HOLD);
-               } else {
-                       /*
-                        * pte is not used, increment hold count
-                        * for ptbl pages.
-                        */
-                       if (pmap != kernel_pmap)
-                               ptbl_hold(mmu, pmap, pdir_idx);
-               }
-       }
-
-       /*
-        * Insert pv_entry into pv_list for mapped page if part of managed
-        * memory.
-        */
-       if ((m->oflags & VPO_UNMANAGED) == 0) {
-               flags |= PTE_MANAGED;
-
-               /* Create and insert pv entry. */
-               pv_insert(pmap, va, m);
-       }
-
-       pmap->pm_stats.resident_count++;
-       
-       pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
-       pte_tmp |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */
-
-       mtx_lock_spin(&tlbivax_mutex);
-       tlb_miss_lock();
-
-       tlb0_flush_entry(va);
-       *pte = pte_tmp;
-
-       tlb_miss_unlock();
-       mtx_unlock_spin(&tlbivax_mutex);
-       return (0);
-}
-
-/* Return the pa for the given pmap/va. */
-static vm_paddr_t
-pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
-{
-       vm_paddr_t pa = 0;
-       pte_t *pte;
-
-       pte = pte_find(mmu, pmap, va);
-       if ((pte != NULL) && PTE_ISVALID(pte))
-               pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
-       return (pa);
-}
-
-/* Get a pointer to a PTE in a page table. */
-static pte_t *
-pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
-{
-       unsigned int pdir_idx = PDIR_IDX(va);
-       unsigned int ptbl_idx = PTBL_IDX(va);
-
-       KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
-
-       if (pmap->pm_pdir[pdir_idx])
-               return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
-
-       return (NULL);
-}
-
-/* Set up kernel page tables. */
-static void
-kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
-{
-       int             i;
-       vm_offset_t     va;
-       pte_t           *pte;
-
-       /* Initialize kernel pdir */
-       for (i = 0; i < kernel_ptbls; i++)
-               kernel_pmap->pm_pdir[kptbl_min + i] =
-                   (pte_t *)(pdir + (i * PAGE_SIZE * PTBL_PAGES));
-
-       /*
-        * Fill in PTEs covering kernel code and data. They are not required
-        * for address translation, as this area is covered by static TLB1
-        * entries, but for pte_vatopa() to work correctly with kernel area
-        * addresses.
-        */
-       for (va = addr; va < data_end; va += PAGE_SIZE) {
-               pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
-               *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
-               *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
-                   PTE_VALID | PTE_PS_4KB;
-       }
-}
-#endif
-
 /**************************************************************************/

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
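
One pattern that recurs throughout the removed code above: the vm_page
backing a user page table counts its live PTEs in ref_count.  A hedged
sketch of that lifecycle (example_hold()/example_unhold() are
illustrative names, condensed from the ptbl_hold()/ptbl_unhold() bodies
in the diff):

    /* On pte_enter() into an already-allocated user ptbl. */
    static void
    example_hold(vm_page_t m)
    {
            m->ref_count++;
    }

    /* On pte_remove(..., PTBL_UNHOLD); returns 1 if the ptbl was freed. */
    static int
    example_unhold(vm_page_t m)
    {
            m->ref_count--;
            if (m->ref_count == 0) {
                    /* Last PTE gone: free the ptbl page(s) and, on
                     * 64-bit, drop the parent pdir's hold in turn
                     * (ptbl_free() followed by pdir_unhold()). */
                    return (1);
            }
            return (0);
    }

The kernel pmap is exempt: both halves KASSERT that kernel page tables
are never held or unheld, since those are set up once at bootstrap by
kernel_pte_alloc() and never freed.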