Module Name:	src
Committed By:	rin
Date:		Sat Sep  4 14:31:04 UTC 2021
Modified Files:
	src/sys/arch/powerpc/ibm4xx: pmap.c

Log Message:
Style. No binary changes.

Also, remove an old #if-0'ed code block, which was copied from oea (and
can therefore never be enabled here).

To generate a diff of this commit:
cvs rdiff -u -r1.98 -r1.99 src/sys/arch/powerpc/ibm4xx/pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
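For reference, the #if-0'ed block in question is the segment reservation in
pmap_virtual_space(), which appears to come from the oea pmap; it refers to
oea segment-register constants (KERNEL_SR, ADDR_SR_SHFT, SEGMENT_LENGTH)
that have no meaning on 4xx, so the #if 0 arm could never be turned on.
Condensed from the hunk below:

	#if 0
		/*
		 * Reserve one segment for kernel virtual memory
		 */
		*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
		*end = *start + SEGMENT_LENGTH;
	#else
		*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
		*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
	#endif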
Modified files:

Index: src/sys/arch/powerpc/ibm4xx/pmap.c
diff -u src/sys/arch/powerpc/ibm4xx/pmap.c:1.98 src/sys/arch/powerpc/ibm4xx/pmap.c:1.99
--- src/sys/arch/powerpc/ibm4xx/pmap.c:1.98	Thu Apr 15 00:00:46 2021
+++ src/sys/arch/powerpc/ibm4xx/pmap.c	Sat Sep  4 14:31:04 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.98 2021/04/15 00:00:46 rin Exp $	*/
+/*	$NetBSD: pmap.c,v 1.99 2021/09/04 14:31:04 rin Exp $	*/
 
 /*
  * Copyright 2001 Wasabi Systems, Inc.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.98 2021/04/15 00:00:46 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.99 2021/09/04 14:31:04 rin Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -99,7 +99,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.9
  * kernmap is an array of PTEs large enough to map in
  * 4GB.  At 16KB/page it is 256K entries or 2MB.
  */
-#define KERNMAP_SIZE	((0xffffffffU/PAGE_SIZE)+1)
+#define KERNMAP_SIZE	((0xffffffffU / PAGE_SIZE) + 1)
 void *kernmap;
 
 #define MINCTX		2
@@ -127,11 +127,11 @@ static int pmap_bootstrap_done = 0;
 
 /* Event counters */
 struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
-	NULL, "cpu", "tlbmiss");
+    NULL, "cpu", "tlbmiss");
 struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
-	NULL, "cpu", "tlbflush");
+    NULL, "cpu", "tlbflush");
 struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
-	NULL, "cpu", "tlbenter");
+    NULL, "cpu", "tlbenter");
 EVCNT_ATTACH_STATIC(tlbmiss_ev);
 EVCNT_ATTACH_STATIC(tlbflush_ev);
 EVCNT_ATTACH_STATIC(tlbenter_ev);
@@ -229,8 +229,7 @@ pa_to_attr(paddr_t pa)
 static inline int
 pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
 {
-	int seg = STIDX(va);
-	int ptn = PTIDX(va);
+	int seg = STIDX(va), ptn = PTIDX(va);
 	u_int oldpte;
 
 	if (!pm->pm_ptbl[seg]) {
@@ -266,8 +265,7 @@ pte_enter(struct pmap *pm, vaddr_t va, u
 volatile u_int *
 pte_find(struct pmap *pm, vaddr_t va)
 {
-	int seg = STIDX(va);
-	int ptn = PTIDX(va);
+	int seg = STIDX(va), ptn = PTIDX(va);
 
 	if (pm->pm_ptbl[seg])
 		return &pm->pm_ptbl[seg][ptn];
@@ -296,9 +294,8 @@ pmap_bootstrap(u_int kernelstart, u_int 
 	/*
 	 * Initialize kernel page table.
 	 */
-	for (i = 0; i < STSZ; i++) {
+	for (i = 0; i < STSZ; i++)
 		pmap_kernel()->pm_ptbl[i] = NULL;
-	}
 	ctxbusy[0] = ctxbusy[1] = pmap_kernel();
 
 	/*
@@ -313,7 +310,7 @@ pmap_bootstrap(u_int kernelstart, u_int 
 	mem_regions(&mem, &avail);
 	for (mp = mem; mp->size; mp++) {
 		physmem += btoc(mp->size);
-		printf("+%lx,",mp->size);
+		printf("+%lx,", mp->size);
 	}
 	printf("\n");
 	ppc4xx_tlb_init();
@@ -333,7 +330,7 @@ pmap_bootstrap(u_int kernelstart, u_int 
 	for (mp = avail; mp->size; mp++) {
 		s = mp->start;
 		e = mp->start + mp->size;
-		printf("%08x-%08x -> ",s,e);
+		printf("%08x-%08x -> ", s, e);
 		/*
 		 * Check whether this region holds all of the kernel.
 		 */
@@ -366,14 +363,14 @@ pmap_bootstrap(u_int kernelstart, u_int 
 		if (e < s)
 			e = s;
 		sz = e - s;
-		printf("%08x-%08x = %x\n",s,e,sz);
+		printf("%08x-%08x = %x\n", s, e, sz);
 		/*
 		 * Check whether some memory is left here.
 		 */
 		if (sz == 0) {
 empty:			memmove(mp, mp + 1,
-				(cnt - (mp - avail)) * sizeof *mp);
+			    (cnt - (mp - avail)) * sizeof(*mp));
 			cnt--;
 			mp--;
 			continue;
 		}
@@ -415,13 +412,13 @@ pmap_bootstrap(u_int kernelstart, u_int 
 	msgbuf_paddr = mp->start + mp->size - sz;
 	mp->size -= sz;
 	if (mp->size <= 0)
-		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);
+		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof(*mp));
 #endif
 
 	for (mp = avail; mp->size; mp++)
 		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
-			atop(mp->start), atop(mp->start + mp->size),
-			VM_FREELIST_DEFAULT);
+		    atop(mp->start), atop(mp->start + mp->size),
+		    VM_FREELIST_DEFAULT);
 
 	/*
 	 * Initialize kernel pmap and hardware.
@@ -468,14 +465,15 @@ pmap_init(void)
 	struct pv_entry *pv;
 	vsize_t sz;
 	vaddr_t addr;
-	int i, s;
-	int bank;
+	int bank, i, s;
 	char *attr;
 
 	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
 	sz = round_page(sz);
 	addr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
 
+	s = splvm();
+
 	pv = pv_table = (struct pv_entry *)addr;
 	for (i = npgs; --i >= 0;)
 		pv++->pv_pm = NULL;
@@ -484,8 +482,7 @@ pmap_init(void)
 
 	pv = pv_table;
 	attr = pmap_attrib;
-	for (bank = uvm_physseg_get_first();
-	    uvm_physseg_valid_p(bank);
+	for (bank = uvm_physseg_get_first(); uvm_physseg_valid_p(bank);
 	    bank = uvm_physseg_get_next(bank)) {
 		sz = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
 		uvm_physseg_get_pmseg(bank)->pvent = pv;
@@ -495,11 +492,12 @@ pmap_init(void)
 	}
 
 	pmap_initialized = 1;
+	splx(s);
 
 	/* Setup a pool for additional pvlist structures */
-	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL,
-	    IPL_VM);
+	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry",
+	    NULL, IPL_VM);
 }
@@ -509,16 +507,8 @@ void
 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
 {
 
-#if 0
-	/*
-	 * Reserve one segment for kernel virtual memory
-	 */
-	*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
-	*end = *start + SEGMENT_LENGTH;
-#else
 	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
 	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
-#endif
 }
 
 #ifdef PMAP_GROWKERNEL
@@ -540,27 +530,23 @@ vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;
 vaddr_t
 pmap_growkernel(vaddr_t maxkvaddr)
 {
-	int s;
-	int seg;
-	paddr_t pg;
 	struct pmap *pm = pmap_kernel();
+	paddr_t pg;
+	int seg, s;
 
 	s = splvm();
 
 	/* Align with the start of a page table */
-	for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr;
-	    kbreak += PTMAP) {
+	for (kbreak &= ~(PTMAP - 1); kbreak < maxkvaddr; kbreak += PTMAP) {
 		seg = STIDX(kbreak);
 		if (pte_find(pm, kbreak))
 			continue;
-		if (uvm.page_init_done) {
+		if (uvm.page_init_done)
 			pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
-		} else {
-			if (!uvm_page_physget(&pg))
-				panic("pmap_growkernel: no memory");
-		}
+		else if (!uvm_page_physget(&pg))
+			panic("pmap_growkernel: no memory");
 		if (!pg)
 			panic("pmap_growkernel: no pages");
 		pmap_zero_page((paddr_t)pg);
@@ -568,7 +554,9 @@ pmap_growkernel(vaddr_t maxkvaddr)
 		/* XXX This is based on all phymem being addressable */
 		pm->pm_ptbl[seg] = (u_int *)pg;
 	}
+
 	splx(s);
+
 	return kbreak;
 }
 
@@ -601,10 +589,12 @@ vm_page_alloc1(void)
 void
 vm_page_free1(struct vm_page *pg)
 {
+
 #ifdef DIAGNOSTIC
 	if (pg->flags != (PG_CLEAN|PG_FAKE)) {
 		printf("Freeing invalid page %p\n", pg);
-		printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(pg));
+		printf("pa = %llx\n",
+		    (unsigned long long)VM_PAGE_TO_PHYS(pg));
#ifdef DDB
 		Debugger();
 #endif
 	}
@@ -626,7 +616,7 @@ pmap_create(void)
 	struct pmap *pm;
 
 	pm = kmem_alloc(sizeof(*pm), KM_SLEEP);
-	memset(pm, 0, sizeof *pm);
+	memset(pm, 0, sizeof(*pm));
 	pm->pm_refs = 1;
 	return pm;
 }
@@ -650,9 +640,8 @@ pmap_destroy(struct pmap *pm)
 {
 	int i;
 
-	if (--pm->pm_refs > 0) {
+	if (--pm->pm_refs > 0)
 		return;
-	}
 	KASSERT(pm->pm_stats.resident_count == 0);
 	KASSERT(pm->pm_stats.wired_count == 0);
 	for (i = 0; i < STSZ; i++)
@@ -694,14 +683,14 @@ pmap_update(struct pmap *pmap)
 void
 pmap_zero_page(paddr_t pa)
 {
+	int i;
 
 #ifdef PPC_4XX_NOCACHE
 	memset((void *)pa, 0, PAGE_SIZE);
 #else
-	int i;
 
 	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
-		__asm volatile ("dcbz 0,%0" :: "r"(pa));
+		__asm volatile ("dcbz 0,%0" : : "r"(pa));
 		pa += CACHELINESIZE;
 	}
 #endif
@@ -731,6 +720,7 @@ pmap_enter_pv(struct pmap *pm, vaddr_t v
 		return 0;
 
 	s = splvm();
+
 	pv = pa_to_pv(pa);
 	if (!pv->pv_pm) {
 		/*
@@ -761,7 +751,9 @@ pmap_enter_pv(struct pmap *pm, vaddr_t v
 		PV_WIRE(pv);
 		pm->pm_stats.wired_count++;
 	}
+
 	splx(s);
+
 	return 1;
 }
 
@@ -784,9 +776,8 @@ pmap_remove_pv(struct pmap *pm, vaddr_t 
 	 * the entry.  In either case we free the now unused entry.
 	 */
 	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
-		if (PV_ISWIRED(pv)) {
+		if (PV_ISWIRED(pv))
 			pm->pm_stats.wired_count--;
-		}
 		if ((npv = pv->pv_next)) {
 			*pv = *npv;
 			pool_put(&pv_pool, npv);
@@ -812,9 +803,9 @@ pmap_remove_pv(struct pmap *pm, vaddr_t 
 int
 pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 {
-	int s;
 	u_int tte;
 	bool managed;
+	int s;
 
 	/*
 	 * Have to remove any existing mapping first.
 	 */
@@ -832,21 +823,26 @@ pmap_enter(struct pmap *pm, vaddr_t va, 
 	tte = TTE_PA(pa);
 	/* XXXX -- need to support multiple page sizes. */
 	tte |= TTE_SZ_16K;
-#ifdef DIAGNOSTIC
+
+#ifdef DIAGNOSTIC
 	if ((flags & (PMAP_NOCACHE | PME_WRITETHROUG)) ==
-		(PMAP_NOCACHE | PME_WRITETHROUG))
+	    (PMAP_NOCACHE | PME_WRITETHROUG))
 		panic("pmap_enter: uncached & writethrough");
 #endif
-	if (flags & PMAP_NOCACHE)
+
+	if (flags & PMAP_NOCACHE) {
 		/* Must be I/O mapping */
 		tte |= TTE_I | TTE_G;
+	}
 #ifdef PPC_4XX_NOCACHE
 	tte |= TTE_I;
 #else
-	else if (flags & PME_WRITETHROUG)
+	else if (flags & PME_WRITETHROUG) {
 		/* Uncached and writethrough are not compatible */
 		tte |= TTE_W;
+	}
 #endif
+
 	if (pm == pmap_kernel())
 		tte |= TTE_ZONE(ZONE_PRIV);
 	else
@@ -897,6 +893,7 @@ pmap_enter(struct pmap *pm, vaddr_t va, 
 		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
 		splx(s2);
 	}
+
 	splx(s);
 
 	/* Flush the real memory from the instruction cache. */
@@ -913,15 +910,15 @@ pmap_unwire(struct pmap *pm, vaddr_t va)
 	paddr_t pa;
 	int s;
 
-	if (!pmap_extract(pm, va, &pa)) {
+	if (!pmap_extract(pm, va, &pa))
 		return;
-	}
 
 	pv = pa_to_pv(pa);
 	if (!pv)
 		return;
 
 	s = splvm();
+
 	while (pv != NULL) {
@@ -932,15 +929,16 @@ pmap_unwire(struct pmap *pm, vaddr_t va)
 		}
 		pv = pv->pv_next;
 	}
+
 	splx(s);
 }
 
 void
 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 {
-	int s;
-	u_int tte;
 	struct pmap *pm = pmap_kernel();
+	u_int tte;
+	int s;
 
 	/*
 	 * Generate TTE.
@@ -952,24 +950,26 @@
 	 */
 	tte = 0;
 	if (prot & VM_PROT_ALL) {
-
 		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
 		/* XXXX -- need to support multiple page sizes. */
 		tte |= TTE_SZ_16K;
+
 #ifdef DIAGNOSTIC
 		if ((flags & (PMAP_NOCACHE | PME_WRITETHROUG)) ==
-			(PMAP_NOCACHE | PME_WRITETHROUG))
+		    (PMAP_NOCACHE | PME_WRITETHROUG))
 			panic("pmap_kenter_pa: uncached & writethrough");
 #endif
+
 		if (flags & PMAP_NOCACHE)
 			/* Must be I/O mapping */
 			tte |= TTE_I | TTE_G;
 #ifdef PPC_4XX_NOCACHE
 		tte |= TTE_I;
 #else
-		else if (prot & PME_WRITETHROUG)
+		else if (prot & PME_WRITETHROUG) {
 			/* Uncached and writethrough are not compatible */
 			tte |= TTE_W;
+		}
 #endif
 		if (prot & VM_PROT_WRITE)
 			tte |= TTE_WR;
@@ -980,6 +980,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 	/* Insert page into page table. */
 	if (__predict_false(!pte_enter(pm, va, tte)))
 		panic("%s: pte_enter", __func__);
+
 	splx(s);
 }
 
@@ -1000,13 +1001,13 @@ pmap_kremove(vaddr_t va, vsize_t len)
 void
 pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
 {
-	int s;
 	paddr_t pa;
 	volatile u_int *ptp;
+	int s;
 
 	s = splvm();
 
-	while (va < endva) { 
+	while (va < endva) {
 		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
 			pa = TTE_PA(pa);
 			pmap_remove_pv(pm, va, pa);
@@ -1026,16 +1027,17 @@ pmap_remove(struct pmap *pm, vaddr_t va,
 bool
 pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
 {
-	int seg = STIDX(va);
-	int ptn = PTIDX(va);
+	int seg = STIDX(va), ptn = PTIDX(va);
 	u_int pa = 0;
 	int s;
 
 	s = splvm();
-	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn]) && pap) {
+
+	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn]) && pap)
 		*pap = TTE_PA(pa) | (va & PGOFSET);
-	}
+
 	splx(s);
+
 	return pa != 0;
 }
 
@@ -1056,16 +1058,15 @@ pmap_protect(struct pmap *pm, vaddr_t sv
 		return;
 	}
 	bic = 0;
-	if ((prot & VM_PROT_WRITE) == 0) {
+	if ((prot & VM_PROT_WRITE) == 0)
 		bic |= TTE_WR;
-	}
-	if ((prot & VM_PROT_EXECUTE) == 0) {
+	if ((prot & VM_PROT_EXECUTE) == 0)
 		bic |= TTE_EX;
-	}
-	if (bic == 0) {
+	if (bic == 0)
 		return;
-	}
+
 	s = splvm();
+
 	while (sva < eva) {
 		if ((ptp = pte_find(pm, sva)) != NULL) {
 			*ptp &= ~bic;
@@ -1073,6 +1074,7 @@ pmap_protect(struct pmap *pm, vaddr_t sv
 		}
 		sva += PAGE_SIZE;
 	}
+
 	splx(s);
 }
 
@@ -1092,12 +1094,16 @@ pmap_check_attr(struct vm_page *pg, u_in
 		return false;
 
 	s = splvm();
-	rv = ((*attr & mask) != 0);
+
+	rv = (*attr & mask) != 0;
 	if (clear) {
 		*attr &= ~mask;
-		pmap_page_protect(pg, mask == PMAP_ATTR_CHG ? VM_PROT_READ : 0);
+		pmap_page_protect(pg,
+		    mask == PMAP_ATTR_CHG ? VM_PROT_READ : 0);
 	}
+
 	splx(s);
+
 	return rv;
 }
 
@@ -1111,10 +1117,10 @@ pmap_check_attr(struct vm_page *pg, u_in
 void
 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 {
-	paddr_t pa = VM_PAGE_TO_PHYS(pg);
-	vaddr_t va;
 	struct pv_entry *pvh, *pv, *npv;
 	struct pmap *pm;
+	paddr_t pa = VM_PAGE_TO_PHYS(pg);
+	vaddr_t va;
 
 	pvh = pa_to_pv(pa);
 	if (pvh == NULL)
@@ -1128,6 +1134,7 @@ pmap_page_protect(struct vm_page *pg, vm
 		va = PV_VA(pv);
 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
 	}
+
 	/* Now check the head pv */
 	if (pvh->pv_pm) {
 		pv = pvh;
@@ -1190,28 +1197,28 @@ pmap_procwr(struct proc *p, vaddr_t va, 
 		ctx = pm->pm_ctx;
 	}
 
-	__asm volatile(
-		"mfmsr %0;"
-		"li %1,0x20;"		/* Turn off IMMU */
-		"andc %1,%0,%1;"
-		"ori %1,%1,0x10;"	/* Turn on DMMU for sure */
-		"mtmsr %1;"
+	__asm volatile (
+		"mfmsr %0;"
+		"li %1,0x20;"		/* Turn off IMMU */
+		"andc %1,%0,%1;"
+		"ori %1,%1,0x10;"	/* Turn on DMMU for sure */
+		"mtmsr %1;"
 		"isync;"
-		"mfpid %1;"
-		"mtpid %2;"
+		"mfpid %1;"
+		"mtpid %2;"
 		"isync;"
 	"1:"
-		"dcbst 0,%3;"
-		"icbi 0,%3;"
-		"add %3,%3,%5;"
-		"sub. %4,%4,%5;"
-		"bge 1b;"
+		"dcbst 0,%3;"
+		"icbi 0,%3;"
+		"add %3,%3,%5;"
+		"sub. %4,%4,%5;"
%4,%4,%5;" + "bge 1b;" "sync;" - "mtpid %1;" - "mtmsr %0;" + "mtpid %1;" + "mtmsr %0;" "isync;" - : "=&r" (msr), "=&r" (opid) - : "r" (ctx), "r" (va), "r" (len), "r" (CACHELINESIZE)); + : "=&r"(msr), "=&r"(opid) + : "r"(ctx), "r"(va), "r"(len), "r"(CACHELINESIZE)); } else { paddr_t pa; vaddr_t tva, eva; @@ -1248,7 +1255,7 @@ tlb_invalidate_entry(int i) KASSERT(mfspr(SPR_PID) == KERNEL_PID); - __asm volatile( + __asm volatile ( "mfmsr %0;" "li %1,0;" "mtmsr %1;" @@ -1259,16 +1266,16 @@ tlb_invalidate_entry(int i) "mtpid %1;" "mtmsr %0;" "isync;" - : "=&r" (msr), "=&r" (pid), "=&r" (hi) - : "r" (i), "r" (TLB_VALID)); + : "=&r"(msr), "=&r"(pid), "=&r"(hi) + : "r"(i), "r"(TLB_VALID)); #else /* * Just clear entire TLBHI register. */ - __asm volatile( + __asm volatile ( "tlbwe %0,%1,0;" "isync;" - : : "r" (0), "r" (i)); + : : "r"(0), "r"(i)); #endif tlb_info[i].ti_ctx = 0; @@ -1279,32 +1286,32 @@ tlb_invalidate_entry(int i) void ppc4xx_tlb_flush(vaddr_t va, int pid) { - u_long i, found; - u_long msr; + u_long msr, i, found; /* If there's no context then it can't be mapped. */ if (!pid) return; - __asm volatile( - "mfpid %1;" /* Save PID */ - "mfmsr %2;" /* Save MSR */ - "li %0,0;" /* Now clear MSR */ - "mtmsr %0;" + __asm volatile ( + "mfpid %1;" /* Save PID */ + "mfmsr %2;" /* Save MSR */ + "li %0,0;" /* Now clear MSR */ + "mtmsr %0;" "isync;" - "mtpid %4;" /* Set PID */ + "mtpid %4;" /* Set PID */ "isync;" - "tlbsx. %0,0,%3;" /* Search TLB */ + "tlbsx. %0,0,%3;" /* Search TLB */ "isync;" - "mtpid %1;" /* Restore PID */ - "mtmsr %2;" /* Restore MSR */ + "mtpid %1;" /* Restore PID */ + "mtmsr %2;" /* Restore MSR */ "isync;" - "li %1,1;" - "beq 1f;" - "li %1,0;" - "1:" - : "=&r" (i), "=&r" (found), "=&r" (msr) - : "r" (va), "r" (pid)); + "li %1,1;" + "beq 1f;" + "li %1,0;" + "1:" + : "=&r"(i), "=&r"(found), "=&r"(msr) + : "r"(va), "r"(pid)); + if (found && !TLB_LOCKED(i)) { /* Now flush translation */ tlb_invalidate_entry(i); @@ -1323,7 +1330,7 @@ ppc4xx_tlb_flush_all(void) if (!TLB_LOCKED(i)) tlb_invalidate_entry(i); - __asm volatile("isync"); + __asm volatile ("isync"); } /* Find a TLB entry to evict. */ @@ -1337,12 +1344,13 @@ ppc4xx_tlb_find_victim(void) tlbnext = tlb_nreserved; flags = tlb_info[tlbnext].ti_flags; if (!(flags & TLBF_USED) || - (flags & (TLBF_LOCKED | TLBF_REF)) == 0) { + (flags & (TLBF_LOCKED | TLBF_REF)) == 0) { u_long va, stack = (u_long)&va; - if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) && + if (!((tlb_info[tlbnext].ti_va ^ stack) & + (~PGOFSET)) && (tlb_info[tlbnext].ti_ctx == KERNEL_PID) && - (flags & TLBF_USED)) { + (flags & TLBF_USED)) { /* Kernel stack page */ flags |= TLBF_REF; tlb_info[tlbnext].ti_flags = flags; @@ -1350,9 +1358,8 @@ ppc4xx_tlb_find_victim(void) /* Found it! */ return tlbnext; } - } else { + } else tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF); - } } } @@ -1360,9 +1367,8 @@ void ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte) { u_long th, tl, idx; - int msr, pid; paddr_t pa; - int sz; + int msr, pid, sz; tlbenter_ev.ev_count++; @@ -1384,21 +1390,23 @@ ppc4xx_tlb_enter(int ctx, vaddr_t va, u_ tlb_info[idx].ti_ctx = ctx; tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF; - __asm volatile( - "mfmsr %0;" /* Save MSR */ - "li %1,0;" - "mtmsr %1;" /* Clear MSR */ + __asm volatile ( + "mfmsr %0;" /* Save MSR */ + "li %1,0;" + "mtmsr %1;" /* Clear MSR */ "isync;" - "tlbwe %1,%3,0;" /* Invalidate old entry. */ - "mfpid %1;" /* Save old PID */ - "mtpid %2;" /* Load translation ctx */ + "tlbwe %1,%3,0;" /* Invalidate old entry. 
*/ + "mfpid %1;" /* Save old PID */ + "mtpid %2;" /* Load translation ctx */ "isync;" - "tlbwe %4,%3,1; tlbwe %5,%3,0;" /* Set TLB */ + "tlbwe %4,%3,1;" /* Set TLB */ + "tlbwe %5,%3,0;" "isync;" - "mtpid %1; mtmsr %0;" /* Restore PID and MSR */ + "mtpid %1;" /* Restore PID */ + "mtmsr %0;" /* and MSR */ "isync;" - : "=&r" (msr), "=&r" (pid) - : "r" (ctx), "r" (idx), "r" (tl), "r" (th)); + : "=&r"(msr), "=&r"(pid) + : "r"(ctx), "r"(idx), "r"(tl), "r"(th)); } void @@ -1418,10 +1426,10 @@ ppc4xx_tlb_init(void) * Z3 - full access regardless of TLB entry permissions */ - __asm volatile( - "mtspr %0,%1;" + __asm volatile ( + "mtspr %0,%1;" "isync;" - :: "K"(SPR_ZPR), "r" (0x1b000000)); + : : "K"(SPR_ZPR), "r"(0x1b000000)); } /* @@ -1432,7 +1440,7 @@ ppc4xx_tlb_init(void) static int ppc4xx_tlb_size_mask(size_t size, int *mask, int *rsiz) { - int i; + int i; for (i = 0; i < __arraycount(tlbsize); i++) if (size <= tlbsize[i]) { @@ -1455,18 +1463,18 @@ ppc4xx_tlb_size_mask(size_t size, int *m void * ppc4xx_tlb_mapiodev(paddr_t base, psize_t len) { - paddr_t pa; - vaddr_t va; - u_int lo, hi, sz; - int i; + paddr_t pa; + vaddr_t va; + u_int lo, hi, sz; + int i; /* tlb_nreserved is only allowed to grow, so this is safe. */ for (i = 0; i < tlb_nreserved; i++) { __asm volatile ( - " tlbre %0,%2,1 \n" /* TLBLO */ - " tlbre %1,%2,0 \n" /* TLBHI */ - : "=&r" (lo), "=&r" (hi) - : "r" (i)); + "tlbre %0,%2,1;" /* TLBLO */ + "tlbre %1,%2,0;" /* TLBHI */ + : "=&r"(lo), "=&r"(hi) + : "r"(i)); KASSERT(hi & TLB_VALID); KASSERT(mfspr(SPR_PID) == KERNEL_PID); @@ -1476,7 +1484,7 @@ ppc4xx_tlb_mapiodev(paddr_t base, psize_ continue; sz = tlbsize[(hi & TLB_SIZE_MASK) >> TLB_SIZE_SHFT]; - if ((base + len) > (pa + sz)) + if (base + len > pa + sz) continue; va = (hi & TLB_EPN_MASK) + (base & (sz - 1)); /* sz = 2^n */ @@ -1494,12 +1502,12 @@ ppc4xx_tlb_mapiodev(paddr_t base, psize_ void ppc4xx_tlb_reserve(paddr_t pa, vaddr_t va, size_t size, int flags) { - u_int lo, hi; - int szmask, rsize; + u_int lo, hi; + int szmask, rsize; /* Called before pmap_bootstrap(), va outside kernel space. */ KASSERT(va < VM_MIN_KERNEL_ADDRESS || va >= VM_MAX_KERNEL_ADDRESS); - KASSERT(! pmap_bootstrap_done); + KASSERT(!pmap_bootstrap_done); KASSERT(tlb_nreserved < NTLB); /* Resolve size. */ @@ -1519,10 +1527,10 @@ ppc4xx_tlb_reserve(paddr_t pa, vaddr_t v #endif __asm volatile( - " tlbwe %1,%0,1 \n" /* write TLBLO */ - " tlbwe %2,%0,0 \n" /* write TLBHI */ - " isync \n" - : : "r" (tlb_nreserved), "r" (lo), "r" (hi)); + "tlbwe %1,%0,1;" /* write TLBLO */ + "tlbwe %2,%0,0;" /* write TLBHI */ + "isync;" + : : "r"(tlb_nreserved), "r"(lo), "r"(hi)); tlb_nreserved++; } @@ -1548,19 +1556,20 @@ pmap_tlbmiss(vaddr_t va, int ctx) (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)) { pte = pte_find((struct pmap *)__UNVOLATILE(ctxbusy[ctx]), va); if (pte == NULL) { - /* Map unmanaged addresses directly for kernel access */ + /* + * Map unmanaged addresses directly for + * kernel access + */ return 1; } tte = *pte; - if (tte == 0) { + if (tte == 0) return 1; - } } else { /* Create a 16MB writable mapping. 
-#ifdef PPC_4XX_NOCACHE
-		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I |TTE_WR;
-#else
 		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
+#ifdef PPC_4XX_NOCACHE
+		tte |= TTE_I;
 #endif
 	}
 	ppc4xx_tlb_enter(ctx, va, tte);
@@ -1583,8 +1592,8 @@ ctx_flush(int cnum)
 			if (TLB_LOCKED(i)) {
 #ifdef DIAGNOSTIC
 				printf("ctx_flush: can't invalidate "
-				    "locked mapping %d "
-				    "for context %d\n", i, cnum);
+				    "locked mapping %d for context %d\n",
+				    i, cnum);
 #ifdef DDB
 				Debugger();
 #endif
@@ -1613,8 +1622,8 @@ ctx_flush(int cnum)
 int
 ctx_alloc(struct pmap *pm)
 {
-	int s, cnum;
 	static int next = MINCTX;
+	int cnum, s;
 
 	if (pm == pmap_kernel()) {
 #ifdef DIAGNOSTIC
@@ -1622,12 +1631,13 @@ ctx_alloc(struct pmap *pm)
 #endif
 		return 0;
 	}
+
 	s = splvm();
 
 	/* Find a likely context. */
 	cnum = next;
 	do {
-		if ((++cnum) >= NUMCTX)
+		if (++cnum >= NUMCTX)
 			cnum = MINCTX;
 	} while (ctxbusy[cnum] != NULL && cnum != next);
@@ -1637,7 +1647,7 @@ oops:
 		cnum = MINCTX; /* Never steal ctx 0 or 1 */
 	if (ctx_flush(cnum)) {
 		/* oops -- something's wired. */
-		if ((++cnum) >= NUMCTX)
+		if (++cnum >= NUMCTX)
 			cnum = MINCTX;
 		goto oops;
 	}
@@ -1651,7 +1661,9 @@ oops:
 	}
 	ctxbusy[cnum] = pm;
 	next = cnum;
+
 	splx(s);
+
 	pm->pm_ctx = cnum;
 
 	return cnum;
@@ -1669,24 +1681,25 @@ ctx_free(struct pmap *pm)
 
 	if (oldctx == 0)
 		panic("ctx_free: freeing kernel context");
+
 #ifdef DIAGNOSTIC
 	if (ctxbusy[oldctx] == 0)
 		printf("ctx_free: freeing free context %d\n", oldctx);
 	if (ctxbusy[oldctx] != pm) {
 		printf("ctx_free: freeing someone esle's context\n "
-		       "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
-		       oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
+		    "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
+		    oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
 #ifdef DDB
 		Debugger();
 #endif
 	}
 #endif
+
 	/* We should verify it has not been stolen and reallocated... */
 	ctxbusy[oldctx] = NULL;
 	ctx_flush(oldctx);
 }
 
-
 #ifdef DEBUG
 /*
  * Test ref/modify handling.
@@ -1695,17 +1708,16 @@ void pmap_testout(void);
 void
 pmap_testout(void)
 {
+	struct vm_page *pg;
 	vaddr_t va;
-	volatile int *loc;
-	int val = 0;
 	paddr_t pa;
-	struct vm_page *pg;
-	int ref, mod;
+	volatile int *loc;
+	int ref, mod, val = 0;
 
 	/* Allocate a page */
 	va = (vaddr_t)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 	    UVM_KMF_WIRED | UVM_KMF_ZERO);
-	loc = (int*)va;
+	loc = (int *)va;
 
 	pmap_extract(pmap_kernel(), va, &pa);
 	pg = PHYS_TO_VM_PAGE(pa);
@@ -1719,88 +1731,76 @@ pmap_testout(void)
 	ref = pmap_clear_reference(pg);
 	mod = pmap_clear_modify(pg);
 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
-	       (void *)(u_long)va, (long)pa,
-	       ref, mod);
+	    (void *)(u_long)va, (long)pa, ref, mod);
 
 	/* Check it's properly cleared */
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Checking cleared page: ref %d, mod %d\n",
-	       ref, mod);
+	printf("Checking cleared page: ref %d, mod %d\n", ref, mod);
 
 	/* Reference page */
 	val = *loc;
 
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Referenced page: ref %d, mod %d val %x\n",
-	       ref, mod, val);
+	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
 
 	/* Now clear reference and modify */
 	ref = pmap_clear_reference(pg);
 	mod = pmap_clear_modify(pg);
 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
-	       (void *)(u_long)va, (long)pa,
-	       ref, mod);
+	    (void *)(u_long)va, (long)pa, ref, mod);
 
 	/* Modify page */
 	*loc = 1;
 
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Modified page: ref %d, mod %d\n",
-	       ref, mod);
+	printf("Modified page: ref %d, mod %d\n", ref, mod);
 
 	/* Now clear reference and modify */
 	ref = pmap_clear_reference(pg);
 	mod = pmap_clear_modify(pg);
 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
-	       (void *)(u_long)va, (long)pa,
-	       ref, mod);
+	    (void *)(u_long)va, (long)pa, ref, mod);
 
 	/* Check it's properly cleared */
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Checking cleared page: ref %d, mod %d\n",
-	       ref, mod);
+	printf("Checking cleared page: ref %d, mod %d\n", ref, mod);
 
 	/* Modify page */
 	*loc = 1;
 
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Modified page: ref %d, mod %d\n",
-	       ref, mod);
+	printf("Modified page: ref %d, mod %d\n", ref, mod);
 
 	/* Check pmap_protect() */
 	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
 	pmap_update(pmap_kernel());
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
-	       ref, mod);
+	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n", ref, mod);
 
 	/* Now clear reference and modify */
 	ref = pmap_clear_reference(pg);
 	mod = pmap_clear_modify(pg);
 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
-	       (void *)(u_long)va, (long)pa,
-	       ref, mod);
+	    (void *)(u_long)va, (long)pa, ref, mod);
 
 	/* Reference page */
 	val = *loc;
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Referenced page: ref %d, mod %d val %x\n",
-	       ref, mod, val);
+	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
 
 	/* Now clear reference and modify */
 	ref = pmap_clear_reference(pg);
 	mod = pmap_clear_modify(pg);
 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
-	       (void *)(u_long)va, (long)pa,
-	       ref, mod);
+	    (void *)(u_long)va, (long)pa, ref, mod);
 
 	/* Modify page */
 #if 0
@@ -1811,38 +1811,33 @@ pmap_testout(void)
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Modified page: ref %d, mod %d\n",
-	       ref, mod);
+	printf("Modified page: ref %d, mod %d\n", ref, mod);
 
 	/* Check pmap_protect() */
 	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
 	pmap_update(pmap_kernel());
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("pmap_protect(): ref %d, mod %d\n",
-	       ref, mod);
+	printf("pmap_protect(): ref %d, mod %d\n", ref, mod);
 
 	/* Now clear reference and modify */
 	ref = pmap_clear_reference(pg);
 	mod = pmap_clear_modify(pg);
 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
-	       (void *)(u_long)va, (long)pa,
-	       ref, mod);
+	    (void *)(u_long)va, (long)pa, ref, mod);
 
 	/* Reference page */
 	val = *loc;
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Referenced page: ref %d, mod %d val %x\n",
-	       ref, mod, val);
+	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
 
 	/* Now clear reference and modify */
 	ref = pmap_clear_reference(pg);
 	mod = pmap_clear_modify(pg);
 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
-	       (void *)(u_long)va, (long)pa,
-	       ref, mod);
+	    (void *)(u_long)va, (long)pa, ref, mod);
 
 	/* Modify page */
 #if 0
@@ -1853,37 +1848,32 @@ pmap_testout(void)
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Modified page: ref %d, mod %d\n",
-	       ref, mod);
+	printf("Modified page: ref %d, mod %d\n", ref, mod);
 
 	/* Check pmap_pag_protect() */
 	pmap_page_protect(pg, VM_PROT_READ);
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
-	       ref, mod);
+	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n", ref, mod);
 
 	/* Now clear reference and modify */
 	ref = pmap_clear_reference(pg);
 	mod = pmap_clear_modify(pg);
 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
-	       (void *)(u_long)va, (long)pa,
-	       ref, mod);
+	    (void *)(u_long)va, (long)pa, ref, mod);
 
 	/* Reference page */
 	val = *loc;
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Referenced page: ref %d, mod %d val %x\n",
-	       ref, mod, val);
+	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
 
 	/* Now clear reference and modify */
 	ref = pmap_clear_reference(pg);
 	mod = pmap_clear_modify(pg);
 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
-	       (void *)(u_long)va, (long)pa,
-	       ref, mod);
+	    (void *)(u_long)va, (long)pa, ref, mod);
 
 	/* Modify page */
 #if 0
@@ -1894,22 +1884,19 @@ pmap_testout(void)
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Modified page: ref %d, mod %d\n",
-	       ref, mod);
+	printf("Modified page: ref %d, mod %d\n", ref, mod);
 
 	/* Check pmap_pag_protect() */
 	pmap_page_protect(pg, VM_PROT_NONE);
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("pmap_page_protect(): ref %d, mod %d\n",
-	       ref, mod);
+	printf("pmap_page_protect(): ref %d, mod %d\n", ref, mod);
 
 	/* Now clear reference and modify */
 	ref = pmap_clear_reference(pg);
 	mod = pmap_clear_modify(pg);
 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
-	       (void *)(u_long)va, (long)pa,
-	       ref, mod);
+	    (void *)(u_long)va, (long)pa, ref, mod);
 
 	/* Reference page */
@@ -1917,15 +1904,13 @@ pmap_testout(void)
 	val = *loc;
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Referenced page: ref %d, mod %d val %x\n",
-	       ref, mod, val);
+	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
 
 	/* Now clear reference and modify */
 	ref = pmap_clear_reference(pg);
 	mod = pmap_clear_modify(pg);
 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
-	       (void *)(u_long)va, (long)pa,
-	       ref, mod);
+	    (void *)(u_long)va, (long)pa, ref, mod);
 
 	/* Modify page */
 #if 0
@@ -1936,8 +1921,7 @@ pmap_testout(void)
 	ref = pmap_is_referenced(pg);
 	mod = pmap_is_modified(pg);
-	printf("Modified page: ref %d, mod %d\n",
%d\n", - ref, mod); + printf("Modified page: ref %d, mod %d\n", ref, mod); /* Unmap page */ pmap_remove(pmap_kernel(), va, va+1); @@ -1950,13 +1934,12 @@ pmap_testout(void) ref = pmap_clear_reference(pg); mod = pmap_clear_modify(pg); printf("Clearing page va %p pa %lx: ref %d, mod %d\n", - (void *)(u_long)va, (long)pa, ref, mod); + (void *)(u_long)va, (long)pa, ref, mod); /* Check it's properly cleared */ ref = pmap_is_referenced(pg); mod = pmap_is_modified(pg); - printf("Checking cleared page: ref %d, mod %d\n", - ref, mod); + printf("Checking cleared page: ref %d, mod %d\n", ref, mod); pmap_remove(pmap_kernel(), va, va + PAGE_SIZE); pmap_kenter_pa(va, pa, VM_PROT_ALL, 0);