Author: nwhitehorn
Date: Wed Mar 28 17:25:29 2012
New Revision: 233618
URL: http://svn.freebsd.org/changeset/base/233618
Log:
  More PMAP performance improvements: skip 256 MB segments entirely if they
  are not mapped during ranged operations, and reduce the scope of the tlbie
  lock to only the actual tlbie instruction instead of the entire sequence.

  There are a few more optimization possibilities here as well.

Modified:
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/aim/moea64_native.c

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c	Wed Mar 28 17:21:59 2012	(r233617)
+++ head/sys/powerpc/aim/mmu_oea64.c	Wed Mar 28 17:25:29 2012	(r233618)
@@ -1981,10 +1981,18 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_
 	LOCK_TABLE_RD();
 	PMAP_LOCK(pm);
 	if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
-		for (; sva < eva; sva += PAGE_SIZE) {
+		while (sva < eva) {
+			#ifdef __powerpc64__
+			if (pm != kernel_pmap &&
+			    user_va_to_slb_entry(pm, sva) == NULL) {
+				sva = roundup2(sva + 1, SEGMENT_LENGTH);
+				continue;
+			}
+			#endif
 			pvo = moea64_pvo_find_va(pm, sva);
 			if (pvo != NULL)
 				moea64_pvo_protect(mmu, pm, pvo, prot);
+			sva += PAGE_SIZE;
 		}
 	} else {
 		LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
@@ -2095,10 +2103,18 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_o
 	LOCK_TABLE_WR();
 	PMAP_LOCK(pm);
 	if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
-		for (; sva < eva; sva += PAGE_SIZE) {
+		while (sva < eva) {
+			#ifdef __powerpc64__
+			if (pm != kernel_pmap &&
+			    user_va_to_slb_entry(pm, sva) == NULL) {
+				sva = roundup2(sva + 1, SEGMENT_LENGTH);
+				continue;
+			}
+			#endif
 			pvo = moea64_pvo_find_va(pm, sva);
 			if (pvo != NULL)
 				moea64_pvo_remove(mmu, pvo);
+			sva += PAGE_SIZE;
 		}
 	} else {
 		LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
@@ -2566,7 +2582,7 @@ moea64_mapdev_attr(mmu_t mmu, vm_offset_
 
 	ppa = trunc_page(pa);
 	offset = pa & PAGE_MASK;
-	size = roundup(offset + size, PAGE_SIZE);
+	size = roundup2(offset + size, PAGE_SIZE);
 
 	va = kmem_alloc_nofault(kernel_map, size);
 
@@ -2597,7 +2613,7 @@ moea64_unmapdev(mmu_t mmu, vm_offset_t v
 
 	base = trunc_page(va);
 	offset = va & PAGE_MASK;
-	size = roundup(offset + size, PAGE_SIZE);
+	size = roundup2(offset + size, PAGE_SIZE);
 
 	kmem_free(kernel_map, base, size);
 }

Modified: head/sys/powerpc/aim/moea64_native.c
==============================================================================
--- head/sys/powerpc/aim/moea64_native.c	Wed Mar 28 17:21:59 2012	(r233617)
+++ head/sys/powerpc/aim/moea64_native.c	Wed Mar 28 17:25:29 2012	(r233618)
@@ -103,6 +103,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/sched.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
 
@@ -152,15 +153,13 @@ TLBIE(uint64_t vpn) {
 	vpn &= ~(0xffffULL << 48);
 
 #ifdef __powerpc64__
+	sched_pin();
+	__asm __volatile("ptesync");
 	mtx_lock(&tlbie_mutex);
-	__asm __volatile("\
-	    ptesync; \
-	    tlbie %0; \
-	    eieio; \
-	    tlbsync; \
-	    ptesync;"
-	:: "r"(vpn) : "memory");
+	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
 	mtx_unlock(&tlbie_mutex);
+	__asm __volatile("eieio; tlbsync; ptesync");
+	sched_unpin();
 #else
 	vpn_hi = (uint32_t)(vpn >> 32);
 	vpn_lo = (uint32_t)vpn;
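
A minimal, standalone user-space sketch of the segment-skip arithmetic added
by the first two hunks: when a 256 MB segment has no SLB entry, roundup2()
advances the scan address to the next segment boundary instead of probing
every page in it. This is illustrative C only, not kernel code;
segment_is_mapped() is a hypothetical stand-in for the kernel's
"user_va_to_slb_entry(pm, sva) != NULL" check, and the PAGE_SIZE and
SEGMENT_LENGTH values here simply assume the usual PowerPC AIM constants.

/*
 * Sketch only: skip an entire unmapped 256 MB segment by rounding the
 * scan address up to the next segment boundary rather than visiting
 * each of its pages.  Constants and the lookup predicate are stand-ins,
 * not the kernel's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define SEGMENT_LENGTH	(256UL * 1024 * 1024)	/* 256 MB */

/* Round x up to the next multiple of y, where y is a power of two. */
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((uint64_t)(y) - 1))

/* Hypothetical stand-in for the SLB-entry lookup. */
static int
segment_is_mapped(uint64_t va)
{
	/* Pretend only the odd-numbered segments are mapped. */
	return ((va / SEGMENT_LENGTH) % 2 == 1);
}

int
main(void)
{
	uint64_t sva = 0, eva = 4 * SEGMENT_LENGTH;
	uint64_t pages_probed = 0;

	while (sva < eva) {
		if (!segment_is_mapped(sva)) {
			/* Nothing in this segment: jump to the next one. */
			sva = roundup2(sva + 1, SEGMENT_LENGTH);
			continue;
		}
		/* ... per-page work (PVO lookup) would happen here ... */
		pages_probed++;
		sva += PAGE_SIZE;
	}
	printf("probed %llu pages instead of %llu\n",
	    (unsigned long long)pages_probed,
	    (unsigned long long)(eva / PAGE_SIZE));
	return (0);
}

For a sparsely populated address space this replaces a walk of roughly
65536 page lookups per unmapped segment with a single roundup2() step,
which is where the ranged-operation speedup comes from.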