Author: nwhitehorn
Date: Sat Jul 31 21:35:15 2010
New Revision: 210704
URL: http://svn.freebsd.org/changeset/base/210704
Log:
  Improve hash coverage for kernel page table entries by modifying the
  kernel ESID -> VSID map function. This makes ZFS run stably on PowerPC
  under heavy loads (repeated simultaneous SVN checkouts and updates).

Modified:
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/aim/slb.c
  head/sys/powerpc/include/slb.h

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c	Sat Jul 31 21:33:18 2010	(r210703)
+++ head/sys/powerpc/aim/mmu_oea64.c	Sat Jul 31 21:35:15 2010	(r210704)
@@ -838,7 +838,7 @@ moea64_bootstrap_slb_prefault(vm_offset_
 	}
 
 	entry.slbe = slbe;
-	entry.slbv = KERNEL_VSID(esid, large) << SLBV_VSID_SHIFT;
+	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
 	if (large)
 		entry.slbv |= SLBV_L;
 

Modified: head/sys/powerpc/aim/slb.c
==============================================================================
--- head/sys/powerpc/aim/slb.c	Sat Jul 31 21:33:18 2010	(r210703)
+++ head/sys/powerpc/aim/slb.c	Sat Jul 31 21:35:15 2010	(r210704)
@@ -104,17 +104,10 @@ uint64_t
 va_to_vsid(pmap_t pm, vm_offset_t va)
 {
 	struct slb entry;
-	int large;
 
 	/* Shortcut kernel case */
-	if (pm == kernel_pmap) {
-		large = 0;
-		if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS &&
-		    mem_valid(va, 0) == 0)
-			large = 1;
-
-		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT, large));
-	}
+	if (pm == kernel_pmap)
+		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));
 
 	/*
 	 * If there is no vsid for this VA, we need to add a new entry

Modified: head/sys/powerpc/include/slb.h
==============================================================================
--- head/sys/powerpc/include/slb.h	Sat Jul 31 21:33:18 2010	(r210703)
+++ head/sys/powerpc/include/slb.h	Sat Jul 31 21:35:15 2010	(r210704)
@@ -47,14 +47,15 @@
 #define	SLBV_VSID_MASK	0xfffffffffffff000UL /* Virtual segment ID mask */
 #define	SLBV_VSID_SHIFT	12
 
-#define	KERNEL_VSID_BIT	0x0000001000000000UL /* Bit set in all kernel VSIDs */
-
 /*
- * Shift large-page VSIDs one place left. At present, they are only used in the
- * kernel direct map, and we already assume in the placement of KVA that the
- * CPU cannot address more than 63 bits of memory.
+ * Make a predictable 1:1 map from ESIDs to VSIDs for the kernel. Hash table
+ * coverage is increased by swizzling the ESID and multiplying by a prime
+ * number (0x13bb).
  */
-#define	KERNEL_VSID(esid, large) (((uint64_t)(esid) << (large ? 1 : 0)) | KERNEL_VSID_BIT)
+#define	KERNEL_VSID_BIT	0x0000001000000000UL /* Bit set in all kernel VSIDs */
+#define	KERNEL_VSID(esid) ((((((uint64_t)esid << 8) | ((uint64_t)esid >> 28)) \
+				* 0x13bbUL) & (KERNEL_VSID_BIT - 1)) | \
+				KERNEL_VSID_BIT)
 
 #define	SLBE_VALID	0x0000000008000000UL /* SLB entry valid */
 #define	SLBE_INDEX_MASK	0x0000000000000fffUL /* SLB index mask*/
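
For readers following along: the old macro mapped ESID e straight to VSID
(e | KERNEL_VSID_BIT), so adjacent kernel segments landed in adjacent
VSIDs and clustered in the page table hash. The new macro swizzles the
36-bit ESID (shift left 8, fold the top bits back in), multiplies by the
prime 0x13bb, masks to the 36 bits below KERNEL_VSID_BIT, and sets the
kernel bit. Below is a minimal userland sketch of that computation; the
macro body is copied from the committed slb.h, while the main() driver is
purely illustrative and not part of the commit.

/*
 * Userland sketch of the new kernel ESID -> VSID map (illustration only;
 * mirrors the KERNEL_VSID() macro committed to slb.h in r210704).
 */
#include <stdint.h>
#include <stdio.h>

#define	KERNEL_VSID_BIT	0x0000001000000000UL /* Bit set in all kernel VSIDs */
#define	KERNEL_VSID(esid) ((((((uint64_t)esid << 8) | ((uint64_t)esid >> 28)) \
				* 0x13bbUL) & (KERNEL_VSID_BIT - 1)) | \
				KERNEL_VSID_BIT)

int
main(void)
{
	uint64_t esid;

	/*
	 * Adjacent kernel ESIDs now map to VSIDs roughly 0x13bb00 apart
	 * rather than 1 apart, spreading their PTEs across many more
	 * groups of the page table hash.
	 */
	for (esid = 0; esid < 8; esid++)
		printf("ESID %#jx -> VSID %#jx\n", (uintmax_t)esid,
		    (uintmax_t)KERNEL_VSID(esid));
	return (0);
}

The mapping stays deterministic and 1:1 (the multiplier is odd, so
distinct ESIDs cannot collide within the 36-bit mask), which is what lets
the kernel recompute a segment's VSID on the fly instead of looking it up.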