Benjamin Herrenschmidt <b...@kernel.crashing.org> writes:

> On Thu, 2012-09-06 at 20:59 +0530, Aneesh Kumar K.V wrote:
>> Hi,
>>
>> This patchset include patches for supporting 64TB with ppc64. I haven't
>> booted this on hardware with 64TB memory yet. But they boot fine on real
>> hardware with less memory. Changes extend VSID bits to 38 bits for a
>> 256MB segment and 26 bits for 1TB segments.
>
> Your series breaks the embedded 64-bit build. You seem to be hard wiring
> dependencies on slice stuff all over 64-bit stuff regardless of the MMU
> type or the value of CONFIG_MM_SLICES.
>
> Also all these:
>
>> +/* 4 bits per slice and we have one slice per 1TB */
>> +#if 0 /* We can't directly include pgtable.h hence this hack */
>> +#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)
>> +#else
>> +/* Right now we only support 64TB */
>> +#define SLICE_ARRAY_SIZE 32
>> +#endif
>
> Things are just too horrible. Find a different way of doing it, if
> necessary create a new range define somewhere, whatever but don't leave
> that crap as-is, it's too wrong.
>
> Dropping the series for now.
How about the change below? If you are ok with moving the range details to a
new header, I can fold this into patch 7 and send a new series.

-aneesh

diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 428f23e..057a12a 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -14,6 +14,7 @@
 
 #include <asm/asm-compat.h>
 #include <asm/page.h>
+#include <asm/pgtable-ppc64-range.h>
 
 /*
  * Segment table
@@ -415,12 +416,7 @@ extern void slb_set_size(u16 size);
 	add	rt,rt,rx
 
 /* 4 bits per slice and we have one slice per 1TB */
-#if 0 /* We can't directly include pgtable.h hence this hack */
 #define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)
-#else
-/* Right now we only support 64TB */
-#define SLICE_ARRAY_SIZE 32
-#endif
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index b55beb4..01ab518 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -78,16 +78,14 @@ extern u64 ppc64_pft_size;
 #define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
 #define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
 
-/* 1 bit per slice and we have one slice per 1TB */
-#if 0 /* We can't directly include pgtable.h hence this hack */
-#define SLICE_MASK_SIZE (PGTABLE_RANGE >> 43)
-#else
-/*
+/* 1 bit per slice and we have one slice per 1TB
  * Right now we support only 64TB.
  * IF we change this we will have to change the type
  * of high_slices
  */
 #define SLICE_MASK_SIZE 8
+#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
+#error PGTABLE_RANGE exceeds slice_mask high_slices size
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-range.h b/arch/powerpc/include/asm/pgtable-ppc64-range.h
new file mode 100644
index 0000000..04a825c
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-ppc64-range.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_POWERPC_PGTABLE_PPC64_RANGE_H_
+#define _ASM_POWERPC_PGTABLE_PPC64_RANGE_H_
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/pgtable-ppc64-64k.h>
+#else
+#include <asm/pgtable-ppc64-4k.h>
+#endif
+
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
+#endif
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index dea953f..ee783b4 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -13,13 +13,7 @@
 
 #define FIRST_USER_ADDRESS	0
 
-/*
- * Size of EA range mapped by our pagetables.
- */
-#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
-#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
-
+#include <asm/pgtable-ppc64-range.h>
 /* Some sanity checking */
 #if TASK_SIZE_USER64 > PGTABLE_RANGE
@@ -32,14 +26,6 @@
 #endif
 #endif
 
-#if (PGTABLE_RANGE >> 41) > SLICE_ARRAY_SIZE
-#error PGTABLE_RANGE exceeds SLICE_ARRAY_SIZE
-#endif
-
-#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
-#error PGTABLE_RANGE exceeds slice_mask high_slices size
-#endif
-
 /*
  * Define the address range of the kernel non-linear virtual area
  */
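For anyone checking the shift constants above, here is a standalone sketch
(not part of the patch, purely illustrative, buildable with any host C
compiler) of how the >> 41 and >> 43 shifts fall out of storing 4 bits of
per-slice page-size information and 1 mask bit per 1TB slice, assuming the
64TB (2^46 byte) range this series targets:

/*
 * Illustration only: derive SLICE_ARRAY_SIZE and SLICE_MASK_SIZE from
 * PGTABLE_RANGE the same way the headers above do, assuming 64TB.
 */
#include <assert.h>
#include <stdio.h>

#define PGTABLE_RANGE	(1ULL << 46)	/* 64TB mapped by the page tables */
#define SLICE_SHIFT	40		/* one slice per 1TB segment */

int main(void)
{
	unsigned long long nr_slices = PGTABLE_RANGE >> SLICE_SHIFT;	/* 64 */

	/* 4 bits of page-size info per slice -> half a byte per slice */
	unsigned long long slice_array_size = nr_slices * 4 / 8;
	/* 1 bit per slice in the slice mask */
	unsigned long long slice_mask_size = nr_slices / 8;

	/* Same values the headers get directly with a shift */
	assert(slice_array_size == (PGTABLE_RANGE >> 41));	/* 32 bytes */
	assert(slice_mask_size == (PGTABLE_RANGE >> 43));	/* 8 bytes */

	printf("slices=%llu array=%llu bytes mask=%llu bytes\n",
	       nr_slices, slice_array_size, slice_mask_size);
	return 0;
}

With PGTABLE_RANGE now coming from the shared pgtable-ppc64-range.h,
SLICE_ARRAY_SIZE and the SLICE_MASK_SIZE sanity check can both be derived
from that one constant instead of hardcoding 32 and 8 behind an #if 0.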