This updates the hash 4K VMALLOC_START to 0xc008000000000000. Our page table still limits the maximum address to 64TB, but with hash translation we map only the vmalloc and IO regions, and we ignore the top bits (0xc008) when mapping the address to the page table. That means VMALLOC_START gets mapped as address 0 in the linux page table.
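To illustrate the masking described above, here is a small user-space sketch; it is not part of the patch, and the constants simply mirror the 4K H_KERN_VIRT_START/H_KERN_EA_MASK values defined below (the IO address used is a hypothetical example):

/* Illustrative user-space sketch only, not part of the patch. */
#include <stdio.h>

/* These mirror the 4K defines added below. */
#define H_KERN_VIRT_START 0xc008000000000000UL
#define H_KERN_EA_MASK    (~0xc008000000000000UL)

int main(void)
{
	unsigned long vmalloc_start = H_KERN_VIRT_START;
	/* hypothetical EA 0x1000 bytes into the IO region */
	unsigned long io_ea = 0xc008100000001000UL;

	/* vmalloc start maps to offset 0 of the linux page table range */
	printf("0x%lx\n", vmalloc_start & H_KERN_EA_MASK); /* prints 0x0 */
	/* an IO EA keeps only the offset below the ignored top bits */
	printf("0x%lx\n", io_ea & H_KERN_EA_MASK); /* prints 0x100000001000 */
	return 0;
}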
With this we have for 4K:

  kernel vmalloc start = 0xc008000000000000
  kernel IO start      = 0xc008100000000000
  kernel vmemmap start = 0xc008200000000000

64K hash, 64K radix and 4K radix:

  kernel vmalloc start = 0xc008000000000000
  kernel IO start      = 0xc00a000000000000
  kernel vmemmap start = 0xc00c000000000000

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/hash-4k.h  |  8 +++++++-
 arch/powerpc/include/asm/book3s/64/hash-64k.h |  2 +-
 arch/powerpc/include/asm/book3s/64/mmu-hash.h | 11 +++++------
 arch/powerpc/include/asm/book3s/64/mmu.h      |  9 ++++++++-
 4 files changed, 21 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 64eaf187f891..73058c933560 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -26,7 +26,13 @@
  * Define the address range of the kernel non-linear virtual area
  * 16TB
  */
-#define H_KERN_VIRT_START ASM_CONST(0xc000100000000000)
+#define H_KERN_VIRT_START ASM_CONST(0xc008000000000000)
+/*
+ * Even though we use a 2PB address here, for mapping to the linux
+ * page table we ignore these bits. Hash only maps the vmalloc
+ * and I/O regions in the linux page table.
+ */
+#define H_KERN_EA_MASK ASM_CONST(~0xc008000000000000)
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 24ca63beba14..0d0ea957bdfd 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -24,7 +24,7 @@
  * 2PB
  */
 #define H_KERN_VIRT_START ASM_CONST(0xc008000000000000)
-
+#define H_KERN_EA_MASK ASM_CONST(~0xc000000000000000)
 /*
  * 64k aligned address free up few of the lower bits of RPN for us
  * We steal that here. For more deatils look at pte_pfn/pfn_pte()
  */
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 5d2adf3c1325..8be2cbbf8ef2 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -742,12 +742,6 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
 	unsigned long vsid_bits;
 	unsigned long protovsid;
 
-	/*
-	 * Bad address. We return VSID 0 for that
-	 */
-	if ((ea & EA_MASK) >= H_PGTABLE_RANGE)
-		return 0;
-
 	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
 		va_bits = 65;
 
@@ -807,6 +801,11 @@ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
 	if (!is_kernel_addr(ea))
 		return 0;
 
+	/*
+	 * Bad address. We return VSID 0 for that
+	 */
+	if ((ea & H_KERN_EA_MASK) >= H_PGTABLE_RANGE)
+		return 0;
 	context = get_kernel_context(ea);
 
 	return get_vsid(context, ea, ssize);
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 1ceee000c18d..cab8deb2501e 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -232,8 +232,15 @@ static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
 static inline unsigned long get_user_vsid(mm_context_t *ctx,
 					  unsigned long ea, int ssize)
 {
-	unsigned long context = get_user_context(ctx, ea);
+	unsigned long context;
 
+	/*
+	 * Bad address. We return VSID 0 for that
+	 */
+	if (ea >= H_PGTABLE_RANGE)
+		return 0;
+
+	context = get_user_context(ctx, ea);
 	return get_vsid(context, ea, ssize);
 }
-- 
2.20.1