With Book3s64 Hash translation, manually inserting a PTE requires updating the Linux PTE, inserting an SLB entry, and inserting the hashed page. The first is handled via the usual kernel abstractions, the second requires slb_allocate_user() which is currently 'static', and the third is available via hash_page_mm() already.
Make slb_allocate_user() non-static and add a prototype so the next patch can use it during code-patching. Signed-off-by: Christopher M. Riedl <c...@bluescreens.de> --- v4: * New to series. --- arch/powerpc/include/asm/book3s/64/mmu-hash.h | 1 + arch/powerpc/mm/book3s64/slb.c | 4 +--- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 3004f3323144d..189854eebba77 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -525,6 +525,7 @@ void slb_dump_contents(struct slb_entry *slb_ptr); extern void slb_vmalloc_update(void); extern void slb_set_size(u16 size); void preload_new_slb_context(unsigned long start, unsigned long sp); +long slb_allocate_user(struct mm_struct *mm, unsigned long ea); #endif /* __ASSEMBLY__ */ /* diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c index da0836cb855af..532eb51bc5211 100644 --- a/arch/powerpc/mm/book3s64/slb.c +++ b/arch/powerpc/mm/book3s64/slb.c @@ -29,8 +29,6 @@ #include "internal.h" -static long slb_allocate_user(struct mm_struct *mm, unsigned long ea); - bool stress_slb_enabled __initdata; static int __init parse_stress_slb(char *p) @@ -791,7 +789,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id) return slb_insert_entry(ea, context, flags, ssize, true); } -static long slb_allocate_user(struct mm_struct *mm, unsigned long ea) +long slb_allocate_user(struct mm_struct *mm, unsigned long ea) { unsigned long context; unsigned long flags; -- 2.26.1