With the isolated mm context support, there is a CPU-local variable that
can hold the patch address. Use it instead of adding a level of
indirection through the text_poke_area vm_struct.

Signed-off-by: Benjamin Gray <bg...@linux.ibm.com>
---
 arch/powerpc/lib/code-patching.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index eabdd74a26c0..ce58c1b3fcf1 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -122,6 +122,7 @@ static int text_area_cpu_up(unsigned int cpu)
        unmap_patch_area(addr);
 
        this_cpu_write(text_poke_area, area);
+       this_cpu_write(cpu_patching_addr, addr);
 
        return 0;
 }
@@ -365,7 +366,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
        pte_t *pte;
        unsigned long pfn = get_patch_pfn(addr);
 
-	text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr & PAGE_MASK;
+	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_addr) & PAGE_MASK;
        patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
 
        pte = virt_to_kpte(text_poke_addr);
-- 
2.37.3

Reply via email to