Some architectures would like to be notified when a memory area is moved through the mremap system call.
This patch introduces a new arch_remap mm hook, which is placed in the mremap path and called before the old area is unmapped (and before the arch_unmap hook is called).

Architectures that need this hook should define __HAVE_ARCH_REMAP in their asm/mmu_context.h and provide the arch_remap service with the following prototype:

	void arch_remap(struct mm_struct *mm,
			unsigned long old_start, unsigned long old_end,
			unsigned long new_start, unsigned long new_end);

Signed-off-by: Laurent Dufour <lduf...@linux.vnet.ibm.com>
Reviewed-by: Ingo Molnar <mi...@kernel.org>
---
 mm/mremap.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/mm/mremap.c b/mm/mremap.c
index 2dc44b1cb1df..009db5565893 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -25,6 +25,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 #include "internal.h"
 
@@ -286,13 +287,19 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		old_len = new_len;
 		old_addr = new_addr;
 		new_addr = -ENOMEM;
-	} else if (vma->vm_file && vma->vm_file->f_op->mremap) {
-		err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
-		if (err < 0) {
-			move_page_tables(new_vma, new_addr, vma, old_addr,
-					 moved_len, true);
-			return err;
+	} else {
+		if (vma->vm_file && vma->vm_file->f_op->mremap) {
+			err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+			if (err < 0) {
+				move_page_tables(new_vma, new_addr, vma,
+						 old_addr, moved_len, true);
+				return err;
+			}
 		}
+#ifdef __HAVE_ARCH_REMAP
+		arch_remap(mm, old_addr, old_addr+old_len,
+			   new_addr, new_addr+new_len);
+#endif
 	}
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
-- 
1.9.1
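
For illustration, a minimal sketch of how an architecture might opt into this hook from its asm/mmu_context.h. Only __HAVE_ARCH_REMAP and the arch_remap prototype come from the patch above; the use of a per-mm cached VDSO base in mm->context is an assumption for this sketch, not part of the patch.

	/* Hypothetical excerpt from an architecture's asm/mmu_context.h. */
	#define __HAVE_ARCH_REMAP

	/*
	 * Called from move_vma() before the old area is unmapped.  This
	 * sketch assumes the architecture caches a per-mm VDSO base
	 * address in mm->context and simply tracks its relocation; a real
	 * implementation would update whatever address-based per-mm state
	 * the architecture keeps.
	 */
	static inline void arch_remap(struct mm_struct *mm,
				      unsigned long old_start,
				      unsigned long old_end,
				      unsigned long new_start,
				      unsigned long new_end)
	{
		/* mremap() moves a single area, so checking the start is enough. */
		if (old_start == mm->context.vdso_base)
			mm->context.vdso_base = new_start;
	}

Because the hook runs before the old range is torn down, such an implementation can still compare against the old addresses when deciding whether its cached state needs to follow the move.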