From: Tiwei Bie <[email protected]>

Although UML_ROUND_UP() is defined in a shared header file, it
depends on the PAGE_SIZE and PAGE_MASK macros, so it can only be
used in kernel code. Since its name is not very descriptive and its
functionality duplicates PAGE_ALIGN(), replace its users with direct
calls to PAGE_ALIGN() and remove it.
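For reference, the two macros expand to the same expression. Below is
a minimal user-space sketch of the equivalence (not kernel code:
PAGE_SIZE is hard-coded to a typical 4 KiB page, and PAGE_ALIGN() is
paraphrased from its expansion via ALIGN() in include/linux/mm.h):

    #include <assert.h>

    #define PAGE_SIZE 4096UL                /* assume 4 KiB pages */
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* The macro removed by this patch: */
    #define UML_ROUND_UP(addr) \
            ((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)

    /* What PAGE_ALIGN(addr) expands to in include/linux/mm.h: */
    #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long addr = 0x12345;

            /* Both round up to the next page boundary: 0x13000. */
            assert(UML_ROUND_UP(addr) == PAGE_ALIGN(addr));
            return 0;
    }

The one practical difference is that UML_ROUND_UP() casts its argument
internally, while PAGE_ALIGN() expects an integer type, hence the
explicit (unsigned long) casts added at the call sites below.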

Signed-off-by: Tiwei Bie <[email protected]>
---
 arch/um/include/shared/kern_util.h | 3 ---
 arch/um/kernel/mem.c               | 2 +-
 arch/um/kernel/um_arch.c           | 5 ++---
 3 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index 00ca3e12fd9a..949a03c7861e 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -15,9 +15,6 @@ extern int uml_exitcode;
 
 extern int kmalloc_ok;
 
-#define UML_ROUND_UP(addr) \
-       ((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)
-
 extern unsigned long alloc_stack(int order, int atomic);
 extern void free_stack(unsigned long stack, int order);
 
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 32e3b1972dc1..19d40b58eac4 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -71,7 +71,7 @@ void __init arch_mm_preinit(void)
        /* Map in the area just after the brk now that kmalloc is about
         * to be turned on.
         */
-       brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
+       brk_end = PAGE_ALIGN((unsigned long) sbrk(0));
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
        memblock_free((void *)brk_end, uml_reserved - brk_end);
        uml_reserved = brk_end;
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index c54d5ed91bb8..74c75d2287d5 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -350,12 +350,11 @@ int __init linux_main(int argc, char **argv, char **envp)
         * so they actually get what they asked for. This should
         * add zero for non-exec shield users
         */
-
-       diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
+       diff = PAGE_ALIGN(brk_start) - PAGE_ALIGN((unsigned long) &_end);
        if (diff > 1024 * 1024) {
                os_info("Adding %ld bytes to physical memory to account for "
                        "exec-shield gap\n", diff);
-               physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
+               physmem_size += diff;
        }
 
        uml_physmem = (unsigned long) __binary_start & PAGE_MASK;
-- 
2.34.1

