From: Nikita Kalyazin <[email protected]>

Switch set_direct_map_invalid_noflush(), set_direct_map_default_noflush()
and set_direct_map_valid_noflush() to take a kernel virtual address instead
of a struct page.  This avoids excessive folio->page->address conversions
when adding helpers on top of set_direct_map_valid_noflush() in the next
patch.
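
For example (illustrative, mirroring the mm/secretmem.c hunk below), a
caller that starts from a folio no longer needs to resolve a struct page
first:

	/* before: pass the head page; the helper derives the address */
	err = set_direct_map_invalid_noflush(folio_page(folio, 0));

	/* after: pass the kernel virtual address of the folio directly */
	err = set_direct_map_invalid_noflush(folio_address(folio));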

Signed-off-by: Nikita Kalyazin <[email protected]>
---
 arch/arm64/include/asm/set_memory.h     |  7 ++++---
 arch/arm64/mm/pageattr.c                | 19 +++++++++----------
 arch/loongarch/include/asm/set_memory.h |  7 ++++---
 arch/loongarch/mm/pageattr.c            | 25 +++++++++++--------------
 arch/riscv/include/asm/set_memory.h     |  7 ++++---
 arch/riscv/mm/pageattr.c                | 17 +++++++++--------
 arch/s390/include/asm/set_memory.h      |  7 ++++---
 arch/s390/mm/pageattr.c                 | 13 +++++++------
 arch/x86/include/asm/set_memory.h       |  7 ++++---
 arch/x86/mm/pat/set_memory.c            | 23 ++++++++++++-----------
 include/linux/set_memory.h              |  9 +++++----
 kernel/power/snapshot.c                 |  4 ++--
 mm/execmem.c                            |  6 ++++--
 mm/secretmem.c                          |  6 +++---
 mm/vmalloc.c                            | 11 +++++++----
 15 files changed, 89 insertions(+), 79 deletions(-)

diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h
index 90f61b17275e..c71a2a6812c4 100644
--- a/arch/arm64/include/asm/set_memory.h
+++ b/arch/arm64/include/asm/set_memory.h
@@ -11,9 +11,10 @@ bool can_set_direct_map(void);
 
 int set_memory_valid(unsigned long addr, int numpages, int enable);
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
-int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int set_direct_map_invalid_noflush(const void *addr);
+int set_direct_map_default_noflush(const void *addr);
+int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
+                                bool valid);
 bool kernel_page_present(struct page *page);
 
 int set_memory_encrypted(unsigned long addr, int numpages);
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index f0e784b963e6..e2bdc3c1f992 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -243,7 +243,7 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
                                        __pgprot(PTE_VALID));
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(const void *addr)
 {
        pgprot_t clear_mask = __pgprot(PTE_VALID);
        pgprot_t set_mask = __pgprot(0);
@@ -251,11 +251,11 @@ int set_direct_map_invalid_noflush(struct page *page)
        if (!can_set_direct_map())
                return 0;
 
-       return update_range_prot((unsigned long)page_address(page),
-                                PAGE_SIZE, set_mask, clear_mask);
+       return update_range_prot((unsigned long)addr, PAGE_SIZE, set_mask,
+                                clear_mask);
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(const void *addr)
 {
        pgprot_t set_mask = __pgprot(PTE_VALID | PTE_WRITE);
        pgprot_t clear_mask = __pgprot(PTE_RDONLY);
@@ -263,8 +263,8 @@ int set_direct_map_default_noflush(struct page *page)
        if (!can_set_direct_map())
                return 0;
 
-       return update_range_prot((unsigned long)page_address(page),
-                                PAGE_SIZE, set_mask, clear_mask);
+       return update_range_prot((unsigned long)addr, PAGE_SIZE, set_mask,
+                                clear_mask);
 }
 
 static int __set_memory_enc_dec(unsigned long addr,
@@ -347,14 +347,13 @@ int realm_register_memory_enc_ops(void)
        return arm64_mem_crypt_ops_register(&realm_crypt_ops);
 }
 
-int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
+                                bool valid)
 {
-       unsigned long addr = (unsigned long)page_address(page);
-
        if (!can_set_direct_map())
                return 0;
 
-       return set_memory_valid(addr, nr, valid);
+       return set_memory_valid((unsigned long)addr, numpages, valid);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/arch/loongarch/include/asm/set_memory.h b/arch/loongarch/include/asm/set_memory.h
index 55dfaefd02c8..5e9b67b2fea1 100644
--- a/arch/loongarch/include/asm/set_memory.h
+++ b/arch/loongarch/include/asm/set_memory.h
@@ -15,8 +15,9 @@ int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 
 bool kernel_page_present(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int set_direct_map_invalid_noflush(const void *addr);
+int set_direct_map_default_noflush(const void *addr);
+int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
+                                bool valid);
 
 #endif /* _ASM_LOONGARCH_SET_MEMORY_H */
diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c
index f5e910b68229..c1b2be915038 100644
--- a/arch/loongarch/mm/pageattr.c
+++ b/arch/loongarch/mm/pageattr.c
@@ -198,32 +198,29 @@ bool kernel_page_present(struct page *page)
        return pte_present(ptep_get(pte));
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(const void *addr)
 {
-       unsigned long addr = (unsigned long)page_address(page);
-
-       if (addr < vm_map_base)
+       if ((unsigned long)addr < vm_map_base)
                return 0;
 
-       return __set_memory(addr, 1, PAGE_KERNEL, __pgprot(0));
+       return __set_memory((unsigned long)addr, 1, PAGE_KERNEL, __pgprot(0));
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(const void *addr)
 {
-       unsigned long addr = (unsigned long)page_address(page);
-
-       if (addr < vm_map_base)
+       if ((unsigned long)addr < vm_map_base)
                return 0;
 
-       return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID));
+       return __set_memory((unsigned long)addr, 1, __pgprot(0),
+                           __pgprot(_PAGE_PRESENT | _PAGE_VALID));
 }
 
-int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
+                                bool valid)
 {
-       unsigned long addr = (unsigned long)page_address(page);
        pgprot_t set, clear;
 
-       if (addr < vm_map_base)
+       if ((unsigned long)addr < vm_map_base)
                return 0;
 
        if (valid) {
@@ -234,5 +233,5 @@ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
                clear = __pgprot(_PAGE_PRESENT | _PAGE_VALID);
        }
 
-       return __set_memory(addr, 1, set, clear);
+       return __set_memory((unsigned long)addr, 1, set, clear);
 }
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 87389e93325a..a87eabd7fc78 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -40,9 +40,10 @@ static inline int set_kernel_memory(char *startp, char *endp,
 }
 #endif
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
-int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int set_direct_map_invalid_noflush(const void *addr);
+int set_direct_map_default_noflush(const void *addr);
+int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
+                                bool valid);
 bool kernel_page_present(struct page *page);
 
 #endif /* __ASSEMBLER__ */
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 3f76db3d2769..0a457177a88c 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -374,19 +374,20 @@ int set_memory_nx(unsigned long addr, int numpages)
        return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(const void *addr)
 {
-       return __set_memory((unsigned long)page_address(page), 1,
-                           __pgprot(0), __pgprot(_PAGE_PRESENT));
+       return __set_memory((unsigned long)addr, 1, __pgprot(0),
+                           __pgprot(_PAGE_PRESENT));
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(const void *addr)
 {
-       return __set_memory((unsigned long)page_address(page), 1,
-                           PAGE_KERNEL, __pgprot(_PAGE_EXEC));
+       return __set_memory((unsigned long)addr, 1, PAGE_KERNEL,
+                           __pgprot(_PAGE_EXEC));
 }
 
-int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
+                                bool valid)
 {
        pgprot_t set, clear;
 
@@ -398,7 +399,7 @@ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
                clear = __pgprot(_PAGE_PRESENT);
        }
 
-       return __set_memory((unsigned long)page_address(page), nr, set, clear);
+       return __set_memory((unsigned long)addr, numpages, set, clear);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
index 94092f4ae764..3e43c3c96e67 100644
--- a/arch/s390/include/asm/set_memory.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -60,9 +60,10 @@ __SET_MEMORY_FUNC(set_memory_rox, SET_MEMORY_RO | SET_MEMORY_X)
 __SET_MEMORY_FUNC(set_memory_rwnx, SET_MEMORY_RW | SET_MEMORY_NX)
 __SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K)
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
-int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int set_direct_map_invalid_noflush(const void *addr);
+int set_direct_map_default_noflush(const void *addr);
+int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
+                                bool valid);
 bool kernel_page_present(struct page *page);
 
 #endif
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index d3ce04a4b248..e231757bb0e0 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -390,17 +390,18 @@ int __set_memory(unsigned long addr, unsigned long numpages, unsigned long flags
        return rc;
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(const void *addr)
 {
-       return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_INV);
+       return __set_memory((unsigned long)addr, 1, SET_MEMORY_INV);
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(const void *addr)
 {
-       return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
+       return __set_memory((unsigned long)addr, 1, SET_MEMORY_DEF);
 }
 
-int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
+                                bool valid)
 {
        unsigned long flags;
 
@@ -409,7 +410,7 @@ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
        else
                flags = SET_MEMORY_INV;
 
-       return __set_memory((unsigned long)page_to_virt(page), nr, flags);
+       return __set_memory((unsigned long)addr, numpages, flags);
 }
 
 bool kernel_page_present(struct page *page)
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 61f56cdaccb5..f912191f0853 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -87,9 +87,10 @@ int set_pages_wb(struct page *page, int numpages);
 int set_pages_ro(struct page *page, int numpages);
 int set_pages_rw(struct page *page, int numpages);
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
-int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int set_direct_map_invalid_noflush(const void *addr);
+int set_direct_map_default_noflush(const void *addr);
+int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
+                                bool valid);
 bool kernel_page_present(struct page *page);
 
 extern int kernel_set_to_readonly;
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 6c6eb486f7a6..bc8e1c23175b 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2600,9 +2600,9 @@ int set_pages_rw(struct page *page, int numpages)
        return set_memory_rw(addr, numpages);
 }
 
-static int __set_pages_p(struct page *page, int numpages)
+static int __set_pages_p(const void *addr, int numpages)
 {
-       unsigned long tempaddr = (unsigned long) page_address(page);
+       unsigned long tempaddr = (unsigned long)addr;
        struct cpa_data cpa = { .vaddr = &tempaddr,
                                .pgd = NULL,
                                .numpages = numpages,
@@ -2619,9 +2619,9 @@ static int __set_pages_p(struct page *page, int numpages)
        return __change_page_attr_set_clr(&cpa, 1);
 }
 
-static int __set_pages_np(struct page *page, int numpages)
+static int __set_pages_np(const void *addr, int numpages)
 {
-       unsigned long tempaddr = (unsigned long) page_address(page);
+       unsigned long tempaddr = (unsigned long)addr;
        struct cpa_data cpa = { .vaddr = &tempaddr,
                                .pgd = NULL,
                                .numpages = numpages,
@@ -2638,22 +2638,23 @@ static int __set_pages_np(struct page *page, int numpages)
        return __change_page_attr_set_clr(&cpa, 1);
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(const void *addr)
 {
-       return __set_pages_np(page, 1);
+       return __set_pages_np(addr, 1);
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(const void *addr)
 {
-       return __set_pages_p(page, 1);
+       return __set_pages_p(addr, 1);
 }
 
-int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
+                                bool valid)
 {
        if (valid)
-               return __set_pages_p(page, nr);
+               return __set_pages_p(addr, numpages);
 
-       return __set_pages_np(page, nr);
+       return __set_pages_np(addr, numpages);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 3030d9245f5a..1a2563f525fc 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -25,17 +25,18 @@ static inline int set_memory_rox(unsigned long addr, int numpages)
 #endif
 
 #ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
-static inline int set_direct_map_invalid_noflush(struct page *page)
+static inline int set_direct_map_invalid_noflush(const void *addr)
 {
        return 0;
 }
-static inline int set_direct_map_default_noflush(struct page *page)
+static inline int set_direct_map_default_noflush(const void *addr)
 {
        return 0;
 }
 
-static inline int set_direct_map_valid_noflush(struct page *page,
-                                              unsigned nr, bool valid)
+static inline int set_direct_map_valid_noflush(const void *addr,
+                                              unsigned long numpages,
+                                              bool valid)
 {
        return 0;
 }
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0a946932d5c1..b6dda3a8eb6e 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -88,7 +88,7 @@ static inline int hibernate_restore_unprotect_page(void *page_address) {return 0
 static inline void hibernate_map_page(struct page *page)
 {
        if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
-               int ret = set_direct_map_default_noflush(page);
+               int ret = set_direct_map_default_noflush(page_address(page));
 
                if (ret)
                        pr_warn_once("Failed to remap page\n");
@@ -101,7 +101,7 @@ static inline void hibernate_unmap_page(struct page *page)
 {
        if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
                unsigned long addr = (unsigned long)page_address(page);
-               int ret  = set_direct_map_invalid_noflush(page);
+               int ret  = set_direct_map_invalid_noflush(page_address(page));
 
                if (ret)
                        pr_warn_once("Failed to remap page\n");
diff --git a/mm/execmem.c b/mm/execmem.c
index 810a4ba9c924..220298ec87c8 100644
--- a/mm/execmem.c
+++ b/mm/execmem.c
@@ -119,7 +119,8 @@ static int execmem_set_direct_map_valid(struct vm_struct *vm, bool valid)
        int err = 0;
 
        for (int i = 0; i < vm->nr_pages; i += nr) {
-               err = set_direct_map_valid_noflush(vm->pages[i], nr, valid);
+               err = set_direct_map_valid_noflush(page_address(vm->pages[i]),
+                                                  nr, valid);
                if (err)
                        goto err_restore;
                updated += nr;
@@ -129,7 +130,8 @@ static int execmem_set_direct_map_valid(struct vm_struct *vm, bool valid)
 
 err_restore:
        for (int i = 0; i < updated; i += nr)
-               set_direct_map_valid_noflush(vm->pages[i], nr, !valid);
+               set_direct_map_valid_noflush(page_address(vm->pages[i]), nr,
+                                            !valid);
 
        return err;
 }
diff --git a/mm/secretmem.c b/mm/secretmem.c
index edf111e0a1bb..4453ae5dcdd4 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -72,7 +72,7 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
                        goto out;
                }
 
-               err = set_direct_map_invalid_noflush(folio_page(folio, 0));
+               err = set_direct_map_invalid_noflush(folio_address(folio));
                if (err) {
                        folio_put(folio);
                        ret = vmf_error(err);
@@ -87,7 +87,7 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
                         * already happened when we marked the page invalid
                         * which guarantees that this call won't fail
                         */
-                       set_direct_map_default_noflush(folio_page(folio, 0));
+                       set_direct_map_default_noflush(folio_address(folio));
                        folio_put(folio);
                        if (err == -EEXIST)
                                goto retry;
@@ -152,7 +152,7 @@ static int secretmem_migrate_folio(struct address_space *mapping,
 
 static void secretmem_free_folio(struct folio *folio)
 {
-       set_direct_map_default_noflush(folio_page(folio, 0));
+       set_direct_map_default_noflush(folio_address(folio));
        folio_zero_segment(folio, 0, folio_size(folio));
 }
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ecbac900c35f..5b9b421682ab 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3329,14 +3329,17 @@ struct vm_struct *remove_vm_area(const void *addr)
 }
 
 static inline void set_area_direct_map(const struct vm_struct *area,
-                                      int (*set_direct_map)(struct page *page))
+                                      int (*set_direct_map)(const void *addr))
 {
        int i;
 
        /* HUGE_VMALLOC passes small pages to set_direct_map */
-       for (i = 0; i < area->nr_pages; i++)
-               if (page_address(area->pages[i]))
-                       set_direct_map(area->pages[i]);
+       for (i = 0; i < area->nr_pages; i++) {
+               const void *addr = page_address(area->pages[i]);
+
+               if (addr)
+                       set_direct_map(addr);
+       }
 }
 
 /*
-- 
2.50.1

