From: Al Viro <v...@zeniv.linux.org.uk>

Signed-off-by: Al Viro <v...@zeniv.linux.org.uk>
---
 arch/mips/include/asm/pgtable.h  | 4 ++--
 arch/mips/mm/init.c              | 7 ++++---
 arch/s390/include/asm/pgtable.h  | 6 +++---
 arch/s390/mm/init.c              | 7 ++++---
 arch/score/include/asm/pgtable.h | 6 +++---
 arch/score/mm/init.c             | 6 +++---
 6 files changed, 19 insertions(+), 17 deletions(-)
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 8957f15..f0e06c6 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -75,11 +75,11 @@ extern unsigned long _page_cachable_default;
  * for zero-mapped memory areas etc..
  */
 
-extern unsigned long empty_zero_page;
+extern void *empty_zero_page;
 extern unsigned long zero_page_mask;
 
 #define ZERO_PAGE(vaddr) \
-	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
+	(virt_to_page((empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
 #define __HAVE_COLOR_ZERO_PAGE
 
 extern void paging_init(void);
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 8770e61..872e7a1 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -53,7 +53,8 @@
  * any price. Since page is never written to after the initialization we
  * don't have to care about aliases on other CPUs.
  */
-unsigned long empty_zero_page, zero_page_mask;
+void *empty_zero_page;
+unsigned long zero_page_mask;
 EXPORT_SYMBOL_GPL(empty_zero_page);
 EXPORT_SYMBOL(zero_page_mask);
 
@@ -70,11 +71,11 @@ void setup_zero_pages(void)
 	else
 		order = 0;
 
-	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+	empty_zero_page = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!empty_zero_page)
 		panic("Oh boy, that early out of memory?");
 
-	page = virt_to_page((void *)empty_zero_page);
+	page = virt_to_page(empty_zero_page);
 	split_page(page, order);
 	for (i = 0; i < (1 << order); i++, page++)
 		mark_page_reserved(page);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 024f85f..f6fe409 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -47,12 +47,12 @@ extern void vmem_map_init(void);
  * for zero-mapped memory areas etc..
  */
 
-extern unsigned long empty_zero_page;
+extern void *empty_zero_page;
 extern unsigned long zero_page_mask;
 
 #define ZERO_PAGE(vaddr) \
-	(virt_to_page((void *)(empty_zero_page + \
-	 (((unsigned long)(vaddr)) &zero_page_mask))))
+	(virt_to_page(empty_zero_page + \
+	 (((unsigned long)(vaddr)) &zero_page_mask)))
 #define __HAVE_COLOR_ZERO_PAGE
 
 /* TODO: s390 cannot support io_remap_pfn_range... */
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c722400..7c8190f 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,7 +42,8 @@
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 
-unsigned long empty_zero_page, zero_page_mask;
+void *empty_zero_page;
+unsigned long zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(zero_page_mask);
 
@@ -59,11 +60,11 @@ static void __init setup_zero_pages(void)
 	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
 		order--;
 
-	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+	empty_zero_page = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!empty_zero_page)
 		panic("Out of memory in setup_zero_pages");
 
-	page = virt_to_page((void *) empty_zero_page);
+	page = virt_to_page(empty_zero_page);
 	split_page(page, order);
 	for (i = 1 << order; i > 0; i--) {
 		mark_page_reserved(page);
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index 0553e5c..8f5b1be 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -163,12 +163,12 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
 #define __swp_offset(x)		((x).val >> 10)
 #define __swp_entry(type, offset) ((swp_entry_t){(type) | ((offset) << 10)})
 
-extern unsigned long empty_zero_page;
+extern void *empty_zero_page;
 extern unsigned long zero_page_mask;
 
 #define ZERO_PAGE(vaddr) \
-	(virt_to_page((void *)(empty_zero_page + \
-	 (((unsigned long)(vaddr)) & zero_page_mask))))
+	(virt_to_page(empty_zero_page + \
+	 (((unsigned long)(vaddr)) & zero_page_mask)))
 
 #define pgtable_cache_init()	do {} while (0)
 
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index 9fbce49..b4db012 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -38,18 +38,18 @@
 #include <asm/sections.h>
 #include <asm/tlb.h>
 
-unsigned long empty_zero_page;
+void *empty_zero_page;
 EXPORT_SYMBOL_GPL(empty_zero_page);
 
 static void setup_zero_page(void)
 {
 	struct page *page;
 
-	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
+	empty_zero_page = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
 	if (!empty_zero_page)
 		panic("Oh boy, that early out of memory?");
 
-	page = virt_to_page((void *) empty_zero_page);
+	page = virt_to_page(empty_zero_page);
 	mark_page_reserved(page);
 }
 
-- 
2.1.4
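As context for the ZERO_PAGE() arithmetic in the hunks above, here is a minimal
user-space sketch of the colour-page selection once empty_zero_page is a
void *. It is illustrative only and not part of the patch: the zero_page_mask
formula mirrors the mips-style setup_zero_pages(), and the helper names are
made up for the example. It also leans on the GCC extension that arithmetic on
void * behaves like arithmetic on char *, which is what the kernel build
assumes as well.

/*
 * Illustrative user-space sketch only (not kernel code).  It models the
 * colour-page selection that ZERO_PAGE(vaddr) performs once
 * empty_zero_page is a void *: mask the faulting virtual address with
 * zero_page_mask and add the result to the base of the zeroed block.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static void *empty_zero_page;		/* base of (1 << order) zeroed pages */
static unsigned long zero_page_mask;	/* picks the colour bits of vaddr */

/* stand-in for the kernel's setup_zero_pages(): allocate and zero */
static void setup_zero_pages(unsigned int order)
{
	empty_zero_page = calloc(1UL << order, PAGE_SIZE);
	if (!empty_zero_page)
		abort();
	/* same shape of formula the mips code uses for its colour mask */
	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/* analogue of ZERO_PAGE(vaddr): return the colour-matching zero page */
static void *zero_page_for(unsigned long vaddr)
{
	return empty_zero_page + (vaddr & zero_page_mask);
}

int main(void)
{
	unsigned long vaddr = 0x7f001234f000UL;

	setup_zero_pages(3);	/* eight single-page "colours" */
	printf("vaddr %#lx maps to zero page index %lu\n", vaddr,
	       (unsigned long)(zero_page_for(vaddr) - empty_zero_page)
			>> PAGE_SHIFT);
	return 0;
}

Compiled with gcc, the example address prints index 7, i.e. the zero page
whose low virtual-address bits match the faulting mapping, which is the
point of the colouring trick.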