3.16.59-rc1 review patch.  If anyone has any objections, please let me know.

------------------

From: "Kirill A. Shutemov" <kir...@shutemov.name>

commit b32da82e28ce90bff4e371fc15d2816fa3175bb0 upstream.

We've replaced the remap_file_pages(2) implementation with emulation.  Nobody
creates non-linear mappings anymore.

Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
Cc: Ralf Baechle <r...@linux-mips.org>
Signed-off-by: Andrew Morton <a...@linux-foundation.org>
Signed-off-by: Linus Torvalds <torva...@linux-foundation.org>
[bwh: Backported to 3.16: Deleted definitions are slightly different]
Signed-off-by: Ben Hutchings <b...@decadent.org.uk>
---
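Note for reviewers: remap_file_pages(2) itself keeps working after the
emulation series; the kernel now rewrites the call in terms of ordinary
linear mappings, so a not-present "file PTE" (_PAGE_FILE set, page offset
packed into the PTE) can no longer come into existence, and the helpers
deleted below are dead code.  A minimal userspace sketch of the emulated
call (illustration only, not part of the patch; the test file name and
size are made up):

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
          long psz = sysconf(_SC_PAGESIZE);
          int fd = open("/tmp/testfile", O_RDWR);  /* hypothetical file, at least 2 pages long */
          if (fd < 0)
                  return 1;

          char *win = mmap(NULL, 2 * psz, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);
          if (win == MAP_FAILED)
                  return 1;

          /*
           * Put file page 1 at the start of the window -- the classic
           * "non-linear" layout.  The kernel now emulates this with a
           * plain linear mapping over the range, so no file offset is
           * ever encoded into a PTE and _PAGE_FILE is never set.
           */
          if (remap_file_pages(win, psz, 0, 1, 0))
                  perror("remap_file_pages");

          munmap(win, 2 * psz);
          close(fd);
          return 0;
  }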
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -148,20 +148,6 @@ pfn_pte(unsigned long pfn, pgprot_t prot
 #define __swp_entry(type,offset)       \
        ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
 
-/*
- * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
- */
-#define PTE_FILE_MAX_BITS      28
-
-#define pte_to_pgoff(_pte)     ((((_pte).pte >> 1 ) & 0x07) | \
-                                (((_pte).pte >> 2 ) & 0x38) | \
-                                (((_pte).pte >> 10) <<  6 ))
-
-#define pgoff_to_pte(off)      ((pte_t) { (((off) & 0x07) << 1 ) | \
-                                          (((off) & 0x38) << 2 ) | \
-                                          (((off) >>  6 ) << 10) | \
-                                          _PAGE_FILE })
-
 #else
 
 /* Swap entries must have VALID and GLOBAL bits cleared. */
@@ -177,31 +163,6 @@ pfn_pte(unsigned long pfn, pgprot_t prot
                ((swp_entry_t)  { ((type) << 8) | ((offset) << 13) })
 #endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
 
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
-/*
- * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
- */
-#define PTE_FILE_MAX_BITS      30
-
-#define pte_to_pgoff(_pte)     ((_pte).pte_high >> 2)
-#define pgoff_to_pte(off)      ((pte_t) { _PAGE_FILE, (off) << 2 })
-
-#else
-/*
- * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range:
- */
-#define PTE_FILE_MAX_BITS      28
-
-#define pte_to_pgoff(_pte)     ((((_pte).pte >> 1) & 0x7) | \
-                                (((_pte).pte >> 2) & 0x8) | \
-                                (((_pte).pte >> 8) <<  4))
-
-#define pgoff_to_pte(off)      ((pte_t) { (((off) & 0x7) << 1) | \
-                                          (((off) & 0x8) << 2) | \
-                                          (((off) >>  4) << 8) | \
-                                          _PAGE_FILE })
-#endif
-
 #endif
 
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -291,13 +291,4 @@ static inline pte_t mk_swap_pte(unsigned
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
 
-/*
- * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to
- * make things easier, and only use the upper 56 bits for the page offset...
- */
-#define PTE_FILE_MAX_BITS      56
-
-#define pte_to_pgoff(_pte)     ((_pte).pte >> 8)
-#define pgoff_to_pte(off)      ((pte_t) { ((off) << 8) | _PAGE_FILE })
-
 #endif /* _ASM_PGTABLE_64_H */
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -50,8 +50,6 @@
 
 /*
  * The following bits are implemented in software
- *
- * _PAGE_FILE semantics: set:pagecache unset:swap
  */
 #define _PAGE_PRESENT_SHIFT    6
 #define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
@@ -64,14 +62,10 @@
 #define _PAGE_MODIFIED_SHIFT   10
 #define _PAGE_MODIFIED         (1 << _PAGE_MODIFIED_SHIFT)
 
-#define _PAGE_FILE             (1 << 10)
-
 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
 /*
  * The following are implemented by software
- *
- * _PAGE_FILE semantics: set:pagecache unset:swap
  */
 #define _PAGE_PRESENT_SHIFT    0
 #define _PAGE_PRESENT          (1 <<  _PAGE_PRESENT_SHIFT)
@@ -83,8 +77,6 @@
 #define _PAGE_ACCESSED         (1 <<  _PAGE_ACCESSED_SHIFT)
 #define _PAGE_MODIFIED_SHIFT   4
 #define _PAGE_MODIFIED         (1 <<  _PAGE_MODIFIED_SHIFT)
-#define _PAGE_FILE_SHIFT       4
-#define _PAGE_FILE             (1 <<  _PAGE_FILE_SHIFT)
 
 /*
  * And these are the hardware TLB bits
@@ -114,7 +106,6 @@
  * The following bits are implemented in software
  *
  * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
- * _PAGE_FILE semantics: set:pagecache unset:swap
  */
 #define _PAGE_PRESENT_SHIFT    (0)
 #define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
@@ -126,7 +117,6 @@
 #define _PAGE_ACCESSED         (1 << _PAGE_ACCESSED_SHIFT)
 #define _PAGE_MODIFIED_SHIFT   (_PAGE_ACCESSED_SHIFT + 1)
 #define _PAGE_MODIFIED         (1 << _PAGE_MODIFIED_SHIFT)
-#define _PAGE_FILE             (_PAGE_MODIFIED)
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 /* huge tlb page */
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -253,7 +253,6 @@ extern pgd_t swapper_pg_dir[];
 static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
 static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
 static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte)  { return pte.pte_low & _PAGE_FILE; }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
@@ -309,7 +308,6 @@ static inline pte_t pte_mkyoung(pte_t pt
 static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
