On !PAE 32-bit, _PAGE_NX will be 0, so is_prefetch() always returns
early.  The test is also sufficient on PAE, because __supported_pte_mask
is updated in the same places as nx_enabled in init_32.c, which also
takes disable_nx into account.

Also fix the inverted range check in vmalloc_fault(): it must bail out
when the address lies outside the vmalloc area, not inside it.
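
For reference, the PAE-only NX setup in init_32.c keeps nx_enabled,
disable_nx and __supported_pte_mask in sync, which is what makes the
single __supported_pte_mask test sufficient.  A condensed sketch, not
the verbatim source: the CPUID/EFER.NX detection in set_nx() is folded
into a placeholder nx_supported flag here.

#ifdef CONFIG_X86_PAE
static int disable_nx __initdata;
int nx_enabled;

/* "noexec=on"/"noexec=off" updates the mask and disable_nx together */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else if (!strcmp(str, "off")) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	} else
		return -EINVAL;
	return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
	/*
	 * nx_supported stands in for the CPUID 0x80000001 / EFER.NX
	 * detection done by the real function.
	 */
	if (nx_supported && !disable_nx) {
		nx_enabled = 1;
		__supported_pte_mask |= _PAGE_NX;
	}
}
#endif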

Signed-off-by: Harvey Harrison <[EMAIL PROTECTED]>
---
 arch/x86/mm/fault.c |   13 ++++---------
 1 files changed, 4 insertions(+), 9 deletions(-)
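
With the patch applied, the prologue of is_prefetch() reads roughly as
below (reconstructed from the hunk; the error_code parameter and the
instr declaration sit outside the hunk context and are assumed):

static int is_prefetch(struct pt_regs *regs, unsigned long addr,
		       unsigned long error_code)
{
	unsigned char *instr;
	unsigned char *max_instr;

#ifdef CONFIG_X86_32
	/* _PAGE_NX is 0 on !PAE builds, so this always returns early there */
	if (!(__supported_pte_mask & _PAGE_NX))
		return 0;
#endif

	/* If it was a exec fault on NX page, ignore */
	if (error_code & PF_INSTR)
		return 0;

	instr = (unsigned char *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;
	/* ... rest of the instruction decoding is unchanged ... */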

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index bdf0282..32755eb 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -92,18 +92,13 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr,
        unsigned char *max_instr;
 
 #ifdef CONFIG_X86_32
-# ifdef CONFIG_X86_PAE
-       /* If it was a exec fault on NX page, ignore */
-       if (nx_enabled && (error_code & PF_INSTR))
+       if (!(__supported_pte_mask & _PAGE_NX))
                return 0;
-# else
-       return 0;
-# endif
-#else /* CONFIG_X86_64 */
+#endif
+
        /* If it was a exec fault on NX page, ignore */
        if (error_code & PF_INSTR)
                return 0;
-#endif
 
        instr = (unsigned char *)convert_ip_to_linear(current, regs);
        max_instr = instr + 15;
@@ -468,7 +463,7 @@ static int vmalloc_fault(unsigned long address)
        pmd_t *pmd, *pmd_ref;
        pte_t *pte, *pte_ref;
 
-       if (address >= VMALLOC_START && address < VMALLOC_END)
+       if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;
 
        /* Copy kernel mappings over when needed. This can also
-- 
1.5.4.rc4.1142.gf5a97


