Add X86_FEATURE_SMAP to the disabled-features mask when CONFIG_X86_SMAP
is off, and use cpu_feature_enabled() in the fault code.  This lets us
get rid of a redundant IS_ENABLED(CONFIG_X86_SMAP) check.

Signed-off-by: Andy Lutomirski <l...@kernel.org>
---
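Notes (not part of the commit message): cpu_feature_enabled() folds a
feature test to a compile-time constant when the feature's bit is set
in the build-time disabled mask, which is what makes the IS_ENABLED()
test redundant.  Below is a minimal userspace sketch of that pattern;
FEATURE_SMAP's bit number, runtime_cpu_has() and feature_enabled() are
hypothetical stand-ins for illustration, not the kernel's own macros.

	#include <stdbool.h>
	#include <stdio.h>

	#define FEATURE_SMAP 9			/* hypothetical bit number */

	/* Build-time mask of compiled-out features, mirroring the role
	 * of DISABLED_MASK9 in this patch. */
	#ifdef CONFIG_SMAP
	# define DISABLED_MASK 0u
	#else
	# define DISABLED_MASK (1u << FEATURE_SMAP)
	#endif

	/* Stand-in for the runtime CPUID-backed check (static_cpu_has()). */
	static bool runtime_cpu_has(int bit)
	{
		(void)bit;
		return true;
	}

	/*
	 * Mirrors the shape of cpu_feature_enabled(): if the bit is in
	 * the build-time disabled mask, the whole expression is a
	 * constant false and the runtime check is dead code the
	 * compiler can elide.
	 */
	#define feature_enabled(bit) \
		((DISABLED_MASK & (1u << (bit))) ? false : runtime_cpu_has(bit))

	int main(void)
	{
		printf("SMAP check result: %d\n", feature_enabled(FEATURE_SMAP));
		return 0;
	}

Compile with -DCONFIG_SMAP to model CONFIG_X86_SMAP=y (prints 1);
without it, the disabled-mask bit makes the check constant-fold to
false (prints 0), analogous to how the real macro short-circuits to 0
before ever reaching static_cpu_has().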
 arch/x86/include/asm/disabled-features.h | 8 +++++++-
 arch/x86/mm/fault.c                      | 5 +----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 33833d1909af..a5ea841cc6d2 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -16,6 +16,12 @@
 # define DISABLE_MPX   (1<<(X86_FEATURE_MPX & 31))
 #endif
 
+#ifdef CONFIG_X86_SMAP
+# define DISABLE_SMAP  0
+#else
+# define DISABLE_SMAP  (1<<(X86_FEATURE_SMAP & 31))
+#endif
+
 #ifdef CONFIG_X86_INTEL_UMIP
 # define DISABLE_UMIP  0
 #else
@@ -68,7 +74,7 @@
 #define DISABLED_MASK6 0
 #define DISABLED_MASK7 (DISABLE_PTI)
 #define DISABLED_MASK8 0
-#define DISABLED_MASK9 (DISABLE_MPX)
+#define DISABLED_MASK9 (DISABLE_MPX|DISABLE_SMAP)
 #define DISABLED_MASK10        0
 #define DISABLED_MASK11        0
 #define DISABLED_MASK12        0
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index eae7ee3ce89b..0597342d4a55 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1150,10 +1150,7 @@ static int fault_in_kernel_space(unsigned long address)
 
 static inline bool smap_violation(int error_code, struct pt_regs *regs)
 {
-       if (!IS_ENABLED(CONFIG_X86_SMAP))
-               return false;
-
-       if (!static_cpu_has(X86_FEATURE_SMAP))
+       if (!cpu_feature_enabled(X86_FEATURE_SMAP))
                return false;
 
        if (error_code & X86_PF_USER)
-- 
2.17.2
