diff --git a/Makefile b/Makefile
index 44efd1252ab8..1a8c0fc6b997 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 137
+SUBLEVEL = 138
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 722bacea040e..8baaff5af0b5 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -125,7 +125,7 @@ static struct crypto_alg alg = {
 
 static int __init chacha20_simd_mod_init(void)
 {
-       if (!cpu_has_ssse3)
+       if (!boot_cpu_has(X86_FEATURE_SSSE3))
                return -ENODEV;
 
 #ifdef CONFIG_AS_AVX2
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 81a595d75cf5..15f5c7675d42 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -58,16 +58,11 @@
 asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
                                unsigned int crc_init);
 static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
-#if defined(X86_FEATURE_EAGER_FPU)
 #define set_pcl_breakeven_point()                                      \
 do {                                                                   \
        if (!use_eager_fpu())                                           \
                crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
 } while (0)
-#else
-#define set_pcl_breakeven_point()                                      \
-       (crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU)
-#endif
 #endif /* CONFIG_X86_64 */
 
 static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
@@ -257,7 +252,7 @@ static int __init crc32c_intel_mod_init(void)
        if (!x86_match_cpu(crc32c_cpu_id))
                return -ENODEV;
 #ifdef CONFIG_X86_64
-       if (cpu_has_pclmulqdq) {
+       if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
                alg.update = crc32c_pcl_intel_update;
                alg.finup = crc32c_pcl_intel_finup;
                alg.digest = crc32c_pcl_intel_digest;
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index f7e142926481..e4959d023af8 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -109,6 +109,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 
 #endif
 
-#define system_has_cmpxchg_double() cpu_has_cx8
+#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
 
 #endif /* _ASM_X86_CMPXCHG_32_H */
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 1af94697aae5..caa23a34c963 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -18,6 +18,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
        cmpxchg_local((ptr), (o), (n));                                 \
 })
 
-#define system_has_cmpxchg_double() cpu_has_cx16
+#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
 
 #endif /* _ASM_X86_CMPXCHG_64_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 641f0f2c2982..232621c5e859 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -104,7 +104,7 @@
 #define X86_FEATURE_EXTD_APICID        ( 3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-#define X86_FEATURE_EAGER_FPU  ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
+/* free, was #define X86_FEATURE_EAGER_FPU     ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
 #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
@@ -368,58 +368,29 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
 
 #define cpu_has_fpu            boot_cpu_has(X86_FEATURE_FPU)
-#define cpu_has_de             boot_cpu_has(X86_FEATURE_DE)
 #define cpu_has_pse            boot_cpu_has(X86_FEATURE_PSE)
 #define cpu_has_tsc            boot_cpu_has(X86_FEATURE_TSC)
 #define cpu_has_pge            boot_cpu_has(X86_FEATURE_PGE)
 #define cpu_has_apic           boot_cpu_has(X86_FEATURE_APIC)
-#define cpu_has_sep            boot_cpu_has(X86_FEATURE_SEP)
-#define cpu_has_mtrr           boot_cpu_has(X86_FEATURE_MTRR)
-#define cpu_has_mmx            boot_cpu_has(X86_FEATURE_MMX)
 #define cpu_has_fxsr           boot_cpu_has(X86_FEATURE_FXSR)
 #define cpu_has_xmm            boot_cpu_has(X86_FEATURE_XMM)
 #define cpu_has_xmm2           boot_cpu_has(X86_FEATURE_XMM2)
-#define cpu_has_xmm3           boot_cpu_has(X86_FEATURE_XMM3)
-#define cpu_has_ssse3          boot_cpu_has(X86_FEATURE_SSSE3)
 #define cpu_has_aes            boot_cpu_has(X86_FEATURE_AES)
 #define cpu_has_avx            boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2           boot_cpu_has(X86_FEATURE_AVX2)
-#define cpu_has_ht             boot_cpu_has(X86_FEATURE_HT)
-#define cpu_has_nx             boot_cpu_has(X86_FEATURE_NX)
-#define cpu_has_xstore         boot_cpu_has(X86_FEATURE_XSTORE)
-#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
-#define cpu_has_xcrypt         boot_cpu_has(X86_FEATURE_XCRYPT)
-#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
-#define cpu_has_ace2           boot_cpu_has(X86_FEATURE_ACE2)
-#define cpu_has_ace2_enabled   boot_cpu_has(X86_FEATURE_ACE2_EN)
-#define cpu_has_phe            boot_cpu_has(X86_FEATURE_PHE)
-#define cpu_has_phe_enabled    boot_cpu_has(X86_FEATURE_PHE_EN)
-#define cpu_has_pmm            boot_cpu_has(X86_FEATURE_PMM)
-#define cpu_has_pmm_enabled    boot_cpu_has(X86_FEATURE_PMM_EN)
-#define cpu_has_ds             boot_cpu_has(X86_FEATURE_DS)
-#define cpu_has_pebs           boot_cpu_has(X86_FEATURE_PEBS)
 #define cpu_has_clflush                boot_cpu_has(X86_FEATURE_CLFLUSH)
-#define cpu_has_bts            boot_cpu_has(X86_FEATURE_BTS)
 #define cpu_has_gbpages                boot_cpu_has(X86_FEATURE_GBPAGES)
 #define cpu_has_arch_perfmon   boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 #define cpu_has_pat            boot_cpu_has(X86_FEATURE_PAT)
-#define cpu_has_xmm4_1         boot_cpu_has(X86_FEATURE_XMM4_1)
-#define cpu_has_xmm4_2         boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic         boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave          boot_cpu_has(X86_FEATURE_XSAVE)
-#define cpu_has_xsaveopt       boot_cpu_has(X86_FEATURE_XSAVEOPT)
 #define cpu_has_xsaves         boot_cpu_has(X86_FEATURE_XSAVES)
 #define cpu_has_osxsave                boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor     boot_cpu_has(X86_FEATURE_HYPERVISOR)
-#define cpu_has_pclmulqdq      boot_cpu_has(X86_FEATURE_PCLMULQDQ)
-#define cpu_has_perfctr_core   boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
-#define cpu_has_perfctr_nb     boot_cpu_has(X86_FEATURE_PERFCTR_NB)
-#define cpu_has_perfctr_l2     boot_cpu_has(X86_FEATURE_PERFCTR_L2)
-#define cpu_has_cx8            boot_cpu_has(X86_FEATURE_CX8)
-#define cpu_has_cx16           boot_cpu_has(X86_FEATURE_CX16)
-#define cpu_has_eager_fpu      boot_cpu_has(X86_FEATURE_EAGER_FPU)
-#define cpu_has_topoext                boot_cpu_has(X86_FEATURE_TOPOEXT)
-#define cpu_has_bpext          boot_cpu_has(X86_FEATURE_BPEXT)
+/*
+ * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
+ * fast paths and boot_cpu_has() otherwise!
+ */
 
 #if __GNUC__ >= 4
 extern void warn_pre_alternatives(void);
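The cpufeature.h hunk above removes the remaining single-use cpu_has_* wrapper macros; callers now name the feature bit explicitly. A minimal sketch of the resulting convention (example_mod_init is hypothetical, not part of the patch):

static int __init example_mod_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_AES))	/* was: cpu_has_aes */
		return -ENODEV;

	return 0;
}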
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 3c3550c3a4a3..146d838e6ee7 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -42,6 +42,7 @@ extern void fpu__init_cpu_xstate(void);
 extern void fpu__init_system(struct cpuinfo_x86 *c);
 extern void fpu__init_check_bugs(void);
 extern void fpu__resume_cpu(void);
+extern u64 fpu__get_supported_xfeatures_mask(void);
 
 /*
  * Debugging facility:
@@ -57,7 +58,7 @@ extern void fpu__resume_cpu(void);
  */
 static __always_inline __pure bool use_eager_fpu(void)
 {
-       return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
+       return true;
 }
 
 static __always_inline __pure bool use_xsaveopt(void)
@@ -595,7 +596,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
         * If the task has used the math, pre-load the FPU on xsave processors
         * or if the past 5 consecutive context-switches used math.
         */
-       fpu.preload = new_fpu->fpstate_active &&
+       fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
+                     new_fpu->fpstate_active &&
                      (use_eager_fpu() || new_fpu->counter > 5);
 
        if (old_fpu->fpregs_active) {
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 3a6c89b70307..f23cd8c80b1c 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -22,7 +22,7 @@
 #define XFEATURE_MASK_LAZY     (XFEATURE_MASK_FP | \
                                 XFEATURE_MASK_SSE | \
                                 XFEATURE_MASK_YMM | \
-                                XFEATURE_MASK_OPMASK | \
+                                XFEATURE_MASK_OPMASK | \
                                 XFEATURE_MASK_ZMM_Hi256 | \
                                 XFEATURE_MASK_Hi16_ZMM)
 
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index fc3c7e49c8e4..ae357d0afc91 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -105,11 +105,12 @@ struct x86_emulate_ops {
         *  @addr:  [IN ] Linear address from which to read.
         *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
         *  @bytes: [IN ] Number of bytes to read from memory.
+        *  @system:[IN ] Whether the access is forced to be at CPL0.
         */
        int (*read_std)(struct x86_emulate_ctxt *ctxt,
                        unsigned long addr, void *val,
                        unsigned int bytes,
-                       struct x86_exception *fault);
+                       struct x86_exception *fault, bool system);
 
        /*
         * read_phys: Read bytes of standard (non-emulated/special) memory.
@@ -127,10 +128,11 @@ struct x86_emulate_ops {
         *  @addr:  [IN ] Linear address to which to write.
         *  @val:   [OUT] Value write to memory, zero-extended to 'u_long'.
         *  @bytes: [IN ] Number of bytes to write to memory.
+        *  @system:[IN ] Whether the access is forced to be at CPL0.
         */
        int (*write_std)(struct x86_emulate_ctxt *ctxt,
                         unsigned long addr, void *val, unsigned int bytes,
-                        struct x86_exception *fault);
+                        struct x86_exception *fault, bool system);
        /*
         * fetch: Read bytes of standard (non-emulated/special) memory.
         *        Used for instruction fetch.
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 222a6a3ca2b5..a438c5598a90 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -21,15 +21,6 @@
 extern int smp_num_siblings;
 extern unsigned int num_processors;
 
-static inline bool cpu_has_ht_siblings(void)
-{
-       bool has_siblings = false;
-#ifdef CONFIG_SMP
-       has_siblings = cpu_has_ht && smp_num_siblings > 1;
-#endif
-       return has_siblings;
-}
-
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
 /* cpus sharing the last level cache: */
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index 5a08bc8bff33..c54beb44c4c1 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -553,7 +553,7 @@ do {                                                        \
        if (cpu_has_xmm) {                              \
                xor_speed(&xor_block_pIII_sse);         \
                xor_speed(&xor_block_sse_pf64);         \
-       } else if (cpu_has_mmx) {                       \
+       } else if (boot_cpu_has(X86_FEATURE_MMX)) {     \
                xor_speed(&xor_block_pII_mmx);          \
                xor_speed(&xor_block_p5_mmx);           \
        } else {                                        \
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 4bf9e77f3e05..f4fb8f5b0be4 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -304,7 +304,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
        int cpu = smp_processor_id();
 
        /* get information required for multi-node processors */
-       if (cpu_has_topoext) {
+       if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                u32 eax, ebx, ecx, edx;
 
                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
@@ -954,7 +954,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 
 void set_dr_addr_mask(unsigned long mask, int dr)
 {
-       if (!cpu_has_bpext)
+       if (!boot_cpu_has(X86_FEATURE_BPEXT))
                return;
 
        switch (dr) {
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8eabbafff213..0498ad3702f5 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1539,7 +1539,9 @@ void cpu_init(void)
 
        printk(KERN_INFO "Initializing CPU#%d\n", cpu);
 
-       if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
+       if (cpu_feature_enabled(X86_FEATURE_VME) ||
+           cpu_has_tsc ||
+           boot_cpu_has(X86_FEATURE_DE))
                cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
        load_current_idt();
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 209ac1e7d1f0..565648bc1a0a 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -445,7 +445,8 @@ static void init_intel(struct cpuinfo_x86 *c)
 
        if (cpu_has_xmm2)
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-       if (cpu_has_ds) {
+
+       if (boot_cpu_has(X86_FEATURE_DS)) {
                unsigned int l1;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index b4ca91cf55b0..3fa72317ad78 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -591,7 +591,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
        unsigned                edx;
 
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-               if (cpu_has_topoext)
+               if (boot_cpu_has(X86_FEATURE_TOPOEXT))
                        cpuid_count(0x8000001d, index, &eax.full,
                                    &ebx.full, &ecx.full, &edx);
                else
@@ -637,7 +637,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 {
 
-       if (cpu_has_topoext) {
+       if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                num_cache_leaves = find_num_cache_leaves(c);
        } else if (c->extended_cpuid_level >= 0x80000006) {
                if (cpuid_edx(0x80000006) & 0xf000)
@@ -809,7 +809,7 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
        struct cacheinfo *this_leaf;
        int i, sibling;
 
-       if (cpu_has_topoext) {
+       if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                unsigned int apicid, nshared, first, last;
 
                this_leaf = this_cpu_ci->info_list + index;
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b5624fafa44a..136ae86f4f5f 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -349,7 +349,7 @@ static void get_fixed_ranges(mtrr_type *frs)
 
 void mtrr_save_fixed_ranges(void *info)
 {
-       if (cpu_has_mtrr)
+       if (boot_cpu_has(X86_FEATURE_MTRR))
                get_fixed_ranges(mtrr_state.fixed_ranges);
 }
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index fa77ac8291f0..f924f41af89a 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -682,7 +682,7 @@ void __init mtrr_bp_init(void)
 
        phys_addr = 32;
 
-       if (cpu_has_mtrr) {
+       if (boot_cpu_has(X86_FEATURE_MTRR)) {
                mtrr_if = &generic_mtrr_ops;
                size_or_mask = SIZE_OR_MASK_BITS(36);
                size_and_mask = 0x00f00000;
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 1cee5d2d7ece..3ea177cb7366 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -160,7 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
        if (offset)
                return offset;
 
-       if (!cpu_has_perfctr_core)
+       if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                offset = index;
        else
                offset = index << 1;
@@ -652,7 +652,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 
 static int __init amd_core_pmu_init(void)
 {
-       if (!cpu_has_perfctr_core)
+       if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                return 0;
 
        switch (boot_cpu_data.x86) {
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index cc6cedb8f25d..49742746a6c9 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -523,10 +523,10 @@ static int __init amd_uncore_init(void)
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                goto fail_nodev;
 
-       if (!cpu_has_topoext)
+       if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
                goto fail_nodev;
 
-       if (cpu_has_perfctr_nb) {
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
                amd_uncore_nb = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_nb) {
                        ret = -ENOMEM;
@@ -540,7 +540,7 @@ static int __init amd_uncore_init(void)
                ret = 0;
        }
 
-       if (cpu_has_perfctr_l2) {
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
                amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_l2) {
                        ret = -ENOMEM;
@@ -583,10 +583,11 @@ fail_online:
 
        /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
        amd_uncore_nb = amd_uncore_l2 = NULL;
-       if (cpu_has_perfctr_l2)
+
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
                perf_pmu_unregister(&amd_l2_pmu);
 fail_l2:
-       if (cpu_has_perfctr_nb)
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
                perf_pmu_unregister(&amd_nb_pmu);
        if (amd_uncore_l2)
                free_percpu(amd_uncore_l2);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index d25097c3fc1d..6aa0b519c851 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -114,6 +114,10 @@ void __kernel_fpu_begin(void)
        kernel_fpu_disable();
 
        if (fpu->fpregs_active) {
+               /*
+                * Ignore return value -- we don't care if reg state
+                * is clobbered.
+                */
                copy_fpregs_to_fpstate(fpu);
        } else {
                this_cpu_write(fpu_fpregs_owner_ctx, NULL);
@@ -189,8 +193,12 @@ void fpu__save(struct fpu *fpu)
 
        preempt_disable();
        if (fpu->fpregs_active) {
-               if (!copy_fpregs_to_fpstate(fpu))
-                       fpregs_deactivate(fpu);
+               if (!copy_fpregs_to_fpstate(fpu)) {
+                       if (use_eager_fpu())
+                               copy_kernel_to_fpregs(&fpu->state);
+                       else
+                               fpregs_deactivate(fpu);
+               }
        }
        preempt_enable();
 }
@@ -259,7 +267,11 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
        preempt_disable();
        if (!copy_fpregs_to_fpstate(dst_fpu)) {
                memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
-               fpregs_deactivate(src_fpu);
+
+               if (use_eager_fpu())
+                       copy_kernel_to_fpregs(&src_fpu->state);
+               else
+                       fpregs_deactivate(src_fpu);
        }
        preempt_enable();
 }
@@ -409,8 +421,10 @@ static inline void copy_init_fpstate_to_fpregs(void)
 {
        if (use_xsave())
                copy_kernel_to_xregs(&init_fpstate.xsave, -1);
-       else
+       else if (static_cpu_has(X86_FEATURE_FXSR))
                copy_kernel_to_fxregs(&init_fpstate.fxsave);
+       else
+               copy_kernel_to_fregs(&init_fpstate.fsave);
 }
 
 /*
@@ -423,7 +437,7 @@ void fpu__clear(struct fpu *fpu)
 {
        WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
 
-       if (!use_eager_fpu()) {
+       if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
                /* FPU state will be reallocated lazily at the first use. */
                fpu__drop(fpu);
        } else {
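With use_eager_fpu() now hard-wired to true, a save that clobbers the register contents must be followed by a reload rather than a deactivation; that is the pattern fpu__save() and fpu_copy() adopt above. Reduced to a sketch (example_save is illustrative, not verbatim kernel code):

static void example_save(struct fpu *fpu)
{
	if (!copy_fpregs_to_fpstate(fpu)) {
		/* The save clobbered the registers: reload from the buffer. */
		if (use_eager_fpu())
			copy_kernel_to_fpregs(&fpu->state);
		else
			fpregs_deactivate(fpu);
	}
}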
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 1011c05b1bd5..954517285fa2 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -3,8 +3,11 @@
  */
 #include <asm/fpu/internal.h>
 #include <asm/tlbflush.h>
+#include <asm/setup.h>
+#include <asm/cmdline.h>
 
 #include <linux/sched.h>
+#include <linux/init.h>
 
 /*
  * Initialize the TS bit in CR0 according to the style of context-switches
@@ -12,10 +15,7 @@
  */
 static void fpu__init_cpu_ctx_switch(void)
 {
-       if (!cpu_has_eager_fpu)
-               stts();
-       else
-               clts();
+       clts();
 }
 
 /*
@@ -75,13 +75,15 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
        cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
        write_cr0(cr0);
 
-       asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
-                    : "+m" (fsw), "+m" (fcw));
+       if (!test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
+               asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+                            : "+m" (fsw), "+m" (fcw));
 
-       if (fsw == 0 && (fcw & 0x103f) == 0x003f)
-               set_cpu_cap(c, X86_FEATURE_FPU);
-       else
-               clear_cpu_cap(c, X86_FEATURE_FPU);
+               if (fsw == 0 && (fcw & 0x103f) == 0x003f)
+                       set_cpu_cap(c, X86_FEATURE_FPU);
+               else
+                       clear_cpu_cap(c, X86_FEATURE_FPU);
+       }
 
 #ifndef CONFIG_MATH_EMULATION
        if (!cpu_has_fpu) {
@@ -130,7 +132,7 @@ static void __init fpu__init_system_generic(void)
         * Set up the legacy init FPU context. (xstate init might overwrite this
         * with a more modern format, if the CPU supports it.)
         */
-       fpstate_init_fxstate(&init_fpstate.fxsave);
+       fpstate_init(&init_fpstate);
 
        fpu__init_system_mxcsr();
 }
@@ -230,53 +232,16 @@ static void __init fpu__init_system_xstate_size_legacy(void)
 }
 
 /*
- * FPU context switching strategies:
- *
- * Against popular belief, we don't do lazy FPU saves, due to the
- * task migration complications it brings on SMP - we only do
- * lazy FPU restores.
- *
- * 'lazy' is the traditional strategy, which is based on setting
- * CR0::TS to 1 during context-switch (instead of doing a full
- * restore of the FPU state), which causes the first FPU instruction
- * after the context switch (whenever it is executed) to fault - at
- * which point we lazily restore the FPU state into FPU registers.
- *
- * Tasks are of course under no obligation to execute FPU instructions,
- * so it can easily happen that another context-switch occurs without
- * a single FPU instruction being executed. If we eventually switch
- * back to the original task (that still owns the FPU) then we have
- * not only saved the restores along the way, but we also have the
- * FPU ready to be used for the original task.
- *
- * 'eager' switching is used on modern CPUs, there we switch the FPU
- * state during every context switch, regardless of whether the task
- * has used FPU instructions in that time slice or not. This is done
- * because modern FPU context saving instructions are able to optimize
- * state saving and restoration in hardware: they can detect both
- * unused and untouched FPU state and optimize accordingly.
- *
- * [ Note that even in 'lazy' mode we might optimize context switches
- *   to use 'eager' restores, if we detect that a task is using the FPU
- *   frequently. See the fpu->counter logic in fpu/internal.h for that. ]
+ * Find supported xfeatures based on cpu features and command-line input.
+ * This must be called after fpu__init_parse_early_param() is called and
+ * xfeatures_mask is enumerated.
  */
-static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
-
-static int __init eager_fpu_setup(char *s)
+u64 __init fpu__get_supported_xfeatures_mask(void)
 {
-       if (!strcmp(s, "on"))
-               eagerfpu = ENABLE;
-       else if (!strcmp(s, "off"))
-               eagerfpu = DISABLE;
-       else if (!strcmp(s, "auto"))
-               eagerfpu = AUTO;
-       return 1;
+       return XCNTXT_MASK;
 }
-__setup("eagerfpu=", eager_fpu_setup);
 
-/*
- * Pick the FPU context switching strategy:
- */
+/* Legacy code to initialize eager fpu mode. */
 static void __init fpu__init_system_ctx_switch(void)
 {
        static bool on_boot_cpu = 1;
@@ -286,25 +251,31 @@ static void __init fpu__init_system_ctx_switch(void)
 
        WARN_ON_FPU(current->thread.fpu.fpstate_active);
        current_thread_info()->status = 0;
+}
 
-       /* Auto enable eagerfpu for xsaveopt */
-       if (cpu_has_xsaveopt && eagerfpu != DISABLE)
-               eagerfpu = ENABLE;
-
-       if (xfeatures_mask & XFEATURE_MASK_EAGER) {
-               if (eagerfpu == DISABLE) {
-                       pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
-                              xfeatures_mask & XFEATURE_MASK_EAGER);
-                       xfeatures_mask &= ~XFEATURE_MASK_EAGER;
-               } else {
-                       eagerfpu = ENABLE;
-               }
+/*
+ * We parse fpu parameters early because fpu__init_system() is executed
+ * before parse_early_param().
+ */
+static void __init fpu__init_parse_early_param(void)
+{
+       if (cmdline_find_option_bool(boot_command_line, "no387"))
+               setup_clear_cpu_cap(X86_FEATURE_FPU);
+
+       if (cmdline_find_option_bool(boot_command_line, "nofxsr")) {
+               setup_clear_cpu_cap(X86_FEATURE_FXSR);
+               setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
+               setup_clear_cpu_cap(X86_FEATURE_XMM);
        }
 
-       if (eagerfpu == ENABLE)
-               setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
+       if (cmdline_find_option_bool(boot_command_line, "noxsave"))
+               fpu__xstate_clear_all_cpu_caps();
+
+       if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
+               setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
 
-       printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
+       if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
+               setup_clear_cpu_cap(X86_FEATURE_XSAVES);
 }
 
 /*
@@ -313,6 +284,7 @@ static void __init fpu__init_system_ctx_switch(void)
  */
 void __init fpu__init_system(struct cpuinfo_x86 *c)
 {
+       fpu__init_parse_early_param();
        fpu__init_system_early_generic(c);
 
        /*
@@ -336,62 +308,3 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
 
        fpu__init_system_ctx_switch();
 }
-
-/*
- * Boot parameter to turn off FPU support and fall back to math-emu:
- */
-static int __init no_387(char *s)
-{
-       setup_clear_cpu_cap(X86_FEATURE_FPU);
-       return 1;
-}
-__setup("no387", no_387);
-
-/*
- * Disable all xstate CPU features:
- */
-static int __init x86_noxsave_setup(char *s)
-{
-       if (strlen(s))
-               return 0;
-
-       fpu__xstate_clear_all_cpu_caps();
-
-       return 1;
-}
-__setup("noxsave", x86_noxsave_setup);
-
-/*
- * Disable the XSAVEOPT instruction specifically:
- */
-static int __init x86_noxsaveopt_setup(char *s)
-{
-       setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-
-       return 1;
-}
-__setup("noxsaveopt", x86_noxsaveopt_setup);
-
-/*
- * Disable the XSAVES instruction:
- */
-static int __init x86_noxsaves_setup(char *s)
-{
-       setup_clear_cpu_cap(X86_FEATURE_XSAVES);
-
-       return 1;
-}
-__setup("noxsaves", x86_noxsaves_setup);
-
-/*
- * Disable FX save/restore and SSE support:
- */
-static int __init x86_nofxsr_setup(char *s)
-{
-       setup_clear_cpu_cap(X86_FEATURE_FXSR);
-       setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
-       setup_clear_cpu_cap(X86_FEATURE_XMM);
-
-       return 1;
-}
-__setup("nofxsr", x86_nofxsr_setup);
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 70fc312221fc..3fa200ecca62 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -632,8 +632,7 @@ void __init fpu__init_system_xstate(void)
                BUG();
        }
 
-       /* Support only the state known to the OS: */
-       xfeatures_mask = xfeatures_mask & XCNTXT_MASK;
+       xfeatures_mask &= fpu__get_supported_xfeatures_mask();
 
        /* Enable xstate instructions to be able to continue with initialization: */
        fpu__init_cpu_xstate();
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 50a3fad5b89f..2bcfb5f2bc44 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -300,6 +300,10 @@ static int arch_build_bp_info(struct perf_event *bp)
                        return -EINVAL;
                if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
                        return -EINVAL;
+
+               if (!boot_cpu_has(X86_FEATURE_BPEXT))
+                       return -EOPNOTSUPP;
+
                /*
                 * It's impossible to use a range breakpoint to fake out
                 * user vs kernel detection because bp_len - 1 can't
@@ -307,8 +311,6 @@ static int arch_build_bp_info(struct perf_event *bp)
                 * breakpoints, then we'll have to check for kprobe-blacklisted
                 * addresses anywhere in the range.
                 */
-               if (!cpu_has_bpext)
-                       return -EOPNOTSUPP;
                info->mask = bp->attr.bp_len - 1;
                info->len = X86_BREAKPOINT_LEN_1;
        }
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 48ca93242bfd..1f7aefc7b0b4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -295,7 +295,7 @@ do {                                                        \
 
 static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-       if (cpu_has_topoext) {
+       if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
                if (c->phys_proc_id == o->phys_proc_id &&
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1fbd2631be60..8c73bf1492b8 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -751,7 +751,6 @@ dotraplinkage void
 do_device_not_available(struct pt_regs *regs, long error_code)
 {
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
-       BUG_ON(use_eager_fpu());
 
 #ifdef CONFIG_MATH_EMULATION
        if (read_cr0() & X86_CR0_EM) {
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index af57736a0309..d6d64a519559 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -357,8 +357,10 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
        tss = &per_cpu(cpu_tss, get_cpu());
        /* make room for real-mode segments */
        tsk->thread.sp0 += 16;
-       if (cpu_has_sep)
+
+       if (static_cpu_has_safe(X86_FEATURE_SEP))
                tsk->thread.sysenter_cs = 0;
+
        load_sp0(tss, &tsk->thread);
        put_cpu();
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8864fec63a20..f1507626ed36 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -790,6 +790,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
        return assign_eip_near(ctxt, ctxt->_eip + rel);
 }
 
+static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
+                             void *data, unsigned size)
+{
+       return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
+}
+
+static int linear_write_system(struct x86_emulate_ctxt *ctxt,
+                              ulong linear, void *data,
+                              unsigned int size)
+{
+       return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
+}
+
 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
                              void *data,
@@ -801,7 +814,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
-       return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
+       return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
 }
 
 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
@@ -815,7 +828,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
-       return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+       return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
 }
 
 /*
@@ -1488,8 +1501,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
                return emulate_gp(ctxt, index << 3 | 0x2);
 
        addr = dt.address + index * 8;
-       return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
-                                  &ctxt->exception);
+       return linear_read_system(ctxt, addr, desc, sizeof *desc);
 }
 
 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
@@ -1552,8 +1564,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
-                                  &ctxt->exception);
+       return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
 }
 
 /* allowed just for 8 bytes segments */
@@ -1567,8 +1578,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
-                                   &ctxt->exception);
+       return linear_write_system(ctxt, addr, desc, sizeof *desc);
 }
 
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
@@ -1729,8 +1739,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                return ret;
                }
        } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
-               ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
-                               sizeof(base3), &ctxt->exception);
+               ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
                if (ret != X86EMUL_CONTINUE)
                        return ret;
                if (is_noncanonical_address(get_desc_base(&seg_desc) |
@@ -2043,11 +2052,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
        eip_addr = dt.address + (irq << 2);
        cs_addr = dt.address + (irq << 2) + 2;
 
-       rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
+       rc = linear_read_system(ctxt, cs_addr, &cs, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
+       rc = linear_read_system(ctxt, eip_addr, &eip, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
@@ -2891,12 +2900,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
 #ifdef CONFIG_X86_64
        base |= ((u64)base3) << 32;
 #endif
-       r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
+       r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
        if (r != X86EMUL_CONTINUE)
                return false;
        if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
                return false;
-       r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
+       r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
        if (r != X86EMUL_CONTINUE)
                return false;
        if ((perm >> bit_idx) & mask)
@@ -3025,35 +3034,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
                          u16 tss_selector, u16 old_tss_sel,
                          ulong old_tss_base, struct desc_struct *new_desc)
 {
-       const struct x86_emulate_ops *ops = ctxt->ops;
        struct tss_segment_16 tss_seg;
        int ret;
        u32 new_tss_base = get_desc_base(new_desc);
 
-       ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
-                           &ctxt->exception);
+       ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
        save_state_to_tss16(ctxt, &tss_seg);
 
-       ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
-                            &ctxt->exception);
+       ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
-       ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
-                           &ctxt->exception);
+       ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
        if (old_tss_sel != 0xffff) {
                tss_seg.prev_task_link = old_tss_sel;
 
-               ret = ops->write_std(ctxt, new_tss_base,
-                                    &tss_seg.prev_task_link,
-                                    sizeof tss_seg.prev_task_link,
-                                    &ctxt->exception);
+               ret = linear_write_system(ctxt, new_tss_base,
+                                         &tss_seg.prev_task_link,
+                                         sizeof tss_seg.prev_task_link);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
        }
@@ -3169,38 +3173,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
                          u16 tss_selector, u16 old_tss_sel,
                          ulong old_tss_base, struct desc_struct *new_desc)
 {
-       const struct x86_emulate_ops *ops = ctxt->ops;
        struct tss_segment_32 tss_seg;
        int ret;
        u32 new_tss_base = get_desc_base(new_desc);
        u32 eip_offset = offsetof(struct tss_segment_32, eip);
        u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
 
-       ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
-                           &ctxt->exception);
+       ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
        save_state_to_tss32(ctxt, &tss_seg);
 
        /* Only GP registers and segment selectors are saved */
-       ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
-                            ldt_sel_offset - eip_offset, &ctxt->exception);
+       ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
+                                 ldt_sel_offset - eip_offset);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
-       ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
-                           &ctxt->exception);
+       ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
        if (old_tss_sel != 0xffff) {
                tss_seg.prev_task_link = old_tss_sel;
 
-               ret = ops->write_std(ctxt, new_tss_base,
-                                    &tss_seg.prev_task_link,
-                                    sizeof tss_seg.prev_task_link,
-                                    &ctxt->exception);
+               ret = linear_write_system(ctxt, new_tss_base,
+                                         &tss_seg.prev_task_link,
+                                         sizeof tss_seg.prev_task_link);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
        }
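The linear_read_system()/linear_write_system() wrappers make the privileged intent explicit: IDT vectors, descriptor tables and TSS images are system structures, so those accesses pass system=true and bypass the user-mode permission check. A sketch of a caller (example_read_desc is hypothetical):

static int example_read_desc(struct x86_emulate_ctxt *ctxt, ulong addr,
			     struct desc_struct *desc)
{
	/* Expands to ->read_std(..., &ctxt->exception, true): CPL0 semantics. */
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}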
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a750fc7c7458..63c44a9bf6bb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6692,8 +6692,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                        vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
                return 1;
 
-       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-                               sizeof(vmptr), &e)) {
+       if (kvm_read_guest_virt(vcpu, gva, &vmptr, sizeof(vmptr), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
@@ -7211,8 +7210,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                                vmx_instruction_info, true, &gva))
                        return 1;
                /* _system ok, as nested_vmx_check_permission verified cpl=0 */
-               kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
-                            &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
+               kvm_write_guest_virt_system(vcpu, gva, &field_value,
+                                           (is_long_mode(vcpu) ? 8 : 4), NULL);
        }
 
        nested_vmx_succeed(vcpu);
@@ -7247,8 +7246,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
                if (get_vmx_mem_address(vcpu, exit_qualification,
                                vmx_instruction_info, false, &gva))
                        return 1;
-               if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
-                          &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+               if (kvm_read_guest_virt(vcpu, gva, &field_value,
+                                       (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
                        kvm_inject_page_fault(vcpu, &e);
                        return 1;
                }
@@ -7338,9 +7337,9 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
                        vmx_instruction_info, true, &vmcs_gva))
                return 1;
        /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
-       if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
-                                (void *)&to_vmx(vcpu)->nested.current_vmptr,
-                                sizeof(u64), &e)) {
+       if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
+                                       (void *)&to_vmx(vcpu)->nested.current_vmptr,
+                                       sizeof(u64), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
@@ -7394,8 +7393,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
        if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
                        vmx_instruction_info, false, &gva))
                return 1;
-       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
-                               sizeof(operand), &e)) {
+       if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
@@ -7454,8 +7452,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
        if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
                        vmx_instruction_info, false, &gva))
                return 1;
-       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
-                               sizeof(u32), &e)) {
+       if (kvm_read_guest_virt(vcpu, gva, &vpid, sizeof(u32), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9cea09597d66..53d43d22a84b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4245,11 +4245,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
        return X86EMUL_CONTINUE;
 }
 
-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
                               gva_t addr, void *val, unsigned int bytes,
                               struct x86_exception *exception)
 {
-       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
@@ -4257,12 +4256,17 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
 
-static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
-                                     gva_t addr, void *val, unsigned int bytes,
-                                     struct x86_exception *exception)
+static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
+                            gva_t addr, void *val, unsigned int bytes,
+                            struct x86_exception *exception, bool system)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
+       u32 access = 0;
+
+       if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
+               access |= PFERR_USER_MASK;
+
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
 }
 
 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
@@ -4274,18 +4278,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
        return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
 }
 
-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
-                                      gva_t addr, void *val,
-                                      unsigned int bytes,
-                                      struct x86_exception *exception)
+static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
+                                     struct kvm_vcpu *vcpu, u32 access,
+                                     struct x86_exception *exception)
 {
-       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        void *data = val;
        int r = X86EMUL_CONTINUE;
 
        while (bytes) {
                gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
-                                                            PFERR_WRITE_MASK,
+                                                            access,
                                                             exception);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
@@ -4306,6 +4308,27 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
 out:
        return r;
 }
+
+static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
+                             unsigned int bytes, struct x86_exception *exception,
+                             bool system)
+{
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       u32 access = PFERR_WRITE_MASK;
+
+       if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
+               access |= PFERR_USER_MASK;
+
+       return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+                                          access, exception);
+}
+
+int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
+                               unsigned int bytes, struct x86_exception *exception)
+{
+       return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+                                          PFERR_WRITE_MASK, exception);
+}
 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
 
 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
@@ -5025,8 +5048,8 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
 static const struct x86_emulate_ops emulate_ops = {
        .read_gpr            = emulator_read_gpr,
        .write_gpr           = emulator_write_gpr,
-       .read_std            = kvm_read_guest_virt_system,
-       .write_std           = kvm_write_guest_virt_system,
+       .read_std            = emulator_read_std,
+       .write_std           = emulator_write_std,
        .read_phys           = kvm_read_guest_phys_system,
        .fetch               = kvm_fetch_guest_virt,
        .read_emulated       = emulator_read_emulated,
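emulator_read_std() and emulator_write_std() above derive the page-walk access mask from the guest CPL and the new system flag, so only ordinary CPL3 accesses carry PFERR_USER_MASK. The derivation, isolated as a sketch (example_access_mask is hypothetical):

static u32 example_access_mask(struct kvm_vcpu *vcpu, bool write, bool system)
{
	u32 access = write ? PFERR_WRITE_MASK : 0;

	/* Only non-system accesses performed at CPL3 get the user bit. */
	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
		access |= PFERR_USER_MASK;

	return access;
}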
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index f2afa5fe48a6..53a750a10598 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -164,11 +164,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);
 
-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);
 
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 90555bf60aa4..92e2eacb3321 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -31,7 +31,7 @@ early_param("noexec", noexec_setup);
 
 void x86_configure_nx(void)
 {
-       if (cpu_has_nx && !disable_nx)
+       if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
                __supported_pte_mask |= _PAGE_NX;
        else
                __supported_pte_mask &= ~_PAGE_NX;
@@ -39,7 +39,7 @@ void x86_configure_nx(void)
 
 void __init x86_report_nx(void)
 {
-       if (!cpu_has_nx) {
+       if (!boot_cpu_has(X86_FEATURE_NX)) {
                printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
                       "missing in CPU!\n");
        } else {
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 0c98a9d51a24..44ce80606944 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -140,7 +140,7 @@ static int via_rng_init(struct hwrng *rng)
         * RNG configuration like it used to be the case in this
         * register */
        if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
-               if (!cpu_has_xstore_enabled) {
+               if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
                        pr_err(PFX "can't enable hardware RNG "
                                "if XSTORE is not enabled\n");
                        return -ENODEV;
@@ -200,8 +200,9 @@ static int __init mod_init(void)
 {
        int err;
 
-       if (!cpu_has_xstore)
+       if (!boot_cpu_has(X86_FEATURE_XSTORE))
                return -ENODEV;
+
        pr_info("VIA RNG detected\n");
        err = hwrng_register(&via_rng);
        if (err) {
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index da2d6777bd09..97a364694bfc 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -515,7 +515,7 @@ static int __init padlock_init(void)
        if (!x86_match_cpu(padlock_cpu_id))
                return -ENODEV;
 
-       if (!cpu_has_xcrypt_enabled) {
+       if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
                printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                return -ENODEV;
        }
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 4e154c9b9206..8c5f90647b7a 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -540,7 +540,7 @@ static int __init padlock_init(void)
        struct shash_alg *sha1;
        struct shash_alg *sha256;
 
-       if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled)
+       if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
                return -ENODEV;
 
        /* Register the newly added algorithm module if on *
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index 263af709e536..b907e4b1bbe2 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -53,8 +53,6 @@ static int p8_aes_init(struct crypto_tfm *tfm)
                       alg, PTR_ERR(fallback));
                return PTR_ERR(fallback);
        }
-       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-              crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
 
        crypto_cipher_set_flags(fallback,
                                crypto_cipher_get_flags((struct
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 3f8bb9a40df1..9506e8693c81 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -55,8 +55,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
                       alg, PTR_ERR(fallback));
                return PTR_ERR(fallback);
        }
-       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-              crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
 
        crypto_blkcipher_set_flags(
                fallback,
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index d83ab4bac8b1..7d070201b3d3 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -53,8 +53,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
                       alg, PTR_ERR(fallback));
                return PTR_ERR(fallback);
        }
-       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-              crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
 
        crypto_blkcipher_set_flags(
                fallback,
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 9cb3a0b715e2..84b9389bf1ed 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
                       alg, PTR_ERR(fallback));
                return PTR_ERR(fallback);
        }
-       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-              crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
 
        crypto_shash_set_flags(fallback,
                               crypto_shash_get_flags((struct crypto_shash
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 759a39906a52..fe89fd56eabf 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2117,6 +2117,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
        struct gpio_desc *desc = NULL;
        int status;
        enum gpio_lookup_flags lookupflags = 0;
+       /* Maybe we have a device name, maybe not */
+       const char *devname = dev ? dev_name(dev) : "?";
 
        dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
 
@@ -2145,8 +2147,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
                return desc;
        }
 
-       /* If a connection label was passed use that, else use the device name as label */
-       status = gpiod_request(desc, con_id ? con_id : dev_name(dev));
+       /*
+        * If a connection label was passed use that, else attempt to use
+        * the device name as label
+        */
+       status = gpiod_request(desc, con_id ? con_id : devname);
        if (status < 0)
                return ERR_PTR(status);
 
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 3851d5715772..aeb8250ab079 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1249,6 +1249,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN060B", 0 },
        { "ELAN060C", 0 },
        { "ELAN0611", 0 },
+       { "ELAN0612", 0 },
        { "ELAN1000", 0 },
        { }
 };
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 4d113c9e4b77..7bf2597ce44c 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -425,6 +425,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id goodix_acpi_match[] = {
        { "GDIX1001", 0 },
+       { "GDIX1002", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
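
Both input hunks simply whitelist one more ACPI hardware ID in the
driver's match table; the table shape is the usual sentinel-terminated
array. A condensed sketch with hypothetical IDs:

    /* Sketch of an ACPI match table; the IDs here are made up. */
    static const struct acpi_device_id example_acpi_match[] = {
            { "ABCD0001", 0 },      /* previously supported HID */
            { "ABCD0002", 0 },      /* newly observed HID */
            { }                     /* empty entry terminates the table */
    };
    MODULE_DEVICE_TABLE(acpi, example_acpi_match);
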
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index e9b241b1c9dd..ac596928f6b4 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -753,7 +753,7 @@ static inline void set_irq_posting_cap(void)
                 * should have X86_FEATURE_CX16 support, this has been confirmed
                 * with Intel hardware guys.
                 */
-               if ( cpu_has_cx16 )
+               if (boot_cpu_has(X86_FEATURE_CX16))
                        intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
 
                for_each_iommu(iommu, drhd)
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 1e688bfec567..fe90b7e04427 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b)
                }
        }
 
-       if (b->batch_page) {
-               vunmap(b->batch_page);
-               b->batch_page = NULL;
-       }
-
-       if (b->page) {
-               __free_page(b->page);
-               b->page = NULL;
-       }
+       /* Clearing the batch_page unconditionally has no adverse effect */
+       free_page((unsigned long)b->batch_page);
+       b->batch_page = NULL;
 }
 
 /*
@@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = {
 
 static bool vmballoon_init_batching(struct vmballoon *b)
 {
-       b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
-       if (!b->page)
-               return false;
+       struct page *page;
 
-       b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
-       if (!b->batch_page) {
-               __free_page(b->page);
+       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (!page)
                return false;
-       }
 
+       b->batch_page = page_address(page);
        return true;
 }
 
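
The balloon rework drops the vmap() of a single page: one page needs
no virtually contiguous remapping, so page_address() yields a usable
linear-map address directly, and teardown becomes a single free_page()
on that address. A condensed sketch of the pairing, under the
assumption that exactly one page is involved:

    /* One page never needs vmap(); use its linear-map address. */
    static void *alloc_batch_page(void)
    {
            struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

            return page ? page_address(page) : NULL;
    }

    static void free_batch_page(void *batch)
    {
            /* free_page(0) is a no-op, so a NULL batch page is fine. */
            free_page((unsigned long)batch);
    }
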
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 312343beb249..4d532a085db9 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -860,15 +860,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
        dma->rx_conf.direction          = DMA_DEV_TO_MEM;
        dma->rx_conf.src_addr_width     = DMA_SLAVE_BUSWIDTH_1_BYTE;
        dma->rx_conf.src_addr           = p->port.mapbase + S3C2410_URXH;
-       dma->rx_conf.src_maxburst       = 16;
+       dma->rx_conf.src_maxburst       = 1;
 
        dma->tx_conf.direction          = DMA_MEM_TO_DEV;
        dma->tx_conf.dst_addr_width     = DMA_SLAVE_BUSWIDTH_1_BYTE;
        dma->tx_conf.dst_addr           = p->port.mapbase + S3C2410_UTXH;
-       if (dma_get_cache_alignment() >= 16)
-               dma->tx_conf.dst_maxburst = 16;
-       else
-               dma->tx_conf.dst_maxburst = 1;
+       dma->tx_conf.dst_maxburst       = 1;
 
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
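
Forcing both burst sizes to 1 matches the hardware here: URXH and UTXH
are single one-byte data registers, so a burst setting of 1 is the
safe match for them. A condensed sketch of the resulting RX slave
configuration (fields as in the hunk, everything else omitted):

    struct dma_slave_config rx_conf = {
            .direction      = DMA_DEV_TO_MEM,
            .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
            .src_maxburst   = 1,    /* was 16; one byte per transfer */
    };
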
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 208b3f5ffb3f..7efd70bfeaf7 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -923,7 +923,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
        if (bio_flags & EXTENT_BIO_TREE_LOG)
                return 0;
 #ifdef CONFIG_X86
-       if (cpu_has_xmm4_2)
+       if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
                return 0;
 #endif
        return 1;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index da79e9d66e5b..240cbaee819f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -926,9 +926,9 @@ static inline struct file *get_file(struct file *f)
 /* Page cache limit. The filesystems should put that into their s_maxbytes 
    limits, otherwise bad things can happen in VM. */ 
 #if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE       (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE       ((loff_t)ULONG_MAX << PAGE_SHIFT)
 #elif BITS_PER_LONG==64
-#define MAX_LFS_FILESIZE       ((loff_t)0x7fffffffffffffffLL)
+#define MAX_LFS_FILESIZE       ((loff_t)LLONG_MAX)
 #endif
 
 #define FL_POSIX       1
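
Worked out on a 32-bit box with 4 KiB pages: the old formula gives
(4096 << 31) - 1 = 2^43 - 1, just under 8 TiB, yet the page cache can
index ULONG_MAX = 2^32 - 1 pages, so the honest ceiling is
(2^32 - 1) << 12, a little under 16 TiB. A standalone check of the
arithmetic (assumes a PAGE_SHIFT of 12, the common 4 KiB case):

    #include <stdio.h>

    int main(void)
    {
            const int page_shift = 12;                      /* assumed 4 KiB pages */
            const long long page_size = 1LL << page_shift;
            const unsigned long long ulong_max = 0xffffffffULL; /* 32-bit ULONG_MAX */

            long long old_limit = (page_size << 31) - 1;            /* old macro */
            long long new_limit = (long long)(ulong_max << page_shift); /* new macro */

            printf("old: %lld bytes\n", old_limit); /* 8796093022207  (~8 TiB)  */
            printf("new: %lld bytes\n", new_limit); /* 17592186040320 (~16 TiB) */
            return 0;
    }
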
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 15150b412930..3ba903ff2bb0 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -437,6 +437,24 @@ static int verify_address_len(const void *p)
        return 0;
 }
 
+static inline int sadb_key_len(const struct sadb_key *key)
+{
+       int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);
+
+       return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes,
+                           sizeof(uint64_t));
+}
+
+static int verify_key_len(const void *p)
+{
+       const struct sadb_key *key = p;
+
+       if (sadb_key_len(key) > key->sadb_key_len)
+               return -EINVAL;
+
+       return 0;
+}
+
 static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
 {
        return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
@@ -533,16 +551,25 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
                                return -EINVAL;
                        if (ext_hdrs[ext_type-1] != NULL)
                                return -EINVAL;
-                       if (ext_type == SADB_EXT_ADDRESS_SRC ||
-                           ext_type == SADB_EXT_ADDRESS_DST ||
-                           ext_type == SADB_EXT_ADDRESS_PROXY ||
-                           ext_type == SADB_X_EXT_NAT_T_OA) {
+                       switch (ext_type) {
+                       case SADB_EXT_ADDRESS_SRC:
+                       case SADB_EXT_ADDRESS_DST:
+                       case SADB_EXT_ADDRESS_PROXY:
+                       case SADB_X_EXT_NAT_T_OA:
                                if (verify_address_len(p))
                                        return -EINVAL;
-                       }
-                       if (ext_type == SADB_X_EXT_SEC_CTX) {
+                               break;
+                       case SADB_X_EXT_SEC_CTX:
                                if (verify_sec_ctx_len(p))
                                        return -EINVAL;
+                               break;
+                       case SADB_EXT_KEY_AUTH:
+                       case SADB_EXT_KEY_ENCRYPT:
+                               if (verify_key_len(p))
+                                       return -EINVAL;
+                               break;
+                       default:
+                               break;
                        }
                        ext_hdrs[ext_type-1] = (void *) p;
                }
@@ -1111,14 +1138,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
        key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
        if (key != NULL &&
            sa->sadb_sa_auth != SADB_X_AALG_NULL &&
-           ((key->sadb_key_bits+7) / 8 == 0 ||
-            (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
+           key->sadb_key_bits == 0)
                return ERR_PTR(-EINVAL);
        key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
        if (key != NULL &&
            sa->sadb_sa_encrypt != SADB_EALG_NULL &&
-           ((key->sadb_key_bits+7) / 8 == 0 ||
-            (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
+           key->sadb_key_bits == 0)
                return ERR_PTR(-EINVAL);
 
        x = xfrm_state_alloc(net);
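
The new verify_key_len() closes an out-of-bounds read: a message could
advertise more key bits than its extension length (sadb_key_len,
counted in 64-bit words) actually covers, and the old checks in
pfkey_msg2xfrm_state() ran too late and only for some algorithms. With
the length validated at parse time, the later checks shrink to the
key_bits != 0 test. A standalone walk-through of the arithmetic; the
struct is condensed to the two fields used (the real sadb_key header
is also 8 bytes):

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Condensed stand-in for struct sadb_key (8-byte header). */
    struct sadb_key_hdr {
            uint16_t sadb_key_len;  /* total length in 64-bit words */
            uint16_t sadb_key_bits; /* key length in bits */
            uint32_t reserved;
    };

    int main(void)
    {
            /* Claims a 128-bit key but only 2 words of total length. */
            struct sadb_key_hdr key = { .sadb_key_len = 2, .sadb_key_bits = 128 };

            int key_bytes = DIV_ROUND_UP(key.sadb_key_bits, 8);     /* 16 */
            int need = DIV_ROUND_UP(sizeof(key) + key_bytes,
                                    sizeof(uint64_t));              /* (8+16)/8 = 3 */

            printf("need %d words, have %d: %s\n", need, key.sadb_key_len,
                   need > key.sadb_key_len ? "reject (-EINVAL)" : "ok");
            return 0;
    }
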
