Some 'feature' BIOSes fiddle with the TSC_ADJUST register during
suspend/resume, which renders the TSC unusable.

Add sanity checks to the resume path and restore the original value if
it was adjusted.
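The check itself is a straight read/compare/restore of
MSR_IA32_TSC_ADJUST against the value saved at boot. Roughly, as a
sketch condensed from the tsc_sync.c hunk below (adj is the per-cpu
struct tsc_adjust pointer; rate limiting and the warning are left out):

        s64 curval;

        /* Read back what the MSR holds now */
        rdmsrl(MSR_IA32_TSC_ADJUST, curval);
        if (adj->adjusted != curval) {
                /* Firmware changed it behind our back, write the saved value back */
                wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);
        }

On resume the check is forced: it bypasses the jiffies based rate limit
and warns even if it has warned before, so a BIOS which fiddles with the
MSR on every suspend/resume cycle is reported every time.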

Reported-by: Roland Scheidegger <rscheidegger_li...@hispeed.ch>
Signed-off-by: Thomas Gleixner <t...@linutronix.de>
---
 arch/x86/include/asm/tsc.h |    4 ++--
 arch/x86/kernel/process.c  |    2 +-
 arch/x86/kernel/tsc.c      |    6 ++++++
 arch/x86/kernel/tsc_sync.c |    6 +++---
 arch/x86/power/cpu.c       |    1 +
 5 files changed, 13 insertions(+), 6 deletions(-)

--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -47,12 +47,12 @@ extern int tsc_clocksource_reliable;
  */
 #ifdef CONFIG_X86_TSC
 extern bool tsc_store_and_check_tsc_adjust(void);
-extern void tsc_verify_tsc_adjust(void);
+extern void tsc_verify_tsc_adjust(bool resume);
 extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 #else
 static inline bool tsc_store_and_check_tsc_adjust(void) { return false; }
-static inline void tsc_verify_tsc_adjust(void) { }
+static inline void tsc_verify_tsc_adjust(bool resume) { }
 static inline void check_tsc_sync_source(int cpu) { }
 static inline void check_tsc_sync_target(void) { }
 #endif
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -277,7 +277,7 @@ void exit_idle(void)
 
 void arch_cpu_idle_enter(void)
 {
-       tsc_verify_tsc_adjust();
+       tsc_verify_tsc_adjust(false);
        local_touch_nmi();
        enter_idle();
 }
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1080,6 +1080,11 @@ static void detect_art(void)
 
 static struct clocksource clocksource_tsc;
 
+static void tsc_resume(struct clocksource *cs)
+{
+       tsc_verify_tsc_adjust(true);
+}
+
 /*
  * We used to compare the TSC to the cycle_last value in the clocksource
  * structure to avoid a nasty time-warp. This can be observed in a
@@ -1112,6 +1117,7 @@ static struct clocksource clocksource_ts
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
                                  CLOCK_SOURCE_MUST_VERIFY,
        .archdata               = { .vclock_mode = VCLOCK_TSC },
+       .resume                 = tsc_resume,
 };
 
 void mark_tsc_unstable(char *reason)
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -30,7 +30,7 @@ struct tsc_adjust {
 
 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
 
 
-void tsc_verify_tsc_adjust(void)
+void tsc_verify_tsc_adjust(bool resume)
 {
        struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
        s64 curval;
@@ -39,7 +39,7 @@ void tsc_verify_tsc_adjust(void)
                return;
 
        /* Rate limit the MSR check */
-       if (time_before(jiffies, adj->nextcheck))
+       if (!resume && time_before(jiffies, adj->nextcheck))
                return;
 
        adj->nextcheck = jiffies + HZ;
@@ -51,7 +51,7 @@ void tsc_verify_tsc_adjust(void)
        /* Restore the original value */
        wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);
 
-       if (!adj->warned) {
+       if (!adj->warned || resume) {
        pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
                        smp_processor_id(), adj->adjusted, curval);
                adj->warned = true;
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -256,6 +256,7 @@ static void notrace __restore_processor_
        mtrr_bp_restore();
        perf_restore_debug_store();
        msr_restore_context(ctxt);
+       tsc_verify_tsc_adjust(true);
 }
 
 /* Needed by apm.c */

