Cc Paolo, 2017-01-19 21:36 GMT+08:00 Peter Zijlstra <pet...@infradead.org>: > On Thu, Jan 19, 2017 at 11:19:24AM +0100, Peter Zijlstra wrote: > >> I'll go have a prod. Thanks! > > This seems to cure it for me. > > --- > Subject: sched/clock: Fix hotplug issue > > Mike reported that he could trigger the WARN_ON_ONCE() in > set_sched_clock_stable() using hotplug. > > This exposed a fundamental problem with the interface, we should never > mark the TSC stable if we ever find it to be unstable. Therefore > set_sched_clock_stable() is a broken interface. > > The reason it existed is that not having it is a pain, it means all > relevant architecture code needs to call clear_sched_clock_stable() > where appropriate. > > Of the three architectures that select HAVE_UNSTABLE_SCHED_CLOCK ia64 > and parisc are trivial in that they never called > set_sched_clock_stable(), so add an unconditional call to > clear_sched_clock_stable() to them. > > For x86 the story is a lot more involved, and what this patch tries to > do is ensure we preserve the status quo. So even if Cyrix or Transmeta > have a usable TSC they never called set_sched_clock_stable(), so they now > get an explicit mark unstable. > > XXX: what about Xen ? 
> > Fixes: 9881b024b7d7 ("sched/clock: Delay switching sched_clock to stable") > Reported-by: Mike Galbraith <efa...@gmx.de> > Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org> > --- > arch/ia64/kernel/setup.c | 2 ++ > arch/parisc/kernel/setup.c | 2 ++ > arch/x86/kernel/cpu/amd.c | 6 ++++-- > arch/x86/kernel/cpu/centaur.c | 2 ++ > arch/x86/kernel/cpu/common.c | 3 +++ > arch/x86/kernel/cpu/cyrix.c | 2 ++ > arch/x86/kernel/cpu/intel.c | 6 ++++-- > arch/x86/kernel/cpu/transmeta.c | 3 +++ > arch/x86/kernel/kvmclock.c | 2 +- > include/linux/sched.h | 1 - > kernel/sched/clock.c | 29 ++++++++--------------------- > 11 files changed, 31 insertions(+), 27 deletions(-) > > diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c > index 7ec7acc..c483ece 100644 > --- a/arch/ia64/kernel/setup.c > +++ b/arch/ia64/kernel/setup.c > @@ -619,6 +619,8 @@ setup_arch (char **cmdline_p) > check_sal_cache_flush(); > #endif > paging_init(); > + > + clear_sched_clock_stable(); > } > > /* > diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c > index 2e66a88..068ed36 100644 > --- a/arch/parisc/kernel/setup.c > +++ b/arch/parisc/kernel/setup.c > @@ -36,6 +36,7 @@ > #undef PCI_DEBUG > #include <linux/proc_fs.h> > #include <linux/export.h> > +#include <linux/sched.h> > > #include <asm/processor.h> > #include <asm/sections.h> > @@ -176,6 +177,7 @@ void __init setup_arch(char **cmdline_p) > conswitchp = &dummy_con; /* we use do_take_over_console() > later ! 
*/ > #endif > > + clear_sched_clock_stable(); > } > > /* > diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c > index 1d31672..80e657e 100644 > --- a/arch/x86/kernel/cpu/amd.c > +++ b/arch/x86/kernel/cpu/amd.c > @@ -541,8 +541,10 @@ static void early_init_amd(struct cpuinfo_x86 *c) > if (c->x86_power & (1 << 8)) { > set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); > set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); > - if (!check_tsc_unstable()) > - set_sched_clock_stable(); > + if (check_tsc_unstable()) > + clear_sched_clock_stable(); > + } else { > + clear_sched_clock_stable(); > } > > /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */ > diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c > index 1661d8e..c4bab8c 100644 > --- a/arch/x86/kernel/cpu/centaur.c > +++ b/arch/x86/kernel/cpu/centaur.c > @@ -104,6 +104,8 @@ static void early_init_centaur(struct cpuinfo_x86 *c) > #ifdef CONFIG_X86_64 > set_cpu_cap(c, X86_FEATURE_SYSENTER32); > #endif > + > + clear_sched_clock_stable(); > } > > static void init_centaur(struct cpuinfo_x86 *c) > diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c > index 9bab7a8..0bdb1ab 100644 > --- a/arch/x86/kernel/cpu/common.c > +++ b/arch/x86/kernel/cpu/common.c > @@ -83,6 +83,7 @@ static void default_init(struct cpuinfo_x86 *c) > strcpy(c->x86_model_id, "386"); > } > #endif > + clear_sched_clock_stable(); > } > > static const struct cpu_dev default_cpu = { > @@ -1055,6 +1056,8 @@ static void identify_cpu(struct cpuinfo_x86 *c) > */ > if (this_cpu->c_init) > this_cpu->c_init(c); > + else > + clear_sched_clock_stable(); > > /* Disable the PN if appropriate */ > squash_the_stupid_serial_number(c); > diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c > index bd9dcd6..47416f9 100644 > --- a/arch/x86/kernel/cpu/cyrix.c > +++ b/arch/x86/kernel/cpu/cyrix.c > @@ -9,6 +9,7 @@ > #include <asm/pci-direct.h> > #include <asm/tsc.h> > #include <asm/cpufeature.h> > +#include 
<linux/sched.h> > > #include "cpu.h" > > @@ -183,6 +184,7 @@ static void early_init_cyrix(struct cpuinfo_x86 *c) > set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); > break; > } > + clear_sched_clock_stable(); > } > > static void init_cyrix(struct cpuinfo_x86 *c) > diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c > index 203f860..026c728 100644 > --- a/arch/x86/kernel/cpu/intel.c > +++ b/arch/x86/kernel/cpu/intel.c > @@ -119,8 +119,10 @@ static void early_init_intel(struct cpuinfo_x86 *c) > if (c->x86_power & (1 << 8)) { > set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); > set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); > - if (!check_tsc_unstable()) > - set_sched_clock_stable(); > + if (check_tsc_unstable()) > + clear_sched_clock_stable(); > + } else { > + clear_sched_clock_stable(); > } > > /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ > diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c > index 3417856..c1ea5b9 100644 > --- a/arch/x86/kernel/cpu/transmeta.c > +++ b/arch/x86/kernel/cpu/transmeta.c > @@ -1,4 +1,5 @@ > #include <linux/kernel.h> > +#include <linux/sched.h> > #include <linux/mm.h> > #include <asm/cpufeature.h> > #include <asm/msr.h> > @@ -14,6 +15,8 @@ static void early_init_transmeta(struct cpuinfo_x86 *c) > if (xlvl >= 0x80860001) > c->x86_capability[CPUID_8086_0001_EDX] = > cpuid_edx(0x80860001); > } > + > + clear_sched_clock_stable(); > } > > static void init_transmeta(struct cpuinfo_x86 *c) > diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c > index 2a5cafd..542710b 100644 > --- a/arch/x86/kernel/kvmclock.c > +++ b/arch/x86/kernel/kvmclock.c > @@ -107,12 +107,12 @@ static inline void kvm_sched_clock_init(bool stable) > { > if (!stable) { > pv_time_ops.sched_clock = kvm_clock_read; > + clear_sched_clock_stable(); > return; > } > > kvm_sched_clock_offset = kvm_clock_read(); > pv_time_ops.sched_clock = kvm_sched_clock_read; > - set_sched_clock_stable();
This results in sched clock always unstable for kvm guest since there is no invariant tsc cpuid bit exposed for kvm guest currently. The blockage happened for several reasons: 1) Migration: to host with different TSC frequency. 2) Savevm: It is not safe to use the TSC for wall clock timer services. How about something like below(untested): diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index bae6ea6..a61c477 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -115,6 +115,7 @@ static inline void kvm_sched_clock_init(bool stable) kvm_sched_clock_offset = kvm_clock_read(); pv_time_ops.sched_clock = kvm_sched_clock_read; + hypervisor_sched_clock_stable(); printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n", kvm_sched_clock_offset); diff --git a/include/linux/sched.h b/include/linux/sched.h index 451e241..38c6edb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2499,6 +2499,10 @@ static inline void clear_sched_clock_stable(void) { } +static inline void hypervisor_sched_clock_stable(void) +{ +} + static inline void sched_clock_idle_sleep_event(void) { } @@ -2526,6 +2530,7 @@ extern void sched_clock_init_late(void); */ extern int sched_clock_stable(void); extern void clear_sched_clock_stable(void); +extern void hypervisor_sched_clock_stable(void); extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index ad64efe..a46639e 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -77,9 +77,15 @@ EXPORT_SYMBOL_GPL(sched_clock); __read_mostly int sched_clock_running; +enum { + SCHED_CLOCK_INIT = 0, + HYPERVISOR_SCHED_CLOCK_STABLE, + SCHED_CLOCK_INIT_LATE +}; + void sched_clock_init(void) { - sched_clock_running = 1; + sched_clock_running = SCHED_CLOCK_INIT; } #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK @@ -170,13 +176,14 @@ void clear_sched_clock_stable(void) smp_mb(); /* matches sched_clock_init_late() */ - 
if (sched_clock_running == 2) + if (sched_clock_running == SCHED_CLOCK_INIT_LATE) schedule_work(&sched_clock_work); } void sched_clock_init_late(void) { - sched_clock_running = 2; + if (sched_clock_running == SCHED_CLOCK_INIT) + sched_clock_running = SCHED_CLOCK_INIT_LATE; /* * Ensure that it is impossible to not do a static_key update. * @@ -186,8 +193,15 @@ void sched_clock_init_late(void) */ smp_mb(); /* matches {set,clear}_sched_clock_stable() */ - if (__sched_clock_stable_early) + if (__sched_clock_stable_early || + sched_clock_running == HYPERVISOR_SCHED_CLOCK_STABLE) { __set_sched_clock_stable(); + } +} + +void hypervisor_sched_clock_stable() +{ + sched_clock_running = HYPERVISOR_SCHED_CLOCK_STABLE; } /* Regards, Wanpeng Li > > printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n", > kvm_sched_clock_offset); > diff --git a/include/linux/sched.h b/include/linux/sched.h > index 68b35dd..8a9bbb6 100644 > --- a/include/linux/sched.h > +++ b/include/linux/sched.h > @@ -2522,7 +2522,6 @@ extern void sched_clock_init_late(void); > * is reliable after all: > */ > extern int sched_clock_stable(void); > -extern void set_sched_clock_stable(void); > extern void clear_sched_clock_stable(void); > > extern void sched_clock_tick(void); > diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c > index 7713b2b..ad64efe 100644 > --- a/kernel/sched/clock.c > +++ b/kernel/sched/clock.c > @@ -83,8 +83,15 @@ void sched_clock_init(void) > } > > #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK > +/* > + * We must start with !__sched_clock_stable because the unstable -> stable > + * transition is accurate, while the stable -> unstable transition is not. > + * > + * Similarly we start with __sched_clock_stable_early, thereby assuming we > + * will become stable, such that there's only a single 1 -> 0 transition. 
> + */ > static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable); > -static int __sched_clock_stable_early; > +static int __sched_clock_stable_early = 1; > > /* > * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset > @@ -132,24 +139,6 @@ static void __set_sched_clock_stable(void) > tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE); > } > > -void set_sched_clock_stable(void) > -{ > - __sched_clock_stable_early = 1; > - > - smp_mb(); /* matches sched_clock_init_late() */ > - > - /* > - * This really should only be called early (before > - * sched_clock_init_late()) when guestimating our sched_clock() is > - * solid. > - * > - * After that we test stability and we can negate our guess using > - * clear_sched_clock_stable, possibly from a watchdog. > - */ > - if (WARN_ON_ONCE(sched_clock_running == 2)) > - __set_sched_clock_stable(); > -} > - > static void __clear_sched_clock_stable(struct work_struct *work) > { > struct sched_clock_data *scd = this_scd(); > @@ -199,8 +188,6 @@ void sched_clock_init_late(void) > > if (__sched_clock_stable_early) > __set_sched_clock_stable(); > - else > - __clear_sched_clock_stable(NULL); > } > > /*