Now that all functionality is properly serialized against CPU hotplug, remove the extra per cpu storage which holds the disabled events for cleanup. The core makes sure that cleanup happens before new events are created.
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Don Zickus <[email protected]>
Cc: Chris Metcalf <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Sebastian Siewior <[email protected]>
Cc: Nicholas Piggin <[email protected]>
Cc: Ulrich Obergfell <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Andrew Morton <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
---
 kernel/watchdog_hld.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -21,7 +21,6 @@
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
-static DEFINE_PER_CPU(struct perf_event *, dead_event);
 static struct cpumask dead_events_mask;
 
 static unsigned long hardlockup_allcpu_dumped;
@@ -204,8 +203,6 @@ void hardlockup_detector_perf_disable(vo
 
 	if (event) {
 		perf_event_disable(event);
-		this_cpu_write(watchdog_ev, NULL);
-		this_cpu_write(dead_event, event);
 		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
 		watchdog_cpus--;
 	}
@@ -221,9 +218,9 @@ void hardlockup_detector_perf_cleanup(vo
 	int cpu;
 
 	for_each_cpu(cpu, &dead_events_mask) {
-		struct perf_event *event = per_cpu(dead_event, cpu);
+		struct perf_event *event = per_cpu(watchdog_ev, cpu);
 
-		per_cpu(dead_event, cpu) = NULL;
+		per_cpu(watchdog_ev, cpu) = NULL;
 		perf_event_release_kernel(event);
 	}
 	cpumask_clear(&dead_events_mask);
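
For reference, a minimal sketch of how the two touched functions read once the
hunks above are applied. It is reconstructed from the diff context; the
initialization of 'event' in the disable path is assumed from the unchanged
parts of kernel/watchdog_hld.c and is not part of this diff.

/* Assumed unchanged context: the event comes from the per-CPU watchdog_ev slot. */
void hardlockup_detector_perf_disable(void)
{
	struct perf_event *event = this_cpu_read(watchdog_ev);

	if (event) {
		perf_event_disable(event);
		/* Defer the release; the event stays in watchdog_ev. */
		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
		watchdog_cpus--;
	}
}

void hardlockup_detector_perf_cleanup(void)
{
	int cpu;

	for_each_cpu(cpu, &dead_events_mask) {
		/* The disabled event is now taken straight from watchdog_ev. */
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		per_cpu(watchdog_ev, cpu) = NULL;
		perf_event_release_kernel(event);
	}
	cpumask_clear(&dead_events_mask);
}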

