* Steven Rostedt | 2016-08-04 13:16:45 [-0400]:

>diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
>index dca7bf8cffe2..4ec2c9b205f2 100644
>--- a/include/linux/ftrace_irq.h
>+++ b/include/linux/ftrace_irq.h
>@@ -3,11 +3,34 @@
…
>+static inline void ftrace_nmi_enter(void)
>+{
>+#ifdef CONFIG_HWLAT_TRACER
>+      if (trace_hwlat_callback_enabled)
>+              trace_hwlat_callback(true);

So we take a tracepoint while we enter an NMI.

>--- a/kernel/trace/trace_hwlat.c
>+++ b/kernel/trace/trace_hwlat.c
>@@ -64,6 +64,15 @@ static struct dentry *hwlat_sample_window;  /* sample 
>window us */
> /* Save the previous tracing_thresh value */
> static unsigned long save_tracing_thresh;
> 
>+/* NMI timestamp counters */
>+static u64 nmi_ts_start;
>+static u64 nmi_total_ts;
>+static int nmi_count;
>+static int nmi_cpu;

And this is always limited to one CPU at a time?

…
>@@ -125,6 +138,19 @@ static void trace_hwlat_sample(struct hwlat_sample 
>*sample)
> #define init_time(a, b)       (a = b)
> #define time_u64(a)   a
> 
>+void trace_hwlat_callback(bool enter)
>+{
>+      if (smp_processor_id() != nmi_cpu)
>+              return;
>+
>+      if (enter)
>+              nmi_ts_start = time_get();

But more interestingly: trace_clock_local() -> sched_clock(), and in
kernel/time/sched_clock.c we do raw_read_seqcount(&cd.seq), which
means we are busted if the NMI triggers during update_clock_read_data().

>+      else {
>+              nmi_total_ts = time_get() - nmi_ts_start;
>+              nmi_count++;
>+      }
>+}

Sebastian

Reply via email to