Using FIQ (if it is available) gives perf better insight into the
system by allowing code that runs with interrupts disabled to be
profiled.

Signed-off-by: Daniel Thompson <daniel.thomp...@linaro.org>
---
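Note (illustrative only, not part of the patch): the gain is easiest to
see with a hotspot that runs with IRQs masked. When the PMU interrupt
is taken as FIQ, samples can still land inside such a region, whereas a
maskable PMU IRQ only fires once interrupts are re-enabled. A
hypothetical example of such a region (the helper name and delay below
are made up for illustration):

        /* Hypothetical hotspot that runs with IRQs masked. */
        static void irqs_off_hotspot(void)
        {
                unsigned long flags;

                local_irq_save(flags);  /* masks IRQ; FIQ stays enabled */
                mdelay(10);             /* invisible to IRQ-based sampling */
                local_irq_restore(flags);
        }
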
 arch/arm/include/asm/pmu.h       |  4 ++++
 arch/arm/kernel/perf_event.c     |  2 +-
 arch/arm/kernel/perf_event_cpu.c | 35 ++++++++++++++++++++++++++++++++---
 arch/arm/kernel/traps.c          |  3 ++-
 4 files changed, 39 insertions(+), 5 deletions(-)

diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index b1596bd59129..2a7ea97a4a14 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -123,6 +123,8 @@ struct arm_pmu {
 
 extern const struct dev_pm_ops armpmu_dev_pm_ops;
 
+irqreturn_t armpmu_dispatch_irq(int irq, void *dev);
+
 int armpmu_register(struct arm_pmu *armpmu, int type);
 
 u64 armpmu_event_update(struct perf_event *event);
@@ -136,6 +138,8 @@ int armpmu_map_event(struct perf_event *event,
                                                [PERF_COUNT_HW_CACHE_RESULT_MAX],
                     u32 raw_event_mask);
 
+void cpu_pmu_handle_fiq(int irq);
+
 struct pmu_probe_info {
        unsigned int cpuid;
        unsigned int mask;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f7c65adaa428..5ae9adf7f18e 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -296,7 +296,7 @@ validate_group(struct perf_event *event)
        return 0;
 }
 
-static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
+irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 {
        struct arm_pmu *armpmu;
        struct platform_device *plat_device;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index a80309087a7b..5c4e9ce23389 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -36,6 +36,9 @@
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *cpu_pmu;
 
+/* Allows us to find out if an IRQ is for us (mostly used from NMI context) */
+static DEFINE_PER_CPU(int, cpu_pmu_irqs);
+
 /*
  * Despite the names, these two functions are CPU-specific and are used
  * by the OProfile/perf code.
@@ -127,6 +130,24 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
        }
 }
 
+/*
+ * This handler is called *unconditionally* from the default NMI/FIQ
+ * handler. The IRQ may have nothing to do with us, so the main job
+ * of this function is to check whether the IRQ passed in is ours
+ * before dispatching it.
+ */
+void cpu_pmu_handle_fiq(int irq)
+{
+       int cpu = smp_processor_id();
+
+       if (irq != per_cpu(cpu_pmu_irqs, cpu))
+               return;
+
+       (void)armpmu_dispatch_irq(irq,
+                                 per_cpu_ptr(&cpu_pmu->hw_events->percpu_pmu, cpu));
+}
+
+
 static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 {
        int i, err, irq, irqs;
@@ -170,9 +191,16 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
                                continue;
                        }
 
-                       err = request_irq(irq, handler,
-                                         IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-                                         per_cpu_ptr(&hw_events->percpu_pmu, i));
+                       err = request_nmi_irq(
+                           irq, IRQF_NOBALANCING, "arm-pmu",
+                           per_cpu_ptr(&hw_events->percpu_pmu, i));
+                       if (err) {
+                               err = request_irq(
+                                   irq, handler,
+                                   IRQF_NOBALANCING | IRQF_NO_THREAD,
+                                   "arm-pmu",
+                                   per_cpu_ptr(&hw_events->percpu_pmu, i));
+                       }
                        if (err) {
                                pr_err("unable to request IRQ%d for ARM PMU 
counters\n",
                                        irq);
@@ -180,6 +208,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
                        }
 
                        cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+                       per_cpu(cpu_pmu_irqs, i) = irq;
                }
        }
 
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 74c752b9db68..c581e07517ff 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -38,6 +38,7 @@
 #include <asm/tls.h>
 #include <asm/system_misc.h>
 #include <asm/opcodes.h>
+#include <asm/pmu.h>
 
 
 static const char *handler[]= {
@@ -485,7 +486,7 @@ asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
        irq = gic_ack_fiq();
 
        if (irq) {
-               /* empty - no SPI handlers (yet) */
+               cpu_pmu_handle_fiq(irq);
        } else {
 #ifdef CONFIG_SMP
                ipi_cpu_backtrace(regs);
-- 
1.9.3
