We have systems where the PMU interrupts are PPIs, and so per-cpu. This
patch adds support for per-cpu PMU interrupts to the perf event code.

Note that because the dev_id passed to request_percpu_irq() must be
per-cpu data, we now pass cpu_hw_events as the dev_id rather than the
arm_pmu. The only place the dev_id was used was in
armpmu_platform_irq(), which is fixed up accordingly.

Signed-off-by: Chris Smith <chris.sm...@st.com>
---
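(Note for reviewers, not part of the commit message: the per-cpu IRQ
pattern used below looks roughly like the sketch that follows.
pmu_irq_setup() and pmu_irq_teardown() are made-up names for
illustration, and the sketch assumes the cpu_hw_events per-cpu area
and the armpmu_{enable,disable}_percpu_irq wrappers added by this
patch. request_percpu_irq() registers the handler for every CPU at
once and requires per-cpu dev_id data, while enable_percpu_irq() and
disable_percpu_irq() act only on the calling CPU, hence the
on_each_cpu() broadcasts.)

	static int pmu_irq_setup(int irq, irq_handler_t handler)
	{
		int err;

		/* One request covers all CPUs; dev_id must be per-cpu data. */
		err = request_percpu_irq(irq, handler, "arm-pmu", &cpu_hw_events);
		if (err)
			return err;

		/* Unmask the PPI on each core in turn. */
		on_each_cpu(armpmu_enable_percpu_irq, (void *)irq, 1);
		return 0;
	}

	static void pmu_irq_teardown(int irq)
	{
		/* Mask on each core, then tear down the per-cpu handler. */
		on_each_cpu(armpmu_disable_percpu_irq, (void *)irq, 1);
		free_percpu_irq(irq, &cpu_hw_events);
	}
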
 arch/arm/kernel/perf_event.c    |   91 +++++++++++++++++++++++++++++---------
 arch/arm/kernel/perf_event_v6.c |    3 +-
 arch/arm/kernel/perf_event_v7.c |    3 +-
 3 files changed, 71 insertions(+), 26 deletions(-)

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index b2abfa1..add9064 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -14,6 +14,7 @@
 
 #include <linux/bitmap.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/perf_event.h>
@@ -355,13 +356,27 @@ validate_group(struct perf_event *event)
 
 static irqreturn_t armpmu_platform_irq(int irq, void *dev)
 {
-       struct arm_pmu *armpmu = (struct arm_pmu *) dev;
+       struct pmu_hw_events *cpuc = (struct pmu_hw_events *)dev;
+       struct arm_pmu *armpmu = to_arm_pmu((*(cpuc->events))->pmu);
+
        struct platform_device *plat_device = armpmu->plat_device;
        struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
 
        return plat->handle_irq(irq, dev, armpmu->handle_irq);
 }
 
+/* Wrap enable_percpu_irq up as a smp_call_func_t */
+static void armpmu_enable_percpu_irq(void *irq)
+{
+       enable_percpu_irq((int)irq, IRQ_TYPE_NONE);
+}
+
+/* Wrap disable_percpu_irq up as a smp_call_func_t */
+static void armpmu_disable_percpu_irq(void *irq)
+{
+       disable_percpu_irq((int)irq);
+}
+
 static void
 armpmu_release_hardware(struct arm_pmu *armpmu)
 {
@@ -379,7 +394,14 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
                if (irq >= 0) {
                        if (plat && plat->disable_irq)
                                plat->disable_irq(irq);
-                       free_irq(irq, armpmu);
+                       else if (irq_is_per_cpu(irq))
+                               on_each_cpu(armpmu_disable_percpu_irq,
+                                               (void *)irq, 1);
+
+                       if (irq_is_per_cpu(irq))
+                               free_percpu_irq(irq, &cpu_hw_events);
+                       else
+                               free_irq(irq, &per_cpu(cpu_hw_events, i));
                }
        }
 
@@ -421,28 +443,53 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
                if (irq < 0)
                        continue;
 
-               /*
-                * If we have a single PMU interrupt that we can't shift,
-                * assume that we're running on a uniprocessor machine and
-                * continue. Otherwise, continue without this interrupt.
-                */
-               if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
-                       pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-                                   irq, i);
-                       continue;
+               if (irq_is_per_cpu(irq)) {
+                       /*
+                        * We assume that if the PMU IRQ is per-cpu, then
+                        * there is only one.
+                        */
+                       WARN_ON(irqs > 1);
+
+                       err = request_percpu_irq(irq, handle_irq,
+                                       "arm-pmu", &cpu_hw_events);
+                       if (err) {
+                               pr_err("unable to request IRQ%d for ARM PMU counters\n",
+                                       irq);
+                               armpmu_release_hardware(armpmu);
+                               return err;
+                       } else if (plat && plat->enable_irq) {
+                               plat->enable_irq(irq);
+                       } else {
+                               on_each_cpu(armpmu_enable_percpu_irq,
+                                               (void *)irq, 1);
+                       }
+
+               } else {
+                       /*
+                        * If we have a single PMU interrupt that we can't
+                        * shift, assume that we're running on a uniprocessor
+                        * machine and continue. Otherwise, continue without
+                        * this interrupt.
+                        */
+                       if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+                               pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+                                           irq, i);
+                               continue;
+                       }
+
+                       err = request_irq(irq, handle_irq,
+                                       IRQF_DISABLED | IRQF_NOBALANCING,
+                                       "arm-pmu", &per_cpu(cpu_hw_events, i));
+
+                       if (err) {
+                               pr_err("unable to request IRQ%d for ARM PMU counters\n",
+                                       irq);
+                               armpmu_release_hardware(armpmu);
+                               return err;
+                       } else if (plat && plat->enable_irq)
+                               plat->enable_irq(irq);
                }
 
-               err = request_irq(irq, handle_irq,
-                                 IRQF_DISABLED | IRQF_NOBALANCING,
-                                 "arm-pmu", armpmu);
-               if (err) {
-                       pr_err("unable to request IRQ%d for ARM PMU counters\n",
-                               irq);
-                       armpmu_release_hardware(armpmu);
-                       return err;
-               } else if (plat && plat->enable_irq)
-                       plat->enable_irq(irq);
-
                cpumask_set_cpu(i, &armpmu->active_irqs);
        }
 
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index b78af0c..6b348f5 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -473,7 +473,7 @@ armv6pmu_handle_irq(int irq_num,
 {
        unsigned long pmcr = armv6_pmcr_read();
        struct perf_sample_data data;
-       struct pmu_hw_events *cpuc;
+       struct pmu_hw_events *cpuc = (struct pmu_hw_events *)dev;
        struct pt_regs *regs;
        int idx;
 
@@ -491,7 +491,6 @@ armv6pmu_handle_irq(int irq_num,
 
        perf_sample_data_init(&data, 0);
 
-       cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4d7095a..56ddad7 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -933,7 +933,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 {
        u32 pmnc;
        struct perf_sample_data data;
-       struct pmu_hw_events *cpuc;
+       struct pmu_hw_events *cpuc = (struct pmu_hw_events *)dev;
        struct pt_regs *regs;
        int idx;
 
@@ -955,7 +955,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 
        perf_sample_data_init(&data, 0);
 
-       cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;
-- 
1.7.6.5
