From: Kan Liang <[email protected]>

This patch implements core_misc PMU disable and enable functions.
core_misc PMU counters are free-running counters, so they cannot be
stopped or started; "disable" here only means that the counters are
no longer read. With the disable/enable functions it becomes possible
to "disable" core_misc events while other PMU events are stopped. For
example, we can stop reading the core_misc counters during the irq
handler.
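
For reference, a minimal sketch of the usual free-running counter
bookkeeping this relies on (illustrative only, not part of the patch;
sketch_update() is a made-up name, and it assumes
core_misc_pmu_read_counter() returns the current raw counter value,
along the lines of what core_misc_pmu_event_update() does):

    static void sketch_update(struct perf_event *event)
    {
            u64 prev, now;

            /* The counter never stops; fold in the delta since the last read. */
            do {
                    prev = local64_read(&event->hw.prev_count);
                    now = core_misc_pmu_read_counter(event);
            } while (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev);

            local64_add(now - prev, &event->count);
    }

"Disable" performs one final update like the above and then stops
sampling; "enable" re-seeds prev_count with a fresh read, so the
window spent disabled is not folded into the event count.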

Signed-off-by: Kan Liang <[email protected]>
---
 arch/x86/include/asm/perf_event.h                |  2 ++
 arch/x86/kernel/cpu/perf_event.h                 | 10 ++++++
 arch/x86/kernel/cpu/perf_event_intel.c           |  4 +++
 arch/x86/kernel/cpu/perf_event_intel_core_misc.c | 41 ++++++++++++++++++++++++
 4 files changed, 57 insertions(+)

diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index dc0f6ed..2905f4c 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -11,6 +11,8 @@
 
 #define X86_PMC_IDX_MAX                                               64
 
+#define X86_CORE_MISC_COUNTER_MAX                             64
+
 #define MSR_ARCH_PERFMON_PERFCTR0                            0xc1
 #define MSR_ARCH_PERFMON_PERFCTR1                            0xc2
 
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 3e7fd27..fb14f8a 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -239,6 +239,12 @@ struct cpu_hw_events {
        int excl_thread_id; /* 0 or 1 */
 
        /*
+        * Intel core misc
+        */
+       struct perf_event       *core_misc_events[X86_CORE_MISC_COUNTER_MAX]; /* in counter order */
+       unsigned long           core_misc_active_mask[BITS_TO_LONGS(X86_CORE_MISC_COUNTER_MAX)];
+
+       /*
         * AMD specific bits
         */
        struct amd_nb                   *amd_nb;
@@ -927,6 +933,10 @@ int p6_pmu_init(void);
 
 int knc_pmu_init(void);
 
+void intel_core_misc_pmu_enable(void);
+
+void intel_core_misc_pmu_disable(void);
+
 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index b9826a9..651a86d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1586,6 +1586,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        if (!x86_pmu.late_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        __intel_pmu_disable_all();
+       if (!bitmap_empty(cpuc->core_misc_active_mask, X86_CORE_MISC_COUNTER_MAX))
+               intel_core_misc_pmu_disable();
        handled = intel_pmu_drain_bts_buffer();
        handled += intel_bts_interrupt();
        status = intel_pmu_get_status();
@@ -1671,6 +1673,8 @@ again:
 
 done:
        __intel_pmu_enable_all(0, true);
+       if (!bitmap_empty(cpuc->core_misc_active_mask, X86_CORE_MISC_COUNTER_MAX))
+               intel_core_misc_pmu_enable();
        /*
         * Only unmask the NMI after the overflow counters
         * have been reset. This avoids spurious NMIs on
diff --git a/arch/x86/kernel/cpu/perf_event_intel_core_misc.c b/arch/x86/kernel/cpu/perf_event_intel_core_misc.c
index c6c82ac..4efe842 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_core_misc.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_core_misc.c
@@ -250,12 +250,19 @@ static void __core_misc_pmu_event_start(struct core_misc_pmu *pmu,
 static void core_misc_pmu_event_start(struct perf_event *event, int mode)
 {
        struct core_misc_pmu *pmu = get_core_misc_pmu(event);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       int idx = event->hw.idx;
        unsigned long flags;
 
        if (pmu == NULL)
                return;
 
        spin_lock_irqsave(&pmu->lock, flags);
+
+       if (pmu->pmu->type == perf_intel_core_misc_thread) {
+               cpuc->core_misc_events[idx] = event;
+               __set_bit(idx, cpuc->core_misc_active_mask);
+       }
        __core_misc_pmu_event_start(pmu, event);
        spin_unlock_irqrestore(&pmu->lock, flags);
 }
@@ -264,6 +271,7 @@ static void core_misc_pmu_event_stop(struct perf_event *event, int mode)
 {
        struct core_misc_pmu *pmu = get_core_misc_pmu(event);
        struct hw_perf_event *hwc = &event->hw;
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        unsigned long flags;
 
        if (pmu == NULL)
@@ -273,6 +281,8 @@ static void core_misc_pmu_event_stop(struct perf_event *event, int mode)
 
        /* mark event as deactivated and stopped */
        if (!(hwc->state & PERF_HES_STOPPED)) {
+               if (__test_and_clear_bit(hwc->idx, cpuc->core_misc_active_mask))
+                       cpuc->core_misc_events[hwc->idx] = NULL;
                WARN_ON_ONCE(pmu->n_active <= 0);
                pmu->n_active--;
 
@@ -294,6 +304,32 @@ static void core_misc_pmu_event_stop(struct perf_event *event, int mode)
        spin_unlock_irqrestore(&pmu->lock, flags);
 }
 
+void intel_core_misc_pmu_enable(void)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct perf_event *event;
+       u64 start;
+       int bit;
+
+       for_each_set_bit(bit, cpuc->core_misc_active_mask,
+                        X86_CORE_MISC_COUNTER_MAX) {
+               event = cpuc->core_misc_events[bit];
+               start = core_misc_pmu_read_counter(event);
+               local64_set(&event->hw.prev_count, start);
+       }
+}
+
+void intel_core_misc_pmu_disable(void)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       int bit;
+
+       for_each_set_bit(bit, cpuc->core_misc_active_mask,
+                        X86_CORE_MISC_COUNTER_MAX) {
+               core_misc_pmu_event_update(cpuc->core_misc_events[bit]);
+       }
+}
+
 static void core_misc_pmu_event_del(struct perf_event *event, int mode)
 {
        core_misc_pmu_event_stop(event, PERF_EF_UPDATE);
@@ -863,6 +899,11 @@ static void __init core_misc_pmus_register(void)
                        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
                };
 
+               if (type->type == perf_intel_core_misc_thread) {
+                       type->pmu.pmu_disable = (void *) intel_core_misc_pmu_disable;
+                       type->pmu.pmu_enable = (void *) intel_core_misc_pmu_enable;
+               }
+
                err = perf_pmu_register(&type->pmu, type->name, -1);
                if (WARN_ON(err))
                        pr_info("Failed to register PMU %s error %d\n",
-- 
1.8.3.1
