Keep track of events opened with direct access to the hardware counters
and modify the permissions while they are open.

The strategy used here is the same as the one used by x86: every time an
event is mapped, the permissions are set if required. The atomic field
added to the mm_context keeps track of the number of events opened and
lets us deactivate the permissions once all of them are unmapped.
We also need to update the permissions in the context-switch code so
that tasks keep the right permissions.
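
For reference, once PMUSERENR_EL0.CR is set for a task's mm, EL0 can
read the cycle counter directly. A minimal userspace sketch, not part
of this patch (read_pmccntr() is an illustrative helper):

  #include <stdint.h>

  /* Traps to EL1 unless PMUSERENR_EL0.CR (or .EN) is set. */
  static inline uint64_t read_pmccntr(void)
  {
          uint64_t cval;

          asm volatile("mrs %0, pmccntr_el0" : "=r" (cval));
          return cval;
  }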

Signed-off-by: Raphael Gault <raphael.ga...@arm.com>
---
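Not part of the patch, for context only: a sketch of the usual perf
self-monitoring read sequence this enables, assuming the documented
struct perf_event_mmap_page layout ("pc" is the event's mmap'ed page;
read_evcntr() and read_event_count() are illustrative helpers, and
event counter reads need PMUSERENR_EL0.ER):

  #include <linux/perf_event.h>
  #include <stdint.h>

  /* PMSELR_EL0 and PMXEVCNTR_EL0 are EL0-accessible once
   * PMUSERENR_EL0.ER is set. */
  static uint64_t read_evcntr(uint32_t idx)
  {
          uint64_t cval;

          asm volatile("msr pmselr_el0, %0" : : "r" ((uint64_t)idx));
          asm volatile("isb; mrs %0, pmxevcntr_el0" : "=r" (cval));
          return cval;
  }

  static uint64_t read_event_count(volatile struct perf_event_mmap_page *pc)
  {
          uint32_t seq, idx;
          uint64_t count;

          do {
                  seq = pc->lock;
                  asm volatile("" ::: "memory");
                  idx = pc->index;        /* 0 means no direct access */
                  count = pc->offset;
                  if (idx)
                          count += read_evcntr(idx - 1);
                  asm volatile("" ::: "memory");
          } while (pc->lock != seq);

          return count;
  }
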
 arch/arm64/include/asm/mmu.h         |  6 +++++
 arch/arm64/include/asm/mmu_context.h |  2 ++
 arch/arm64/include/asm/perf_event.h  | 14 ++++++++++
 drivers/perf/arm_pmu.c               | 38 ++++++++++++++++++++++++++++
 4 files changed, 60 insertions(+)

diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 67ef25d037ea..9de4cf0b17c7 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -29,6 +29,12 @@
 
 typedef struct {
        atomic64_t      id;
+
+       /*
+        * non-zero if userspace has direct access to the
+        * hardware counters.
+        */
+       atomic_t        pmu_direct_access;
        void            *vdso;
        unsigned long   flags;
 } mm_context_t;
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 2da3e478fd8f..33494af613d8 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -32,6 +32,7 @@
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
 #include <asm/pgtable.h>
+#include <asm/perf_event.h>
 #include <asm/sysreg.h>
 #include <asm/tlbflush.h>
 
@@ -235,6 +236,7 @@ static inline void __switch_mm(struct mm_struct *next)
        }
 
        check_and_switch_context(next, cpu);
+       perf_switch_user_access(next);
 }
 
 static inline void
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index c593761ba61c..32a6d604bb3b 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -19,6 +19,7 @@
 
 #include <asm/stack_pointer.h>
 #include <asm/ptrace.h>
+#include <linux/mm_types.h>
 
 #define        ARMV8_PMU_MAX_COUNTERS  32
 #define        ARMV8_PMU_COUNTER_MASK  (ARMV8_PMU_MAX_COUNTERS - 1)
@@ -234,4 +235,17 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
        (regs)->pstate = PSR_MODE_EL1h; \
 }
 
+static inline void perf_switch_user_access(struct mm_struct *mm)
+{
+       if (!IS_ENABLED(CONFIG_PERF_EVENTS))
+               return;
+
+       if (atomic_read(&mm->context.pmu_direct_access)) {
+               write_sysreg(ARMV8_PMU_USERENR_ER|ARMV8_PMU_USERENR_CR,
+                            pmuserenr_el0);
+       } else {
+               write_sysreg(0, pmuserenr_el0);
+       }
+}
+
 #endif
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index eec75b97e7ea..0e5588cd2f39 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -24,6 +24,7 @@
 #include <linux/irqdesc.h>
 
 #include <asm/irq_regs.h>
+#include <asm/mmu_context.h>
 
 static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
 static DEFINE_PER_CPU(int, cpu_irq);
@@ -777,6 +778,41 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
                                            &cpu_pmu->node);
 }
 
+static void refresh_pmuserenr(void *mm)
+{
+       perf_switch_user_access(mm);
+}
+
+static void armpmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
+{
+       if (!(event->hw.flags & ARMPMU_EL0_RD_CNTR))
+               return;
+
+       /*
+        * This function relies on not being called concurrently in two
+        * tasks in the same mm.  Otherwise one task could observe
+        * pmu_direct_access > 1 and return all the way back to
+        * userspace with user access disabled while another task is still
+        * doing on_each_cpu() to enable user access.
+        *
+        * For now, this can't happen because all callers hold mmap_sem
+        * for write.  If this changes, we'll need a different solution.
+        */
+       lockdep_assert_held_exclusive(&mm->mmap_sem);
+
+       if (atomic_inc_return(&mm->context.pmu_direct_access) == 1)
+               on_each_cpu(refresh_pmuserenr, mm, 1);
+}
+
+static void armpmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
+{
+       if (!(event->hw.flags & ARMPMU_EL0_RD_CNTR))
+               return;
+
+       if (atomic_dec_and_test(&mm->context.pmu_direct_access))
+               on_each_cpu_mask(mm_cpumask(mm), refresh_pmuserenr, mm, 1);
+}
+
 static struct arm_pmu *__armpmu_alloc(gfp_t flags)
 {
        struct arm_pmu *pmu;
@@ -798,6 +834,8 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
                .pmu_enable     = armpmu_enable,
                .pmu_disable    = armpmu_disable,
                .event_init     = armpmu_event_init,
+               .event_mapped   = armpmu_event_mapped,
+               .event_unmapped = armpmu_event_unmapped,
                .add            = armpmu_add,
                .del            = armpmu_del,
                .start          = armpmu_start,
-- 
2.17.1
