This patch uses the PMU driver configuration held in event::hw::drv_config
to select a sink for each event that is created (the old sysFS way of
working is kept around for backward compatibility).

By proceeding in this way a sink can be used by multiple sessions
without having to play games with entries in sysFS.

Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
---
 drivers/hwtracing/coresight/coresight-etm-perf.c | 58 +++++++++++++++++++-----
 1 file changed, 46 insertions(+), 12 deletions(-)

diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index f21eb28b6782..0fbff912d515 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -14,6 +14,7 @@
 #include <linux/perf_event.h>
 #include <linux/percpu-defs.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
@@ -177,6 +178,26 @@ static void etm_free_aux(void *data)
        schedule_work(&event_data->work);
 }
 
+static struct coresight_device *etm_drv_config_sync(struct perf_event *event)
+{
+       struct coresight_device *sink = NULL;
+       struct pmu_drv_config *drv_config = perf_event_get_drv_config(event);
+
+       /*
+        * Take drv_config->lock so the pointer read below can't race with
+        * perf_drv_config_replace() in kernel/events/core.c swapping it out.
+        */
+       raw_spin_lock(&drv_config->lock);
+
+       /* Pick up the sink selected from user space, if one was provided. */
+       if (drv_config->config)
+               sink = drv_config->config;
+
+       raw_spin_unlock(&drv_config->lock);
+
+       return sink;
+}
+
 static void *etm_setup_aux(struct perf_event *event, void **pages,
                           int nr_pages, bool overwrite)
 {
@@ -190,18 +211,11 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
                return NULL;
        INIT_WORK(&event_data->work, free_event_data);
 
-       /*
-        * In theory nothing prevent tracers in a trace session from being
-        * associated with different sinks, nor having a sink per tracer.  But
-        * until we have HW with this kind of topology we need to assume tracers
-        * in a trace session are using the same sink.  Therefore go through
-        * the coresight bus and pick the first enabled sink.
-        *
-        * When operated from sysFS users are responsible to enable the sink
-        * while from perf, the perf tools will do it based on the choice made
-        * on the cmd line.  As such the "enable_sink" flag in sysFS is reset.
-        */
-       sink = coresight_get_enabled_sink(true);
+       /* First get the sink config from user space. */
+       sink = etm_drv_config_sync(event);
+       if (!sink)
+               sink = coresight_get_enabled_sink(true);
+
        if (!sink || !sink_ops(sink)->alloc_buffer)
                goto err;
 
@@ -454,6 +468,25 @@ static void etm_addr_filters_sync(struct perf_event *event)
        filters->nr_filters = i;
 }
 
+static void *etm_drv_config_validate(struct perf_event *event, char *cstr)
+{
+       char drv_config[NAME_MAX];
+       struct device *dev;
+       struct coresight_device *sink;
+
+       /* strscpy(), unlike strncpy(), guarantees NUL-termination. */
+       strscpy(drv_config, cstr, NAME_MAX);
+
+       /* Look for the device with that name on the CS bus. */
+       dev = bus_find_device_by_name(&coresight_bustype, NULL, drv_config);
+       if (!dev)
+               return ERR_PTR(-EINVAL);
+
+       sink = to_coresight_device(dev);
+       put_device(dev);
+       /* NOTE(review): lookup ref dropped above; confirm sink stays pinned. */
+       return sink;
+}
+
 int etm_perf_symlink(struct coresight_device *csdev, bool link)
 {
        char entry[sizeof("cpu9999999")];
@@ -498,6 +531,7 @@ static int __init etm_perf_init(void)
        etm_pmu.addr_filters_sync       = etm_addr_filters_sync;
        etm_pmu.addr_filters_validate   = etm_addr_filters_validate;
        etm_pmu.nr_addr_filters         = ETM_ADDR_CMP_MAX;
+       etm_pmu.drv_config_validate     = etm_drv_config_validate;
 
        ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
        if (ret == 0)
-- 
2.7.4

Reply via email to