Build node cpu masks for mmap data buffers. Apply node cpu
masks to tool thread every time it references data buffers
cross node or cross cpu.

Signed-off-by: Alexey Budankov <alexey.budan...@linux.intel.com>
---
Changes in v3:
- separated mask manipulations into __adjust_affinity() and __setup_affinity_mask()
- implemented mapping of c index into online cpu index
Changes in v2:
- separated AIO buffers binding to patch 2/4
---
 tools/perf/builtin-record.c | 14 ++++++++++++++
 tools/perf/util/evlist.c    |  6 +++++-
 tools/perf/util/mmap.c      | 20 +++++++++++++++++++-
 tools/perf/util/mmap.h      |  1 +
 4 files changed, 39 insertions(+), 2 deletions(-)

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index e5a108b11d46..553c2fabf3c1 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -536,6 +536,9 @@ static int record__mmap_evlist(struct record *rec,
        struct record_opts *opts = &rec->opts;
        char msg[512];
 
+       if (opts->affinity != PERF_AFFINITY_SYS)
+               cpu__setup_cpunode_map();
+
        if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
                                 opts->auxtrace_mmap_pages,
                                 opts->auxtrace_snapshot_mode,
@@ -728,6 +731,16 @@ static struct perf_event_header finished_round_event = {
        .type = PERF_RECORD_FINISHED_ROUND,
 };
 
+static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
+{
+       if (rec->opts.affinity != PERF_AFFINITY_SYS &&
+           !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
+               CPU_ZERO(&rec->affinity_mask);
+               CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
+               sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
+       }
+}
+
 static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
                                    bool overwrite)
 {
@@ -755,6 +768,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
                struct perf_mmap *map = &maps[i];
 
                if (map->base) {
+                       record__adjust_affinity(rec, map);
                        if (!record__aio_enabled(rec)) {
                                if (perf_mmap__push(map, rec, record__pushfn) != 0) {
                                        rc = -1;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 08cedb643ea6..b6680f65ccc4 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1032,7 +1032,11 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
         * Its value is decided by evsel's write_backward.
         * So &mp should not be passed through const pointer.
         */
-       struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
+       struct mmap_params mp = {
+               .nr_cblocks     = nr_cblocks,
+               .affinity       = affinity,
+               .cpu_map        = cpus
+       };
 
        if (!evlist->mmap)
                evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index e5220790f1fb..ee0230eed635 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -377,6 +377,24 @@ void perf_mmap__munmap(struct perf_mmap *map)
        auxtrace_mmap__munmap(&map->auxtrace_mmap);
 }
 
+static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
+{
+       int c, cpu, nr_cpus, node;
+
+       CPU_ZERO(&map->affinity_mask);
+       if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
+               nr_cpus = cpu_map__nr(mp->cpu_map);
+               node = cpu__get_node(map->cpu);
+               for (c = 0; c < nr_cpus; c++) {
+                       cpu = mp->cpu_map->map[c]; /* map c index to online cpu index */
+                       if (cpu__get_node(cpu) == node)
+                               CPU_SET(cpu, &map->affinity_mask);
+               }
+       } else if (mp->affinity == PERF_AFFINITY_CPU) {
+               CPU_SET(map->cpu, &map->affinity_mask);
+       }
+}
+
 int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
 {
        /*
@@ -406,7 +424,7 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
        map->fd = fd;
        map->cpu = cpu;
 
-       CPU_ZERO(&map->affinity_mask);
+       perf_mmap__setup_affinity_mask(map, mp);
 
        if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
                                &mp->auxtrace_mp, map->base, fd))
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index e566c19b242b..b3f724fad22e 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -72,6 +72,7 @@ enum bkw_mmap_state {
 struct mmap_params {
        int                         prot, mask, nr_cblocks, affinity;
        struct auxtrace_mmap_params auxtrace_mp;
+       const struct cpu_map        *cpu_map;
 };
 
 int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);

Reply via email to