Allocate and bind AIO user space buffers to the same memory nodes that the kernel mmap buffers are bound to.
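For reference, here is a minimal standalone sketch of the binding technique the patch applies to the AIO buffers: an anonymous private mapping (page aligned, as mbind() requires) whose pages are restricted to one node before first touch. The 512 KiB length and target node 0 are illustrative placeholders, not values taken from perf; build with -lnuma where numaif.h is available.

/*
 * Sketch only: binds an anonymous buffer to NUMA node 0.
 * Length and node are placeholders, not perf's values.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <numaif.h>		/* mbind(), MPOL_BIND */

int main(void)
{
	size_t len = 512 * 1024;		/* multiple of the page size */
	unsigned long node_mask = 1UL << 0;	/* target node 0 */
	void *buf;

	/* Anonymous private mapping: page aligned, so mbind() can apply. */
	buf = mmap(NULL, len, PROT_READ|PROT_WRITE,
		   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Restrict page allocation for [buf, buf + len) to the masked node. */
	if (mbind(buf, len, MPOL_BIND, &node_mask, sizeof(node_mask) * 8, 0)) {
		perror("mbind");
		munmap(buf, len);
		return 1;
	}

	memset(buf, 0, len);	/* first touch faults pages in on that node */
	munmap(buf, len);
	return 0;
}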
Signed-off-by: Alexey Budankov <alexey.budan...@linux.intel.com>
---
Changes in v3:
- corrected code style issues
- adjusted __aio_alloc(), __aio_bind(), __aio_free() implementation
Changes in v2:
- implemented perf_mmap__aio_alloc(), perf_mmap__aio_free(),
  perf_mmap__aio_bind() and put HAVE_LIBNUMA_SUPPORT #ifdefs in there
---
 tools/perf/util/mmap.c | 71 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 67 insertions(+), 4 deletions(-)

diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index e68ba754a8e2..e5220790f1fb 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -10,6 +10,9 @@
 #include <sys/mman.h>
 #include <inttypes.h>
 #include <asm/bug.h>
+#ifdef HAVE_LIBNUMA_SUPPORT
+#include <numaif.h>
+#endif
 #include "debug.h"
 #include "event.h"
 #include "mmap.h"
@@ -154,9 +157,68 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
 }
 
 #ifdef HAVE_AIO_SUPPORT
+
+#ifdef HAVE_LIBNUMA_SUPPORT
+static int perf_mmap__aio_alloc(struct perf_mmap *map, int index)
+{
+	map->aio.data[index] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+				    MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+	if (map->aio.data[index] == MAP_FAILED) {
+		map->aio.data[index] = NULL;
+		return -1;
+	}
+
+	return 0;
+}
+
+static void perf_mmap__aio_free(struct perf_mmap *map, int index)
+{
+	if (map->aio.data[index]) {
+		munmap(map->aio.data[index], perf_mmap__mmap_len(map));
+		map->aio.data[index] = NULL;
+	}
+}
+
+static void perf_mmap__aio_bind(struct perf_mmap *map, int index, int cpu, int affinity)
+{
+	void *data;
+	size_t mmap_len;
+	unsigned long node_mask;
+
+	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
+		data = map->aio.data[index];
+		mmap_len = perf_mmap__mmap_len(map);
+		node_mask = 1UL << cpu__get_node(cpu);
+		if (mbind(data, mmap_len, MPOL_BIND, &node_mask, sizeof(node_mask) * 8, 0)) {
+			pr_warning("failed to bind [%p-%p] to node %d\n",
+				   data, data + mmap_len, cpu__get_node(cpu));
+		}
+	}
+}
+#else
+static int perf_mmap__aio_alloc(struct perf_mmap *map, int index)
+{
+	map->aio.data[index] = malloc(perf_mmap__mmap_len(map));
+	if (map->aio.data[index] == NULL)
+		return -1;
+
+	return 0;
+}
+
+static void perf_mmap__aio_free(struct perf_mmap *map, int index)
+{
+	zfree(&(map->aio.data[index]));
+}
+
+static void perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int index __maybe_unused,
+				int cpu __maybe_unused, int affinity __maybe_unused)
+{
+}
+#endif
+
 static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
 {
-	int delta_max, i, prio;
+	int delta_max, i, prio, ret;
 
 	map->aio.nr_cblocks = mp->nr_cblocks;
 	if (map->aio.nr_cblocks) {
@@ -177,11 +239,12 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
 		}
 		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
 		for (i = 0; i < map->aio.nr_cblocks; ++i) {
-			map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
-			if (!map->aio.data[i]) {
+			ret = perf_mmap__aio_alloc(map, i);
+			if (ret == -1) {
 				pr_debug2("failed to allocate data buffer area, error %m");
 				return -1;
 			}
+			perf_mmap__aio_bind(map, i, map->cpu, mp->affinity);
 			/*
 			 * Use cblock.aio_fildes value different from -1
 			 * to denote started aio write operation on the
@@ -210,7 +273,7 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
 	int i;
 
 	for (i = 0; i < map->aio.nr_cblocks; ++i)
-		zfree(&map->aio.data[i]);
+		perf_mmap__aio_free(map, i);
 	if (map->aio.data)
 		zfree(&map->aio.data);
 	zfree(&map->aio.cblocks);
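The switch from malloc() to an anonymous private mmap() in the libnuma case is what makes the binding reliable: mbind() operates on whole pages, so the buffers must be page aligned and must not share pages with unrelated heap allocations. Without libnuma the fallback keeps the old malloc() path and the bind step becomes a no-op.

Assuming the rest of this series wires up the affinity mode, usage would look something like the following (illustrative command line, not taken from this patch):

  perf record --aio --affinity=node -e cycles -- ./workload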