Currently, the mem config is mapped without using the virtual area
reservation infrastructure, which means it is mapped at an arbitrary
location. This may cause failures to map the shared config in a
secondary process, due to things like PCI whitelist arguments causing
memory to be allocated in the address space where the primary process
has already mapped the shared mem config.

Fix this by using virtual area reservation to reserve space for the
mem config, thereby avoiding the problem and reserving the shared
config (hopefully) far away from any normal memory allocations.

Cc: sta...@dpdk.org

Signed-off-by: Anatoly Burakov <anatoly.bura...@intel.com>
---

Notes:
    v4:
    - Fix mem config length to always be page-aligned

    v3:
    - Fix alignment issues with base address

    v2:
    - Fix issue with unneeded ADDR_IS_HINT flag that broke things on
      32-bit builds

 lib/librte_eal/freebsd/eal/eal.c | 28 +++++++++++++++++++++------
 lib/librte_eal/linux/eal/eal.c   | 33 +++++++++++++++++++++++---------
 2 files changed, 46 insertions(+), 15 deletions(-)

diff --git a/lib/librte_eal/freebsd/eal/eal.c b/lib/librte_eal/freebsd/eal/eal.c
index d53f0fe69..ce3c5ed2d 100644
--- a/lib/librte_eal/freebsd/eal/eal.c
+++ b/lib/librte_eal/freebsd/eal/eal.c
@@ -219,7 +219,10 @@ eal_parse_sysfs_value(const char *filename, unsigned long *val)
 static int
 rte_eal_config_create(void)
 {
-	void *rte_mem_cfg_addr;
+	size_t page_sz = sysconf(_SC_PAGE_SIZE);
+	size_t cfg_len = sizeof(*rte_config.mem_config);
+	size_t cfg_len_aligned = RTE_ALIGN(cfg_len, page_sz);
+	void *rte_mem_cfg_addr, *mapped_mem_cfg_addr;
 	int retval;
 
 	const char *pathname = eal_runtime_config_path();
@@ -236,7 +239,7 @@ rte_eal_config_create(void)
 		}
 	}
 
-	retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
+	retval = ftruncate(mem_cfg_fd, cfg_len);
 	if (retval < 0){
 		close(mem_cfg_fd);
 		mem_cfg_fd = -1;
@@ -254,15 +257,28 @@ rte_eal_config_create(void)
 		return -1;
 	}
 
-	rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
-			PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
-
-	if (rte_mem_cfg_addr == MAP_FAILED){
+	/* reserve space for config */
+	rte_mem_cfg_addr = eal_get_virtual_area(NULL, &cfg_len_aligned, page_sz,
+			0, 0);
+	if (rte_mem_cfg_addr == NULL) {
 		RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config\n");
 		close(mem_cfg_fd);
 		mem_cfg_fd = -1;
 		return -1;
 	}
+
+	/* remap the actual file into the space we've just reserved */
+	mapped_mem_cfg_addr = mmap(rte_mem_cfg_addr,
+			cfg_len_aligned, PROT_READ | PROT_WRITE,
+			MAP_SHARED | MAP_FIXED, mem_cfg_fd, 0);
+	if (mapped_mem_cfg_addr == MAP_FAILED) {
+		RTE_LOG(ERR, EAL, "Cannot remap memory for rte_config\n");
+		munmap(rte_mem_cfg_addr, cfg_len);
+		close(mem_cfg_fd);
+		mem_cfg_fd = -1;
+		return -1;
+	}
+
 	memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
 	rte_config.mem_config = rte_mem_cfg_addr;
diff --git a/lib/librte_eal/linux/eal/eal.c b/lib/librte_eal/linux/eal/eal.c
index 34db78753..55a3bb971 100644
--- a/lib/librte_eal/linux/eal/eal.c
+++ b/lib/librte_eal/linux/eal/eal.c
@@ -305,7 +305,10 @@ eal_parse_sysfs_value(const char *filename, unsigned long *val)
 static int
 rte_eal_config_create(void)
 {
-	void *rte_mem_cfg_addr;
+	size_t page_sz = sysconf(_SC_PAGE_SIZE);
+	size_t cfg_len = sizeof(*rte_config.mem_config);
+	size_t cfg_len_aligned = RTE_ALIGN(cfg_len, page_sz);
+	void *rte_mem_cfg_addr, *mapped_mem_cfg_addr;
 	int retval;
 
 	const char *pathname = eal_runtime_config_path();
@@ -317,7 +320,7 @@ rte_eal_config_create(void)
 	if (internal_config.base_virtaddr != 0)
 		rte_mem_cfg_addr = (void *)
 			RTE_ALIGN_FLOOR(internal_config.base_virtaddr -
-			sizeof(struct rte_mem_config), sysconf(_SC_PAGE_SIZE));
+			sizeof(struct rte_mem_config), page_sz);
 	else
 		rte_mem_cfg_addr = NULL;
 
@@ -330,7 +333,7 @@ rte_eal_config_create(void)
 		}
 	}
 
-	retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
+	retval = ftruncate(mem_cfg_fd, cfg_len);
 	if (retval < 0){
 		close(mem_cfg_fd);
 		mem_cfg_fd = -1;
@@ -348,13 +351,25 @@ rte_eal_config_create(void)
 		return -1;
 	}
 
-	rte_mem_cfg_addr = mmap(rte_mem_cfg_addr, sizeof(*rte_config.mem_config),
-			PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
-
-	if (rte_mem_cfg_addr == MAP_FAILED){
-		close(mem_cfg_fd);
-		mem_cfg_fd = -1;
+	/* reserve space for config */
+	rte_mem_cfg_addr = eal_get_virtual_area(rte_mem_cfg_addr,
+			&cfg_len_aligned, page_sz, 0, 0);
+	if (rte_mem_cfg_addr == NULL) {
 		RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config\n");
+		close(mem_cfg_fd);
+		mem_cfg_fd = -1;
+		return -1;
+	}
+
+	/* remap the actual file into the space we've just reserved */
+	mapped_mem_cfg_addr = mmap(rte_mem_cfg_addr,
+			cfg_len_aligned, PROT_READ | PROT_WRITE,
+			MAP_SHARED | MAP_FIXED, mem_cfg_fd, 0);
+	if (mapped_mem_cfg_addr == MAP_FAILED) {
+		munmap(rte_mem_cfg_addr, cfg_len);
+		close(mem_cfg_fd);
+		mem_cfg_fd = -1;
+		RTE_LOG(ERR, EAL, "Cannot remap memory for rte_config\n");
 		return -1;
 	}
-- 
2.17.1
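
For readers unfamiliar with the reserve-then-remap approach the patch
relies on, the following standalone sketch illustrates the general
pattern: a stretch of address space is first reserved with an anonymous
PROT_NONE mapping, and the shared file is then mapped over that
reservation with MAP_FIXED. This is only a simplified illustration
under assumptions of my own; the helper name reserve_then_map and the
file path are made up for the example, and it does not reproduce the
internals of eal_get_virtual_area(), which performs the reservation
step inside EAL.

/*
 * Illustrative sketch, not part of the patch: generic
 * reserve-then-remap of a shared file at a reserved address.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

static void *
reserve_then_map(int fd, size_t len, size_t page_sz)
{
	void *reserved, *mapped;
	/* round the mapping length up to a page boundary */
	size_t len_aligned = (len + page_sz - 1) & ~(page_sz - 1);

	/* step 1: reserve address space only, no backing store yet */
	reserved = mmap(NULL, len_aligned, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (reserved == MAP_FAILED)
		return NULL;

	/* step 2: map the file over the reservation at a fixed address */
	mapped = mmap(reserved, len_aligned, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_FIXED, fd, 0);
	if (mapped == MAP_FAILED) {
		munmap(reserved, len_aligned);
		return NULL;
	}
	return mapped;
}

int
main(void)
{
	size_t page_sz = (size_t)sysconf(_SC_PAGE_SIZE);
	/* hypothetical shared file standing in for the runtime config */
	int fd = open("/tmp/shared_cfg_example", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, (off_t)page_sz) < 0)
		return 1;

	void *cfg = reserve_then_map(fd, page_sz, page_sz);
	if (cfg == NULL)
		return 1;

	memset(cfg, 0, page_sz);
	printf("shared config mapped at %p\n", cfg);
	return 0;
}

Reserving first means the kernel chooses a region that nothing else in
the process currently occupies, so the subsequent MAP_FIXED remap only
overwrites our own reservation rather than an unrelated mapping.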