When we ask to reserve virtual areas, we usually pad the mapping size with the alignment, and that extra memory ends up wasted. Wasting a gigabyte of VA space while trying to reserve one gigabyte is prohibitively expensive on 32-bit, so once the mapping is done, unmap the unneeded space. Also check that the padded mapping size still fits in a size_t before handing it to mmap().
Signed-off-by: Anatoly Burakov <anatoly.bura...@intel.com>
---
 lib/librte_eal/common/eal_common_memory.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 24a9ed5..8dd026a 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -75,8 +75,13 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
 
 	do {
 		map_sz = no_align ? *size : *size + page_sz;
+		if (map_sz > SIZE_MAX) {
+			RTE_LOG(ERR, EAL, "Map size too big\n");
+			rte_errno = E2BIG;
+			return NULL;
+		}
 
-		mapped_addr = mmap(requested_addr, map_sz, PROT_READ,
+		mapped_addr = mmap(requested_addr, (size_t)map_sz, PROT_READ,
 				mmap_flags, -1, 0);
 		if (mapped_addr == MAP_FAILED && allow_shrink)
 			*size -= page_sz;
@@ -113,8 +118,22 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
 		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory into secondary processes\n");
 	}
 
-	if (unmap)
+	if (unmap) {
 		munmap(mapped_addr, map_sz);
+	} else if (!no_align) {
+		void *unmap_pre, *unmap_post, *map_end;
+		size_t pre_len, post_len;
+
+		/* unmap all of the extra space */
+		unmap_pre = mapped_addr;
+		pre_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
+		map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
+		unmap_post = RTE_PTR_ADD(aligned_addr, *size);
+		post_len = RTE_PTR_DIFF(map_end, unmap_post);
+
+		munmap(unmap_pre, pre_len);
+		munmap(unmap_post, post_len);
+	}
 
 	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
 			aligned_addr, *size);
-- 
2.7.4
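
For context, below is a minimal standalone sketch of the reserve-then-trim
technique the patch applies inside eal_get_virtual_area(). It is not DPDK
code: the helpers reserve_aligned() and align_up() are hypothetical names,
PROT_NONE stands in for the EAL's PROT_READ, and the EAL flags and error
reporting are omitted. The idea is the same: over-reserve by the alignment,
pick the aligned address inside the mapping, then munmap() the slack before
and after it.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>

	/* Round addr up to the next multiple of align (a power of two). */
	static void *align_up(void *addr, size_t align)
	{
		uintptr_t a = (uintptr_t)addr;

		return (void *)((a + align - 1) & ~(align - 1));
	}

	/* Reserve size bytes of VA space aligned to align (both page multiples). */
	static void *reserve_aligned(size_t size, size_t align)
	{
		/* over-reserve by the alignment so an aligned block must fit;
		 * guard against overflow, akin to the patch's SIZE_MAX check */
		size_t map_sz = size + align;
		void *base, *aligned;
		size_t pre_len, post_len;

		if (align == 0 || map_sz < size)
			return NULL;

		base = mmap(NULL, map_sz, PROT_NONE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (base == MAP_FAILED)
			return NULL;

		aligned = align_up(base, align);
		pre_len = (uintptr_t)aligned - (uintptr_t)base;
		post_len = map_sz - size - pre_len;

		/* trim the slack so only size bytes stay reserved */
		if (pre_len != 0)
			munmap(base, pre_len);
		if (post_len != 0)
			munmap((char *)aligned + size, post_len);
		return aligned;
	}

	int main(void)
	{
		size_t size = 16u << 20;  /* 16 MiB reservation */
		size_t align = 2u << 20;  /* 2 MiB (hugepage) alignment */
		void *va = reserve_aligned(size, align);

		printf("reserved 0x%zx bytes at %p\n", size, va);
		return va != NULL ? EXIT_SUCCESS : EXIT_FAILURE;
	}

The arithmetic mirrors the patch: pre_len is the gap between the mmap()
return value and the aligned address, and post_len is whatever remains of
the original map_sz past aligned_addr + *size.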