Previously, when we allocated hugepages, we closed the fds corresponding
to them after we had created our mappings. Since we had mmap()'d the
pages, we didn't actually lose the reference to them, and the closed
descriptors no longer counted against the fd limit. Now that we are
going to keep all of our fds open, we will hit the fd limit much more
often, particularly when using smaller page sizes.

Fix this by unconditionally raising the fd limit to its maximum.

Signed-off-by: Anatoly Burakov <anatoly.bura...@intel.com>
---
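Notes:
For reference, below is a minimal standalone sketch of the same
getrlimit()/setrlimit() pattern this patch applies (not part of the
patch itself; the main() wrapper and fprintf() error reporting are
illustrative only):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rlimit lim;

		/* read the current soft/hard limits for open files */
		if (getrlimit(RLIMIT_NOFILE, &lim) != 0) {
			fprintf(stderr, "getrlimit: %s\n", strerror(errno));
			return 1;
		}

		/* raise the soft limit to the hard limit; an unprivileged
		 * process cannot raise rlim_max itself */
		lim.rlim_cur = lim.rlim_max;
		if (setrlimit(RLIMIT_NOFILE, &lim) != 0) {
			fprintf(stderr, "setrlimit: %s\n", strerror(errno));
			return 1;
		}

		printf("fd limit raised to %llu\n",
				(unsigned long long)lim.rlim_cur);
		return 0;
	}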
 lib/librte_eal/linuxapp/eal/eal_memory.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index dbf19499e..dfb537f59 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -17,6 +17,7 @@
 #include <sys/stat.h>
 #include <sys/queue.h>
 #include <sys/file.h>
+#include <sys/resource.h>
 #include <unistd.h>
 #include <limits.h>
 #include <sys/ioctl.h>
@@ -2204,6 +2205,25 @@ memseg_secondary_init(void)
 int
 rte_eal_memseg_init(void)
 {
+       /* increase rlimit to maximum */
+       struct rlimit lim;
+
+       if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
+               /* set limit to maximum */
+               lim.rlim_cur = lim.rlim_max;
+
+               if (setrlimit(RLIMIT_NOFILE, &lim) < 0) {
+                       RTE_LOG(DEBUG, EAL, "Setting maximum number of open files failed: %s\n",
+                                       strerror(errno));
+               } else {
+                       RTE_LOG(DEBUG, EAL, "Setting maximum number of open files to %"
+                                       PRIu64 "\n",
+                                       (uint64_t)lim.rlim_cur);
+               }
+       } else {
+               RTE_LOG(ERR, EAL, "Cannot get current resource limits\n");
+       }
+
        return rte_eal_process_type() == RTE_PROC_PRIMARY ?
 #ifndef RTE_ARCH_64
                        memseg_primary_init_32() :
-- 
2.17.1
