Signed-off-by: Cunming Liang <cunming.liang@intel.com>
---
 lib/librte_eal/linuxapp/eal/eal_thread.c | 144 +++++++++++++++++--------------
 1 file changed, 81 insertions(+), 63 deletions(-)
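Not part of the patch, added here for review context only: a minimal usage
sketch of the per-pthread API this change exercises, assuming that
rte_pthread_create() and rte_pthread_prepare() keep the prototypes visible in
the hunks below and are exported through the EAL headers (the exact declaring
header and the error handling are assumptions, not taken from the patch).

#include <stdio.h>
#include <pthread.h>

#include <rte_eal.h>
#include <rte_lcore.h>

static void *
worker(void *arg)
{
	(void)arg;

	/* take a linear thread id and bind _lcore_id to the running core */
	if (rte_pthread_prepare() < 0)
		return NULL;

	printf("worker running, lcore id %u\n", rte_lcore_id());
	return NULL;
}

int
main(int argc, char **argv)
{
	pthread_t tid;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* the new thread starts with the caller lcore's cpuset via pthread attrs */
	if (rte_pthread_create(&tid, worker, NULL) < 0)
		return -1;

	pthread_join(tid, NULL);
	return 0;
}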
diff --git a/lib/librte_eal/linuxapp/eal/eal_thread.c b/lib/librte_eal/linuxapp/eal/eal_thread.c
index a584e3b..05cebe4 100644
--- a/lib/librte_eal/linuxapp/eal/eal_thread.c
+++ b/lib/librte_eal/linuxapp/eal/eal_thread.c
@@ -103,13 +103,6 @@ rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
 }
 
-/* set affinity for current thread */
-static int
-__eal_thread_set_affinity(pthread_t thread, unsigned lcore)
-{
-
-	int s;
-
 /*
  * According to the section VERSIONS of the CPU_ALLOC man page:
  *
@@ -124,38 +117,62 @@ __eal_thread_set_affinity(pthread_t thread, unsigned lcore)
  * first appeared in glibc 2.7.
  */
 #if defined(CPU_ALLOC)
+#define INIT_CPUSET(size, cpusetp) \
+	do { \
+		cpusetp = CPU_ALLOC(RTE_MAX_LCORE); \
+		if (cpusetp == NULL) \
+			rte_panic("CPU_ALLOC failed\n"); \
+		\
+		size = CPU_ALLOC_SIZE(RTE_MAX_LCORE); \
+		CPU_ZERO_S(size, cpusetp); \
+	} while(0)
+
+#define CLEAN_CPUSET(cpusetp) \
+	do { \
+		CPU_FREE(cpusetp); \
+	} while(0)
+
+#define SET_CPUSET(lcore, size, cpusetp) \
+	CPU_SET_S(lcore, size, cpusetp)
+
+#else /* CPU_ALLOC */
+
+#define INIT_CPUSET(size, cpusetp) \
+	do { \
+		cpu_set_t cpuset; \
+		cpusetp = &cpuset; \
+		size = sizeof(cpuset); \
+		CPU_ZERO(&cpuset); \
+	} while(0)
+
+#define CLEAN_CPUSET(cpusetp)
+
+#define SET_CPUSET(lcore, size, cpusetp) \
+	CPU_SET(lcore, cpusetp);
+
+#endif
+
+
+/* set affinity for current thread */
+static int
+__eal_thread_set_affinity(pthread_t thread, unsigned lcore)
+{
+	int s;
 	size_t size;
 	cpu_set_t *cpusetp;
 
-	cpusetp = CPU_ALLOC(RTE_MAX_LCORE);
-	if (cpusetp == NULL) {
-		RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n");
-		return -1;
-	}
-
-	size = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
-	CPU_ZERO_S(size, cpusetp);
-	CPU_SET_S(lcore, size, cpusetp);
+	INIT_CPUSET(size, cpusetp);
+	SET_CPUSET(lcore, size, cpusetp);
 
 	s = pthread_setaffinity_np(thread, size, cpusetp);
 	if (s != 0) {
 		RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
-		CPU_FREE(cpusetp);
+		CLEAN_CPUSET(cpusetp);
 		return -1;
 	}
 
-	CPU_FREE(cpusetp);
-#else /* CPU_ALLOC */
-	cpu_set_t cpuset;
-	CPU_ZERO( &cpuset );
-	CPU_SET(lcore, &cpuset );
+	CLEAN_CPUSET(cpusetp);
 
-	s = pthread_setaffinity_np(thread, sizeof( cpuset ), &cpuset);
-	if (s != 0) {
-		RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
-		return -1;
-	}
-#endif
 	return 0;
 }
 
@@ -248,6 +265,9 @@ __put_linear_tid(uint64_t tid)
 	struct eal_thread_cb *pcb;
 	uint8_t shift;
 
+	if (tid >= RTE_MAX_THREAD)
+		return;
+
 	mz = rte_memzone_lookup(LINEAR_THREAD_ID_POOL);
 	if (!mz)
 		return;
@@ -352,55 +372,28 @@ rte_pthread_assign_cpuset(pthread_t thread, unsigned lcore[], unsigned num)
 {
 	int s;
 	unsigned i;
-
-#if defined(CPU_ALLOC)
 	size_t size;
 	cpu_set_t *cpusetp;
 
-	cpusetp = CPU_ALLOC(RTE_MAX_LCORE);
-	if (cpusetp == NULL) {
-		RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n");
-		return -1;
-	}
-
-	size = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
-	CPU_ZERO_S(size, cpusetp);
+	INIT_CPUSET(size, cpusetp);
 
 	for (i = 0; i < num; i++) {
 		if (!rte_lcore_is_enabled(lcore[i])) {
 			RTE_LOG(ERR, EAL, "lcore %u not enabled\n", lcore[i]);
-			CPU_FREE(cpusetp);
+			CLEAN_CPUSET(cpusetp);
 			return -1;
 		}
-		CPU_SET_S(lcore[i], size, cpusetp);
+		SET_CPUSET(lcore[i], size, cpusetp);
 	}
 
 	s = pthread_setaffinity_np(thread, size, cpusetp);
 	if (s != 0) {
 		RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
-		CPU_FREE(cpusetp);
+		CLEAN_CPUSET(cpusetp);
 		return -1;
 	}
 
-	CPU_FREE(cpusetp);
-#else /* CPU_ALLOC */
-	cpu_set_t cpuset;
-	CPU_ZERO(&cpuset);
-
-	for (i = 0; i < num; i++) {
-		if (!rte_lcore_is_enabled(lcore[i])) {
-			RTE_LOG(ERR, EAL, "lcore %u not enabled\n", lcore[i]);
-			return -1;
-		}
-		CPU_SET(lcore[i], &cpuset);
-	}
-
-	s = pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset);
-	if (s != 0) {
-		RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
-		return -1;
-	}
-#endif
+	CLEAN_CPUSET(cpusetp);
 
 	return 0;
 }
@@ -409,9 +402,20 @@ int
 rte_pthread_prepare(void)
 {
 	unsigned long ltid;
+	unsigned lcore;
+
 	if (__get_linear_tid(&ltid) < 0)
 		return -1;
 
+	RTE_PER_LCORE(_thread_id) = ltid;
+
+	lcore = sched_getcpu();
+	if (!rte_lcore_is_enabled(lcore))
+		RTE_LOG(WARNING, EAL, "lcore %u is not enabled\n", lcore);
+	else
+		RTE_PER_LCORE(_lcore_id) = lcore;
+
+	return 0;
 }
 
 void
@@ -424,16 +428,30 @@ int
 rte_pthread_create(pthread_t *tid, void *(*work)(void *), void *arg)
 {
 	int ret;
+	pthread_attr_t attr;
+	size_t size;
+	cpu_set_t *cpusetp;
+	pthread_attr_t *pattr = NULL;
 
 	if (tid == NULL || work == NULL)
 		return -1;
 
-	ret = pthread_create(tid, NULL, work, arg);
+	INIT_CPUSET(size, cpusetp);
+
+	SET_CPUSET(rte_lcore_id(), size, cpusetp);
+
+	pthread_attr_init(&attr);
+	if (!pthread_attr_setaffinity_np(&attr, size, cpusetp))
+		pattr = &attr;
+
+	CLEAN_CPUSET(cpusetp);
+
+	ret = pthread_create(tid, pattr, work, arg);
 	if (ret != 0)
 		return -1;
 
-	if (__eal_thread_set_affinity(*tid, rte_lcore_id()) < 0)
-		rte_panic("cannot set affinity\n");
+	pthread_attr_destroy(&attr);
 
 	return 0;
 }
+
-- 
1.8.1.4