This is a preparation step for dynamically unregistering external
threads.

Since we explicitly allocate a per thread trace buffer in
rte_thread_init, add an internal helper to free this buffer.

Note: I preferred renaming the current internal function that frees all
threads' trace buffers (new name: trace_mem_free()) and reusing the
previous name (trace_mem_per_thread_free()) for freeing the buffer of a
single given thread.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 lib/librte_eal/common/eal_common_thread.c |  9 +++++
 lib/librte_eal/common/eal_common_trace.c  | 49 +++++++++++++++++++----
 lib/librte_eal/common/eal_thread.h        |  5 +++
 lib/librte_eal/common/eal_trace.h         |  1 +
 4 files changed, 57 insertions(+), 7 deletions(-)

diff --git a/lib/librte_eal/common/eal_common_thread.c b/lib/librte_eal/common/eal_common_thread.c
index fd5c41a2af..8a973ca8ac 100644
--- a/lib/librte_eal/common/eal_common_thread.c
+++ b/lib/librte_eal/common/eal_common_thread.c
@@ -22,6 +22,7 @@
 #include "eal_internal_cfg.h"
 #include "eal_private.h"
 #include "eal_thread.h"
+#include "eal_trace.h"
 
 RTE_DEFINE_PER_LCORE(unsigned int, _lcore_id) = LCORE_ID_ANY;
 RTE_DEFINE_PER_LCORE(int, _thread_id) = -1;
@@ -169,6 +170,14 @@ rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset)
 #endif
 }
 
+void
+rte_thread_uninit(void)
+{
+#ifndef RTE_EXEC_ENV_WINDOWS
+       trace_mem_per_thread_free();
+#endif
+}
+
 struct rte_thread_ctrl_params {
        void *(*start_routine)(void *);
        void *arg;
diff --git a/lib/librte_eal/common/eal_common_trace.c b/lib/librte_eal/common/eal_common_trace.c
index 875553d7e5..cd2d217b02 100644
--- a/lib/librte_eal/common/eal_common_trace.c
+++ b/lib/librte_eal/common/eal_common_trace.c
@@ -101,7 +101,7 @@ eal_trace_fini(void)
 {
        if (!rte_trace_is_enabled())
                return;
-       trace_mem_per_thread_free();
+       trace_mem_free();
        trace_metadata_destroy();
        eal_trace_args_free();
 }
@@ -370,24 +370,59 @@ __rte_trace_mem_per_thread_alloc(void)
        rte_spinlock_unlock(&trace->lock);
 }
 
+static void
+trace_mem_per_thread_free_unlocked(struct thread_mem_meta *meta)
+{
+       if (meta->area == TRACE_AREA_HUGEPAGE)
+               eal_free_no_trace(meta->mem);
+       else if (meta->area == TRACE_AREA_HEAP)
+               free(meta->mem);
+}
+
 void
 trace_mem_per_thread_free(void)
+{
+       struct trace *trace = trace_obj_get();
+       struct __rte_trace_header *header;
+       uint32_t count;
+
+       if (RTE_PER_LCORE(trace_mem) == NULL)
+               return;
+
+       header = RTE_PER_LCORE(trace_mem);
+       rte_spinlock_lock(&trace->lock);
+       for (count = 0; count < trace->nb_trace_mem_list; count++) {
+               if (trace->lcore_meta[count].mem == header)
+                       break;
+       }
+       if (count != trace->nb_trace_mem_list) {
+               struct thread_mem_meta *meta = &trace->lcore_meta[count];
+
+               trace_mem_per_thread_free_unlocked(meta);
+               if (count != trace->nb_trace_mem_list - 1) {
+                       memmove(meta, meta + 1,
+                               sizeof(*meta) *
+                                (trace->nb_trace_mem_list - count - 1));
+               }
+               trace->nb_trace_mem_list--;
+       }
+       rte_spinlock_unlock(&trace->lock);
+}
+
+void
+trace_mem_free(void)
 {
        struct trace *trace = trace_obj_get();
        uint32_t count;
-       void *mem;
 
        if (!rte_trace_is_enabled())
                return;
 
        rte_spinlock_lock(&trace->lock);
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
-               mem = trace->lcore_meta[count].mem;
-               if (trace->lcore_meta[count].area == TRACE_AREA_HUGEPAGE)
-                       eal_free_no_trace(mem);
-               else if (trace->lcore_meta[count].area == TRACE_AREA_HEAP)
-                       free(mem);
+               trace_mem_per_thread_free_unlocked(&trace->lcore_meta[count]);
        }
+       trace->nb_trace_mem_list = 0;
        rte_spinlock_unlock(&trace->lock);
 }
 
diff --git a/lib/librte_eal/common/eal_thread.h b/lib/librte_eal/common/eal_thread.h
index da5e7c93ba..4ecd8fd53a 100644
--- a/lib/librte_eal/common/eal_thread.h
+++ b/lib/librte_eal/common/eal_thread.h
@@ -25,6 +25,11 @@ __rte_noreturn void *eal_thread_loop(void *arg);
  */
 void rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset);
 
+/**
+ * Uninitialize per-lcore info for current thread.
+ */
+void rte_thread_uninit(void);
+
 /**
  * Get the NUMA socket id from cpu id.
  * This function is private to EAL.
diff --git a/lib/librte_eal/common/eal_trace.h b/lib/librte_eal/common/eal_trace.h
index 8f60616156..92c5951c3a 100644
--- a/lib/librte_eal/common/eal_trace.h
+++ b/lib/librte_eal/common/eal_trace.h
@@ -106,6 +106,7 @@ int trace_metadata_create(void);
 void trace_metadata_destroy(void);
 int trace_mkdir(void);
 int trace_epoch_time_save(void);
+void trace_mem_free(void);
 void trace_mem_per_thread_free(void);
 
 /* EAL interface */
-- 
2.23.0

Reply via email to