The branch main has been updated by jrtc27:

URL: https://cgit.FreeBSD.org/src/commit/?id=a1d971ad3f8216c9b6b651e97dbe3d8177b11825

commit a1d971ad3f8216c9b6b651e97dbe3d8177b11825
Author:     Jessica Clarke <jrt...@freebsd.org>
AuthorDate: 2025-07-10 19:00:28 +0000
Commit:     Jessica Clarke <jrt...@freebsd.org>
CommitDate: 2025-07-10 19:00:28 +0000

    rtld-elf: Track allocated TCBs internally and use for distribute_static_tls
    
    Currently rtld delegates to libc or libthr to initialise the TCBs of
    all existing threads when dlopen is called for a library that uses
    static TLS. This creates an odd split where rtld manages all of TLS
    for dynamically-linked executables except in this one case. It is
    also unnecessarily complex: rtld has to drop the bind lock so that
    libthr can take the thread list lock without deadlocking if any code
    run whilst that lock is held calls back into rtld (such as for lazy
    PLT resolution), and reasoning about that locking is subtle.
    
    The only real reason we call out into libc / libthr is that we don't
    have a list of threads in rtld, and that call is how we find the
    currently used TCBs to initialise (with the copy done in the callee
    at the same time, rather than adding the overhead of some kind of
    callback that hands each TCB back to rtld). If we instead keep a
    list of allocated TCBs in rtld itself then we no longer need to do
    this, and can just copy the data in rtld.
    How these TCBs are mapped to threads is irrelevant: rtld can treat
    all TCBs equally and ensure that each TCB's static TLS data block
    remains in sync with the current set of loaded modules, just as
    _rtld_allocate_tls creates a fresh TCB and its associated data
    without any embedded threading model assumptions.
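
    For reference, how a module's block is found from a given TCB
    depends on the TLS variant (see the tlsbase computation in the diff
    below); a simplified sketch of the two layouts, with padding and
    alignment elided and the architecture examples being only the usual
    representatives:

        /*
         * TLS Variant I (e.g. aarch64): the TCB sits at the low end
         * and static TLS blocks follow at positive offsets:
         *
         *     [ tcb | block 1 | block 2 | ... ]
         *     tlsbase = (char *)tcb + obj->tlsoffset;
         *
         * TLS Variant II (e.g. amd64): static TLS blocks sit below the
         * TCB and are addressed at negative offsets:
         *
         *     [ ... | block 2 | block 1 | tcb ]
         *     tlsbase = (char *)tcb - obj->tlsoffset;
         */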
    
    As an implementation detail, to avoid both a separate allocation for
    each list entry and having to find that allocation from the TCB in
    order to remove and free it on deallocation, we allocate a fake TLS
    offset for the entry and embed it at that offset in each TLS block.
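
    The same trick can be sketched standalone (a toy illustration with
    invented names, not rtld code; error handling elided): reserve space
    for the list entry at a fixed offset inside each block, and convert
    between block and entry with pointer arithmetic, so the list costs
    no extra allocations:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct blk_list_entry {
            TAILQ_ENTRY(blk_list_entry) next;
    };

    static TAILQ_HEAD(, blk_list_entry) blk_list =
        TAILQ_HEAD_INITIALIZER(blk_list);
    static size_t blk_entry_offset;     /* fixed once, like a TLS offset */

    /* Recover the embedded entry from a block, and vice versa. */
    static struct blk_list_entry *
    entry_from_block(char *block)
    {
            return ((struct blk_list_entry *)(block + blk_entry_offset));
    }

    static char *
    block_from_entry(struct blk_list_entry *e)
    {
            return ((char *)e - blk_entry_offset);
    }

    int
    main(void)
    {
            struct blk_list_entry *e;
            size_t size = 128;

            /* Reserve room for the entry at the end of each block. */
            blk_entry_offset = size;
            size += sizeof(struct blk_list_entry);

            for (int i = 0; i < 3; i++)
                    TAILQ_INSERT_TAIL(&blk_list,
                        entry_from_block(malloc(size)), next);

            /* Every live block is reachable without side allocations. */
            TAILQ_FOREACH(e, &blk_list, next)
                    printf("block at %p\n", (void *)block_from_entry(e));
            return (0);
    }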
    
    This will also make it easier to add a new TLS ABI downstream in
    CheriBSD, especially in the presence of library compartmentalisation.
    
    Reviewed by:    kib
    Differential Revision:  https://reviews.freebsd.org/D50920
---
 libexec/rtld-elf/rtld.c | 103 ++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 87 insertions(+), 16 deletions(-)

diff --git a/libexec/rtld-elf/rtld.c b/libexec/rtld-elf/rtld.c
index 7172fbf1d794..17196f55c271 100644
--- a/libexec/rtld-elf/rtld.c
+++ b/libexec/rtld-elf/rtld.c
@@ -82,9 +82,15 @@ struct dlerror_save {
        char *msg;
 };
 
+struct tcb_list_entry {
+       TAILQ_ENTRY(tcb_list_entry)     next;
+};
+
 /*
  * Function declarations.
  */
+static bool allocate_tls_offset_common(size_t *offp, size_t tlssize,
+    size_t tlsalign, size_t tlspoffset);
 static const char *basename(const char *);
 static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **,
     const Elf_Dyn **, const Elf_Dyn **);
@@ -92,7 +98,7 @@ static bool digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *,
     const Elf_Dyn *);
 static bool digest_dynamic(Obj_Entry *, int);
 static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
-static void distribute_static_tls(Objlist *, RtldLockState *);
+static void distribute_static_tls(Objlist *);
 static Obj_Entry *dlcheck(void *);
 static int dlclose_locked(void *, RtldLockState *);
 static Obj_Entry *dlopen_object(const char *name, int fd, Obj_Entry *refobj,
@@ -303,6 +309,10 @@ static size_t tls_static_max_align;
 Elf_Addr tls_dtv_generation = 1; /* Used to detect when dtv size changes */
 int tls_max_index = 1;          /* Largest module index allocated */
 
+static TAILQ_HEAD(, tcb_list_entry) tcb_list =
+    TAILQ_HEAD_INITIALIZER(tcb_list);
+static size_t tcb_list_entry_offset;
+
 static bool ld_library_path_rpath = false;
 bool ld_fast_sigblock = false;
 
@@ -929,6 +939,19 @@ _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
                allocate_tls_offset(entry->obj);
        }
 
+       if (!allocate_tls_offset_common(&tcb_list_entry_offset,
+           sizeof(struct tcb_list_entry), _Alignof(struct tcb_list_entry),
+           0)) {
+               /*
+                * This should be impossible as the static block size is not
+                * yet fixed, but catch and diagnose it failing if that ever
+                * changes or somehow turns out to be false.
+                */
+               _rtld_error("Could not allocate offset for tcb_list_entry");
+               rtld_die();
+       }
+       dbg("tcb_list_entry_offset %zu", tcb_list_entry_offset);
+
        if (relocate_objects(obj_main,
                ld_bind_now != NULL && *ld_bind_now != '\0', &obj_rtld,
                SYMLOOK_EARLY, NULL) == -1)
@@ -3973,7 +3996,7 @@ dlopen_object(const char *name, int fd, Obj_Entry *refobj, int lo_flags,
        if ((lo_flags & RTLD_LO_EARLY) == 0) {
                map_stacks_exec(lockstate);
                if (obj != NULL)
-                       distribute_static_tls(&initlist, lockstate);
+                       distribute_static_tls(&initlist);
        }
 
        if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) ==
@@ -5400,6 +5423,44 @@ tls_get_addr_common(struct tcb *tcb, int index, size_t offset)
        return (tls_get_addr_slow(tcb, index, offset, false));
 }
 
+static struct tcb *
+tcb_from_tcb_list_entry(struct tcb_list_entry *tcbelm)
+{
+#ifdef TLS_VARIANT_I
+       return ((struct tcb *)((char *)tcbelm - tcb_list_entry_offset));
+#else
+       return ((struct tcb *)((char *)tcbelm + tcb_list_entry_offset));
+#endif
+}
+
+static struct tcb_list_entry *
+tcb_list_entry_from_tcb(struct tcb *tcb)
+{
+#ifdef TLS_VARIANT_I
+       return ((struct tcb_list_entry *)((char *)tcb + tcb_list_entry_offset));
+#else
+       return ((struct tcb_list_entry *)((char *)tcb - tcb_list_entry_offset));
+#endif
+}
+
+static void
+tcb_list_insert(struct tcb *tcb)
+{
+       struct tcb_list_entry *tcbelm;
+
+       tcbelm = tcb_list_entry_from_tcb(tcb);
+       TAILQ_INSERT_TAIL(&tcb_list, tcbelm, next);
+}
+
+static void
+tcb_list_remove(struct tcb *tcb)
+{
+       struct tcb_list_entry *tcbelm;
+
+       tcbelm = tcb_list_entry_from_tcb(tcb);
+       TAILQ_REMOVE(&tcb_list, tcbelm, next);
+}
+
 #ifdef TLS_VARIANT_I
 
 /*
@@ -5513,6 +5574,7 @@ allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign)
                }
        }
 
+       tcb_list_insert(tcb);
        return (tcb);
 }
 
@@ -5524,6 +5586,8 @@ free_tls(void *tcb, size_t tcbsize, size_t tcbalign __unused)
        size_t post_size;
        size_t i, tls_init_align __unused;
 
+       tcb_list_remove(tcb);
+
        assert(tcbsize >= TLS_TCB_SIZE);
        tls_init_align = MAX(obj_main->tlsalign, 1);
 
@@ -5624,6 +5688,7 @@ allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign)
                }
        }
 
+       tcb_list_insert(tcb);
        return (tcb);
 }
 
@@ -5635,6 +5700,8 @@ free_tls(void *tcb, size_t tcbsize __unused, size_t tcbalign)
        size_t i;
        uintptr_t tlsstart, tlsend;
 
+       tcb_list_remove(tcb);
+
        /*
         * Figure out the size of the initial TLS block so that we can
         * find stuff which ___tls_get_addr() allocated dynamically.
@@ -6136,25 +6203,29 @@ map_stacks_exec(RtldLockState *lockstate)
 }
 
 static void
-distribute_static_tls(Objlist *list, RtldLockState *lockstate)
+distribute_static_tls(Objlist *list)
 {
-       Objlist_Entry *elm;
+       struct tcb_list_entry *tcbelm;
+       Objlist_Entry *objelm;
+       struct tcb *tcb;
        Obj_Entry *obj;
-       void (*distrib)(size_t, void *, size_t, size_t);
+       char *tlsbase;
 
-       distrib = (void (*)(size_t, void *, size_t, size_t))(
-           uintptr_t)get_program_var_addr("__pthread_distribute_static_tls",
-           lockstate);
-       if (distrib == NULL)
-               return;
-       STAILQ_FOREACH(elm, list, link) {
-               obj = elm->obj;
+       STAILQ_FOREACH(objelm, list, link) {
+               obj = objelm->obj;
                if (obj->marker || !obj->tls_static || obj->static_tls_copied)
                        continue;
-               lock_release(rtld_bind_lock, lockstate);
-               distrib(obj->tlsoffset, obj->tlsinit, obj->tlsinitsize,
-                   obj->tlssize);
-               wlock_acquire(rtld_bind_lock, lockstate);
+               TAILQ_FOREACH(tcbelm, &tcb_list, next) {
+                       tcb = tcb_from_tcb_list_entry(tcbelm);
+#ifdef TLS_VARIANT_I
+                       tlsbase = (char *)tcb + obj->tlsoffset;
+#else
+                       tlsbase = (char *)tcb - obj->tlsoffset;
+#endif
+                       memcpy(tlsbase, obj->tlsinit, obj->tlsinitsize);
+                       memset(tlsbase + obj->tlsinitsize, 0,
+                           obj->tlssize - obj->tlsinitsize);
+               }
                obj->static_tls_copied = true;
        }
 }
