On Mon, Jan 19, 2009 at 07:41:35PM -0500, Brian Fundakowski Feldman wrote:
> On Fri, Jan 16, 2009 at 02:36:06PM -0800, Jason Evans wrote:
> > Brian Fundakowski Feldman wrote:
> >> Could you, and anyone else who would care to, check this out?  It's a
> >> regression fix but it also makes the code a little bit clearer.  Thanks!
> >> 
> >> Index: lib/libc/stdlib/malloc.c
> > 
> > Why does malloc need to change for this?  Unless there's a really good 
> > reason, I don't want the extra branches in the locking functions.
> 
> Because malloc is the thing causing the regression.  It is easy enough
> to optimize out the one extra fetch and branch in the single-threaded case
> if I can get some consensus that the fix to it is actually fine.
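
For anyone reviewing without malloc.c open: the existing wrappers already
branch on __isthreaded and skip the lock entirely in single-threaded
processes, and the earlier version of this fix added one more fetch and
branch there.  A sketch of the pattern (the two conditional bodies are my
recollection of the current wrappers and rely on malloc.c's internal
definitions, so treat them as illustrative; the always-unlock variant is
what the patch below adds):

static inline void
malloc_mutex_lock(malloc_mutex_t *mutex)
{

	/* Existing wrapper (sketch): a no-op until __isthreaded is set. */
	if (__isthreaded)
		_SPINLOCK(&mutex->lock);
}

static inline void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{

	/* Same conditional pattern on the unlock side. */
	if (__isthreaded)
		_SPINUNLOCK(&mutex->lock);
}

static inline void
malloc_mutex_always_unlock(malloc_mutex_t *mutex)
{

	/*
	 * What the patch adds: unconditional, so _malloc_postfork() can
	 * release the locks _malloc_prefork() took even after the fork
	 * child has cleared the threaded state.  The regular lock/unlock
	 * path gains no new branch.
	 */
	_SPINUNLOCK(&mutex->lock);
}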

Pessimization removed:
Index: lib/libc/stdlib/malloc.c
===================================================================
--- lib/libc/stdlib/malloc.c    (revision 187160)
+++ lib/libc/stdlib/malloc.c    (working copy)
@@ -1217,6 +1217,13 @@
                _SPINUNLOCK(&mutex->lock);
 }
 
+static inline void
+malloc_mutex_always_unlock(malloc_mutex_t *mutex)
+{
+
+       _SPINUNLOCK(&mutex->lock);
+}
+
 /*
  * End mutex.
  */
@@ -1300,6 +1307,13 @@
                _pthread_mutex_unlock(lock);
 }
 
+static inline void
+malloc_spin_always_unlock(pthread_mutex_t *lock)
+{
+
+       _pthread_mutex_unlock(lock);
+}
+
 /*
  * End spin lock.
  */
@@ -5515,9 +5529,8 @@
 void
 _malloc_prefork(void)
 {
+       arena_t *larenas[narenas];
        bool again;
-       unsigned i, j;
-       arena_t *larenas[narenas], *tarenas[narenas];
 
        /* Acquire all mutexes in a safe order. */
 
@@ -5530,19 +5543,23 @@
         */
        memset(larenas, 0, sizeof(arena_t *) * narenas);
        do {
+               unsigned int i;
+
                again = false;
 
                malloc_spin_lock(&arenas_lock);
                for (i = 0; i < narenas; i++) {
                        if (arenas[i] != larenas[i]) {
+                               arena_t *tarenas[narenas];
+                               unsigned int j;
+
                                memcpy(tarenas, arenas, sizeof(arena_t *) *
                                    narenas);
                                malloc_spin_unlock(&arenas_lock);
                                for (j = 0; j < narenas; j++) {
                                        if (larenas[j] != tarenas[j]) {
                                                larenas[j] = tarenas[j];
-                                               malloc_spin_lock(
-                                                   &larenas[j]->lock);
+                                               malloc_spin_lock(&larenas[j]->lock);
                                        }
                                }
                                again = true;
@@ -5569,19 +5586,24 @@
        /* Release all mutexes, now that fork() has completed. */
 
 #ifdef MALLOC_DSS
-       malloc_mutex_unlock(&dss_mtx);
+       malloc_mutex_always_unlock(&dss_mtx);
 #endif
 
-       malloc_mutex_unlock(&huge_mtx);
+       malloc_mutex_always_unlock(&huge_mtx);
 
-       malloc_mutex_unlock(&base_mtx);
+       malloc_mutex_always_unlock(&base_mtx);
 
        memcpy(larenas, arenas, sizeof(arena_t *) * narenas);
-       malloc_spin_unlock(&arenas_lock);
+       malloc_spin_always_unlock(&arenas_lock);
        for (i = 0; i < narenas; i++) {
                if (larenas[i] != NULL)
-                       malloc_spin_unlock(&larenas[i]->lock);
+                       malloc_spin_always_unlock(&larenas[i]->lock);
        }
+       /*
+        * This ends the special post-__isthreaded exemption behavior for
+        * the malloc locks.  The process should effectively be single
+        * threaded at this point regardless of __isthreaded status.
+        */
 }
 
 /*
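
Pieced together for readability, _malloc_postfork() would read roughly as
follows once the hunk above is applied.  The two local declarations at the
top are assumed, since the hunk does not show the head of the function:

void
_malloc_postfork(void)
{
	arena_t *larenas[narenas];	/* assumed, not shown in the hunk */
	unsigned i;			/* assumed, not shown in the hunk */

	/* Release all mutexes, now that fork() has completed. */

#ifdef MALLOC_DSS
	malloc_mutex_always_unlock(&dss_mtx);
#endif

	malloc_mutex_always_unlock(&huge_mtx);

	malloc_mutex_always_unlock(&base_mtx);

	memcpy(larenas, arenas, sizeof(arena_t *) * narenas);
	malloc_spin_always_unlock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (larenas[i] != NULL)
			malloc_spin_always_unlock(&larenas[i]->lock);
	}
	/*
	 * This ends the special post-__isthreaded exemption behavior for
	 * the malloc locks.  The process should effectively be single
	 * threaded at this point regardless of __isthreaded status.
	 */
}
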
Index: lib/libthr/thread/thr_fork.c
===================================================================
--- lib/libthr/thread/thr_fork.c        (revision 187160)
+++ lib/libthr/thread/thr_fork.c        (working copy)
@@ -105,7 +105,7 @@
        struct pthread_atfork *af;
        pid_t ret;
        int errsave;
-       int unlock_malloc;
+       int was_threaded;
        int rtld_locks[MAX_RTLD_LOCKS];
 
        if (!_thr_is_inited())
@@ -122,16 +122,16 @@
        }
 
        /*
-        * Try our best to protect memory from being corrupted in
-        * child process because another thread in malloc code will
-        * simply be kill by fork().
+        * All bets are off as to what should happen soon if the parent
+        * process was not so kind as to set up pthread fork hooks to
+        * relinquish all running threads.
         */
        if (_thr_isthreaded() != 0) {
-               unlock_malloc = 1;
+               was_threaded = 1;
                _malloc_prefork();
                _rtld_atfork_pre(rtld_locks);
        } else {
-               unlock_malloc = 0;
+               was_threaded = 0;
        }
 
        /*
@@ -159,7 +159,7 @@
                _thr_umutex_init(&curthread->lock);
                _thr_umutex_init(&_thr_atfork_lock);
 
-               if (unlock_malloc)
+               if (was_threaded)
                        _rtld_atfork_post(rtld_locks);
                _thr_setthreaded(0);
 
@@ -173,7 +173,7 @@
                /* Ready to continue, unblock signals. */ 
                _thr_signal_unblock(curthread);
 
-               if (unlock_malloc)
+               if (was_threaded)
                        _malloc_postfork();
 
                /* Run down atfork child handlers. */
@@ -188,7 +188,7 @@
                /* Ready to continue, unblock signals. */ 
                _thr_signal_unblock(curthread);
 
-               if (unlock_malloc) {
+               if (was_threaded) {
                        _rtld_atfork_post(rtld_locks);
                        _malloc_postfork();
                }
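
In case it helps testing, here is a hypothetical reproducer sketch for the
scenario the patch appears to target (my construction, not part of the patch
or the original report): fork() from a process that has been threaded, then
have the child go threaded again.  With a conditional unlock in the post-fork
path, the locks _malloc_prefork() took can be left held in the child once the
threaded state is cleared, and the child's next threaded allocation can hang;
the always-unlock variants release them regardless.

/* Build with: cc -pthread repro.c */
#include <sys/types.h>
#include <sys/wait.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

static void *
touch_malloc(void *arg)
{

	(void)arg;
	free(malloc(64));
	return (NULL);
}

int
main(void)
{
	pthread_t td;
	pid_t pid;

	/* Go threaded so _malloc_prefork() takes real locks at fork time. */
	pthread_create(&td, NULL, touch_malloc, NULL);
	pthread_join(td, NULL);

	pid = fork();
	if (pid == 0) {
		/* Child: become threaded again and allocate. */
		pthread_create(&td, NULL, touch_malloc, NULL);
		pthread_join(td, NULL);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	return (0);
}

On a libc/libthr without the fix the child may hang in the second
pthread_create()/malloc() round; with the patch the run should exit cleanly.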

-- 
Brian Fundakowski Feldman                           \'[ FreeBSD ]''''''''''\
  <> gr...@freebsd.org                               \  The Power to Serve! \
 Opinions expressed are my own.                       \,,,,,,,,,,,,,,,,,,,,,,\