Author: attilio
Date: Tue Nov  3 16:46:52 2009
New Revision: 198854
URL: http://svn.freebsd.org/changeset/base/198854

Log:
  Split P_NOLOAD into a per-thread flag (TDF_NOLOAD).
  This improvement aims at avoiding further cache misses in scheduler-specific
  functions that need to keep track of average thread running time, and at
  avoiding extra locking in the places that set this flag.
  
  Reported by:  jeff (originally), kris (currently)
  Reviewed by:  jhb
  Tested by:    Giuseppe Cocomazzi <sbudella at email dot it>
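
The pattern repeated in every file below is the same: scheduler load accounting
used to read td->td_proc->p_flag, an extra pointer chase and therefore a likely
extra cache line, and setting the flag required PROC_LOCK(); it now tests
td->td_flags under the thread lock the caller already holds. The user-space
sketch that follows uses deliberately simplified stand-ins for struct proc and
struct thread, plus a made-up "sysload" counter standing in for the scheduler's
load bookkeeping; it only illustrates that data-layout point and is not kernel
code.

/*
 * Illustration only: simplified stand-ins for the kernel structures,
 * showing why moving the flag from the process to the thread removes
 * a pointer chase (and a second cache line) from the hot path.
 */
#include <stdio.h>

#define P_NOLOAD        0x00008         /* old: process-wide flag */
#define TDF_NOLOAD      0x00040000      /* new: per-thread flag */

struct proc {
        int p_flag;                     /* process-wide flags */
};

struct thread {
        struct proc *td_proc;           /* owning process */
        int td_flags;                   /* per-thread flags */
};

static int sysload;                     /* stand-in for the run-queue load counter */

/* Old style: must dereference td->td_proc to find the flag. */
static void
load_add_old(struct thread *td)
{
        if ((td->td_proc->p_flag & P_NOLOAD) == 0)
                sysload++;
}

/* New style: the flag lives in the thread the scheduler already touches. */
static void
load_add_new(struct thread *td)
{
        if ((td->td_flags & TDF_NOLOAD) == 0)
                sysload++;
}

int
main(void)
{
        struct proc p = { .p_flag = 0 };
        struct thread td = { .td_proc = &p, .td_flags = TDF_NOLOAD };

        load_add_old(&td);              /* counted: P_NOLOAD is not set on the process */
        load_add_new(&td);              /* skipped: the thread is marked TDF_NOLOAD */
        printf("sysload = %d\n", sysload);      /* prints 1 */
        return (0);
}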

Modified:
  head/sys/kern/kern_idle.c
  head/sys/kern/kern_intr.c
  head/sys/kern/sched_4bsd.c
  head/sys/kern/sched_ule.c
  head/sys/sys/proc.h
  head/sys/vm/vm_zeroidle.c

Modified: head/sys/kern/kern_idle.c
==============================================================================
--- head/sys/kern/kern_idle.c   Tue Nov  3 12:52:35 2009        (r198853)
+++ head/sys/kern/kern_idle.c   Tue Nov  3 16:46:52 2009        (r198854)
@@ -74,10 +74,9 @@ idle_setup(void *dummy)
                if (error)
                        panic("idle_setup: kproc_create error %d\n", error);
 
-               p->p_flag |= P_NOLOAD;
                thread_lock(td);
                TD_SET_CAN_RUN(td);
-               td->td_flags |= TDF_IDLETD;
+               td->td_flags |= TDF_IDLETD | TDF_NOLOAD;
                sched_class(td, PRI_IDLE);
                sched_prio(td, PRI_MAX_IDLE);
                thread_unlock(td);

Modified: head/sys/kern/kern_intr.c
==============================================================================
--- head/sys/kern/kern_intr.c   Tue Nov  3 12:52:35 2009        (r198853)
+++ head/sys/kern/kern_intr.c   Tue Nov  3 16:46:52 2009        (r198854)
@@ -1061,6 +1061,7 @@ int
 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
            void *arg, int pri, enum intr_type flags, void **cookiep)
 {
+       struct thread *td;
        struct intr_event *ie;
        int error;
 
@@ -1085,11 +1086,10 @@ swi_add(struct intr_event **eventp, cons
        if (error)
                return (error);
        if (pri == SWI_CLOCK) {
-               struct proc *p;
-               p = ie->ie_thread->it_thread->td_proc;
-               PROC_LOCK(p);
-               p->p_flag |= P_NOLOAD;
-               PROC_UNLOCK(p);
+               td = ie->ie_thread->it_thread;
+               thread_lock(td);
+               td->td_flags |= TDF_NOLOAD;
+               thread_unlock(td);
        }
        return (0);
 }

Modified: head/sys/kern/sched_4bsd.c
==============================================================================
--- head/sys/kern/sched_4bsd.c  Tue Nov  3 12:52:35 2009        (r198853)
+++ head/sys/kern/sched_4bsd.c  Tue Nov  3 16:46:52 2009        (r198854)
@@ -728,10 +728,10 @@ sched_exit_thread(struct thread *td, str
        thread_lock(td);
        td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
        thread_unlock(td);
-       mtx_lock_spin(&sched_lock);
-       if ((child->td_proc->p_flag & P_NOLOAD) == 0)
+       thread_lock(child);
+       if ((child->td_flags & TDF_NOLOAD) == 0)
                sched_load_rem();
-       mtx_unlock_spin(&sched_lock);
+       thread_unlock(child);
 }
 
 void
@@ -937,7 +937,7 @@ sched_switch(struct thread *td, struct t
                thread_unlock(td);
        }
 
-       if ((p->p_flag & P_NOLOAD) == 0)
+       if ((td->td_flags & TDF_NOLOAD) == 0)
                sched_load_rem();
 
        if (newtd)
@@ -980,7 +980,7 @@ sched_switch(struct thread *td, struct t
                        ("trying to run inhibited thread"));
                newtd->td_flags |= TDF_DIDRUN;
                TD_SET_RUNNING(newtd);
-               if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
+               if ((newtd->td_flags & TDF_NOLOAD) == 0)
                        sched_load_add();
        } else {
                newtd = choosethread();
@@ -1289,7 +1289,7 @@ sched_add(struct thread *td, int flags)
                }
        }
 
-       if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+       if ((td->td_flags & TDF_NOLOAD) == 0)
                sched_load_add();
        runq_add(ts->ts_runq, td, flags);
        if (cpu != NOCPU)
@@ -1338,7 +1338,7 @@ sched_add(struct thread *td, int flags)
                if (maybe_preempt(td))
                        return;
        }
-       if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+       if ((td->td_flags & TDF_NOLOAD) == 0)
                sched_load_add();
        runq_add(ts->ts_runq, td, flags);
        maybe_resched(td);
@@ -1360,7 +1360,7 @@ sched_rem(struct thread *td)
            "prio:%d", td->td_priority, KTR_ATTR_LINKED,
            sched_tdname(curthread));
 
-       if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+       if ((td->td_flags & TDF_NOLOAD) == 0)
                sched_load_rem();
 #ifdef SMP
        if (ts->ts_runq != &runq)

Modified: head/sys/kern/sched_ule.c
==============================================================================
--- head/sys/kern/sched_ule.c   Tue Nov  3 12:52:35 2009        (r198853)
+++ head/sys/kern/sched_ule.c   Tue Nov  3 16:46:52 2009        (r198854)
@@ -495,7 +495,7 @@ tdq_load_add(struct tdq *tdq, struct thr
        THREAD_LOCK_ASSERT(td, MA_OWNED);
 
        tdq->tdq_load++;
-       if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+       if ((td->td_flags & TDF_NOLOAD) == 0)
                tdq->tdq_sysload++;
        KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
 }
@@ -514,7 +514,7 @@ tdq_load_rem(struct tdq *tdq, struct thr
            ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
 
        tdq->tdq_load--;
-       if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+       if ((td->td_flags & TDF_NOLOAD) == 0)
                tdq->tdq_sysload--;
        KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
 }

Modified: head/sys/sys/proc.h
==============================================================================
--- head/sys/sys/proc.h Tue Nov  3 12:52:35 2009        (r198853)
+++ head/sys/sys/proc.h Tue Nov  3 16:46:52 2009        (r198854)
@@ -322,7 +322,7 @@ do {                                                        \
 #define        TDF_NEEDSUSPCHK 0x00008000 /* Thread may need to suspend. */
 #define        TDF_NEEDRESCHED 0x00010000 /* Thread needs to yield. */
 #define        TDF_NEEDSIGCHK  0x00020000 /* Thread may need signal delivery. */
-#define        TDF_UNUSED18    0x00040000 /* --available-- */
+#define        TDF_NOLOAD      0x00040000 /* Ignore during load avg calculations. */
 #define        TDF_UNUSED19    0x00080000 /* Thread is sleeping on a umtx. */
 #define        TDF_THRWAKEUP   0x00100000 /* Libthr thread must not suspend itself. */
 #define        TDF_UNUSED21    0x00200000 /* --available-- */
@@ -558,7 +558,7 @@ struct proc {
 #define        P_ADVLOCK       0x00001 /* Process may hold a POSIX advisory lock. */
 #define        P_CONTROLT      0x00002 /* Has a controlling terminal. */
 #define        P_KTHREAD       0x00004 /* Kernel thread (*). */
-#define        P_NOLOAD        0x00008 /* Ignore during load avg calculations. */
+#define        P_UNUSED0       0x00008 /* available. */
 #define        P_PPWAIT        0x00010 /* Parent is waiting for child to exec/exit. */
 #define        P_PROFIL        0x00020 /* Has started profiling. */
 #define        P_STOPPROF      0x00040 /* Has thread requesting to stop profiling. */

Modified: head/sys/vm/vm_zeroidle.c
==============================================================================
--- head/sys/vm/vm_zeroidle.c   Tue Nov  3 12:52:35 2009        (r198853)
+++ head/sys/vm/vm_zeroidle.c   Tue Nov  3 16:46:52 2009        (r198854)
@@ -139,26 +139,21 @@ vm_pagezero(void __unused *arg)
        }
 }
 
-static struct proc *pagezero_proc;
-
 static void
 pagezero_start(void __unused *arg)
 {
        int error;
+       struct proc *p;
        struct thread *td;
 
-       error = kproc_create(vm_pagezero, NULL, &pagezero_proc, RFSTOPPED, 0,
-           "pagezero");
+       error = kproc_create(vm_pagezero, NULL, &p, RFSTOPPED, 0, "pagezero");
        if (error)
                panic("pagezero_start: error %d\n", error);
-       /*
-        * We're an idle task, don't count us in the load.
-        */
-       PROC_LOCK(pagezero_proc);
-       pagezero_proc->p_flag |= P_NOLOAD;
-       PROC_UNLOCK(pagezero_proc);
-       td = FIRST_THREAD_IN_PROC(pagezero_proc);
+       td = FIRST_THREAD_IN_PROC(p);
        thread_lock(td);
+
+       /* We're an idle task, don't count us in the load. */
+       td->td_flags |= TDF_NOLOAD;
        sched_class(td, PRI_IDLE);
        sched_prio(td, PRI_MAX_IDLE);
        sched_add(td, SRQ_BORING);