Sorry for the temporary build breakage; I meant to commit these two
patches together.
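For context: with LOCK_PROFILING disabled the profiling hooks expand to
nothing, so locals that exist only to feed them become unused, and the
kernel's -Werror build then fails on the -Wunused warnings. The pattern
these patches apply looks roughly like this (a minimal, self-contained
sketch with hypothetical names, not the exact kernel code):

    #include <stdint.h>

    #ifdef LOCK_PROFILING
    static void
    record_contention(int *contested, uint64_t *waittime)
    {
            /* Stand-in for the real profiling bookkeeping. */
            (void)contested;
            (void)waittime;
    }
    #define example_profile_hook(c, w)      record_contention((c), (w))
    #else
    /* With profiling off the hook vanishes, argument tokens and all. */
    #define example_profile_hook(c, w)
    #endif

    void
    example_lock_hard(void)
    {
    #ifdef LOCK_PROFILING
            uint64_t waittime = 0;  /* consumed only by the hook */
            int contested = 0;
    #endif

            /*
             * With LOCK_PROFILING off this statement expands to just
             * ";", so the missing declarations are never referenced.
             */
            example_profile_hook(&contested, &waittime);
    }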
Jeff
On Sun, 15 Mar 2009, Jeff Roberson wrote:
Author: jeff
Date: Sun Mar 15 08:03:54 2009
New Revision: 189846
URL: http://svn.freebsd.org/changeset/base/189846
Log:
- Wrap lock profiling state variables in #ifdef LOCK_PROFILING blocks.
Modified:
head/sys/kern/kern_lock.c
head/sys/kern/kern_mutex.c
head/sys/kern/kern_rwlock.c
head/sys/kern/kern_sx.c
Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c Sun Mar 15 06:41:47 2009 (r189845)
+++ head/sys/kern/kern_lock.c Sun Mar 15 08:03:54 2009 (r189846)
@@ -333,16 +333,17 @@ __lockmgr_args(struct lock *lk, u_int fl
const char *wmesg, int pri, int timo, const char *file, int line)
{
GIANT_DECLARE;
- uint64_t waittime;
struct lock_class *class;
const char *iwmesg;
uintptr_t tid, v, x;
u_int op;
- int contested, error, ipri, itimo, queue, wakeup_swapper;
+ int error, ipri, itimo, queue, wakeup_swapper;
+#ifdef LOCK_PROFILING
+ uint64_t waittime = 0;
+ int contested = 0;
+#endif
- contested = 0;
error = 0;
- waittime = 0;
tid = (uintptr_t)curthread;
op = (flags & LK_TYPE_MASK);
iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
Modified: head/sys/kern/kern_mutex.c
==============================================================================
--- head/sys/kern/kern_mutex.c Sun Mar 15 06:41:47 2009 (r189845)
+++ head/sys/kern/kern_mutex.c Sun Mar 15 08:03:54 2009 (r189846)
@@ -254,8 +254,11 @@ _mtx_unlock_spin_flags(struct mtx *m, in
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
- int rval, contested = 0;
+#ifdef LOCK_PROFILING
uint64_t waittime = 0;
+ int contested = 0;
+#endif
+ int rval;
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
@@ -296,15 +299,17 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
int line)
{
struct turnstile *ts;
+ uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
volatile struct thread *owner;
#endif
#ifdef KTR
int cont_logged = 0;
#endif
+#ifdef LOCK_PROFILING
int contested = 0;
uint64_t waittime = 0;
- uintptr_t v;
+#endif
if (mtx_owned(m)) {
KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
@@ -448,8 +453,11 @@ void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
int line)
{
- int i = 0, contested = 0;
+ int i = 0;
+#ifdef LOCK_PROFILING
+ int contested = 0;
uint64_t waittime = 0;
+#endif
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
@@ -486,11 +494,13 @@ _thread_lock_flags(struct thread *td, in
{
struct mtx *m;
uintptr_t tid;
- int i, contested;
- uint64_t waittime;
+ int i;
+#ifdef LOCK_PROFILING
+ int contested = 0;
+ uint64_t waittime = 0;
+#endif
- contested = i = 0;
- waittime = 0;
+ i = 0;
tid = (uintptr_t)curthread;
for (;;) {
retry:
Modified: head/sys/kern/kern_rwlock.c
==============================================================================
--- head/sys/kern/kern_rwlock.c Sun Mar 15 06:41:47 2009 (r189845)
+++ head/sys/kern/kern_rwlock.c Sun Mar 15 08:03:54 2009 (r189846)
@@ -282,8 +282,10 @@ _rw_rlock(struct rwlock *rw, const char
int spintries = 0;
int i;
#endif
+#ifdef LOCK_PROFILING
uint64_t waittime = 0;
int contested = 0;
+#endif
uintptr_t v;
KASSERT(rw->rw_lock != RW_DESTROYED,
@@ -584,9 +586,11 @@ _rw_wlock_hard(struct rwlock *rw, uintpt
int spintries = 0;
int i;
#endif
- uint64_t waittime = 0;
uintptr_t v, x;
+#ifdef LOCK_PROFILING
+ uint64_t waittime = 0;
int contested = 0;
+#endif
if (rw_wlocked(rw)) {
KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c Sun Mar 15 06:41:47 2009 (r189845)
+++ head/sys/kern/kern_sx.c Sun Mar 15 08:03:54 2009 (r189846)
@@ -431,9 +431,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
#ifdef ADAPTIVE_SX
volatile struct thread *owner;
#endif
- uint64_t waittime = 0;
uintptr_t x;
- int contested = 0, error = 0;
+#ifdef LOCK_PROFILING
+ uint64_t waittime = 0;
+ int contested = 0;
+#endif
+ int error = 0;
/* If we already hold an exclusive lock, then recurse. */
if (sx_xlocked(sx)) {
@@ -652,8 +655,10 @@ _sx_slock_hard(struct sx *sx, int opts,
#ifdef ADAPTIVE_SX
volatile struct thread *owner;
#endif
+#ifdef LOCK_PROFILING
uint64_t waittime = 0;
int contested = 0;
+#endif
uintptr_t x;
int error = 0;
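
For what it's worth, this is why only the declarations needed wrapping:
the consumers are the lock_profile_obtain_lock_failed() /
lock_profile_obtain_lock_success() hooks from sys/lock_profile.h, which
(if I remember the header correctly) are function-like macros that
compile away entirely when the option is off, taking their argument
tokens with them. So a typical call site, unchanged by this commit,
needs no #ifdef of its own:

    lock_profile_obtain_lock_failed(&m->lock_object, &contested,
        &waittime);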