Author: attilio
Date: Mon Aug 17 16:33:53 2009
New Revision: 196336
URL: http://svn.freebsd.org/changeset/base/196336

Log:
  MFC r196334:
  
  * Change the scope of ASSERT_ATOMIC_LOAD() from a generic check to a
    check specific to pointer-fetching operations. Consequently, rename
    the operation to ASSERT_ATOMIC_LOAD_PTR().
  * Fix the implementation of ASSERT_ATOMIC_LOAD_PTR() by directly
    checking alignment on the word boundary for all the given specific
    architectures. That is a bit too strict for some common cases, but
    it ensures safety (see the standalone sketch after the log below).
  * Add a comment explaining the scope of the macro.
  * Add a new stub in the lockmgr-specific implementation.
  
  Tested by: marcel (initial version), marius
  Reviewed by: rwatson, jhb (comment specific review)
  Approved by: re (kib)
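
For context, here is a minimal userland sketch of the word-boundary check
that ASSERT_ATOMIC_LOAD_PTR() now performs. This is hypothetical and not
part of the commit: assert() stands in for the kernel's KASSERT(), and
ASSERT_ATOMIC_LOAD_PTR_DEMO, demo_lock, and lk_lock are invented names.

    #include <assert.h>
    #include <stdint.h>

    /*
     * Standalone approximation of ASSERT_ATOMIC_LOAD_PTR(): the
     * variable must be pointer-sized and its address must fall on a
     * word (pointer-size) boundary.
     */
    #define ASSERT_ATOMIC_LOAD_PTR_DEMO(var)                            \
            assert(sizeof(var) == sizeof(void *) &&                     \
                ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0)

    struct demo_lock {
            uintptr_t lk_lock;      /* pointer-sized lock word */
    };

    int
    main(void)
    {
            struct demo_lock lk;

            /* Passes: the compiler naturally aligns lk_lock. */
            ASSERT_ATOMIC_LOAD_PTR_DEMO(lk.lk_lock);
            return (0);
    }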

Modified:
  stable/8/sys/   (props changed)
  stable/8/sys/amd64/include/xen/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)
  stable/8/sys/contrib/dev/acpica/   (props changed)
  stable/8/sys/contrib/pf/   (props changed)
  stable/8/sys/dev/mfi/   (props changed)
  stable/8/sys/dev/xen/xenpci/   (props changed)
  stable/8/sys/kern/kern_lock.c
  stable/8/sys/kern/kern_mutex.c
  stable/8/sys/kern/kern_rwlock.c
  stable/8/sys/kern/kern_sx.c
  stable/8/sys/sys/systm.h

Modified: stable/8/sys/kern/kern_lock.c
==============================================================================
--- stable/8/sys/kern/kern_lock.c       Mon Aug 17 16:24:50 2009        (r196335)
+++ stable/8/sys/kern/kern_lock.c       Mon Aug 17 16:33:53 2009        (r196336)
@@ -334,6 +334,9 @@ lockinit(struct lock *lk, int pri, const
        int iflags;
 
        MPASS((flags & ~LK_INIT_MASK) == 0);
+       ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
+            ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
+            &lk->lk_lock));
 
        iflags = LO_SLEEPABLE | LO_UPGRADABLE;
        if (flags & LK_CANRECURSE)

Modified: stable/8/sys/kern/kern_mutex.c
==============================================================================
--- stable/8/sys/kern/kern_mutex.c      Mon Aug 17 16:24:50 2009        (r196335)
+++ stable/8/sys/kern/kern_mutex.c      Mon Aug 17 16:33:53 2009        (r196336)
@@ -783,8 +783,9 @@ mtx_init(struct mtx *m, const char *name
 
        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
                MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
-       ASSERT_ATOMIC_LOAD(m->mtx_lock, ("%s: mtx_lock not aligned for %s: %p",
-           __func__, name, &m->mtx_lock));
+       ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
+           ("%s: mtx_lock not aligned for %s: %p", __func__, name,
+           &m->mtx_lock));
 
 #ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */

Modified: stable/8/sys/kern/kern_rwlock.c
==============================================================================
--- stable/8/sys/kern/kern_rwlock.c     Mon Aug 17 16:24:50 2009        (r196335)
+++ stable/8/sys/kern/kern_rwlock.c     Mon Aug 17 16:33:53 2009        (r196336)
@@ -174,8 +174,9 @@ rw_init_flags(struct rwlock *rw, const c
 
        MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
            RW_RECURSE)) == 0);
-       ASSERT_ATOMIC_LOAD(rw->rw_lock, ("%s: rw_lock not aligned for %s: %p",
-           __func__, name, &rw->rw_lock));
+       ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
+           ("%s: rw_lock not aligned for %s: %p", __func__, name,
+           &rw->rw_lock));
 
        flags = LO_UPGRADABLE;
        if (opts & RW_DUPOK)

Modified: stable/8/sys/kern/kern_sx.c
==============================================================================
--- stable/8/sys/kern/kern_sx.c Mon Aug 17 16:24:50 2009        (r196335)
+++ stable/8/sys/kern/kern_sx.c Mon Aug 17 16:33:53 2009        (r196336)
@@ -205,8 +205,9 @@ sx_init_flags(struct sx *sx, const char 
 
        MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
            SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
-       ASSERT_ATOMIC_LOAD(sx->sx_lock, ("%s: sx_lock not aligned for %s: %p",
-           __func__, description, &sx->sx_lock));
+       ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
+           ("%s: sx_lock not aligned for %s: %p", __func__, description,
+           &sx->sx_lock));
 
        flags = LO_SLEEPABLE | LO_UPGRADABLE;
        if (opts & SX_DUPOK)

Modified: stable/8/sys/sys/systm.h
==============================================================================
--- stable/8/sys/sys/systm.h    Mon Aug 17 16:24:50 2009        (r196335)
+++ stable/8/sys/sys/systm.h    Mon Aug 17 16:33:53 2009        (r196336)
@@ -89,9 +89,16 @@ extern int maxusers;         /* system tune hin
 #define        __CTASSERT(x, y)        typedef char __assert ## y[(x) ? 1 : -1]
 #endif
 
-#define        ASSERT_ATOMIC_LOAD(var,msg)                                     \
-       KASSERT(sizeof(var) <= sizeof(uintptr_t) &&                     \
-           ALIGN(&(var)) == (uintptr_t)&(var), msg)
+/*
+ * Assert that a pointer can be loaded from memory atomically.
+ *
+ * This assertion enforces stronger alignment than necessary.  For example,
+ * on some architectures, atomicity for unaligned loads will depend on
+ * whether or not the load spans multiple cache lines.
+ */
+#define        ASSERT_ATOMIC_LOAD_PTR(var, msg)                                \
+       KASSERT(sizeof(var) == sizeof(void *) &&                        \
+           ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg)
 
 /*
  * XXX the hints declarations are even more misplaced than most declarations
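
The comment added to systm.h above notes that the check is stricter than
necessary: on some architectures a misaligned load can still be atomic as
long as it does not span multiple cache lines. A hypothetical userland
analogue of that point (a sketch, not part of the commit; assert() again
stands in for KASSERT(), and the demo macro and variable names are
invented) showing the check rejecting a deliberately misaligned word:

    #include <assert.h>
    #include <stdalign.h>
    #include <stdint.h>

    #define ASSERT_ATOMIC_LOAD_PTR_DEMO(var)                            \
            assert(sizeof(var) == sizeof(void *) &&                     \
                ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0)

    int
    main(void)
    {
            /* Word-aligned buffer with room for a byte-shifted view. */
            alignas(sizeof(void *)) char buf[2 * sizeof(uintptr_t)];
            uintptr_t *aligned = (uintptr_t *)buf;
            uintptr_t *misaligned = (uintptr_t *)(buf + 1);

            /* Passes: buf starts on a word boundary. */
            ASSERT_ATOMIC_LOAD_PTR_DEMO(*aligned);

            /*
             * Aborts: buf + 1 is one byte past a word boundary.  On
             * some machines a load from it could still be atomic
             * (e.g. when it stays within one cache line), which is why
             * the commit log calls the check "a bit too strict" -- it
             * trades precision for guaranteed safety.
             */
            ASSERT_ATOMIC_LOAD_PTR_DEMO(*misaligned);
            return (0);
    }

Running this aborts at the second assertion, which is the point of the
demonstration: the macro rejects any address off the word boundary rather
than reasoning about cache-line spans.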