Author: hselasky
Date: Tue Feb 21 14:22:14 2017
New Revision: 314044
URL: https://svnweb.freebsd.org/changeset/base/314044

Log:
  Streamline the LinuxKPI spinlock wrappers.
  
  1) Add better spinlock debug names when WITNESS_ALL is defined.
  
  2) Make sure that the calling thread gets bound to the current CPU
  while a spinlock is locked. Some Linux kernel code depends on that the
  CPU ID doesn't change while a spinlock is locked.
  
  3) Add support for using LinuxKPI spinlocks during a panic().
  
  MFC after:            1 week
  Sponsored by:         Mellanox Technologies

Modified:
  head/sys/compat/linuxkpi/common/include/linux/spinlock.h

Modified: head/sys/compat/linuxkpi/common/include/linux/spinlock.h
==============================================================================
--- head/sys/compat/linuxkpi/common/include/linux/spinlock.h    Tue Feb 21 13:23:53 2017        (r314043)
+++ head/sys/compat/linuxkpi/common/include/linux/spinlock.h    Tue Feb 21 14:22:14 2017        (r314044)
@@ -2,7 +2,7 @@
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2010 iX Systems, Inc.
  * Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -35,36 +35,126 @@
 #include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
-#include <sys/libkern.h>
+#include <sys/kdb.h>
 
 #include <linux/compiler.h>
 #include <linux/rwlock.h>
+#include <linux/bottom_half.h>
 
 typedef struct {
        struct mtx m;
 } spinlock_t;
 
-#define        spin_lock(_l)           mtx_lock(&(_l)->m)
-#define        spin_unlock(_l)         mtx_unlock(&(_l)->m)
-#define        spin_trylock(_l)        mtx_trylock(&(_l)->m)
-#define        spin_lock_nested(_l, _n) mtx_lock_flags(&(_l)->m, MTX_DUPOK)
-#define        spin_lock_irq(lock)     spin_lock(lock)
-#define        spin_unlock_irq(lock)   spin_unlock(lock)
-#define        spin_lock_irqsave(lock, flags)                                  \
-    do {(flags) = 0; spin_lock(lock); } while (0)
-#define        spin_unlock_irqrestore(lock, flags)                             \
-    do { spin_unlock(lock); } while (0)
+/*
+ * By defining CONFIG_SPIN_SKIP LinuxKPI spinlocks and asserts will be
+ * skipped during panic(). By default it is disabled due to
+ * performance reasons.
+ */
+#ifdef CONFIG_SPIN_SKIP
+#define        SPIN_SKIP(void) unlikely(SCHEDULER_STOPPED() || kdb_active)
+#else
+#define        SPIN_SKIP(void) 0
+#endif
+
+#define        spin_lock(_l) do {                      \
+       if (SPIN_SKIP())                        \
+               break;                          \
+       mtx_lock(&(_l)->m);                     \
+       local_bh_disable();                     \
+} while (0)
+
+#define        spin_lock_bh(_l) do {                   \
+       spin_lock(_l);                          \
+} while (0)
+
+#define        spin_lock_irq(_l) do {                  \
+       spin_lock(_l);                          \
+} while (0)
+
+#define        spin_unlock(_l) do {                    \
+       if (SPIN_SKIP())                        \
+               break;                          \
+       local_bh_enable();                      \
+       mtx_unlock(&(_l)->m);                   \
+} while (0)
+
+#define        spin_unlock_bh(_l) do {                 \
+       spin_unlock(_l);                        \
+} while (0)
+
+#define        spin_unlock_irq(_l) do {                \
+       spin_unlock(_l);                        \
+} while (0)
+
+#define        spin_trylock(_l) ({                     \
+       int __ret;                              \
+       if (SPIN_SKIP()) {                      \
+               __ret = 1;                      \
+       } else {                                \
+               __ret = mtx_trylock(&(_l)->m);  \
+               if (likely(__ret != 0))         \
+                       local_bh_disable();     \
+       }                                       \
+       __ret;                                  \
+})
+
+#define        spin_lock_nested(_l, _n) do {           \
+       if (SPIN_SKIP())                        \
+               break;                          \
+       mtx_lock_flags(&(_l)->m, MTX_DUPOK);    \
+       local_bh_disable();                     \
+} while (0)
+
+#define        spin_lock_irqsave(_l, flags) do {       \
+       (flags) = 0;                            \
+       spin_lock(_l);                          \
+} while (0)
+
+#define        spin_lock_irqsave_nested(_l, flags, _n) do {    \
+       (flags) = 0;                                    \
+       spin_lock_nested(_l, _n);                       \
+} while (0)
+
+#define        spin_unlock_irqrestore(_l, flags) do {          \
+       spin_unlock(_l);                                \
+} while (0)
+
+#ifdef WITNESS_ALL
+/* NOTE: the maximum WITNESS name is 64 chars */
+#define        __spin_lock_name(name, file, line)              \
+       (((const char *){file ":" #line "-" name}) +    \
+       (sizeof(file) > 16 ? sizeof(file) - 16 : 0))
+#else
+#define        __spin_lock_name(name, file, line)      name
+#endif
+#define        _spin_lock_name(...)            __spin_lock_name(__VA_ARGS__)
+#define        spin_lock_name(name)            _spin_lock_name(name, __FILE__, __LINE__)
+
+#define        spin_lock_init(lock)    linux_spin_lock_init(lock, spin_lock_name("lnxspin"))
+
+static inline void
+linux_spin_lock_init(spinlock_t *lock, const char *name)
+{
+
+       memset(lock, 0, sizeof(*lock));
+       mtx_init(&lock->m, name, NULL, MTX_DEF | MTX_NOWITNESS);
+}
 
 static inline void
-spin_lock_init(spinlock_t *lock)
+spin_lock_destroy(spinlock_t *lock)
 {
 
-       memset(&lock->m, 0, sizeof(lock->m));
-       mtx_init(&lock->m, "lnxspin", NULL, MTX_DEF | MTX_NOWITNESS);
+       mtx_destroy(&lock->m);
 }
 
-#define        DEFINE_SPINLOCK(lock)                                           \
-       spinlock_t lock;                                                \
-       MTX_SYSINIT(lock, &(lock).m, "lnxspin", MTX_DEF)
+#define        DEFINE_SPINLOCK(lock)                                   \
+       spinlock_t lock;                                        \
+       MTX_SYSINIT(lock, &(lock).m, spin_lock_name("lnxspin"), MTX_DEF)
+
+#define        assert_spin_locked(_l) do {             \
+       if (SPIN_SKIP())                        \
+               break;                          \
+       mtx_assert(&(_l)->m, MA_OWNED);         \
+} while (0)
 
-#endif /* _LINUX_SPINLOCK_H_ */
+#endif                                 /* _LINUX_SPINLOCK_H_ */
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscr...@freebsd.org"

Reply via email to