Hi Jason,

[auto build test ERROR on v4.6-rc3]
[also build test ERROR on next-20160412]
[cannot apply to tip/core/locking]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Jason-Low/MCS-spinlock-Use-smp_cond_load_acquire/20160413-053726
config: i386-randconfig-s0-201615 (attached as .config)
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors/warnings (new ones prefixed by >>):

   In file included from kernel/locking/qspinlock.c:70:0:
   kernel/locking/mcs_spinlock.h: In function 'mcs_spin_lock':
>> kernel/locking/mcs_spinlock.h:31:2: error: implicit declaration of function 'smp_cond_load_acquire' [-Werror=implicit-function-declaration]
     smp_cond_load_acquire(&l, VAL);     \
     ^
>> kernel/locking/mcs_spinlock.h:91:2: note: in expansion of macro 'arch_mcs_spin_lock_contended'
     arch_mcs_spin_lock_contended(&node->locked);
     ^
>> kernel/locking/mcs_spinlock.h:31:24: error: lvalue required as unary '&' operand
     smp_cond_load_acquire(&l, VAL);     \
                           ^
>> kernel/locking/mcs_spinlock.h:91:2: note: in expansion of macro 'arch_mcs_spin_lock_contended'
     arch_mcs_spin_lock_contended(&node->locked);
     ^
>> kernel/locking/mcs_spinlock.h:31:28: error: 'VAL' undeclared (first use in this function)
     smp_cond_load_acquire(&l, VAL);     \
                               ^
>> kernel/locking/mcs_spinlock.h:91:2: note: in expansion of macro 'arch_mcs_spin_lock_contended'
     arch_mcs_spin_lock_contended(&node->locked);
     ^
   kernel/locking/mcs_spinlock.h:31:28: note: each undeclared identifier is reported only once for each function it appears in
     smp_cond_load_acquire(&l, VAL);     \
                               ^
>> kernel/locking/mcs_spinlock.h:91:2: note: in expansion of macro 'arch_mcs_spin_lock_contended'
     arch_mcs_spin_lock_contended(&node->locked);
     ^
   kernel/locking/qspinlock.c: In function 'native_queued_spin_lock_slowpath':
>> kernel/locking/mcs_spinlock.h:31:24: error: lvalue required as unary '&' operand
     smp_cond_load_acquire(&l, VAL);     \
                           ^
>> kernel/locking/qspinlock.c:411:3: note: in expansion of macro 'arch_mcs_spin_lock_contended'
      arch_mcs_spin_lock_contended(&node->locked);
      ^
>> kernel/locking/mcs_spinlock.h:31:28: error: 'VAL' undeclared (first use in this function)
     smp_cond_load_acquire(&l, VAL);     \
                               ^
>> kernel/locking/qspinlock.c:411:3: note: in expansion of macro 'arch_mcs_spin_lock_contended'
      arch_mcs_spin_lock_contended(&node->locked);
      ^
   kernel/locking/qspinlock.c: In function '__pv_queued_spin_lock_slowpath':
>> kernel/locking/mcs_spinlock.h:31:24: error: lvalue required as unary '&' operand
     smp_cond_load_acquire(&l, VAL);     \
                           ^
>> kernel/locking/qspinlock.c:411:3: note: in expansion of macro 'arch_mcs_spin_lock_contended'
      arch_mcs_spin_lock_contended(&node->locked);
      ^
>> kernel/locking/mcs_spinlock.h:31:28: error: 'VAL' undeclared (first use in this function)
     smp_cond_load_acquire(&l, VAL);     \
                               ^
>> kernel/locking/qspinlock.c:411:3: note: in expansion of macro 'arch_mcs_spin_lock_contended'
      arch_mcs_spin_lock_contended(&node->locked);
      ^
   cc1: some warnings being treated as errors
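
The errors above share one root cause: this tree does not yet provide smp_cond_load_acquire() (note the "[cannot apply to tip/core/locking]" line; the helper the patch depends on presumably lives there), so the call is implicitly declared and VAL, the identifier that helper binds to the loaded value, is undeclared. The lvalue error is separate: the macro body takes &l while callers already pass &node->locked, so the expansion is &(&node->locked), the address of a non-lvalue. Below is a minimal userspace sketch of that shape, with a hypothetical stub standing in for the kernel helper (not the kernel implementation); it shows that passing l through unchanged compiles, whereas the &l form would hit the same "lvalue required" diagnostic gcc reports above.

/*
 * Hypothetical userspace sketch, NOT the kernel implementation.  The
 * stub approximates the shape of smp_cond_load_acquire(ptr, cond):
 * spin with acquire loads until cond, expressed in terms of the
 * loaded value VAL, becomes true.
 */
#include <stdatomic.h>

#define smp_cond_load_acquire(ptr, cond_expr)                           \
do {                                                                    \
        int VAL;                                                        \
        do {                                                            \
                VAL = atomic_load_explicit((_Atomic int *)(ptr),        \
                                           memory_order_acquire);       \
        } while (!(cond_expr));                                         \
} while (0)

struct mcs_spinlock {
        struct mcs_spinlock *next;
        int locked;
};

/*
 * Plausible fix: callers already pass &node->locked, so hand the
 * pointer through unchanged instead of taking &l again.
 */
#define arch_mcs_spin_lock_contended(l)                                 \
do {                                                                    \
        smp_cond_load_acquire(l, VAL);                                  \
} while (0)

static void wait_for_lock(struct mcs_spinlock *node)
{
        /* Spins until another thread stores node->locked = 1. */
        arch_mcs_spin_lock_contended(&node->locked);
}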

vim +/smp_cond_load_acquire +31 kernel/locking/mcs_spinlock.h

    25   * Using smp_cond_load_acquire() provides the acquire semantics
    26   * required so that subsequent operations happen after the
    27   * lock is acquired.
    28   */
    29  #define arch_mcs_spin_lock_contended(l)                                 \
    30  do {                                                                    \
  > 31          smp_cond_load_acquire(&l, VAL);                                 \
    32  } while (0)
    33  #endif
    34  
    35  #ifndef arch_mcs_spin_unlock_contended
    36  /*
    37   * smp_store_release() provides a memory barrier to ensure all
    38   * operations in the critical section has been completed before
    39   * unlocking.
    40   */
    41  #define arch_mcs_spin_unlock_contended(l)                               \
    42          smp_store_release((l), 1)
    43  #endif
    44  
    45  /*
    46   * Note: the smp_load_acquire/smp_store_release pair is not
    47   * sufficient to form a full memory barrier across
    48   * cpus for many architectures (except x86) for mcs_unlock and mcs_lock.
    49   * For applications that need a full barrier across multiple cpus
    50   * with mcs_unlock and mcs_lock pair, smp_mb__after_unlock_lock() should be
    51   * used after mcs_lock.
    52   */
    53  
    54  /*
    55   * In order to acquire the lock, the caller should declare a local node and
    56   * pass a reference of the node to this function in addition to the lock.
    57   * If the lock has already been acquired, then this will proceed to spin
    58   * on this node->locked until the previous lock holder sets the node->locked
    59   * in mcs_spin_unlock().
    60   */
    61  static inline
    62  void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
    63  {
    64          struct mcs_spinlock *prev;
    65  
    66          /* Init node */
    67          node->locked = 0;
    68          node->next   = NULL;
    69  
    70          /*
    71           * We rely on the full barrier with global transitivity implied by the
    72           * below xchg() to order the initialization stores above against any
    73           * observation of @node. And to provide the ACQUIRE ordering associated
    74           * with a LOCK primitive.
    75           */
    76          prev = xchg(lock, node);
    77          if (likely(prev == NULL)) {
    78                  /*
    79                   * Lock acquired, don't need to set node->locked to 1. Threads
    80                   * only spin on its own node->locked value for lock acquisition.
    81                   * However, since this thread can immediately acquire the lock
    82                   * and does not proceed to spin on its own node->locked, this
    83                   * value won't be used. If a debug mode is needed to
    84                   * audit lock status, then set node->locked value here.
    85                   */
    86                  return;
    87          }
    88          WRITE_ONCE(prev->next, node);
    89  
    90          /* Wait until the lock holder passes the lock down. */
  > 91          arch_mcs_spin_lock_contended(&node->locked);
    92  }
    93  
    94  /*
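
For reference, the caller contract described in the comment at lines 54-60 looks roughly as below; everything other than the mcs_spin_lock()/mcs_spin_unlock() calls themselves (the lock variable and function name) is illustrative, not taken from the patch:

/* Hypothetical caller: each locker supplies its own stack-local node. */
static struct mcs_spinlock *demo_lock;  /* NULL while uncontended */

static void demo_critical_section(void)
{
        struct mcs_spinlock node;       /* local node, per the contract */

        mcs_spin_lock(&demo_lock, &node);
        /*
         * Per the note at lines 45-52, insert smp_mb__after_unlock_lock()
         * here if a full barrier across an unlock/lock pair is required.
         */
        /* ... critical section ... */
        mcs_spin_unlock(&demo_lock, &node);
}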

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
