tree:   https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git rcu/next
head:   31f76cdc284fc45751d88b5ed754630a5f8a8914
commit: 3f4013e60f5eaf15913143831cfa8b8a2e2fbcb5 [33/34] Squash with 7202bfecfbec ("rcu: Pretend ->boost_mtx acquired legitimately")
config: i386-randconfig-x077-201740 (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
        git checkout 3f4013e60f5eaf15913143831cfa8b8a2e2fbcb5
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors (new ones prefixed by >>):

   In file included from kernel/rcu/tree.c:4279:0:
   kernel/rcu/tree_plugin.h: In function 'rcu_read_unlock_special':
>> kernel/rcu/tree_plugin.h:533:4: error: implicit declaration of function 'rt_mutex_futex_unlock' [-Werror=implicit-function-declaration]
       rt_mutex_futex_unlock(&rnp->boost_mtx);
       ^~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors

vim +/rt_mutex_futex_unlock +533 kernel/rcu/tree_plugin.h

   408  
   409  /*
   410   * Handle special cases during rcu_read_unlock(), such as needing to
   411   * notify RCU core processing or task having blocked during the RCU
   412   * read-side critical section.
   413   */
   414  void rcu_read_unlock_special(struct task_struct *t)
   415  {
   416          bool empty_exp;
   417          bool empty_norm;
   418          bool empty_exp_now;
   419          unsigned long flags;
   420          struct list_head *np;
   421          bool drop_boost_mutex = false;
   422          struct rcu_data *rdp;
   423          struct rcu_node *rnp;
   424          union rcu_special special;
   425  
   426          /* NMI handlers cannot block and cannot safely manipulate state. */
   427          if (in_nmi())
   428                  return;
   429  
   430          local_irq_save(flags);
   431  
   432          /*
   433           * If RCU core is waiting for this CPU to exit its critical section,
   434           * report the fact that it has exited.  Because irqs are disabled,
   435           * t->rcu_read_unlock_special cannot change.
   436           */
   437          special = t->rcu_read_unlock_special;
   438          if (special.b.need_qs) {
   439                  rcu_preempt_qs();
   440                  t->rcu_read_unlock_special.b.need_qs = false;
   441                  if (!t->rcu_read_unlock_special.s) {
   442                          local_irq_restore(flags);
   443                          return;
   444                  }
   445          }
   446  
   447          /*
   448           * Respond to a request for an expedited grace period, but only if
   449           * we were not preempted, meaning that we were running on the same
   450           * CPU throughout.  If we were preempted, the exp_need_qs flag
   451           * would have been cleared at the time of the first preemption,
   452           * and the quiescent state would be reported when we were dequeued.
   453           */
   454          if (special.b.exp_need_qs) {
   455                  WARN_ON_ONCE(special.b.blocked);
   456                  t->rcu_read_unlock_special.b.exp_need_qs = false;
   457                  rdp = this_cpu_ptr(rcu_state_p->rda);
   458                  rcu_report_exp_rdp(rcu_state_p, rdp, true);
   459                  if (!t->rcu_read_unlock_special.s) {
   460                          local_irq_restore(flags);
   461                          return;
   462                  }
   463          }
   464  
   465          /* Hardware IRQ handlers cannot block, complain if they get here. */
   466          if (in_irq() || in_serving_softirq()) {
   467                  lockdep_rcu_suspicious(__FILE__, __LINE__,
   468                                         "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
   469                  pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
   470                           t->rcu_read_unlock_special.s,
   471                           t->rcu_read_unlock_special.b.blocked,
   472                           t->rcu_read_unlock_special.b.exp_need_qs,
   473                           t->rcu_read_unlock_special.b.need_qs);
   474                  local_irq_restore(flags);
   475                  return;
   476          }
   477  
   478          /* Clean up if blocked during RCU read-side critical section. */
   479          if (special.b.blocked) {
   480                  t->rcu_read_unlock_special.b.blocked = false;
   481  
   482                  /*
   483                   * Remove this task from the list it blocked on.  The task
   484                   * now remains queued on the rcu_node corresponding to the
   485                   * CPU it first blocked on, so there is no longer any need
   486                   * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
   487                   */
   488                  rnp = t->rcu_blocked_node;
   489                  raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
   490                  WARN_ON_ONCE(rnp != t->rcu_blocked_node);
   491                  WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
   492                  empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
   493                  empty_exp = sync_rcu_preempt_exp_done(rnp);
   494                  smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
   495                  np = rcu_next_node_entry(t, rnp);
   496                  list_del_init(&t->rcu_node_entry);
   497                  t->rcu_blocked_node = NULL;
   498                  trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
   499                                                  rnp->gpnum, t->pid);
   500                  if (&t->rcu_node_entry == rnp->gp_tasks)
   501                          rnp->gp_tasks = np;
   502                  if (&t->rcu_node_entry == rnp->exp_tasks)
   503                          rnp->exp_tasks = np;
   504                  if (IS_ENABLED(CONFIG_RCU_BOOST)) {
   505                          /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
   506                          drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
   507                          if (&t->rcu_node_entry == rnp->boost_tasks)
   508                                  rnp->boost_tasks = np;
   509                  }
   510  
   511                  /*
   512                   * If this was the last task on the current list, and if
   513                   * we aren't waiting on any CPUs, report the quiescent state.
   514                   * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
   515                   * so we must take a snapshot of the expedited state.
   516                   */
   517                  empty_exp_now = sync_rcu_preempt_exp_done(rnp);
   518                  if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
   519                          trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
   520                                                           rnp->gpnum,
   521                                                           0, rnp->qsmask,
   522                                                           rnp->level,
   523                                                           rnp->grplo,
   524                                                           rnp->grphi,
   525                                                           !!rnp->gp_tasks);
   526                          rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
   527                  } else {
   528                          raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
   529                  }
   530  
   531                  /* Unboost if we were boosted. */
   532                  if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
 > 533                          rt_mutex_futex_unlock(&rnp->boost_mtx);
   534  
   535                  /*
   536                   * If this was the last task on the expedited lists,
   537                   * then we need to report up the rcu_node hierarchy.
   538                   */
   539                  if (!empty_exp && empty_exp_now)
   540                          rcu_report_exp_rnp(rcu_state_p, rnp, true);
   541          } else {
   542                  local_irq_restore(flags);
   543          }
   544  }
   545  
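A note on the likely cause: the call at line 533 is guarded only by
IS_ENABLED(CONFIG_RCU_BOOST), which expands to a compile-time constant, so
the call expression is still parsed and type-checked when CONFIG_RCU_BOOST=n
and a prototype for rt_mutex_futex_unlock() must be in scope.  In this tree
the declaration appears to come from kernel/locking/rtmutex_common.h, which
tree_plugin.h includes only under #ifdef CONFIG_RCU_BOOST.  A minimal
standalone sketch of that pattern (FEATURE_BOOST and feature_unlock() are
made-up stand-ins for CONFIG_RCU_BOOST and rt_mutex_futex_unlock(), not
kernel symbols):

        /*
         * Standalone illustration in plain C: an IS_ENABLED()-style guard
         * still leaves the dead branch visible to the compiler, so the
         * callee needs a declaration even when the feature is off.
         */
        #include <stdio.h>

        #define FEATURE_BOOST 0         /* stand-in for CONFIG_RCU_BOOST=n */

        #if FEATURE_BOOST
        static void feature_unlock(void) { puts("unlocked"); }
        #else
        /*
         * Without some declaration here, gcc -Werror=implicit-function-declaration
         * rejects the call below, the same class of failure as the error above.
         */
        static inline void feature_unlock(void) { }
        #endif

        int main(void)
        {
                if (FEATURE_BOOST)      /* compile-time false, but still parsed */
                        feature_unlock();
                return 0;
        }

Compiling this with "gcc -Werror=implicit-function-declaration" succeeds as
written and fails once the #else stub is removed, mirroring the report.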

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz