Current use cases of torture_sched_setaffinity() are well served by its
unconditional warning on error.  However, an upcoming use case for a
preemption kthread needs to avoid warnings that might otherwise arise
when that kthread attempts to bind itself to a CPU that is on its way
offline.
This commit therefore adds a dowarn argument that, when false, suppresses
the warning.

Signed-off-by: Paul E. McKenney <paul...@kernel.org>
---
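
For reference, a minimal sketch of how the upcoming preemption kthread
might use the new argument (not part of this patch; the function name and
loop structure are illustrative only):

	/* Illustrative only: pass dowarn=false where binding can race with CPU offline. */
	static int example_preempt_kthread(void *arg)
	{
		int cpu = (long)arg;

		while (!kthread_should_stop()) {
			/* The target CPU may be on its way offline, so failure is expected. */
			if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false))
				break;
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}
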
 include/linux/torture.h      | 2 +-
 kernel/locking/locktorture.c | 6 +++---
 kernel/rcu/rcutorture.c      | 2 +-
 kernel/rcu/update.c          | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/linux/torture.h b/include/linux/torture.h
index c2e979f82f8d0..0134e7221cae6 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -130,7 +130,7 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
 #endif
 
 #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST)
-long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn);
 #endif
 
 #endif /* __LINUX_TORTURE_H */
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index de95ec07e4771..cc33470f4de97 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -106,7 +106,7 @@ static const struct kernel_param_ops lt_bind_ops = {
 module_param_cb(bind_readers, &lt_bind_ops, &bind_readers, 0644);
 module_param_cb(bind_writers, &lt_bind_ops, &bind_writers, 0644);
 
-long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn);
 
 static struct task_struct *stats_task;
 static struct task_struct **writer_tasks;
@@ -1358,7 +1358,7 @@ static int __init lock_torture_init(void)
                if (torture_init_error(firsterr))
                        goto unwind;
                if (cpumask_nonempty(bind_writers))
-                       torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers);
+                       torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers, true);
 
        create_reader:
                if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
@@ -1369,7 +1369,7 @@ static int __init lock_torture_init(void)
                if (torture_init_error(firsterr))
                        goto unwind;
                if (cpumask_nonempty(bind_readers))
-                       torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers);
+                       torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers, true);
        }
        if (stat_interval > 0) {
                firsterr = torture_create_kthread(lock_torture_stats, NULL,
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 612d276903352..908506b68c412 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -857,7 +857,7 @@ static void synchronize_rcu_trivial(void)
        int cpu;
 
        for_each_online_cpu(cpu) {
-               torture_sched_setaffinity(current->pid, cpumask_of(cpu));
+               torture_sched_setaffinity(current->pid, cpumask_of(cpu), true);
                WARN_ON_ONCE(raw_smp_processor_id() != cpu);
        }
 }
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index f8436969e0c89..c912b594ba987 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -527,12 +527,12 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
 
 #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST)
 /* Get rcutorture access to sched_setaffinity(). */
-long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn)
 {
        int ret;
 
        ret = sched_setaffinity(pid, in_mask);
-       WARN_ONCE(ret, "%s: sched_setaffinity(%d) returned %d\n", __func__, pid, ret);
+       WARN_ONCE(dowarn && ret, "%s: sched_setaffinity(%d) returned %d\n", __func__, pid, ret);
        return ret;
 }
 EXPORT_SYMBOL_GPL(torture_sched_setaffinity);
-- 
2.40.1

