Add a /proc/sys/kernel scheduler knob named sched_rr_timeslice_ms
that allows the SCHED_RR timeslice value to be changed globally. The
user-visible value is in milliseconds but it is stored internally as
jiffies. Writing 0 (zero) resets the timeslice to the default
(currently 100ms).
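
As a usage illustration (not part of the patch), a small userspace
program could drive the knob roughly as sketched below; the path
follows the description above, the 25ms value is only an example, and
writing requires root:

    /*
     * Illustrative only: set the SCHED_RR timeslice to 25ms, then
     * read the knob back.  Error handling is kept minimal.
     */
    #include <stdio.h>

    #define KNOB "/proc/sys/kernel/sched_rr_timeslice_ms"

    int main(void)
    {
            FILE *f = fopen(KNOB, "w");
            int val;

            if (!f) {
                    perror(KNOB);
                    return 1;
            }
            fprintf(f, "%d\n", 25);   /* example value, in milliseconds */
            fclose(f);

            f = fopen(KNOB, "r");
            if (!f) {
                    perror(KNOB);
                    return 1;
            }
            if (fscanf(f, "%d", &val) == 1)
                    printf("%s = %d\n", KNOB, val);   /* value currently exposed */
            fclose(f);
            return 0;
    }

Writing 0 instead would restore the 100ms default, as noted above.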

Signed-off-by: Clark Williams <willi...@redhat.com>
---
 include/linux/sched/sysctl.h |  5 +++++
 kernel/sched/core.c          | 19 +++++++++++++++++++
 kernel/sched/rt.c            |  6 ++++--
 kernel/sysctl.c              |  8 ++++++++
 4 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 912adab..fda131f 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -10,6 +10,11 @@
  */
 #define RR_TIMESLICE           (100 * HZ / 1000)
 
+extern int sched_rr_timeslice;
+extern int sched_rr_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos);
+
 /*
  *  control realtime throttling:
  *
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 26058d0..1c39c33 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7508,6 +7508,25 @@ static int sched_rt_global_constraints(void)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+int sched_rr_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+{
+       int ret;
+       static DEFINE_MUTEX(mutex);
+
+       mutex_lock(&mutex);
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
+       /* make sure that internally we keep jiffies */
+       /* also, writing zero resets timeslice to default */
+       if (!ret && write) {
+               sched_rr_timeslice = sched_rr_timeslice <= 0 ?
+                       RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+       }
+       mutex_unlock(&mutex);
+       return ret;
+}
+
 int sched_rt_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7c4a007..0fd9d42 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -7,6 +7,8 @@
 
 #include <linux/slab.h>
 
+int sched_rr_timeslice = RR_TIMESLICE;
+
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
 
 struct rt_bandwidth def_rt_bandwidth;
@@ -2017,7 +2019,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
        if (--p->rt.time_slice)
                return;
 
-       p->rt.time_slice = RR_TIMESLICE;
+       p->rt.time_slice = sched_rr_timeslice;
 
        /*
         * Requeue to the end of queue if we (and all of our ancestors) are the
@@ -2048,7 +2050,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
         * Time slice is 0 for SCHED_FIFO tasks
         */
        if (task->policy == SCHED_RR)
-               return RR_TIMESLICE;
+               return sched_rr_timeslice;
        else
                return 0;
 }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c88878d..1eabf86 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -403,6 +403,14 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = sched_rt_handler,
        },
+       {
+               .procname       = "sched_rr_timeslice_ms",
+               .data           = &sched_rr_timeslice,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = sched_rr_handler,
+       },
+
 #ifdef CONFIG_SCHED_AUTOGROUP
        {
                .procname       = "sched_autogroup_enabled",
-- 
1.7.11.7
