Add an rps_check_max_queues() function which checks whether the input
number is greater than rps_max_num_queues. If it is, set
rps_max_num_queues to that value and recreate the sock_flow_table to
update the queue masks used in the table entries.
---
 include/linux/netdevice.h | 10 ++++++++
 net/core/sysctl_net_core.c | 48 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 58 insertions(+)
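Not part of the patch, just an illustrative sketch for reviewers of how a
caller might use the new helper. The function example_setup_rx_queues()
and its nr_rx_queues argument are invented for this note, and whether the
helper is meant to take a queue count or a highest queue index is an
assumption. The point is that the inline rps_check_max_queues() returns 0
without taking the mutex whenever rps_max_num_queues already covers the
request, so calling it on every queue reconfiguration is cheap.

	#include <linux/netdevice.h>

	/* Hypothetical driver path, assuming CONFIG_RPS: make sure RFS
	 * flow entries can encode any of the nr_rx_queues queues before
	 * they are exposed to the stack.
	 */
	static int example_setup_rx_queues(struct net_device *dev,
					   unsigned int nr_rx_queues)
	{
		int err;

		/* Fast path is the inline comparison against
		 * rps_max_num_queues; only a larger request falls through
		 * to __rps_check_max_queues(), which takes sock_flow_mutex
		 * and may rebuild the sock flow table.
		 */
		err = rps_check_max_queues(nr_rx_queues);
		if (err)
			return err;

		return netif_set_real_num_rx_queues(dev, nr_rx_queues);
	}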
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d528aa61fea3..48ba1c1fc644 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -804,6 +804,16 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
 	}
 }
 
+int __rps_check_max_queues(unsigned int idx);
+
+static inline int rps_check_max_queues(unsigned int idx)
+{
+	if (idx < rps_max_num_queues)
+		return 0;
+
+	return __rps_check_max_queues(idx);
+}
+
 #ifdef CONFIG_RFS_ACCEL
 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
 			 u32 flow_id, u16 filter_id);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index d09471f29d89..743c46148135 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -127,6 +127,54 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
 
 	return ret;
 }
+
+int __rps_check_max_queues(unsigned int idx)
+{
+	unsigned int old;
+	size_t size;
+	int ret = 0;
+
+	/* Assume maximum queues should be at least the number of CPUs.
+	 * This avoids too much thrashing of the sock flow table at
+	 * initialization.
+	 */
+	if (idx < nr_cpu_ids && nr_cpu_ids < RPS_MAX_QID)
+		idx = nr_cpu_ids;
+
+	if (idx > RPS_MAX_QID)
+		return -EINVAL;
+
+	mutex_lock(&sock_flow_mutex);
+
+	old = rps_max_num_queues;
+	rps_max_num_queues = idx;
+
+	/* No need to reallocate table since nothing is changing */
+
+	if (roundup_pow_of_two(old) != roundup_pow_of_two(idx)) {
+		struct rps_sock_flow_table *sock_table;
+
+		sock_table = rcu_dereference_protected(rps_sock_flow_table,
+						lockdep_is_held(&sock_flow_mutex));
+		size = sock_table ? sock_table->mask + 1 : 0;
+
+		/* Force creation of a new rps_sock_flow_table. It's
+		 * the same size as the existing table, but we expunge
+		 * any stale queue entries that would refer to the old
+		 * queue mask.
+		 */
+		ret = rps_create_sock_flow_table(size, size,
+						 sock_table, true);
+		if (ret)
+			rps_max_num_queues = old;
+	}
+
+	mutex_unlock(&sock_flow_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(__rps_check_max_queues);
+
 #endif /* CONFIG_RPS */
 
 #ifdef CONFIG_NET_FLOW_LIMIT
-- 
2.25.1