From: Changbin Du <changbin...@intel.com>

The default NR_CPUS can be very large, but the actual number of possible
CPUs, nr_cpu_ids, is usually very small. On my x86 distribution kernel,
NR_CPUS is 8192 while nr_cpu_ids is 4, so the static
mask_str[NR_CPUS + 1] buffer is 8193 bytes: about two 4 KiB pages are
wasted on data of which only a few bytes are ever used.
Most machines don't have that many CPUs, so defining an array sized by
NR_CPUS just wastes memory. Let's allocate the buffer dynamically when
it is needed.

Signed-off-by: Changbin Du <changbin...@intel.com>
---
 kernel/trace/trace.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 752e5da..d1b3f11 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4184,19 +4184,18 @@ static const struct file_operations show_traces_fops = {
  */
 static DEFINE_MUTEX(tracing_cpumask_update_lock);
 
-/*
- * Temporary storage for the character representation of the
- * CPU bitmask (and one more byte for the newline):
- */
-static char mask_str[NR_CPUS + 1];
-
 static ssize_t
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
 		     size_t count, loff_t *ppos)
 {
 	struct trace_array *tr = file_inode(filp)->i_private;
+	static char *mask_str;
 	int len;
 
+	mask_str = kmalloc(nr_cpu_ids + 1, GFP_KERNEL);
+	if (!mask_str)
+		return -ENOMEM;
+
 	mutex_lock(&tracing_cpumask_update_lock);
 
 	len = snprintf(mask_str, count, "%*pb\n",
@@ -4205,10 +4204,12 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
 		count = -EINVAL;
 		goto out_err;
 	}
-	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+	count = simple_read_from_buffer(ubuf, count, ppos,
+					mask_str, nr_cpu_ids+1);
 
 out_err:
 	mutex_unlock(&tracing_cpumask_update_lock);
+	kfree(mask_str);
 
 	return count;
 }
-- 
2.7.4
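
For readability, here is roughly how tracing_cpumask_read() reads with
this patch applied. This is a condensed sketch rather than the literal
file contents: it assumes the unchanged lines between the two hunks are
the existing cpumask_pr_args() formatting call and the len >= count
check from kernel/trace/trace.c.

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	static char *mask_str;
	int len;

	/* Buffer is now sized from the runtime CPU count (plus the
	 * trailing newline) instead of the compile-time NR_CPUS maximum.
	 */
	mask_str = kmalloc(nr_cpu_ids + 1, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	mutex_lock(&tracing_cpumask_update_lock);

	/* Assumed unchanged context between the two hunks: format the
	 * tracing cpumask into the freshly allocated buffer.
	 */
	len = snprintf(mask_str, count, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos,
					mask_str, nr_cpu_ids+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);
	kfree(mask_str);	/* buffer is released on every read */

	return count;
}

The sketch keeps the static qualifier on mask_str exactly as the patch
adds it, even though the buffer is allocated and freed on every call.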