This patch exchanges the two loops that collect the percpu
statistics data. Instead of folding each field across all cpus,
the outer loop now walks the cpus, so all the items of each cpu
are read sequentially, which can reduce cache misses.

Signed-off-by: Jia He <hejia...@gmail.com>
---
 net/sctp/proc.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
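
For reviewers who want to see the access pattern in isolation, below is a
minimal userspace sketch of the loop exchange. It is an illustration only,
assuming the per-cpu statistics can be modeled as a plain 2D array; NCPUS,
NFIELDS and stats are hypothetical names, not kernel symbols.

/*
 * Minimal userspace model of the loop exchange. Assumption: per-cpu
 * statistics modeled as a plain 2D array; NCPUS, NFIELDS and stats
 * are hypothetical names, not kernel symbols.
 */
#include <stdio.h>
#include <string.h>

#define NCPUS	4
#define NFIELDS	8

static unsigned long stats[NCPUS][NFIELDS];	/* stats[c] is one cpu's block */

int main(void)
{
	unsigned long buff[NFIELDS];
	int i, c;

	/* Sample data so the traversal has something to sum. */
	for (c = 0; c < NCPUS; c++)
		for (i = 0; i < NFIELDS; i++)
			stats[c][i] = c + i;

	/*
	 * Old order (per field): for each field, fold across all cpus,
	 * so consecutive reads jump between NCPUS distant blocks.
	 *
	 * New order (per cpu): each cpu's block is read front to back
	 * exactly once, which is the cache-friendly direction.
	 */
	memset(buff, 0, sizeof(buff));
	for (c = 0; c < NCPUS; c++)
		for (i = 0; i < NFIELDS; i++)
			buff[i] += stats[c][i];

	for (i = 0; i < NFIELDS; i++)
		printf("field %d = %lu\n", i, buff[i]);

	return 0;
}

The patch below follows the same shape, with snmp_get_cpu_field()
playing the role of the stats[c][i] access.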

diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index ef8ba77..085fb95 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -74,12 +74,19 @@ static const struct snmp_mib sctp_snmp_list[] = {
 static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
 {
        struct net *net = seq->private;
-       int i;
+       int i, c;
+       unsigned long buff[SCTP_MIB_MAX];
 
+       memset(buff, 0, sizeof(unsigned long) * SCTP_MIB_MAX);
+
+       for_each_possible_cpu(c)
+               for (i = 0; sctp_snmp_list[i].name != NULL; i++)
+                       buff[i] += snmp_get_cpu_field(
+                                               net->sctp.sctp_statistics,
+                                               c, sctp_snmp_list[i].entry);
        for (i = 0; sctp_snmp_list[i].name != NULL; i++)
                seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
-                          snmp_fold_field(net->sctp.sctp_statistics,
-                                     sctp_snmp_list[i].entry));
+                                               buff[i]);
 
        return 0;
 }
-- 
1.8.3.1