pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
irq, fs/proc: replace loop with nr_irqs for proc/stat
author Yinghai Lu <yhlu.kernel@gmail.com>
Wed, 20 Aug 2008 03:50:12 +0000 (20:50 -0700)
committer Ingo Molnar <mingo@elte.hu>
Thu, 16 Oct 2008 14:52:33 +0000 (16:52 +0200)
Replace another nr_irqs loop to avoid the allocation of all sparse
irq entries - use for_each_irq_desc instead.

v2: make sure arches without GENERIC_HARDIRQS work too

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
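
The gist of the change, reduced to a minimal standalone sketch (sum_irq_counts is a hypothetical helper used only for illustration, not part of the patch; show_stat() performs the same summation once per possible CPU):

#include <linux/irq.h>
#include <linux/kernel_stat.h>

/* Hypothetical illustration of the pattern change in show_stat(). */
static u64 sum_irq_counts(int cpu)
{
	u64 sum = 0;
	int j;
	struct irq_desc *desc;

	/*
	 * Old pattern: dense walk over every possible IRQ number.  With
	 * sparse IRQs this touches (and therefore requires) a descriptor
	 * for each of the nr_irqs entries.
	 */
	for (j = 0; j < nr_irqs; j++)
		sum += kstat_irqs_cpu(j, cpu);

	sum = 0;

	/*
	 * New pattern: only the irq descriptors that actually exist are
	 * visited, so nothing has to be allocated just to read counters.
	 */
	for_each_irq_desc(j, desc)
		sum += kstat_irqs_cpu(j, cpu);

	return sum;
}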
fs/proc/proc_misc.c
include/linux/interrupt.h

index aa069acf61a07505e2fe12abf3c0219e0d8827f0..c3cbabe8b38e2a702cf1edbeed6daf7d19e15a4d 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/pagemap.h>
+#include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
@@ -501,17 +502,16 @@ static const struct file_operations proc_vmalloc_operations = {
 
 static int show_stat(struct seq_file *p, void *v)
 {
-       int i;
+       int i, j;
        unsigned long jif;
        cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
        cputime64_t guest;
        u64 sum = 0;
        struct timespec boottime;
-       unsigned int *per_irq_sum;
-
-       per_irq_sum = kzalloc(sizeof(unsigned int)*nr_irqs, GFP_KERNEL);
-       if (!per_irq_sum)
-               return -ENOMEM;
+       unsigned int per_irq_sum;
+#ifdef CONFIG_GENERIC_HARDIRQS
+       struct irq_desc *desc;
+#endif
 
        user = nice = system = idle = iowait =
                irq = softirq = steal = cputime64_zero;
@@ -520,8 +520,6 @@ static int show_stat(struct seq_file *p, void *v)
        jif = boottime.tv_sec;
 
        for_each_possible_cpu(i) {
-               int j;
-
                user = cputime64_add(user, kstat_cpu(i).cpustat.user);
                nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
                system = cputime64_add(system, kstat_cpu(i).cpustat.system);
@@ -531,10 +529,12 @@ static int show_stat(struct seq_file *p, void *v)
                softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
                steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
                guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
-               for (j = 0; j < nr_irqs; j++) {
-                       unsigned int temp = kstat_irqs_cpu(j, i);
+               for_each_irq_desc(j, desc)
+               {
+                       unsigned int temp;
+
+                       temp = kstat_irqs_cpu(j, i);
                        sum += temp;
-                       per_irq_sum[j] += temp;
                }
                sum += arch_irq_stat_cpu(i);
        }
@@ -577,8 +577,23 @@ static int show_stat(struct seq_file *p, void *v)
        }
        seq_printf(p, "intr %llu", (unsigned long long)sum);
 
-       for (i = 0; i < nr_irqs; i++)
-               seq_printf(p, " %u", per_irq_sum[i]);
+       /* sum again ? it could be updated? */
+       for_each_irq_desc(j, desc)
+       {
+               per_irq_sum = 0;
+               for_each_possible_cpu(i) {
+                       unsigned int temp;
+
+                       temp = kstat_irqs_cpu(j, i);
+                       per_irq_sum += temp;
+               }
+
+#ifdef CONFIG_HAVE_SPARSE_IRQ
+               seq_printf(p, " %u:%u", j, per_irq_sum);
+#else
+               seq_printf(p, " %u", per_irq_sum);
+#endif
+       }
 
        seq_printf(p,
                "\nctxt %llu\n"
@@ -592,7 +607,6 @@ static int show_stat(struct seq_file *p, void *v)
                nr_running(),
                nr_iowait());
 
-       kfree(per_irq_sum);
        return 0;
 }
 
index 511803853a5bfac2b47d09599adeaa43c7a16221..d4039a0b23f403df9c028420d9a86816d0974cd0 100644 (file)
 
 extern int nr_irqs;
 
+#ifndef CONFIG_GENERIC_HARDIRQS
+#define for_each_irq_desc(irq, desc)           \
+       for (irq = 0; irq < nr_irqs; irq++)
+#endif
+
 /*
  * These correspond to the IORESOURCE_IRQ_* defines in
  * linux/ioport.h to select the interrupt line behaviour.  When
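
For completeness, the fallback macro added above never expands its desc argument, so code shared between configurations only has to guard the declaration of desc, exactly as show_stat() now does. A minimal sketch of such a caller (count_irq_lines is a hypothetical name, not taken from the patch):

#include <linux/irq.h>
#include <linux/interrupt.h>

static unsigned int count_irq_lines(void)
{
	unsigned int n = 0;
	int irq;
#ifdef CONFIG_GENERIC_HARDIRQS
	struct irq_desc *desc;
#endif

	/*
	 * With GENERIC_HARDIRQS this walks the existing irq descriptors;
	 * otherwise the fallback macro iterates 0..nr_irqs-1 and simply
	 * drops the (then undeclared) desc token.
	 */
	for_each_irq_desc(irq, desc)
		n++;

	return n;
}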