[PATCH] percpu data: only iterate over possible CPUs
author    Eric Dumazet <dada1@cosmosbay.com>
          Sun, 5 Feb 2006 07:27:36 +0000 (23:27 -0800)
committer Linus Torvalds <torvalds@g5.osdl.org>
          Sun, 5 Feb 2006 19:06:51 +0000 (11:06 -0800)
percpu_data blindly allocates bootmem memory to store NR_CPUS instances of
cpudata, instead of allocating memory only for possible cpus.

In preparation for changing that, we need to convert various 0 -> NR_CPUS
loops to use for_each_cpu().

(The above only applies to users of asm-generic/percpu.h.  powerpc has gone it
alone and is presently only allocating memory for present CPUs, so it's
currently corrupting memory).

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Jens Axboe <axboe@suse.de>
Cc: Anton Blanchard <anton@samba.org>
Acked-by: William Irwin <wli@holomorphy.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/i386/kernel/nmi.c
block/ll_rw_blk.c
drivers/scsi/scsi.c
fs/file.c
kernel/sched.c
mm/page_alloc.c
net/core/dev.c
net/core/utils.c
net/ipv4/proc.c
net/ipv6/proc.c
net/socket.c

index d661703ac1cb713db7327de628e4f7f5b09efcb1..63f39a7e2c96b0e3db1ce48ab73696e95db60a0c 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -138,7 +138,7 @@ static int __init check_nmi_watchdog(void)
        if (nmi_watchdog == NMI_LOCAL_APIC)
                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_cpu(cpu)
                prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
        local_irq_enable();
        mdelay((10*1000)/nmi_hz); // wait 10 ticks
index f9fc07efd2da995d933fda955895175ec1e5012d..e5aad8314585ef6c434917ffb4a42b068d61ad94 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3453,7 +3453,7 @@ int __init blk_dev_init(void)
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-       for (i = 0; i < NR_CPUS; i++)
+       for_each_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
index 245ca99a641eb6432ee14434500531d36675fb22..c551bb84dbfb6c46351624dcf640d3ac9be44ca4 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1245,7 +1245,7 @@ static int __init init_scsi(void)
        if (error)
                goto cleanup_sysctl;
 
-       for (i = 0; i < NR_CPUS; i++)
+       for_each_cpu(i)
                INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
 
        devfs_mk_dir("scsi");
index fd066b261c751875de1c4871974b54d335eafcd7..cea7cbea11d0d5fca944ab11a331568fe0949fb7 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -379,7 +379,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
 void __init files_defer_init(void)
 {
        int i;
-       /* Really early - can't use for_each_cpu */
-       for (i = 0; i < NR_CPUS; i++)
+       for_each_cpu(i)
                fdtable_defer_list_init(i);
 }
index f77f23f8f479c898666082f3c9da0d52bb5a5bab..839466fdfb4cf285c44807a79223a11a9f1017fb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6109,7 +6109,7 @@ void __init sched_init(void)
        runqueue_t *rq;
        int i, j, k;
 
-       for (i = 0; i < NR_CPUS; i++) {
+       for_each_cpu(i) {
                prio_array_t *array;
 
                rq = cpu_rq(i);
index 44b4eb4202d91e33acc45384f1468a4823ff5442..dde04ff4be31873b88c38efbf94d30fd06ca5329 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1213,18 +1213,21 @@ static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
        int cpu = 0;
 
-       memset(ret, 0, sizeof(*ret));
+       memset(ret, 0, nr * sizeof(unsigned long));
        cpus_and(*cpumask, *cpumask, cpu_online_map);
 
        cpu = first_cpu(*cpumask);
        while (cpu < NR_CPUS) {
                unsigned long *in, *out, off;
 
+               if (!cpu_isset(cpu, *cpumask))
+                       continue;
+
                in = (unsigned long *)&per_cpu(page_states, cpu);
 
                cpu = next_cpu(cpu, *cpumask);
 
-               if (cpu < NR_CPUS)
+               if (likely(cpu < NR_CPUS))
                        prefetch(&per_cpu(page_states, cpu));
 
                out = (unsigned long *)ret;
@@ -1886,8 +1889,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
  * not check if the processor is online before following the pageset pointer.
  * Other parts of the kernel may not check if the zone is available.
  */
-static struct per_cpu_pageset
-       boot_pageset[NR_CPUS];
+static struct per_cpu_pageset boot_pageset[NR_CPUS];
 
 /*
  * Dynamically allocate memory for the
index ffb82073056e761267dcbd480603d01645cfd7cf..2afb0de953291c3dfb2e16b25ba764a43e3cd860 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3237,7 +3237,7 @@ static int __init net_dev_init(void)
         *      Initialise the packet receive queues.
         */
 
-       for (i = 0; i < NR_CPUS; i++) {
+       for_each_cpu(i) {
                struct softnet_data *queue;
 
                queue = &per_cpu(softnet_data, i);
index ac1d1fcf8673f63158691ee0004952b803206f84..fdc4f38bc46ccfbcc86c4a36698489cfcd0ba08b 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -121,7 +121,7 @@ void __init net_random_init(void)
 {
        int i;
 
-       for (i = 0; i < NR_CPUS; i++) {
+       for_each_cpu(i) {
                struct nrnd_state *state = &per_cpu(net_rand_state,i);
                __net_srandom(state, i+jiffies);
        }
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
        unsigned long seed[NR_CPUS];
 
        get_random_bytes(seed, sizeof(seed));
-       for (i = 0; i < NR_CPUS; i++) {
+       for_each_cpu(i) {
                struct nrnd_state *state = &per_cpu(net_rand_state,i);
                __net_srandom(state, seed[i]);
        }
index 39d49dc333a7f0dc47e1bbd1f8d7a7c02c3f2cfb..1b167c4bb3beb0254f446a6ab21ca41fd87a0455 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
        int res = 0;
        int cpu;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_cpu(cpu)
                res += proto->stats[cpu].inuse;
 
        return res;
index 50a13e75d70ec5fecd20269c8c4e90bc523164cc..4238b1ed886012a331b1c5dd16ddffa1ea2eaaae 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
        int res = 0;
        int cpu;
 
-       for (cpu=0; cpu<NR_CPUS; cpu++)
+       for_each_cpu(cpu)
                res += proto->stats[cpu].inuse;
 
        return res;
index b38a263853c32038b07fe7dcb9dae457d4456722..a00851f981dbfcb17b26f64d7bfc2a5e64e31a5d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2078,7 +2078,7 @@ void socket_seq_show(struct seq_file *seq)
        int cpu;
        int counter = 0;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_cpu(cpu)
                counter += per_cpu(sockets_in_use, cpu);
 
        /* It can be negative, by the way. 8) */