[NET]: More kzalloc conversions.
author Andrew Morton <akpm@osdl.org>
Fri, 7 Apr 2006 21:52:59 +0000 (14:52 -0700)
committer David S. Miller <davem@sunset.davemloft.net>
Mon, 10 Apr 2006 05:25:48 +0000 (22:25 -0700)
Signed-off-by: David S. Miller <davem@davemloft.net>
net/core/dv.c
net/core/flow.c
net/core/gen_estimator.c
net/core/neighbour.c
net/core/request_sock.c
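
The change is the same in every hunk below: drop the explicit memset() (and, in dv.c, the
redundant cast) and let the allocator hand back already-zeroed memory. As a rough sketch of
the pattern, using a made-up struct foo rather than code from the patch (kmalloc() and
kzalloc() come from <linux/slab.h> and take the same size/gfp arguments):

	struct foo *p;

	/* before: allocate, check, then zero by hand */
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return -ENOMEM;
	memset(p, 0, sizeof(*p));

	/* after: kzalloc() returns memory that is already zeroed */
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return -ENOMEM;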

diff --git a/net/core/dv.c b/net/core/dv.c
index cf581407538c4b160947548b0fa350840b959dd0..29ee77f15932465115d64f7f5d01d395e69af6d6 100644
@@ -55,15 +55,12 @@ int alloc_divert_blk(struct net_device *dev)
 
        dev->divert = NULL;
        if (dev->type == ARPHRD_ETHER) {
-               dev->divert = (struct divert_blk *)
-                       kmalloc(alloc_size, GFP_KERNEL);
+               dev->divert = kzalloc(alloc_size, GFP_KERNEL);
                if (dev->divert == NULL) {
                        printk(KERN_INFO "divert: unable to allocate divert_blk for %s\n",
                               dev->name);
                        return -ENOMEM;
                }
-
-               memset(dev->divert, 0, sizeof(struct divert_blk));
                dev_hold(dev);
        }
 
diff --git a/net/core/flow.c b/net/core/flow.c
index 55789f832edadc23d41851f6009b3d01f6072148..885a2f655db08ea1c44810876611b805fb261da4 100644
@@ -318,12 +318,10 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
                /* NOTHING */;
 
        flow_table(cpu) = (struct flow_cache_entry **)
-               __get_free_pages(GFP_KERNEL, order);
+               __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
        if (!flow_table(cpu))
                panic("NET: failed to allocate flow cache order %lu\n", order);
 
-       memset(flow_table(cpu), 0, PAGE_SIZE << order);
-
        flow_hash_rnd_recalc(cpu) = 1;
        flow_count(cpu) = 0;
 
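kzalloc() only covers slab allocations; the flow table above (like the large neighbour hash
further down) is a multi-page allocation, so the zeroing moves into the gfp mask instead:
__GFP_ZERO makes __get_free_pages() return zero-filled pages, and the PAGE_SIZE << order
memset() can go. A minimal sketch with an invented table pointer, assuming <linux/gfp.h>:

	unsigned long *table;

	table = (unsigned long *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!table)
		return -ENOMEM;
	/* pages come back zero-filled; no memset(table, 0, PAGE_SIZE << order) needed */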
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index b07c029e8219e3fbe6493c28e560faf0de473612..3cad026764f0c9b94f9615ee22da0f9b4ed16833 100644
@@ -159,11 +159,10 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
        if (parm->interval < -2 || parm->interval > 3)
                return -EINVAL;
 
-       est = kmalloc(sizeof(*est), GFP_KERNEL);
+       est = kzalloc(sizeof(*est), GFP_KERNEL);
        if (est == NULL)
                return -ENOBUFS;
 
-       memset(est, 0, sizeof(*est));
        est->interval = parm->interval + 2;
        est->bstats = bstats;
        est->rate_est = rate_est;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 0c8666872d10fdf0d90fea6b327952c6f6493051..2ec8693fb778f581dd114838700131d810016e3d 100644
@@ -284,14 +284,11 @@ static struct neighbour **neigh_hash_alloc(unsigned int entries)
        struct neighbour **ret;
 
        if (size <= PAGE_SIZE) {
-               ret = kmalloc(size, GFP_ATOMIC);
+               ret = kzalloc(size, GFP_ATOMIC);
        } else {
                ret = (struct neighbour **)
-                       __get_free_pages(GFP_ATOMIC, get_order(size));
+                     __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
        }
-       if (ret)
-               memset(ret, 0, size);
-
        return ret;
 }
 
@@ -1089,8 +1086,7 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
                if (hh->hh_type == protocol)
                        break;
 
-       if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
-               memset(hh, 0, sizeof(struct hh_cache));
+       if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
                rwlock_init(&hh->hh_lock);
                hh->hh_type = protocol;
                atomic_set(&hh->hh_refcnt, 0);
@@ -1366,13 +1362,11 @@ void neigh_table_init(struct neigh_table *tbl)
        tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
 
        phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
-       tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);
+       tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
 
        if (!tbl->hash_buckets || !tbl->phash_buckets)
                panic("cannot allocate neighbour cache hashes");
 
-       memset(tbl->phash_buckets, 0, phsize);
-
        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
 
        rwlock_init(&tbl->lock);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 1e44eda1fda9f028bb3e9d67c06a4d6dda30311e..79ebd75fbe4dec5c57ddd34c4501c5858b04d8fb 100644
@@ -38,13 +38,11 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 {
        const int lopt_size = sizeof(struct listen_sock) +
                              nr_table_entries * sizeof(struct request_sock *);
-       struct listen_sock *lopt = kmalloc(lopt_size, GFP_KERNEL);
+       struct listen_sock *lopt = kzalloc(lopt_size, GFP_KERNEL);
 
        if (lopt == NULL)
                return -ENOMEM;
 
-       memset(lopt, 0, lopt_size);
-
        for (lopt->max_qlen_log = 6;
             (1 << lopt->max_qlen_log) < sysctl_max_syn_backlog;
             lopt->max_qlen_log++);
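
The request_sock conversion applies the same pattern to a variable-sized allocation: the
struct listen_sock header and the trailing array of request_sock pointers are zeroed by a
single kzalloc() of lopt_size. Sketched with invented names (struct table, struct entry,
nentries), not code from the patch:

	size_t size = sizeof(struct table) + nentries * sizeof(struct entry *);
	struct table *t = kzalloc(size, GFP_KERNEL);

	if (t == NULL)
		return -ENOMEM;
	/* header fields and all nentries pointer slots start out zeroed */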