pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
[PATCH] list: use list_replace_init() instead of list_splice_init()
author Oleg Nesterov <oleg@tv-sign.ru>
Fri, 23 Jun 2006 09:05:55 +0000 (02:05 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Fri, 23 Jun 2006 14:43:07 +0000 (07:43 -0700)
list_splice_init(list, head) does unneeded work if it is known that
list_empty(head) == 1.  We can use list_replace_init() instead; a short
sketch of the difference follows the sign-offs below.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
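
For readers who do not have include/linux/list.h handy, here is a rough
sketch of why the replacement is cheaper.  The helper bodies below are
simplified/inlined from the list.h of that era, and the names in the usage
pattern (some_lock, shared_list, local) are made up for illustration; they
are not part of the patch.

	/* Splicing must cope with a non-empty destination: check for an
	 * empty source, stitch the source's entries in behind head, then
	 * reinitialize the source.  The destination head must already be
	 * initialized, hence the LIST_HEAD(l) at the call sites being
	 * converted.
	 */
	static inline void list_splice_init(struct list_head *list,
					    struct list_head *head)
	{
		if (!list_empty(list)) {
			__list_splice(list, head);
			INIT_LIST_HEAD(list);
		}
	}

	/* Replacing simply drops "new" into "old"'s position and empties
	 * "old": four pointer writes plus the reinit, no emptiness check,
	 * and "new" needs no prior initialization -- which is why the call
	 * sites can switch from LIST_HEAD(l) to a bare struct list_head l.
	 * This is only equivalent because the destination is known to be
	 * empty; any entries already on "new" would be lost.
	 */
	static inline void list_replace_init(struct list_head *old,
					     struct list_head *new)
	{
		new->next = old->next;
		new->next->prev = new;
		new->prev = old->prev;
		new->prev->next = new;
		INIT_LIST_HEAD(old);
	}

	/* The typical "detach everything under the lock" pattern after the
	 * conversion (hypothetical names):
	 */
	struct list_head local;

	spin_lock_irq(&some_lock);
	list_replace_init(&shared_list, &local);
	spin_unlock_irq(&some_lock);
	/* ... walk &local without holding some_lock ... */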
arch/i386/mm/pageattr.c
block/ll_rw_blk.c
fs/aio.c
kernel/timer.c
kernel/workqueue.c
net/core/dev.c
net/core/link_watch.c

index 92c3d9f0e7314f984fdd60a3ffbbfd3b01259edc..0887b34bc59b987543c10b92ae53d73b83c361ef 100644 (file)
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -209,19 +209,19 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 }
 
 void global_flush_tlb(void)
-{ 
-       LIST_HEAD(l);
+{
+       struct list_head l;
        struct page *pg, *next;
 
        BUG_ON(irqs_disabled());
 
        spin_lock_irq(&cpa_lock);
-       list_splice_init(&df_list, &l);
+       list_replace_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
        flush_map();
        list_for_each_entry_safe(pg, next, &l, lru)
                __free_page(pg);
-} 
+}
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void kernel_map_pages(struct page *page, int numpages, int enable)
index 7eb36c53f4b7868247141a105ef82e8be3e2d96f..465b54312c5930b3d5a44d404aa221d06da6612b 100644 (file)
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3359,12 +3359,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
  */
 static void blk_done_softirq(struct softirq_action *h)
 {
-       struct list_head *cpu_list;
-       LIST_HEAD(local_list);
+       struct list_head *cpu_list, local_list;
 
        local_irq_disable();
        cpu_list = &__get_cpu_var(blk_cpu_done);
-       list_splice_init(cpu_list, &local_list);
+       list_replace_init(cpu_list, &local_list);
        local_irq_enable();
 
        while (!list_empty(&local_list)) {
index e41e932ba489ff040fe9ed43385fc164a8dd6515..8c34a62df7d7bbf7a692e5bff528e5c525d842f2 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -777,11 +777,11 @@ out:
 static int __aio_run_iocbs(struct kioctx *ctx)
 {
        struct kiocb *iocb;
-       LIST_HEAD(run_list);
+       struct list_head run_list;
 
        assert_spin_locked(&ctx->ctx_lock);
 
-       list_splice_init(&ctx->run_list, &run_list);
+       list_replace_init(&ctx->run_list, &run_list);
        while (!list_empty(&run_list)) {
                iocb = list_entry(run_list.next, struct kiocb,
                        ki_run_list);
index 9e49deed468cd8b2e63f2033d2971d961edeb110..3bf0e9ed2dbe622cc205b47a41b5151babe4695f 100644 (file)
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -419,10 +419,10 @@ static inline void __run_timers(tvec_base_t *base)
 
        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
-               struct list_head work_list = LIST_HEAD_INIT(work_list);
+               struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;
+
                /*
                 * Cascade timers:
                 */
@@ -431,8 +431,8 @@ static inline void __run_timers(tvec_base_t *base)
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
-               ++base->timer_jiffies; 
-               list_splice_init(base->tv1.vec + index, &work_list);
+               ++base->timer_jiffies;
+               list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;
index 880fb415a8f601ebb5fff636fee20b958fe71149..740c5abceb07bdf2e1072a816594ada2fcff3e70 100644 (file)
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -531,11 +531,11 @@ int current_is_keventd(void)
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-       LIST_HEAD(list);
+       struct list_head list;
        struct work_struct *work;
 
        spin_lock_irq(&cwq->lock);
-       list_splice_init(&cwq->worklist, &list);
+       list_replace_init(&cwq->worklist, &list);
 
        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
index ab39fe17cb58bd0a49ce65376e279347a46fe248..195a5e96b2d1609eeaead8d466624a40b6b967e4 100644 (file)
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2980,7 +2980,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
 static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
-       struct list_head list = LIST_HEAD_INIT(list);
+       struct list_head list;
 
        /* Need to guard against multiple cpu's getting out of order. */
        mutex_lock(&net_todo_run_mutex);
@@ -2995,9 +2995,9 @@ void netdev_run_todo(void)
 
        /* Snapshot list, allow later requests */
        spin_lock(&net_todo_list_lock);
-       list_splice_init(&net_todo_list, &list);
+       list_replace_init(&net_todo_list, &list);
        spin_unlock(&net_todo_list_lock);
-               
+
        while (!list_empty(&list)) {
                struct net_device *dev
                        = list_entry(list.next, struct net_device, todo_list);
index 646937cc2d84db521aeff0b4eedb7a1bc89670d5..0f37266411b507a208c3f4dcd80240445cbc37d2 100644 (file)
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -91,11 +91,10 @@ static void rfc2863_policy(struct net_device *dev)
 /* Must be called with the rtnl semaphore held */
 void linkwatch_run_queue(void)
 {
-       LIST_HEAD(head);
-       struct list_head *n, *next;
+       struct list_head head, *n, *next;
 
        spin_lock_irq(&lweventlist_lock);
-       list_splice_init(&lweventlist, &head);
+       list_replace_init(&lweventlist, &head);
        spin_unlock_irq(&lweventlist_lock);
 
        list_for_each_safe(n, next, &head) {