[PATCH] cfq-iosched: kill cfq_exit_lock
author Jens Axboe <axboe@suse.de>
Tue, 29 Aug 2006 07:05:44 +0000 (09:05 +0200)
committer Jens Axboe <axboe@nelson.home.kernel.dk>
Sat, 30 Sep 2006 18:29:36 +0000 (20:29 +0200)
cfq_exit_lock is protecting two things now:

- The per-ioc rbtree of cfq_io_contexts

- The per-cfqd linked list of cfq_io_contexts

The per-cfqd linked list can be protected by the queue lock, since it is
(by definition) per-cfqd, just as the queue lock is.
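
Condensed from the cfq-iosched.c hunks below (the cfqq teardown inside the
helper is elided), the exit path then simply takes the queue lock around
the per-cfqd list manipulation instead of cfq_exit_lock:

	static void cfq_exit_single_io_context(struct cfq_io_context *cic)
	{
		struct cfq_data *cfqd = cic->key;

		if (cfqd) {
			request_queue_t *q = cfqd->queue;

			/* queue lock now covers cfqd->cic_list */
			spin_lock_irq(q->queue_lock);
			/* unlinks cic from cfqd->cic_list, clears cic->key,
			 * and drops the async/sync cfqq references */
			__cfq_exit_single_io_context(cfqd, cic);
			spin_unlock_irq(q->queue_lock);
		}
	}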

The per-ioc rbtree is used and updated almost exclusively by the owning
process itself.  The only outside user is the io priority change path.
If we move the priority changing so that it does not browse the rbtree,
we can drop all locking from the rbtree updates and lookups completely.
Let sys_ioprio_set() just mark processes as having their io priority
changed and lazily update the private cfq io contexts the next time io
is queued, and we can remove this locking as well.
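
The lazy scheme, condensed from the fs/ioprio.c and cfq-iosched.c hunks
below (the rbtree walk inside cfq_ioc_set_ioprio() is elided):

	/* set_task_ioprio(): no tree walk, no lock, just flag the io_context */
	if (ioc)
		ioc->ioprio_changed = 1;

	/* cfq_get_io_context(): picked up lazily on the next queued io */
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);	/* clears the flag, walks ioc->cic_root */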

Signed-off-by: Jens Axboe <axboe@suse.de>
block/cfq-iosched.c
block/ll_rw_blk.c
fs/ioprio.c
include/linux/blkdev.h

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ec24284e9d397048f4c4e5ee162363adca45d8b6..33e0b0c5e31d42b932b5631106a9235f0375ac7d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -31,8 +31,6 @@ static int cfq_slice_idle = HZ / 125;
 
 #define CFQ_KEY_ASYNC          (0)
 
-static DEFINE_SPINLOCK(cfq_exit_lock);
-
 /*
  * for the hash of cfqq inside the cfqd
  */
@@ -1084,12 +1082,6 @@ static void cfq_free_io_context(struct io_context *ioc)
                complete(ioc_gone);
 }
 
-static void cfq_trim(struct io_context *ioc)
-{
-       ioc->set_ioprio = NULL;
-       cfq_free_io_context(ioc);
-}
-
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        if (unlikely(cfqq == cfqd->active_queue))
@@ -1101,6 +1093,10 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
                                         struct cfq_io_context *cic)
 {
+       list_del_init(&cic->queue_list);
+       smp_wmb();
+       cic->key = NULL;
+
        if (cic->cfqq[ASYNC]) {
                cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
                cic->cfqq[ASYNC] = NULL;
@@ -1110,9 +1106,6 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
                cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
                cic->cfqq[SYNC] = NULL;
        }
-
-       cic->key = NULL;
-       list_del_init(&cic->queue_list);
 }
 
 
@@ -1123,27 +1116,23 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 {
        struct cfq_data *cfqd = cic->key;
 
-       WARN_ON(!irqs_disabled());
-
        if (cfqd) {
                request_queue_t *q = cfqd->queue;
 
-               spin_lock(q->queue_lock);
+               spin_lock_irq(q->queue_lock);
                __cfq_exit_single_io_context(cfqd, cic);
-               spin_unlock(q->queue_lock);
+               spin_unlock_irq(q->queue_lock);
        }
 }
 
 static void cfq_exit_io_context(struct io_context *ioc)
 {
        struct cfq_io_context *__cic;
-       unsigned long flags;
        struct rb_node *n;
 
        /*
         * put the reference this task is holding to the various queues
         */
-       spin_lock_irqsave(&cfq_exit_lock, flags);
 
        n = rb_first(&ioc->cic_root);
        while (n != NULL) {
@@ -1152,8 +1141,6 @@ static void cfq_exit_io_context(struct io_context *ioc)
                cfq_exit_single_io_context(__cic);
                n = rb_next(n);
        }
-
-       spin_unlock_irqrestore(&cfq_exit_lock, flags);
 }
 
 static struct cfq_io_context *
@@ -1248,15 +1235,12 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
        spin_unlock(cfqd->queue->queue_lock);
 }
 
-/*
- * callback from sys_ioprio_set, irqs are disabled
- */
-static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
+static void cfq_ioc_set_ioprio(struct io_context *ioc)
 {
        struct cfq_io_context *cic;
        struct rb_node *n;
 
-       spin_lock(&cfq_exit_lock);
+       ioc->ioprio_changed = 0;
 
        n = rb_first(&ioc->cic_root);
        while (n != NULL) {
@@ -1265,10 +1249,6 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
                changed_ioprio(cic);
                n = rb_next(n);
        }
-
-       spin_unlock(&cfq_exit_lock);
-
-       return 0;
 }
 
 static struct cfq_queue *
@@ -1336,10 +1316,8 @@ out:
 static void
 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
 {
-       spin_lock(&cfq_exit_lock);
+       WARN_ON(!list_empty(&cic->queue_list));
        rb_erase(&cic->rb_node, &ioc->cic_root);
-       list_del_init(&cic->queue_list);
-       spin_unlock(&cfq_exit_lock);
        kmem_cache_free(cfq_ioc_pool, cic);
        atomic_dec(&ioc_count);
 }
@@ -1385,7 +1363,6 @@ cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
        cic->ioc = ioc;
        cic->key = cfqd;
 
-       ioc->set_ioprio = cfq_ioc_set_ioprio;
 restart:
        parent = NULL;
        p = &ioc->cic_root.rb_node;
@@ -1407,11 +1384,12 @@ restart:
                        BUG();
        }
 
-       spin_lock(&cfq_exit_lock);
        rb_link_node(&cic->rb_node, parent, p);
        rb_insert_color(&cic->rb_node, &ioc->cic_root);
+
+       spin_lock_irq(cfqd->queue->queue_lock);
        list_add(&cic->queue_list, &cfqd->cic_list);
-       spin_unlock(&cfq_exit_lock);
+       spin_unlock_irq(cfqd->queue->queue_lock);
 }
 
 /*
@@ -1441,6 +1419,10 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 
        cfq_cic_link(cfqd, ioc, cic);
 out:
+       smp_read_barrier_depends();
+       if (unlikely(ioc->ioprio_changed))
+               cfq_ioc_set_ioprio(ioc);
+
        return cic;
 err:
        put_io_context(ioc);
@@ -1945,7 +1927,6 @@ static void cfq_exit_queue(elevator_t *e)
 
        cfq_shutdown_timer_wq(cfqd);
 
-       spin_lock(&cfq_exit_lock);
        spin_lock_irq(q->queue_lock);
 
        if (cfqd->active_queue)
@@ -1960,7 +1941,6 @@ static void cfq_exit_queue(elevator_t *e)
        }
 
        spin_unlock_irq(q->queue_lock);
-       spin_unlock(&cfq_exit_lock);
 
        cfq_shutdown_timer_wq(cfqd);
 
@@ -2149,7 +2129,7 @@ static struct elevator_type iosched_cfq = {
                .elevator_may_queue_fn =        cfq_may_queue,
                .elevator_init_fn =             cfq_init_queue,
                .elevator_exit_fn =             cfq_exit_queue,
-               .trim =                         cfq_trim,
+               .trim =                         cfq_free_io_context,
        },
        .elevator_attrs =       cfq_attrs,
        .elevator_name =        "cfq",
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e25b4cd2dcd18c0ff2ccdd6233a191b41f50d109..508548b834f14aa160c47c661558764486ce0dd5 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3654,7 +3654,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
        if (ret) {
                atomic_set(&ret->refcount, 1);
                ret->task = current;
-               ret->set_ioprio = NULL;
+               ret->ioprio_changed = 0;
                ret->last_waited = jiffies; /* doesn't matter... */
                ret->nr_batch_requests = 0; /* because this is 0 */
                ret->aic = NULL;
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 78b1deae3fa2e1a9aaa4142fa83050b434840b90..0fd1089d7bf6cf00e6cdb0e72213af3a15c31359 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -47,8 +47,8 @@ static int set_task_ioprio(struct task_struct *task, int ioprio)
        /* see wmb() in current_io_context() */
        smp_read_barrier_depends();
 
-       if (ioc && ioc->set_ioprio)
-               ioc->set_ioprio(ioc, ioprio);
+       if (ioc)
+               ioc->ioprio_changed = 1;
 
        task_unlock(task);
        return 0;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a1e288069e2e75b8052b71367946071a57aa3a98..79cb9fa8034a5b4da6a67432be3c7a275012818b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -90,7 +90,7 @@ struct io_context {
        atomic_t refcount;
        struct task_struct *task;
 
-       int (*set_ioprio)(struct io_context *, unsigned int);
+       unsigned int ioprio_changed;
 
        /*
         * For request batching