md: move allocation of ->queue from mddev_find to md_probe
author	NeilBrown <neilb@suse.de>
	Thu, 8 Jan 2009 21:31:08 +0000 (08:31 +1100)
committer	NeilBrown <neilb@suse.de>
	Thu, 8 Jan 2009 21:31:08 +0000 (08:31 +1100)
It is more balanced to do only the simple initialisation in
mddev_find, which allocates and links a new md device, and to leave
the more sophisticated allocation to md_probe (which calls
mddev_find).  md_probe already allocates the gendisk; it should
allocate the queue too.

Signed-off-by: NeilBrown <neilb@suse.de>
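
In outline, the change described above makes md_probe responsible for both
allocations: mddev_find now only creates and links the mddev, and md_probe
allocates the request queue just before the gendisk, unwinding the queue
itself if alloc_disk() fails.  A condensed sketch of the resulting md_probe
ordering, paraphrasing the hunks below (the surrounding setup and the rest
of the function are omitted):

	static struct kobject *md_probe(dev_t dev, int *part, void *data)
	{
		mddev_t *mddev = mddev_find(dev);	/* simple init + linkage only */
		struct gendisk *disk;
		...
		/* queue allocation now lives here, not in mddev_find */
		mddev->queue = blk_alloc_queue(GFP_KERNEL);
		if (!mddev->queue) {
			mutex_unlock(&disks_mutex);
			mddev_put(mddev);		/* ->queue is still NULL here */
			return NULL;
		}
		/* the queue is brand new, so the unlocked flag setter is safe */
		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
		blk_queue_make_request(mddev->queue, md_fail_request);

		disk = alloc_disk(1 << shift);
		if (!disk) {
			mutex_unlock(&disks_mutex);
			blk_cleanup_queue(mddev->queue);	/* undo our own allocation */
			mddev->queue = NULL;
			mddev_put(mddev);
			return NULL;
		}
		...
	}

This is also why mddev_put gains the NULL check in the first hunk: an mddev
created by mddev_find can now be dropped before md_probe has attached a
queue, so blk_cleanup_queue must not be called unconditionally.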
drivers/md/md.c

index 1f770c16d435c9ed0fd4444e5e091d90a438de87..da838cc32cc9f3d9247a93e4a7b1c8feeb751664 100644
@@ -221,7 +221,9 @@ static void mddev_put(mddev_t *mddev)
        if (!mddev->raid_disks && list_empty(&mddev->disks)) {
                list_del(&mddev->all_mddevs);
                spin_unlock(&all_mddevs_lock);
-               blk_cleanup_queue(mddev->queue);
+               if (mddev->queue)
+                       blk_cleanup_queue(mddev->queue);
+               mddev->queue = NULL;
                if (mddev->sysfs_state)
                        sysfs_put(mddev->sysfs_state);
                mddev->sysfs_state = NULL;
@@ -275,16 +277,6 @@ static mddev_t * mddev_find(dev_t unit)
        new->resync_max = MaxSector;
        new->level = LEVEL_NONE;
 
-       new->queue = blk_alloc_queue(GFP_KERNEL);
-       if (!new->queue) {
-               kfree(new);
-               return NULL;
-       }
-       /* Can be unlocked because the queue is new: no concurrency */
-       queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);
-
-       blk_queue_make_request(new->queue, md_fail_request);
-
        goto retry;
 }
 
@@ -3493,9 +3485,23 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
                mddev_put(mddev);
                return NULL;
        }
+
+       mddev->queue = blk_alloc_queue(GFP_KERNEL);
+       if (!mddev->queue) {
+               mutex_unlock(&disks_mutex);
+               mddev_put(mddev);
+               return NULL;
+       }
+       /* Can be unlocked because the queue is new: no concurrency */
+       queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
+
+       blk_queue_make_request(mddev->queue, md_fail_request);
+
        disk = alloc_disk(1 << shift);
        if (!disk) {
                mutex_unlock(&disks_mutex);
+               blk_cleanup_queue(mddev->queue);
+               mddev->queue = NULL;
                mddev_put(mddev);
                return NULL;
        }