[PATCH] md: Core of raid5 resize process
author     NeilBrown <neilb@suse.de>
           Mon, 27 Mar 2006 09:18:09 +0000 (01:18 -0800)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Mon, 27 Mar 2006 16:45:01 +0000 (08:45 -0800)
This patch provides the core of the resize/expand process.

sync_request notices if a 'reshape' is happening and acts accordingly.

It allocates new stripe_heads for the next chunk-wide stripe in the target
geometry, marking them STRIPE_EXPANDING.

Then it finds which stripe heads in the old geometry can provide data needed
by these and marks them STRIPE_EXPAND_SOURCE.  This causes handle_stripe to
read all blocks on those stripes.
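
The old/new-geometry mapping is just address arithmetic: each per-device
sector of a destination stripe covers a run of logical array sectors, and
those logical sectors are then located in the old geometry to find the source
stripes.  Below is a minimal userspace sketch of that arithmetic
(logical_to_dev_sector and the example figures are illustrative, not taken
from the patch); it assumes one parity disk per stripe and ignores the
rotating-parity layout, which raid5_compute_sector() and the new
stripe_to_pdidx() helper handle in the kernel code further down.

#include <stdio.h>

typedef unsigned long long sector_t;

/* Map an array-wide logical sector to the per-device sector that holds it. */
static sector_t logical_to_dev_sector(sector_t logical, int data_disks,
				      int sectors_per_chunk)
{
	sector_t chunk_number = logical / sectors_per_chunk;
	sector_t chunk_offset = logical % sectors_per_chunk;

	return (chunk_number / data_disks) * sectors_per_chunk + chunk_offset;
}

int main(void)
{
	int sectors_per_chunk = 128;	/* 64KiB chunks */
	int old_data_disks = 3;		/* e.g. growing a 4-disk array ... */
	int new_data_disks = 4;		/* ... to 5 disks */
	sector_t dest = 0;		/* destination stripe, per-device sector */

	/* Logical sectors covered by one chunk-wide destination stripe ... */
	sector_t first_logical = dest * new_data_disks;
	sector_t last_logical = (dest + sectors_per_chunk) * new_data_disks - 1;

	/* ... and the old-geometry per-device sectors that must be read. */
	printf("source stripes: sectors %llu .. %llu\n",
	       logical_to_dev_sector(first_logical, old_data_disks, sectors_per_chunk),
	       logical_to_dev_sector(last_logical, old_data_disks, sectors_per_chunk));
	return 0;
}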

Once all blocks on a STRIPE_EXPAND_SOURCE stripe_head are read, any that are
needed are copied into the corresponding STRIPE_EXPANDING stripe_head.  Once a
STRIPE_EXPANDING stripe_head is full, it is marked STRIPE_EXPAND_READY and then
written out and released.
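
For orientation, the copy-and-complete step looks roughly like the sketch
below.  The struct and function names are invented stand-ins for stripe_head,
R5_Expanded and STRIPE_EXPAND_READY; the authoritative logic, including the
parity computation and the writes, is in the handle_stripe() hunk of
drivers/md/raid5.c further down.

#include <string.h>

#define STRIPE_SIZE	4096
#define MAX_DISKS	16

struct sketch_stripe {
	int disks;			/* disks in this stripe's geometry */
	int pd_idx;			/* index of the parity block */
	int expanded[MAX_DISKS];	/* stand-in for the R5_Expanded flag */
	int expand_ready;		/* stand-in for STRIPE_EXPAND_READY */
	char page[MAX_DISKS][STRIPE_SIZE];
};

/*
 * Copy one block that has been read from a source (old-geometry) stripe into
 * its slot in the destination (new-geometry) stripe.  Once every data block
 * of the destination holds post-expand data, flag it ready so parity can be
 * computed and the whole stripe written out and released.
 */
static void copy_block_to_dest(struct sketch_stripe *src, int src_idx,
			       struct sketch_stripe *dst, int dst_idx)
{
	int j;

	memcpy(dst->page[dst_idx], src->page[src_idx], STRIPE_SIZE);
	dst->expanded[dst_idx] = 1;

	for (j = 0; j < dst->disks; j++)
		if (j != dst->pd_idx && !dst->expanded[j])
			return;		/* still waiting on other source blocks */
	dst->expand_ready = 1;
}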

Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
drivers/md/md.c
drivers/md/raid5.c
include/linux/raid/md_k.h
include/linux/raid/raid5.h

drivers/md/md.c
index c7b7656f9aa5cd4e98ad9999b32d8ddb44199838..8e65986bc63fd06e966de23706c0bc2d7e994d77 100644
@@ -2165,7 +2165,9 @@ action_show(mddev_t *mddev, char *page)
        char *type = "idle";
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
-               if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+               if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+                       type = "reshape";
+               else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                        if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
                                type = "resync";
                        else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
@@ -4088,8 +4090,10 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev)
                seq_printf(seq, "] ");
        }
        seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
+                  (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
+                   "reshape" :
                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
-                      "resync" : "recovery"),
+                      "resync" : "recovery")),
                      per_milli/10, per_milli % 10,
                   (unsigned long long) resync,
                   (unsigned long long) max_blocks);
@@ -4543,7 +4547,9 @@ static void md_do_sync(mddev_t *mddev)
                 */
                max_sectors = mddev->resync_max_sectors;
                mddev->resync_mismatches = 0;
-       } else
+       } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+               max_sectors = mddev->size << 1;
+       else
                /* recovery follows the physical size of devices */
                max_sectors = mddev->size << 1;
 
@@ -4679,6 +4685,8 @@ static void md_do_sync(mddev_t *mddev)
        mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
 
        if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
+           test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+           !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
            mddev->curr_resync > 2 &&
            mddev->curr_resync >= mddev->recovery_cp) {
                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
drivers/md/raid5.c
index 7a6df515b00820497777dd188b621a560dc80d3b..56cba8d3e39886d458f955bfa77afc4e9712a337 100644
@@ -93,11 +93,11 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
                                if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
                                        md_wakeup_thread(conf->mddev->thread);
                        }
-                       list_add_tail(&sh->lru, &conf->inactive_list);
                        atomic_dec(&conf->active_stripes);
-                       if (!conf->inactive_blocked ||
-                           atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
+                       if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
+                               list_add_tail(&sh->lru, &conf->inactive_list);
                                wake_up(&conf->wait_for_stripe);
+                       }
                }
        }
 }
@@ -273,9 +273,8 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
                        } else {
                                if (!test_bit(STRIPE_HANDLE, &sh->state))
                                        atomic_inc(&conf->active_stripes);
-                               if (list_empty(&sh->lru))
-                                       BUG();
-                               list_del_init(&sh->lru);
+                               if (!list_empty(&sh->lru))
+                                       list_del_init(&sh->lru);
                        }
                }
        } while (sh == NULL);
@@ -1035,6 +1034,18 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        return 0;
 }
 
+static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
+{
+       int sectors_per_chunk = conf->chunk_size >> 9;
+       sector_t x = stripe;
+       int pd_idx, dd_idx;
+       int chunk_offset = sector_div(x, sectors_per_chunk);
+       stripe = x;
+       raid5_compute_sector(stripe*(disks-1)*sectors_per_chunk
+                            + chunk_offset, disks, disks-1, &dd_idx, &pd_idx, conf);
+       return pd_idx;
+}
+
 
 /*
  * handle_stripe - do things to a stripe.
@@ -1061,7 +1072,7 @@ static void handle_stripe(struct stripe_head *sh)
        struct bio *return_bi= NULL;
        struct bio *bi;
        int i;
-       int syncing;
+       int syncing, expanding, expanded;
        int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
        int non_overwrite = 0;
        int failed_num=0;
@@ -1076,6 +1087,8 @@ static void handle_stripe(struct stripe_head *sh)
        clear_bit(STRIPE_DELAYED, &sh->state);
 
        syncing = test_bit(STRIPE_SYNCING, &sh->state);
+       expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+       expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
        /* Now to look around and see what can be done */
 
        rcu_read_lock();
@@ -1268,13 +1281,14 @@ static void handle_stripe(struct stripe_head *sh)
         * parity, or to satisfy requests
         * or to load a block that is being partially written.
         */
-       if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
+       if (to_read || non_overwrite || (syncing && (uptodate < disks)) || expanding) {
                for (i=disks; i--;) {
                        dev = &sh->dev[i];
                        if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
                            (dev->toread ||
                             (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
                             syncing ||
+                            expanding ||
                             (failed && (sh->dev[failed_num].toread ||
                                         (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
                                    )
@@ -1464,13 +1478,76 @@ static void handle_stripe(struct stripe_head *sh)
                        set_bit(R5_Wantwrite, &dev->flags);
                        set_bit(R5_ReWrite, &dev->flags);
                        set_bit(R5_LOCKED, &dev->flags);
+                       locked++;
                } else {
                        /* let's read it back */
                        set_bit(R5_Wantread, &dev->flags);
                        set_bit(R5_LOCKED, &dev->flags);
+                       locked++;
                }
        }
 
+       if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
+               /* Need to write out all blocks after computing parity */
+               sh->disks = conf->raid_disks;
+               sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
+               compute_parity(sh, RECONSTRUCT_WRITE);
+               for (i= conf->raid_disks; i--;) {
+                       set_bit(R5_LOCKED, &sh->dev[i].flags);
+                       locked++;
+                       set_bit(R5_Wantwrite, &sh->dev[i].flags);
+               }
+               clear_bit(STRIPE_EXPANDING, &sh->state);
+       } else if (expanded) {
+               clear_bit(STRIPE_EXPAND_READY, &sh->state);
+               wake_up(&conf->wait_for_overlap);
+               md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
+       }
+
+       if (expanding && locked == 0) {
+               /* We have read all the blocks in this stripe and now we need to
+                * copy some of them into a target stripe for expand.
+                */
+               clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+               for (i=0; i< sh->disks; i++)
+                       if (i != sh->pd_idx) {
+                               int dd_idx, pd_idx, j;
+                               struct stripe_head *sh2;
+
+                               sector_t bn = compute_blocknr(sh, i);
+                               sector_t s = raid5_compute_sector(bn, conf->raid_disks,
+                                                                 conf->raid_disks-1,
+                                                                 &dd_idx, &pd_idx, conf);
+                               sh2 = get_active_stripe(conf, s, conf->raid_disks, pd_idx, 1);
+                               if (sh2 == NULL)
+                                       /* so far only the early blocks of this stripe
+                                        * have been requested.  When later blocks
+                                        * get requested, we will try again
+                                        */
+                                       continue;
+                               if(!test_bit(STRIPE_EXPANDING, &sh2->state) ||
+                                  test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
+                                       /* must have already done this block */
+                                       release_stripe(sh2);
+                                       continue;
+                               }
+                               memcpy(page_address(sh2->dev[dd_idx].page),
+                                      page_address(sh->dev[i].page),
+                                      STRIPE_SIZE);
+                               set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
+                               set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
+                               for (j=0; j<conf->raid_disks; j++)
+                                       if (j != sh2->pd_idx &&
+                                           !test_bit(R5_Expanded, &sh2->dev[j].flags))
+                                               break;
+                               if (j == conf->raid_disks) {
+                                       set_bit(STRIPE_EXPAND_READY, &sh2->state);
+                                       set_bit(STRIPE_HANDLE, &sh2->state);
+                               }
+                               release_stripe(sh2);
+                       }
+       }
+
        spin_unlock(&sh->lock);
 
        while ((bi=return_bi)) {
@@ -1509,7 +1586,7 @@ static void handle_stripe(struct stripe_head *sh)
                rcu_read_unlock();
  
                if (rdev) {
-                       if (syncing)
+                       if (syncing || expanding || expanded)
                                md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 
                        bi->bi_bdev = rdev->bdev;
@@ -1757,12 +1834,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 {
        raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
        struct stripe_head *sh;
-       int sectors_per_chunk = conf->chunk_size >> 9;
-       sector_t x;
-       unsigned long stripe;
-       int chunk_offset;
-       int dd_idx, pd_idx;
-       sector_t first_sector;
+       int pd_idx;
+       sector_t first_sector, last_sector;
        int raid_disks = conf->raid_disks;
        int data_disks = raid_disks-1;
        sector_t max_sector = mddev->size << 1;
@@ -1781,6 +1854,80 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 
                return 0;
        }
+
+       if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
+               /* reshaping is quite different to recovery/resync so it is
+                * handled quite separately ... here.
+                *
+                * On each call to sync_request, we gather one chunk worth of
+                * destination stripes and flag them as expanding.
+                * Then we find all the source stripes and request reads.
+                * As the reads complete, handle_stripe will copy the data
+                * into the destination stripe and release that stripe.
+                */
+               int i;
+               int dd_idx;
+               for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
+                       int j;
+                       int skipped = 0;
+                       pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
+                       sh = get_active_stripe(conf, sector_nr+i,
+                                              conf->raid_disks, pd_idx, 0);
+                       set_bit(STRIPE_EXPANDING, &sh->state);
+                       /* If any of this stripe is beyond the end of the old
+                        * array, then we need to zero those blocks
+                        */
+                       for (j=sh->disks; j--;) {
+                               sector_t s;
+                               if (j == sh->pd_idx)
+                                       continue;
+                               s = compute_blocknr(sh, j);
+                               if (s < (mddev->array_size<<1)) {
+                                       skipped = 1;
+                                       continue;
+                               }
+                               memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
+                               set_bit(R5_Expanded, &sh->dev[j].flags);
+                               set_bit(R5_UPTODATE, &sh->dev[j].flags);
+                       }
+                       if (!skipped) {
+                               set_bit(STRIPE_EXPAND_READY, &sh->state);
+                               set_bit(STRIPE_HANDLE, &sh->state);
+                       }
+                       release_stripe(sh);
+               }
+               spin_lock_irq(&conf->device_lock);
+               conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
+               spin_unlock_irq(&conf->device_lock);
+               /* Ok, those stripe are ready. We can start scheduling
+                * reads on the source stripes.
+                * The source stripes are determined by mapping the first and last
+                * block on the destination stripes.
+                */
+               raid_disks = conf->previous_raid_disks;
+               data_disks = raid_disks - 1;
+               first_sector =
+                       raid5_compute_sector(sector_nr*(conf->raid_disks-1),
+                                            raid_disks, data_disks,
+                                            &dd_idx, &pd_idx, conf);
+               last_sector =
+                       raid5_compute_sector((sector_nr+conf->chunk_size/512)
+                                              *(conf->raid_disks-1) -1,
+                                            raid_disks, data_disks,
+                                            &dd_idx, &pd_idx, conf);
+               if (last_sector >= (mddev->size<<1))
+                       last_sector = (mddev->size<<1)-1;
+               while (first_sector <= last_sector) {
+                       pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
+                       sh = get_active_stripe(conf, first_sector,
+                                              conf->previous_raid_disks, pd_idx, 0);
+                       set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+                       set_bit(STRIPE_HANDLE, &sh->state);
+                       release_stripe(sh);
+                       first_sector += STRIPE_SECTORS;
+               }
+               return conf->chunk_size>>9;
+       }
        /* if there is 1 or more failed drives and we are trying
         * to resync, then assert that we are finished, because there is
         * nothing we can do.
@@ -1799,13 +1946,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
        }
 
-       x = sector_nr;
-       chunk_offset = sector_div(x, sectors_per_chunk);
-       stripe = x;
-       BUG_ON(x != stripe);
-
-       first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
-               + chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
+       pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
        sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
        if (sh == NULL) {
                sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
include/linux/raid/md_k.h
index 617b9506c7609aca62e0b9401a3fa4705a8faec9..4e26ef2cacca7e10f2936619884b6be6ec052023 100644
@@ -157,6 +157,9 @@ struct mddev_s
         * DONE:     thread is done and is waiting to be reaped
         * REQUEST:  user-space has requested a sync (used with SYNC)
         * CHECK:    user-space request for for check-only, no repair
+        * RESHAPE:  A reshape is happening
+        *
+        * If neither SYNC or RESHAPE are set, then it is a recovery.
         */
 #define        MD_RECOVERY_RUNNING     0
 #define        MD_RECOVERY_SYNC        1
@@ -166,6 +169,7 @@ struct mddev_s
 #define        MD_RECOVERY_NEEDED      5
 #define        MD_RECOVERY_REQUESTED   6
 #define        MD_RECOVERY_CHECK       7
+#define MD_RECOVERY_RESHAPE    8
        unsigned long                   recovery;
 
        int                             in_sync;        /* know to not need resync */
include/linux/raid/raid5.h
index 6fa274aea2a0b1283e49a512c642769d64aa44da..55c738d50508e995cc4679901915753f0f663c99 100644
@@ -157,6 +157,7 @@ struct stripe_head {
 #define        R5_ReadError    8       /* seen a read error here recently */
 #define        R5_ReWrite      9       /* have tried to over-write the readerror */
 
+#define        R5_Expanded     10      /* This block now has post-expand data */
 /*
  * Write method
  */
@@ -176,7 +177,8 @@ struct stripe_head {
 #define        STRIPE_DEGRADED         7
 #define        STRIPE_BIT_DELAY        8
 #define        STRIPE_EXPANDING        9
-
+#define        STRIPE_EXPAND_SOURCE    10
+#define        STRIPE_EXPAND_READY     11
 /*
  * Plugging:
  *