pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
[PATCH] mmc: properly use the new multi block-write error handling
author Pierre Ossman <drzeus@drzeus.cx>
Wed, 4 Oct 2006 09:15:41 +0000 (02:15 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Wed, 4 Oct 2006 14:55:15 +0000 (07:55 -0700)
Use the new multi block-write error reporting flag and properly tell the block
layer how much data was transferred before the error.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
Cc: Russell King <rmk@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
drivers/mmc/mmc_block.c

index db0e8ad439a5f6c8e09f873bf5734d9bc51df217..c1293f1bda870337e38f03d34848ff6c2fce3a5e 100644
@@ -158,13 +158,13 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 {
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
+       struct mmc_blk_request brq;
        int ret;
 
        if (mmc_card_claim_host(card))
                goto cmd_err;
 
        do {
-               struct mmc_blk_request brq;
                struct mmc_command cmd;
                u32 readcmd, writecmd;
 
@@ -278,17 +278,27 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
  cmd_err:
        mmc_card_release_host(card);
 
+       ret = 1;
+
        /*
-        * This is a little draconian, but until we get proper
-        * error handling sorted out here, its the best we can
-        * do - especially as some hosts have no idea how much
-        * data was transferred before the error occurred.
+        * For writes and where the host claims to support proper
+        * error reporting, we first ok the successful blocks.
+        *
+        * For reads we just fail the entire chunk as that should
+        * be safe in all cases.
         */
+       if (rq_data_dir(req) != READ &&
+           (card->host->caps & MMC_CAP_MULTIWRITE)) {
+               spin_lock_irq(&md->lock);
+               ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
+               spin_unlock_irq(&md->lock);
+       }
+
        spin_lock_irq(&md->lock);
-       do {
+       while (ret) {
                ret = end_that_request_chunk(req, 0,
                                req->current_nr_sectors << 9);
-       } while (ret);
+       }
 
        add_disk_randomness(req->rq_disk);
        blkdev_dequeue_request(req);
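
For context, the block driver change above only works if the host-controller driver advertises MMC_CAP_MULTIWRITE and keeps bytes_xfered accurate when a write fails partway through. Below is a minimal, hypothetical sketch of that host-driver side, assuming the MMC host API of this era (mmc_alloc_host(), mmc_request_done(), struct mmc_data.bytes_xfered, MMC_ERR_FAILED); the driver name, probe boilerplate, and the omitted transfer loop are invented for illustration and are not part of this patch.

	#include <linux/errno.h>
	#include <linux/platform_device.h>
	#include <linux/mmc/host.h>
	#include <linux/mmc/mmc.h>

	static void my_host_request(struct mmc_host *mmc, struct mmc_request *mrq)
	{
		struct mmc_data *data = mrq->data;
		unsigned int done = 0;	/* bytes actually written, tracked per block */
		int err = 0;		/* set by the (omitted) transfer loop on failure */

		/* ... program the controller and move data block by block ... */

		if (data) {
			/*
			 * Report exactly how much data reached the card; on a
			 * failed multi-block write, mmc_block.c uses this value
			 * to complete the successful part of the request.
			 */
			data->bytes_xfered = done;
			if (err)
				data->error = MMC_ERR_FAILED;
		}

		mmc_request_done(mmc, mrq);
	}

	static struct mmc_host_ops my_host_ops = {
		.request	= my_host_request,
	};

	static int my_host_probe(struct platform_device *pdev)
	{
		struct mmc_host *mmc = mmc_alloc_host(0, &pdev->dev);

		if (!mmc)
			return -ENOMEM;

		mmc->ops = &my_host_ops;

		/*
		 * Only hosts that keep bytes_xfered accurate across write
		 * errors should advertise MMC_CAP_MULTIWRITE; mmc_block.c
		 * checks this capability before acknowledging partial writes.
		 */
		mmc->caps |= MMC_CAP_MULTIWRITE;

		return mmc_add_host(mmc);
	}

Hosts that cannot tell how far a write got simply leave MMC_CAP_MULTIWRITE unset, and the block driver falls back to failing the whole request, as it did before this patch.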