                        blkdev_dequeue_request(rq);
                        rq->flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
-                       end_that_request_last(rq);
+                       end_that_request_last(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                                                                ret);
 
 /*
  * Cache flushing for ordered writes handling
  */
-static void blk_pre_flush_end_io(struct request *flush_rq)
+static void blk_pre_flush_end_io(struct request *flush_rq, int error)
 {
        struct request *rq = flush_rq->end_io_data;
        request_queue_t *q = rq->q;
        }
 }
 
-static void blk_post_flush_end_io(struct request *flush_rq)
+static void blk_post_flush_end_io(struct request *flush_rq, int error)
 {
        struct request *rq = flush_rq->end_io_data;
        request_queue_t *q = rq->q;
  */
 void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
                           struct request *rq, int at_head,
-                          void (*done)(struct request *))
+                          rq_end_io_fn *done)
 {
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
+ * @error: end io status of the request
  */
-void blk_end_sync_rq(struct request *rq)
+void blk_end_sync_rq(struct request *rq, int error)
 {
        struct completion *waiting = rq->waiting;
 
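
With the two changes above, blk_end_sync_rq() itself satisfies rq_end_io_fn, so it can be handed straight to blk_execute_rq_nowait() as the done callback. A minimal sketch of synchronous execution built on that pairing, assuming the caller has already prepared rq (example_submit_sync is illustrative, not part of the patch):

        /* sketch only; kernel context (linux/blkdev.h, linux/completion.h) */
        static int example_submit_sync(request_queue_t *q, struct gendisk *disk,
                                       struct request *rq)
        {
                DECLARE_COMPLETION(wait);

                rq->waiting = &wait;

                /* blk_end_sync_rq() now matches rq_end_io_fn, no wrapper needed */
                blk_execute_rq_nowait(q, disk, rq, 0, blk_end_sync_rq);
                wait_for_completion(&wait);
                rq->waiting = NULL;

                /* the completion path records failure in rq->errors */
                return rq->errors ? -EIO : 0;
        }
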
 /*
  * queue lock must be held
  */
-void end_that_request_last(struct request *req)
+void end_that_request_last(struct request *req, int uptodate)
 {
        struct gendisk *disk = req->rq_disk;
+       int error;
+
+       /*
+        * extend uptodate bool to allow < 0 value to be direct io error
+        */
+       error = 0;
+       if (end_io_error(uptodate))
+               error = !uptodate ? -EIO : uptodate;
 
        if (unlikely(laptop_mode) && blk_fs_request(req))
                laptop_io_completion();
                disk->in_flight--;
        }
        if (req->end_io)
-               req->end_io(req);
+               req->end_io(req, error);
        else
                __blk_put_request(req->q, req);
 }
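
This hunk defines the convention every caller below follows: @uptodate stays 1 for success and 0 for a generic failure (mapped to -EIO here), while a negative errno is passed through to the request's end_io callback untouched; end_io_error() is the existing <= 0 test. A minimal sketch of how a driver picks the value, where ok and errno_hint stand in for hypothetical driver state:

        /* sketch only: how a driver chooses @uptodate under the new rules */
        static void example_complete(struct request *rq, int ok, int errno_hint)
        {
                int uptodate;

                if (ok)                         /* command succeeded */
                        uptodate = 1;
                else if (errno_hint < 0)        /* specific error is known */
                        uptodate = errno_hint;  /* e.g. -EOPNOTSUPP, passed through */
                else                            /* generic failure */
                        uptodate = 0;           /* becomes -EIO in the mapping above */

                /* queue lock must already be held, per the comment above */
                end_that_request_last(rq, uptodate);
        }

Keeping 0 as a valid failure value preserves source compatibility for the many call sites below that still pass plain booleans.
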
        if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
                add_disk_randomness(req->rq_disk);
                blkdev_dequeue_request(req);
-               end_that_request_last(req);
+               end_that_request_last(req, uptodate);
        }
 }
 
 
 
         if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
 
-               end_that_request_last(Request);
+               end_that_request_last(Request, UpToDate);
 
                if (Command->Completion) {
                        complete(Command->Completion);
 
        printk("Done with %p\n", cmd->rq);
 #endif /* CCISS_DEBUG */ 
 
-       end_that_request_last(cmd->rq);
+       end_that_request_last(cmd->rq, status ? 1 : -EIO);
        cmd_free(h,cmd,1);
 }
 
 
        complete_buffers(cmd->rq->bio, ok);
 
         DBGPX(printk("Done with %p\n", cmd->rq););
-       end_that_request_last(cmd->rq);
+       end_that_request_last(cmd->rq, ok ? 1 : -EIO);
 }
 
 /*
 
        add_disk_randomness(req->rq_disk);
        floppy_off((long)req->rq_disk->private_data);
        blkdev_dequeue_request(req);
-       end_that_request_last(req);
+       end_that_request_last(req, uptodate);
 
        /* We're done with the request */
        current_req = NULL;
 
 
        spin_lock_irqsave(q->queue_lock, flags);
        if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
-               end_that_request_last(req);
+               end_that_request_last(req, uptodate);
        }
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
        rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
        assert(rc == 0);
 
-       end_that_request_last(req);
+       end_that_request_last(req, uptodate);
 
        rc = carm_put_request(host, crq);
        assert(rc == 0);
 
 static void ub_end_rq(struct request *rq, int uptodate)
 {
        end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-       end_that_request_last(rq);
+       end_that_request_last(rq, uptodate);
 }
 
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
 
        if (end_that_request_first(req, uptodate, num_sectors))
                return;
        add_disk_randomness(req->rq_disk);
-       end_that_request_last(req);
+       end_that_request_last(req, uptodate);
 }
 
 /*
 
                        if (!end_that_request_first(req, 1, nblock)) {
                                spin_lock_irq(q->queue_lock);
                                blkdev_dequeue_request(req);
-                               end_that_request_last(req);
+                               end_that_request_last(req, 1);
                                spin_unlock_irq(q->queue_lock);
                        }
                        continue;
 
                         */
                        spin_lock_irqsave(&ide_lock, flags);
                        end_that_request_chunk(failed, 0, failed->data_len);
-                       end_that_request_last(failed);
+                       end_that_request_last(failed, 0);
                        spin_unlock_irqrestore(&ide_lock, flags);
                }
 
 
        spin_lock_irqsave(&ide_lock, flags);
        blkdev_dequeue_request(rq);
-       end_that_request_last(rq);
+       end_that_request_last(rq, 1);
        HWGROUP(drive)->rq = NULL;
        spin_unlock_irqrestore(&ide_lock, flags);
        return ide_stopped;
 
 
                blkdev_dequeue_request(rq);
                HWGROUP(drive)->rq = NULL;
-               end_that_request_last(rq);
+               end_that_request_last(rq, uptodate);
                ret = 0;
        }
        return ret;
        }
        blkdev_dequeue_request(rq);
        HWGROUP(drive)->rq = NULL;
-       end_that_request_last(rq);
+       end_that_request_last(rq, 1);
        spin_unlock_irqrestore(&ide_lock, flags);
 }
 
        blkdev_dequeue_request(rq);
        HWGROUP(drive)->rq = NULL;
        rq->errors = err;
-       end_that_request_last(rq);
+       end_that_request_last(rq, !rq->errors);
        spin_unlock_irqrestore(&ide_lock, flags);
 }
 
 
 
        spin_lock_irqsave(q->queue_lock, flags);
 
-       end_that_request_last(req);
+       end_that_request_last(req, uptodate);
 
        if (likely(dev)) {
                dev->open_queue_depth--;
 
                         */
                        add_disk_randomness(req->rq_disk);
                        blkdev_dequeue_request(req);
-                       end_that_request_last(req);
+                       end_that_request_last(req, 1);
                }
                spin_unlock_irq(&md->lock);
        } while (ret);
 
        add_disk_randomness(req->rq_disk);
        blkdev_dequeue_request(req);
-       end_that_request_last(req);
+       end_that_request_last(req, 0);
        spin_unlock_irq(&md->lock);
 
        return 0;
 
        if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
                BUG();
        add_disk_randomness(req->rq_disk);
-       end_that_request_last(req);
+       end_that_request_last(req, uptodate);
 }
 
 /*
 
 {
        if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
                BUG();
-       end_that_request_last(req);
+       end_that_request_last(req, uptodate);
 }
 
 static void
 
 
        /* kill current request */
        blkdev_dequeue_request(req);
-       end_that_request_last(req);
+       end_that_request_last(req, 0);
        if (req->flags & REQ_SENSE)
                kfree(scsi->pc->buffer);
        kfree(scsi->pc);
        /* now nuke the drive queue */
        while ((req = elv_next_request(drive->queue))) {
                blkdev_dequeue_request(req);
-               end_that_request_last(req);
+               end_that_request_last(req, 0);
        }
 
        HWGROUP(drive)->rq = NULL;
 
        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
-       end_that_request_last(req);
+       end_that_request_last(req, uptodate);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
        /*
 
                 * force journal abort of barriers
                 */
                end_that_request_first(rq, -EOPNOTSUPP, rq->hard_nr_sectors);
-               end_that_request_last(rq);
+               end_that_request_last(rq, -EOPNOTSUPP);
        }
 }
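
This hunk is the case the extended convention exists for: -EOPNOTSUPP passed as @uptodate survives end_io_error() and reaches the end_io callback as itself rather than as -EIO, so a journalling consumer can tell "barriers unsupported" apart from a real write failure. A sketch of an end_io consumer under the new signature (example_barrier_end_io is hypothetical, not part of the patch):

        /* sketch only; example_barrier_end_io is hypothetical */
        static void example_barrier_end_io(struct request *rq, int error)
        {
                if (error == -EOPNOTSUPP)
                        /* device cannot flush: safe to retry without the barrier */
                        printk(KERN_INFO "%s: barrier unsupported\n", __FUNCTION__);
                else if (error)
                        /* a real write failure, previously indistinguishable */
                        printk(KERN_ERR "%s: barrier write failed %d\n",
                                                        __FUNCTION__, error);
        }
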
 
 
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
 struct request;
-typedef void (rq_end_io_fn)(struct request *);
+typedef void (rq_end_io_fn)(struct request *, int);
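
The typedef is the source-compatibility pivot: anything assigned to rq->end_io or passed as the done argument of blk_execute_rq_nowait() must now take the extra int, even if it ignores it. A sketch of the mechanical conversion for a driver-private callback (my_rq_done and its use of end_io_data as a completion pointer are illustrative assumptions):

        /* before: static void my_rq_done(struct request *rq) */
        static void my_rq_done(struct request *rq, int error)
        {
                struct completion *done = rq->end_io_data;

                /* @error is 0 on success, otherwise a negative errno */
                if (error)
                        printk(KERN_ERR "%s: request failed: %d\n",
                                                        __FUNCTION__, error);
                complete(done);
        }
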
 
 struct request_list {
        int count[2];
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(request_queue_t *, struct request *);
-extern void blk_end_sync_rq(struct request *rq);
+extern void blk_end_sync_rq(struct request *rq, int error);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
  */
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *);
+extern void end_that_request_last(struct request *, int);
 extern void end_request(struct request *req, int uptodate);
 
 /*