#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/devfs_fs_kernel.h>
+#include <linux/mutex.h>
#include <linux/mmc/card.h>
#include <linux/mmc/protocol.h>
-#include <linux/mmc/host.h>
#include <asm/system.h>
#include <asm/uaccess.h>
unsigned int usage;
unsigned int block_bits;
- unsigned int suspended;
+ unsigned int read_only;
};
-static DECLARE_MUTEX(open_lock);
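+/*
+ * open_lock protects md->usage so that mmc_blk_get() and mmc_blk_put()
+ * cannot race while the reference count is updated.
+ */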
+static DEFINE_MUTEX(open_lock);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
- down(&open_lock);
+ mutex_lock(&open_lock);
md = disk->private_data;
if (md && md->usage == 0)
md = NULL;
if (md)
md->usage++;
- up(&open_lock);
+ mutex_unlock(&open_lock);
return md;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
- down(&open_lock);
+ mutex_lock(&open_lock);
md->usage--;
if (md->usage == 0) {
put_disk(md->disk);
mmc_cleanup_queue(&md->queue);
kfree(md);
}
- up(&open_lock);
-}
-
-static inline int mmc_blk_readonly(struct mmc_card *card)
-{
- return mmc_card_readonly(card) ||
- !(card->csd.cmdclass & CCC_BLOCK_WRITE);
+ mutex_unlock(&open_lock);
}
static int mmc_blk_open(struct inode *inode, struct file *filp)
check_disk_change(inode->i_bdev);
ret = 0;
- if ((filp->f_mode & FMODE_WRITE) &&
- mmc_blk_readonly(md->queue.card))
+ if ((filp->f_mode & FMODE_WRITE) && md->read_only)
ret = -EROFS;
}
}
static int
-mmc_blk_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct block_device *bdev = inode->i_bdev;
-
- if (cmd == HDIO_GETGEO) {
- struct hd_geometry geo;
-
- memset(&geo, 0, sizeof(struct hd_geometry));
-
- geo.cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
- geo.heads = 4;
- geo.sectors = 16;
- geo.start = get_start_sect(bdev);
-
- return copy_to_user((void __user *)arg, &geo, sizeof(geo))
- ? -EFAULT : 0;
- }
-
- return -ENOTTY;
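+ /*
+ * Report a fake geometry: 4 heads and 16 sectors per track, with the
+ * cylinder count derived from the capacity. geo->start has already
+ * been filled in by the block layer before ->getgeo() is called.
+ */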
+ geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+ geo->heads = 4;
+ geo->sectors = 16;
+ return 0;
}
static struct block_device_operations mmc_bdops = {
.open = mmc_blk_open,
.release = mmc_blk_release,
- .ioctl = mmc_blk_ioctl,
+ .getgeo = mmc_blk_getgeo,
.owner = THIS_MODULE,
};
stat = BLKPREP_KILL;
}
- if (md->suspended) {
- blk_plug_device(md->queue.queue);
- stat = BLKPREP_DEFER;
- }
-
- /*
- * Check for excessive requests.
- */
- if (req->sector + req->nr_sectors > get_capacity(req->rq_disk)) {
- printk("bad request size\n");
- stat = BLKPREP_KILL;
- }
-
return stat;
}
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
int ret;
-
-#ifdef CONFIG_MMC_BULKTRANSFER
- int failsafe;
-#endif
if (mmc_card_claim_host(card))
goto cmd_err;
-
-#ifdef CONFIG_MMC_BULKTRANSFER
- /*
- * We first try transfering multiple blocks. If this fails
- * we fall back to single block transfers.
- *
- * This gives us good performance when all is well and the
- * possibility to determine which sector fails when all
- * is not well.
- */
- failsafe = 0;
-#endif
do {
struct mmc_blk_request brq;
brq.stop.arg = 0;
brq.stop.flags = MMC_RSP_R1B;
-#ifdef CONFIG_MMC_BULKTRANSFER
- /*
- * A multi-block transfer failed. Falling back to single
- * blocks.
- */
- if (failsafe)
- brq.data.blocks = 1;
-
-#else
- /*
- * Writes are done one sector at a time.
- */
- if (rq_data_dir(req) != READ)
- brq.data.blocks = 1;
-#endif
-
- ret = 1;
-
if (rq_data_dir(req) == READ) {
brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
brq.data.flags |= MMC_DATA_READ;
} else {
- brq.cmd.opcode = brq.data.blocks > 1 ? MMC_WRITE_MULTIPLE_BLOCK :
- MMC_WRITE_BLOCK;
- brq.cmd.flags = MMC_RSP_R1B;
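+ /*
+ * Writes are still issued one block at a time; only reads
+ * use multi-block transfers here.
+ */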
+ brq.cmd.opcode = MMC_WRITE_BLOCK;
brq.data.flags |= MMC_DATA_WRITE;
+ brq.data.blocks = 1;
+ }
+
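+ /*
+ * Multi-block transfers need the MMC_DATA_MULTI flag and a
+ * trailing STOP_TRANSMISSION command; single-block transfers
+ * complete on their own.
+ */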
+ if (brq.data.blocks > 1) {
+ brq.data.flags |= MMC_DATA_MULTI;
+ brq.mrq.stop = &brq.stop;
+ } else {
+ brq.mrq.stop = NULL;
}
- brq.mrq.stop = brq.data.blocks > 1 ? &brq.stop : NULL;
brq.data.sg = mq->sg;
brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);
if (brq.cmd.error) {
printk(KERN_ERR "%s: error %d sending read/write command\n",
req->rq_disk->disk_name, brq.cmd.error);
- goto cmd_fail;
+ goto cmd_err;
}
if (brq.data.error) {
printk(KERN_ERR "%s: error %d transferring data\n",
req->rq_disk->disk_name, brq.data.error);
- goto cmd_fail;
+ goto cmd_err;
}
if (brq.stop.error) {
printk(KERN_ERR "%s: error %d sending stop command\n",
req->rq_disk->disk_name, brq.stop.error);
- goto cmd_fail;
+ goto cmd_err;
}
- /* No need to check card status after a read */
- if (rq_data_dir(req) == READ)
- goto card_ready;
-
do {
int err;
if (err) {
printk(KERN_ERR "%s: error %d requesting status\n",
req->rq_disk->disk_name, err);
- goto cmd_fail;
+ goto cmd_err;
}
-#ifdef CONFIG_MMC_BLOCK_BROKEN_RFD
- /* Work-around for broken cards setting READY_FOR_DATA
- * when not actually ready.
- */
- if (R1_CURRENT_STATE(cmd.resp[0]) == 7)
- cmd.resp[0] &= ~R1_READY_FOR_DATA;
-#endif
} while (!(cmd.resp[0] & R1_READY_FOR_DATA));
#if 0
goto cmd_err;
#endif
- card_ready:
-
/*
* A block was successfully transferred.
*/
*/
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
- end_that_request_last(req);
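+ /*
+ * end_that_request_last() now takes an "uptodate" flag:
+ * 1 here for success, 0 in the error path below.
+ */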
+ end_that_request_last(req, 1);
}
spin_unlock_irq(&md->lock);
-
-#ifdef CONFIG_MMC_BULKTRANSFER
- /*
- * Go back to bulk mode if in failsafe mode.
- */
- failsafe = 0;
-#endif
-
- continue;
-
- cmd_fail:
-
-#ifdef CONFIG_MMC_BULKTRANSFER
- if (failsafe)
- goto cmd_err;
- else
- failsafe = 1;
-#else
- goto cmd_err;
-#endif
-
} while (ret);
mmc_card_release_host(card);
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
- end_that_request_last(req);
+ end_that_request_last(req, 0);
spin_unlock_irq(&md->lock);
- /* If a command fails, the card might be removed. */
- mmc_detect_change(card->host, 0);
-
return 0;
}
static unsigned long dev_use[MMC_NUM_MINORS/(8*sizeof(unsigned long))];
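+/*
+ * A card is treated as read-only if its write-protect switch is set
+ * or if it does not support the block-write command class.
+ */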
+static inline int mmc_blk_readonly(struct mmc_card *card)
+{
+ return mmc_card_readonly(card) ||
+ !(card->csd.cmdclass & CCC_BLOCK_WRITE);
+}
+
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
struct mmc_blk_data *md;
__set_bit(devidx, dev_use);
md = kmalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
- if (md) {
- memset(md, 0, sizeof(struct mmc_blk_data));
+ if (!md) {
+ ret = -ENOMEM;
+ goto out;
+ }
- md->disk = alloc_disk(1 << MMC_SHIFT);
- if (md->disk == NULL) {
- kfree(md);
- md = ERR_PTR(-ENOMEM);
- goto out;
- }
+ memset(md, 0, sizeof(struct mmc_blk_data));
- spin_lock_init(&md->lock);
- md->usage = 1;
+ /*
+ * Set the read-only status based on the supported commands
+ * and the write protect switch.
+ */
+ md->read_only = mmc_blk_readonly(card);
- ret = mmc_init_queue(&md->queue, card, &md->lock);
- if (ret) {
- put_disk(md->disk);
- kfree(md);
- md = ERR_PTR(ret);
- goto out;
+ /*
+ * Figure out a workable block size. MMC cards have:
+ * - two block sizes, one for reads and one for writes;
+ * - optional support for partial reads and/or writes, which
+ * allows block sizes smaller than the ones specified.
+ */
+ md->block_bits = card->csd.read_blkbits;
+ if (card->csd.write_blkbits != card->csd.read_blkbits) {
+ if (card->csd.write_blkbits < card->csd.read_blkbits &&
+ card->csd.read_partial) {
+ /*
+ * write block size is smaller than read block
+ * size, but we support partial reads, so choose
+ * the smaller write block size.
+ */
+ md->block_bits = card->csd.write_blkbits;
+ } else if (card->csd.write_blkbits > card->csd.read_blkbits &&
+ card->csd.write_partial) {
+ /*
+ * read block size is smaller than write block
+ * size, but we support partial writes. Use read
+ * block size.
+ */
+ } else {
+ /*
+ * We don't support this configuration for writes.
+ */
+ printk(KERN_ERR "%s: unable to select block size for "
+ "writing (rb%u wb%u rp%u wp%u)\n",
+ mmc_card_id(card),
+ 1 << card->csd.read_blkbits,
+ 1 << card->csd.write_blkbits,
+ card->csd.read_partial,
+ card->csd.write_partial);
+ md->read_only = 1;
}
- md->queue.prep_fn = mmc_blk_prep_rq;
- md->queue.issue_fn = mmc_blk_issue_rq;
- md->queue.data = md;
+ }
- md->disk->major = major;
- md->disk->first_minor = devidx << MMC_SHIFT;
- md->disk->fops = &mmc_bdops;
- md->disk->private_data = md;
- md->disk->queue = md->queue.queue;
- md->disk->driverfs_dev = &card->dev;
+ /*
+ * Refuse to allow block sizes smaller than 512 bytes.
+ */
+ if (md->block_bits < 9) {
+ printk(KERN_ERR "%s: unable to support block size %u\n",
+ mmc_card_id(card), 1 << md->block_bits);
+ ret = -EINVAL;
+ goto err_kfree;
+ }
- /*
- * As discussed on lkml, GENHD_FL_REMOVABLE should:
- *
- * - be set for removable media with permanent block devices
- * - be unset for removable block devices with permanent media
- *
- * Since MMC block devices clearly fall under the second
- * case, we do not set GENHD_FL_REMOVABLE. Userspace
- * should use the block device creation/destruction hotplug
- * messages to tell when the card is present.
- */
+ md->disk = alloc_disk(1 << MMC_SHIFT);
+ if (md->disk == NULL) {
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
- sprintf(md->disk->disk_name, "mmcblk%d", devidx);
- sprintf(md->disk->devfs_name, "mmc/blk%d", devidx);
+ spin_lock_init(&md->lock);
+ md->usage = 1;
- md->block_bits = card->csd.read_blkbits;
+ ret = mmc_init_queue(&md->queue, card, &md->lock);
+ if (ret)
+ goto err_putdisk;
- blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);
+ md->queue.prep_fn = mmc_blk_prep_rq;
+ md->queue.issue_fn = mmc_blk_issue_rq;
+ md->queue.data = md;
- /*
- * The CSD capacity field is in units of read_blkbits.
- * set_capacity takes units of 512 bytes.
- */
- set_capacity(md->disk, card->csd.capacity << (card->csd.read_blkbits - 9));
- }
- out:
+ md->disk->major = major;
+ md->disk->first_minor = devidx << MMC_SHIFT;
+ md->disk->fops = &mmc_bdops;
+ md->disk->private_data = md;
+ md->disk->queue = md->queue.queue;
+ md->disk->driverfs_dev = &card->dev;
+
+ /*
+ * As discussed on lkml, GENHD_FL_REMOVABLE should:
+ *
+ * - be set for removable media with permanent block devices
+ * - be unset for removable block devices with permanent media
+ *
+ * Since MMC block devices clearly fall under the second
+ * case, we do not set GENHD_FL_REMOVABLE. Userspace
+ * should use the block device creation/destruction hotplug
+ * messages to tell when the card is present.
+ */
+
+ sprintf(md->disk->disk_name, "mmcblk%d", devidx);
+ sprintf(md->disk->devfs_name, "mmc/blk%d", devidx);
+
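+ /* Tell the block layer which sector size was selected above. */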
+ blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);
+
+ /*
+ * The CSD capacity field is in units of read_blkbits.
+ * set_capacity takes units of 512 bytes.
+ */
+ set_capacity(md->disk, card->csd.capacity << (card->csd.read_blkbits - 9));
return md;
+
+ err_putdisk:
+ put_disk(md->disk);
+ err_kfree:
+ kfree(md);
+ out:
+ return ERR_PTR(ret);
}
static int
if (!(card->csd.cmdclass & CCC_BLOCK_READ))
return -ENODEV;
- if (card->csd.read_blkbits < 9) {
- printk(KERN_WARNING "%s: read blocksize too small (%u)\n",
- mmc_card_id(card), 1 << card->csd.read_blkbits);
- return -ENODEV;
- }
-
md = mmc_blk_alloc(card);
if (IS_ERR(md))
return PTR_ERR(md);
if (err)
goto out;
- printk(KERN_INFO "%s: %s %s %luKiB %s\n",
+ printk(KERN_INFO "%s: %s %s %lluKiB %s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
- get_capacity(md->disk) >> 1, mmc_blk_readonly(card)?"(ro)":"");
+ (unsigned long long)(get_capacity(md->disk) >> 1),
+ md->read_only ? "(ro)" : "");
mmc_set_drvdata(card, md);
add_disk(md->disk);