/*
* Issue a new request to a drive from hwgroup
- * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
+ * Caller must have already done spin_lock_irqsave(&hwgroup->lock, ..);
*
* A hwgroup is a serialized group of IDE interfaces. Usually there is
* exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
* may have both interfaces in a single hwgroup to "serialize" access.
* Note also that several hwgroups can end up sharing a single IRQ,
* possibly along with many other devices. This is especially common in
* PCI-based systems with off-board IDE controller cards.
*
- * The IDE driver uses the single global ide_lock spinlock to protect
+ * The IDE driver uses a per-hwgroup spinlock to protect
* access to the request queues, and to protect the hwgroup->busy flag.
*
* The first thread into the driver for a particular hwgroup sets the
* hwgroup->busy flag to indicate that this hwgroup is now active, and then
* initiates processing of the top request from the request queue.  When
* processing of a request completes, the completing thread or IRQ handler
* will start the next request from the queue. If no more work remains,
* the driver will clear the hwgroup->busy flag and exit.
*
- * The ide_lock (spinlock) is used to protect all access to the
+ * The per-hwgroup spinlock is used to protect all access to the
* hwgroup->busy flag, but is otherwise not needed for most processing in
* the driver. This makes the driver much friendlier to shared IRQs
* than previous designs, while remaining 100% (?) SMP safe and capable.
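For orientation, a minimal sketch of the entry discipline described above under the new per-hwgroup lock; kick_hwgroup() is a hypothetical caller name used only for illustration (ide_do_request() itself is static to ide-io.c, so real callers reach it via the block layer through do_ide_request()):

	static void kick_hwgroup(ide_hwgroup_t *hwgroup)
	{
		unsigned long flags;

		/* per the rule above, ide_do_request() must be entered with
		 * hwgroup->lock held and interrupts disabled */
		spin_lock_irqsave(&hwgroup->lock, flags);
		/* ide_do_request() tests and sets hwgroup->busy itself and may
		 * temporarily drop the lock around start_request() */
		ide_do_request(hwgroup, IDE_NO_IRQ);
		spin_unlock_irqrestore(&hwgroup->lock, flags);
	}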
ide_startstop_t startstop;
int loops = 0;
- /* caller must own ide_lock */
+ /* caller must own hwgroup->lock */
BUG_ON(!irqs_disabled());
while (!hwgroup->busy) {
*/
if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
disable_irq_nosync(hwif->irq);
- spin_unlock(&ide_lock);
+ spin_unlock(&hwgroup->lock);
local_irq_enable_in_hardirq();
/* allow other IRQs while we start this request */
startstop = start_request(drive, rq);
- spin_lock_irq(&ide_lock);
+ spin_lock_irq(&hwgroup->lock);
if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
enable_irq(hwif->irq);
if (startstop == ide_stopped)
unsigned long flags;
unsigned long wait = -1;
- spin_lock_irqsave(&ide_lock, flags);
+ spin_lock_irqsave(&hwgroup->lock, flags);
if (((handler = hwgroup->handler) == NULL) ||
(hwgroup->req_gen != hwgroup->req_gen_timer)) {
hwgroup->timer.expires = jiffies + wait;
hwgroup->req_gen_timer = hwgroup->req_gen;
add_timer(&hwgroup->timer);
- spin_unlock_irqrestore(&ide_lock, flags);
+ spin_unlock_irqrestore(&hwgroup->lock, flags);
return;
}
}
* the handler() function, which means we need to
* globally mask the specific IRQ:
*/
- spin_unlock(&ide_lock);
+ spin_unlock(&hwgroup->lock);
hwif = HWIF(drive);
/* disable_irq_nosync ?? */
disable_irq(hwif->irq);
hwif->tp_ops->read_status(hwif));
}
drive->service_time = jiffies - drive->service_start;
- spin_lock_irq(&ide_lock);
+ spin_lock_irq(&hwgroup->lock);
enable_irq(hwif->irq);
if (startstop == ide_stopped)
hwgroup->busy = 0;
}
}
ide_do_request(hwgroup, IDE_NO_IRQ);
- spin_unlock_irqrestore(&ide_lock, flags);
+ spin_unlock_irqrestore(&hwgroup->lock, flags);
}
/**
{
unsigned long flags;
ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
- ide_hwif_t *hwif;
+ ide_hwif_t *hwif = hwgroup->hwif;
ide_drive_t *drive;
ide_handler_t *handler;
ide_startstop_t startstop;
irqreturn_t irq_ret = IRQ_NONE;
- spin_lock_irqsave(&ide_lock, flags);
- hwif = hwgroup->hwif;
+ spin_lock_irqsave(&hwgroup->lock, flags);
if (!ide_ack_intr(hwif))
goto out;
hwgroup->handler = NULL;
hwgroup->req_gen++;
del_timer(&hwgroup->timer);
- spin_unlock(&ide_lock);
+ spin_unlock(&hwgroup->lock);
if (hwif->port_ops && hwif->port_ops->clear_irq)
hwif->port_ops->clear_irq(drive);
/* service this interrupt, may set handler for next interrupt */
startstop = handler(drive);
- spin_lock_irq(&ide_lock);
+ spin_lock_irq(&hwgroup->lock);
/*
* Note that handler() may have set things up for another
* interrupt to occur soon, but it cannot happen until
out_handled:
irq_ret = IRQ_HANDLED;
out:
- spin_unlock_irqrestore(&ide_lock, flags);
+ spin_unlock_irqrestore(&hwgroup->lock, flags);
return irq_ret;
}
void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
unsigned int timeout, ide_expiry_t *expiry)
{
+ ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
unsigned long flags;
- spin_lock_irqsave(&ide_lock, flags);
+
+ spin_lock_irqsave(&hwgroup->lock, flags);
__ide_set_handler(drive, handler, timeout, expiry);
- spin_unlock_irqrestore(&ide_lock, flags);
+ spin_unlock_irqrestore(&hwgroup->lock, flags);
}
EXPORT_SYMBOL(ide_set_handler);
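For context, a minimal usage sketch (demo_intr() and demo_issue() are hypothetical names, not from the patch): ide_set_handler() arms hwgroup->handler plus a timeout under hwgroup->lock, and the handler later runs from ide_intr() after the lock has been dropped:

	static ide_startstop_t demo_intr(ide_drive_t *drive)
	{
		/* runs from ide_intr() once the IRQ arrives; hwgroup->handler
		 * has already been cleared, so we may re-arm or finish here */
		return ide_stopped;
	}

	static ide_startstop_t demo_issue(ide_drive_t *drive)
	{
		/* arm the handler plus a timeout; the lock is taken inside */
		ide_set_handler(drive, demo_intr, WAIT_CMD, NULL);
		/* ...now poke the device, or use ide_execute_command(), which
		 * arms the handler and issues the command in one step... */
		return ide_started;
	}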
void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
unsigned timeout, ide_expiry_t *expiry)
{
+ ide_hwif_t *hwif = drive->hwif;
+ ide_hwgroup_t *hwgroup = hwif->hwgroup;
unsigned long flags;
- ide_hwif_t *hwif = HWIF(drive);
- spin_lock_irqsave(&ide_lock, flags);
+ spin_lock_irqsave(&hwgroup->lock, flags);
__ide_set_handler(drive, handler, timeout, expiry);
hwif->tp_ops->exec_command(hwif, cmd);
/*
* FIXME: we could skip this delay with care on non shared devices
*/
ndelay(400);
- spin_unlock_irqrestore(&ide_lock, flags);
+ spin_unlock_irqrestore(&hwgroup->lock, flags);
}
EXPORT_SYMBOL(ide_execute_command);
void ide_execute_pkt_cmd(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
+ ide_hwgroup_t *hwgroup = hwif->hwgroup;
unsigned long flags;
- spin_lock_irqsave(&ide_lock, flags);
+ spin_lock_irqsave(&hwgroup->lock, flags);
hwif->tp_ops->exec_command(hwif, ATA_CMD_PACKET);
ndelay(400);
- spin_unlock_irqrestore(&ide_lock, flags);
+ spin_unlock_irqrestore(&hwgroup->lock, flags);
}
EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);
*/
static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
{
- unsigned int unit;
- unsigned long flags, timeout;
- ide_hwif_t *hwif;
- ide_hwgroup_t *hwgroup;
- struct ide_io_ports *io_ports;
- const struct ide_tp_ops *tp_ops;
+ ide_hwif_t *hwif = drive->hwif;
+ ide_hwgroup_t *hwgroup = hwif->hwgroup;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
const struct ide_port_ops *port_ops;
+ unsigned long flags, timeout;
+ unsigned int unit;
DEFINE_WAIT(wait);
- spin_lock_irqsave(&ide_lock, flags);
- hwif = HWIF(drive);
- hwgroup = HWGROUP(drive);
-
- io_ports = &hwif->io_ports;
-
- tp_ops = hwif->tp_ops;
+ spin_lock_irqsave(&hwgroup->lock, flags);
/* We must not reset with running handlers */
BUG_ON(hwgroup->handler != NULL);
hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
hwgroup->polling = 1;
__ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
- spin_unlock_irqrestore(&ide_lock, flags);
+ spin_unlock_irqrestore(&hwgroup->lock, flags);
return ide_started;
}
if (time_before_eq(timeout, now))
break;
- spin_unlock_irqrestore(&ide_lock, flags);
+ spin_unlock_irqrestore(&hwgroup->lock, flags);
timeout = schedule_timeout_uninterruptible(timeout - now);
- spin_lock_irqsave(&ide_lock, flags);
+ spin_lock_irqsave(&hwgroup->lock, flags);
} while (timeout);
finish_wait(&ide_park_wq, &wait);
pre_reset(&hwif->drives[unit]);
if (io_ports->ctl_addr == 0) {
- spin_unlock_irqrestore(&ide_lock, flags);
+ spin_unlock_irqrestore(&hwgroup->lock, flags);
ide_complete_drive_reset(drive, -ENXIO);
return ide_stopped;
}
if (port_ops && port_ops->resetproc)
port_ops->resetproc(drive);
- spin_unlock_irqrestore(&ide_lock, flags);
+ spin_unlock_irqrestore(&hwgroup->lock, flags);
return ide_started;
}
static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
{
+ ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
struct request_queue *q = drive->queue;
struct request *rq;
int rc;
timeout += jiffies;
- spin_lock_irq(&ide_lock);
+ spin_lock_irq(&hwgroup->lock);
if (drive->dev_flags & IDE_DFLAG_PARKED) {
- ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
- int reset_timer;
- reset_timer = time_before(timeout, drive->sleep);
+ int reset_timer = time_before(timeout, drive->sleep);
drive->sleep = timeout;
wake_up_all(&ide_park_wq);
if (reset_timer && hwgroup->sleeping &&
hwgroup->busy = 0;
blk_start_queueing(q);
}
- spin_unlock_irq(&ide_lock);
+ spin_unlock_irq(&hwgroup->lock);
return;
}
- spin_unlock_irq(&ide_lock);
+ spin_unlock_irq(&hwgroup->lock);
rq = blk_get_request(q, READ, __GFP_WAIT);
rq->cmd[0] = REQ_PARK_HEADS;
char *buf)
{
ide_drive_t *drive = to_ide_device(dev);
+ ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
unsigned long now;
unsigned int msecs;
if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD)
return -EOPNOTSUPP;
- spin_lock_irq(&ide_lock);
+ spin_lock_irq(&hwgroup->lock);
now = jiffies;
if (drive->dev_flags & IDE_DFLAG_PARKED &&
time_after(drive->sleep, now))
msecs = jiffies_to_msecs(drive->sleep - now);
else
msecs = 0;
- spin_unlock_irq(&ide_lock);
+ spin_unlock_irq(&hwgroup->lock);
return snprintf(buf, 20, "%u\n", msecs);
}
* do not.
*/
- q = blk_init_queue_node(do_ide_request, &ide_lock, hwif_to_node(hwif));
+ q = blk_init_queue_node(do_ide_request, &hwif->hwgroup->lock,
+ hwif_to_node(hwif));
if (!q)
return 1;
{
ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
- spin_lock_irq(&ide_lock);
+ spin_lock_irq(&hwgroup->lock);
if (!hwgroup->drive) {
/* first drive for hwgroup. */
drive->next = drive;
drive->next = hwgroup->drive->next;
hwgroup->drive->next = drive;
}
- spin_unlock_irq(&ide_lock);
+ spin_unlock_irq(&hwgroup->lock);
}
/*
ide_ports[hwif->index] = NULL;
- spin_lock_irq(&ide_lock);
+ spin_lock_irq(&hwgroup->lock);
/*
* Remove us from the hwgroup, and free
* the hwgroup if we were the only member
}
BUG_ON(hwgroup->hwif == hwif);
}
- spin_unlock_irq(&ide_lock);
+ spin_unlock_irq(&hwgroup->lock);
}
/*
* linked list, the first entry is the hwif that owns
* hwgroup->handler - do not change that.
*/
- spin_lock_irq(&ide_lock);
+ spin_lock_irq(&hwgroup->lock);
hwif->next = hwgroup->hwif->next;
hwgroup->hwif->next = hwif;
BUG_ON(hwif->next == hwif);
- spin_unlock_irq(&ide_lock);
+ spin_unlock_irq(&hwgroup->lock);
} else {
hwgroup = kmalloc_node(sizeof(*hwgroup), GFP_KERNEL|__GFP_ZERO,
hwif_to_node(hwif));
if (hwgroup == NULL)
goto out_up;
+ spin_lock_init(&hwgroup->lock);
+
hwif->hwgroup = hwgroup;
hwgroup->hwif = hwif->next = hwif;
static void drive_release_dev (struct device *dev)
{
ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);
+ ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
ide_proc_unregister_device(drive);
- spin_lock_irq(&ide_lock);
+ spin_lock_irq(&hwgroup->lock);
ide_remove_drive_from_hwgroup(drive);
kfree(drive->id);
drive->id = NULL;
drive->dev_flags &= ~IDE_DFLAG_PRESENT;
/* Messed up locking ... */
- spin_unlock_irq(&ide_lock);
+ spin_unlock_irq(&hwgroup->lock);
blk_cleanup_queue(drive->queue);
- spin_lock_irq(&ide_lock);
+ spin_lock_irq(&hwgroup->lock);
drive->queue = NULL;
- spin_unlock_irq(&ide_lock);
+ spin_unlock_irq(&hwgroup->lock);
complete(&drive->gendev_rel_comp);
}
DEFINE_MUTEX(ide_cfg_mtx);
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
-EXPORT_SYMBOL(ide_lock);
-
static void ide_port_init_devices_data(ide_hwif_t *);
/*
static int set_pio_mode(ide_drive_t *drive, int arg)
{
ide_hwif_t *hwif = drive->hwif;
+ ide_hwgroup_t *hwgroup = hwif->hwgroup;
const struct ide_port_ops *port_ops = hwif->port_ops;
if (arg < 0 || arg > 255)
unsigned long flags;
/* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
- spin_lock_irqsave(&ide_lock, flags);
+ spin_lock_irqsave(&hwgroup->lock, flags);
port_ops->set_pio_mode(drive, arg);
- spin_unlock_irqrestore(&ide_lock, flags);
+ spin_unlock_irqrestore(&hwgroup->lock, flags);
} else
port_ops->set_pio_mode(drive, arg);
} else {
static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
ide_hwif_t *hwif = drive->hwif;
- unsigned long flags;
+ ide_hwgroup_t *mate_hwgroup = hwif->mate ? hwif->mate->hwgroup : NULL;
+ unsigned long uninitialized_var(flags);
printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
drive->name, pio, pio_to_umc[pio]);
- spin_lock_irqsave(&ide_lock, flags);
- if (hwif->mate && hwif->mate->hwgroup->handler) {
+ if (mate_hwgroup)
+ spin_lock_irqsave(&mate_hwgroup->lock, flags);
+ if (mate_hwgroup && mate_hwgroup->handler) {
printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n");
} else {
current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio];
umc_set_speeds(current_speeds);
}
- spin_unlock_irqrestore(&ide_lock, flags);
+ if (mate_hwgroup)
+ spin_unlock_irqrestore(&mate_hwgroup->lock, flags);
}
static const struct ide_port_ops umc8672_port_ops = {
{
idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host);
ide_drive_t *drive = scsi->drive;
+ ide_hwif_t *hwif;
+ ide_hwgroup_t *hwgroup;
int busy;
int ret = FAILED;
goto no_drive;
}
- /* First give it some more time, how much is "right" is hard to say :-( */
- busy = ide_wait_not_busy(HWIF(drive), 100); /* FIXME - uses mdelay which causes latency? */
+ hwif = drive->hwif;
+ hwgroup = hwif->hwgroup;
+ /* First give it some more time, how much is "right" is hard to say :-(
+ FIXME - uses mdelay which causes latency? */
+ busy = ide_wait_not_busy(hwif, 100);
if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
printk (KERN_WARNING "ide-scsi: drive did%s become ready\n", busy?" not":"");
- spin_lock_irq(&ide_lock);
+ spin_lock_irq(&hwgroup->lock);
/* If there is no pc running we're done (our interrupt took care of it) */
pc = drive->pc;
}
ide_unlock:
- spin_unlock_irq(&ide_lock);
+ spin_unlock_irq(&hwgroup->lock);
no_drive:
if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
printk (KERN_WARNING "ide-scsi: abort returns %s\n", ret == SUCCESS?"success":"failed");
struct request *req;
idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host);
ide_drive_t *drive = scsi->drive;
+ ide_hwgroup_t *hwgroup;
int ready = 0;
int ret = SUCCESS;
return FAILED;
}
+ hwgroup = drive->hwif->hwgroup;
+
spin_lock_irq(cmd->device->host->host_lock);
- spin_lock(&ide_lock);
+ spin_lock(&hwgroup->lock);
pc = drive->pc;
- if (pc == NULL || (req = pc->rq) != HWGROUP(drive)->rq || !HWGROUP(drive)->handler) {
+ if (pc)
+ req = pc->rq;
+ if (pc == NULL || req != hwgroup->rq || hwgroup->handler == NULL) {
printk (KERN_WARNING "ide-scsi: No active request in idescsi_eh_reset\n");
- spin_unlock(&ide_lock);
+ spin_unlock(&hwgroup->lock);
spin_unlock_irq(cmd->device->host->host_lock);
return FAILED;
}
BUG();
}
- HWGROUP(drive)->rq = NULL;
- HWGROUP(drive)->handler = NULL;
- HWGROUP(drive)->busy = 1; /* will set this to zero when ide reset finished */
- spin_unlock(&ide_lock);
+ hwgroup->rq = NULL;
+ hwgroup->handler = NULL;
+ hwgroup->busy = 1; /* will set this to zero when ide reset finished */
+ spin_unlock(&hwgroup->lock);
ide_do_reset(drive);
int req_gen;
int req_gen_timer;
+
+ spinlock_t lock;
} ide_hwgroup_t;
typedef struct ide_driver_s ide_driver_t;
/*
* Structure locking:
*
- * ide_cfg_mtx and ide_lock together protect changes to
- * ide_hwif_t->{next,hwgroup}
+ * ide_cfg_mtx and hwgroup->lock together protect changes to
+ * ide_hwif_t->next
* ide_drive_t->next
*
- * ide_hwgroup_t->busy: ide_lock
- * ide_hwgroup_t->hwif: ide_lock
- * ide_hwif_t->mate: constant, no locking
+ * ide_hwgroup_t->busy: hwgroup->lock
+ * ide_hwgroup_t->hwif: hwgroup->lock
+ * ide_hwif_t->{hwgroup,mate}: constant, no locking
* ide_drive_t->hwif: constant, no locking
*/