/* Port private flags (pp_flags) */
MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
+ MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
};
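For reference, the restart decision that mv_start_dma() derives from these two flags can be summarized by the following sketch (mv_edma_needs_restart() is a hypothetical helper for illustration, not part of the patch):

static inline int mv_edma_needs_restart(struct mv_port_priv *pp, int want_ncq)
{
	int using_ncq = (pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0;

	/* restart only if EDMA is running and its mode differs from the request */
	return (pp->pp_flags & MV_PP_FLAG_EDMA_EN) && (want_ncq != using_ncq);
}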
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no);
-static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
- void __iomem *port_mmio);
+static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
+ void __iomem *port_mmio, int want_ncq);
+static int __mv_stop_dma(struct ata_port *ap);
static struct scsi_host_template mv5_sht = {
.module = THIS_MODULE,
* Inherited from caller.
*/
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
- struct mv_port_priv *pp)
+ struct mv_port_priv *pp, u8 protocol)
{
+ int want_ncq = (protocol == ATA_PROT_NCQ);
+
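+ /*
+  * EDMA cannot switch between NCQ and non-NCQ operation on the fly:
+  * if it is already running in the wrong mode, stop it first so it
+  * gets reconfigured via mv_edma_cfg() before being re-enabled below.
+  */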
+ if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
+ int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
+ if (want_ncq != using_ncq)
+ __mv_stop_dma(ap);
+ }
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
struct mv_host_priv *hpriv = ap->host->private_data;
int hard_port = mv_hardport_from_port(ap->port_no);
hc_mmio + HC_IRQ_CAUSE_OFS);
}
- mv_edma_cfg(ap, hpriv, port_mmio);
+ mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
/* clear FIS IRQ Cause */
writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
return -EINVAL;
}
-static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
- void __iomem *port_mmio)
+static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
+ void __iomem *port_mmio, int want_ncq)
{
u32 cfg;
cfg |= (1 << 17); /* enable cut-through (disable store-and-forward) */
}
+ if (want_ncq) {
+ cfg |= EDMA_CFG_NCQ;
+ pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
+ } else {
+ pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
+ }
+
writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
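The new contract is that mv_edma_cfg() both programs the EDMA configuration register and records the chosen mode in pp->pp_flags. A minimal usage sketch of that contract follows (example_set_ncq_mode() is a hypothetical caller, not part of the driver; __mv_stop_dma(), mv_edma_cfg(), and EDMA_CFG_NCQ come from the patch and the driver's existing definitions):

static void example_set_ncq_mode(struct ata_port *ap, void __iomem *port_mmio)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;

	__mv_stop_dma(ap);			/* EDMA must be idle while reconfiguring */
	mv_edma_cfg(pp, hpriv, port_mmio, 1);	/* sets EDMA_CFG_NCQ and MV_PP_FLAG_NCQ_EN */
	/*
	 * A subsequent mv_start_dma(ap, port_mmio, pp, ATA_PROT_NCQ)
	 * now finds a matching mode and simply enables the engine.
	 */
}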
spin_lock_irqsave(&ap->host->lock, flags);
- mv_edma_cfg(ap, hpriv, port_mmio);
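+ /* default to non-NCQ mode at port init; mv_start_dma() reconfigures per command */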
+ mv_edma_cfg(pp, hpriv, port_mmio, 0);
mv_set_edma_ptrs(port_mmio, hpriv, pp);
return ata_qc_issue_prot(qc);
}
- mv_start_dma(ap, port_mmio, pp);
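+ /* (re)start EDMA in whichever mode this command's protocol requires */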
+ mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;