/* MUSB_TXCSR_P_ISO is still set correctly */
-#ifdef CONFIG_USB_INVENTRA_DMA
- {
+ if (musb_inventra_dma()) {
size_t request_size;
/* setup DMA, then program endpoint CSR */
}
}
-#elif defined(CONFIG_USB_TI_CPPI_DMA)
- /* program endpoint CSR first, then setup DMA */
- csr &= ~(MUSB_TXCSR_AUTOSET
- | MUSB_TXCSR_DMAMODE
- | MUSB_TXCSR_P_UNDERRUN
- | MUSB_TXCSR_TXPKTRDY);
- csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
- musb_writew(epio, MUSB_TXCSR,
- (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
- | csr);
-
- /* ensure writebuffer is empty */
- csr = musb_readw(epio, MUSB_TXCSR);
+ if (cppi_ti_dma()) {
+ /* program endpoint CSR first, then setup DMA */
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_P_UNDERRUN
+ | MUSB_TXCSR_TXPKTRDY);
+ csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR,
+ (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
+ | csr);
- /* NOTE host side sets DMAENAB later than this; both are
- * OK since the transfer dma glue (between CPPI and Mentor
- * fifos) just tells CPPI it could start. Data only moves
- * to the USB TX fifo when both fifos are ready.
- */
+ /* ensure writebuffer is empty */
+ csr = musb_readw(epio, MUSB_TXCSR);
- /* "mode" is irrelevant here; handle terminating ZLPs like
- * PIO does, since the hardware RNDIS mode seems unreliable
- * except for the last-packet-is-already-short case.
- */
- use_dma = use_dma && c->channel_program(
- musb_ep->dma, musb_ep->packet_sz,
- 0,
- request->dma,
- request->length);
- if (!use_dma) {
- c->channel_release(musb_ep->dma);
- musb_ep->dma = NULL;
- /* ASSERT: DMAENAB clear */
- csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
- /* invariant: prequest->buf is non-null */
+ /* NOTE host side sets DMAENAB later than this; both are
+ * OK since the transfer dma glue (between CPPI and Mentor
+ * fifos) just tells CPPI it could start. Data only moves
+ * to the USB TX fifo when both fifos are ready.
+ */
+
+ /* "mode" is irrelevant here; handle terminating ZLPs like
+ * PIO does, since the hardware RNDIS mode seems unreliable
+ * except for the last-packet-is-already-short case.
+ */
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ 0,
+ request->dma,
+ request->length);
+ if (!use_dma) {
+ c->channel_release(musb_ep->dma);
+ musb_ep->dma = NULL;
+ /* ASSERT: DMAENAB clear */
+ csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+ /* invariant: prequest->buf is non-null */
+ }
+ }
+
+ if (tusb_dma_omap()) {
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ request->zero,
+ request->dma,
+ request->length);
}
-#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
- use_dma = use_dma && c->channel_program(
- musb_ep->dma, musb_ep->packet_sz,
- request->zero,
- request->dma,
- request->length);
-#endif
}
#endif
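
The runtime predicates used above (musb_inventra_dma(), cppi_ti_dma(), tusb_dma_omap()) are assumed to be defined elsewhere in the series; this excerpt does not show their definitions. A minimal sketch of how they could look, assuming they simply wrap the existing Kconfig symbols (the bodies below are an assumption, not the patch's actual code):

#include <linux/types.h>	/* bool */

/* Hypothetical wrappers; the patch may define these differently. */
static inline bool musb_inventra_dma(void)
{
#ifdef CONFIG_USB_INVENTRA_DMA
	return true;		/* Mentor (Inventra) DMA engine built in */
#else
	return false;
#endif
}

static inline bool cppi_ti_dma(void)
{
#ifdef CONFIG_USB_TI_CPPI_DMA
	return true;		/* TI CPPI DMA engine built in */
#else
	return false;
#endif
}

static inline bool tusb_dma_omap(void)
{
#ifdef CONFIG_USB_TUSB_OMAP_DMA
	return true;		/* TUSB6010 OMAP DMA built in */
#else
	return false;
#endif
}

Unlike the #ifdef/#elif blocks being removed, code in the untaken branches must still compile, which is usually the point of such a conversion; since the wrappers return compile-time constants, the compiler can still drop the dead branches, so a single-controller build should not grow.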
csr = musb_readw(epio, MUSB_RXCSR);
- if (is_cppi_enabled() && musb_ep->dma) {
+ if (cppi_ti_dma() && musb_ep->dma) {
struct dma_controller *c = musb->dma_controller;
struct dma_channel *channel = musb_ep->dma;
if (csr & MUSB_RXCSR_RXPKTRDY) {
len = musb_readw(epio, MUSB_RXCOUNT);
if (request->actual < request->length) {
-#ifdef CONFIG_USB_INVENTRA_DMA
- if (is_dma_capable() && musb_ep->dma) {
+
+ /* We use DMA Req mode 0 in rx_csr, and the DMA controller operates in
+ * mode 0 only. So we do not get endpoint interrupts due to DMA
+ * completion. We only get interrupts from the DMA controller.
+ *
+ * We could operate in DMA mode 1 if we knew the size of the transfer
+ * in advance. For mass storage class, request->length = what the host
+ * sends, so that'd work. But for pretty much everything else,
+ * request->length is routinely more than what the host sends. For
+ * most of these gadgets, the end of the transfer is signified either by
+ * a short packet, or by filling the last byte of the buffer. (Sending
+ * extra data in that last packet should trigger an overflow fault.)
+ * But in mode 1, we don't get a DMA completion interrupt for short
+ * packets.
+ *
+ * Theoretically, we could enable the DMAReq irq (MUSB_RXCSR_DMAMODE = 1)
+ * to get an endpoint interrupt on every DMA req, but that didn't seem
+ * to work reliably.
+ *
+ * REVISIT an updated g_file_storage can set req->short_not_ok, which
+ * then becomes usable as a runtime "use mode 1" hint (a sketch of that
+ * idea follows the block below)...
+ */
+ if (musb_inventra_dma() && musb_ep->dma) {
struct dma_controller *c;
struct dma_channel *channel;
int use_dma = 0;
c = musb->dma_controller;
channel = musb_ep->dma;
- /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
- * mode 0 only. So we do not get endpoint interrupts due to DMA
- * completion. We only get interrupts from DMA controller.
- *
- * We could operate in DMA mode 1 if we knew the size of the tranfer
- * in advance. For mass storage class, request->length = what the host
- * sends, so that'd work. But for pretty much everything else,
- * request->length is routinely more than what the host sends. For
- * most these gadgets, end of is signified either by a short packet,
- * or filling the last byte of the buffer. (Sending extra data in
- * that last pckate should trigger an overflow fault.) But in mode 1,
- * we don't get DMA completion interrrupt for short packets.
- *
- * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
- * to get endpoint interrupt on every DMA req, but that didn't seem
- * to work reliably.
- *
- * REVISIT an updated g_file_storage can set req->short_not_ok, which
- * then becomes usable as a runtime "use mode 1" hint...
- */
-
csr |= MUSB_RXCSR_DMAENAB;
#ifdef USE_MODE1
csr |= MUSB_RXCSR_AUTOCLEAR;
			/* this special sequence (enabling and then
			 * disabling MUSB_RXCSR_DMAMODE) is required
			 * to get DMAReq to activate
			 */
musb_writew(epio, MUSB_RXCSR,
- csr | MUSB_RXCSR_DMAMODE);
+ csr | MUSB_RXCSR_DMAMODE);
#endif
musb_writew(epio, MUSB_RXCSR, csr);
if (use_dma)
return;
}
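
The REVISIT note above mentions req->short_not_ok as a possible runtime "use mode 1" hint. A standalone sketch of that decision, written against hypothetical names (rx_dma_use_mode1() and struct rx_req are illustrative only, not part of the patch or the driver):

#include <stdbool.h>
#include <stddef.h>

struct rx_req {
	size_t length;		/* bytes the gadget driver queued */
	bool   short_not_ok;	/* driver treats a short packet as an error */
};

/*
 * DMA mode 1 raises one completion interrupt for the whole transfer, but
 * none for a terminating short packet. It is therefore only safe when the
 * transfer size is known up front: the driver must not expect a short
 * packet (short_not_ok) and the buffer must hold a whole number of packets,
 * so no expected-short final packet can go unnoticed.  packet_sz is assumed
 * to be non-zero.
 */
static bool rx_dma_use_mode1(const struct rx_req *req, size_t packet_sz)
{
	return req->short_not_ok &&
	       req->length >= packet_sz &&
	       (req->length % packet_sz) == 0;
}

The code in the excerpt keeps mode 0, where, as its comment explains, completion interrupts come from the DMA controller for each programmed packet and a short packet simply ends the request.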
-#endif /* Mentor's DMA */
-
- fifo_count = request->length - request->actual;
- DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
- musb_ep->end_point.name,
- len, fifo_count,
- musb_ep->packet_sz);
- fifo_count = min(len, fifo_count);
-
-#ifdef CONFIG_USB_TUSB_OMAP_DMA
if (tusb_dma_omap() && musb_ep->dma) {
struct dma_controller *c = musb->dma_controller;
struct dma_channel *channel = musb_ep->dma;
if (ret)
return;
}
-#endif
+
+ fifo_count = request->length - request->actual;
+ DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+ musb_ep->end_point.name,
+ len, fifo_count,
+ musb_ep->packet_sz);
+
+ fifo_count = min(len, fifo_count);
musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
(request->buf + request->actual));
	DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
		epnum, csr,
		musb_readw(epio, MUSB_RXCSR),
		musb_ep->dma->actual_len, request);
-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
/* Autoclear doesn't clear RxPktRdy for short packets */
- if ((dma->desired_mode == 0)
+ if ((tusb_dma_omap() || musb_inventra_dma())
+ && ((dma->desired_mode == 0)
|| (dma->actual_len
- & (musb_ep->packet_sz - 1))) {
+ & (musb_ep->packet_sz - 1)))) {
/* ack the read! */
csr &= ~MUSB_RXCSR_RXPKTRDY;
musb_writew(epio, MUSB_RXCSR, csr);
	/* incomplete, and not short? wait for next IN packet */
	if ((request->actual < request->length)
			&& (musb_ep->dma->actual_len
				== musb_ep->packet_sz))
		goto done;
-#endif
+
musb_g_giveback(musb_ep, request, 0);
request = next_request(musb_ep);
if (!hw_ep->tx_channel)
musb_h_tx_start(hw_ep);
- else if (is_cppi_enabled() || tusb_dma_omap())
+ else if (cppi_ti_dma() || tusb_dma_omap())
cppi_host_txdma_start(hw_ep);
}
}
else
load_count = min((u32) packet_sz, len);
-#ifdef CONFIG_USB_INVENTRA_DMA
- if (dma_channel) {
+ if (musb_inventra_dma() && dma_channel) {
/* clear previous state */
csr = musb_readw(epio, MUSB_TXCSR);
dma_channel = NULL;
}
}
-#endif
/* candidate for DMA */
- if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+ if ((cppi_ti_dma() || tusb_dma_omap()) && dma_channel) {
/* program endpoint CSRs first, then setup DMA.
* assume CPPI setup succeeds.
/* kick things off */
- if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+ if ((cppi_ti_dma() || tusb_dma_omap()) && dma_channel) {
/* candidate for DMA */
if (dma_channel) {
dma_channel->actual_len = 0L;
/* FIXME this is _way_ too much in-line logic for Mentor DMA */
-#ifndef CONFIG_USB_INVENTRA_DMA
- if (rx_csr & MUSB_RXCSR_H_REQPKT) {
+ if (!musb_inventra_dma() && (rx_csr & MUSB_RXCSR_H_REQPKT)) {
/* REVISIT this happened for a while on some short reads...
* the cleanup still needs investigation... looks bad...
* and also duplicates dma cleanup code above ... plus,
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | rx_csr);
}
-#endif
+
if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
xfer_len = dma->actual_len;
}
/* we are expecting IN packets */
-#ifdef CONFIG_USB_INVENTRA_DMA
- if (dma) {
+ if (musb_inventra_dma() && dma) {
struct dma_controller *c;
u16 rx_count;
int ret, length;
/* REVISIT reset CSR */
}
}
-#endif /* Mentor DMA */
if (!dma) {
done = musb_host_packet_rx(musb, urb,