u8 chNum;
struct cppi_channel *otgCh;
void __iomem *tibase;
- int local_end = ep->bLocalEnd;
+ int local_end = ep->epnum;
pController = container_of(c, struct cppi, Controller);
tibase = pController->pCoreBase - DAVINCI_BASE_OFFSET;
};
/* called after channel_program(), may indicate a fault */
-extern void musb_dma_completion(struct musb *musb, u8 bLocalEnd, u8 bTransmit);
+extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 bTransmit);
extern struct dma_controller *__init
u16 txcsr;
/* NOTE: no locks here; caller should lock and select EP */
- if (ep->bLocalEnd) {
+ if (ep->epnum) {
txcsr = musb_readw(ep->regs, MGC_O_HDRC_TXCSR);
txcsr |= MGC_M_TXCSR_TXPKTRDY | MGC_M_TXCSR_H_WZC_BITS;
musb_writew(ep->regs, MGC_O_HDRC_TXCSR, txcsr);
struct musb_hw_ep *hw_ep = qh->hw_ep;
unsigned nPipe = urb->pipe;
u8 bAddress = usb_pipedevice(nPipe);
- int bEnd = hw_ep->bLocalEnd;
+ int bEnd = hw_ep->epnum;
/* initialize software qh state */
qh->offset = 0;
* de-allocated if it's tracked and allocated;
* and where we'd update the schedule tree...
*/
- musb->periodic[ep->bLocalEnd] = NULL;
+ musb->periodic[ep->epnum] = NULL;
kfree(qh);
qh = NULL;
break;
if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
DBG(4, "... next ep%d %cX urb %p\n",
- hw_ep->bLocalEnd, is_in ? 'R' : 'T',
+ hw_ep->epnum, is_in ? 'R' : 'T',
next_urb(qh));
musb_start_urb(musb, is_in, qh);
}
} else {
csr = musb_readw(ep->regs, MGC_O_HDRC_RXCSR);
if (csr & MGC_M_RXCSR_RXPKTRDY)
- WARN("rx%d, packet/%d ready?\n", ep->bLocalEnd,
+ WARN("rx%d, packet/%d ready?\n", ep->epnum,
musb_readw(ep->regs, MGC_O_HDRC_RXCOUNT));
musb_h_flush_rxfifo(ep, MGC_M_RXCSR_CLRDATATOG);
| MGC_M_RXCSR_DMAENAB
| MGC_M_RXCSR_H_REQPKT))
ERR("broken !rx_reinit, ep%d csr %04x\n",
- hw_ep->bLocalEnd, csr);
+ hw_ep->epnum, csr);
/* scrub any stale state, leaving toggle alone */
csr &= MGC_M_RXCSR_DISNYET;
{
struct musb_hw_ep *ep = qh->hw_ep;
void __iomem *epio = ep->regs;
- unsigned hw_end = ep->bLocalEnd;
+ unsigned hw_end = ep->epnum;
void __iomem *regs = ep->musb->mregs;
u16 csr;
int status = 0;
status = ep->musb->pDmaController->channel_abort(dma);
DBG(status ? 1 : 3,
"abort %cX%d DMA for urb %p --> %d\n",
- is_in ? 'R' : 'T', ep->bLocalEnd,
+ is_in ? 'R' : 'T', ep->epnum,
urb, status);
urb->actual_length += dma->dwActualLength;
}
#endif
/* index in musb->aLocalEnd[] */
- u8 bLocalEnd;
+ u8 epnum;
/* hardware configuration, possibly dynamic */
u8 bIsSharedFifo;
pImplChannel = &(pController->aChannel[bBit]);
pImplChannel->pController = pController;
pImplChannel->bIndex = bBit;
- pImplChannel->bEnd = hw_ep->bLocalEnd;
+ pImplChannel->bEnd = hw_ep->epnum;
pImplChannel->bTransmit = bTransmit;
pChannel = &(pImplChannel->Channel);
pChannel->pPrivateData = pImplChannel;
prefetch((u8 *)src);
DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
- 'T', hw_ep->bLocalEnd, fifo, len, src);
+ 'T', hw_ep->epnum, fifo, len, src);
/* we can't assume unaligned reads work */
if (likely((0x01 & (unsigned long) src) == 0)) {
void __iomem *fifo = hw_ep->fifo;
DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
- 'R', hw_ep->bLocalEnd, fifo, len, dst);
+ 'R', hw_ep->epnum, fifo, len, dst);
/* we can't assume unaligned writes work */
if (likely((0x01 & (unsigned long) dst) == 0)) {
}
/* configure the FIFO */
- musb_writeb(mbase, MGC_O_HDRC_INDEX, hw_ep->bLocalEnd);
+ musb_writeb(mbase, MGC_O_HDRC_INDEX, hw_ep->epnum);
#ifdef CONFIG_USB_MUSB_HDRC_HCD
/* EP0 reserved endpoint for control, bidirectional;
 * EP1 reserved for bulk, two unidirectional halves.
*/
- if (hw_ep->bLocalEnd == 1)
+ if (hw_ep->epnum == 1)
musb->bulk_ep = hw_ep;
/* REVISIT error check: be sure ep0 can both rx and tx ... */
#endif
/* NOTE rx and tx endpoint irqs aren't managed separately,
* which happens to be ok
*/
- musb->wEndMask |= (1 << hw_ep->bLocalEnd);
+ musb->wEndMask |= (1 << hw_ep->epnum);
return offset + (maxpacket << ((c_size & MGC_M_FIFOSZ_DPB) ? 1 : 0));
}
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
-void musb_dma_completion(struct musb *musb, u8 bLocalEnd, u8 bTransmit)
+void musb_dma_completion(struct musb *musb, u8 epnum, u8 bTransmit)
{
u8 devctl = musb_readb(musb->mregs, MGC_O_HDRC_DEVCTL);
/* called with controller lock already held */
- if (!bLocalEnd) {
+ if (!epnum) {
#ifndef CONFIG_USB_TUSB_OMAP_DMA
if (!is_cppi_enabled()) {
/* endpoint 0 */
if (bTransmit) {
if (devctl & MGC_M_DEVCTL_HM) {
if (is_host_capable())
- musb_host_tx(musb, bLocalEnd);
+ musb_host_tx(musb, epnum);
} else {
if (is_peripheral_capable())
- musb_g_tx(musb, bLocalEnd);
+ musb_g_tx(musb, epnum);
}
} else {
/* receive */
if (devctl & MGC_M_DEVCTL_HM) {
if (is_host_capable())
- musb_host_rx(musb, bLocalEnd);
+ musb_host_rx(musb, epnum);
} else {
if (is_peripheral_capable())
- musb_g_rx(musb, bLocalEnd);
+ musb_g_rx(musb, epnum);
}
}
}
epnum++, ep++) {
ep->musb = musb;
- ep->bLocalEnd = epnum;
+ ep->epnum = epnum;
}
musb->controller = dev;
{
void __iomem *ep_conf = hw_ep->conf;
void __iomem *fifo = hw_ep->fifo;
- u8 epnum = hw_ep->bLocalEnd;
+ u8 epnum = hw_ep->epnum;
prefetch(buf);
{
void __iomem *ep_conf = hw_ep->conf;
void __iomem *fifo = hw_ep->fifo;
- u8 epnum = hw_ep->bLocalEnd;
+ u8 epnum = hw_ep->epnum;
DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
'R', epnum, fifo, len, buf);
reg = musb_readl(tusb_base, TUSB_DMA_INT_MASK);
if (tx)
- reg &= ~(1 << hw_ep->bLocalEnd);
+ reg &= ~(1 << hw_ep->epnum);
else
- reg &= ~(1 << (hw_ep->bLocalEnd + 15));
+ reg &= ~(1 << (hw_ep->epnum + 15));
musb_writel(tusb_base, TUSB_DMA_INT_MASK, reg);
/* REVISIT: Why does dmareq5 not work? */
- if (hw_ep->bLocalEnd == 0) {
+ if (hw_ep->epnum == 0) {
DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
return NULL;
}
chdat->musb = tusb_dma->musb;
chdat->tusb_base = tusb_dma->tusb_base;
chdat->hw_ep = hw_ep;
- chdat->epnum = hw_ep->bLocalEnd;
+ chdat->epnum = hw_ep->epnum;
chdat->dmareq = -1;
chdat->completed_len = 0;
chdat->tusb_dma = tusb_dma;