obj-$(CONFIG_PARIDE) += block/paride/
obj-$(CONFIG_TC) += tc/
obj-$(CONFIG_USB) += usb/
+obj-$(CONFIG_USB_MUSB_HDRC) += usb/musb/
obj-$(CONFIG_PCI) += usb/
obj-$(CONFIG_USB_GADGET) += usb/gadget/
obj-$(CONFIG_GAMEPORT) += input/gameport/
source "drivers/usb/host/Kconfig"
+source "drivers/usb/musb/Kconfig"
+
source "drivers/usb/class/Kconfig"
source "drivers/usb/storage/Kconfig"
select USB_GADGET_SELECTED
+# built in ../musb along with host support
+config USB_GADGET_MUSB_HDRC
+ boolean "Inventra HDRC USB Peripheral (TI, ...)"
+ depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+ select USB_GADGET_DUALSPEED
+ select USB_GADGET_SELECTED
+ help
+	  This OTG-capable silicon IP is used in dual role designs including
+ the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010.
+
config USB_GADGET_OMAP
boolean "OMAP USB Device Controller"
depends on ARCH_OMAP1
--- /dev/null
+#
+# USB Dual Role (OTG-ready) Controller Drivers
+# for silicon based on Mentor Graphics INVENTRA designs
+#
+
+comment "Enable Host or Gadget support to see Inventra options"
+	depends on USB=n && USB_GADGET=n
+
+# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
+config USB_MUSB_HDRC
+ depends on USB || USB_GADGET
+ tristate 'Inventra Highspeed Dual Role Controller (TI, ...)'
+ help
+ Say Y here if your system has a dual role high speed USB
+ controller based on the Mentor Graphics silicon IP. Then
+ configure options to match your silicon and the board
+ it's being used with, including the USB peripheral role,
+ or the USB host role, or both.
+
+ Texas Instruments parts using this IP include DaVinci 644x,
+ OMAP 243x, OMAP 343x, and TUSB 6010.
+
+ If you do not know what this is, please say N.
+
+ To compile this driver as a module, choose M here; the
+ module will be called "musb_hdrc".
+
+config USB_MUSB_SOC
+ boolean
+ depends on USB_MUSB_HDRC
+ default y if ARCH_DAVINCI
+ default y if ARCH_OMAP243X
+ default y if ARCH_OMAP343X
+ help
+ Use a static <asm/arch/hdrc_cnf.h> file to describe how the
+ controller is configured (endpoints, mechanisms, etc) on the
+ current iteration of a given system-on-chip.
+
+comment "DaVinci 644x USB support"
+ depends on USB_MUSB_HDRC && ARCH_DAVINCI
+
+comment "OMAP 243x high speed USB support"
+ depends on USB_MUSB_HDRC && ARCH_OMAP243X
+
+comment "OMAP 343x high speed USB support"
+ depends on USB_MUSB_HDRC && ARCH_OMAP343X
+
+config USB_TUSB6010
+ boolean "TUSB 6010 support"
+ depends on USB_MUSB_HDRC && !USB_MUSB_SOC
+ default y
+ help
+ The TUSB 6010 chip, from Texas Instruments, connects a discrete
+ HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ
+ (a high speed serial link). It can use system-specific external
+ DMA controllers.
+
+choice
+ prompt "Driver Mode"
+ depends on USB_MUSB_HDRC
+ help
+ Dual-Role devices can support both host and peripheral roles,
+	  as well as the special "OTG Device" role which can switch
+ between both roles as needed.
+
+# use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support;
+# OTG needs both roles, not just USB_MUSB_HOST.
+config USB_MUSB_HOST
+ depends on USB
+ bool "USB Host"
+ help
+ Say Y here if your system supports the USB host role.
+ If it has a USB "A" (rectangular), "Mini-A" (uncommon),
+ or "Mini-AB" connector, it supports the host role.
+ (With a "Mini-AB" connector, you should enable USB OTG.)
+
+# use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral
+# side support ... OTG needs both roles
+config USB_MUSB_PERIPHERAL
+ depends on USB_GADGET
+ bool "USB Peripheral (gadget stack)"
+ select USB_GADGET_MUSB_HDRC
+ help
+ Say Y here if your system supports the USB peripheral role.
+ If it has a USB "B" (squarish), "Mini-B", or "Mini-AB"
+ connector, it supports the peripheral role.
+ (With a "Mini-AB" connector, you should enable USB OTG.)
+
+config USB_MUSB_OTG
+ depends on USB && USB_GADGET && EXPERIMENTAL
+ bool "Both host and peripheral: USB OTG (On The Go) Device"
+ select USB_GADGET_MUSB_HDRC
+ select USB_OTG
+ select PM
+ help
+ The most notable feature of USB OTG is support for a
+ "Dual-Role" device, which can act as either a device
+ or a host. The initial role choice can be changed
+ later, when two dual-role devices talk to each other.
+
+ At this writing, the OTG support in this driver is incomplete,
+ omitting the mandatory HNP or SRP protocols. However, some
+ of the cable based role switching works. (That is, grounding
+ the ID pin switches the controller to host mode, while leaving
+ it floating leaves it in peripheral mode.)
+
+ Select this if your system has a Mini-AB connector, or
+ to simplify certain kinds of configuration.
+
+ To implement your OTG Targeted Peripherals List (TPL), enable
+ USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h"
+ to match your requirements.
+
+endchoice
+
+# enable peripheral support (including with OTG)
+config USB_GADGET_MUSB_HDRC
+ bool
+ depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+# default y
+# select USB_GADGET_DUALSPEED
+# select USB_GADGET_SELECTED
+
+# enables host support (including with OTG)
+config USB_MUSB_HDRC_HCD
+ bool
+ depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG)
+ select USB_OTG if USB_GADGET_MUSB_HDRC
+ default y
+
+
+config USB_INVENTRA_FIFO
+ bool 'Disable DMA (always use PIO)'
+ depends on USB_MUSB_HDRC
+ default y if USB_TUSB6010
+ help
+ All data is copied between memory and FIFO by the CPU.
+ DMA controllers are ignored.
+
+	  Do not select 'y' here unless DMA support for your SOC or board
+ is unavailable (or unstable). When DMA is enabled at compile time,
+ you can still disable it at run time using the "use_dma=n" module
+ parameter.
+
+config USB_INVENTRA_DMA
+ bool
+ depends on USB_MUSB_HDRC && !USB_INVENTRA_FIFO
+ default ARCH_OMAP243X || ARCH_OMAP343X
+ help
+ Enable DMA transfers using Mentor's engine.
+
+config USB_TI_CPPI_DMA
+ bool
+ depends on USB_MUSB_HDRC && !USB_INVENTRA_FIFO
+ default ARCH_DAVINCI
+ help
+ Enable DMA transfers when TI CPPI DMA is available.
+
+config USB_TUSB_OMAP_DMA
+ bool
+ depends on USB_MUSB_HDRC && !USB_INVENTRA_FIFO
+ depends on USB_TUSB6010
+ depends on ARCH_OMAP
+ default y
+ help
+ Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
+
+config USB_INVENTRA_HCD_LOGGING
+ depends on USB_MUSB_HDRC
+ int 'Logging Level (0 - none / 3 - annoying / ... )'
+ default 0
+ help
+ Set the logging level. 0 disables the debugging altogether,
+ although when USB_DEBUG is set the value is at least 1.
+ Starting at level 3, per-transfer (urb, usb_request, packet,
+ or dma transfer) tracing may kick in.
--- /dev/null
+#
+# for USB OTG silicon based on Mentor Graphics INVENTRA designs
+#
+
+musb_hdrc-objs := plat_uds.o
+
+obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
+
+ifeq ($(CONFIG_ARCH_DAVINCI),y)
+ musb_hdrc-objs += davinci.o
+endif
+
+ifeq ($(CONFIG_USB_TUSB6010),y)
+ musb_hdrc-objs += tusb6010.o
+endif
+
+ifeq ($(CONFIG_ARCH_OMAP243X),y)
+ musb_hdrc-objs += omap2430.o
+endif
+
+ifeq ($(CONFIG_USB_MUSB_OTG),y)
+ musb_hdrc-objs += otg.o
+endif
+
+ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y)
+ musb_hdrc-objs += g_ep0.o musb_gadget.o
+endif
+
+ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y)
+ musb_hdrc-objs += virthub.o musb_host.o
+endif
+
+# the kconfig must guarantee that only one of the
+# possible I/O schemes will be enabled at a time ...
+# PIO (INVENTRA_FIFO), or DMA (several potential schemes).
+# though PIO is always there to back up DMA, and for ep0
+
+ifneq ($(CONFIG_USB_INVENTRA_FIFO),y)
+
+ ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
+ musb_hdrc-objs += musbhsdma.o
+
+ else
+ ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
+ musb_hdrc-objs += cppi_dma.o
+
+ else
+ ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
+ musb_hdrc-objs += tusb6010_omap.o
+
+ endif
+ endif
+ endif
+endif
+
+
+################################################################################
+
+# FIXME remove all these extra -DMUSB_* things, stick to CONFIG_*
+
+ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y)
+ EXTRA_CFLAGS += -DMUSB_AHB_ID
+endif
+
+# Debugging
+
+MUSB_DEBUG:=$(CONFIG_USB_INVENTRA_HCD_LOGGING)
+
+ifeq ("$(strip $(MUSB_DEBUG))","")
+ ifdef CONFIG_USB_DEBUG
+ MUSB_DEBUG:=1
+ else
+ MUSB_DEBUG:=0
+ endif
+endif
+
+ifneq ($(MUSB_DEBUG),0)
+ EXTRA_CFLAGS += -DDEBUG
+
+ ifeq ($(CONFIG_PROC_FS),y)
+ musb_hdrc-objs += musb_procfs.o
+ endif
+
+endif
+
+EXTRA_CFLAGS += -DMUSB_DEBUG=$(MUSB_DEBUG)
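+
+# Note: these are plain := assignments, so a value given on the make
+# command line (e.g. "make MUSB_DEBUG=2 ...") overrides them; that is
+# standard GNU make behavior, not something this Makefile arranges.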
--- /dev/null
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file implements a DMA interface using TI's CPPI DMA.
+ * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
+ */
+
+#include <linux/config.h>
+#include <linux/usb.h>
+
+#include "musbdefs.h"
+#include "cppi_dma.h"
+
+
+/* CPPI DMA status 7-mar:
+ *
+ * - See musb_{host,gadget}.c for more info
+ *
+ * - Correct RX DMA generally forces the engine into irq-per-packet mode,
+ * which can easily saturate the CPU under non-mass-storage loads.
+ *
+ * NOTES 24-aug (2.6.18-rc4):
+ *
+ * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
+ * evidently after the 1 byte packet was received and acked, the queue
+ * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003,
+ * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
+ * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx
+ * of its next (512 byte) packet. IRQ issues?
+ *
+ * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
+ * evidently also directly update the RX and TX CSRs ... so audit all
+ * host and peripheral side DMA code to avoid CSR access after DMA has
+ * been started.
+ */
+
+/* REVISIT now we can avoid preallocating these descriptors; or
+ * more simply, switch to a global freelist not per-channel ones.
+ * Note: at full speed, 64 descriptors == 4K bulk data.
+ */
+#define NUM_TXCHAN_BD 64
+#define NUM_RXCHAN_BD 64
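+/* i.e. 64 BDs x 64-byte full speed bulk maxpacket == 4KB per segment */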
+
+static inline void cpu_drain_writebuffer(void)
+{
+ wmb();
+#ifdef CONFIG_CPU_ARM926T
+ /* REVISIT this "should not be needed",
+ * but lack of it sure seemed to hurt ...
+ */
+ asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
+#endif
+}
+
+static inline struct cppi_descriptor *
+cppi_bd_alloc(struct cppi_channel *c)
+{
+ struct cppi_descriptor *bd = c->bdPoolHead;
+
+ if (bd)
+ c->bdPoolHead = bd->next;
+ return bd;
+}
+
+static inline void
+cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
+{
+ if (!bd)
+ return;
+ bd->next = c->bdPoolHead;
+ c->bdPoolHead = bd;
+}
+
+/*
+ * Start Dma controller
+ *
+ * Initialize the Dma Controller as necessary.
+ */
+
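+/* shorthand cast: keeps sparse quiet when handing individual state ram
+ * fields to the musb_{read,write}l() accessors
+ */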
+#define CAST (void *__force __iomem)
+
+/* zero out entire rx state RAM entry for the channel */
+static void cppi_reset_rx(struct cppi_rx_stateram *__iomem rx)
+{
+ musb_writel(CAST &rx->buffOffset, 0, 0);
+ musb_writel(CAST &rx->headPtr, 0, 0);
+ musb_writel(CAST &rx->sopDescPtr, 0, 0);
+ musb_writel(CAST &rx->currDescPtr, 0, 0);
+ musb_writel(CAST &rx->currBuffPtr, 0, 0);
+ musb_writel(CAST &rx->pktLength, 0, 0);
+ musb_writel(CAST &rx->byteCount, 0, 0);
+}
+
+static void __devinit cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
+{
+ int j;
+
+ /* initialize channel fields */
+ c->activeQueueHead = NULL;
+ c->activeQueueTail = NULL;
+ c->lastHwBDProcessed = NULL;
+ c->Channel.bStatus = MGC_DMA_STATUS_UNKNOWN;
+ c->pController = cppi;
+ c->bLastModeRndis = 0;
+ c->Channel.pPrivateData = c;
+ c->bdPoolHead = NULL;
+
+ /* build the BD Free list for the channel */
+ for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
+ struct cppi_descriptor *bd;
+ dma_addr_t dma;
+
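+		/* NOTE: dma_pool_alloc() can return NULL; this init path
+		 * assumes the pool never runs dry and does not check.
+		 */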
+ bd = dma_pool_alloc(cppi->pool, SLAB_KERNEL, &dma);
+ bd->dma = dma;
+ cppi_bd_free(c, bd);
+ }
+}
+
+static int cppi_channel_abort(struct dma_channel *);
+
+static void cppi_pool_free(struct cppi_channel *c)
+{
+ struct cppi *cppi = c->pController;
+ struct cppi_descriptor *bd;
+
+ (void) cppi_channel_abort(&c->Channel);
+ c->Channel.bStatus = MGC_DMA_STATUS_UNKNOWN;
+ c->pController = NULL;
+
+ /* free all its bds */
+ bd = c->lastHwBDProcessed;
+ do {
+ if (bd)
+ dma_pool_free(cppi->pool, bd, bd->dma);
+ bd = cppi_bd_alloc(c);
+ } while (bd);
+ c->lastHwBDProcessed = NULL;
+}
+
+static int __devinit cppi_controller_start(struct dma_controller *c)
+{
+ struct cppi *pController;
+ void *__iomem regBase;
+ int i;
+
+ pController = container_of(c, struct cppi, Controller);
+
+ /* do whatever is necessary to start controller */
+ for (i = 0; i < ARRAY_SIZE(pController->txCppi); i++) {
+ pController->txCppi[i].bTransmit = TRUE;
+ pController->txCppi[i].chNo = i;
+ }
+ for (i = 0; i < ARRAY_SIZE(pController->rxCppi); i++) {
+ pController->rxCppi[i].bTransmit = FALSE;
+ pController->rxCppi[i].chNo = i;
+ }
+
+ /* setup BD list on a per channel basis */
+ for (i = 0; i < ARRAY_SIZE(pController->txCppi); i++)
+ cppi_pool_init(pController, pController->txCppi + i);
+ for (i = 0; i < ARRAY_SIZE(pController->rxCppi); i++)
+ cppi_pool_init(pController, pController->rxCppi + i);
+
+	/* do the necessary hardware configuration to get started */
+ regBase = pController->pCoreBase - DAVINCI_BASE_OFFSET;
+
+ INIT_LIST_HEAD(&pController->tx_complete);
+
+ /* initialise tx/rx channel head pointers to zero */
+ for (i = 0; i < ARRAY_SIZE(pController->txCppi); i++) {
+ struct cppi_channel *txChannel = pController->txCppi + i;
+ struct cppi_tx_stateram *__iomem txState;
+
+ INIT_LIST_HEAD(&txChannel->tx_complete);
+
+ txState = regBase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
+ txChannel->stateRam = txState;
+ /* zero out entire state RAM entry for the channel */
+ txState->headPtr = 0;
+ txState->sopDescPtr = 0;
+ txState->currDescPtr = 0;
+ txState->currBuffPtr = 0;
+ txState->flags = 0;
+ txState->remLength = 0;
+ /*txState->dummy = 0; */
+ txState->completionPtr = 0;
+
+ }
+ for (i = 0; i < ARRAY_SIZE(pController->rxCppi); i++) {
+ struct cppi_channel *rxChannel = pController->rxCppi + i;
+ struct cppi_rx_stateram *__iomem rxState;
+
+ INIT_LIST_HEAD(&rxChannel->tx_complete);
+
+ rxState = regBase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
+ rxChannel->stateRam = rxState;
+ cppi_reset_rx(rxChannel->stateRam);
+ }
+
+ /* enable individual cppi channels */
+ musb_writel(regBase, DAVINCI_TXCPPI_INTENAB_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+ musb_writel(regBase, DAVINCI_RXCPPI_INTENAB_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+ /* enable tx/rx CPPI control */
+ musb_writel(regBase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+ musb_writel(regBase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+
+ /* disable RNDIS mode, also host rx RNDIS autorequest */
+ musb_writel(regBase, DAVINCI_RNDIS_REG, 0);
+ musb_writel(regBase, DAVINCI_AUTOREQ_REG, 0);
+
+ return 0;
+}
+
+/*
+ * Stop Dma controller
+ *
+ * De-Init the Dma Controller as necessary.
+ */
+
+static int cppi_controller_stop(struct dma_controller *c)
+{
+ struct cppi *pController;
+ void __iomem *regBase;
+ int i;
+
+ pController = container_of(c, struct cppi, Controller);
+
+ regBase = pController->pCoreBase - DAVINCI_BASE_OFFSET;
+ /* DISABLE INDIVIDUAL CHANNEL Interrupts */
+ musb_writel(regBase, DAVINCI_TXCPPI_INTCLR_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+ musb_writel(regBase, DAVINCI_RXCPPI_INTCLR_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+ DBG(1, "Tearing down RX and TX Channels\n");
+ for (i = 0; i < ARRAY_SIZE(pController->txCppi); i++) {
+ /* FIXME restructure of txdma to use bds like rxdma */
+ pController->txCppi[i].lastHwBDProcessed = NULL;
+ cppi_pool_free(pController->txCppi + i);
+ }
+ for (i = 0; i < ARRAY_SIZE(pController->rxCppi); i++)
+ cppi_pool_free(pController->rxCppi + i);
+
+	/* Only TX has proper teardown support; TX CPPI cannot be disabled
+	 * before TX teardown completes.  So we disable TX/RX CPPI only
+	 * after the TX channels have been cleaned up.
+	 */
+ musb_writel(regBase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+ musb_writel(regBase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+
+ return 0;
+}
+
+/* While dma channel is allocated, we only want the core irqs active
+ * for fault reports, otherwise we'd get irqs that we don't care about.
+ * Except for TX irqs, where "dma done" doesn't mean the fifo is
+ * empty and reusable ...
+ *
+ * NOTE: docs don't say either way, but irq masking **enables** irqs.
+ *
+ * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
+ */
+static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
+{
+ musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
+}
+
+static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
+{
+ musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
+}
+
+
+/*
+ * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to
+ * each transfer direction of a non-control endpoint, so allocating
+ * (and deallocating) is mostly a way to notice bad housekeeping on
+ * the software side. We assume the irqs are always active.
+ */
+static struct dma_channel *
+cppi_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *ep,
+ u8 bTransmit)
+{
+ struct cppi *pController;
+ u8 chNum;
+ struct cppi_channel *otgCh;
+ void __iomem *tibase;
+ int local_end = ep->bLocalEnd;
+
+ pController = container_of(c, struct cppi, Controller);
+ tibase = pController->pCoreBase - DAVINCI_BASE_OFFSET;
+
+ /* remember local_end: 1..Max_EndPt, and cppi ChNum:0..Max_EndPt-1 */
+ chNum = local_end - 1;
+
+ /* return the corresponding CPPI Channel Handle, and
+ * probably disable the non-CPPI irq until we need it.
+ */
+ if (bTransmit) {
+ if (local_end > ARRAY_SIZE(pController->txCppi)) {
+ DBG(1, "no %cX DMA channel for ep%d\n", 'T', local_end);
+ return NULL;
+ }
+ otgCh = pController->txCppi + chNum;
+ } else {
+ if (local_end > ARRAY_SIZE(pController->rxCppi)) {
+ DBG(1, "no %cX DMA channel for ep%d\n", 'R', local_end);
+ return NULL;
+ }
+ otgCh = pController->rxCppi + chNum;
+ core_rxirq_disable(tibase, local_end);
+ }
+
+ /* REVISIT make this an error later once the same driver code works
+ * with the Mentor DMA engine too
+ */
+ if (otgCh->pEndPt)
+ DBG(1, "re-allocating DMA%d %cX channel %p\n",
+ chNum, bTransmit ? 'T' : 'R', otgCh);
+ otgCh->pEndPt = ep;
+ otgCh->Channel.bStatus = MGC_DMA_STATUS_FREE;
+
+ DBG(4, "Allocate CPPI%d %cX\n", chNum, bTransmit ? 'T' : 'R');
+ otgCh->Channel.pPrivateData = otgCh;
+ return &otgCh->Channel;
+}
+
+/* Release a CPPI Channel. */
+static void cppi_channel_release(struct dma_channel *channel)
+{
+ struct cppi_channel *c;
+ void __iomem *tibase;
+ unsigned epnum;
+
+ /* REVISIT: for paranoia, check state and abort if needed... */
+
+ c = container_of(channel, struct cppi_channel, Channel);
+ epnum = c->chNo + 1;
+ tibase = c->pController->pCoreBase - DAVINCI_BASE_OFFSET;
+ if (!c->pEndPt)
+ DBG(1, "releasing idle DMA channel %p\n", c);
+ else if (!c->bTransmit)
+ core_rxirq_enable(tibase, epnum);
+
+ /* for now, leave its cppi IRQ enabled (we won't trigger it) */
+ c->pEndPt = NULL;
+ channel->bStatus = MGC_DMA_STATUS_UNKNOWN;
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
+{
+ void *__iomem base = c->pController->pCoreBase;
+
+ MGC_SelectEnd(base, c->chNo + 1);
+
+ DBG(level, "RX DMA%d%s: %d left, csr %04x, "
+ "%08x H%08x S%08x C%08x, "
+ "B%08x L%08x %08x .. %08x"
+ "\n",
+ c->chNo, tag,
+ musb_readl(base - DAVINCI_BASE_OFFSET,
+ DAVINCI_RXCPPI_BUFCNT0_REG + 4 *c->chNo),
+ musb_readw(c->pEndPt->regs, MGC_O_HDRC_RXCSR),
+
+ musb_readl(c->stateRam, 0 * 4), /* buf offset */
+ musb_readl(c->stateRam, 1 * 4), /* head ptr */
+ musb_readl(c->stateRam, 2 * 4), /* sop bd */
+ musb_readl(c->stateRam, 3 * 4), /* current bd */
+
+ musb_readl(c->stateRam, 4 * 4), /* current buf */
+ musb_readl(c->stateRam, 5 * 4), /* pkt len */
+ musb_readl(c->stateRam, 6 * 4), /* byte cnt */
+ musb_readl(c->stateRam, 7 * 4) /* completion */
+ );
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
+{
+ void *__iomem base = c->pController->pCoreBase;
+
+ MGC_SelectEnd(base, c->chNo + 1);
+
+ DBG(level, "TX DMA%d%s: csr %04x, "
+ "H%08x S%08x C%08x %08x, "
+ "F%08x L%08x .. %08x"
+ "\n",
+ c->chNo, tag,
+ musb_readw(c->pEndPt->regs, MGC_O_HDRC_TXCSR),
+
+ musb_readl(c->stateRam, 0 * 4), /* head ptr */
+ musb_readl(c->stateRam, 1 * 4), /* sop bd */
+ musb_readl(c->stateRam, 2 * 4), /* current bd */
+ musb_readl(c->stateRam, 3 * 4), /* buf offset */
+
+ musb_readl(c->stateRam, 4 * 4), /* flags */
+ musb_readl(c->stateRam, 5 * 4), /* len */
+ // dummy/unused word 6
+ musb_readl(c->stateRam, 7 * 4) /* completion */
+ );
+}
+
+/* Context: controller irqlocked */
+static inline void
+cppi_rndis_update(struct cppi_channel *c, int is_rx,
+ void *__iomem tibase, int is_rndis)
+{
+ /* we may need to change the rndis flag for this cppi channel */
+ if (c->bLastModeRndis != is_rndis) {
+ u32 regVal = musb_readl(tibase, DAVINCI_RNDIS_REG);
+ u32 temp = 1 << (c->chNo);
+
+ if (is_rx)
+ temp <<= 16;
+ if (is_rndis)
+ regVal |= temp;
+ else
+ regVal &= ~temp;
+ musb_writel(tibase, DAVINCI_RNDIS_REG, regVal);
+ c->bLastModeRndis = is_rndis;
+ }
+}
+
+static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
+{
+ pr_debug("RXBD/%s %08x: "
+ "nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
+ tag, bd->dma,
+ bd->hNext, bd->buffPtr, bd->bOffBLen, bd->hOptions);
+}
+
+static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
+{
+#if MUSB_DEBUG > 0
+ struct cppi_descriptor *bd;
+
+ if (!_dbg_level(level))
+ return;
+ cppi_dump_rx(level, rx, tag);
+ if (rx->lastHwBDProcessed)
+ cppi_dump_rxbd("last", rx->lastHwBDProcessed);
+ for (bd = rx->activeQueueHead; bd; bd = bd->next)
+ cppi_dump_rxbd("active", bd);
+#endif
+}
+
+
+/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
+ * so we won't ever use it (see "CPPI RX Woes" below).
+ */
+static inline int cppi_autoreq_update(struct cppi_channel *rx,
+ void *__iomem tibase, int onepacket, unsigned n_bds)
+{
+ u32 val;
+
+#ifdef RNDIS_RX_IS_USABLE
+ u32 tmp;
+ /* assert(is_host_active(musb)) */
+
+ /* start from "AutoReq never" */
+ tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ val = tmp & ~((0x3) << (rx->chNo * 2));
+
+ /* HCD arranged reqpkt for packet #1. we arrange int
+ * for all but the last one, maybe in two segments.
+ */
+ if (!onepacket) {
+#if 0
+ /* use two segments, autoreq "all" then the last "never" */
+ val |= ((0x3) << (rx->chNo * 2));
+ n_bds--;
+#else
+ /* one segment, autoreq "all-but-last" */
+ val |= ((0x1) << (rx->chNo * 2));
+#endif
+ }
+
+ if (val != tmp) {
+ int n = 100;
+
+ /* make sure that autoreq is updated before continuing */
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
+ do {
+ tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ if (tmp == val)
+ break;
+ cpu_relax();
+ } while (n-- > 0);
+ }
+#endif
+
+ /* REQPKT is turned off after each segment */
+ if (n_bds && rx->actualLen) {
+ void *__iomem regs = rx->pEndPt->regs;
+
+ val = musb_readw(regs, MGC_O_HDRC_RXCSR);
+ if (!(val & MGC_M_RXCSR_H_REQPKT)) {
+ val |= MGC_M_RXCSR_H_REQPKT | MGC_M_RXCSR_H_WZC_BITS;
+ musb_writew(regs, MGC_O_HDRC_RXCSR, val);
+			/* flush writebuffer */
+ val = musb_readw(regs, MGC_O_HDRC_RXCSR);
+ }
+ }
+ return n_bds;
+}
+
+
+/* Buffer enqueuing logic:
+ *
+ * - RX builds new queues each time, to help handle routine "early
+ * termination" cases (faults, including errors and short reads)
+ * more correctly.
+ *
+ * - for now, TX reuses the same queue of BDs every time
+ *
+ * REVISIT long term, we want a normal dynamic model.
+ * ... the goal will be to append to the
+ * existing queue, processing completed "dma buffers" (segments) on the fly.
+ *
+ * Otherwise we force an IRQ latency between requests, which slows us a lot
+ * (especially in "transparent" dma). Unfortunately that model seems to be
+ * inherent in the DMA model from the Mentor code, except in the rare case
+ * of transfers big enough (~128+ KB) that we could append "middle" segments
+ * in the TX paths. (RX can't do this, see below.)
+ *
+ * That's true even in the CPPI-friendly iso case, where most urbs have
+ * several small segments provided in a group and where the "packet at a time"
+ * "transparent" DMA model is always correct, even on the RX side.
+ */
+ */
+
+/*
+ * CPPI TX:
+ * ========
+ * TX is a lot more reasonable than RX; it doesn't need to run in
+ * irq-per-packet mode very often. RNDIS mode seems to behave too
+ * (other than how it handles the exactly-N-packets case).  Building a
+ * txdma queue with multiple requests (urb or usb_request) looks
+ * like it would work ... but fault handling would need much testing.
+ *
+ * The main issue with TX mode RNDIS relates to transfer lengths that
+ * are an exact multiple of the packet length. It appears that there's
+ * a hiccup in that case (maybe the DMA completes before the ZLP gets
+ * written?) boiling down to not being able to rely on CPPI writing any
+ * terminating zero length packet before the next transfer is written.
+ * So that's punted to PIO; better yet, gadget drivers can avoid it.
+ *
+ * Plus, there's allegedly an undocumented constraint that rndis transfer
+ * length be a multiple of 64 bytes ... but the chip doesn't act that
+ * way, and we really don't _want_ that behavior anyway.
+ *
+ * On TX, "transparent" mode works ... although experiments have shown
+ * problems trying to use the SOP/EOP bits in different USB packets.
+ *
+ * REVISIT try to handle terminating zero length packets using CPPI
+ * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet
+ * links avoid that issue by forcing them to avoid zlps.)
+ */
+static void
+cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
+{
+ unsigned maxpacket = tx->pktSize;
+ dma_addr_t addr = tx->startAddr + tx->currOffset;
+ size_t length = tx->transferSize - tx->currOffset;
+ struct cppi_descriptor *bd;
+ unsigned n_bds;
+ unsigned i;
+ struct cppi_tx_stateram *txState = tx->stateRam;
+ int rndis;
+
+ /* TX can use the CPPI "rndis" mode, where we can probably fit this
+ * transfer in one BD and one IRQ. The only time we would NOT want
+ * to use it is when hardware constraints prevent it, or if we'd
+ * trigger the "send a ZLP?" confusion.
+ */
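+	/* The test below maps the constraints described above: maxpacket
+	 * a multiple of 64 (the alleged rndis constraint), length small
+	 * enough for the 16-bit BD length fields, and not an exact
+	 * multiple of maxpacket (which would raise the ZLP question).
+	 */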
+ rndis = (maxpacket & 0x3f) == 0
+ && length < 0xffff
+ && (length % maxpacket) != 0;
+
+ if (rndis) {
+ maxpacket = length;
+ n_bds = 1;
+ } else {
+ n_bds = length / maxpacket;
+ if (!length || (length % maxpacket))
+ n_bds++;
+ n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
+ length = min(n_bds * maxpacket, length);
+ }
+
+ DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
+ tx->chNo,
+ maxpacket,
+ rndis ? "rndis" : "transparent",
+ n_bds,
+ addr, length);
+
+ cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
+
+ /* assuming here that channel_program is called during
+ * transfer initiation ... current code maintains state
+ * for one outstanding request only (no queues, not even
+ * the implicit ones of an iso urb).
+ */
+
+ bd = tx->bdPoolHead;
+ tx->activeQueueHead = tx->bdPoolHead;
+ tx->lastHwBDProcessed = NULL;
+
+
+ /* Prepare queue of BDs first, then hand it to hardware.
+ * All BDs except maybe the last should be of full packet
+ * size; for RNDIS there _is_ only that last packet.
+ */
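+	/* Illustrative queue shape (not from any TI doc): with n_bds == 3,
+	 *   bdPoolHead -> [BD0] -> [BD1] -> [BD2], and BD2's hNext == 0.
+	 * Hardware chases the dma-visible hNext links; the driver walks
+	 * the ->next pointers.  Until the FIXME below is resolved, every
+	 * BD carries both SOP and EOP.
+	 */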
+ for (i = 0; i < n_bds; ) {
+ if (++i < n_bds && bd->next)
+ bd->hNext = bd->next->dma;
+ else
+ bd->hNext = 0;
+
+ bd->buffPtr = tx->startAddr
+ + tx->currOffset;
+
+ /* FIXME set EOP only on the last packet,
+ * SOP only on the first ... avoid IRQs
+ */
+ if ((tx->currOffset + maxpacket)
+ <= tx->transferSize) {
+ tx->currOffset += maxpacket;
+ bd->bOffBLen = maxpacket;
+ bd->hOptions = CPPI_SOP_SET | CPPI_EOP_SET
+ | CPPI_OWN_SET | maxpacket;
+ } else {
+ /* only this one may be a partial USB Packet */
+ u32 buffSz;
+
+ buffSz = tx->transferSize - tx->currOffset;
+ tx->currOffset = tx->transferSize;
+ bd->bOffBLen = buffSz;
+
+ bd->hOptions = CPPI_SOP_SET | CPPI_EOP_SET
+ | CPPI_OWN_SET | buffSz;
+ if (buffSz == 0)
+ bd->hOptions |= CPPI_ZERO_SET;
+ }
+
+ DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
+ bd, bd->hNext, bd->buffPtr,
+ bd->bOffBLen, bd->hOptions);
+
+ /* update the last BD enqueued to the list */
+ tx->activeQueueTail = bd;
+ bd = bd->next;
+ }
+
+ /* BDs live in DMA-coherent memory, but writes might be pending */
+ cpu_drain_writebuffer();
+
+ /* Write to the HeadPtr in StateRam to trigger */
+ txState->headPtr = (u32)tx->bdPoolHead->dma;
+
+ cppi_dump_tx(5, tx, "/S");
+}
+
+/*
+ * CPPI RX Woes:
+ * =============
+ * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
+ * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
+ * (Full speed transfers have similar scenarios.)
+ *
+ * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
+ * and the next packet goes into a buffer that's queued later; while (b) fills
+ * the buffer with 1024 bytes. How to do that with CPPI?
+ *
+ * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly,
+ * but (b) loses _badly_ because nothing (!) happens when that second packet
+ * fills the buffer, much less when a third one arrives. (Which makes this
+ * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination
+ * is optional, and it's fine if senders pad messages out to end-of-buffer.)
+ *
+ * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
+ * converse problems: (b) is handled right, but (a) loses badly. CPPI RX
+ * ignores SOP/EOP markings and processes both of those BDs; so both packets
+ * are loaded into the buffer (with a 212 byte gap between them), and the next
+ * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
+ * are intended as outputs for RX queues, not inputs...)
+ *
+ * - A variant of "transparent" mode -- one BD at a time -- is the only way to
+ * reliably make both cases work, with software handling both cases correctly
+ * and at the significant penalty of needing an IRQ per packet. (The lack of
+ * I/O overlap can be slightly ameliorated by enabling double buffering.)
+ *
+ * So how to get rid of IRQ-per-packet? The transparent multi-BD case could
+ * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
+ * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
+ * with guaranteed driver level fault recovery and scrubbing out what's left
+ * of that garbaged datastream.
+ *
+ * But there seems to be no way to identify the cases where CPPI RNDIS mode
+ * is appropriate -- which do NOT include the RNDIS driver, but do include
+ * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
+ * So we can't _ever_ use RX RNDIS mode.
+ *
+ * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
+ * cases other than mass storage class. Otherwise we're correct but slow,
+ * since CPPI penalizes our need for a "true RNDIS" default mode.
+ */
+
+/**
+ * cppi_next_rx_segment - dma read for the next chunk of a buffer
+ * @musb: the controller
+ * @rx: dma channel
+ * @onepacket: true unless caller treats short reads as errors, and
+ * performs fault recovery above usbcore.
+ * Context: controller irqlocked
+ *
+ * See above notes about why we can't use multi-BD RX queues except in
+ * rare cases (mass storage class), and can never use the hardware "rndis"
+ * mode (since it's not a "true" RNDIS mode).
+ *
+ * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
+ * code to recover from corrupted datastreams after each short transfer.
+ */
+static void
+cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
+{
+ unsigned maxpacket = rx->pktSize;
+ dma_addr_t addr = rx->startAddr + rx->currOffset;
+ size_t length = rx->transferSize - rx->currOffset;
+ struct cppi_descriptor *bd, *tail;
+ unsigned n_bds;
+ unsigned i;
+ void *__iomem tibase = musb->ctrl_base;
+
+ if (onepacket) {
+ n_bds = 1;
+ } else {
+ if (length > 0xffff) {
+ n_bds = 0xffff / maxpacket;
+ length = n_bds * maxpacket;
+ } else {
+ n_bds = length / maxpacket;
+ if (length % maxpacket)
+ n_bds++;
+ }
+ if (n_bds == 1)
+ onepacket = 1;
+ else
+ n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
+ }
+
+ /* In host mode, autorequest logic can generate some IN tokens; it's
+ * tricky since we can't leave REQPKT set in RXCSR after the transfer
+ * finishes. So: multipacket transfers involve two or more segments.
+ * And always at least two IRQs ... RNDIS mode is not an option.
+ */
+ if (is_host_active(musb))
+ n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
+
+ length = min(n_bds * maxpacket, length);
+
+ DBG(4, "RX DMA%d seg, maxp %d %spacket bds %d (cnt %d) "
+ "dma 0x%x len %u %u/%u\n",
+ rx->chNo, maxpacket,
+ onepacket ? "one" : "multi",
+ n_bds,
+ musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->chNo * 4))
+ & 0xffff,
+ addr, length, rx->actualLen, rx->transferSize);
+
+ /* only queue one segment at a time, since the hardware prevents
+ * correct queue shutdown after unexpected short packets
+ */
+ bd = cppi_bd_alloc(rx);
+ rx->activeQueueHead = bd;
+
+ /* Build BDs for all packets in this segment */
+ for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
+ u32 buffSz;
+
+ if (i) {
+ bd = cppi_bd_alloc(rx);
+ if (!bd)
+ break;
+ tail->next = bd;
+ tail->hNext = bd->dma;
+ }
+ bd->hNext = 0;
+
+ /* all but the last packet will be maxpacket size */
+ if (maxpacket < length)
+ buffSz = maxpacket;
+ else
+ buffSz = length;
+
+ bd->buffPtr = addr;
+ addr += buffSz;
+ rx->currOffset += buffSz;
+
+ bd->bOffBLen = (0 /*offset*/ << 16) + buffSz;
+ bd->enqBuffLen = buffSz;
+
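+		/* only the first BD (SOP, i == 0) carries the full segment
+		 * length in the low bits of hOptions
+		 */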
+ bd->hOptions = CPPI_OWN_SET | (i == 0 ? length : 0);
+ length -= buffSz;
+ }
+
+ /* we always expect at least one reusable BD! */
+ if (!tail) {
+ WARN("rx dma%d -- no BDs? need %d\n", rx->chNo, n_bds);
+ return;
+ } else if (i < n_bds)
+ WARN("rx dma%d -- only %d of %d BDs\n", rx->chNo, i, n_bds);
+
+ tail->next = NULL;
+ tail->hNext = 0;
+
+ bd = rx->activeQueueHead;
+ rx->activeQueueTail = tail;
+
+ /* short reads and other faults should terminate this entire
+ * dma segment. we want one "dma packet" per dma segment, not
+ * one per USB packet, terminating the whole queue at once...
+ * NOTE that current hardware seems to ignore SOP and EOP.
+ */
+ bd->hOptions |= CPPI_SOP_SET;
+ tail->hOptions |= CPPI_EOP_SET;
+
+ if (debug >= 5) {
+ struct cppi_descriptor *d;
+
+ for (d = rx->activeQueueHead; d; d = d->next)
+ cppi_dump_rxbd("S", d);
+ }
+
+ /* in case the preceding transfer left some state... */
+ tail = rx->lastHwBDProcessed;
+ if (tail) {
+ tail->next = bd;
+ tail->hNext = bd->dma;
+ }
+
+ core_rxirq_enable(tibase, rx->chNo + 1);
+
+ /* BDs live in DMA-coherent memory, but writes might be pending */
+ cpu_drain_writebuffer();
+
+ /* REVISIT specs say to write this AFTER the BUFCNT register
+ * below ... but that loses badly.
+ */
+ musb_writel(rx->stateRam, 4, bd->dma);
+
+ /* bufferCount must be at least 3, and zeroes on completion
+ * unless it underflows below zero, or stops at two, or keeps
+ * growing ... grr.
+ */
+ i = musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->chNo * 4))
+ & 0xffff;
+
+ if (!i)
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->chNo * 4),
+ n_bds + 2);
+ else if (n_bds > (i - 3))
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->chNo * 4),
+ n_bds - (i - 3));
+
+ i = musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->chNo * 4))
+ & 0xffff;
+ if (i < (2 + n_bds)) {
+ DBG(2, "bufcnt%d underrun - %d (for %d)\n",
+ rx->chNo, i, n_bds);
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->chNo * 4),
+ n_bds + 2);
+ }
+
+ cppi_dump_rx(4, rx, "/S");
+}
+
+/**
+ * cppi_channel_program - program channel for data transfer
+ * @pChannel: the channel
+ * @wPacketSz: max packet size
+ * @mode: For RX, 1 unless the usb protocol driver promised to treat
+ * all short reads as errors and kick in high level fault recovery.
+ * For TX, ignored because of RNDIS mode races/glitches.
+ * @dma_addr: dma address of buffer
+ * @dwLength: length of buffer
+ * Context: controller irqlocked
+ */
+static int cppi_channel_program(struct dma_channel *pChannel,
+ u16 wPacketSz, u8 mode,
+ dma_addr_t dma_addr, u32 dwLength)
+{
+ struct cppi_channel *otgChannel = pChannel->pPrivateData;
+ struct cppi *pController = otgChannel->pController;
+ struct musb *musb = pController->musb;
+
+ switch (pChannel->bStatus) {
+ case MGC_DMA_STATUS_BUS_ABORT:
+ case MGC_DMA_STATUS_CORE_ABORT:
+ /* fault irq handler should have handled cleanup */
+ WARN("%cX DMA%d not cleaned up after abort!\n",
+ otgChannel->bTransmit ? 'T' : 'R',
+ otgChannel->chNo);
+ //WARN_ON(1);
+ break;
+ case MGC_DMA_STATUS_BUSY:
+ WARN("program active channel? %cX DMA%d\n",
+ otgChannel->bTransmit ? 'T' : 'R',
+ otgChannel->chNo);
+ //WARN_ON(1);
+ break;
+ case MGC_DMA_STATUS_UNKNOWN:
+ DBG(1, "%cX DMA%d not allocated!\n",
+ otgChannel->bTransmit ? 'T' : 'R',
+ otgChannel->chNo);
+ /* FALLTHROUGH */
+ case MGC_DMA_STATUS_FREE:
+ break;
+ }
+
+ pChannel->bStatus = MGC_DMA_STATUS_BUSY;
+
+ /* set transfer parameters, then queue up its first segment */
+ otgChannel->startAddr = dma_addr;
+ otgChannel->currOffset = 0;
+ otgChannel->pktSize = wPacketSz;
+ otgChannel->actualLen = 0;
+ otgChannel->transferSize = dwLength;
+
+ /* TX channel? or RX? */
+ if (otgChannel->bTransmit)
+ cppi_next_tx_segment(musb, otgChannel);
+ else
+ cppi_next_rx_segment(musb, otgChannel, mode);
+
+ return TRUE;
+}
+
+static int cppi_rx_scan(struct cppi *cppi, unsigned ch)
+{
+ struct cppi_channel *rx = &cppi->rxCppi[ch];
+ struct cppi_rx_stateram *state = rx->stateRam;
+ struct cppi_descriptor *bd;
+ struct cppi_descriptor *last = rx->lastHwBDProcessed;
+ int completed = 0, acked = 0;
+ int i;
+ dma_addr_t safe2ack;
+ void *__iomem regs = rx->pEndPt->regs;
+
+ cppi_dump_rx(6, rx, "/K");
+
+ bd = last ? last->next : rx->activeQueueHead;
+ if (!bd)
+ return 0;
+
+ /* run through all completed BDs */
+ for (i = 0, safe2ack = musb_readl(CAST &state->completionPtr, 0);
+ (safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
+ i++, bd = bd->next) {
+ u16 len;
+
+ rmb();
+ if (!completed && (bd->hOptions & CPPI_OWN_SET))
+ break;
+
+ DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
+ "off.len %08x opt.len %08x (%d)\n",
+ bd->dma, bd->hNext, bd->buffPtr,
+ bd->bOffBLen, bd->hOptions,
+ rx->actualLen);
+
+ /* actual packet received length */
+ if ((bd->hOptions & CPPI_SOP_SET) && !completed)
+ len = bd->bOffBLen & CPPI_RECV_PKTLEN_MASK;
+ else
+ len = 0;
+
+ if (bd->hOptions & CPPI_EOQ_MASK)
+ completed = 1;
+
+ if (!completed && len < bd->enqBuffLen) {
+ /* NOTE: when we get a short packet, RXCSR_H_REQPKT
+ * must have been cleared, and no more DMA packets may
+			 * be active in the queue... TI docs didn't say, but
+ * CPPI ignores those BDs even though OWN is still set.
+ */
+ completed = 1;
+ DBG(3, "rx short %d/%d (%d)\n",
+ len, bd->enqBuffLen, rx->actualLen);
+ }
+
+ /* If we got here, we expect to ack at least one BD; meanwhile
+		 * CPPI may be completing other BDs while we scan this list...
+ *
+ * RACE: we can notice OWN cleared before CPPI raises the
+ * matching irq by writing that BD as the completion pointer.
+ * In such cases, stop scanning and wait for the irq, avoiding
+ * lost acks and states where BD ownership is unclear.
+ */
+ if (bd->dma == safe2ack) {
+ musb_writel(CAST &state->completionPtr, 0, safe2ack);
+ safe2ack = musb_readl(CAST &state->completionPtr, 0);
+ acked = 1;
+ if (bd->dma == safe2ack)
+ safe2ack = 0;
+ }
+
+ rx->actualLen += len;
+
+ cppi_bd_free(rx, last);
+ last = bd;
+
+ /* stop scanning on end-of-segment */
+ if (bd->hNext == 0)
+ completed = 1;
+ }
+ rx->lastHwBDProcessed = last;
+
+ /* dma abort, lost ack, or ... */
+ if (!acked && last) {
+ int csr;
+
+ if (safe2ack == 0 || safe2ack == rx->lastHwBDProcessed->dma)
+ musb_writel(CAST &state->completionPtr, 0, safe2ack);
+ if (safe2ack == 0) {
+ cppi_bd_free(rx, last);
+ rx->lastHwBDProcessed = NULL;
+
+ /* if we land here on the host side, H_REQPKT will
+ * be clear and we need to restart the queue...
+ */
+ WARN_ON(rx->activeQueueHead);
+ }
+ MGC_SelectEnd(cppi->pCoreBase, rx->chNo + 1);
+ csr = musb_readw(regs, MGC_O_HDRC_RXCSR);
+ if (csr & MGC_M_RXCSR_DMAENAB) {
+ DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
+ rx->chNo,
+ rx->activeQueueHead, rx->activeQueueTail,
+ rx->lastHwBDProcessed
+ ? rx->lastHwBDProcessed->dma
+ : 0,
+ completed ? ", completed" : "",
+ csr);
+ cppi_dump_rxq(4, "/what?", rx);
+ }
+ }
+ if (!completed) {
+ int csr;
+
+ rx->activeQueueHead = bd;
+
+ /* REVISIT seems like "autoreq all but EOP" doesn't...
+ * setting it here "should" be racey, but seems to work
+ */
+ csr = musb_readw(rx->pEndPt->regs, MGC_O_HDRC_RXCSR);
+ if (is_host_active(cppi->musb)
+ && bd
+ && !(csr & MGC_M_RXCSR_H_REQPKT)) {
+ csr |= MGC_M_RXCSR_H_REQPKT;
+ musb_writew(regs, MGC_O_HDRC_RXCSR,
+ MGC_M_RXCSR_H_WZC_BITS | csr);
+ csr = musb_readw(rx->pEndPt->regs, MGC_O_HDRC_RXCSR);
+ }
+ } else {
+ rx->activeQueueHead = NULL;
+ rx->activeQueueTail = NULL;
+ }
+
+ cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
+ return completed;
+}
+
+void cppi_completion(struct musb *pThis, u32 rx, u32 tx)
+{
+ void *__iomem regBase;
+ int i, chanNum, numCompleted;
+ u8 bReqComplete;
+ struct cppi *cppi;
+ struct cppi_descriptor *bdPtr;
+ struct musb_hw_ep *pEnd = NULL;
+
+ cppi = container_of(pThis->pDmaController, struct cppi, Controller);
+
+ regBase = pThis->ctrl_base;
+
+ chanNum = 0;
+ /* process TX channels */
+ for (chanNum = 0; tx; tx = tx >> 1, chanNum++) {
+ if (tx & 1) {
+ struct cppi_channel *txChannel;
+ struct cppi_tx_stateram *txState;
+
+ txChannel = cppi->txCppi + chanNum;
+ txState = txChannel->stateRam;
+
+ /* FIXME need a cppi_tx_scan() routine, which
+ * can also be called from abort code
+ */
+
+ cppi_dump_tx(5, txChannel, "/E");
+
+ bdPtr = txChannel->activeQueueHead;
+
+ if (NULL == bdPtr) {
+ DBG(1, "null BD\n");
+ continue;
+ }
+
+ i = 0;
+ bReqComplete = 0;
+
+ numCompleted = 0;
+
+ /* run through all completed BDs */
+ for (i = 0;
+ !bReqComplete
+ && bdPtr
+ && i < NUM_TXCHAN_BD;
+ i++, bdPtr = bdPtr->next) {
+ u16 len;
+
+ rmb();
+ if (bdPtr->hOptions & CPPI_OWN_SET)
+ break;
+
+ DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
+ bdPtr, bdPtr->hNext,
+ bdPtr->buffPtr,
+ bdPtr->bOffBLen,
+ bdPtr->hOptions);
+
+ len = bdPtr->bOffBLen & CPPI_BUFFER_LEN_MASK;
+ txChannel->actualLen += len;
+
+ numCompleted++;
+ txChannel->lastHwBDProcessed = bdPtr;
+
+ /* write completion register to acknowledge
+ * processing of completed BDs, and possibly
+ * release the IRQ; EOQ might not be set ...
+ *
+ * REVISIT use the same ack strategy as rx
+ *
+ * REVISIT have observed bit 18 set; huh??
+ */
+// if ((bdPtr->hOptions & CPPI_EOQ_MASK))
+ txState->completionPtr = bdPtr->dma;
+
+ /* stop scanning on end-of-segment */
+ if (bdPtr->hNext == 0)
+ bReqComplete = 1;
+ }
+
+ /* on end of segment, maybe go to next one */
+ if (bReqComplete) {
+ //cppi_dump_tx(4, txChannel, "/complete");
+
+ /* transfer more, or report completion */
+ if (txChannel->currOffset
+ >= txChannel->transferSize) {
+ txChannel->activeQueueHead = NULL;
+ txChannel->activeQueueTail = NULL;
+ txChannel->Channel.bStatus =
+ MGC_DMA_STATUS_FREE;
+
+ pEnd = txChannel->pEndPt;
+
+ txChannel->Channel.dwActualLength =
+ txChannel->actualLen;
+
+ /* Peripheral role never repurposes the
+ * endpoint, so immediate completion is
+ * safe. Host role waits for the fifo
+ * to empty (TXPKTRDY irq) before going
+ * to the next queued bulk transfer.
+ */
+ if (is_host_active(cppi->musb)) {
+#if 0
+ /* WORKAROUND because we may
+					 * not always get TXPKTRDY ...
+ */
+ int csr;
+
+ csr = musb_readw(pEnd->regs,
+ MGC_O_HDRC_TXCSR);
+ if (csr & MGC_M_TXCSR_TXPKTRDY)
+#endif
+ bReqComplete = 0;
+ }
+ if (bReqComplete)
+ musb_dma_completion(
+ pThis, chanNum + 1, 1);
+
+ } else {
+ /* Bigger transfer than we could fit in
+ * that first batch of descriptors...
+ */
+ cppi_next_tx_segment(pThis, txChannel);
+ }
+ } else
+ txChannel->activeQueueHead = bdPtr;
+ }
+ }
+
+ /* Start processing the RX block */
+ for (chanNum = 0; rx; rx = rx >> 1, chanNum++) {
+
+ if (rx & 1) {
+ struct cppi_channel *rxChannel;
+
+ rxChannel = cppi->rxCppi + chanNum;
+ bReqComplete = cppi_rx_scan(cppi, chanNum);
+
+ /* let incomplete dma segments finish */
+ if (!bReqComplete)
+ continue;
+
+ /* start another dma segment if needed */
+ if (rxChannel->actualLen != rxChannel->transferSize
+ && rxChannel->actualLen
+ == rxChannel->currOffset) {
+ cppi_next_rx_segment(pThis, rxChannel, 1);
+ continue;
+ }
+
+ /* all segments completed! */
+ rxChannel->Channel.bStatus = MGC_DMA_STATUS_FREE;
+
+ pEnd = rxChannel->pEndPt;
+
+ rxChannel->Channel.dwActualLength =
+ rxChannel->actualLen;
+ core_rxirq_disable(regBase, chanNum + 1);
+ musb_dma_completion(pThis, chanNum + 1, 0);
+ }
+ }
+
+ /* write to CPPI EOI register to re-enable interrupts */
+ musb_writel(regBase, DAVINCI_CPPI_EOI_REG, 0);
+}
+
+/* Instantiate a software object representing a DMA controller. */
+static struct dma_controller *
+cppi_controller_new(struct musb *musb, void __iomem *pCoreBase)
+{
+ struct cppi *pController;
+
+ pController = kzalloc(sizeof *pController, GFP_KERNEL);
+ if (!pController)
+ return NULL;
+
+ /* Initialize the Cppi DmaController structure */
+ pController->pCoreBase = pCoreBase;
+ pController->musb = musb;
+ pController->Controller.pPrivateData = pController;
+ pController->Controller.start = cppi_controller_start;
+ pController->Controller.stop = cppi_controller_stop;
+ pController->Controller.channel_alloc = cppi_channel_allocate;
+ pController->Controller.channel_release = cppi_channel_release;
+ pController->Controller.channel_program = cppi_channel_program;
+ pController->Controller.channel_abort = cppi_channel_abort;
+
+ /* NOTE: allocating from on-chip SRAM would give the least
+ * contention for memory access, if that ever matters here.
+ */
+
+ /* setup BufferPool */
+ pController->pool = dma_pool_create("cppi",
+ pController->musb->controller,
+ sizeof(struct cppi_descriptor),
+ CPPI_DESCRIPTOR_ALIGN, 0);
+ if (!pController->pool) {
+ kfree(pController);
+ return NULL;
+ }
+
+ return &pController->Controller;
+}
+
+/*
+ * Destroy a previously-instantiated DMA controller.
+ */
+static void cppi_controller_destroy(struct dma_controller *c)
+{
+ struct cppi *cppi;
+
+ cppi = container_of(c, struct cppi, Controller);
+
+ /* assert: caller stopped the controller first */
+ dma_pool_destroy(cppi->pool);
+
+ kfree(cppi);
+}
+
+const struct dma_controller_factory dma_controller_factory = {
+ .create = cppi_controller_new,
+ .destroy = cppi_controller_destroy,
+};
+
+/*
+ * Context: controller irqlocked, endpoint selected
+ */
+static int cppi_channel_abort(struct dma_channel *channel)
+{
+ struct cppi_channel *otgCh;
+ struct cppi *pController;
+ int chNum;
+ void *__iomem mbase;
+ void *__iomem regBase;
+ void *__iomem regs;
+ u32 regVal;
+ struct cppi_descriptor *queue;
+
+ otgCh = container_of(channel, struct cppi_channel, Channel);
+
+ pController = otgCh->pController;
+ chNum = otgCh->chNo;
+
+ switch (channel->bStatus) {
+ case MGC_DMA_STATUS_BUS_ABORT:
+ case MGC_DMA_STATUS_CORE_ABORT:
+ /* from RX or TX fault irq handler */
+ case MGC_DMA_STATUS_BUSY:
+ /* the hardware needs shutting down */
+ regs = otgCh->pEndPt->regs;
+ break;
+ case MGC_DMA_STATUS_UNKNOWN:
+ case MGC_DMA_STATUS_FREE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ if (!otgCh->bTransmit && otgCh->activeQueueHead)
+ cppi_dump_rxq(3, "/abort", otgCh);
+
+ mbase = pController->pCoreBase;
+ regBase = mbase - DAVINCI_BASE_OFFSET;
+
+ queue = otgCh->activeQueueHead;
+ otgCh->activeQueueHead = NULL;
+ otgCh->activeQueueTail = NULL;
+
+ /* REVISIT should rely on caller having done this,
+ * and caller should rely on us not changing it.
+ * peripheral code is safe ... check host too.
+ */
+ MGC_SelectEnd(mbase, chNum + 1);
+
+ if (otgCh->bTransmit) {
+ struct cppi_tx_stateram *__iomem txState;
+ int enabled;
+
+ /* mask interrupts raised to signal teardown complete. */
+ enabled = musb_readl(regBase, DAVINCI_TXCPPI_INTENAB_REG)
+ & (1 << otgCh->chNo);
+ if (enabled)
+ musb_writel(regBase, DAVINCI_TXCPPI_INTCLR_REG,
+ (1 << otgCh->chNo));
+
+ // REVISIT put timeouts on these controller handshakes
+
+ cppi_dump_tx(6, otgCh, " (teardown)");
+
+ /* teardown DMA engine then usb core */
+ do {
+ regVal = musb_readl(regBase, DAVINCI_TXCPPI_TEAR_REG);
+ } while (!(regVal & CPPI_TEAR_READY));
+ musb_writel(regBase, DAVINCI_TXCPPI_TEAR_REG, chNum);
+
+ txState = otgCh->stateRam;
+ do {
+ regVal = txState->completionPtr;
+ } while (0xFFFFFFFC != regVal);
+ txState->completionPtr = 0xFFFFFFFC;
+
+ /* FIXME clean up the transfer state ... here?
+ * the completion routine should get called with
+ * an appropriate status code.
+ */
+
+ regVal = musb_readw(regs, MGC_O_HDRC_TXCSR);
+ regVal &= ~MGC_M_TXCSR_DMAENAB;
+ regVal |= MGC_M_TXCSR_FLUSHFIFO;
+ musb_writew(regs, MGC_O_HDRC_TXCSR, regVal);
+ musb_writew(regs, MGC_O_HDRC_TXCSR, regVal);
+
+ /* re-enable interrupt */
+ if (enabled)
+ musb_writel(regBase, DAVINCI_TXCPPI_INTENAB_REG,
+ (1 << otgCh->chNo));
+
+ txState->headPtr = 0;
+ txState->sopDescPtr = 0;
+ txState->currBuffPtr = 0;
+ txState->currDescPtr = 0;
+ txState->flags = 0;
+ txState->remLength = 0;
+
+		/* Ensure that we clean up any asserted interrupt:
+		 * 1. Write completion Ptr value 0x1 (bit 0 set)
+		 *    (write back mode)
+		 * 2. Write completion Ptr value 0x0 (bit 0 cleared)
+		 *    (compare mode)
+		 * The value written is compared (for bits 31:2) and, when
+		 * equal, the interrupt is deasserted.
+		 */
+
+ /* write back mode, bit 0 set, hence completion Ptr
+ * must be updated
+ */
+ txState->completionPtr = 0x1;
+ /* compare mode, write back zero now */
+ txState->completionPtr = 0;
+
+ cppi_dump_tx(5, otgCh, " (done teardown)");
+
+ /* REVISIT tx side _should_ clean up the same way
+ * as the RX side ... this does no cleanup at all!
+ */
+
+ } else /* RX */ {
+ u16 csr;
+
+ /* NOTE: docs don't guarantee any of this works ... we
+ * expect that if the usb core stops telling the cppi core
+ * to pull more data from it, then it'll be safe to flush
+ * current RX DMA state iff any pending fifo transfer is done.
+ */
+
+ core_rxirq_disable(regBase, otgCh->chNo + 1);
+
+ /* for host, ensure ReqPkt is never set again */
+ if (is_host_active(otgCh->pController->musb)) {
+ regVal = musb_readl(regBase, DAVINCI_AUTOREQ_REG);
+ regVal &= ~((0x3) << (otgCh->chNo * 2));
+ musb_writel(regBase, DAVINCI_AUTOREQ_REG, regVal);
+ }
+
+ csr = musb_readw(regs, MGC_O_HDRC_RXCSR);
+
+ /* for host, clear (just) ReqPkt at end of current packet(s) */
+ if (is_host_active(otgCh->pController->musb)) {
+ csr |= MGC_M_RXCSR_H_WZC_BITS;
+ csr &= ~MGC_M_RXCSR_H_REQPKT;
+ } else
+ csr |= MGC_M_RXCSR_P_WZC_BITS;
+
+ /* clear dma enable */
+ csr &= ~(MGC_M_RXCSR_DMAENAB);
+ musb_writew(regs, MGC_O_HDRC_RXCSR, csr);
+ csr = musb_readw(regs, MGC_O_HDRC_RXCSR);
+
+ /* quiesce: wait for current dma to finish (if not cleanup)
+ * we can't use bit zero of stateram->sopDescPtr since that
+ * refers to an entire "DMA packet" not just emptying the
+ * current fifo; most segments need multiple usb packets.
+ */
+ if (channel->bStatus == MGC_DMA_STATUS_BUSY)
+ udelay(50);
+
+ /* scan the current list, reporting any data that was
+ * transferred and acking any IRQ
+ */
+ cppi_rx_scan(pController, chNum);
+
+ /* clobber the existing state once it's idle
+ *
+ * NOTE: arguably, we should also wait for all the other
+ * RX channels to quiesce (how??) and then temporarily
+ * disable RXCPPI_CTRL_REG ... but it seems that we can
+ * rely on the controller restarting from state ram, with
+ * only RXCPPI_BUFCNT state being bogus. BUFCNT will
+ * correct itself after the next DMA transfer though.
+ *
+ * REVISIT does using rndis mode change that?
+ */
+ cppi_reset_rx(otgCh->stateRam);
+
+ /* next DMA request _should_ load cppi head ptr */
+
+ /* ... we don't "free" that list, only mutate it in place. */
+ cppi_dump_rx(5, otgCh, " (done abort)");
+
+ /* clean up previously pending bds */
+ cppi_bd_free(otgCh, otgCh->lastHwBDProcessed);
+ otgCh->lastHwBDProcessed = NULL;
+
+ while (queue) {
+ struct cppi_descriptor *tmp = queue->next;
+ cppi_bd_free(otgCh, queue);
+ queue = tmp;
+ }
+ }
+
+ channel->bStatus = MGC_DMA_STATUS_FREE;
+ otgCh->startAddr = 0;
+ otgCh->currOffset = 0;
+ otgCh->transferSize = 0;
+ otgCh->pktSize = 0;
+ return 0;
+}
+
+/* TBD Queries:
+ *
+ * Power Management ... probably turn off cppi during suspend, restart;
+ * check state ram? Clocking is presumably shared with usb core.
+ */
--- /dev/null
+/* Copyright (C) 2005-2006 by Texas Instruments */
+
+#ifndef _CPPI_DMA_H_
+#define _CPPI_DMA_H_
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/dmapool.h>
+
+#include "dma.h"
+#include "musbdefs.h"
+#include "davinci.h"
+
+
+/* hOptions bit masks for CPPI BDs */
+#define CPPI_SOP_SET ((u32)(1 << 31))
+#define CPPI_EOP_SET ((u32)(1 << 30))
+#define CPPI_OWN_SET ((u32)(1 << 29)) /* owned by cppi */
+#define CPPI_EOQ_MASK ((u32)(1 << 28))
+#define CPPI_ZERO_SET ((u32)(1 << 23)) /* rx saw zlp; tx issues one */
+#define CPPI_RXABT_MASK ((u32)(1 << 19)) /* need more rx buffers */
+
+#define CPPI_RECV_PKTLEN_MASK 0xFFFF
+#define CPPI_BUFFER_LEN_MASK 0xFFFF
+
+#define CPPI_TEAR_READY ((u32)(1 << 31))
+
+/* CPPI data structure definitions */
+
+#define CPPI_DESCRIPTOR_ALIGN 16 // bytes; 5-dec docs say 4-byte align
+
+struct cppi_descriptor {
+ /* Hardware Overlay */
+ u32 hNext; /**< Next(hardware) Buffer Descriptor Pointer */
+ u32 buffPtr; /**<Buffer Pointer (dma_addr_t) */
+ u32 bOffBLen; /**<Buffer_offset16,buffer_length16 */
+ u32 hOptions; /**<Option fields for SOP,EOP etc*/
+
+ struct cppi_descriptor *next;
+ dma_addr_t dma; /* address of this descriptor */
+
+ /* for Rx Desc, track original Buffer len to detect short packets */
+ u32 enqBuffLen;
+} __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN)));
+
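+/* The four u32 "hardware overlay" words above must stay first and in
+ * this order; the hardware reads and updates them in place.  Fields
+ * from ->next onward are software-only bookkeeping.
+ */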
+
+/* forward declaration for CppiDmaController structure */
+struct cppi;
+
+/**
+ * Channel Control Structure
+ *
+ * CPPI Channel Control structure.  Using the same structure for Tx/Rx;
+ * if need be, derive Tx- and Rx-specific variants from this later.
+ */
+struct cppi_channel {
+ /* First field must be dma_channel for easy type casting
+ * FIXME just use container_of() and be typesafe instead!
+ */
+ struct dma_channel Channel;
+
+ /* back pointer to the Dma Controller structure */
+ struct cppi *pController;
+
+ /* which direction of which endpoint? */
+ struct musb_hw_ep *pEndPt;
+ u8 bTransmit;
+ u8 chNo;
+
+ /* DMA modes: RNDIS or "transparent" */
+ u8 bLastModeRndis;
+
+ /* book keeping for current transfer request */
+ dma_addr_t startAddr;
+ u32 transferSize;
+ u32 pktSize;
+ u32 currOffset; /* requested segments */
+ u32 actualLen; /* completed (Channel.actual) */
+
+ void __iomem *stateRam; /* CPPI state */
+
+ /* BD management fields */
+ struct cppi_descriptor *bdPoolHead;
+ struct cppi_descriptor *activeQueueHead;
+ struct cppi_descriptor *activeQueueTail;
+ struct cppi_descriptor *lastHwBDProcessed;
+
+ /* use tx_complete in host role to track endpoints waiting for
+ * FIFONOTEMPTY to clear.
+ */
+ struct list_head tx_complete;
+};
+
+/**
+ * CPPI Dma Controller Object
+ *
+ * CPPI DMA controller object.  Encapsulates all bookkeeping and data
+ * structures pertaining to the CPPI DMA controller.
+ */
+struct cppi {
+ struct dma_controller Controller;
+ struct musb *musb;
+ void __iomem *pCoreBase;
+
+ struct cppi_channel txCppi[MUSB_C_NUM_EPT - 1];
+ struct cppi_channel rxCppi[MUSB_C_NUM_EPR - 1];
+
+ struct dma_pool *pool;
+
+ struct list_head tx_complete;
+};
+
+/* irq handling hook */
+extern void cppi_completion(struct musb *, u32 rx, u32 tx);
+
+#endif /* end of ifndef _CPPI_DMA_H_ */
--- /dev/null
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/memory.h>
+// #include <asm/arch/gpio.h>
+#include <asm/mach-types.h>
+
+#include "musbdefs.h"
+
+
+#ifdef CONFIG_ARCH_DAVINCI
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+#include <asm/arch/i2c-client.h>
+#endif
+
+#include "davinci.h"
+#endif
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+#include "cppi_dma.h"
+#endif
+
+
+static inline void phy_on(void)
+{
+ /* start the on-chip PHY and its PLL */
+ __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON,
+ IO_ADDRESS(USBPHY_CTL_PADDR));
+ while ((__raw_readl(IO_ADDRESS(USBPHY_CTL_PADDR))
+ & USBPHY_PHYCLKGD) == 0)
+ cpu_relax();
+}
+
+static inline void phy_off(void)
+{
+ /* powerdown the on-chip PHY and its oscillator */
+ __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYSPDWN,
+ IO_ADDRESS(USBPHY_CTL_PADDR));
+}
+
+static int dma_off = 1;
+
+void musb_platform_enable(struct musb *musb)
+{
+ u32 tmp, old, val;
+
+ /* workaround: setup irqs through both register sets */
+ tmp = (musb->wEndMask & DAVINCI_USB_TX_ENDPTS_MASK)
+ << DAVINCI_USB_TXINT_SHIFT;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+ old = tmp;
+ tmp = (musb->wEndMask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK))
+ << DAVINCI_USB_RXINT_SHIFT;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+ tmp |= old;
+
+ val = ~MGC_M_INTR_SOF;
+ tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT);
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+
+ if (is_dma_capable() && !dma_off)
+ printk(KERN_WARNING "%s %s: dma not reactivated\n",
+ __FILE__, __FUNCTION__);
+ else
+ dma_off = 0;
+}
+
+/*
+ * Disable the HDRC and flush interrupts
+ */
+void musb_platform_disable(struct musb *musb)
+{
+ /* because we don't set CTRLR.UINT, it's important to:
+ * - not read/write INTRUSB/INTRUSBE (except during
+ * initial setup, as a workaround)
+ * - use INTSETR/INTCLRR instead
+ */
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG,
+ DAVINCI_USB_USBINT_MASK
+ | DAVINCI_USB_TXINT_MASK
+ | DAVINCI_USB_RXINT_MASK);
+ musb_writeb(musb->pRegs, MGC_O_HDRC_DEVCTL, 0);
+ musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0);
+
+ if (is_dma_capable() && !dma_off)
+ WARN("dma still active\n");
+}
+
+
+/* REVISIT this file shouldn't modify the OTG state machine ...
+ *
+ * The OTG infrastructure needs updating, to include things like
+ * offchip DRVVBUS support and replacing MGC_OtgMachineInputs with
+ * musb struct members (so e.g. vbus_state vanishes).
+ */
+static int vbus_state = -1;
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+#define portstate(stmt) stmt
+#else
+#define portstate(stmt)
+#endif
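+
+/* portstate() updates the virtual root hub's port status, which only
+ * exists when host-side support is compiled in.
+ */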
+
+static void session(struct musb *musb, int is_on)
+{
+ void __iomem *mregs = musb->pRegs;
+ u8 devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
+
+ /* NOTE: after drvvbus off the state _could_ be A_IDLE;
+ * but the silicon seems to couple vbus to "ID grounded".
+ */
+ devctl |= MGC_M_DEVCTL_SESSION;
+ if (is_on) {
+ musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ portstate(musb->port1_status |= USB_PORT_STAT_POWER);
+ } else {
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
+ }
+ musb_writeb(mregs, MGC_O_HDRC_DEVCTL, devctl);
+}
+
+
+/* VBUS SWITCHING IS BOARD-SPECIFIC */
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+
+/* I2C operations are always synchronous, and require a task context.
+ * On lightly loaded systems, using the shared workqueue seems to
+ * suffice to satisfy the 100 msec A_WAIT_VRISE timeout...
+ */
+static void evm_deferred_drvvbus(void *_musb)
+{
+ struct musb *musb = _musb;
+ int is_on = (musb->xceiv.state == OTG_STATE_A_WAIT_VRISE);
+
+ davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on);
+ vbus_state = is_on;
+ session(musb, is_on);
+}
+DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus, 0);
+
+#endif
+
+static void davinci_vbus_power(struct musb *musb, int is_on, int sleeping)
+{
+ if (is_on)
+ is_on = 1;
+
+ if (vbus_state == is_on)
+ return;
+
+ if (is_on) {
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ MUSB_HST_MODE(musb);
+ } else {
+ switch (musb->xceiv.state) {
+ case OTG_STATE_UNDEFINED:
+ case OTG_STATE_B_IDLE:
+ MUSB_DEV_MODE(musb);
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ break;
+ case OTG_STATE_A_IDLE:
+ break;
+ default:
+ musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ }
+ }
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+ if (machine_is_davinci_evm()) {
+#ifdef CONFIG_MACH_DAVINCI_EVM_OTG
+ /* modified EVM board switching VBUS with GPIO(6) not I2C
+ * NOTE: PINMUX0.RGB888 (bit23) must be clear
+ */
+ if (is_on)
+ gpio_set(GPIO(6));
+ else
+ gpio_clear(GPIO(6));
+#else
+ if (sleeping)
+ davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on);
+ else
+ schedule_work(&evm_vbus_work);
+#endif
+ }
+#endif
+ if (sleeping) {
+ vbus_state = is_on;
+ session(musb, is_on);
+ }
+
+ DBG(2, "VBUS power %s, %s\n", is_on ? "on" : "off",
+ sleeping ? "immediate" : "deferred");
+}
+
+static irqreturn_t davinci_interrupt(int irq, void *__hci, struct pt_regs *r)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+ void __iomem *tibase = musb->ctrl_base;
+ u32 tmp;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+
+ /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through
+ * the Mentor registers (except for setup), use the TI ones and EOI.
+ *
+ * Docs describe irq "vector" registers associated with the CPPI and
+ * USB EOI registers. These hold a bitmask corresponding to the
+ * current IRQ, not an irq handler address. Would using those bits
+ * resolve some of the races observed in this dispatch code?
+ */
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+ /* CPPI interrupts share the same IRQ line, but have their own
+ * mask, state, "vector", and EOI registers.
+ */
+ {
+ u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
+ u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
+
+ if (cppi_tx || cppi_rx) {
+ DBG(4, "<== CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
+ cppi_completion(musb, cppi_rx, cppi_tx);
+ retval = IRQ_HANDLED;
+ }
+ }
+#endif
+
+ /* ack and handle non-CPPI interrupts */
+ tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
+ musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp);
+
+ musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK)
+ >> DAVINCI_USB_RXINT_SHIFT;
+ musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK)
+ >> DAVINCI_USB_TXINT_SHIFT;
+ musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK)
+ >> DAVINCI_USB_USBINT_SHIFT;
+ musb->int_regs = r;
+
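+ /* DRVVBUS change is bit 8 of the usbint field, bit 24 overall */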
+ if (tmp & (1 << (8 + DAVINCI_USB_USBINT_SHIFT))) {
+ int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG);
+
+ /* NOTE: this must complete poweron within 100 msec */
+ davinci_vbus_power(musb, drvvbus, 0);
+ DBG(2, "DRVVBUS %d (state %d)\n", drvvbus, musb->xceiv.state);
+ retval = IRQ_HANDLED;
+ }
+
+ if (musb->int_tx || musb->int_rx || musb->int_usb)
+ retval |= musb_interrupt(musb);
+
+ /* irq stays asserted until EOI is written */
+ musb_writel(tibase, DAVINCI_USB_EOI_REG, 0);
+
+ musb->int_regs = NULL;
+ spin_unlock_irqrestore(&musb->Lock, flags);
+
+ /* REVISIT we sometimes get unhandled IRQs
+ * (e.g. ep0). not clear why...
+ */
+ if (retval != IRQ_HANDLED)
+ DBG(5, "unhandled? %08x\n", tmp);
+ return IRQ_HANDLED;
+}
+
+int __devinit musb_platform_init(struct musb *musb)
+{
+ void __iomem *tibase = musb->ctrl_base;
+ u32 revision;
+
+ musb->pRegs += DAVINCI_BASE_OFFSET;
+#if 0
+ /* REVISIT there's something odd about clocking; this
+ * didn't appear to do the job ...
+ */
+ musb->clock = clk_get(pDevice, "usb");
+ if (IS_ERR(musb->clock))
+ return PTR_ERR(musb->clock);
+
+ status = clk_enable(musb->clock);
+ if (status < 0)
+ return -ENODEV;
+#endif
+
+ /* returns zero if e.g. not clocked */
+ revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
+ if (revision == 0)
+ return -ENODEV;
+
+ /* note that transceiver issues make us want to charge
+ * VBUS only when the PHY PLL is not active.
+ */
+#ifdef CONFIG_MACH_DAVINCI_EVM
+ evm_vbus_work.data = musb;
+#endif
+ davinci_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
+
+ /* reset the controller */
+ musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
+
+ /* start the on-chip PHY and its PLL */
+ phy_on();
+
+ msleep(5);
+
+ /* NOTE: irqs are in mixed mode, not bypass to pure-musb */
+ pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
+ revision,
+ musb_readl((void __iomem *) IO_ADDRESS(
+ USBPHY_CTL_PADDR), 0x00),
+ musb_readb(tibase, DAVINCI_USB_CTRL_REG));
+
+ musb->isr = davinci_interrupt;
+ return 0;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+ phy_off();
+ davinci_vbus_power(musb, 0 /*off*/, 1);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_HDRDF_H__
+#define __MUSB_HDRDF_H__
+
+/*
+ * DaVinci-specific definitions
+ */
+
+/* Integrated highspeed/otg PHY */
+#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define USBPHY_PHYCLKGD (1 << 8)
+#define USBPHY_SESNDEN (1 << 7) /* v(sess_end) comparator */
+#define USBPHY_VBDTCTEN (1 << 6) /* v(bus) comparator */
+#define USBPHY_PHYPLLON (1 << 4) /* override pll suspend */
+#define USBPHY_CLK01SEL (1 << 3)
+#define USBPHY_OSCPDWN (1 << 2)
+#define USBPHY_PHYSPDWN (1 << 0)
+
+/* For now include usb OTG module registers here */
+#define DAVINCI_USB_VERSION_REG 0x00
+#define DAVINCI_USB_CTRL_REG 0x04
+#define DAVINCI_USB_STAT_REG 0x08
+#define DAVINCI_RNDIS_REG 0x10
+#define DAVINCI_AUTOREQ_REG 0x14
+#define DAVINCI_USB_INT_SOURCE_REG 0x20
+#define DAVINCI_USB_INT_SET_REG 0x24
+#define DAVINCI_USB_INT_SRC_CLR_REG 0x28
+#define DAVINCI_USB_INT_MASK_REG 0x2c
+#define DAVINCI_USB_INT_MASK_SET_REG 0x30
+#define DAVINCI_USB_INT_MASK_CLR_REG 0x34
+#define DAVINCI_USB_INT_SRC_MASKED_REG 0x38
+#define DAVINCI_USB_EOI_REG 0x3c
+#define DAVINCI_USB_EOI_INTVEC 0x40
+
+/* CPPI related registers */
+#define DAVINCI_TXCPPI_CTRL_REG 0x80
+#define DAVINCI_TXCPPI_TEAR_REG 0x84
+#define DAVINCI_CPPI_EOI_REG 0x88
+#define DAVINCI_CPPI_INTVEC_REG 0x8c
+#define DAVINCI_TXCPPI_MASKED_REG 0x90
+#define DAVINCI_TXCPPI_RAW_REG 0x94
+#define DAVINCI_TXCPPI_INTENAB_REG 0x98
+#define DAVINCI_TXCPPI_INTCLR_REG 0x9c
+
+#define DAVINCI_RXCPPI_CTRL_REG 0xC0
+#define DAVINCI_RXCPPI_MASKED_REG 0xD0
+#define DAVINCI_RXCPPI_RAW_REG 0xD4
+#define DAVINCI_RXCPPI_INTENAB_REG 0xD8
+#define DAVINCI_RXCPPI_INTCLR_REG 0xDC
+
+#define DAVINCI_RXCPPI_BUFCNT0_REG 0xE0
+#define DAVINCI_RXCPPI_BUFCNT1_REG 0xE4
+#define DAVINCI_RXCPPI_BUFCNT2_REG 0xE8
+#define DAVINCI_RXCPPI_BUFCNT3_REG 0xEC
+
+/* CPPI state RAM entries */
+#define DAVINCI_CPPI_STATERAM_BASE_OFFSET 0x100
+
+#define DAVINCI_TXCPPI_STATERAM_OFFSET(channelNum) \
+ (DAVINCI_CPPI_STATERAM_BASE_OFFSET + ((channelNum) * 0x40))
+#define DAVINCI_RXCPPI_STATERAM_OFFSET(channelNum) \
+ (DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((channelNum) * 0x40))
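+
+/* i.e. each channel owns a 0x40-byte block of state RAM, with the TX
+ * entry at offset 0x00 and the RX entry at offset 0x20 of that block.
+ */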
+
+/* CPPI masks */
+#define DAVINCI_DMA_CTRL_ENABLE 1
+#define DAVINCI_DMA_CTRL_DISABLE 0
+
+#define DAVINCI_DMA_ALL_CHANNELS_ENABLE 0xF
+#define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF
+
+/* REVISIT relying on "volatile" here is wrong ... */
+
+/* define structures of Rx/Tx stateRam entries */
+struct cppi_tx_stateram {
+ volatile u32 headPtr;
+ volatile u32 sopDescPtr;
+ volatile u32 currDescPtr;
+ volatile u32 currBuffPtr;
+ volatile u32 flags;
+ volatile u32 remLength;
+ volatile u32 dummy;
+ volatile u32 completionPtr;
+};
+
+struct cppi_rx_stateram {
+ volatile u32 buffOffset;
+ volatile u32 headPtr;
+ volatile u32 sopDescPtr;
+ volatile u32 currDescPtr;
+ volatile u32 currBuffPtr;
+ volatile u32 pktLength;
+ volatile u32 byteCount;
+ volatile u32 completionPtr;
+};
+
+#define DAVINCI_USB_TX_ENDPTS_MASK 0x1f /* ep0 + 4 tx */
+#define DAVINCI_USB_RX_ENDPTS_MASK 0x1e /* 4 rx */
+
+#define DAVINCI_USB_USBINT_SHIFT 16
+#define DAVINCI_USB_TXINT_SHIFT 0
+#define DAVINCI_USB_RXINT_SHIFT 8
+
+#define DAVINCI_USB_USBINT_MASK 0x01ff0000 /* 8 Mentor, DRVVBUS */
+#define DAVINCI_USB_TXINT_MASK \
+ (DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT)
+#define DAVINCI_USB_RXINT_MASK \
+ (DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT)
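+
+/* Per the masks above, the combined interrupt registers pack the three
+ * groups as: bits 0..4 TX endpoint interrupts (ep0 + 4 tx), bits 9..12
+ * RX endpoint interrupts, and bits 16..24 the eight Mentor core
+ * interrupts plus DRVVBUS (bit 24).
+ */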
+
+#define DAVINCI_BASE_OFFSET 0x400
+
+#endif /* __MUSB_HDRDF_H__ */
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+#ifndef __MUSB_LINUX_DEBUG_H__
+#define __MUSB_LINUX_DEBUG_H__
+
+#define yprintk(facility, format, args...) \
+ do { printk(facility "%s %d: " format , \
+ __FUNCTION__, __LINE__ , ## args); } while (0)
+#define WARN(fmt, args...) yprintk(KERN_WARNING, fmt, ## args)
+#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
+#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
+
+#define xprintk(level, facility, format, args...) do { \
+ if (_dbg_level(level)) { \
+ printk(facility "%s %d: " format , \
+ __FUNCTION__, __LINE__ , ## args); \
+ } } while (0)
+
+#if MUSB_DEBUG > 0
+extern unsigned debug;
+#else
+#define debug 0
+#endif
+
+static inline int _dbg_level(unsigned l)
+{
+ return debug >= l;
+}
+
+#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args)
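+
+/* e.g. DBG(3, "csr0 %04x\n", csr) only prints when debug >= 3 */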
+
+#endif /* __MUSB_LINUX_DEBUG_H__ */
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+#ifndef __MUSB_DMA_H__
+#define __MUSB_DMA_H__
+
+struct musb_hw_ep;
+
+/*
+ * DMA Controller Abstraction
+ *
+ * DMA Controllers are abstracted to allow use of a variety of different
+ * implementations of DMA, as allowed by the Inventra USB cores. On the
+ * host side, usbcore sets up the DMA mappings and flushes caches; on the
+ * peripheral side, the gadget controller driver does. Responsibilities
+ * of a DMA controller driver include:
+ *
+ * - Handling the details of moving multiple USB packets
+ * in cooperation with the Inventra USB core, including especially
+ * the correct RX side treatment of short packets and buffer-full
+ * states (both of which terminate transfers).
+ *
+ * - Knowing the correlation between dma channels and the
+ * Inventra core's local endpoint resources and data direction.
+ *
+ * - Maintaining a list of allocated/available channels.
+ *
+ * - Updating channel status on interrupts,
+ * whether shared with the Inventra core or separate.
+ */
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#ifndef CONFIG_USB_INVENTRA_FIFO
+#define is_dma_capable() (1)
+#else
+#define is_dma_capable() (0)
+#endif
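+
+/* with CONFIG_USB_INVENTRA_FIFO (PIO-only builds) is_dma_capable() is
+ * a constant zero, so the compiler can discard code paths it guards.
+ */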
+
+#if defined(CONFIG_USB_TI_CPPI_DMA) && defined(CONFIG_USB_MUSB_HDRC_HCD)
+extern void cppi_hostdma_start(struct musb *pThis, u8 bEnd);
+#else
+static inline void cppi_hostdma_start(struct musb *pThis, u8 bEnd) {}
+#endif
+
+
+/*
+ * DMA channel status ... updated by the dma controller driver whenever that
+ * status changes, and protected by the overall controller spinlock.
+ */
+enum dma_channel_status {
+ /* unallocated */
+ MGC_DMA_STATUS_UNKNOWN,
+ /* allocated ... but not busy, no errors */
+ MGC_DMA_STATUS_FREE,
+ /* busy ... transactions are active */
+ MGC_DMA_STATUS_BUSY,
+ /* transaction(s) aborted due to ... dma or memory bus error */
+ MGC_DMA_STATUS_BUS_ABORT,
+ /* transaction(s) aborted due to ... core error or USB fault */
+ MGC_DMA_STATUS_CORE_ABORT
+};
+
+struct dma_controller;
+
+/**
+ * struct dma_channel - A DMA channel.
+ * @pPrivateData: channel-private data
+ * @dwMaxLength: the maximum number of bytes the channel can move in one
+ * transaction (typically representing many USB maximum-sized packets)
+ * @dwActualLength: how many bytes have been transferred
+ * @bStatus: current channel status (updated e.g. on interrupt)
+ * @bDesiredMode: TRUE if mode 1 is desired; FALSE if mode 0 is desired
+ *
+ * channels are associated with an endpoint for the duration of at least
+ * one usb transfer.
+ */
+struct dma_channel {
+ void *pPrivateData;
+ /* FIXME should be a struct dma_controller *, not an opaque void * */
+ size_t dwMaxLength;
+ size_t dwActualLength;
+ enum dma_channel_status bStatus;
+ u8 bDesiredMode;
+};
+
+/*
+ * Program a DMA channel to move data at the core's request.
+ * The local core endpoint and direction should already be known,
+ * since they are specified in the channel_alloc call.
+ *
+ * @channel: pointer to a channel obtained by channel_alloc
+ * @maxpacket: the maximum packet size
+ * @bMode: TRUE if mode 1; FALSE if mode 0
+ * @dma_addr: base address of data (in DMA space)
+ * @length: the number of bytes to transfer; no larger than the channel's
+ * reported dwMaxLength
+ *
+ * Returns TRUE on success, else FALSE
+ */
+typedef int (*MGC_pfDmaProgramChannel) (
+ struct dma_channel *channel,
+ u16 maxpacket,
+ u8 bMode,
+ dma_addr_t dma_addr,
+ u32 length);
+
+/*
+ * dma_channel_status - return status of dma channel
+ * @c: the channel
+ *
+ * Returns the software's view of the channel status. If that status is BUSY
+ * then it's possible that the hardware has completed (or aborted) a transfer,
+ * so the driver needs to update that status.
+ */
+static inline enum dma_channel_status
+dma_channel_status(struct dma_channel *c)
+{
+ return (is_dma_capable() && c) ? c->bStatus : MGC_DMA_STATUS_UNKNOWN;
+}
+
+/**
+ * struct dma_controller - A DMA Controller.
+ * @pPrivateData: controller-private data;
+ * @start: call this to start a DMA controller;
+ * return 0 on success, else negative errno
+ * @stop: call this to stop a DMA controller
+ * return 0 on success, else negative errno
+ * @channel_alloc: call this to allocate a DMA channel
+ * @channel_release: call this to release a DMA channel
+ * @channel_abort: call this to abort a pending DMA transaction,
+ * returning it to FREE (but allocated) state
+ *
+ * Controllers manage dma channels.
+ */
+struct dma_controller {
+ void *pPrivateData;
+ int (*start)(struct dma_controller *);
+ int (*stop)(struct dma_controller *);
+ struct dma_channel *(*channel_alloc)(struct dma_controller *,
+ struct musb_hw_ep *, u8 is_tx);
+ void (*channel_release)(struct dma_channel *);
+ MGC_pfDmaProgramChannel channel_program;
+ int (*channel_abort)(struct dma_channel *);
+};
+
+/* called after channel_program(), may indicate a fault */
+extern void musb_dma_completion(struct musb *musb, u8 bLocalEnd, u8 bTransmit);
+
+
+/**
+ * struct dma_controller_factory - DMA controller factory
+ * @create: create a DMA controller
+ * @destroy: destroy a DMA controller
+ *
+ * To allow for multi-core implementations and for different types of
+ * cores and DMA controllers to coexist (at the source level only; no
+ * runtime coexistence is supported), controllers are created through
+ * factories.
+ */
+struct dma_controller_factory {
+ struct dma_controller *(*create)(struct musb *, void __iomem *);
+ void (*destroy)(struct dma_controller *);
+};
+
+extern const struct dma_controller_factory dma_controller_factory;
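+
+/* Sketch of the expected calling sequence; the glue is platform
+ * specific and error handling is omitted (io_base here stands for the
+ * controller's mapped registers):
+ *
+ * c = dma_controller_factory.create(musb, io_base);
+ * c->start(c);
+ * channel = c->channel_alloc(c, hw_ep, is_tx);
+ * c->channel_program(channel, maxpacket, mode, dma_addr, length);
+ * ... completion irq leads to musb_dma_completion() ...
+ * c->channel_release(channel);
+ * c->stop(c);
+ */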
+
+#endif /* __MUSB_DMA_H__ */
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include "musbdefs.h"
+
+/* ep0 is always musb->aLocalEnd[0].ep_in */
+#define next_ep0_request(musb) next_in_request(&(musb)->aLocalEnd[0])
+
+/*
+ * Locking note: we use only the controller lock, for simpler correctness.
+ * It's always held with IRQs blocked.
+ *
+ * It protects the ep0 request queue as well as ep0_state, not just the
+ * controller and indexed registers. And that lock stays held unless it
+ * needs to be dropped to allow reentering this driver ... like upcalls to
+ * the gadget driver, or adjusting endpoint halt status.
+ */
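+
+/* Control transfers use one of three sequences, referenced as #1..#3
+ * below: #1 moves IN data then an OUT status stage; #2 moves OUT data
+ * then an IN status stage; #3 has no data stage, only an IN status.
+ */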
+
+static const char *decode_ep0stage(u8 stage)
+{
+ switch (stage) {
+ case MGC_END0_STAGE_SETUP: return "idle";
+ case MGC_END0_STAGE_TX: return "in";
+ case MGC_END0_STAGE_RX: return "out";
+ case MGC_END0_STAGE_ACKWAIT: return "wait";
+ case MGC_END0_STAGE_STATUSIN: return "in/status";
+ case MGC_END0_STAGE_STATUSOUT: return "out/status";
+ default: return "?";
+ }
+}
+
+/* handle a standard GET_STATUS request
+ * Context: caller holds controller lock
+ */
+static int service_tx_status_request(
+ struct musb *pThis,
+ const struct usb_ctrlrequest *pControlRequest)
+{
+ void __iomem *pBase = pThis->pRegs;
+ int handled = 1;
+ u8 bResult[2], bEnd = 0;
+ const u8 bRecip = pControlRequest->bRequestType & USB_RECIP_MASK;
+
+ bResult[1] = 0;
+
+ switch (bRecip) {
+ case USB_RECIP_DEVICE:
+ bResult[0] = pThis->bIsSelfPowered << USB_DEVICE_SELF_POWERED;
+ bResult[0] |= pThis->bMayWakeup << USB_DEVICE_REMOTE_WAKEUP;
+#ifdef CONFIG_USB_MUSB_OTG
+ if (pThis->g.is_otg) {
+ bResult[0] |= pThis->g.b_hnp_enable
+ << USB_DEVICE_B_HNP_ENABLE;
+ bResult[0] |= pThis->g.a_alt_hnp_support
+ << USB_DEVICE_A_ALT_HNP_SUPPORT;
+ bResult[0] |= pThis->g.a_hnp_support
+ << USB_DEVICE_A_HNP_SUPPORT;
+ }
+#endif
+ break;
+
+ case USB_RECIP_INTERFACE:
+ bResult[0] = 0;
+ break;
+
+ case USB_RECIP_ENDPOINT: {
+ int is_in;
+ struct musb_ep *ep;
+ u16 tmp;
+ void __iomem *regs;
+
+ bEnd = (u8) pControlRequest->wIndex;
+ if (!bEnd) {
+ bResult[0] = 0;
+ break;
+ }
+
+ is_in = bEnd & USB_DIR_IN;
+ bEnd &= 0x0f;
+
+ /* validate the endpoint number before indexing aLocalEnd */
+ if (bEnd >= MUSB_C_NUM_EPS) {
+ handled = -EINVAL;
+ break;
+ }
+
+ if (is_in)
+ ep = &pThis->aLocalEnd[bEnd].ep_in;
+ else
+ ep = &pThis->aLocalEnd[bEnd].ep_out;
+ regs = pThis->aLocalEnd[bEnd].regs;
+
+ if (!ep->desc) {
+ handled = -EINVAL;
+ break;
+ }
+
+ MGC_SelectEnd(pBase, bEnd);
+ if (is_in)
+ tmp = musb_readw(regs, MGC_O_HDRC_TXCSR)
+ & MGC_M_TXCSR_P_SENDSTALL;
+ else
+ tmp = musb_readw(regs, MGC_O_HDRC_RXCSR)
+ & MGC_M_RXCSR_P_SENDSTALL;
+ MGC_SelectEnd(pBase, 0);
+
+ bResult[0] = tmp ? 1 : 0;
+ } break;
+
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+
+ /* fill up the fifo; caller updates csr0 */
+ if (handled > 0) {
+ u16 len = le16_to_cpu(pControlRequest->wLength);
+
+ if (len > 2)
+ len = 2;
+ musb_write_fifo(&pThis->aLocalEnd[0], len, bResult);
+ }
+
+ return handled;
+}
+
+/*
+ * Handle a control-IN request: the ep0 buffer contains the current
+ * request, which should be a standard control request. Assumes the
+ * FIFO is at least 2 bytes deep.
+ *
+ * @return 0 if the request was NOT HANDLED,
+ * < 0 when error
+ * > 0 when the request is processed
+ *
+ * Context: caller holds controller lock
+ */
+static int
+service_in_request(struct musb *pThis,
+ const struct usb_ctrlrequest *pControlRequest)
+{
+ int handled = 0; /* not handled */
+
+ if ((pControlRequest->bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD) {
+ switch (pControlRequest->bRequest) {
+ case USB_REQ_GET_STATUS:
+ handled = service_tx_status_request(pThis,
+ pControlRequest);
+ break;
+
+ /* case USB_REQ_SYNC_FRAME: */
+
+ default:
+ break;
+ }
+ }
+ return handled;
+}
+
+/*
+ * Context: caller holds controller lock
+ */
+static void musb_g_ep0_giveback(struct musb *pThis, struct usb_request *req)
+{
+ pThis->ep0_state = MGC_END0_STAGE_SETUP;
+ musb_g_giveback(&pThis->aLocalEnd[0].ep_in, req, 0);
+}
+
+
+/* for high speed test mode; see USB 2.0 spec 7.1.20 */
+static const u8 musb_test_packet[53] = {
+ /* implicit SYNC then DATA0 to start */
+
+ /* JKJKJKJK x9 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* JJKKJJKK x8 */
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ /* JJJJKKKK x8 */
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+ /* JJJJJJJKKKKKKK x8 */
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ /* JJJJJJJK x8 */
+ 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+ /* JKKKKKKK x10, JK */
+ 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
+
+ /* implicit CRC16 then EOP to end */
+};
+
+/*
+ * Handle all control requests with no DATA stage, including standard
+ * requests such as:
+ * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
+ * always delegated to the gadget driver
+ * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
+ * always handled here, except for class/vendor/... features
+ *
+ * Context: caller holds controller lock
+ */
+static int
+service_zero_data_request(struct musb *pThis,
+ struct usb_ctrlrequest *pControlRequest)
+__releases(pThis->Lock)
+__acquires(pThis->Lock)
+{
+ int handled = -EINVAL;
+ void __iomem *pBase = pThis->pRegs;
+ const u8 bRecip = pControlRequest->bRequestType & USB_RECIP_MASK;
+
+ /* the gadget driver handles everything except what we MUST handle */
+ if ((pControlRequest->bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD) {
+ switch (pControlRequest->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ /* change it after the status stage */
+ pThis->bSetAddress = TRUE;
+ pThis->bAddress = (u8) (pControlRequest->wValue & 0x7f);
+ handled = 1;
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ switch (bRecip) {
+ case USB_RECIP_DEVICE:
+ if (pControlRequest->wValue
+ != USB_DEVICE_REMOTE_WAKEUP)
+ break;
+ pThis->bMayWakeup = 0;
+ handled = 1;
+ break;
+ case USB_RECIP_INTERFACE:
+ break;
+ case USB_RECIP_ENDPOINT:{
+ const u8 bEnd = pControlRequest->wIndex & 0x0f;
+ struct musb_ep *pEnd;
+
+ if (bEnd == 0
+ || bEnd >= MUSB_C_NUM_EPS
+ || pControlRequest->wValue
+ != USB_ENDPOINT_HALT)
+ break;
+
+ if (pControlRequest->wIndex & USB_DIR_IN)
+ pEnd = &pThis->aLocalEnd[bEnd].ep_in;
+ else
+ pEnd = &pThis->aLocalEnd[bEnd].ep_out;
+ if (!pEnd->desc)
+ break;
+
+ /* REVISIT do it directly, no locking games */
+ spin_unlock(&pThis->Lock);
+ musb_gadget_set_halt(&pEnd->end_point, 0);
+ spin_lock(&pThis->Lock);
+
+ /* select ep0 again */
+ MGC_SelectEnd(pBase, 0);
+ handled = 1;
+ } break;
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ switch (bRecip) {
+ case USB_RECIP_DEVICE:
+ handled = 1;
+ switch (pControlRequest->wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ pThis->bMayWakeup = 1;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (pThis->g.speed != USB_SPEED_HIGH)
+ goto stall;
+ if (pControlRequest->wIndex & 0xff)
+ goto stall;
+
+ switch (pControlRequest->wIndex >> 8) {
+ case 1:
+ pr_debug("TEST_J\n");
+ /* TEST_J */
+ pThis->bTestModeValue =
+ MGC_M_TEST_J;
+ break;
+ case 2:
+ /* TEST_K */
+ pr_debug("TEST_K\n");
+ pThis->bTestModeValue =
+ MGC_M_TEST_K;
+ break;
+ case 3:
+ /* TEST_SE0_NAK */
+ pr_debug("TEST_SE0_NAK\n");
+ pThis->bTestModeValue =
+ MGC_M_TEST_SE0_NAK;
+ break;
+ case 4:
+ /* TEST_PACKET */
+ pr_debug("TEST_PACKET\n");
+ pThis->bTestModeValue =
+ MGC_M_TEST_PACKET;
+ break;
+ default:
+ goto stall;
+ }
+
+ /* enter test mode after irq */
+ if (handled > 0)
+ pThis->bTestMode = TRUE;
+ break;
+#ifdef CONFIG_USB_MUSB_OTG
+ case USB_DEVICE_B_HNP_ENABLE:
+ if (!pThis->g.is_otg)
+ goto stall;
+ { u8 devctl;
+ pThis->g.b_hnp_enable = 1;
+ devctl = musb_readb(pBase,
+ MGC_O_HDRC_DEVCTL);
+ /* REVISIT after roleswitch, HR will
+ * have been cleared ... reset it
+ */
+ musb_writeb(pBase, MGC_O_HDRC_DEVCTL,
+ devctl | MGC_M_DEVCTL_HR);
+ }
+ break;
+ case USB_DEVICE_A_HNP_SUPPORT:
+ if (!pThis->g.is_otg)
+ goto stall;
+ pThis->g.a_hnp_support = 1;
+ break;
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ if (!pThis->g.is_otg)
+ goto stall;
+ pThis->g.a_alt_hnp_support = 1;
+ break;
+#endif
+stall:
+ default:
+ handled = -EINVAL;
+ break;
+ }
+ break;
+
+ case USB_RECIP_INTERFACE:
+ break;
+
+ case USB_RECIP_ENDPOINT:{
+ const u8 bEnd =
+ pControlRequest->wIndex & 0x0f;
+ struct musb_ep *pEnd;
+ struct musb_hw_ep *ep;
+ void __iomem *regs;
+ int is_in;
+ u16 csr;
+
+ if (bEnd == 0
+ || bEnd >= MUSB_C_NUM_EPS
+ || pControlRequest->wValue
+ != USB_ENDPOINT_HALT)
+ break;
+
+ ep = pThis->aLocalEnd + bEnd;
+ regs = ep->regs;
+ is_in = pControlRequest->wIndex & USB_DIR_IN;
+ if (is_in)
+ pEnd = &ep->ep_in;
+ else
+ pEnd = &ep->ep_out;
+ if (!pEnd->desc)
+ break;
+
+ MGC_SelectEnd(pBase, bEnd);
+ if (is_in) {
+ csr = musb_readw(regs,
+ MGC_O_HDRC_TXCSR);
+ csr |= MGC_M_TXCSR_P_SENDSTALL
+ | MGC_M_TXCSR_CLRDATATOG
+ | MGC_M_TXCSR_FLUSHFIFO
+ | MGC_M_TXCSR_P_WZC_BITS;
+ musb_writew(regs, MGC_O_HDRC_TXCSR,
+ csr);
+ } else {
+ csr = musb_readw(regs,
+ MGC_O_HDRC_RXCSR);
+ csr |= MGC_M_RXCSR_P_SENDSTALL
+ | MGC_M_RXCSR_FLUSHFIFO
+ | MGC_M_RXCSR_CLRDATATOG
+ | MGC_M_RXCSR_P_WZC_BITS;
+ musb_writew(regs, MGC_O_HDRC_RXCSR,
+ csr);
+ }
+
+ /* select ep0 again */
+ MGC_SelectEnd(pBase, 0);
+ handled = 1;
+ } break;
+
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+ break;
+ default:
+ /* delegate SET_CONFIGURATION, etc */
+ handled = 0;
+ }
+ } else
+ handled = 0;
+ return handled;
+}
+
+/* we have an ep0out data packet
+ * Context: caller holds controller lock
+ */
+static void ep0_rxstate(struct musb *this)
+{
+ void __iomem *regs = this->control_ep->regs;
+ struct usb_request *req;
+ u16 tmp;
+
+ req = next_ep0_request(this);
+
+ /* read packet and ack; or stall because of gadget driver bug:
+ * should have provided the rx buffer before setup() returned.
+ */
+ if (req) {
+ void *buf = req->buf + req->actual;
+ unsigned len = req->length - req->actual;
+ u16 count;
+
+ /* read the buffer */
+ count = musb_readb(regs, MGC_O_HDRC_COUNT0);
+ if (count > len) {
+ req->status = -EOVERFLOW;
+ count = len;
+ }
+ musb_read_fifo(&this->aLocalEnd[0], count, buf);
+ req->actual += count;
+ tmp = MGC_M_CSR0_P_SVDRXPKTRDY;
+ /* a short packet or a full buffer ends the data stage */
+ if (count < 64 || req->actual == req->length) {
+ this->ep0_state = MGC_END0_STAGE_STATUSIN;
+ tmp |= MGC_M_CSR0_P_DATAEND;
+ } else
+ req = NULL;
+ } else
+ tmp = MGC_M_CSR0_P_SVDRXPKTRDY | MGC_M_CSR0_P_SENDSTALL;
+ musb_writew(regs, MGC_O_HDRC_CSR0, tmp);
+
+
+ /* NOTE: we "should" hold off reporting DATAEND and going to
+ * STATUSIN until after the completion handler decides whether
+ * to issue a stall instead, since this hardware can do that.
+ */
+ if (req)
+ musb_g_ep0_giveback(this, req);
+}
+
+/*
+ * Transmitting to the host (IN); this code may be called from IRQ
+ * or from task context.
+ *
+ * Context: caller holds controller lock
+ */
+static void ep0_txstate(struct musb *pThis)
+{
+ void __iomem *regs = pThis->control_ep->regs;
+ struct usb_request *pRequest = next_ep0_request(pThis);
+ u16 wCsrVal = MGC_M_CSR0_TXPKTRDY;
+ u8 *pFifoSource;
+ u8 wFifoCount;
+
+ if (!pRequest) {
+ // WARN_ON(1);
+ DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MGC_O_HDRC_CSR0));
+ return;
+ }
+
+ /* load the data */
+ pFifoSource = (u8 *) pRequest->buf + pRequest->actual;
+ wFifoCount = min((unsigned) MGC_END0_FIFOSIZE,
+ pRequest->length - pRequest->actual);
+ musb_write_fifo(&pThis->aLocalEnd[0], wFifoCount, pFifoSource);
+ pRequest->actual += wFifoCount;
+
+ /* update the flags */
+ if (wFifoCount < MUSB_MAX_END0_PACKET
+ || pRequest->actual == pRequest->length) {
+ pThis->ep0_state = MGC_END0_STAGE_STATUSOUT;
+ wCsrVal |= MGC_M_CSR0_P_DATAEND;
+ } else
+ pRequest = NULL;
+
+ /* send it out, triggering a "txpktrdy cleared" irq */
+ musb_writew(regs, MGC_O_HDRC_CSR0, wCsrVal);
+
+ /* report completions as soon as the fifo's loaded; there's no
+ * win in waiting till this last packet gets acked. (other than
+ * very precise fault reporting, needed by USB TMC; possible with
+ * this hardware, but not usable from portable gadget drivers.)
+ */
+ if (pRequest)
+ musb_g_ep0_giveback(pThis, pRequest);
+}
+
+/*
+ * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
+ * Fields are left in USB byte-order.
+ *
+ * Context: caller holds controller lock.
+ */
+static void
+musb_read_setup(struct musb *pThis, struct usb_ctrlrequest *req)
+{
+ struct usb_request *r;
+ void __iomem *regs = pThis->control_ep->regs;
+
+ musb_read_fifo(&pThis->aLocalEnd[0], sizeof *req, (u8 *)req);
+
+ /* NOTE: earlier 2.6 versions changed setup packets to host
+ * order, but now USB packets always stay in USB byte order.
+ */
+ DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n",
+ req->bRequestType,
+ req->bRequest,
+ le16_to_cpu(req->wValue),
+ le16_to_cpu(req->wIndex),
+ le16_to_cpu(req->wLength));
+
+ /* clean up any leftover transfers */
+ r = next_ep0_request(pThis);
+ if (r)
+ musb_g_ep0_giveback(pThis, r);
+
+ /* For zero-data requests we want to delay the STATUS stage to
+ * avoid SETUPEND errors. If we read data (OUT), delay accepting
+ * packets until there's a buffer to store them in.
+ *
+ * If we write data, the controller acts happier if we enable
+ * the TX FIFO right away, and give the controller a moment
+ * to switch modes...
+ */
+ pThis->bSetAddress = FALSE;
+ pThis->ackpend = MGC_M_CSR0_P_SVDRXPKTRDY;
+ if (req->wLength == 0)
+ pThis->ep0_state = MGC_END0_STAGE_ACKWAIT;
+ else if (req->bRequestType & USB_DIR_IN) {
+ pThis->ep0_state = MGC_END0_STAGE_TX;
+ musb_writew(regs, MGC_O_HDRC_CSR0, MGC_M_CSR0_P_SVDRXPKTRDY);
+ while ((musb_readw(regs, MGC_O_HDRC_CSR0)
+ & MGC_M_CSR0_RXPKTRDY) != 0)
+ cpu_relax();
+ pThis->ackpend = 0;
+ } else
+ pThis->ep0_state = MGC_END0_STAGE_RX;
+}
+
+static int
+forward_to_driver(struct musb *musb,
+ const struct usb_ctrlrequest *pControlRequest)
+__releases(musb->Lock)
+__acquires(musb->Lock)
+{
+ int retval;
+ if (!musb->pGadgetDriver)
+ return -EOPNOTSUPP;
+ spin_unlock(&musb->Lock);
+ retval = musb->pGadgetDriver->setup(&musb->g, pControlRequest);
+ spin_lock(&musb->Lock);
+ return retval;
+}
+
+/*
+ * Handle peripheral ep0 interrupt
+ * @param pThis the controller instance
+ *
+ * Context: irq handler; we won't re-enter the driver that way.
+ */
+irqreturn_t musb_g_ep0_irq(struct musb *pThis)
+{
+ u16 wCsrVal;
+ u16 wCount;
+ void __iomem *pBase = pThis->pRegs;
+ void __iomem *regs = pThis->aLocalEnd[0].regs;
+ irqreturn_t retval = IRQ_NONE;
+
+ MGC_SelectEnd(pBase, 0); /* select ep0 */
+ wCsrVal = musb_readw(regs, MGC_O_HDRC_CSR0);
+ wCount = musb_readb(regs, MGC_O_HDRC_COUNT0);
+
+ DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
+ wCsrVal, wCount,
+ musb_readb(pBase, MGC_O_HDRC_FADDR),
+ decode_ep0stage(pThis->ep0_state));
+
+ /* we sent a stall ... acknowledge it now */
+ if (wCsrVal & MGC_M_CSR0_P_SENTSTALL) {
+ musb_writew(regs, MGC_O_HDRC_CSR0,
+ wCsrVal & ~MGC_M_CSR0_P_SENTSTALL);
+ retval = IRQ_HANDLED;
+ pThis->ep0_state = MGC_END0_STAGE_SETUP;
+ wCsrVal = musb_readw(regs, MGC_O_HDRC_CSR0);
+ }
+
+ /* request ended "early" */
+ if (wCsrVal & MGC_M_CSR0_P_SETUPEND) {
+ musb_writew(regs, MGC_O_HDRC_CSR0, MGC_M_CSR0_P_SVDSETUPEND);
+ retval = IRQ_HANDLED;
+ pThis->ep0_state = MGC_END0_STAGE_SETUP;
+ wCsrVal = musb_readw(regs, MGC_O_HDRC_CSR0);
+ /* NOTE: request may need completion */
+ }
+
+ /* docs from Mentor only describe tx, rx, and idle/setup states.
+ * we need to handle nuances around status stages, and also the
+ * case where status and setup stages come back-to-back ...
+ */
+ switch (pThis->ep0_state) {
+
+ case MGC_END0_STAGE_TX:
+ /* irq on clearing txpktrdy */
+ if ((wCsrVal & MGC_M_CSR0_TXPKTRDY) == 0) {
+ ep0_txstate(pThis);
+ retval = IRQ_HANDLED;
+ }
+ break;
+
+ case MGC_END0_STAGE_RX:
+ /* irq on set rxpktrdy */
+ if (wCsrVal & MGC_M_CSR0_RXPKTRDY) {
+ ep0_rxstate(pThis);
+ retval = IRQ_HANDLED;
+ }
+ break;
+
+ case MGC_END0_STAGE_STATUSIN:
+ /* end of sequence #2 (OUT/RX state) or #3 (no data) */
+
+ /* update address (if needed) only @ the end of the
+ * status phase per usb spec, which also guarantees
+ * we get 10 msec to receive this irq... until this
+ * is done we won't see the next packet.
+ */
+ if (pThis->bSetAddress) {
+ pThis->bSetAddress = FALSE;
+ musb_writeb(pBase, MGC_O_HDRC_FADDR, pThis->bAddress);
+ }
+
+ /* enter test mode if needed (exit by reset) */
+ else if (pThis->bTestMode) {
+ DBG(1, "entering TESTMODE\n");
+
+ if (MGC_M_TEST_PACKET == pThis->bTestModeValue) {
+ musb_write_fifo(&pThis->aLocalEnd[0],
+ sizeof(musb_test_packet),
+ musb_test_packet);
+ }
+
+ musb_writew(regs, MGC_O_HDRC_CSR0, MGC_M_CSR0_TXPKTRDY);
+
+ musb_writeb(pBase, MGC_O_HDRC_TESTMODE,
+ pThis->bTestModeValue);
+ }
+ /* FALLTHROUGH */
+
+ case MGC_END0_STAGE_STATUSOUT:
+ /* end of sequence #1: write to host (TX state) */
+ {
+ struct usb_request *req;
+
+ req = next_ep0_request(pThis);
+ if (req)
+ musb_g_ep0_giveback(pThis, req);
+ }
+ retval = IRQ_HANDLED;
+ pThis->ep0_state = MGC_END0_STAGE_SETUP;
+ /* FALLTHROUGH */
+
+ case MGC_END0_STAGE_SETUP:
+ if (wCsrVal & MGC_M_CSR0_RXPKTRDY) {
+ struct usb_ctrlrequest setup;
+ int handled = 0;
+
+ if (wCount != 8) {
+ ERR("SETUP packet len %d != 8 ?\n", wCount);
+ break;
+ }
+ musb_read_setup(pThis, &setup);
+ retval = IRQ_HANDLED;
+
+ /* sometimes the RESET won't be reported */
+ if (unlikely(pThis->g.speed == USB_SPEED_UNKNOWN)) {
+ u8 power;
+
+ printk(KERN_NOTICE "%s: peripheral reset "
+ "irq lost!\n",
+ musb_driver_name);
+ power = musb_readb(pBase, MGC_O_HDRC_POWER);
+ pThis->g.speed = (power & MGC_M_POWER_HSMODE)
+ ? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+ }
+
+ switch (pThis->ep0_state) {
+
+ /* sequence #3 (no data stage), includes requests
+ * we can't forward (notably SET_ADDRESS and the
+ * device/endpoint feature set/clear operations)
+ * plus SET_CONFIGURATION and others that we must forward
+ */
+ case MGC_END0_STAGE_ACKWAIT:
+ handled = service_zero_data_request(
+ pThis, &setup);
+
+ /* status stage might be immediate */
+ if (handled > 0) {
+ pThis->ackpend |= MGC_M_CSR0_P_DATAEND;
+ pThis->ep0_state =
+ MGC_END0_STAGE_STATUSIN;
+ }
+ break;
+
+ /* sequence #1 (IN to host), includes GET_STATUS
+ * requests that we can't forward, GET_DESCRIPTOR
+ * and others that we must forward
+ */
+ case MGC_END0_STAGE_TX:
+ handled = service_in_request(pThis, &setup);
+ if (handled > 0) {
+ pThis->ackpend = MGC_M_CSR0_TXPKTRDY
+ | MGC_M_CSR0_P_DATAEND;
+ pThis->ep0_state =
+ MGC_END0_STAGE_STATUSOUT;
+ }
+ break;
+
+ /* sequence #2 (OUT from host), always forward */
+ default: /* MGC_END0_STAGE_RX */
+ break;
+ }
+
+ DBG(3, "handled %d, csr %04x, ep0stage %s\n",
+ handled, wCsrVal,
+ decode_ep0stage(pThis->ep0_state));
+
+ /* unless we need to delegate this to the gadget
+ * driver, we know how to wrap this up: csr0 has
+ * not yet been written.
+ */
+ if (handled < 0)
+ goto stall;
+ else if (handled > 0)
+ goto finish;
+
+ handled = forward_to_driver(pThis, &setup);
+ if (handled < 0) {
+ MGC_SelectEnd(pBase, 0);
+stall:
+ DBG(3, "stall (%d)\n", handled);
+ pThis->ackpend |= MGC_M_CSR0_P_SENDSTALL;
+ pThis->ep0_state = MGC_END0_STAGE_SETUP;
+finish:
+ musb_writew(regs, MGC_O_HDRC_CSR0,
+ pThis->ackpend);
+ pThis->ackpend = 0;
+ }
+ }
+ break;
+
+ case MGC_END0_STAGE_ACKWAIT:
+ /* This should not happen, but it does with tusb6010 running
+ * g_file_storage at high speed. Do nothing.
+ */
+ retval = IRQ_HANDLED;
+ break;
+
+ default:
+ /* "can't happen" */
+ WARN_ON(1);
+ musb_writew(regs, MGC_O_HDRC_CSR0, MGC_M_CSR0_P_SENDSTALL);
+ pThis->ep0_state = MGC_END0_STAGE_SETUP;
+ break;
+ }
+
+ return retval;
+}
+
+
+static int
+musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static int musb_g_ep0_disable(struct usb_ep *e)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static void *musb_g_ep0_alloc_buffer(struct usb_ep *ep, unsigned bytes,
+ dma_addr_t * dma, gfp_t gfp_flags)
+{
+ *dma = DMA_ADDR_INVALID;
+ return kmalloc(bytes, gfp_flags);
+}
+
+static void musb_g_ep0_free_buffer(struct usb_ep *ep, void *address, dma_addr_t dma,
+ unsigned bytes)
+{
+ kfree(address);
+}
+
+static int
+musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
+{
+ struct musb_ep *ep;
+ struct musb_request *req;
+ struct musb *musb;
+ int status;
+ unsigned long lockflags;
+ void __iomem *regs;
+
+ if (!e || !r)
+ return -EINVAL;
+
+ ep = to_musb_ep(e);
+ musb = ep->pThis;
+ regs = musb->control_ep->regs;
+
+ req = to_musb_request(r);
+ req->musb = musb;
+ req->request.actual = 0;
+ req->request.status = -EINPROGRESS;
+ req->bTx = ep->is_in;
+
+ spin_lock_irqsave(&musb->Lock, lockflags);
+
+ if (!list_empty(&ep->req_list)) {
+ status = -EBUSY;
+ goto cleanup;
+ }
+
+ switch (musb->ep0_state) {
+ case MGC_END0_STAGE_RX: /* control-OUT data */
+ case MGC_END0_STAGE_TX: /* control-IN data */
+ case MGC_END0_STAGE_ACKWAIT: /* zero-length data */
+ status = 0;
+ break;
+ default:
+ DBG(1, "ep0 request queued in state %d\n",
+ musb->ep0_state);
+ status = -EINVAL;
+ goto cleanup;
+ }
+
+ /* add request to the list */
+ list_add_tail(&(req->request.list), &(ep->req_list));
+
+ DBG(3, "queue to %s (%s), length=%d\n",
+ ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
+ req->request.length);
+
+ MGC_SelectEnd(musb->pRegs, 0);
+
+ /* sequence #1, IN ... start writing the data */
+ if (musb->ep0_state == MGC_END0_STAGE_TX)
+ ep0_txstate(musb);
+
+ /* sequence #3, no-data ... issue IN status */
+ else if (musb->ep0_state == MGC_END0_STAGE_ACKWAIT) {
+ if (req->request.length)
+ status = -EINVAL;
+ else {
+ musb->ep0_state = MGC_END0_STAGE_STATUSIN;
+ musb_writew(regs, MGC_O_HDRC_CSR0,
+ musb->ackpend | MGC_M_CSR0_P_DATAEND);
+ musb->ackpend = 0;
+ musb_g_ep0_giveback(ep->pThis, r);
+ }
+
+ /* else for sequence #2 (OUT), caller provides a buffer
+ * before the next packet arrives. Deferred responses
+ * (after SETUP is acked) are racy.
+ */
+ } else if (musb->ackpend) {
+ musb_writew(regs, MGC_O_HDRC_CSR0, musb->ackpend);
+ musb->ackpend = 0;
+ }
+
+cleanup:
+ spin_unlock_irqrestore(&musb->Lock, lockflags);
+ return status;
+}
+
+static int
+musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ /* we just won't support this */
+ return -EINVAL;
+}
+
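+/* Halting ep0 means sending a protocol stall for the current control
+ * transfer and returning to the SETUP stage; the resulting SENTSTALL
+ * is acknowledged in musb_g_ep0_irq().
+ */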
+static int musb_g_ep0_halt(struct usb_ep *e, int value)
+{
+ struct musb_ep *ep;
+ struct musb *musb;
+ void __iomem *base, *regs;
+ unsigned long flags;
+ int status;
+ u16 csr;
+
+ if (!e || !value)
+ return -EINVAL;
+
+ ep = to_musb_ep(e);
+ musb = ep->pThis;
+ base = musb->pRegs;
+ regs = musb->control_ep->regs;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+
+ if (!list_empty(&ep->req_list)) {
+ status = -EBUSY;
+ goto cleanup;
+ }
+
+ switch (musb->ep0_state) {
+ case MGC_END0_STAGE_TX: /* control-IN data */
+ case MGC_END0_STAGE_ACKWAIT: /* STALL for zero-length data */
+ case MGC_END0_STAGE_RX: /* control-OUT data */
+ status = 0;
+
+ MGC_SelectEnd(base, 0);
+ csr = musb_readw(regs, MGC_O_HDRC_CSR0);
+ csr |= MGC_M_CSR0_P_SENDSTALL;
+ musb_writew(regs, MGC_O_HDRC_CSR0, csr);
+ musb->ep0_state = MGC_END0_STAGE_SETUP;
+ break;
+ default:
+ DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state);
+ status = -EINVAL;
+ }
+
+cleanup:
+ spin_unlock_irqrestore(&musb->Lock, flags);
+ return status;
+}
+
+struct usb_ep_ops musb_g_ep0_ops = {
+ .enable = musb_g_ep0_enable,
+ .disable = musb_g_ep0_disable,
+ .alloc_request = musb_alloc_request,
+ .free_request = musb_free_request,
+ .alloc_buffer = musb_g_ep0_alloc_buffer,
+ .free_buffer = musb_g_ep0_free_buffer,
+ .queue = musb_g_ep0_queue,
+ .dequeue = musb_g_ep0_dequeue,
+ .set_halt = musb_g_ep0_halt,
+ .fifo_status = NULL,
+ .fifo_flush = NULL,
+};
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/stat.h>
+#include <linux/dma-mapping.h>
+
+#include "musbdefs.h"
+
+
+/* MUSB PERIPHERAL status as of 3 March:
+ *
+ * - EP0 seems solid. It passes both USBCV and usbtest control cases.
+ * Minor glitches:
+ *
+ * + remote wakeup to Linux hosts works, but USBCV failures were
+ * seen in one test run (operator error?)
+ * + endpoint halt tests -- in both usbtest and usbcv -- seem
+ * to break when dma is enabled ... is something wrongly
+ * clearing SENDSTALL?
+ *
+ * - Mass storage behaved ok when last tested. Network traffic patterns
+ * (with lots of short transfers etc) need retesting; they turn up the
+ * worst cases of the DMA, since short packets are typical but are not
+ * required.
+ *
+ * - TX/IN
+ * + both PIO and DMA behave well with network and g_zero tests
+ * + no cppi throughput issues other than no-hw-queueing
+ * + failed with FLAT_REG (DaVinci)
+ * + seems to behave with double buffering, PIO -and- CPPI
+ * + with gadgetfs + AIO, requests got lost?
+ *
+ * - RX/OUT
+ * + both PIO and DMA behave well with network and g_zero tests
+ * + dma is slow in typical case (short_not_ok is clear)
+ * + double buffering ok with PIO
+ * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
+ * + request lossage observed with gadgetfs
+ *
+ * - ISO not tested ... might work, but only weakly isochronous
+ *
+ * - Gadget driver disabling of softconnect during bind() is ignored; so
+ * drivers can't hold off host requests until userspace is ready.
+ * (Workaround: they can turn it off later.)
+ *
+ * - PORTABILITY (assumes PIO works):
+ * + DaVinci, basically works with cppi dma
+ * + OMAP 2430, ditto with mentor dma
+ * + TUSB 6010, platform-specific dma in the works
+ */
+
+/**************************************************************************
+Handling completion
+**************************************************************************/
+
+/*
+ * Immediately complete a request.
+ *
+ * @param ep the endpoint the request was queued to
+ * @param pRequest the request to complete
+ * @param status the status to complete the request with
+ *
+ * Context: controller locked, IRQs blocked.
+ */
+void musb_g_giveback(
+ struct musb_ep *ep,
+ struct usb_request *pRequest,
+ int status)
+__releases(ep->musb->Lock)
+__acquires(ep->musb->Lock)
+{
+ struct musb_request *req;
+ struct musb *musb;
+ int busy = ep->busy;
+
+ req = to_musb_request(pRequest);
+
+ list_del(&pRequest->list);
+ if (req->request.status == -EINPROGRESS)
+ req->request.status = status;
+ musb = req->musb;
+
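+ /* flag the endpoint busy while the lock is dropped, so code
+ * reentered from the completion callback sees it as in use
+ */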
+ ep->busy = 1;
+ spin_unlock(&musb->Lock);
+ if (is_dma_capable() && req->mapped) {
+ dma_unmap_single(musb->controller,
+ req->request.dma,
+ req->request.length,
+ req->bTx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ req->request.dma = DMA_ADDR_INVALID;
+ req->mapped = 0;
+ }
+ if (pRequest->status == 0)
+ DBG(5, "%s done request %p, %d/%d\n",
+ ep->end_point.name, pRequest,
+ req->request.actual, req->request.length);
+ else
+ DBG(2, "%s request %p, %d/%d fault %d\n",
+ ep->end_point.name, pRequest,
+ req->request.actual, req->request.length,
+ pRequest->status);
+ req->request.complete(&req->ep->end_point, &req->request);
+ spin_lock(&musb->Lock);
+ ep->busy = busy;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * Abort requests queued to an endpoint, completing them with the given
+ * status. Synchronous; the caller has locked the controller, blocked
+ * IRQs, and selected this endpoint.
+ */
+static void nuke(struct musb_ep *ep, const int status)
+{
+ struct musb_request *req = NULL;
+
+ ep->busy = 1;
+
+ if (is_dma_capable() && ep->dma) {
+ struct dma_controller *c = ep->pThis->pDmaController;
+ int value;
+
+ value = c->channel_abort(ep->dma);
+ DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
+ c->channel_release(ep->dma);
+ ep->dma = NULL;
+ }
+
+ while (!list_empty(&(ep->req_list))) {
+ req = container_of(ep->req_list.next, struct musb_request,
+ request.list);
+ musb_g_giveback(ep, &req->request, status);
+ }
+}
+
+/**************************************************************************
+ * TX/IN and RX/OUT Data transfers
+ **************************************************************************/
+
+/*
+ * This assumes the separate CPPI engine is responding to DMA requests
+ * from the usb core ... sequenced a bit differently from mentor dma.
+ */
+
+static inline int max_ep_writesize(struct musb *pThis, struct musb_ep *ep)
+{
+ if (can_bulk_split(pThis, ep->type))
+ return ep->hw_ep->wMaxPacketSizeTx;
+ else
+ return ep->wPacketSize;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Peripheral tx (IN) using Mentor DMA works as follows:
+ Only mode 0 is used for transfers <= wPacketSize;
+ mode 1 is used for larger transfers.
+
+ One of the following happens:
+ - Host sends IN token which causes an endpoint interrupt
+ -> TxAvail
+ -> if DMA is currently busy, exit.
+ -> if queue is non-empty, txstate().
+
+ - Request is queued by the gadget driver.
+ -> if queue was previously empty, txstate()
+
+ txstate()
+ -> start
+ /\ -> setup DMA
+ | (data is transferred to the FIFO, then sent out when
+ | IN token(s) are received from the host)
+ | -> DMA interrupt on completion
+ | calls TxAvail.
+ | -> stop DMA, ~DmaEnab,
+ | -> set TxPktRdy for last short pkt or zlp
+ | -> Complete Request
+ | -> Continue next request (call txstate)
+ |___________________________________|
+
+ * Non-Mentor DMA engines can of course work differently, such as by
+ * upleveling from irq-per-packet to irq-per-buffer.
+ */
+
+#endif
+
+/*
+ * An endpoint is transmitting data. This can be called either from
+ * the IRQ routine or from ep.queue() to kickstart a request on an
+ * endpoint.
+ *
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void txstate(struct musb *pThis, struct musb_request *req)
+{
+ u8 bEnd;
+ struct musb_ep *pEnd;
+ struct usb_request *pRequest;
+ void __iomem *pBase = pThis->pRegs;
+ u16 wFifoCount = 0, wCsrVal;
+ int use_dma = 0;
+
+ bEnd = req->bEnd;
+ pEnd = req->ep;
+
+ /* we shouldn't get here while DMA is active ... but we do ... */
+ if (dma_channel_status(pEnd->dma) == MGC_DMA_STATUS_BUSY) {
+ DBG(4, "dma pending...\n");
+ return;
+ }
+
+ /* read TXCSR before */
+ wCsrVal = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+
+ pRequest = &req->request;
+ wFifoCount = min(max_ep_writesize(pThis, pEnd),
+ (int)(pRequest->length - pRequest->actual));
+
+ if (wCsrVal & MGC_M_TXCSR_TXPKTRDY) {
+ DBG(5, "%s old packet still ready , txcsr %03x\n",
+ pEnd->end_point.name, wCsrVal);
+ return;
+ }
+
+ if (wCsrVal & MGC_M_TXCSR_P_SENDSTALL) {
+ DBG(5, "%s stalling, txcsr %03x\n",
+ pEnd->end_point.name, wCsrVal);
+ return;
+ }
+
+ DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
+ bEnd, pEnd->wPacketSize, wFifoCount,
+ wCsrVal);
+
+#ifndef CONFIG_USB_INVENTRA_FIFO
+ if (is_dma_capable() && pEnd->dma) {
+ struct dma_controller *c = pThis->pDmaController;
+
+ use_dma = (pRequest->dma != DMA_ADDR_INVALID);
+
+ /* MGC_M_TXCSR_P_ISO is still set correctly */
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ {
+ size_t request_size;
+
+ /* setup DMA, then program endpoint CSR */
+ request_size = min(pRequest->length,
+ pEnd->dma->dwMaxLength);
+ if (request_size <= pEnd->wPacketSize)
+ pEnd->dma->bDesiredMode = 0;
+ else
+ pEnd->dma->bDesiredMode = 1;
+
+ use_dma = use_dma && c->channel_program(
+ pEnd->dma, pEnd->wPacketSize,
+ pEnd->dma->bDesiredMode,
+ pRequest->dma, request_size);
+ if (use_dma) {
+ if (pEnd->dma->bDesiredMode == 0) {
+ wCsrVal &= ~(MGC_M_TXCSR_AUTOSET |
+ MGC_M_TXCSR_DMAMODE);
+ wCsrVal |= (MGC_M_TXCSR_DMAENAB |
+ MGC_M_TXCSR_MODE);
+ /* against the programming guide */
+				} else {
+					wCsrVal |= (MGC_M_TXCSR_AUTOSET |
+						MGC_M_TXCSR_DMAENAB |
+						MGC_M_TXCSR_DMAMODE |
+						MGC_M_TXCSR_MODE);
+				}
+
+ wCsrVal &= ~MGC_M_TXCSR_P_UNDERRUN;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd,
+ wCsrVal);
+ }
+ }
+
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+ /* program endpoint CSR first, then setup DMA */
+ wCsrVal &= ~(MGC_M_TXCSR_AUTOSET
+ | MGC_M_TXCSR_DMAMODE
+ | MGC_M_TXCSR_P_UNDERRUN
+ | MGC_M_TXCSR_TXPKTRDY);
+ wCsrVal |= MGC_M_TXCSR_MODE | MGC_M_TXCSR_DMAENAB;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd,
+ (MGC_M_TXCSR_P_WZC_BITS & ~MGC_M_TXCSR_P_UNDERRUN)
+ | wCsrVal);
+
+ /* ensure writebuffer is empty */
+ wCsrVal = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+
+ /* NOTE host side sets DMAENAB later than this; both are
+ * OK since the transfer dma glue (between CPPI and Mentor
+ * fifos) just tells CPPI it could start. Data only moves
+ * to the USB TX fifo when both fifos are ready.
+ */
+
+ /* "mode" is irrelevant here; handle terminating ZLPs like
+ * PIO does, since the hardware RNDIS mode seems unreliable
+ * except for the last-packet-is-already-short case.
+ */
+ use_dma = use_dma && c->channel_program(
+ pEnd->dma, pEnd->wPacketSize,
+ 0,
+ pRequest->dma,
+ pRequest->length);
+ if (!use_dma) {
+ c->channel_release(pEnd->dma);
+ pEnd->dma = NULL;
+ wCsrVal &= ~(MGC_M_TXCSR_DMAMODE | MGC_M_TXCSR_MODE);
+		/* invariant: pRequest->buf is non-null */
+ }
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+ use_dma = use_dma && c->channel_program(
+ pEnd->dma, pEnd->wPacketSize,
+ pRequest->zero,
+ pRequest->dma,
+ pRequest->length);
+#endif
+ }
+#endif
+
+ if (!use_dma) {
+ musb_write_fifo(pEnd->hw_ep, wFifoCount,
+ (u8 *) (pRequest->buf + pRequest->actual));
+ pRequest->actual += wFifoCount;
+ wCsrVal |= MGC_M_TXCSR_TXPKTRDY;
+ wCsrVal &= ~MGC_M_TXCSR_P_UNDERRUN;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd, wCsrVal);
+ }
+
+ /* host may already have the data when this message shows... */
+ DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
+ pEnd->end_point.name, use_dma ? "dma" : "pio",
+ pRequest->actual, pRequest->length,
+ MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd),
+ wFifoCount,
+ MGC_ReadCsr16(pBase, MGC_O_HDRC_TXMAXP, bEnd));
+}
+
+/*
+ * FIFO state update (e.g. data ready).
+ * Called from IRQ, with controller locked.
+ */
+void musb_g_tx(struct musb *pThis, u8 bEnd)
+{
+ u16 wCsrVal;
+ struct usb_request *pRequest;
+ u8 __iomem *pBase = pThis->pRegs;
+ struct musb_ep *pEnd;
+ struct dma_channel *dma;
+
+ MGC_SelectEnd(pBase, bEnd);
+ pEnd = &pThis->aLocalEnd[bEnd].ep_in;
+ pRequest = next_request(pEnd);
+
+ wCsrVal = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+ DBG(4, "<== %s, txcsr %04x\n", pEnd->end_point.name, wCsrVal);
+
+ dma = is_dma_capable() ? pEnd->dma : NULL;
+ do {
+ /* REVISIT for high bandwidth, MGC_M_TXCSR_P_INCOMPTX
+ * probably rates reporting as a host error
+ */
+ if (wCsrVal & MGC_M_TXCSR_P_SENTSTALL) {
+ wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
+ wCsrVal &= ~MGC_M_TXCSR_P_SENTSTALL;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd, wCsrVal);
+ if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
+ dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
+ pThis->pDmaController->channel_abort(dma);
+ }
+
+ if (pRequest)
+ musb_g_giveback(pEnd, pRequest, -EPIPE);
+
+ break;
+ }
+
+ if (wCsrVal & MGC_M_TXCSR_P_UNDERRUN) {
+ /* we NAKed, no big deal ... little reason to care */
+ wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
+ wCsrVal &= ~(MGC_M_TXCSR_P_UNDERRUN
+ | MGC_M_TXCSR_TXPKTRDY);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd, wCsrVal);
+ DBG(20, "underrun on ep%d, req %p\n", bEnd, pRequest);
+ }
+
+ if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
+		/* SHOULD NOT HAPPEN ... but it has happened with CPPI,
+		 * after changing SENDSTALL (and other cases); harmless?
+		 */
+ DBG(5, "%s dma still busy?\n", pEnd->end_point.name);
+ break;
+ }
+
+ if (pRequest) {
+ u8 is_dma = 0;
+
+ if (dma && (wCsrVal & MGC_M_TXCSR_DMAENAB)) {
+ is_dma = 1;
+ wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
+ wCsrVal &= ~(MGC_M_TXCSR_DMAENAB
+ | MGC_M_TXCSR_P_UNDERRUN
+ | MGC_M_TXCSR_TXPKTRDY);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd,
+ wCsrVal);
+ /* ensure writebuffer is empty */
+ wCsrVal = MGC_ReadCsr16(pBase,
+ MGC_O_HDRC_TXCSR, bEnd);
+ DBG(4, "TXCSR%d %04x, dma off, "
+ "len %Zd, req %p\n",
+ bEnd, wCsrVal,
+ pEnd->dma->dwActualLength,
+ pRequest);
+ pRequest->actual += pEnd->dma->dwActualLength;
+ }
+
+ if (is_dma || pRequest->actual == pRequest->length) {
+
+ /* First, maybe a terminating short packet.
+ * Some DMA engines might handle this by
+ * themselves.
+ */
+ if ((pRequest->zero
+ && pRequest->length
+ && (pRequest->length
+ % pEnd->wPacketSize)
+ == 0)
+#ifdef CONFIG_USB_INVENTRA_DMA
+ || (is_dma &&
+ (pRequest->actual
+ < pEnd->wPacketSize))
+#endif
+ ) {
+ /* on dma completion, fifo may not
+ * be available yet ...
+ */
+ if (wCsrVal & MGC_M_TXCSR_TXPKTRDY)
+ break;
+
+ DBG(4, "sending zero pkt\n");
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR,
+ bEnd,
+ MGC_M_TXCSR_MODE
+ | MGC_M_TXCSR_TXPKTRDY);
+ }
+
+ /* ... or if not, then complete it */
+ musb_g_giveback(pEnd, pRequest, 0);
+
+ /* kickstart next transfer if appropriate;
+ * the packet that just completed might not
+ * be transmitted for hours or days.
+ * REVISIT for double buffering...
+ * FIXME revisit for stalls too...
+ */
+ MGC_SelectEnd(pBase, bEnd);
+ wCsrVal = MGC_ReadCsr16(pBase,
+ MGC_O_HDRC_TXCSR, bEnd);
+ if (wCsrVal & MGC_M_TXCSR_FIFONOTEMPTY)
+ break;
+ pRequest = pEnd->desc
+ ? next_request(pEnd)
+ : NULL;
+ if (!pRequest) {
+ DBG(4, "%s idle now\n",
+ pEnd->end_point.name);
+ musb_platform_try_idle(pThis);
+ break;
+ }
+ }
+
+ txstate(pThis, to_musb_request(pRequest));
+ }
+
+ } while (0);
+}
+
+/* ------------------------------------------------------------ */
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Peripheral rx (OUT) using Mentor DMA works as follows:
+ - Only mode 0 is used.
+
+ - Request is queued by the gadget class driver.
+ -> if queue was previously empty, rxstate()
+
+ - Host sends OUT token which causes an endpoint interrupt
+ /\ -> RxReady
+ | -> if request queued, call rxstate
+ | /\ -> setup DMA
+ | | -> DMA interrupt on completion
+ | | -> RxReady
+ | | -> stop DMA
+ | | -> ack the read
+ | | -> if data recd = max expected
+ | | by the request, or host
+ | | sent a short packet,
+ | | complete the request,
+ | | and start the next one.
+ | |_____________________________________|
+ | else just wait for the host
+ | to send the next OUT token.
+ |__________________________________________________|
+
+ * Non-Mentor DMA engines can of course work differently.
+ */
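+
+/* A worked example, assuming wPacketSize == 512: a host sending 1000
+ * bytes delivers a 512-byte packet then a 488-byte short packet; each
+ * is DMAed in mode 0 with its own completion interrupt, and the short
+ * packet completes the request even though the buffer isn't full.
+ */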
+
+#endif
+
+/*
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void rxstate(struct musb *pThis, struct musb_request *req)
+{
+ u16 wCsrVal = 0;
+ const u8 bEnd = req->bEnd;
+ struct usb_request *pRequest = &req->request;
+ void __iomem *pBase = pThis->pRegs;
+ struct musb_ep *pEnd = &pThis->aLocalEnd[bEnd].ep_out;
+ u16 wFifoCount = 0;
+ u16 wCount = pEnd->wPacketSize;
+
+ wCsrVal = MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd);
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+ if (is_dma_capable() && pEnd->dma) {
+ struct dma_controller *c = pThis->pDmaController;
+ struct dma_channel *channel = pEnd->dma;
+
+ /* NOTE: CPPI won't actually stop advancing the DMA
+ * queue after short packet transfers, so this is almost
+ * always going to run as IRQ-per-packet DMA so that
+ * faults will be handled correctly.
+ */
+ if (c->channel_program(channel,
+ pEnd->wPacketSize,
+ !pRequest->short_not_ok,
+ pRequest->dma + pRequest->actual,
+ pRequest->length - pRequest->actual)) {
+
+ /* make sure that if an rxpkt arrived after the irq,
+ * the cppi engine will be ready to take it as soon
+ * as DMA is enabled
+ */
+ wCsrVal &= ~(MGC_M_RXCSR_AUTOCLEAR
+ | MGC_M_RXCSR_DMAMODE);
+ wCsrVal |= MGC_M_RXCSR_DMAENAB | MGC_M_RXCSR_P_WZC_BITS;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd, wCsrVal);
+ return;
+ }
+ }
+#endif
+
+ if (wCsrVal & MGC_M_RXCSR_RXPKTRDY) {
+ wCount = MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCOUNT, bEnd);
+ if (pRequest->actual < pRequest->length) {
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (is_dma_capable() && pEnd->dma) {
+ struct dma_controller *c;
+ struct dma_channel *channel;
+ int use_dma = 0;
+
+ c = pThis->pDmaController;
+ channel = pEnd->dma;
+
+ /* We use DMA Req mode 0 in RxCsr, and DMA controller operates in
+ * mode 0 only. So we do not get endpoint interrupts due to DMA
+ * completion. We only get interrupts from DMA controller.
+ *
+			 * We could operate in DMA mode 1 if we knew the size of the transfer
+			 * in advance. For mass storage class, request->length = what the host
+			 * sends, so that'd work. But for pretty much everything else,
+			 * request->length is routinely more than what the host sends. For
+			 * most of these gadgets, the end of a transfer is signified either by
+			 * a short packet, or by filling the last byte of the buffer. (Sending
+			 * extra data in that last packet should trigger an overflow fault.)
+			 * But in mode 1, we don't get a DMA completion interrupt for short
+			 * packets.
+ *
+ * Theoretically, we could enable DMAReq interrupt (RxCsr_DMAMODE = 1),
+ * to get endpoint interrupt on every DMA req, but that didn't seem
+ * to work reliably.
+ *
+ * REVISIT an updated g_file_storage can set req->short_not_ok, which
+ * then becomes usable as a runtime "use mode 1" hint...
+ */
+
+ wCsrVal |= MGC_M_RXCSR_DMAENAB;
+#ifdef USE_MODE1
+ wCsrVal |= MGC_M_RXCSR_AUTOCLEAR;
+// wCsrVal |= MGC_M_RXCSR_DMAMODE;
+
+				/* this special sequence (enabling and then
+				 * disabling MGC_M_RXCSR_DMAMODE) is required
+				 * to get DMAReq to activate
+				 */
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd,
+ wCsrVal | MGC_M_RXCSR_DMAMODE);
+#endif
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd,
+ wCsrVal);
+
+ if (pRequest->actual < pRequest->length) {
+ int transfer_size = 0;
+#ifdef USE_MODE1
+ transfer_size = min(pRequest->length,
+ channel->dwMaxLength);
+#else
+ transfer_size = wCount;
+#endif
+ if (transfer_size <= pEnd->wPacketSize)
+ pEnd->dma->bDesiredMode = 0;
+ else
+ pEnd->dma->bDesiredMode = 1;
+
+ use_dma = c->channel_program(
+ channel,
+ pEnd->wPacketSize,
+ channel->bDesiredMode,
+ pRequest->dma
+ + pRequest->actual,
+ transfer_size);
+ }
+
+ if (use_dma)
+ return;
+ }
+#endif	/* CONFIG_USB_INVENTRA_DMA */
+
+ wFifoCount = pRequest->length - pRequest->actual;
+ DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+ pEnd->end_point.name,
+ wCount, wFifoCount,
+ pEnd->wPacketSize);
+
+ wFifoCount = min(wCount, wFifoCount);
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+ if (tusb_dma_omap() && pEnd->dma) {
+ struct dma_controller *c = pThis->pDmaController;
+ struct dma_channel *channel = pEnd->dma;
+ u32 dma_addr = pRequest->dma + pRequest->actual;
+ int ret;
+
+ ret = c->channel_program(channel,
+ pEnd->wPacketSize,
+ channel->bDesiredMode,
+ dma_addr,
+ wFifoCount);
+			if (ret)
+ return;
+ }
+#endif
+
+ musb_read_fifo(pEnd->hw_ep, wFifoCount,
+ (u8 *) (pRequest->buf +
+ pRequest->actual));
+ pRequest->actual += wFifoCount;
+
+ /* REVISIT if we left anything in the fifo, flush
+ * it and report -EOVERFLOW
+ */
+
+ /* ack the read! */
+ wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
+ wCsrVal &= ~MGC_M_RXCSR_RXPKTRDY;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd, wCsrVal);
+ }
+ }
+
+	/* reached the end, or a short packet was detected */
+ if (pRequest->actual == pRequest->length || wCount < pEnd->wPacketSize)
+ musb_g_giveback(pEnd, pRequest, 0);
+}
+
+/*
+ * Data ready for a request; called from IRQ
+ * @param pThis the controller
+ * @param bEnd the endpoint number
+ */
+void musb_g_rx(struct musb *pThis, u8 bEnd)
+{
+ u16 wCsrVal;
+ struct usb_request *pRequest;
+ void __iomem *pBase = pThis->pRegs;
+ struct musb_ep *pEnd;
+ struct dma_channel *dma;
+
+ MGC_SelectEnd(pBase, bEnd);
+
+ pEnd = &pThis->aLocalEnd[bEnd].ep_out;
+ pRequest = next_request(pEnd);
+
+ wCsrVal = MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd);
+ dma = is_dma_capable() ? pEnd->dma : NULL;
+
+ DBG(4, "<== %s, rxcsr %04x%s %p\n", pEnd->end_point.name,
+ wCsrVal, dma ? " (dma)" : "", pRequest);
+
+ if (wCsrVal & MGC_M_RXCSR_P_SENTSTALL) {
+ if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
+ dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
+ (void) pThis->pDmaController->channel_abort(dma);
+ pRequest->actual += pEnd->dma->dwActualLength;
+ }
+
+ wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
+ wCsrVal &= ~MGC_M_RXCSR_P_SENTSTALL;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd, wCsrVal);
+
+ if (pRequest)
+ musb_g_giveback(pEnd, pRequest, -EPIPE);
+ goto done;
+ }
+
+ if (wCsrVal & MGC_M_RXCSR_P_OVERRUN) {
+ // wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
+ wCsrVal &= ~MGC_M_RXCSR_P_OVERRUN;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd, wCsrVal);
+
+ DBG(3, "%s iso overrun on %p\n", pEnd->name, pRequest);
+ if (pRequest && pRequest->status == -EINPROGRESS)
+ pRequest->status = -EOVERFLOW;
+ }
+ if (wCsrVal & MGC_M_RXCSR_INCOMPRX) {
+ /* REVISIT not necessarily an error */
+ DBG(4, "%s, incomprx\n", pEnd->end_point.name);
+ }
+
+ if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
+ /* "should not happen"; likely RXPKTRDY pending for DMA */
+ DBG((wCsrVal & MGC_M_RXCSR_DMAENAB) ? 4 : 1,
+ "%s busy, csr %04x\n",
+ pEnd->end_point.name, wCsrVal);
+ goto done;
+ }
+
+ if (dma && (wCsrVal & MGC_M_RXCSR_DMAENAB)) {
+ wCsrVal &= ~(MGC_M_RXCSR_AUTOCLEAR |
+ MGC_M_RXCSR_DMAENAB |
+ MGC_M_RXCSR_DMAMODE);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd,
+ MGC_M_RXCSR_P_WZC_BITS | wCsrVal);
+
+ pRequest->actual += pEnd->dma->dwActualLength;
+
+ DBG(4, "RXCSR%d %04x, dma off, %04x, len %Zd, req %p\n",
+ bEnd, wCsrVal,
+ MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd),
+ pEnd->dma->dwActualLength, pRequest);
+
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
+ /* Autoclear doesn't clear RxPktRdy for short packets */
+ if ((dma->bDesiredMode == 0) ||
+ (dma->dwActualLength & (pEnd->wPacketSize - 1))) {
+ /* ack the read! */
+ wCsrVal &= ~MGC_M_RXCSR_RXPKTRDY;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd, wCsrVal);
+ }
+
+ /* incomplete, and not short? wait for next IN packet */
+ if ((pRequest->actual < pRequest->length)
+ && (pEnd->dma->dwActualLength
+ == pEnd->wPacketSize))
+ goto done;
+#endif
+ musb_g_giveback(pEnd, pRequest, 0);
+
+ pRequest = next_request(pEnd);
+ if (!pRequest)
+ goto done;
+
+ /* don't start more i/o till the stall clears */
+ MGC_SelectEnd(pBase, bEnd);
+ wCsrVal = MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd);
+ if (wCsrVal & MGC_M_RXCSR_P_SENDSTALL)
+ goto done;
+ }
+
+ /* analyze request if the ep is hot */
+ if (pRequest)
+ rxstate(pThis, to_musb_request(pRequest));
+ else
+ DBG(3, "packet waiting for %s%s request\n",
+ pEnd->desc ? "" : "inactive ",
+ pEnd->end_point.name);
+
+done:
+ return;
+}
+
+/* ------------------------------------------------------------ */
+
+static int musb_gadget_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ unsigned long flags;
+ struct musb_ep *pEnd;
+ struct musb *pThis;
+ void __iomem *pBase;
+ u8 bEnd;
+ u16 csr;
+ unsigned tmp;
+ int status = -EINVAL;
+
+ if (!ep || !desc)
+ return -EINVAL;
+
+ pEnd = to_musb_ep(ep);
+ pThis = pEnd->pThis;
+ pBase = pThis->pRegs;
+ bEnd = pEnd->bEndNumber;
+
+ spin_lock_irqsave(&pThis->Lock, flags);
+
+ if (pEnd->desc) {
+ status = -EBUSY;
+ goto fail;
+ }
+ pEnd->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ /* check direction and (later) maxpacket size against endpoint */
+ if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != bEnd)
+ goto fail;
+
+ /* REVISIT this rules out high bandwidth periodic transfers */
+ tmp = le16_to_cpu(desc->wMaxPacketSize);
+ if (tmp & ~0x07ff)
+ goto fail;
+ pEnd->wPacketSize = tmp;
+
+ /* enable the interrupts for the endpoint, set the endpoint
+ * packet size (or fail), set the mode, clear the fifo
+ */
+ MGC_SelectEnd(pBase, bEnd);
+ if (desc->bEndpointAddress & USB_DIR_IN) {
+ u16 wIntrTxE = musb_readw(pBase, MGC_O_HDRC_INTRTXE);
+
+ if (pEnd->hw_ep->bIsSharedFifo)
+ pEnd->is_in = 1;
+ if (!pEnd->is_in)
+ goto fail;
+ if (tmp > pEnd->hw_ep->wMaxPacketSizeTx)
+ goto fail;
+
+ wIntrTxE |= (1 << bEnd);
+ musb_writew(pBase, MGC_O_HDRC_INTRTXE, wIntrTxE);
+
+ /* REVISIT if can_bulk_split(), use by updating "tmp";
+ * likewise high bandwidth periodic tx
+ */
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXMAXP, bEnd, tmp);
+
+ csr = MGC_M_TXCSR_MODE | MGC_M_TXCSR_CLRDATATOG
+ | MGC_M_TXCSR_FLUSHFIFO;
+ if (pEnd->type == USB_ENDPOINT_XFER_ISOC)
+ csr |= MGC_M_TXCSR_P_ISO;
+
+ /* set twice in case of double buffering */
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd, csr);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd, csr);
+
+ } else {
+ u16 wIntrRxE = musb_readw(pBase, MGC_O_HDRC_INTRRXE);
+
+ if (pEnd->hw_ep->bIsSharedFifo)
+ pEnd->is_in = 0;
+ if (pEnd->is_in)
+ goto fail;
+ if (tmp > pEnd->hw_ep->wMaxPacketSizeRx)
+ goto fail;
+
+ wIntrRxE |= (1 << bEnd);
+ musb_writew(pBase, MGC_O_HDRC_INTRRXE, wIntrRxE);
+
+ /* REVISIT if can_bulk_combine() use by updating "tmp"
+ * likewise high bandwidth periodic rx
+ */
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXMAXP, bEnd, tmp);
+
+ /* force shared fifo to OUT-only mode */
+ if (pEnd->hw_ep->bIsSharedFifo) {
+ csr = musb_readw(pBase, MGC_O_HDRC_TXCSR);
+ csr &= ~(MGC_M_TXCSR_MODE | MGC_M_TXCSR_TXPKTRDY);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd, csr);
+ }
+
+ csr = MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_CLRDATATOG;
+ if (pEnd->type == USB_ENDPOINT_XFER_ISOC)
+ csr |= MGC_M_RXCSR_P_ISO;
+ else if (pEnd->type == USB_ENDPOINT_XFER_INT)
+ csr |= MGC_M_RXCSR_DISNYET;
+
+ /* set twice in case of double buffering */
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd, csr);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd, csr);
+ }
+
+ /* NOTE: all the I/O code _should_ work fine without DMA, in case
+ * for some reason you run out of channels here.
+ */
+ if (is_dma_capable() && pThis->pDmaController) {
+ struct dma_controller *c = pThis->pDmaController;
+
+ pEnd->dma = c->channel_alloc(c, pEnd->hw_ep,
+ (desc->bEndpointAddress & USB_DIR_IN));
+ } else
+ pEnd->dma = NULL;
+
+ pEnd->desc = desc;
+ pEnd->busy = 0;
+ status = 0;
+
+ pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
+ musb_driver_name, pEnd->end_point.name,
+ ({ char *s; switch (pEnd->type) {
+ case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
+ case USB_ENDPOINT_XFER_INT: s = "int"; break;
+ default: s = "iso"; break;
+ }; s; }),
+ pEnd->is_in ? "IN" : "OUT",
+ pEnd->dma ? "dma, " : "",
+ pEnd->wPacketSize);
+
+ pThis->status |= MUSB_VBUS_STATUS_CHG;
+ schedule_work(&pThis->irq_work);
+
+fail:
+ spin_unlock_irqrestore(&pThis->Lock, flags);
+ return status;
+}
+
+/*
+ * Disable an endpoint flushing all requests queued.
+ */
+static int musb_gadget_disable(struct usb_ep *ep)
+{
+ unsigned long flags;
+ struct musb *pThis;
+ u8 bEnd;
+ struct musb_ep *pEnd;
+ int status = 0;
+
+ pEnd = to_musb_ep(ep);
+ pThis = pEnd->pThis;
+ bEnd = pEnd->bEndNumber;
+
+ spin_lock_irqsave(&pThis->Lock, flags);
+ MGC_SelectEnd(pThis->pRegs, bEnd);
+
+ /* zero the endpoint sizes */
+ if (pEnd->is_in) {
+ u16 wIntrTxE = musb_readw(pThis->pRegs, MGC_O_HDRC_INTRTXE);
+ wIntrTxE &= ~(1 << bEnd);
+ musb_writew(pThis->pRegs, MGC_O_HDRC_INTRTXE, wIntrTxE);
+ MGC_WriteCsr16(pThis->pRegs, MGC_O_HDRC_TXMAXP, bEnd, 0);
+ } else {
+ u16 wIntrRxE = musb_readw(pThis->pRegs, MGC_O_HDRC_INTRRXE);
+ wIntrRxE &= ~(1 << bEnd);
+ musb_writew(pThis->pRegs, MGC_O_HDRC_INTRRXE, wIntrRxE);
+ MGC_WriteCsr16(pThis->pRegs, MGC_O_HDRC_RXMAXP, bEnd, 0);
+ }
+
+ pEnd->desc = NULL;
+
+ /* abort all pending DMA and requests */
+ nuke(pEnd, -ESHUTDOWN);
+
+ pThis->status |= MUSB_VBUS_STATUS_CHG; /* FIXME not for ep_disable!! */
+ schedule_work(&pThis->irq_work);
+
+ spin_unlock_irqrestore(&(pThis->Lock), flags);
+
+ DBG(2, "%s\n", pEnd->end_point.name);
+
+ return status;
+}
+
+/*
+ * Allocate a request for an endpoint.
+ * Reused by ep0 code.
+ */
+struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct musb_request *pRequest = NULL;
+
+	pRequest = kzalloc(sizeof *pRequest, gfp_flags);
+	if (!pRequest)
+		return NULL;
+
+	INIT_LIST_HEAD(&pRequest->request.list);
+	pRequest->request.dma = DMA_ADDR_INVALID;
+	pRequest->bEnd = musb_ep->bEndNumber;
+	pRequest->ep = musb_ep;
+
+	return &pRequest->request;
+}
+
+/*
+ * Free a request
+ * Reused by ep0 code.
+ */
+void musb_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(to_musb_request(req));
+}
+
+/*
+ * dma-coherent memory allocation (for dma-capable endpoints)
+ *
+ * NOTE: the dma_*_coherent() API calls suck; most implementations are
+ * (a) page-oriented, so small buffers lose big, and (b) asymmetric with
+ * respect to calls with irqs disabled: alloc is safe, free is not.
+ */
+static void *musb_gadget_alloc_buffer(struct usb_ep *ep, unsigned bytes,
+ dma_addr_t * dma, gfp_t gfp_flags)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+
+ return dma_alloc_coherent(musb_ep->pThis->controller,
+ bytes, dma, gfp_flags);
+}
+
+static DEFINE_SPINLOCK(buflock);
+static LIST_HEAD(buffers);
+
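+/* Freeing is deferred to a tasklet since dma_free_coherent() may not
+ * be called with IRQs disabled (see the NOTE above).  Each record is
+ * overlaid on the start of the buffer being freed, so no allocation is
+ * needed; do_free() passes the record pointer itself as the cpu address.
+ */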
+struct free_record {
+ struct list_head list;
+ struct device *dev;
+ unsigned bytes;
+ dma_addr_t dma;
+};
+
+static void do_free(unsigned long ignored)
+{
+ spin_lock_irq(&buflock);
+ while (!list_empty(&buffers)) {
+ struct free_record *buf;
+
+ buf = list_entry(buffers.next, struct free_record, list);
+ list_del(&buf->list);
+ spin_unlock_irq(&buflock);
+
+ dma_free_coherent(buf->dev, buf->bytes, buf, buf->dma);
+
+ spin_lock_irq(&buflock);
+ }
+ spin_unlock_irq(&buflock);
+}
+
+static DECLARE_TASKLET(deferred_free, do_free, 0);
+
+static void musb_gadget_free_buffer(struct usb_ep *ep,
+ void *address, dma_addr_t dma, unsigned bytes)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct free_record *buf = address;
+ unsigned long flags;
+
+ buf->dev = musb_ep->pThis->controller;
+ buf->bytes = bytes;
+ buf->dma = dma;
+
+ spin_lock_irqsave(&buflock, flags);
+ list_add_tail(&buf->list, &buffers);
+ tasklet_schedule(&deferred_free);
+ spin_unlock_irqrestore(&buflock, flags);
+}
+
+/*
+ * Context: controller locked, IRQs blocked.
+ */
+static void musb_ep_restart(struct musb *pThis, struct musb_request *req)
+{
+ DBG(3, "<== %s request %p len %u on hw_ep%d\n",
+ req->bTx ? "TX/IN" : "RX/OUT",
+ &req->request, req->request.length, req->bEnd);
+
+ MGC_SelectEnd(pThis->pRegs, req->bEnd);
+ if (req->bTx)
+ txstate(pThis, req);
+ else
+ rxstate(pThis, req);
+}
+
+static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
+ gfp_t gfp_flags)
+{
+ struct musb_ep *pEnd;
+ struct musb_request *pRequest;
+ struct musb *pThis;
+ int status = 0;
+ unsigned long lockflags;
+
+ if (!ep || !req)
+ return -EINVAL;
+
+ pEnd = to_musb_ep(ep);
+ pThis = pEnd->pThis;
+
+ pRequest = to_musb_request(req);
+ pRequest->musb = pThis;
+
+ if (pRequest->ep != pEnd)
+ return -EINVAL;
+
+ DBG(4, "<== to %s request=%p\n", ep->name, req);
+
+ /* request is mine now... */
+ pRequest->request.actual = 0;
+ pRequest->request.status = -EINPROGRESS;
+ pRequest->bEnd = pEnd->bEndNumber;
+ pRequest->bTx = pEnd->is_in;
+
+ if (is_dma_capable()
+ && pRequest->request.dma == DMA_ADDR_INVALID
+ && pRequest->request.length >= MIN_DMA_REQUEST
+ && pEnd->dma) {
+ pRequest->request.dma = dma_map_single(pThis->controller,
+ pRequest->request.buf,
+ pRequest->request.length,
+ pRequest->bTx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ pRequest->mapped = 1;
+	} else if (!req->buf) {
+		return -ENODATA;
+	} else {
+		pRequest->mapped = 0;
+	}
+
+ spin_lock_irqsave(&pThis->Lock, lockflags);
+
+ /* don't queue if the ep is down */
+ if (!pEnd->desc) {
+ DBG(4, "req %p queued to %s while ep %s\n",
+ req, ep->name, "disabled");
+ status = -ESHUTDOWN;
+ goto cleanup;
+ }
+
+ /* add pRequest to the list */
+ list_add_tail(&(pRequest->request.list), &(pEnd->req_list));
+
+	/* if this is the head of the queue, start i/o ... */
+ if (!pEnd->busy && &pRequest->request.list == pEnd->req_list.next)
+ musb_ep_restart(pThis, pRequest);
+
+cleanup:
+ spin_unlock_irqrestore(&pThis->Lock, lockflags);
+ return status;
+}
+
+static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *pRequest)
+{
+ struct musb_ep *pEnd = to_musb_ep(ep);
+ struct usb_request *r;
+ unsigned long flags;
+ int status = 0;
+
+ if (!ep || !pRequest || to_musb_request(pRequest)->ep != pEnd)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pEnd->pThis->Lock, flags);
+
+ list_for_each_entry(r, &pEnd->req_list, list) {
+ if (r == pRequest)
+ break;
+ }
+ if (r != pRequest) {
+ DBG(3, "request %p not queued to %s\n", pRequest, ep->name);
+ status = -EINVAL;
+ goto done;
+ }
+
+ /* if the hardware doesn't have the request, easy ... */
+ if (pEnd->req_list.next != &pRequest->list || pEnd->busy)
+ musb_g_giveback(pEnd, pRequest, -ECONNRESET);
+
+ /* ... else abort the dma transfer ... */
+ else if (is_dma_capable() && pEnd->dma) {
+ struct dma_controller *c = pEnd->pThis->pDmaController;
+
+ MGC_SelectEnd(pEnd->pThis->pRegs, pEnd->bEndNumber);
+ if (c->channel_abort)
+ status = c->channel_abort(pEnd->dma);
+ else
+ status = -EBUSY;
+ if (status == 0)
+ musb_g_giveback(pEnd, pRequest, -ECONNRESET);
+ } else {
+ /* NOTE: by sticking to easily tested hardware/driver states,
+ * we leave counting of in-flight packets imprecise.
+ */
+ musb_g_giveback(pEnd, pRequest, -ECONNRESET);
+ }
+
+done:
+ spin_unlock_irqrestore(&pEnd->pThis->Lock, flags);
+ return status;
+}
+
+/*
+ * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
+ * data but will queue requests.
+ *
+ * exported to ep0 code
+ */
+int musb_gadget_set_halt(struct usb_ep *ep, int value)
+{
+ struct musb_ep *pEnd;
+ u8 bEnd;
+ struct musb *pThis;
+ void __iomem *pBase;
+ unsigned long flags;
+ u16 wCsr;
+ struct musb_request *pRequest = NULL;
+ int status = 0;
+
+ if (!ep)
+ return -EINVAL;
+
+ pEnd = to_musb_ep(ep);
+ bEnd = pEnd->bEndNumber;
+ pThis = pEnd->pThis;
+ pBase = pThis->pRegs;
+
+ spin_lock_irqsave(&pThis->Lock, flags);
+
+	if (USB_ENDPOINT_XFER_ISOC == pEnd->type) {
+ status = -EINVAL;
+ goto done;
+ }
+
+ MGC_SelectEnd(pBase, bEnd);
+
+ /* cannot portably stall with non-empty FIFO */
+ pRequest = to_musb_request(next_request(pEnd));
+ if (value && pEnd->is_in) {
+ wCsr = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+ if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY) {
+ DBG(3, "%s fifo busy, cannot halt\n", ep->name);
+ spin_unlock_irqrestore(&pThis->Lock, flags);
+ return -EAGAIN;
+		}
+	}
+
+ /* set/clear the stall and toggle bits */
+ DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
+ if (pEnd->is_in) {
+ wCsr = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+ wCsr |= MGC_M_TXCSR_P_WZC_BITS
+ | MGC_M_TXCSR_CLRDATATOG
+ | MGC_M_TXCSR_FLUSHFIFO;
+ if (value)
+ wCsr |= MGC_M_TXCSR_P_SENDSTALL;
+ else
+ wCsr &= ~(MGC_M_TXCSR_P_SENDSTALL
+ | MGC_M_TXCSR_P_SENTSTALL);
+ wCsr &= ~MGC_M_TXCSR_TXPKTRDY;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd, wCsr);
+ } else {
+ wCsr = MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd);
+ wCsr |= MGC_M_RXCSR_P_WZC_BITS
+ | MGC_M_RXCSR_FLUSHFIFO
+ | MGC_M_RXCSR_CLRDATATOG;
+ if (value)
+ wCsr |= MGC_M_RXCSR_P_SENDSTALL;
+ else
+ wCsr &= ~(MGC_M_RXCSR_P_SENDSTALL
+ | MGC_M_RXCSR_P_SENTSTALL);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd, wCsr);
+ }
+
+done:
+
+ /* maybe start the first request in the queue */
+ if (!pEnd->busy && !value && pRequest) {
+ DBG(3, "restarting the request\n");
+ musb_ep_restart(pThis, pRequest);
+ }
+
+ spin_unlock_irqrestore(&pThis->Lock, flags);
+ return status;
+}
+
+static int musb_gadget_fifo_status(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ int retval = -EINVAL;
+
+ if (musb_ep->desc && !musb_ep->is_in) {
+ struct musb *musb = musb_ep->pThis;
+ int bEnd = musb_ep->bEndNumber;
+ void __iomem *mbase = musb->pRegs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+
+ MGC_SelectEnd(mbase, bEnd);
+ /* FIXME return zero unless RXPKTRDY is set */
+ retval = MGC_ReadCsr16(mbase, MGC_O_HDRC_RXCOUNT, bEnd);
+
+ spin_unlock_irqrestore(&musb->Lock, flags);
+ }
+ return retval;
+}
+
+static void musb_gadget_fifo_flush(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct musb *musb;
+ void __iomem *mbase;
+ u8 nEnd;
+ unsigned long flags;
+ u16 wCsr, wIntrTxE;
+
+ musb = musb_ep->pThis;
+ mbase = musb->pRegs;
+ nEnd = musb_ep->bEndNumber;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+ MGC_SelectEnd(mbase, (u8) nEnd);
+
+ /* disable interrupts */
+ wIntrTxE = musb_readw(mbase, MGC_O_HDRC_INTRTXE);
+ musb_writew(mbase, MGC_O_HDRC_INTRTXE, wIntrTxE & ~(1 << nEnd));
+
+ if (musb_ep->is_in) {
+ wCsr = MGC_ReadCsr16(mbase, MGC_O_HDRC_TXCSR, nEnd);
+ wCsr |= MGC_M_TXCSR_FLUSHFIFO | MGC_M_TXCSR_P_WZC_BITS;
+ MGC_WriteCsr16(mbase, MGC_O_HDRC_TXCSR, nEnd, wCsr);
+ MGC_WriteCsr16(mbase, MGC_O_HDRC_TXCSR, nEnd, wCsr);
+ } else {
+ wCsr = MGC_ReadCsr16(mbase, MGC_O_HDRC_RXCSR, nEnd);
+ wCsr |= MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_P_WZC_BITS;
+ MGC_WriteCsr16(mbase, MGC_O_HDRC_RXCSR, nEnd, wCsr);
+ MGC_WriteCsr16(mbase, MGC_O_HDRC_RXCSR, nEnd, wCsr);
+ }
+
+ /* re-enable interrupt */
+ musb_writew(mbase, MGC_O_HDRC_INTRTXE, wIntrTxE);
+ spin_unlock_irqrestore(&musb->Lock, flags);
+}
+
+static const struct usb_ep_ops musb_ep_ops = {
+ .enable = musb_gadget_enable,
+ .disable = musb_gadget_disable,
+ .alloc_request = musb_alloc_request,
+ .free_request = musb_free_request,
+ .alloc_buffer = musb_gadget_alloc_buffer,
+ .free_buffer = musb_gadget_free_buffer,
+ .queue = musb_gadget_queue,
+ .dequeue = musb_gadget_dequeue,
+ .set_halt = musb_gadget_set_halt,
+ .fifo_status = musb_gadget_fifo_status,
+ .fifo_flush = musb_gadget_fifo_flush
+};
+
+/***********************************************************************/
+
+static int musb_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct musb *pThis = gadget_to_musb(gadget);
+
+ return (int)musb_readw(pThis->pRegs, MGC_O_HDRC_FRAME);
+}
+
+static int musb_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+ unsigned long flags;
+ int status = 0;
+ u8 power;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_PERIPHERAL:
+ /* FIXME if not suspended, fail */
+ if (musb->bMayWakeup)
+ break;
+ goto fail;
+ case OTG_STATE_B_IDLE:
+ /* REVISIT we might be able to do SRP even without OTG,
+ * though Linux doesn't yet expose that capability
+ */
+ if (is_otg_enabled(musb)) {
+ musb->xceiv.state = OTG_STATE_B_SRP_INIT;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+fail:
+ status = -EINVAL;
+ goto done;
+ }
+
+ power = musb_readb(musb->pRegs, MGC_O_HDRC_POWER);
+ power |= MGC_M_POWER_RESUME;
+ musb_writeb(musb->pRegs, MGC_O_HDRC_POWER, power);
+
+ /* FIXME do this next chunk in a timer callback, no udelay */
+ mdelay(10);
+
+ power = musb_readb(musb->pRegs, MGC_O_HDRC_POWER);
+ power &= ~MGC_M_POWER_RESUME;
+ musb_writeb(musb->pRegs, MGC_O_HDRC_POWER, power);
+
+done:
+ spin_unlock_irqrestore(&musb->Lock, flags);
+ return status;
+}
+
+static int
+musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
+{
+ struct musb *pThis = gadget_to_musb(gadget);
+
+ pThis->bIsSelfPowered = !!is_selfpowered;
+ return 0;
+}
+
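+/* Drive the D+ pullup via the SOFTCONN bit in POWER: set, the
+ * peripheral is visible to the host; clear, it looks disconnected.
+ * Caller holds the controller lock.
+ */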
+static void musb_pullup(struct musb *musb, int is_on)
+{
+ u8 power;
+
+ power = musb_readb(musb->pRegs, MGC_O_HDRC_POWER);
+ if (is_on)
+ power |= MGC_M_POWER_SOFTCONN;
+ else
+ power &= ~MGC_M_POWER_SOFTCONN;
+
+ /* FIXME if on, HdrcStart; if off, HdrcStop */
+
+ DBG(3, "gadget %s D+ pullup %s\n",
+ musb->pGadgetDriver->function, is_on ? "on" : "off");
+ musb_writeb(musb->pRegs, MGC_O_HDRC_POWER, power);
+}
+
+#if 0
+static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ DBG(2, "<= %s =>\n", __FUNCTION__);
+
+ // FIXME iff driver's softconnect flag is set (as it is during probe,
+ // though that can clear it), just musb_pullup().
+
+ return -EINVAL;
+}
+
+static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+	/* FIXME -- delegate to otg_transceiver logic */
+
+ DBG(2, "<= vbus_draw %u =>\n", mA);
+ return 0;
+}
+#endif
+
+static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+ unsigned long flags;
+
+ is_on = !!is_on;
+
+ /* NOTE: this assumes we are sensing vbus; we'd rather
+ * not pullup unless the B-session is active.
+ */
+ spin_lock_irqsave(&musb->Lock, flags);
+ if (is_on != musb->softconnect) {
+ musb->softconnect = is_on;
+ musb_pullup(musb, is_on);
+ }
+ spin_unlock_irqrestore(&musb->Lock, flags);
+ return 0;
+}
+
+static const struct usb_gadget_ops musb_gadget_operations = {
+ .get_frame = musb_gadget_get_frame,
+ .wakeup = musb_gadget_wakeup,
+ .set_selfpowered = musb_gadget_set_self_powered,
+ //.vbus_session = musb_gadget_vbus_session,
+ //.vbus_draw = musb_gadget_vbus_draw,
+ .pullup = musb_gadget_pullup,
+};
+
+/****************************************************************
+ * Registration operations
+ ****************************************************************/
+
+/* Only this registration code "knows" the rule (from USB standards)
+ * about there being only one external upstream port. It assumes
+ * all peripheral ports are external...
+ */
+static struct musb *the_gadget;
+
+static void musb_gadget_release(struct device *dev)
+{
+ // kref_put(WHAT)
+ dev_dbg(dev, "%s\n", __FUNCTION__);
+}
+
+
+static void __devinit
+init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 bEnd, int is_in)
+{
+ struct musb_hw_ep *hw_ep = musb->aLocalEnd + bEnd;
+
+ memset(ep, 0, sizeof *ep);
+
+ ep->bEndNumber = bEnd;
+ ep->pThis = musb;
+ ep->hw_ep = hw_ep;
+ ep->is_in = is_in;
+
+ INIT_LIST_HEAD(&ep->req_list);
+
+ sprintf(ep->name, "ep%d%s", bEnd,
+ (!bEnd || hw_ep->bIsSharedFifo) ? "" : (
+ is_in ? "in" : "out"));
+ ep->end_point.name = ep->name;
+ INIT_LIST_HEAD(&ep->end_point.ep_list);
+ if (!bEnd) {
+ ep->end_point.maxpacket = 64;
+ ep->end_point.ops = &musb_g_ep0_ops;
+ musb->g.ep0 = &ep->end_point;
+ } else {
+ if (is_in)
+ ep->end_point.maxpacket = hw_ep->wMaxPacketSizeTx;
+ else
+ ep->end_point.maxpacket = hw_ep->wMaxPacketSizeRx;
+ ep->end_point.ops = &musb_ep_ops;
+ list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
+ }
+ DBG(4, "periph: %s, maxpacket %d\n", ep->end_point.name,
+ ep->end_point.maxpacket);
+}
+
+/*
+ * Initialize the endpoints exposed to peripheral drivers, with backlinks
+ * to the rest of the driver state.
+ */
+static inline void __devinit musb_g_init_endpoints(struct musb *pThis)
+{
+ u8 bEnd;
+ struct musb_hw_ep *hw_ep;
+ unsigned count = 0;
+
+ /* intialize endpoint list just once */
+ INIT_LIST_HEAD(&(pThis->g.ep_list));
+
+ for (bEnd = 0, hw_ep = pThis->aLocalEnd;
+ bEnd < pThis->bEndCount;
+ bEnd++, hw_ep++) {
+ if (hw_ep->bIsSharedFifo /* || !bEnd */) {
+ init_peripheral_ep(pThis, &hw_ep->ep_in, bEnd, 0);
+ count++;
+ } else {
+ if (hw_ep->wMaxPacketSizeTx) {
+ init_peripheral_ep(pThis, &hw_ep->ep_in, bEnd, 1);
+ count++;
+ }
+ if (hw_ep->wMaxPacketSizeRx) {
+ init_peripheral_ep(pThis, &hw_ep->ep_out, bEnd, 0);
+ count++;
+ }
+ }
+ }
+ DBG(2, "initialized %d (max %d) endpoints\n", count,
+ pThis->bEndCount * 2 - 1);
+}
+
+/* called once during driver setup to initialize and link into
+ * the driver model; memory is zeroed.
+ */
+int __devinit musb_gadget_setup(struct musb *pThis)
+{
+ int status;
+
+ /* REVISIT minor race: if (erroneously) setting up two
+ * musb peripherals at the same time, only the bus lock
+ * is probably held.
+ */
+ if (the_gadget)
+ return -EBUSY;
+ the_gadget = pThis;
+
+ pThis->g.ops = &musb_gadget_operations;
+ pThis->g.is_dualspeed = 1;
+ pThis->g.speed = USB_SPEED_UNKNOWN;
+#ifdef CONFIG_USB_MUSB_OTG
+ if (pThis->board_mode == MUSB_OTG)
+ pThis->g.is_otg = 1;
+#endif
+
+ /* this "gadget" abstracts/virtualizes the controller */
+ strcpy(pThis->g.dev.bus_id, "gadget");
+ pThis->g.dev.parent = pThis->controller;
+ pThis->g.dev.dma_mask = pThis->controller->dma_mask;
+ pThis->g.dev.release = musb_gadget_release;
+ pThis->g.name = musb_driver_name;
+
+ musb_g_init_endpoints(pThis);
+
+ status = device_register(&pThis->g.dev);
+ if (status != 0)
+ the_gadget = NULL;
+ return status;
+}
+
+void musb_gadget_cleanup(struct musb *pThis)
+{
+ if (pThis != the_gadget)
+ return;
+
+ device_unregister(&pThis->g.dev);
+ the_gadget = NULL;
+}
+
+/*
+ * Register the gadget driver. Used by gadget drivers when
+ * registering themselves with the controller.
+ *
+ * -EINVAL something went wrong (not driver)
+ * -EBUSY another gadget is already using the controller
+ * -ENOMEM no memory to perform the operation
+ *
+ * @param driver the gadget driver
+ * @return <0 if error, 0 if everything is fine
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+ int retval;
+ unsigned long flags;
+ struct musb *pThis = the_gadget;
+
+ if (!driver
+ || driver->speed != USB_SPEED_HIGH
+ || !driver->bind
+ || !driver->unbind
+ || !driver->setup)
+ return -EINVAL;
+
+	/* driver must be initialized to support peripheral mode */
+	if (!pThis || !(pThis->board_mode == MUSB_OTG
+			|| pThis->board_mode == MUSB_PERIPHERAL)) {
+		DBG(1, "%s, no dev??\n", __FUNCTION__);
+		return -ENODEV;
+	}
+
+ DBG(3, "registering driver %s\n", driver->function);
+ spin_lock_irqsave(&pThis->Lock, flags);
+
+ if (pThis->pGadgetDriver) {
+ DBG(1, "%s is already bound to %s\n",
+ musb_driver_name,
+ pThis->pGadgetDriver->driver.name);
+ retval = -EBUSY;
+ } else {
+ pThis->pGadgetDriver = driver;
+ pThis->g.dev.driver = &driver->driver;
+ driver->driver.bus = NULL;
+ pThis->softconnect = 1;
+ retval = 0;
+ }
+
+ spin_unlock_irqrestore(&pThis->Lock, flags);
+
+ if (retval == 0)
+ retval = driver->bind(&pThis->g);
+ if (retval != 0) {
+ DBG(3, "bind to driver %s failed --> %d\n",
+ driver->driver.name, retval);
+ pThis->pGadgetDriver = NULL;
+ pThis->g.dev.driver = NULL;
+ }
+
+ /* start peripheral and/or OTG engines */
+ if (retval == 0) {
+ spin_lock_irqsave(&pThis->Lock, flags);
+
+ /* REVISIT always use otg_set_peripheral(), handling
+ * issues including the root hub one below ...
+ */
+ pThis->xceiv.gadget = &pThis->g;
+ pThis->xceiv.state = OTG_STATE_B_IDLE;
+
+ /* FIXME this ignores the softconnect flag. Drivers are
+		 * allowed to hold the peripheral inactive until, for example,
+ * userspace hooks up printer hardware or DSP codecs, so
+ * hosts only see fully functional devices.
+ */
+
+ musb_start(pThis);
+ spin_unlock_irqrestore(&pThis->Lock, flags);
+
+#ifdef CONFIG_USB_MUSB_OTG
+ if (pThis->board_mode == MUSB_OTG) {
+ DBG(3, "OTG startup...\n");
+
+ /* REVISIT: funcall to other code, which also
+ * handles power budgeting ... this way also
+ * ensures HdrcStart is indirectly called.
+ */
+ retval = usb_add_hcd(musb_to_hcd(pThis), -1, 0);
+ if (retval < 0) {
+ DBG(1, "add_hcd failed, %d\n", retval);
+ spin_lock_irqsave(&pThis->Lock, flags);
+ pThis->xceiv.gadget = NULL;
+ pThis->xceiv.state = OTG_STATE_UNDEFINED;
+ pThis->pGadgetDriver = NULL;
+ pThis->g.dev.driver = NULL;
+ spin_unlock_irqrestore(&pThis->Lock, flags);
+ }
+ }
+#endif
+ }
+
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+static void
+stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
+{
+ int i;
+ struct musb_hw_ep *hw_ep;
+
+ /* don't disconnect if it's not connected */
+ if (musb->g.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+ else
+ musb->g.speed = USB_SPEED_UNKNOWN;
+
+ /* deactivate the hardware */
+ if (musb->softconnect) {
+ musb->softconnect = 0;
+ musb_pullup(musb, 0);
+ }
+ musb_stop(musb);
+
+ /* killing any outstanding requests will quiesce the driver;
+ * then report disconnect
+ */
+ if (driver) {
+ for (i = 0, hw_ep = musb->aLocalEnd;
+ i < musb->bEndCount;
+ i++, hw_ep++) {
+ MGC_SelectEnd(musb->pRegs, i);
+ if (hw_ep->bIsSharedFifo /* || !bEnd */) {
+ nuke(&hw_ep->ep_in, -ESHUTDOWN);
+ } else {
+ if (hw_ep->wMaxPacketSizeTx)
+ nuke(&hw_ep->ep_in, -ESHUTDOWN);
+ if (hw_ep->wMaxPacketSizeRx)
+ nuke(&hw_ep->ep_out, -ESHUTDOWN);
+ }
+ }
+
+ spin_unlock(&musb->Lock);
+		driver->disconnect(&musb->g);
+ spin_lock(&musb->Lock);
+ }
+}
+
+/*
+ * Unregister the gadget driver. Used by gadget drivers when
+ * unregistering themselves from the controller.
+ *
+ * @param driver the gadget driver to unregister
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ unsigned long flags;
+ int retval = 0;
+ struct musb *musb = the_gadget;
+
+ if (!driver || !musb)
+ return -EINVAL;
+
+ /* REVISIT always use otg_set_peripheral() here too;
+ * this needs to shut down the OTG engine.
+ */
+
+ spin_lock_irqsave(&musb->Lock, flags);
+ if (musb->pGadgetDriver == driver) {
+ musb->xceiv.state = OTG_STATE_UNDEFINED;
+ stop_activity(musb, driver);
+
+ DBG(3, "unregistering driver %s\n", driver->function);
+ spin_unlock_irqrestore(&musb->Lock, flags);
+ driver->unbind(&musb->g);
+ spin_lock_irqsave(&musb->Lock, flags);
+
+ musb->pGadgetDriver = NULL;
+ musb->g.dev.driver = NULL;
+
+ musb_platform_try_idle(musb);
+ } else
+ retval = -EINVAL;
+ spin_unlock_irqrestore(&musb->Lock, flags);
+
+#ifdef CONFIG_USB_MUSB_OTG
+ if (retval == 0 && musb->board_mode == MUSB_OTG) {
+ usb_remove_hcd(musb_to_hcd(musb));
+ /* FIXME we need to be able to register another
+ * gadget driver here and have everything work;
+ * that currently misbehaves.
+ */
+ }
+#endif
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+/***********************************************************************/
+
+/* lifecycle operations called through plat_uds.c */
+
+void musb_g_resume(struct musb *pThis)
+{
+ DBG(4, "<==\n");
+ if (pThis->pGadgetDriver && pThis->pGadgetDriver->resume) {
+ spin_unlock(&pThis->Lock);
+ pThis->pGadgetDriver->resume(&pThis->g);
+ spin_lock(&pThis->Lock);
+ }
+}
+
+/* called when SOF packets stop for 3+ msec */
+void musb_g_suspend(struct musb *pThis)
+{
+ u8 devctl;
+
+ devctl = musb_readb(pThis->pRegs, MGC_O_HDRC_DEVCTL);
+ DBG(3, "devctl %02x\n", devctl);
+
+ switch (pThis->xceiv.state) {
+ case OTG_STATE_B_IDLE:
+ if ((devctl & MGC_M_DEVCTL_VBUS) == MGC_M_DEVCTL_VBUS)
+ pThis->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ if (pThis->pGadgetDriver && pThis->pGadgetDriver->suspend) {
+ spin_unlock(&pThis->Lock);
+ pThis->pGadgetDriver->suspend(&pThis->g);
+ spin_lock(&pThis->Lock);
+ }
+ break;
+ default:
+ /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
+ * A_PERIPHERAL may need care too
+ */
+ WARN("unhandled SUSPEND transition (%d)\n", pThis->xceiv.state);
+ }
+}
+
+/* called when VBUS drops below session threshold, and in other cases */
+void musb_g_disconnect(struct musb *pThis)
+{
+ DBG(3, "devctl %02x\n", musb_readb(pThis->pRegs, MGC_O_HDRC_DEVCTL));
+
+ pThis->g.speed = USB_SPEED_UNKNOWN;
+ if (pThis->pGadgetDriver && pThis->pGadgetDriver->disconnect) {
+ spin_unlock(&pThis->Lock);
+ pThis->pGadgetDriver->disconnect(&pThis->g);
+ spin_lock(&pThis->Lock);
+ }
+
+ switch (pThis->xceiv.state) {
+ default:
+#ifdef CONFIG_USB_MUSB_OTG
+ pThis->xceiv.state = OTG_STATE_A_IDLE;
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_HOST:
+#endif
+ case OTG_STATE_B_PERIPHERAL:
+ pThis->xceiv.state = OTG_STATE_B_IDLE;
+ break;
+ case OTG_STATE_B_SRP_INIT:
+ break;
+ }
+}
+
+void musb_g_reset(struct musb *pThis)
+__releases(pThis->Lock)
+__acquires(pThis->Lock)
+{
+ void __iomem *pBase = pThis->pRegs;
+ u8 devctl = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
+ u8 power;
+
+ DBG(3, "<== %s addr=%x driver '%s'\n",
+ (devctl & MGC_M_DEVCTL_BDEVICE)
+ ? "B-Device" : "A-Device",
+ musb_readb(pBase, MGC_O_HDRC_FADDR),
+ pThis->pGadgetDriver
+ ? pThis->pGadgetDriver->driver.name
+ : NULL
+ );
+
+ /* HR does NOT clear itself */
+ if (devctl & MGC_M_DEVCTL_HR)
+ musb_writeb(pBase, MGC_O_HDRC_DEVCTL, MGC_M_DEVCTL_SESSION);
+
+ /* report disconnect, if we didn't already (flushing EP state) */
+ if (pThis->g.speed != USB_SPEED_UNKNOWN)
+ musb_g_disconnect(pThis);
+
+ /* what speed did we negotiate? */
+ power = musb_readb(pBase, MGC_O_HDRC_POWER);
+ pThis->g.speed = (power & MGC_M_POWER_HSMODE)
+ ? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+ /* start in USB_STATE_DEFAULT */
+ MUSB_DEV_MODE(pThis);
+ pThis->bAddress = 0;
+ pThis->ep0_state = MGC_END0_STAGE_SETUP;
+
+ pThis->bMayWakeup = 0;
+ pThis->g.b_hnp_enable = 0;
+ pThis->g.a_alt_hnp_support = 0;
+ pThis->g.a_hnp_support = 0;
+
+ /* Normal reset, as B-Device;
+ * or else after HNP, as A-Device
+ */
+ if (devctl & MGC_M_DEVCTL_BDEVICE) {
+ pThis->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ pThis->g.is_a_peripheral = 0;
+ } else if (is_otg_enabled(pThis) && pThis->board_mode == MUSB_OTG) {
+ pThis->xceiv.state = OTG_STATE_A_PERIPHERAL;
+ pThis->g.is_a_peripheral = 1;
+ } else
+ WARN_ON(1);
+}
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+#ifndef __MUSB_GADGET_H
+#define __MUSB_GADGET_H
+
+struct musb_request {
+ struct usb_request request;
+ struct musb_ep *ep;
+ struct musb *musb;
+ u8 bTx; /* endpoint direction */
+ u8 bEnd;
+ u8 mapped;
+};
+
+static inline struct musb_request *to_musb_request(struct usb_request *req)
+{
+ return req ? container_of(req, struct musb_request, request) : NULL;
+}
+
+extern struct usb_request *
+musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
+extern void musb_free_request(struct usb_ep *ep, struct usb_request *req);
+
+
+/*
+ * struct musb_ep - peripheral side view of endpoint rx or tx side
+ */
+struct musb_ep {
+ /* stuff towards the head is basically write-once. */
+ struct usb_ep end_point;
+ char name[12];
+ struct musb_hw_ep *hw_ep;
+ struct musb *pThis;
+ u8 bEndNumber;
+
+ /* ... when enabled/disabled ... */
+ u8 type;
+ u8 is_in;
+ u16 wPacketSize;
+ const struct usb_endpoint_descriptor *desc;
+ struct dma_channel *dma;
+
+ /* later things are modified based on usage */
+ struct list_head req_list;
+
+ u8 busy;
+};
+
+static inline struct musb_ep *to_musb_ep(struct usb_ep *ep)
+{
+ return ep ? container_of(ep, struct musb_ep, end_point) : NULL;
+}
+
+static inline struct usb_request *next_request(struct musb_ep *ep)
+{
+ struct list_head *queue = &ep->req_list;
+
+ if (list_empty(queue))
+ return NULL;
+ return container_of(queue->next, struct usb_request, list);
+}
+
+extern void musb_g_tx(struct musb *pThis, u8 bEnd);
+extern void musb_g_rx(struct musb *pThis, u8 bEnd);
+
+extern struct usb_ep_ops musb_g_ep0_ops;
+
+extern int musb_gadget_setup(struct musb *);
+extern void musb_gadget_cleanup(struct musb *);
+
+extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
+
+extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
+
+#endif /* __MUSB_GADGET_H */
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006 by Nokia Corporation
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include "musbdefs.h"
+#include "musb_host.h"
+
+
+/* MUSB HOST status 22-mar-2006
+ *
+ * - There's still lots of partial code duplication for fault paths, so
+ * they aren't handled as consistently as they need to be.
+ *
+ * - PIO mostly behaved when last tested.
+ * + including ep0, with all usbtest cases 9, 10
+ * + usbtest 14 (ep0out) doesn't seem to run at all
+ * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
+ * configurations, but otherwise double buffering passes basic tests.
+ * + for 2.6.N, for N > ~10, needs API changes for hcd framework.
+ *
+ * - DMA (CPPI) ... partially behaves, not currently recommended
+ * + about 1/15 the speed of typical EHCI implementations (PCI)
+ * + RX, all too often reqpkt seems to misbehave after tx
+ * + TX, no known issues (other than evident silicon issue)
+ *
+ * - DMA (Mentor/OMAP) ...has at least toggle update problems
+ *
+ * - Still no traffic scheduling code to make NAKing for bulk or control
+ * transfers unable to starve other requests; or to make efficient use
+ * of hardware with periodic transfers. (Note that network drivers
+ * commonly post bulk reads that stay pending for a long time; these
+ * would make very visible trouble.)
+ *
+ * - Not tested with HNP, but some SRP paths seem to behave.
+ *
+ * NOTE 24-August:
+ *
+ * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
+ * extra endpoint for periodic use enabling hub + keybd + mouse. That
+ * mostly works, except that with "usbnet" it's easy to trigger cases
+ * with "ping" where RX loses. (a) ping to davinci, even "ping -f",
+ * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
+ * although ARP RX wins. (That test was done with a full speed link.)
+ */
+
+
+/*
+ * NOTE on endpoint usage:
+ *
+ * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
+ * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
+ *
+ * (Yes, bulk _could_ use more of the endpoints than that, and would even
+ * benefit from it ... one remote device may easily be NAKing while others
+ * need to perform transfers in that same direction. The same thing could
+ * be done in software though, assuming dma cooperates.)
+ *
+ * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
+ * So far that scheduling is both dumb and optimistic: the endpoint will be
+ * "claimed" until its software queue is no longer refilled. No multiplexing
+ * of transfers between endpoints, or anything clever.
+ */
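+
+/* For example, on silicon with four endpoints that might mean: ep0 for
+ * control, ep1 IN/OUT dedicated to the bulk queues, and ep2/ep3 each
+ * claimed by one periodic device (say, an interrupt-IN keyboard and
+ * mouse behind a hub).
+ */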
+
+
+/*************************** Forwards ***************************/
+
+static void musb_ep_program(struct musb *pThis, u8 bEnd,
+ struct urb *pUrb, unsigned int nOut,
+ u8 * pBuffer, u32 dwLength);
+
+/*
+ * Start transmit. Caller is responsible for locking shared resources.
+ * pThis must be locked.
+ */
+void musb_h_tx_start(struct musb *pThis, u8 bEnd)
+{
+ u16 wCsr;
+ void __iomem *pBase = pThis->pRegs;
+
+ /* NOTE: no locks here; caller should lock */
+ MGC_SelectEnd(pBase, bEnd);
+ if (bEnd) {
+ wCsr = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+ wCsr |= MGC_M_TXCSR_TXPKTRDY | MGC_M_TXCSR_H_WZC_BITS;
+ DBG(5, "Writing TXCSR%d = %x\n", bEnd, wCsr);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd, wCsr);
+ } else {
+ wCsr = MGC_M_CSR0_H_SETUPPKT | MGC_M_CSR0_TXPKTRDY;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, 0, wCsr);
+ }
+}
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+
+void cppi_hostdma_start(struct musb *pThis, u8 bEnd)
+{
+ void __iomem *pBase = pThis->pRegs;
+ u16 txCsr;
+
+ /* NOTE: no locks here; caller should lock */
+ MGC_SelectEnd(pBase, bEnd);
+ txCsr = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+ txCsr |= MGC_M_TXCSR_DMAENAB | MGC_M_TXCSR_H_WZC_BITS;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd, txCsr);
+}
+
+#endif
+
+/*
+ * Start the URB at the front of an endpoint's queue
+ * end must be claimed from the caller.
+ *
+ * Context: controller locked, irqs blocked
+ */
+static void
+musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
+{
+ u16 wFrame;
+ u32 dwLength;
+ void *pBuffer;
+ void __iomem *pBase = musb->pRegs;
+ struct urb *urb = next_urb(qh);
+ struct musb_hw_ep *pEnd = qh->hw_ep;
+ unsigned nPipe = urb->pipe;
+ u8 bAddress = usb_pipedevice(nPipe);
+ int bEnd = pEnd->bLocalEnd;
+
+ /* initialize software qh state */
+ qh->offset = 0;
+ qh->segsize = 0;
+
+ /* gather right source of data */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* control transfers always start with SETUP */
+ is_in = 0;
+ pEnd->out_qh = qh;
+ musb->bEnd0Stage = MGC_END0_START;
+ pBuffer = urb->setup_packet;
+ dwLength = 8;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ qh->iso_idx = 0;
+ qh->frame = 0;
+ pBuffer = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
+ dwLength = urb->iso_frame_desc[0].length;
+ break;
+ default: /* bulk, interrupt */
+ pBuffer = urb->transfer_buffer;
+ dwLength = urb->transfer_buffer_length;
+ }
+
+ DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
+ qh, urb, bAddress, qh->epnum,
+ is_in ? "in" : "out",
+ ({char *s; switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
+ case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
+ case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
+ default: s = "-intr"; break;
+ }; s;}),
+ bEnd, pBuffer, dwLength);
+
+ /* Configure endpoint */
+ if (is_in || pEnd->bIsSharedFifo)
+ pEnd->in_qh = qh;
+ else
+ pEnd->out_qh = qh;
+ musb_ep_program(musb, bEnd, urb, !is_in, pBuffer, dwLength);
+
+ /* transmit may have more work: start it when it is time */
+ if (is_in)
+ return;
+
+ /* TODO: with CPPI DMA, once DMA is setup and DmaReqEnable in TxCSR
+ * is set (which is the case) transfer is initiated. For periodic
+ * transfer support, add another field in pEnd struct which will
+ * serve as a flag. If CPPI DMA is programmed for the transfer set
+ * this flag and disable DMAReqEnab while programming TxCSR in
+ * programEnd() Once we reach the appropriate time, enable DMA Req
+ * instead of calling musb_h_tx_start() function
+ */
+
+ /* determine if the time is right for a periodic transfer */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ DBG(3, "check whether there's still time for periodic Tx\n");
+ qh->iso_idx = 0;
+ wFrame = musb_readw(pBase, MGC_O_HDRC_FRAME);
+ /* FIXME this doesn't implement that scheduling policy ...
+ * or handle framecounter wrapping
+ */
+ if ((urb->transfer_flags & URB_ISO_ASAP)
+ || (wFrame >= urb->start_frame)) {
+ /* REVISIT the SOF irq handler shouldn't duplicate
+ * this code... or the branch below...
+ * ... and we don't set urb->start_frame
+ */
+ qh->frame = 0;
+			printk(KERN_DEBUG "Start --> periodic TX%s on %d\n",
+ pEnd->tx_channel ? " DMA" : "",
+ bEnd);
+ if (!pEnd->tx_channel)
+ musb_h_tx_start(musb, bEnd);
+ else
+ cppi_hostdma_start(musb, bEnd);
+ } else {
+ qh->frame = urb->start_frame;
+ /* enable SOF interrupt so we can count down */
+DBG(1,"SOF for %d\n", bEnd);
+#if 1 // ifndef CONFIG_ARCH_DAVINCI
+ musb_writeb(pBase, MGC_O_HDRC_INTRUSBE, 0xff);
+#endif
+ }
+ break;
+ default:
+ DBG(4, "Start TX%d %s\n", bEnd,
+ pEnd->tx_channel ? "dma" : "pio");
+
+ if (!pEnd->tx_channel)
+ musb_h_tx_start(musb, bEnd);
+ else
+ cppi_hostdma_start(musb, bEnd);
+ }
+}
+
+/* caller owns no controller locks, irqs are blocked */
+static inline void
+__musb_giveback(struct musb_hw_ep *hw_ep, struct urb *urb, int status)
+__releases(urb->lock)
+__acquires(urb->lock)
+{
+ struct musb *musb = hw_ep->musb;
+
+ if ((urb->transfer_flags & URB_SHORT_NOT_OK)
+ && (urb->actual_length < urb->transfer_buffer_length)
+ && status == 0
+ && usb_pipein(urb->pipe))
+ status = -EREMOTEIO;
+
+ spin_lock(&urb->lock);
+ urb->hcpriv = NULL;
+ if (urb->status == -EINPROGRESS)
+ urb->status = status;
+ spin_unlock(&urb->lock);
+
+ DBG(({ int level; switch (urb->status) {
+ case 0:
+ level = 4;
+ break;
+ /* common/boring faults */
+ case -EREMOTEIO:
+ case -ESHUTDOWN:
+ case -EPIPE:
+ level = 3;
+ break;
+ default:
+ level = 2;
+ break;
+ }; level; }),
+ "complete %p (%d), dev%d ep%d%s, %d/%d\n",
+ urb, urb->status,
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb->actual_length, urb->transfer_buffer_length
+ );
+
+ usb_hcd_giveback_urb(musb_to_hcd(musb), urb, musb->int_regs);
+}
+
+/* For bulk/interrupt endpoints only: record the hardware data toggle
+ * in usbcore's device state, so the next URB queued to this endpoint
+ * resumes with the correct DATA0/DATA1.
+ */
+static inline void
+musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+{
+ struct usb_device *udev = urb->dev;
+ u16 csr;
+ void __iomem *hw = ep->musb->pRegs;
+ struct musb_qh *qh;
+
+ /* FIXME: the current Mentor DMA code seems to have
+ * problems getting toggle correct.
+ */
+
+ if (is_in || ep->bIsSharedFifo)
+ qh = ep->in_qh;
+ else
+ qh = ep->out_qh;
+
+ if (!is_in) {
+ csr = MGC_ReadCsr16(hw, MGC_O_HDRC_TXCSR,
+ ep->bLocalEnd);
+ usb_settoggle(udev, qh->epnum, 1,
+ (csr & MGC_M_TXCSR_H_DATATOGGLE)
+ ? 1 : 0);
+ } else {
+ csr = MGC_ReadCsr16(hw, MGC_O_HDRC_RXCSR,
+ ep->bLocalEnd);
+ usb_settoggle(udev, qh->epnum, 0,
+ (csr & MGC_M_RXCSR_H_DATATOGGLE)
+ ? 1 : 0);
+ }
+}
+
+/* caller owns controller lock, irqs are blocked */
+static struct musb_qh *
+musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+__releases(qh->hw_ep->musb->Lock)
+__acquires(qh->hw_ep->musb->Lock)
+{
+ int is_in;
+ struct musb_hw_ep *ep = qh->hw_ep;
+ struct musb *musb = ep->musb;
+ int ready = qh->is_ready;
+
+ if (ep->bIsSharedFifo)
+ is_in = 1;
+ else
+ is_in = usb_pipein(urb->pipe);
+
+ /* save toggle eagerly, for paranoia */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ musb_save_toggle(ep, is_in, urb);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (status == 0 && urb->error_count)
+ status = -EXDEV;
+ break;
+ }
+
+ qh->is_ready = 0;
+ spin_unlock(&musb->Lock);
+ __musb_giveback(ep, urb, status);
+ spin_lock(&musb->Lock);
+ qh->is_ready = ready;
+
+ /* reclaim resources (and bandwidth) ASAP; deschedule it, and
+ * invalidate qh as soon as list_empty(&hep->urb_list)
+ */
+ if (list_empty(&qh->hep->urb_list)) {
+ struct list_head *head;
+
+ if (is_in)
+ ep->rx_reinit = 1;
+ else
+ ep->tx_reinit = 1;
+
+ /* clobber old pointers to this qh */
+ if (is_in || ep->bIsSharedFifo)
+ ep->in_qh = NULL;
+ else
+ ep->out_qh = NULL;
+ qh->hep->hcpriv = NULL;
+
+ switch (qh->type) {
+
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ /* this is where periodic bandwidth should be
+ * de-allocated if it's tracked and allocated;
+ * and where we'd update the schedule tree...
+ */
+ musb->periodic[ep->bLocalEnd] = NULL;
+ kfree(qh);
+ qh = NULL;
+ break;
+
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ /* fifo policy for these lists, except that NAKing
+ * should rotate a qh to the end (for fairness).
+ */
+ head = qh->ring.prev;
+ list_del(&qh->ring);
+ kfree(qh);
+ qh = first_qh(head);
+ break;
+ }
+ }
+ return qh;
+}
+
+/*
+ * Advance this hardware endpoint's queue, completing the specified urb and
+ * advancing to either the next urb queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, irqs are blocked
+ */
+static void
+musb_advance_schedule(struct musb *pThis, struct urb *urb,
+ struct musb_hw_ep *pEnd, int is_in)
+{
+ struct musb_qh *qh;
+
+ if (is_in || pEnd->bIsSharedFifo)
+ qh = pEnd->in_qh;
+ else
+ qh = pEnd->out_qh;
+ qh = musb_giveback(qh, urb, 0);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ /* REVISIT udelay reportedly works around issues in unmodified
+ * Mentor RTL before v1.5, where it doesn't disable the pull-up
+ * resisters in high speed mode. That causes signal reflection
+ * and errors because inter packet IDLE time vanishes.
+ *
+ * Yes, this delay makes DMA-OUT a bit slower than PIO. But
+ * without it, some devices are unusable. But there seem to be
+ * other issues too, at least on DaVinci; the delay improves
+ * some full speed cases, and being DMA-coupled is strange...
+ */
+ if (is_dma_capable() && !is_in && pEnd->tx_channel)
+ udelay(15); /* 10 usec ~= 1x 512byte packet */
+#endif
+
+ if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
+ DBG(4, "... next ep%d %cX urb %p\n",
+ pEnd->bLocalEnd, is_in ? 'R' : 'T',
+ next_urb(qh));
+ musb_start_urb(pThis, is_in, qh);
+ }
+}
+
+static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
+{
+ /* we don't want fifo to fill itself again;
+ * ignore dma (various models),
+ * leave toggle alone (may not have been saved yet)
+ */
+ csr |= MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_RXPKTRDY;
+ csr &= ~( MGC_M_RXCSR_H_REQPKT
+ | MGC_M_RXCSR_H_AUTOREQ
+ | MGC_M_RXCSR_AUTOCLEAR
+ );
+
+ /* write 2x to allow double buffering */
+ musb_writew(hw_ep->regs, MGC_O_HDRC_RXCSR, csr);
+ musb_writew(hw_ep->regs, MGC_O_HDRC_RXCSR, csr);
+
+ /* flush writebuffer */
+ return musb_readw(hw_ep->regs, MGC_O_HDRC_RXCSR);
+}
+
+/*
+ * PIO RX for a packet (or part of it).
+ */
+static u8 musb_host_packet_rx(struct musb *pThis, struct urb *pUrb,
+ u8 bEnd, u8 bIsochError)
+{
+ u16 wRxCount;
+ u8 *pBuffer;
+ u16 wCsr;
+ u8 bDone = FALSE;
+ u32 length;
+ int do_flush = 0;
+ void __iomem *pBase = pThis->pRegs;
+ struct musb_hw_ep *pEnd = pThis->aLocalEnd + bEnd;
+ struct musb_qh *qh = pEnd->in_qh;
+ int nPipe = pUrb->pipe;
+ void *buffer = pUrb->transfer_buffer;
+
+ /* caller has already done MGC_SelectEnd(pBase, bEnd) */
+ wRxCount = MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCOUNT, bEnd);
+
+ /* unload FIFO */
+ if (usb_pipeisoc(nPipe)) {
+ int status = 0;
+ struct usb_iso_packet_descriptor *d;
+
+ if (bIsochError) {
+ status = -EILSEQ;
+ pUrb->error_count++;
+ }
+
+ d = pUrb->iso_frame_desc + qh->iso_idx;
+ pBuffer = buffer + d->offset;
+ length = d->length;
+ if (wRxCount > length) {
+ if (status == 0) {
+ status = -EOVERFLOW;
+ pUrb->error_count++;
+ }
+ DBG(2, "** OVERFLOW %d into %d\n", wRxCount, length);
+ do_flush = 1;
+ } else
+ length = wRxCount;
+ pUrb->actual_length += length;
+ d->actual_length = length;
+
+ d->status = status;
+
+ /* see if we are done */
+ bDone = (++qh->iso_idx >= pUrb->number_of_packets);
+ } else {
+ /* non-isoch */
+ pBuffer = buffer + qh->offset;
+ length = pUrb->transfer_buffer_length - qh->offset;
+ if (wRxCount > length) {
+ if (pUrb->status == -EINPROGRESS)
+ pUrb->status = -EOVERFLOW;
+ DBG(2, "** OVERFLOW %d into %d\n", wRxCount, length);
+ do_flush = 1;
+ } else
+ length = wRxCount;
+ pUrb->actual_length += length;
+ qh->offset += length;
+
+ /* see if we are done */
+ bDone = (pUrb->actual_length == pUrb->transfer_buffer_length)
+ || (wRxCount < qh->maxpacket)
+ || (pUrb->status != -EINPROGRESS);
+ if (bDone
+ && (pUrb->status == -EINPROGRESS)
+ && (pUrb->transfer_flags & URB_SHORT_NOT_OK)
+ && (pUrb->actual_length
+ < pUrb->transfer_buffer_length))
+ pUrb->status = -EREMOTEIO;
+ }
+
+ musb_read_fifo(pEnd, length, pBuffer);
+
+ wCsr = MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd);
+ wCsr |= MGC_M_RXCSR_H_WZC_BITS;
+ if (unlikely(do_flush))
+ musb_h_flush_rxfifo(pEnd, wCsr);
+ else {
+ /* REVISIT this assumes AUTOCLEAR is never set */
+ wCsr &= ~(MGC_M_RXCSR_RXPKTRDY | MGC_M_RXCSR_H_REQPKT);
+ if (!bDone)
+ wCsr |= MGC_M_RXCSR_H_REQPKT;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd, wCsr);
+ }
+
+ return bDone;
+}
+
+/* we don't always need to reinit a given side of an endpoint...
+ * when we do, use tx/rx reinit routine and then construct a new CSR
+ * to address data toggle, NYET, and DMA or PIO.
+ *
+ * it's possible that driver bugs (especially for DMA) or aborting a
+ * transfer might have left the endpoint busier than it should be.
+ * the busy/not-empty tests are basically paranoia.
+ */
+static void
+musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
+{
+ u16 csr;
+
+ /* NOTE: we know the "rx" fifo reinit never triggers for ep0.
+ * That always uses tx_reinit since ep0 repurposes TX register
+ * offsets; the initial SETUP packet is also a kind of OUT.
+ */
+
+ /* if programmed for Tx, put it in RX mode */
+ if (ep->bIsSharedFifo) {
+ csr = musb_readw(ep->regs, MGC_O_HDRC_TXCSR);
+ if (csr & MGC_M_TXCSR_MODE) {
+ if (csr & MGC_M_TXCSR_FIFONOTEMPTY) {
+ /* this shouldn't happen; irq?? */
+ ERR("shared fifo not empty?\n");
+ musb_writew(ep->regs, MGC_O_HDRC_TXCSR,
+ MGC_M_TXCSR_FLUSHFIFO);
+ musb_writew(ep->regs, MGC_O_HDRC_TXCSR,
+ MGC_M_TXCSR_FRCDATATOG);
+ }
+ }
+ /* clear mode (and everything else) to enable Rx */
+ musb_writew(ep->regs, MGC_O_HDRC_TXCSR, 0);
+
+ /* scrub all previous state, clearing toggle */
+ } else {
+ csr = musb_readw(ep->regs, MGC_O_HDRC_RXCSR);
+ if (csr & MGC_M_RXCSR_RXPKTRDY)
+ WARN("rx%d, packet/%d ready?\n", ep->bLocalEnd,
+ musb_readw(ep->regs, MGC_O_HDRC_RXCOUNT));
+
+ musb_h_flush_rxfifo(ep, MGC_M_RXCSR_CLRDATATOG);
+ }
+
+ /* target addr and (for multipoint) hub addr/port */
+ if (musb->bIsMultipoint) {
+ musb_writeb(ep->target_regs, MGC_O_HDRC_RXFUNCADDR,
+ qh->addr_reg);
+ musb_writeb(ep->target_regs, MGC_O_HDRC_RXHUBADDR,
+ qh->h_addr_reg);
+ musb_writeb(ep->target_regs, MGC_O_HDRC_RXHUBPORT,
+ qh->h_port_reg);
+ } else
+ musb_writeb(musb->pRegs, MGC_O_HDRC_FADDR, qh->addr_reg);
+
+ /* protocol/endpoint, interval/NAKlimit, i/o size */
+ musb_writeb(ep->regs, MGC_O_HDRC_RXTYPE, qh->type_reg);
+ musb_writeb(ep->regs, MGC_O_HDRC_RXINTERVAL, qh->intv_reg);
+ /* NOTE: bulk combining rewrites high bits of maxpacket */
+ musb_writew(ep->regs, MGC_O_HDRC_RXMAXP, qh->maxpacket);
+
+ ep->rx_reinit = 0;
+}
+
+
+/*
+ * Program an HDRC endpoint as per the given URB
+ * Context: irqs blocked, controller lock held
+ */
+/* FIXME: no host-side ISO bit is defined yet, so this is a no-op */
+#define MGC_M_TXCSR_ISO 0
+static void musb_ep_program(struct musb *pThis, u8 bEnd,
+ struct urb *pUrb, unsigned int is_out,
+ u8 * pBuffer, u32 dwLength)
+{
+#ifndef CONFIG_USB_INVENTRA_FIFO
+ struct dma_controller *pDmaController;
+ struct dma_channel *pDmaChannel;
+ u8 bDmaOk;
+#endif
+ void __iomem *pBase = pThis->pRegs;
+ struct musb_hw_ep *pEnd = pThis->aLocalEnd + bEnd;
+ struct musb_qh *qh;
+ u16 wPacketSize;
+
+ if (!is_out || pEnd->bIsSharedFifo)
+ qh = pEnd->in_qh;
+ else
+ qh = pEnd->out_qh;
+
+ wPacketSize = qh->maxpacket;
+
+ DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
+ "h_addr%02x h_port%02x bytes %d\n",
+ is_out ? "-->" : "<--",
+ bEnd, pUrb, pUrb->dev->speed,
+ qh->addr_reg, qh->epnum, is_out ? "out" : "in",
+ qh->h_addr_reg, qh->h_port_reg,
+ dwLength);
+
+ MGC_SelectEnd(pBase, bEnd);
+
+#ifndef CONFIG_USB_INVENTRA_FIFO
+ pDmaChannel = is_out ? pEnd->tx_channel : pEnd->rx_channel;
+ pDmaController = pThis->pDmaController;
+
+ /* candidate for DMA */
+ if (is_dma_capable() && bEnd && pDmaController) {
+ bDmaOk = 1;
+ if (!pDmaChannel) {
+ pDmaChannel = pDmaController->channel_alloc(
+ pDmaController, pEnd, is_out);
+ if (is_out)
+ pEnd->tx_channel = pDmaChannel;
+ else
+ pEnd->rx_channel = pDmaChannel;
+ }
+ } else
+ bDmaOk = 0;
+#endif /* PIO isn't the only option */
+
+ /* make sure we clear DMAEnab, autoSet bits from previous run */
+
+ /* OUT/transmit/EP0 or IN/receive? */
+ if (is_out) {
+ u16 wCsr;
+ u16 wIntrTxE;
+ u16 wLoadCount;
+
+ wCsr = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+
+ /* disable interrupt in case we flush */
+ wIntrTxE = musb_readw(pBase, MGC_O_HDRC_INTRTXE);
+ musb_writew(pBase, MGC_O_HDRC_INTRTXE, wIntrTxE & ~(1 << bEnd));
+
+ /* general endpoint setup */
+ if (bEnd) {
+ u16 csr = wCsr;
+
+ /* flush all old state, set default */
+ csr &= ~(MGC_M_TXCSR_H_NAKTIMEOUT
+ | MGC_M_TXCSR_DMAMODE
+ | MGC_M_TXCSR_FRCDATATOG
+ | MGC_M_TXCSR_ISO
+ | MGC_M_TXCSR_H_RXSTALL
+ | MGC_M_TXCSR_H_ERROR
+ | MGC_M_TXCSR_FIFONOTEMPTY
+ | MGC_M_TXCSR_TXPKTRDY
+ );
+ csr |= MGC_M_TXCSR_FLUSHFIFO
+ | MGC_M_TXCSR_MODE;
+
+ if (qh->type == USB_ENDPOINT_XFER_ISOC)
+ csr |= MGC_M_TXCSR_ISO;
+ else if (usb_gettoggle(pUrb->dev,
+ qh->epnum, 1))
+ csr |= MGC_M_TXCSR_H_WR_DATATOGGLE
+ | MGC_M_TXCSR_H_DATATOGGLE;
+ else
+ csr |= MGC_M_TXCSR_CLRDATATOG;
+
+ /* twice in case of double packet buffering */
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd,
+ csr);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd,
+ csr);
+ wCsr = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR,
+ bEnd);
+ } else {
+ /* endpoint 0: just flush */
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, bEnd,
+ wCsr | MGC_M_CSR0_FLUSHFIFO);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, bEnd,
+ wCsr | MGC_M_CSR0_FLUSHFIFO);
+ }
+
+ /* target addr and (for multipoint) hub addr/port */
+ if (pThis->bIsMultipoint) {
+ musb_writeb(pBase,
+ MGC_BUSCTL_OFFSET(bEnd,
+ MGC_O_HDRC_TXFUNCADDR),
+ qh->addr_reg);
+ musb_writeb(pBase,
+ MGC_BUSCTL_OFFSET(bEnd,
+ MGC_O_HDRC_TXHUBADDR),
+ qh->h_addr_reg);
+ musb_writeb(pBase,
+ MGC_BUSCTL_OFFSET(bEnd,
+ MGC_O_HDRC_TXHUBPORT),
+ qh->h_port_reg);
+ } else
+ musb_writeb(pBase, MGC_O_HDRC_FADDR, qh->addr_reg);
+
+ /* protocol/endpoint/interval/NAKlimit */
+ if (bEnd) {
+ MGC_WriteCsr8(pBase, MGC_O_HDRC_TXTYPE, bEnd,
+ qh->type_reg);
+ if (can_bulk_split(pThis, qh->type))
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXMAXP, bEnd,
+ wPacketSize |
+ ((pEnd->wMaxPacketSizeTx /
+ wPacketSize) - 1) << 11);
+ else
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXMAXP, bEnd,
+ wPacketSize);
+ MGC_WriteCsr8(pBase, MGC_O_HDRC_TXINTERVAL, bEnd,
+ qh->intv_reg);
+ } else {
+ MGC_WriteCsr8(pBase, MGC_O_HDRC_NAKLIMIT0, 0,
+ qh->intv_reg);
+ if (pThis->bIsMultipoint)
+ MGC_WriteCsr8(pBase, MGC_O_HDRC_TYPE0, 0,
+ qh->type_reg);
+ }
+
+ if (can_bulk_split(pThis, qh->type))
+ wLoadCount = min((u32) pEnd->wMaxPacketSizeTx,
+ dwLength);
+ else
+ wLoadCount = min((u32) wPacketSize, dwLength);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (bDmaOk && pDmaChannel) {
+
+ /* clear previous state */
+ wCsr = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+ wCsr &= ~(MGC_M_TXCSR_AUTOSET |
+ MGC_M_TXCSR_DMAMODE |
+ MGC_M_TXCSR_DMAENAB);
+ wCsr |= MGC_M_TXCSR_MODE;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd,
+ wCsr | MGC_M_TXCSR_MODE);
+
+ qh->segsize = min(dwLength, pDmaChannel->dwMaxLength);
+
+ if (qh->segsize <= wPacketSize)
+ pDmaChannel->bDesiredMode = 0;
+ else
+ pDmaChannel->bDesiredMode = 1;
+
+
+ if (pDmaChannel->bDesiredMode == 0) {
+ wCsr &= ~(MGC_M_TXCSR_AUTOSET |
+ MGC_M_TXCSR_DMAMODE);
+ wCsr |= (MGC_M_TXCSR_DMAENAB);
+ /* against the programming guide */
+ } else
+ wCsr |= (MGC_M_TXCSR_AUTOSET |
+ MGC_M_TXCSR_DMAENAB |
+ MGC_M_TXCSR_DMAMODE);
+
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd, wCsr);
+
+ bDmaOk = pDmaController->channel_program(
+ pDmaChannel, wPacketSize,
+ pDmaChannel->bDesiredMode,
+ pUrb->transfer_dma,
+ qh->segsize);
+ if (bDmaOk) {
+ wLoadCount = 0;
+ } else {
+ pDmaController->channel_release(pDmaChannel);
+ pDmaChannel = pEnd->tx_channel = NULL;
+ }
+ }
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+
+ /* candidate for DMA */
+ if (bDmaOk && pDmaChannel) {
+
+ /* program endpoint CSRs first, then setup DMA.
+ * assume CPPI setup succeeds.
+ * defer enabling dma.
+ */
+ wCsr = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+ wCsr &= ~(MGC_M_TXCSR_AUTOSET
+ | MGC_M_TXCSR_DMAMODE
+ | MGC_M_TXCSR_DMAENAB);
+ wCsr |= MGC_M_TXCSR_MODE;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd,
+ wCsr | MGC_M_TXCSR_MODE);
+
+ pDmaChannel->dwActualLength = 0L;
+ qh->segsize = dwLength;
+
+ /* TX uses "rndis" mode automatically, but needs help
+ * to identify the zero-length-final-packet case.
+ */
+ bDmaOk = pDmaController->channel_program(
+ pDmaChannel, wPacketSize,
+ (pUrb->transfer_flags
+ & URB_ZERO_PACKET)
+ == URB_ZERO_PACKET,
+ pUrb->transfer_dma,
+ qh->segsize);
+ if (bDmaOk) {
+ wLoadCount = 0;
+ } else {
+ pDmaController->channel_release(pDmaChannel);
+ pDmaChannel = pEnd->tx_channel = NULL;
+
+ /* REVISIT there's an error path here that
+ * needs handling: can't do dma, but
+ * there's no pio buffer address...
+ */
+ }
+ }
+#endif
+ if (wLoadCount) {
+ /* PIO to load FIFO */
+ qh->segsize = wLoadCount;
+ musb_write_fifo(pEnd, wLoadCount, pBuffer);
+ wCsr = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+ wCsr &=
+ ~(MGC_M_TXCSR_DMAENAB | MGC_M_TXCSR_DMAMODE |
+ MGC_M_TXCSR_AUTOSET);
+ /* write CSR */
+ wCsr |= MGC_M_TXCSR_MODE;
+
+ if (bEnd)
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd,
+ wCsr);
+
+ }
+
+ /* re-enable interrupt */
+ musb_writew(pBase, MGC_O_HDRC_INTRTXE, wIntrTxE);
+
+ /* IN/receive */
+ } else {
+ u16 csr;
+
+ if (pEnd->rx_reinit) {
+ musb_rx_reinit(pThis, qh, pEnd);
+
+ /* init new state: toggle and NYET, maybe DMA later */
+ if (usb_gettoggle(pUrb->dev, qh->epnum, 0))
+ csr = MGC_M_RXCSR_H_WR_DATATOGGLE
+ | MGC_M_RXCSR_H_DATATOGGLE;
+ else
+ csr = 0;
+ if (qh->type == USB_ENDPOINT_XFER_INT)
+ csr |= MGC_M_RXCSR_DISNYET;
+
+ } else {
+ csr = musb_readw(pEnd->regs, MGC_O_HDRC_RXCSR);
+
+ if (csr & (MGC_M_RXCSR_RXPKTRDY
+ | MGC_M_RXCSR_DMAENAB
+ | MGC_M_RXCSR_H_REQPKT))
+ ERR("broken !rx_reinit, ep%d csr %04x\n",
+ pEnd->bLocalEnd, csr);
+
+ /* scrub any stale state, leaving toggle alone */
+ csr &= MGC_M_RXCSR_DISNYET;
+ }
+
+ /* kick things off */
+#ifdef CONFIG_USB_TI_CPPI_DMA
+ /* candidate for DMA */
+ if (pDmaChannel) {
+ pDmaChannel->dwActualLength = 0L;
+ qh->segsize = dwLength;
+
+ /* AUTOREQ is in a DMA register */
+ musb_writew(pEnd->regs, MGC_O_HDRC_RXCSR, csr);
+ csr = musb_readw(pEnd->regs,
+ MGC_O_HDRC_RXCSR);
+
+ /* unless caller treats short rx transfers as
+ * errors, we dare not queue multiple transfers.
+ */
+ bDmaOk = pDmaController->channel_program(
+ pDmaChannel, wPacketSize,
+ !(pUrb->transfer_flags
+ & URB_SHORT_NOT_OK),
+ pUrb->transfer_dma,
+ qh->segsize);
+ if (!bDmaOk) {
+ pDmaController->channel_release(
+ pDmaChannel);
+ pDmaChannel = pEnd->rx_channel = NULL;
+ } else
+ csr |= MGC_M_RXCSR_DMAENAB;
+ }
+#endif
+ csr |= MGC_M_RXCSR_H_REQPKT;
+ DBG(7, "RXCSR%d := %04x\n", bEnd, csr);
+ musb_writew(pEnd->regs, MGC_O_HDRC_RXCSR, csr);
+ csr = musb_readw(pEnd->regs, MGC_O_HDRC_RXCSR); /* flush writebuffer */
+ }
+}
+
+
+/*
+ * Service the default endpoint (ep0) as host.
+ * return TRUE if more packets are required for this transaction
+ */
+static u8 musb_h_ep0_continue(struct musb *pThis,
+ u16 wCount, struct urb *pUrb)
+{
+ u8 bMore = FALSE;
+ u8 *pFifoDest = NULL;
+ u16 wFifoCount = 0;
+ struct musb_hw_ep *pEnd = pThis->control_ep;
+ struct musb_qh *qh = pEnd->in_qh;
+ struct usb_ctrlrequest *pRequest =
+ (struct usb_ctrlrequest *)pUrb->setup_packet;
+
+ if (MGC_END0_IN == pThis->bEnd0Stage) {
+ /* we are receiving from peripheral */
+ pFifoDest = pUrb->transfer_buffer + pUrb->actual_length;
+ wFifoCount = min(wCount, ((u16)
+ (pUrb->transfer_buffer_length - pUrb->actual_length)));
+ if (wFifoCount < wCount)
+ pUrb->status = -EOVERFLOW;
+
+ musb_read_fifo(pEnd, wFifoCount, pFifoDest);
+
+ pUrb->actual_length += wFifoCount;
+ if (wCount < qh->maxpacket) {
+ /* always terminate on short read; it's
+ * rarely reported as an error.
+ */
+ if ((pUrb->transfer_flags & URB_SHORT_NOT_OK)
+ && (pUrb->actual_length <
+ pUrb->transfer_buffer_length))
+ pUrb->status = -EREMOTEIO;
+ } else if (pUrb->actual_length <
+ pUrb->transfer_buffer_length)
+ bMore = TRUE;
+ } else {
+ if ((MGC_END0_START == pThis->bEnd0Stage)
+ && (pRequest->bRequestType & USB_DIR_IN)) {
+ /* this means we just did setup; switch to IN */
+ DBG(4, "start IN-DATA\n");
+ pThis->bEnd0Stage = MGC_END0_IN;
+ bMore = TRUE;
+
+ } else if (pRequest->wLength
+ && (MGC_END0_START == pThis->bEnd0Stage)) {
+ pThis->bEnd0Stage = MGC_END0_OUT;
+ pFifoDest = (u8 *) (pUrb->transfer_buffer +
+ pUrb->actual_length);
+ wFifoCount =
+ min(qh->maxpacket,
+ ((u16)
+ (pUrb->transfer_buffer_length -
+ pUrb->actual_length)));
+ DBG(3, "Sending %d bytes to %p\n", wFifoCount,
+ pFifoDest);
+ musb_write_fifo(pEnd, wFifoCount, pFifoDest);
+
+ qh->segsize = wFifoCount;
+ pUrb->actual_length += wFifoCount;
+ if (pUrb->actual_length
+ < pUrb->transfer_buffer_length) {
+ bMore = TRUE;
+ }
+ }
+ }
+
+ return bMore;
+}
+
+/*
+ * Handle default endpoint interrupt as host. Only called in IRQ time
+ * from the LinuxIsr() interrupt service routine.
+ *
+ * called with controller irqlocked
+ */
+irqreturn_t musb_h_ep0_irq(struct musb *pThis)
+{
+ struct urb *pUrb;
+ u16 wCsrVal, wCount;
+ int status = 0;
+ void __iomem *pBase = pThis->pRegs;
+ struct musb_hw_ep *pEnd = pThis->control_ep;
+ struct musb_qh *qh = pEnd->in_qh;
+ u8 bComplete = FALSE;
+ irqreturn_t retval = IRQ_NONE;
+
+ /* ep0 only has one queue, "in" */
+ pUrb = next_urb(qh);
+
+ MGC_SelectEnd(pBase, 0);
+ wCsrVal = MGC_ReadCsr16(pBase, MGC_O_HDRC_CSR0, 0);
+ wCount = MGC_ReadCsr8(pBase, MGC_O_HDRC_COUNT0, 0);
+
+ DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+ wCsrVal, qh, wCount, pUrb, pThis->bEnd0Stage);
+
+ /* if we just did status stage, we are done */
+ if (MGC_END0_STATUS == pThis->bEnd0Stage) {
+ retval = IRQ_HANDLED;
+ bComplete = TRUE;
+ }
+
+ /* prepare status */
+ if (wCsrVal & MGC_M_CSR0_H_RXSTALL) {
+ DBG(6, "STALLING ENDPOINT\n");
+ status = -EPIPE;
+
+ } else if (wCsrVal & MGC_M_CSR0_H_ERROR) {
+ DBG(2, "no response, csr0 %04x\n", wCsrVal);
+ status = -EPROTO;
+
+ } else if (wCsrVal & MGC_M_CSR0_H_NAKTIMEOUT) {
+ DBG(2, "control NAK timeout\n");
+
+ /* NOTE: this code path would be a good place to PAUSE a
+ * control transfer, if another one is queued, so that
+ * ep0 is more likely to stay busy.
+ *
+ * if (qh->ring.next != &musb->control), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, 0, 0);
+ retval = IRQ_HANDLED;
+ }
+
+ if (status) {
+ DBG(6, "aborting\n");
+ retval = IRQ_HANDLED;
+ if (pUrb)
+ pUrb->status = status;
+ bComplete = TRUE;
+
+ /* use the proper sequence to abort the transfer */
+ if (wCsrVal & MGC_M_CSR0_H_REQPKT) {
+ wCsrVal &= ~MGC_M_CSR0_H_REQPKT;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, 0, wCsrVal);
+ wCsrVal &= ~MGC_M_CSR0_H_NAKTIMEOUT;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, 0, wCsrVal);
+ } else {
+ wCsrVal |= MGC_M_CSR0_FLUSHFIFO;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, 0, wCsrVal);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, 0, wCsrVal);
+ wCsrVal &= ~MGC_M_CSR0_H_NAKTIMEOUT;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, 0, wCsrVal);
+ }
+
+ MGC_WriteCsr8(pBase, MGC_O_HDRC_NAKLIMIT0, 0, 0);
+
+ /* clear it */
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, 0, 0);
+ }
+
+ if (unlikely(!pUrb)) {
+ /* stop endpoint since we have no place for its data, this
+ * SHOULD NEVER HAPPEN! */
+ ERR("no URB for end 0\n");
+
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, 0, MGC_M_CSR0_FLUSHFIFO);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, 0, MGC_M_CSR0_FLUSHFIFO);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, 0, 0);
+
+ goto done;
+ }
+
+ if (!bComplete) {
+ /* call common logic and prepare response */
+ if (musb_h_ep0_continue(pThis, wCount, pUrb)) {
+ /* more packets required */
+ wCsrVal = (MGC_END0_IN == pThis->bEnd0Stage) ?
+ MGC_M_CSR0_H_REQPKT : MGC_M_CSR0_TXPKTRDY;
+ } else {
+ /* data transfer complete; perform status phase */
+ wCsrVal = MGC_M_CSR0_H_STATUSPKT |
+ (usb_pipeout(pUrb->pipe) ? MGC_M_CSR0_H_REQPKT :
+ MGC_M_CSR0_TXPKTRDY);
+ /* flag status stage */
+ pThis->bEnd0Stage = MGC_END0_STATUS;
+
+ DBG(5, "ep0 STATUS, csr %04x\n", wCsrVal);
+
+ }
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_CSR0, 0, wCsrVal);
+ retval = IRQ_HANDLED;
+ }
+
+ /* call completion handler if done */
+ if (bComplete)
+ musb_advance_schedule(pThis, pUrb, pEnd, 1);
+done:
+ set_bit(HCD_FLAG_SAW_IRQ, &musb_to_hcd(pThis)->flags);
+ return retval;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side TX (OUT) using Mentor DMA works as follows:
+ submit_urb ->
+ - if queue was empty, Program Endpoint
+ - ... which starts DMA to fifo in mode 1 or 0
+
+ DMA Isr (transfer complete) -> TxAvail()
+ - Stop DMA (~DmaEnab) (<--- Alert ... currently happens
+ only in musb_cleanup_urb)
+ - TxPktRdy has to be set in mode 0 or for short packets in mode 1.
+*/
+
+#endif
+
+/* Service a Tx-Available or dma completion irq for the endpoint */
+void musb_host_tx(struct musb *pThis, u8 bEnd)
+{
+ int nPipe;
+ u8 bDone = FALSE;
+ u16 wTxCsrVal;
+ size_t wLength = 0;
+ u8 *pBuffer = NULL;
+ struct urb *pUrb;
+ struct musb_hw_ep *pEnd = pThis->aLocalEnd + bEnd;
+ struct musb_qh *qh = pEnd->out_qh;
+ u32 status = 0;
+ void __iomem *pBase = pThis->pRegs;
+ struct dma_channel *dma;
+
+ pUrb = next_urb(qh);
+
+ MGC_SelectEnd(pBase, bEnd);
+ wTxCsrVal = MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+
+ /* with CPPI, DMA sometimes triggers "extra" irqs */
+ if (!pUrb) {
+ DBG(4, "extra TX%d ready, csr %04x\n", bEnd, wTxCsrVal);
+ goto finish;
+ }
+
+ nPipe = pUrb->pipe;
+ dma = is_dma_capable() ? pEnd->tx_channel : NULL;
+ DBG(4, "OUT/TX%d end, csr %04x%s\n", bEnd, wTxCsrVal,
+ dma ? ", dma" : "");
+
+ /* check for errors */
+ if (wTxCsrVal & MGC_M_TXCSR_H_RXSTALL) {
+ DBG(3, "TX end %d stall\n", bEnd);
+
+ /* stall; record URB status */
+ status = -EPIPE;
+
+ } else if (wTxCsrVal & MGC_M_TXCSR_H_ERROR) {
+ DBG(3, "TX data error on ep=%d\n", bEnd);
+
+ status = -ETIMEDOUT;
+
+ } else if (wTxCsrVal & MGC_M_TXCSR_H_NAKTIMEOUT) {
+ DBG(6, "TX end=%d device not responding\n", bEnd);
+
+ /* NOTE: this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) tx urb
+ * that could use this fifo. (dma complicates it...)
+ *
+ * if (bulk && qh->ring.next != &musb->out_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ MGC_SelectEnd(pBase, bEnd);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd,
+ MGC_M_TXCSR_H_WZC_BITS
+ | MGC_M_TXCSR_TXPKTRDY);
+ goto finish;
+ }
+
+ if (status) {
+ if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
+ dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
+ (void) pThis->pDmaController->channel_abort(dma);
+ }
+
+ /* do the proper sequence to abort the transfer in the
+ * usb core; the dma engine should already be stopped.
+ */
+ /* SCRUB (TX) */
+ wTxCsrVal &= ~(MGC_M_TXCSR_FIFONOTEMPTY
+ | MGC_M_TXCSR_AUTOSET
+ | MGC_M_TXCSR_DMAENAB
+ | MGC_M_TXCSR_H_ERROR
+ | MGC_M_TXCSR_H_RXSTALL
+ | MGC_M_TXCSR_H_NAKTIMEOUT
+ );
+ wTxCsrVal |= MGC_M_TXCSR_FLUSHFIFO;
+
+ MGC_SelectEnd(pBase, bEnd);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd, wTxCsrVal);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd, wTxCsrVal);
+ MGC_WriteCsr8(pBase, MGC_O_HDRC_TXINTERVAL, bEnd, 0);
+
+ bDone = TRUE;
+ }
+
+ /* second cppi case */
+ if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
+ DBG(4, "extra TX%d ready, csr %04x\n", bEnd, wTxCsrVal);
+ goto finish;
+ }
+
+ /* REVISIT this looks wrong... */
+ if (!status || dma || usb_pipeisoc(nPipe)) {
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ /* (mode 0, or last short packet in mode 1)
+ * REVISIT how about ZLP?
+ */
+ if (dma && ((dma->bDesiredMode == 0)
+ || (dma->dwActualLength
+ & (qh->maxpacket - 1)))) {
+ /* Send out the packet first ... */
+ MGC_SelectEnd(pBase, bEnd);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd,
+ MGC_M_TXCSR_TXPKTRDY);
+ }
+#endif
+ if (dma)
+ wLength = dma->dwActualLength;
+ else
+ wLength = qh->segsize;
+ qh->offset += wLength;
+
+ if (usb_pipeisoc(nPipe)) {
+ struct usb_iso_packet_descriptor *d;
+
+ d = pUrb->iso_frame_desc + qh->iso_idx;
+ d->actual_length = qh->segsize;
+ if (++qh->iso_idx >= pUrb->number_of_packets) {
+ bDone = TRUE;
+ } else if (!dma) {
+ d++;
+ pBuffer = pUrb->transfer_buffer + d->offset;
+ wLength = d->length;
+ }
+ } else if (dma) {
+ bDone = TRUE;
+ } else {
+ /* see if we need to send more data, or ZLP */
+ if (qh->segsize < qh->maxpacket)
+ bDone = TRUE;
+ else if (qh->offset == pUrb->transfer_buffer_length
+ && !(pUrb->transfer_flags
+ & URB_ZERO_PACKET))
+ bDone = TRUE;
+ if (!bDone) {
+ pBuffer = pUrb->transfer_buffer
+ + qh->offset;
+ wLength = pUrb->transfer_buffer_length
+ - qh->offset;
+ }
+ }
+ }
+
+ /* urb->status != -EINPROGRESS means request has been faulted,
+ * so we must abort this transfer after cleanup
+ */
+ if (pUrb->status != -EINPROGRESS) {
+ bDone = TRUE;
+ if (status == 0)
+ status = pUrb->status;
+ }
+
+ if (bDone) {
+ /* set status */
+ pUrb->status = status;
+ pUrb->actual_length = qh->offset;
+ musb_advance_schedule(pThis, pUrb, pEnd, USB_DIR_OUT);
+
+ } else if (!(wTxCsrVal & MGC_M_TXCSR_DMAENAB)) {
+ // WARN_ON(!pBuffer);
+
+ /* REVISIT: some docs say that when pEnd->tx_double_buffered,
+ * (and presumably, fifo is not half-full) we should write TWO
+ * packets before updating TXCSR ... other docs disagree ...
+ */
+ /* PIO: start next packet in this URB */
+ wLength = min(qh->maxpacket, (u16) wLength);
+ musb_write_fifo(pEnd, wLength, pBuffer);
+ qh->segsize = wLength;
+
+ MGC_SelectEnd(pBase, bEnd);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd,
+ MGC_M_TXCSR_H_WZC_BITS | MGC_M_TXCSR_TXPKTRDY);
+ } else
+ DBG(1, "not complete, but dma enabled?\n");
+
+finish:
+ return;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side RX (IN) using Mentor DMA works as follows:
+ submit_urb ->
+ - if queue was empty, ProgramEndpoint
+ - first IN token is sent out (by setting ReqPkt)
+ LinuxIsr -> RxReady()
+ /\ => first packet is received
+ | - Set in mode 0 (DmaEnab, ~ReqPkt)
+ | -> DMA Isr (transfer complete) -> RxReady()
+ | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
+ | - if urb not complete, send next IN token (ReqPkt)
+ | | else complete urb.
+ | |
+ ---------------------------
+ *
+ * Nuances of mode 1:
+ * For short packets, no ack (+RxPktRdy) is sent automatically
+ * (even if AutoClear is ON)
+ * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
+ * automatically => major problem, as collecting the next packet becomes
+ * difficult. Hence mode 1 is not used.
+ *
+ * REVISIT
+ * All we care about at this driver level is that
+ * (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
+ * (b) termination conditions are: short RX, or buffer full;
+ * (c) fault modes include
+ * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
+ * (and that endpoint's dma queue stops immediately)
+ * - overflow (full, PLUS more bytes in the terminal packet)
+ *
+ * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
+ * thus be a great candidate for using mode 1 ... for all but the
+ * last packet of one URB's transfer.
+ */
+
+#endif
+
+/*
+ * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
+ * and high-bandwidth IN transfer cases.
+ */
+void musb_host_rx(struct musb *pThis, u8 bEnd)
+{
+ struct urb *pUrb;
+ struct musb_hw_ep *pEnd = pThis->aLocalEnd + bEnd;
+ struct musb_qh *qh = pEnd->in_qh;
+ size_t xfer_len;
+ void __iomem *pBase = pThis->pRegs;
+ int nPipe;
+ u16 wRxCsrVal, wVal;
+ u8 bIsochError = FALSE;
+ u8 bDone = FALSE;
+ u32 status;
+ struct dma_channel *dma;
+
+ MGC_SelectEnd(pBase, bEnd);
+
+ pUrb = next_urb(qh);
+ dma = is_dma_capable() ? pEnd->rx_channel : NULL;
+ status = 0;
+ xfer_len = 0;
+
+ wVal = wRxCsrVal = MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd);
+
+ if (unlikely(!pUrb)) {
+ /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
+ * usbtest #11 (unlinks) triggers it regularly, sometimes
+ * with fifo full. (Only with DMA??)
+ */
+ DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", bEnd, wVal,
+ MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCOUNT, bEnd));
+ musb_h_flush_rxfifo(pEnd, MGC_M_RXCSR_CLRDATATOG);
+ return;
+ }
+
+ nPipe = pUrb->pipe;
+
+ DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zd)\n", bEnd,
+ wRxCsrVal, pUrb->actual_length,
+ dma ? dma->dwActualLength : 0);
+
+ /* check for errors, concurrent stall & unlink is not really
+ * handled yet! */
+ if (wRxCsrVal & MGC_M_RXCSR_H_RXSTALL) {
+ DBG(3, "RX end %d STALL\n", bEnd);
+
+ /* stall; record URB status */
+ status = -EPIPE;
+
+ } else if (wRxCsrVal & MGC_M_RXCSR_H_ERROR) {
+ DBG(3, "end %d RX proto error\n", bEnd);
+
+ status = -EPROTO;
+ MGC_WriteCsr8(pBase, MGC_O_HDRC_RXINTERVAL, bEnd, 0);
+
+ } else if (wRxCsrVal & MGC_M_RXCSR_DATAERROR) {
+
+ if (USB_ENDPOINT_XFER_ISOC != qh->type) {
+ /* NOTE this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) rx urb
+ * that could use this fifo. (dma complicates it...)
+ *
+ * if (bulk && qh->ring.next != &musb->in_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ DBG(6, "RX end %d NAK timeout\n", bEnd);
+ MGC_SelectEnd(pBase, bEnd);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd,
+ MGC_M_RXCSR_H_WZC_BITS
+ | MGC_M_RXCSR_H_REQPKT);
+
+ goto finish;
+ } else {
+ DBG(4, "RX end %d ISO data error\n", bEnd);
+ /* packet error reported later */
+ bIsochError = TRUE;
+ }
+ }
+
+ /* faults abort the transfer */
+ if (status) {
+ /* clean up dma and collect transfer count */
+ if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
+ dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
+ (void) pThis->pDmaController->channel_abort(dma);
+ xfer_len = dma->dwActualLength;
+ }
+ musb_h_flush_rxfifo(pEnd, 0);
+ MGC_WriteCsr8(pBase, MGC_O_HDRC_RXINTERVAL, bEnd, 0);
+ bDone = TRUE;
+ goto finish;
+ }
+
+ if (unlikely(dma_channel_status(dma) == MGC_DMA_STATUS_BUSY)) {
+ /* SHOULD NEVER HAPPEN */
+ ERR("RX%d dma busy\n", bEnd);
+ goto finish;
+ }
+
+ /* thorough shutdown for now ... given more precise fault handling
+ * and better queueing support, we might keep a DMA pipeline going
+ * while processing this irq for earlier completions.
+ */
+
+ /* FIXME this is _way_ too much in-line logic for Mentor DMA */
+
+#ifndef CONFIG_USB_INVENTRA_DMA
+ if (wRxCsrVal & MGC_M_RXCSR_H_REQPKT) {
+ /* REVISIT this happened for a while on some short reads...
+ * the cleanup still needs investigation... looks bad...
+ * and also duplicates dma cleanup code above ... plus,
+ * shouldn't this be the "half full" double buffer case?
+ */
+ if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
+ dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
+ (void) pThis->pDmaController->channel_abort(dma);
+ xfer_len = dma->dwActualLength;
+ bDone = TRUE;
+ }
+
+ DBG(2, "RXCSR%d %04x, reqpkt, len %zd%s\n", bEnd, wRxCsrVal,
+ xfer_len, dma ? ", dma" : "");
+ wRxCsrVal &= ~MGC_M_RXCSR_H_REQPKT;
+
+ MGC_SelectEnd(pBase, bEnd);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd,
+ MGC_M_RXCSR_H_WZC_BITS | wRxCsrVal);
+ }
+#endif
+ if (dma && (wRxCsrVal & MGC_M_RXCSR_DMAENAB)) {
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ xfer_len = dma->dwActualLength;
+
+ /* done if the urb buffer is full or a short packet was
+ * received; xfer_len is added to actual_length at "finish"
+ */
+ bDone = ((pUrb->actual_length + xfer_len)
+ >= pUrb->transfer_buffer_length)
+ || (dma->dwActualLength & (qh->maxpacket - 1));
+
+ wVal &= ~(MGC_M_RXCSR_DMAENAB |
+ MGC_M_RXCSR_H_AUTOREQ |
+ MGC_M_RXCSR_AUTOCLEAR |
+ MGC_M_RXCSR_RXPKTRDY);
+
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd, wVal);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd, wVal);
+
+ /* send IN token for next packet, without AUTOREQ */
+ if (!bDone) {
+ wVal |= MGC_M_RXCSR_H_REQPKT;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd,
+ MGC_M_RXCSR_H_WZC_BITS | wVal);
+ }
+
+ DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", bEnd,
+ bDone ? "off" : "reset",
+ MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd),
+ MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCOUNT, bEnd));
+#else
+ bDone = TRUE;
+ xfer_len = dma->dwActualLength;
+#endif
+ } else if (pUrb->status == -EINPROGRESS) {
+ /* if no errors, be sure a packet is ready for unloading */
+ if (unlikely(!(wRxCsrVal & MGC_M_RXCSR_RXPKTRDY))) {
+ status = -EPROTO;
+ ERR("Rx interrupt with no errors or packet!\n");
+
+ /* FIXME this is another "SHOULD NEVER HAPPEN" */
+
+ /* SCRUB (RX) */
+ /* do the proper sequence to abort the transfer */
+ MGC_SelectEnd(pBase, bEnd);
+ wVal &= ~MGC_M_RXCSR_H_REQPKT;
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd, wVal);
+ goto finish;
+ }
+
+ /* we are expecting IN packets */
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (dma) {
+ struct dma_controller *c;
+ u16 wRxCount;
+ int status;
+
+ wRxCount = MGC_ReadCsr16(pBase,
+ MGC_O_HDRC_RXCOUNT, bEnd);
+
+ DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
+ bEnd, wRxCount,
+ pUrb->transfer_dma
+ + pUrb->actual_length,
+ qh->offset,
+ pUrb->transfer_buffer_length);
+
+ c = pThis->pDmaController;
+
+ dma->bDesiredMode = 0;
+#ifdef USE_MODE1
+ /* because of the issue below, mode 1 will
+ * only rarely behave with correct semantics.
+ */
+ if ((pUrb->transfer_flags &
+ URB_SHORT_NOT_OK)
+ && (pUrb->transfer_buffer_length -
+ pUrb->actual_length)
+ > qh->maxpacket)
+ dma->bDesiredMode = 1;
+#endif
+
+/* Disadvantage of using mode 1:
+ * It's basically usable only for mass storage class; essentially all
+ * other protocols also terminate transfers on short packets.
+ *
+ * Details:
+ * An extra IN token is sent at the end of the transfer (due to AUTOREQ).
+ * If you try to use mode 1 for (transfer_buffer_length - 512), and then
+ * use the extra IN token to grab the last packet using mode 0, the
+ * problem is that you cannot be sure when the device will send the
+ * last packet with RxPktRdy set. Sometimes the packet is received too
+ * soon, so that it gets lost when RxCSR is re-set at the end of the
+ * mode 1 transfer; sometimes it arrives just a little late, so that if
+ * you configure for mode 0 soon after the mode 1 transfer completes,
+ * you will find rxcount 0. And you can't just wait for an interrupt
+ * when the packet arrives -- you won't get one.
+ */
+
+ wVal = MGC_ReadCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd);
+ wVal &= ~MGC_M_RXCSR_H_REQPKT;
+
+ if (dma->bDesiredMode == 0) {
+ wVal &= ~MGC_M_RXCSR_H_AUTOREQ;
+ wVal |= (MGC_M_RXCSR_AUTOCLEAR |
+ MGC_M_RXCSR_DMAENAB);
+ } else
+ wVal |= (MGC_M_RXCSR_H_AUTOREQ |
+ MGC_M_RXCSR_AUTOCLEAR |
+ MGC_M_RXCSR_DMAENAB);
+
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR, bEnd,
+ MGC_M_RXCSR_H_WZC_BITS | wVal);
+
+ /* REVISIT if when actual_length != 0,
+ * transfer_buffer_length needs to be
+ * adjusted first...
+ */
+ status = c->channel_program(
+ dma, qh->maxpacket,
+ dma->bDesiredMode,
+ pUrb->transfer_dma
+ + pUrb->actual_length,
+ (dma->bDesiredMode == 0)
+ ? wRxCount
+ : pUrb->transfer_buffer_length);
+
+ if (!status) {
+ c->channel_release(dma);
+ dma = pEnd->rx_channel = NULL;
+ /* REVISIT reset CSR */
+ }
+ }
+#endif /* Mentor DMA */
+
+ if (!dma) {
+ bDone = musb_host_packet_rx(pThis, pUrb,
+ bEnd, bIsochError);
+ DBG(6, "read %spacket\n", bDone ? "last " : "");
+ }
+ }
+
+finish:
+ pUrb->actual_length += xfer_len;
+ qh->offset += xfer_len;
+ if (bDone) {
+ if (pUrb->status == -EINPROGRESS)
+ pUrb->status = status;
+ musb_advance_schedule(pThis, pUrb, pEnd, USB_DIR_IN);
+ }
+}
+
+/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
+ * the software schedule associates multiple such nodes with a given
+ * host side hardware endpoint + direction; scheduling may activate
+ * that hardware endpoint.
+ */
+static int musb_schedule(
+ struct musb *musb,
+ struct musb_qh *qh,
+ int is_in)
+{
+ int idle;
+ int wBestDiff;
+ int nBestEnd, nEnd;
+ struct musb_hw_ep *hw_ep;
+ struct list_head *head = NULL;
+
+ /* use fixed hardware for control and bulk */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ head = &musb->control;
+ hw_ep = musb->control_ep;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ hw_ep = musb->bulk_ep;
+ if (is_in)
+ head = &musb->in_bulk;
+ else
+ head = &musb->out_bulk;
+ break;
+ }
+ if (head) {
+ idle = list_empty(head);
+ list_add_tail(&qh->ring, head);
+ goto success;
+ }
+
+ /* else, periodic transfers get muxed to other endpoints */
+
+ /* FIXME this doesn't consider direction, so it can only
+ * work for one half of the endpoint hardware, and assumes
+ * the previous cases handled all non-shared endpoints...
+ */
+
+ /* we know this qh hasn't been scheduled, so all we need to do
+ * is choose which hardware endpoint to put it on ...
+ *
+ * REVISIT what we really want here is a regular schedule tree
+ * like e.g. OHCI uses, but for now musb->periodic is just an
+ * array of the _single_ logical endpoint associated with a
+ * given physical one (identity mapping logical->physical).
+ *
+ * that simplistic approach makes TT scheduling a lot simpler;
+ * there is none, and thus none of its complexity...
+ */
+ wBestDiff = 4096;
+ nBestEnd = -1;
+
+ for (nEnd = 1; nEnd < musb->bEndCount; nEnd++) {
+ int diff;
+
+ if (musb->periodic[nEnd])
+ continue;
+ hw_ep = &musb->aLocalEnd[nEnd];
+ if (hw_ep == musb->bulk_ep)
+ continue;
+
+ if (is_in)
+ diff = hw_ep->wMaxPacketSizeRx - qh->maxpacket;
+ else
+ diff = hw_ep->wMaxPacketSizeTx - qh->maxpacket;
+
+ /* best fit: the smallest fifo the transfer still fits in */
+ if (diff >= 0 && wBestDiff > diff) {
+ wBestDiff = diff;
+ nBestEnd = nEnd;
+ }
+ }
+ if (nBestEnd < 0)
+ return -ENOSPC;
+
+ idle = 1;
+ hw_ep = musb->aLocalEnd + nBestEnd;
+ musb->periodic[nBestEnd] = qh;
+ DBG(4, "qh %p periodic slot %d\n", qh, nBestEnd);
+success:
+ qh->hw_ep = hw_ep;
+ qh->hep->hcpriv = qh;
+ if (idle)
+ musb_start_urb(musb, is_in, qh);
+ return 0;
+}
+
+static int musb_urb_enqueue(
+ struct usb_hcd *hcd,
+ struct usb_host_endpoint *hep,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ unsigned long flags;
+ struct musb *musb = hcd_to_musb(hcd);
+ struct musb_qh *qh = hep->hcpriv;
+ struct usb_endpoint_descriptor *epd = &hep->desc;
+ int status;
+ unsigned type_reg;
+ unsigned interval;
+
+ /* host role must be active */
+ if (!is_host_active(musb))
+ return -ENODEV;
+
+ /* DMA mapping was already done, if needed, and this urb is on
+ * hep->urb_list ... so there's little to do unless hep wasn't
+ * yet scheduled onto a live qh.
+ *
+ * REVISIT best to keep hep->hcpriv valid until the endpoint gets
+ * disabled, testing for empty qh->ring and avoiding qh setup costs
+ * except for the first urb queued after a config change.
+ */
+ if (qh) {
+ urb->hcpriv = qh;
+ return 0;
+ }
+
+ /* Allocate and initialize qh, minimizing the work done each time
+ * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
+ *
+ * REVISIT consider a dedicated qh kmem_cache, so it's harder
+ * for bugs in other kernel code to break this driver...
+ */
+ qh = kzalloc(sizeof *qh, mem_flags);
+ if (!qh)
+ return -ENOMEM;
+
+ qh->hep = hep;
+ qh->dev = urb->dev;
+ INIT_LIST_HEAD(&qh->ring);
+ qh->is_ready = 1;
+
+ qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+
+ /* no high bandwidth support yet */
+ if (qh->maxpacket & ~0x7ff) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+
+ qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
+ qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
+
+ /* precompute rxtype/txtype/type0 register */
+ type_reg = (qh->type << 4) | qh->epnum;
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ type_reg |= 0xc0; /* speed code: low */
+ break;
+ case USB_SPEED_FULL:
+ type_reg |= 0x80; /* speed code: full */
+ break;
+ default:
+ type_reg |= 0x40; /* speed code: high */
+ }
+ qh->type_reg = type_reg;
+
+ /* precompute rxinterval/txinterval register */
+ interval = min((u8)16, epd->bInterval); /* log encoding */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_INT:
+ /* fullspeed uses linear encoding */
+ if (USB_SPEED_FULL == urb->dev->speed) {
+ interval = epd->bInterval;
+ if (!interval)
+ interval = 1;
+ }
+ /* FALLTHROUGH */
+ case USB_ENDPOINT_XFER_ISOC:
+ /* iso always uses log encoding */
+ break;
+ default:
+ /* REVISIT we actually want to use NAK limits, hinting to the
+ * transfer scheduling logic to try some other qh, e.g. try
+ * for 2 msec first:
+ *
+ * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
+ *
+ * The downside of disabling this is that transfer scheduling
+ * gets VERY unfair for nonperiodic transfers; a misbehaving
+ * peripheral could make that hurt. Or for reads, one that's
+ * perfectly normal: network and other drivers keep reads
+ * posted at all times, so having one pending for a week should
+ * be perfectly safe.
+ *
+ * The upside of disabling it is that we avoid needing transfer
+ * scheduling code here, putting that issue aside for a while.
+ */
+ interval = 0;
+ }
+ qh->intv_reg = interval;
+
+ /* precompute addressing for external hub/tt ports */
+ if (musb->bIsMultipoint) {
+ struct usb_device *parent = urb->dev->parent;
+
+ if (parent != hcd->self.root_hub) {
+ qh->h_addr_reg = (u8) parent->devnum;
+
+ /* set up tt info if needed */
+ if (urb->dev->tt) {
+ qh->h_port_reg = (u8) urb->dev->ttport;
+ qh->h_addr_reg |= 0x80;
+ }
+ }
+ }
+
+ /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
+ * until we get real dma queues (with an entry for each urb/buffer),
+ * we only have work to do in the former case.
+ */
+ spin_lock_irqsave(&musb->Lock, flags);
+ if (hep->hcpriv) {
+ /* some concurrent activity submitted another urb to hep...
+ * odd, rare, error prone, but legal.
+ */
+ kfree(qh);
+ status = 0;
+ } else
+ status = musb_schedule(musb, qh,
+ epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
+
+ if (status == 0) {
+ urb->hcpriv = qh;
+ /* FIXME set urb->start_frame for iso/intr, it's tested in
+ * musb_start_urb(), but otherwise only konicawc cares ...
+ */
+ }
+ spin_unlock_irqrestore(&musb->Lock, flags);
+
+done:
+ if (status != 0)
+ kfree(qh);
+ return status;
+}
+
+
+/*
+ * abort a transfer that's at the head of a hardware queue.
+ * called with controller locked, irqs blocked
+ * that hardware queue advances to the next transfer, unless prevented
+ */
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+{
+ struct musb_hw_ep *ep = qh->hw_ep;
+ unsigned hw_end = ep->bLocalEnd;
+ void __iomem *regs = ep->musb->pRegs;
+ u16 csr;
+ int status = 0;
+
+ MGC_SelectEnd(regs, hw_end);
+
+ if (is_dma_capable()) {
+ struct dma_channel *dma;
+
+ dma = is_in ? ep->rx_channel : ep->tx_channel;
+ status = ep->musb->pDmaController->channel_abort(dma);
+ DBG(status ? 1 : 3, "abort %cX%d DMA for urb %p --> %d\n",
+ is_in ? 'R' : 'T', ep->bLocalEnd, urb, status);
+ urb->actual_length += dma->dwActualLength;
+ }
+
+ /* turn off DMA requests, discard state, stop polling ... */
+ if (is_in) {
+ /* giveback saves bulk toggle */
+ csr = musb_h_flush_rxfifo(ep, 0);
+
+ /* REVISIT we still get an irq; should likely clear the
+ * endpoint's irq status here to avoid bogus irqs.
+ * clearing that status is platform-specific...
+ */
+ } else {
+ /* SCRUB (TX) */
+ csr = MGC_ReadCsr16(regs, MGC_O_HDRC_TXCSR, hw_end);
+ csr &= ~( MGC_M_TXCSR_AUTOSET
+ | MGC_M_TXCSR_DMAENAB
+ | MGC_M_TXCSR_H_RXSTALL
+ | MGC_M_TXCSR_H_NAKTIMEOUT
+ | MGC_M_TXCSR_H_ERROR
+ | MGC_M_TXCSR_FIFONOTEMPTY
+ );
+ csr |= MGC_M_TXCSR_FLUSHFIFO;
+ MGC_WriteCsr16(regs, MGC_O_HDRC_TXCSR, hw_end, csr);
+ MGC_WriteCsr16(regs, MGC_O_HDRC_TXCSR, hw_end, csr);
+ /* flush cpu writebuffer */
+ csr = MGC_ReadCsr16(regs, MGC_O_HDRC_TXCSR, hw_end);
+ }
+ if (status == 0)
+ musb_advance_schedule(ep->musb, urb, ep, is_in);
+ return status;
+}
+
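+/* Stop a queued urb: if it isn't yet on the hardware, just unlink it
+ * and give it back; otherwise abort the transfer the endpoint is
+ * currently working on.
+ */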
+static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ struct musb_qh *qh;
+ struct list_head *sched;
+ struct urb *tmp;
+ unsigned long flags;
+ int status = -ENOENT;
+
+ DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out");
+
+ spin_lock_irqsave(&musb->Lock, flags);
+
+ /* make sure the urb is still queued and not completed */
+ spin_lock(&urb->lock);
+ qh = urb->hcpriv;
+ if (qh) {
+ struct usb_host_endpoint *hep;
+
+ hep = qh->hep;
+ list_for_each_entry(tmp, &hep->urb_list, urb_list) {
+ if (urb == tmp) {
+ status = 0;
+ break;
+ }
+ }
+ }
+ spin_unlock(&urb->lock);
+ if (status)
+ goto done;
+
+ /* Any URB not actively programmed into endpoint hardware can be
+ * immediately given back. Such an URB must be at the head of its
+ * endpoint queue, unless someday we get real DMA queues. And even
+ * then, it might not be known to the hardware...
+ *
+ * Otherwise abort current transfer, pending dma, etc.; urb->status
+ * has already been updated. This is a synchronous abort; it'd be
+ * OK to hold off until after some IRQ, though.
+ */
+ if (urb->urb_list.prev != &qh->hep->urb_list)
+ status = -EINPROGRESS;
+ else {
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ sched = &musb->control;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (usb_pipein(urb->pipe))
+ sched = &musb->in_bulk;
+ else
+ sched = &musb->out_bulk;
+ break;
+ default:
+ /* REVISIT when we get a schedule tree, periodic
+ * transfers won't always be at the head of a
+ * singleton queue...
+ */
+ sched = NULL;
+ break;
+ }
+ }
+
+ /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
+ if (status < 0 || (sched && qh != first_qh(sched))) {
+ status = -EINPROGRESS;
+ musb_giveback(qh, urb, 0);
+ } else
+ status = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+done:
+ spin_unlock_irqrestore(&musb->Lock, flags);
+ return status;
+}
+
+/* disable an endpoint */
+static void
+musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+{
+ u8 epnum = hep->desc.bEndpointAddress;
+ unsigned long flags;
+ struct musb *musb = hcd_to_musb(hcd);
+ u8 is_in = epnum & USB_DIR_IN;
+ struct musb_qh *qh = hep->hcpriv;
+ struct urb *urb;
+ struct list_head *sched;
+
+ if (!qh)
+ return;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ sched = &musb->control;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (is_in)
+ sched = &musb->in_bulk;
+ else
+ sched = &musb->out_bulk;
+ break;
+ default:
+ /* REVISIT when we get a schedule tree, periodic transfers
+ * won't always be at the head of a singleton queue...
+ */
+ sched = NULL;
+ break;
+ }
+
+ /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
+
+ /* kick first urb off the hardware, if needed */
+ qh->is_ready = 0;
+ if (!sched || qh == first_qh(sched)) {
+ urb = next_urb(qh);
+
+ /* make software (then hardware) stop ASAP */
+ spin_lock(&urb->lock);
+ if (urb->status == -EINPROGRESS)
+ urb->status = -ESHUTDOWN;
+ spin_unlock(&urb->lock);
+
+ /* cleanup */
+ musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+ }
+
+ /* then just nuke all the others; this also covers the case above
+ * where nothing was queued on the hardware
+ */
+ while (!list_empty(&hep->urb_list))
+ musb_giveback(qh, next_urb(qh), -ESHUTDOWN);
+
+ spin_unlock_irqrestore(&musb->Lock, flags);
+}
+
+static int musb_h_get_frame_number(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ return musb_readw(musb->pRegs, MGC_O_HDRC_FRAME);
+}
+
+static int musb_h_start(struct usb_hcd *hcd)
+{
+ hcd->state = HC_STATE_RUNNING;
+ return 0;
+}
+
+static void musb_h_stop(struct usb_hcd *hcd)
+{
+ musb_stop(hcd_to_musb(hcd));
+ hcd->state = HC_STATE_HALT;
+}
+
+const struct hc_driver musb_hc_driver = {
+ .description = "musb-hcd",
+ .product_desc = "MUSB HDRC host driver",
+ .hcd_priv_size = sizeof (struct musb),
+ .flags = HCD_USB2 | HCD_MEMORY,
+
+ /* not using irq handler or reset hooks from usbcore, since
+ * those must be shared with peripheral code for OTG configs
+ */
+
+ .start = musb_h_start,
+ .stop = musb_h_stop,
+
+ .get_frame_number = musb_h_get_frame_number,
+
+ .urb_enqueue = musb_urb_enqueue,
+ .urb_dequeue = musb_urb_dequeue,
+ .endpoint_disable = musb_h_disable,
+
+ .hub_status_data = musb_hub_status_data,
+ .hub_control = musb_hub_control,
+// .bus_suspend = musb_bus_suspend,
+// .bus_resume = musb_bus_resume,
+// .start_port_reset = NULL,
+// .hub_irq_enable = NULL,
+};
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+#ifndef _MUSB_HOST_H
+#define _MUSB_HOST_H
+
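+/* usbcore allocates "struct musb" right after struct usb_hcd, as
+ * hcd->hcd_priv (see hcd_priv_size in musb_hc_driver); these helpers
+ * convert between the two by that fixed offset.
+ */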
+static inline struct usb_hcd *musb_to_hcd(struct musb *musb)
+{
+ return (struct usb_hcd *) (((void *)musb)
+ - offsetof(struct usb_hcd, hcd_priv));
+}
+
+static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+ return (void *) hcd->hcd_priv;
+}
+
+/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints
+ */
+struct musb_qh {
+ struct usb_host_endpoint *hep; /* usbcore info */
+ struct usb_device *dev;
+ struct musb_hw_ep *hw_ep; /* current binding */
+
+ struct list_head ring; /* of musb_qh */
+ //struct musb_qh *next; /* for periodic tree */
+
+ unsigned offset; /* in urb->transfer_buffer */
+ unsigned segsize; /* current xfer fragment */
+
+ u8 type_reg; /* {rx,tx} type register */
+ u8 intv_reg; /* {rx,tx} interval register */
+ u8 addr_reg; /* device address register */
+ u8 h_addr_reg; /* hub address register */
+ u8 h_port_reg; /* hub port register */
+
+ u8 is_ready; /* safe to modify hw_ep */
+ u8 type; /* XFERTYPE_* */
+ u8 epnum;
+ u16 maxpacket;
+ u16 frame; /* for periodic schedule */
+ unsigned iso_idx; /* in urb->iso_frame_desc[] */
+};
+
+/* map from control or bulk queue head to the first qh on that ring */
+static inline struct musb_qh *first_qh(struct list_head *q)
+{
+ if (list_empty(q))
+ return NULL;
+ return container_of(q->next, struct musb_qh, ring);
+}
+
+
+extern void musb_h_tx_start(struct musb *, u8 bEnd);
+extern void musb_root_disconnect(struct musb *musb);
+
+struct usb_hcd;
+
+extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf);
+extern int musb_hub_control(struct usb_hcd *hcd,
+ u16 typeReq, u16 wValue, u16 wIndex,
+ char *buf, u16 wLength);
+extern int musb_bus_suspend(struct usb_hcd *);
+extern int musb_bus_resume(struct usb_hcd *);
+
+extern const struct hc_driver musb_hc_driver;
+
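+/* peek at the next URB queued on this qh's host endpoint, if any */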
+static inline struct urb *next_urb(struct musb_qh *qh)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ struct list_head *queue;
+
+ if (!qh)
+ return NULL;
+ queue = &qh->hep->urb_list;
+ if (list_empty(queue))
+ return NULL;
+ return container_of(queue->next, struct urb, urb_list);
+#else
+ return NULL;
+#endif
+}
+
+#endif /* _MUSB_HOST_H */
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS. THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+/*
+ * Inventra Controller Driver (ICD) for Linux.
+ *
+ * The code managing debug files (currently in procfs).
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h> /* FIXME remove procfs writes */
+
+#include "musbdefs.h"
+
+#include "davinci.h"
+
+
+#ifdef CONFIG_USB_MUSB_OTG
+
+static const char *state_string(enum usb_otg_state state)
+{
+ switch (state) {
+ case OTG_STATE_A_IDLE: return "a_idle";
+ case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise";
+ case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon";
+ case OTG_STATE_A_HOST: return "a_host";
+ case OTG_STATE_A_SUSPEND: return "a_suspend";
+ case OTG_STATE_A_PERIPHERAL: return "a_peripheral";
+ case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall";
+ case OTG_STATE_A_VBUS_ERR: return "a_vbus_err";
+ case OTG_STATE_B_IDLE: return "b_idle";
+ case OTG_STATE_B_SRP_INIT: return "b_srp_init";
+ case OTG_STATE_B_PERIPHERAL: return "b_peripheral";
+ case OTG_STATE_B_WAIT_ACON: return "b_wait_acon";
+ case OTG_STATE_B_HOST: return "b_host";
+ default: return "UNDEFINED";
+ }
+}
+
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+
+static int dump_qh(struct musb_qh *qh, char *buf, unsigned max)
+{
+ int count;
+ int tmp;
+ struct usb_host_endpoint *hep = qh->hep;
+ struct urb *urb;
+
+ count = snprintf(buf, max, " qh %p dev%d ep%d%s max%d\n",
+ qh, qh->dev->devnum, qh->epnum,
+ ({ char *s; switch (qh->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ s = "-bulk"; break;
+ case USB_ENDPOINT_XFER_INT:
+ s = "-int"; break;
+ case USB_ENDPOINT_XFER_CONTROL:
+ s = ""; break;
+ default:
+ s = "iso"; break;
+ }; s; }),
+ qh->maxpacket);
+ buf += count;
+ max -= count;
+
+ list_for_each_entry(urb, &hep->urb_list, urb_list) {
+ tmp = snprintf(buf, max, "\t%s urb %p %d/%d\n",
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb, urb->actual_length,
+ urb->transfer_buffer_length);
+ if (tmp < 0)
+ break;
+ tmp = min(tmp, (int)max);
+ count += tmp;
+ buf += tmp;
+ max -= tmp;
+ }
+ return count;
+}
+
+static int
+dump_queue(struct list_head *q, char *buf, unsigned max)
+{
+ int count = 0;
+ struct musb_qh *qh;
+
+ list_for_each_entry(qh, q, ring) {
+ int tmp;
+
+ tmp = dump_qh(qh, buf, max);
+ if (tmp < 0)
+ break;
+ tmp = min(tmp, (int)max);
+ count += tmp;
+ buf += tmp;
+ max -= tmp;
+ }
+ return count;
+}
+
+#endif /* HCD */
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+static int dump_ep(struct musb_ep *ep, char *buffer, unsigned max)
+{
+ char *buf = buffer;
+ int code = 0;
+ void __iomem *regs = ep->hw_ep->regs;
+
+ do {
+ struct usb_request *req;
+
+ code = snprintf(buf, max,
+ "\n%s (hw%d): %scsr %04x maxp %04x\n",
+ ep->name, ep->bEndNumber,
+ ep->dma ? "dma, " : "",
+ musb_readw(regs,
+ (ep->is_in || !ep->bEndNumber)
+ ? MGC_O_HDRC_TXCSR
+ : MGC_O_HDRC_RXCSR),
+ musb_readw(regs, ep->is_in
+ ? MGC_O_HDRC_TXMAXP
+ : MGC_O_HDRC_RXMAXP)
+ );
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+ if (ep->bEndNumber) {
+ unsigned cppi = ep->bEndNumber - 1;
+ void __iomem *base = ep->pThis->ctrl_base;
+ unsigned off1 = cppi << 2;
+ void __iomem *ram = base;
+ char tmp[16];
+
+ if (ep->is_in) {
+ ram += DAVINCI_TXCPPI_STATERAM_OFFSET(cppi);
+ tmp[0] = 0;
+ } else {
+ ram += DAVINCI_RXCPPI_STATERAM_OFFSET(cppi);
+ snprintf(tmp, sizeof tmp, "%d left, ",
+ musb_readl(base,
+ DAVINCI_RXCPPI_BUFCNT0_REG + off1));
+ }
+
+ code = snprintf(buf, max, "%cX DMA%d: %s"
+ "%08x %08x, %08x %08x; "
+ "%08x %08x %08x .. %08x\n",
+ ep->is_in ? 'T' : 'R',
+ ep->bEndNumber - 1, tmp,
+ musb_readl(ram, 0 * 4),
+ musb_readl(ram, 1 * 4),
+ musb_readl(ram, 2 * 4),
+ musb_readl(ram, 3 * 4),
+ musb_readl(ram, 4 * 4),
+ musb_readl(ram, 5 * 4),
+ musb_readl(ram, 6 * 4),
+ musb_readl(ram, 7 * 4));
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+#endif
+
+ if (list_empty(&ep->req_list)) {
+ code = snprintf(buf, max, "\t(queue empty)\n");
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ break;
+ }
+ list_for_each_entry (req, &ep->req_list, list) {
+ code = snprintf(buf, max, "\treq %p, %s%s%d/%d\n",
+ req,
+ req->zero ? "zero, " : "",
+ req->short_not_ok ? "!short, " : "",
+ req->actual, req->length);
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+ } while(0);
+ return (buf > buffer) ? (buf - buffer) : code;
+}
+#endif
+
+static int
+dump_end_info(struct musb *pThis, u8 bEnd, char *aBuffer, unsigned max)
+{
+ int code = 0;
+ char *buf = aBuffer;
+ struct musb_hw_ep *pEnd = &pThis->aLocalEnd[bEnd];
+
+ do {
+ MGC_SelectEnd(pThis->pRegs, bEnd);
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (is_host_active(pThis)) {
+ int dump_rx, dump_tx;
+ void __iomem *regs = pEnd->regs;
+
+ /* TEMPORARY (!) until we have a real periodic
+ * schedule tree ...
+ */
+ if (!bEnd) {
+ /* control is shared, uses RX queue
+ * but (mostly) shadowed tx registers
+ */
+ dump_tx = !list_empty(&pThis->control);
+ dump_rx = 0;
+ } else if (pEnd == pThis->bulk_ep) {
+ dump_tx = !list_empty(&pThis->out_bulk);
+ dump_rx = !list_empty(&pThis->in_bulk);
+ } else if (pThis->periodic[bEnd]) {
+ struct usb_host_endpoint *hep;
+
+ hep = pThis->periodic[bEnd]->hep;
+ dump_rx = hep->desc.bEndpointAddress
+ & USB_ENDPOINT_DIR_MASK;
+ dump_tx = !dump_rx;
+ } else
+ break;
+ /* END TEMPORARY */
+
+
+ /* FIXME for rx and tx dump hardware fifo and
+ * double-buffer flags ... and make register and stat
+ * dumps (mostly) usable on the peripheral side too
+ */
+ if (dump_rx) {
+ code = snprintf(buf, max,
+ "\nRX%d: rxcsr %04x interval %02x "
+ "max %04x type %02x; "
+ "dev %d hub %d port %d"
+ "\n",
+ bEnd,
+ musb_readw(regs, MGC_O_HDRC_RXCSR),
+ musb_readw(regs, MGC_O_HDRC_RXINTERVAL),
+ musb_readw(regs, MGC_O_HDRC_RXMAXP),
+ musb_readw(regs, MGC_O_HDRC_RXTYPE),
+ /* FIXME: assumes multipoint */
+ musb_readb(pThis->pRegs,
+ MGC_BUSCTL_OFFSET(bEnd,
+ MGC_O_HDRC_RXFUNCADDR)),
+ musb_readb(pThis->pRegs,
+ MGC_BUSCTL_OFFSET(bEnd,
+ MGC_O_HDRC_RXHUBADDR)),
+ musb_readb(pThis->pRegs,
+ MGC_BUSCTL_OFFSET(bEnd,
+ MGC_O_HDRC_RXHUBPORT))
+ );
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+ if (bEnd && pEnd->rx_channel) {
+ unsigned cppi = bEnd - 1;
+ unsigned off1 = cppi << 2;
+ void __iomem *base;
+ void __iomem *ram;
+ char tmp[16];
+
+ base = pThis->ctrl_base;
+ ram = base + DAVINCI_RXCPPI_STATERAM_OFFSET(cppi);
+ snprintf(tmp, sizeof tmp, "%d left, ",
+ musb_readl(base,
+ DAVINCI_RXCPPI_BUFCNT0_REG
+ + off1));
+
+ code = snprintf(buf, max,
+ " rx dma%d: %s"
+ "%08x %08x, %08x %08x; "
+ "%08x %08x %08x .. %08x\n",
+ cppi, tmp,
+ musb_readl(ram, 0 * 4),
+ musb_readl(ram, 1 * 4),
+ musb_readl(ram, 2 * 4),
+ musb_readl(ram, 3 * 4),
+ musb_readl(ram, 4 * 4),
+ musb_readl(ram, 5 * 4),
+ musb_readl(ram, 6 * 4),
+ musb_readl(ram, 7 * 4));
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+#endif
+ if (pEnd == pThis->bulk_ep
+ && !list_empty(&pThis->in_bulk)) {
+ code = dump_queue(&pThis->in_bulk,
+ buf, max);
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ } else if (pThis->periodic[bEnd]) {
+ code = dump_qh(pThis->periodic[bEnd],
+ buf, max);
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+ }
+
+ if (dump_tx) {
+ code = snprintf(buf, max,
+ "\nTX%d: txcsr %04x interval %02x "
+ "max %04x type %02x; "
+ "dev %d hub %d port %d"
+ "\n",
+ bEnd,
+ musb_readw(regs, MGC_O_HDRC_TXCSR),
+ musb_readw(regs, MGC_O_HDRC_TXINTERVAL),
+ musb_readw(regs, MGC_O_HDRC_TXMAXP),
+ musb_readw(regs, MGC_O_HDRC_TXTYPE),
+ /* FIXME: assumes multipoint */
+ musb_readb(pThis->pRegs,
+ MGC_BUSCTL_OFFSET(bEnd,
+ MGC_O_HDRC_TXFUNCADDR)),
+ musb_readb(pThis->pRegs,
+ MGC_BUSCTL_OFFSET(bEnd,
+ MGC_O_HDRC_TXHUBADDR)),
+ musb_readb(pThis->pRegs,
+ MGC_BUSCTL_OFFSET(bEnd,
+ MGC_O_HDRC_TXHUBPORT))
+ );
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+#ifdef CONFIG_USB_TI_CPPI_DMA
+ if (bEnd && pEnd->tx_channel) {
+ unsigned cppi = bEnd - 1;
+ void __iomem *base;
+ void __iomem *ram;
+
+ base = pThis->ctrl_base;
+ ram = base + DAVINCI_TXCPPI_STATERAM_OFFSET(cppi);
+ code = snprintf(buf, max,
+ " tx dma%d: "
+ "%08x %08x, %08x %08x; "
+ "%08x %08x %08x .. %08x\n",
+ cppi,
+ musb_readl(ram, 0 * 4),
+ musb_readl(ram, 1 * 4),
+ musb_readl(ram, 2 * 4),
+ musb_readl(ram, 3 * 4),
+ musb_readl(ram, 4 * 4),
+ musb_readl(ram, 5 * 4),
+ musb_readl(ram, 6 * 4),
+ musb_readl(ram, 7 * 4));
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+#endif
+ if (pEnd == pThis->control_ep
+ && !list_empty(&pThis->control)) {
+ code = dump_queue(&pThis->control,
+ buf, max);
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ } else if (pEnd == pThis->bulk_ep
+ && !list_empty(&pThis->out_bulk)) {
+ code = dump_queue(&pThis->out_bulk,
+ buf, max);
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ } else if (pThis->periodic[bEnd]) {
+ code = dump_qh(pThis->periodic[bEnd],
+ buf, max);
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+ }
+ }
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ if (is_peripheral_active(pThis)) {
+ code = 0;
+
+ if (pEnd->ep_in.desc || !bEnd) {
+ code = dump_ep(&pEnd->ep_in, buf, max);
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+ if (pEnd->ep_out.desc) {
+ code = dump_ep(&pEnd->ep_out, buf, max);
+ if (code < 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+ }
+#endif
+ } while (0);
+
+ return buf - aBuffer;
+}
+
+/** Dump the current status and compile options.
+ * @param pThis the device driver instance
+ * @param buffer where to dump the status; it must be big enough to hold
+ * the result, otherwise "BAD THINGS HAPPEN(TM)".
+ */
+static int dump_header_stats(struct musb *pThis, char *buffer)
+{
+ int code, count = 0;
+ const void __iomem *pBase = pThis->pRegs;
+
+ *buffer = 0;
+ count = sprintf(buffer, "Status: %sHDRC, Mode=%s "
+ "(Power=%02x, DevCtl=%02x)\n",
+ (pThis->bIsMultipoint ? "M" : ""), MUSB_MODE(pThis),
+ musb_readb(pBase, MGC_O_HDRC_POWER),
+ musb_readb(pBase, MGC_O_HDRC_DEVCTL));
+ if (count < 0)
+ return count;
+ buffer += count;
+
+#ifdef CONFIG_USB_MUSB_OTG
+ code = sprintf(buffer, "OTG state: %s (%s)\n",
+ state_string(pThis->OtgMachine.bState),
+ state_string(pThis->xceiv.state));
+ if (code < 0)
+ return code;
+ buffer += code;
+ count += code;
+#endif
+
+ code = sprintf(buffer,
+ "Options: "
+#ifdef CONFIG_USB_INVENTRA_FIFO
+ "pio"
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+ "cppi-dma"
+#elif defined(CONFIG_USB_INVENTRA_DMA)
+ "musb-dma"
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+ "tusb-omap-dma"
+#else
+ "?dma?"
+#endif
+ ", "
+#ifdef CONFIG_USB_MUSB_OTG
+ "otg (peripheral+host)"
+#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
+ "peripheral"
+#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+ "host"
+#endif
+ ", debug=%d [eps=%d]\n",
+ debug,
+ pThis->bEndCount);
+ if (code < 0)
+ return code;
+ count += code;
+ buffer += code;
+
+#ifdef CONFIG_ARCH_DAVINCI
+ code = sprintf(buffer,
+ "DaVinci: ctrl=%02x stat=%1x phy=%03x\n"
+ "\trndis=%05x auto=%04x intsrc=%08x intmsk=%08x"
+ "\n",
+ musb_readl(pThis->ctrl_base, DAVINCI_USB_CTRL_REG),
+ musb_readl(pThis->ctrl_base, DAVINCI_USB_STAT_REG),
+ __raw_readl(IO_ADDRESS(USBPHY_CTL_PADDR)),
+ musb_readl(pThis->ctrl_base, DAVINCI_RNDIS_REG),
+ musb_readl(pThis->ctrl_base, DAVINCI_AUTOREQ_REG),
+ musb_readl(pThis->ctrl_base,
+ DAVINCI_USB_INT_SOURCE_REG),
+ musb_readl(pThis->ctrl_base,
+ DAVINCI_USB_INT_MASK_REG));
+ if (code < 0)
+ return count;
+ count += code;
+ buffer += code;
+#endif /* DAVINCI */
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+ if (pThis->pDmaController) {
+ code = sprintf(buffer,
+ "CPPI: txcr=%d txsrc=%01x txena=%01x; "
+ "rxcr=%d rxsrc=%01x rxena=%01x "
+ "\n",
+ musb_readl(pThis->ctrl_base,
+ DAVINCI_TXCPPI_CTRL_REG),
+ musb_readl(pThis->ctrl_base,
+ DAVINCI_TXCPPI_RAW_REG),
+ musb_readl(pThis->ctrl_base,
+ DAVINCI_TXCPPI_INTENAB_REG),
+ musb_readl(pThis->ctrl_base,
+ DAVINCI_RXCPPI_CTRL_REG),
+ musb_readl(pThis->ctrl_base,
+ DAVINCI_RXCPPI_RAW_REG),
+ musb_readl(pThis->ctrl_base,
+ DAVINCI_RXCPPI_INTENAB_REG));
+ if (code < 0)
+ return count;
+ count += code;
+ buffer += code;
+ }
+#endif /* CPPI */
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ if (is_peripheral_enabled(pThis)) {
+ code = sprintf(buffer, "Gadget driver: %s\n",
+ pThis->pGadgetDriver
+ ? pThis->pGadgetDriver->driver.name
+ : "(none)");
+ if (code < 0)
+ return code;
+ count += code;
+ buffer += code;
+ }
+#endif
+
+ return count;
+}
+
+/* Write to ProcFS
+ *
+ * C soft-connect
+ * c soft-disconnect
+ * I enable HS
+ * i disable HS
+ * s stop session
+ * F force session (OTG-unfriendly)
+ * E rElinquish bus (OTG)
+ * H request host mode
+ * h cancel host request
+ * D<num> set/query the debug level
+ */
+static int musb_proc_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char cmd;
+ u8 bReg;
+ struct musb *musb = (struct musb *)data;
+ void __iomem *pBase = musb->pRegs;
+
+ /* MOD_INC_USE_COUNT; */
+
+ if (copy_from_user(&cmd, buffer, 1))
+ return -EFAULT;
+ switch (cmd) {
+ case 'C':
+ if (pBase) {
+ bReg =
+ musb_readb(pBase,
+ MGC_O_HDRC_POWER) | MGC_M_POWER_SOFTCONN;
+ musb_writeb(pBase, MGC_O_HDRC_POWER, bReg);
+ }
+ break;
+
+ case 'c':
+ if (pBase) {
+ bReg =
+ musb_readb(pBase,
+ MGC_O_HDRC_POWER) & ~MGC_M_POWER_SOFTCONN;
+ musb_writeb(pBase, MGC_O_HDRC_POWER, bReg);
+ }
+ break;
+
+ case 'I':
+ if (pBase) {
+ bReg =
+ musb_readb(pBase,
+ MGC_O_HDRC_POWER) | MGC_M_POWER_HSENAB;
+ musb_writeb(pBase, MGC_O_HDRC_POWER, bReg);
+ }
+ break;
+
+ case 'i':
+ if (pBase) {
+ bReg =
+ musb_readb(pBase,
+ MGC_O_HDRC_POWER) & ~MGC_M_POWER_HSENAB;
+ musb_writeb(pBase, MGC_O_HDRC_POWER, bReg);
+ }
+ break;
+
+ case 'F':
+ bReg = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
+ bReg |= MGC_M_DEVCTL_SESSION;
+ musb_writeb(pBase, MGC_O_HDRC_DEVCTL, bReg);
+ break;
+
+ case 'H':
+ if (pBase) {
+ bReg = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
+ bReg |= MGC_M_DEVCTL_HR;
+ musb_writeb(pBase, MGC_O_HDRC_DEVCTL, bReg);
+ //MUSB_HST_MODE( ((struct musb*)data) );
+ //WARN("Host Mode\n");
+ }
+ break;
+
+ case 'h':
+ if (pBase) {
+ bReg = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
+ bReg &= ~MGC_M_DEVCTL_HR;
+ musb_writeb(pBase, MGC_O_HDRC_DEVCTL, bReg);
+ }
+ break;
+
+#if (MUSB_DEBUG>0)
+ /* set/read debug level */
+ case 'D':{
+ if (count > 1) {
+ char digits[8], *p = digits;
+ int i = 0, level = 0, sign = 1, len =
+ min(count - 1, (unsigned long)8);
+
+ if (copy_from_user(&digits, &buffer[1], len))
+ return -EFAULT;
+
+ /* optional sign */
+ if (*p == '-') {
+ len -= 1;
+ sign = -sign;
+ p++;
+ }
+
+ /* read it */
+ while (i++ < len && *p >= '0' && *p <= '9') {
+ level = level * 10 + (*p - '0');
+ p++;
+ }
+
+ level *= sign;
+ DBG(1, "debug level %d\n", level);
+ debug = level;
+ }
+ }
+ break;
+
+
+ case '?':
+ INFO("?: you are seeing it\n");
+ INFO("C/c: soft connect enable/disable\n");
+ INFO("I/i: hispeed enable/disable\n");
+ INFO("F: force session start\n");
+ INFO("H: host mode\n");
+ INFO("D: set/read dbug level\n");
+ break;
+#endif
+
+ default:
+ ERR("Command %c not implemented\n", cmd);
+ break;
+ }
+
+ musb_platform_try_idle(musb);
+
+ return count;
+}
+
+static int musb_proc_read(char *page, char **start,
+ off_t off, int count, int *eof, void *data)
+{
+ char *buffer = page;
+ int code = 0;
+ unsigned long flags;
+ struct musb *pThis = data;
+ unsigned bEnd;
+
+ count -= off;
+ count -= 1; /* for NUL at end */
+ if (count < 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pThis->Lock, flags);
+
+ code = dump_header_stats(pThis, buffer);
+ if (code > 0) {
+ buffer += code;
+ count -= code;
+ }
+
+ /* generate the report for the end points */
+ // REVISIT ... not unless something's connected!
+ for (bEnd = 0; count >= 0 && bEnd < pThis->bEndCount;
+ bEnd++) {
+ code = dump_end_info(pThis, bEnd, buffer, count);
+ if (code > 0) {
+ buffer += code;
+ count -= code;
+ }
+ }
+
+ spin_unlock_irqrestore(&pThis->Lock, flags);
+ *eof = 1;
+
+ musb_platform_try_idle(pThis);
+
+ return (buffer - page) - off;
+}
+
+void __devexit musb_debug_delete(char *name, struct musb *musb)
+{
+ if (musb->pProcEntry)
+ remove_proc_entry(name, NULL);
+}
+
+struct proc_dir_entry *__devinit
+musb_debug_create(char *name, struct musb *data)
+{
+ struct proc_dir_entry *pde;
+
+ /* FIXME convert everything to seq_file; then later, debugfs */
+
+ if (!name)
+ return NULL;
+
+ data->pProcEntry = pde = create_proc_entry(name,
+ S_IFREG | S_IRUGO | S_IWUSR, NULL);
+ if (pde) {
+ pde->data = data;
+ // pde->owner = THIS_MODULE;
+
+ pde->read_proc = musb_proc_read;
+ pde->write_proc = musb_proc_write;
+
+ pde->size = 0;
+
+ pr_debug("Registered /proc/%s\n", name);
+ } else {
+ pr_debug("Cannot create a valid proc file entry");
+ }
+
+ return pde;
+}
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS. THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+#ifndef __MUSB_MUSBDEFS_H__
+#define __MUSB_MUSBDEFS_H__
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/usb_ch9.h>
+#include <linux/usb_otg.h>
+#include <linux/usb/musb.h>
+
+struct musb;
+struct musb_hw_ep;
+struct musb_ep;
+
+
+#include "debug.h"
+#include "dma.h"
+
+#ifdef CONFIG_USB_MUSB_SOC
+/*
+ * Get core configuration from a header converted (by cfg_conv)
+ * from the Verilog config file generated by the core config utility
+ *
+ * For now we assume that header is provided along with other
+ * arch-specific files. Discrete chips will need a build tweak.
+ * So will using AHB IDs from silicon that provides them.
+ */
+#include <asm/arch/hdrc_cnf.h>
+#endif
+
+#include "plat_arc.h"
+#include "musbhdrc.h"
+
+
+/* REVISIT tune this */
+#define MIN_DMA_REQUEST 1 /* use PIO below this xfer size */
+
+
+#ifdef CONFIG_USB_MUSB_OTG
+#include "otg.h"
+
+#define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST)
+#define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL)
+#define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG)
+
+/* NOTE: otg and peripheral-only state machines start at B_IDLE.
+ * OTG or host-only go to A_IDLE when ID is sensed.
+ */
+#define is_peripheral_active(m) (is_peripheral_capable() && !(m)->bIsHost)
+#define is_host_active(m) (is_host_capable() && (m)->bIsHost)
+
+/* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't really
+ * override that choice selection (often USB_GADGET_DUMMY_HCD).
+ */
+#ifndef CONFIG_USB_GADGET_MUSB_HDRC
+#error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC
+#endif
+
+#else
+#define is_peripheral_enabled(musb) is_peripheral_capable()
+#define is_host_enabled(musb) is_host_capable()
+#define is_otg_enabled(musb) 0
+
+#define is_peripheral_active(musb) is_peripheral_capable()
+#define is_host_active(musb) is_host_capable()
+#endif
+
+#ifdef CONFIG_PROC_FS
+#include <linux/fs.h>
+#define MUSB_CONFIG_PROC_FS
+#endif
+
+/****************************** PERIPHERAL ROLE *****************************/
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+#include <linux/usb_gadget.h>
+#include "musb_gadget.h"
+
+#define is_peripheral_capable() (1)
+
+extern irqreturn_t musb_g_ep0_irq(struct musb *);
+extern void musb_g_tx(struct musb *, u8);
+extern void musb_g_rx(struct musb *, u8);
+extern void musb_g_reset(struct musb *);
+extern void musb_g_suspend(struct musb *);
+extern void musb_g_resume(struct musb *);
+extern void musb_g_disconnect(struct musb *);
+
+#else
+
+#define is_peripheral_capable() (0)
+
+static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; }
+static inline void musb_g_tx(struct musb *m, u8 e) {}
+static inline void musb_g_rx(struct musb *m, u8 e) {}
+static inline void musb_g_reset(struct musb *m) {}
+static inline void musb_g_suspend(struct musb *m) {}
+static inline void musb_g_resume(struct musb *m) {}
+static inline void musb_g_disconnect(struct musb *m) {}
+
+#endif
+
+/****************************** HOST ROLE ***********************************/
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+
+#include <linux/usb.h>
+#include "../core/hcd.h"
+#include "musb_host.h"
+
+#define is_host_capable() (1)
+
+extern irqreturn_t musb_h_ep0_irq(struct musb *);
+extern void musb_host_tx(struct musb *, u8);
+extern void musb_host_rx(struct musb *, u8);
+
+#else
+
+#define is_host_capable() (0)
+
+static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; }
+static inline void musb_host_tx(struct musb *m, u8 e) {}
+static inline void musb_host_rx(struct musb *m, u8 e) {}
+
+static inline void musb_root_disconnect(struct musb *musb) { BUG(); }
+
+#endif
+
+
+/****************************** CONSTANTS ********************************/
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef MUSB_C_NUM_EPS
+#define MUSB_C_NUM_EPS ((u8)16)
+#endif
+
+#ifndef MUSB_MAX_END0_PACKET
+#define MUSB_MAX_END0_PACKET ((u16)MGC_END0_FIFOSIZE)
+#endif
+
+/* host side ep0 states */
+#define MGC_END0_START 0x0
+#define MGC_END0_OUT 0x2
+#define MGC_END0_IN 0x4
+#define MGC_END0_STATUS 0x8
+
+/* peripheral side ep0 states */
+enum musb_g_ep0_state {
+ MGC_END0_STAGE_SETUP, /* idle, waiting for setup */
+ MGC_END0_STAGE_TX, /* IN data */
+ MGC_END0_STAGE_RX, /* OUT data */
+ MGC_END0_STAGE_STATUSIN, /* (after OUT data) */
+ MGC_END0_STAGE_STATUSOUT, /* (after IN data) */
+ MGC_END0_STAGE_ACKWAIT, /* after zlp, before statusin */
+} __attribute__ ((packed));
+
+/* driver and cable VBUS status states for musb_irq_work */
+#define MUSB_VBUS_STATUS_CHG (1 << 0)
+
+/* failure codes */
+#define MUSB_ERR_WAITING 1
+#define MUSB_ERR_VBUS -1
+#define MUSB_ERR_BABBLE -2
+#define MUSB_ERR_CORRUPTED -3
+#define MUSB_ERR_IRQ -4
+#define MUSB_ERR_SHUTDOWN -5
+#define MUSB_ERR_RESTART -6
+
+
+/*************************** REGISTER ACCESS ********************************/
+
+/* Endpoint registers (other than dynfifo setup) can be accessed either
+ * directly with the "flat" model, or after setting up an index register.
+ */
+
+#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP243X)
+/* REVISIT "flat" takes about 1% more object code space and can't be very
+ * noticeable for speed differences. But for now indexed access seems to
+ * misbehave (on DaVinci) for at least peripheral IN ...
+ */
+#define MUSB_FLAT_REG
+#endif
+
+/* TUSB mapping: "flat" plus ep0 special cases */
+#if defined(CONFIG_USB_TUSB6010)
+#define MGC_SelectEnd(_pBase, _bEnd) \
+ musb_writeb((_pBase), MGC_O_HDRC_INDEX, (_bEnd))
+#define MGC_END_OFFSET MGC_TUSB_OFFSET
+
+/* "flat" mapping: each endpoint has its own i/o address */
+#elif defined(MUSB_FLAT_REG)
+#define MGC_SelectEnd(_pBase, _bEnd) (((void)(_pBase)),((void)(_bEnd)))
+#define MGC_END_OFFSET MGC_FLAT_OFFSET
+
+/* "indexed" mapping: INDEX register controls register bank select */
+#else
+#define MGC_SelectEnd(_pBase, _bEnd) \
+ musb_writeb((_pBase), MGC_O_HDRC_INDEX, (_bEnd))
+#define MGC_END_OFFSET MGC_INDEXED_OFFSET
+#endif
+
+/* FIXME: replace with musb_readcsr(hw_ep *, REGNAME), etc
+ * using hw_ep->regs, for all access except writing INDEX
+ */
+#ifdef MUSB_FLAT_REG
+#define MGC_ReadCsr8(_pBase, _bOffset, _bEnd) \
+ musb_readb((_pBase), MGC_END_OFFSET((_bEnd), (_bOffset)))
+#define MGC_ReadCsr16(_pBase, _bOffset, _bEnd) \
+ musb_readw((_pBase), MGC_END_OFFSET((_bEnd), (_bOffset)))
+#define MGC_WriteCsr8(_pBase, _bOffset, _bEnd, _bData) \
+ musb_writeb((_pBase), MGC_END_OFFSET((_bEnd), (_bOffset)), (_bData))
+#define MGC_WriteCsr16(_pBase, _bOffset, _bEnd, _bData) \
+ musb_writew((_pBase), MGC_END_OFFSET((_bEnd), (_bOffset)), (_bData))
+#else
+#define MGC_ReadCsr8(_pBase, _bOffset, _bEnd) \
+ musb_readb(_pBase, (_bOffset + 0x10))
+#define MGC_ReadCsr16(_pBase, _bOffset, _bEnd) \
+ musb_readw(_pBase, (_bOffset + 0x10))
+#define MGC_WriteCsr8(_pBase, _bOffset, _bEnd, _bData) \
+ musb_writeb(_pBase, (_bOffset + 0x10), _bData)
+#define MGC_WriteCsr16(_pBase, _bOffset, _bEnd, _bData) \
+ musb_writew(_pBase, (_bOffset + 0x10), _bData)
+#endif
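+
+/* Illustrative sketch (example only, not built): reading an endpoint's
+ * TXCSR works the same way under any register model -- select the
+ * endpoint, then use the CSR accessor. MGC_SelectEnd is a no-op in the
+ * flat model, and an INDEX register write otherwise.
+ */
+#if 0
+static inline u16 example_read_txcsr(void __iomem *pBase, u8 bEnd)
+{
+ MGC_SelectEnd(pBase, bEnd);
+ return MGC_ReadCsr16(pBase, MGC_O_HDRC_TXCSR, bEnd);
+}
+#endif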
+
+/****************************** FUNCTIONS ********************************/
+
+#define MUSB_HST_MODE(_pthis)\
+ { (_pthis)->bIsHost=TRUE; (_pthis)->bIsDevice=FALSE; \
+ (_pthis)->bFailCode=0; }
+#define MUSB_DEV_MODE(_pthis) \
+ { (_pthis)->bIsHost=FALSE; (_pthis)->bIsDevice=TRUE; \
+ (_pthis)->bFailCode=0; }
+#define MUSB_OTG_MODE(_pthis) \
+ { (_pthis)->bIsHost=FALSE; (_pthis)->bIsDevice=FALSE; \
+ (_pthis)->bFailCode=MUSB_ERR_WAITING; }
+#define MUSB_ERR_MODE(_pthis, _cause) \
+ { (_pthis)->bIsHost=FALSE; (_pthis)->bIsDevice=FALSE; \
+ (_pthis)->bFailCode=_cause; }
+
+#define MUSB_IS_ERR(_x) ( (_x)->bFailCode<0 )
+#define MUSB_IS_HST(_x) (!MUSB_IS_ERR(_x) \
+ && (_x)->bIsHost && !(_x)->bIsDevice )
+#define MUSB_IS_DEV(_x) (!MUSB_IS_ERR(_x) \
+ && !(_x)->bIsHost && (_x)->bIsDevice )
+#define MUSB_IS_OTG(_x) (!MUSB_IS_ERR(_x) \
+ && !(_x)->bIsHost && !(_x)->bIsDevice )
+
+#define test_devctl_hst_mode(_x) \
+ (musb_readb((_x)->pRegs, MGC_O_HDRC_DEVCTL)&MGC_M_DEVCTL_HM)
+
+/* REVISIT OTG isn't a third non-error mode... */
+#define MUSB_MODE(_x) ( MUSB_IS_HST(_x)?"HOST" \
+ :(MUSB_IS_DEV(_x)?"PERIPHERAL" \
+ :(MUSB_IS_OTG(_x)?"UNCONNECTED" \
+ :"ERROR")) )
+
+/************************** Ep Configuration ********************************/
+
+/** The End point descriptor */
+struct MUSB_EpFifoDescriptor {
+ u8 bType; /* 0 for autoconfig, CNTR, ISOC, BULK, INTR */
+ u8 bDir; /* 0 for autoconfig, INOUT, IN, OUT */
+ int wSize; /* 0 for autoconfig, or the size */
+};
+
+#define MUSB_EPD_AUTOCONFIG 0
+
+#define MUSB_EPD_T_CNTRL 1
+#define MUSB_EPD_T_ISOC 2
+#define MUSB_EPD_T_BULK 3
+#define MUSB_EPD_T_INTR 4
+
+#define MUSB_EPD_D_INOUT 0
+#define MUSB_EPD_D_TX 1
+#define MUSB_EPD_D_RX 2
+
+/******************************** TYPES *************************************/
+
+/*
+ * struct musb_hw_ep - endpoint hardware (bidirectional)
+ *
+ * Ordered slightly for better cacheline locality.
+ */
+struct musb_hw_ep {
+ struct musb *musb;
+ void __iomem *fifo;
+ void __iomem *regs;
+
+ /* index in musb->aLocalEnd[] */
+ u8 bLocalEnd;
+
+ /* hardware configuration, possibly dynamic */
+ u8 bIsSharedFifo;
+ u8 tx_double_buffered;
+ u8 rx_double_buffered;
+ u16 wMaxPacketSizeTx;
+ u16 wMaxPacketSizeRx;
+
+ struct dma_channel *tx_channel;
+ struct dma_channel *rx_channel;
+
+#ifdef CONFIG_USB_TUSB6010
+ /* TUSB has "asynchronous" and "synchronous" dma modes */
+ dma_addr_t fifo_async;
+ dma_addr_t fifo_sync;
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ void __iomem *target_regs;
+
+ /* currently scheduled peripheral endpoint */
+ struct musb_qh *in_qh;
+ struct musb_qh *out_qh;
+
+ u8 rx_reinit;
+ u8 tx_reinit;
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ /* peripheral side */
+ struct musb_ep ep_in; /* TX */
+ struct musb_ep ep_out; /* RX */
+#endif
+};
+
+static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep)
+{
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ return next_request(&hw_ep->ep_in);
+#else
+ return NULL;
+#endif
+}
+
+static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep)
+{
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ return next_request(&hw_ep->ep_out);
+#else
+ return NULL;
+#endif
+}
+
+/*
+ * struct musb - Driver instance data.
+ */
+struct musb {
+ spinlock_t Lock;
+ struct clk *clock;
+ irqreturn_t (*isr)(int, void *, struct pt_regs *);
+ struct work_struct irq_work;
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+
+ u32 port1_status;
+ unsigned long rh_timer;
+
+ u8 bEnd0Stage; /* end0 stage while in host */
+
+ /* bulk traffic normally dedicates endpoint hardware, and each
+ * direction has its own ring of host side endpoints.
+ * we try to progress the transfer at the head of each endpoint's
+ * queue until it completes or NAKs too much; then we try the next
+ * endpoint.
+ */
+ struct musb_hw_ep *bulk_ep;
+
+ struct list_head control; /* of musb_qh */
+ struct list_head in_bulk; /* of musb_qh */
+ struct list_head out_bulk; /* of musb_qh */
+ struct musb_qh *periodic[32]; /* tree of interrupt+iso */
+
+#endif
+
+ struct dma_controller *pDmaController;
+
+ struct device *controller;
+ void __iomem *ctrl_base;
+ void __iomem *pRegs;
+
+#ifdef CONFIG_USB_TUSB6010
+ dma_addr_t async;
+ dma_addr_t sync;
+#endif
+
+ /* passed down from chip/board specific irq handlers */
+ u8 int_usb;
+ u16 int_rx;
+ u16 int_tx;
+ struct pt_regs *int_regs;
+
+ struct otg_transceiver xceiv;
+
+ int nIrq;
+
+ struct musb_hw_ep aLocalEnd[MUSB_C_NUM_EPS];
+#define control_ep aLocalEnd
+
+ u16 vbuserr_retry;
+ u16 wEndMask;
+ u8 bEndCount;
+
+ u8 board_mode; /* enum musb_mode */
+ int (*board_set_power)(int state);
+
+ u8 status; /* status change flags for musb_irq_work */
+
+ s8 bFailCode; /* one of MUSB_ERR_* failure code */
+
+ unsigned bIsMultipoint:1;
+ unsigned bIsDevice:1;
+ unsigned bIsHost:1;
+ unsigned bIgnoreDisconnect:1; /* during bus resets */
+
+#ifdef C_MP_TX
+ unsigned bBulkSplit:1;
+#define can_bulk_split(musb,type) \
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bBulkSplit)
+#else
+#define can_bulk_split(musb,type) 0
+#endif
+
+#ifdef C_MP_RX
+ unsigned bBulkCombine:1;
+ /* REVISIT allegedly doesn't work reliably */
+#if 0
+#define can_bulk_combine(musb,type) \
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bBulkCombine)
+#else
+#define can_bulk_combine(musb,type) 0
+#endif
+#else
+#define can_bulk_combine(musb,type) 0
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ unsigned bIsSelfPowered:1;
+ unsigned bMayWakeup:1;
+ unsigned bSetAddress:1;
+ unsigned bTestMode:1;
+ unsigned softconnect:1;
+
+ enum musb_g_ep0_state ep0_state;
+ u8 bAddress;
+ u8 bTestModeValue;
+ u16 ackpend; /* ep0 */
+ struct usb_gadget g; /* the gadget */
+ struct usb_gadget_driver *pGadgetDriver; /* its driver */
+#endif
+
+#ifdef CONFIG_USB_MUSB_OTG
+ struct otg_machine OtgMachine;
+ u8 bDelayPortPowerOff;
+#endif
+
+#ifdef MUSB_CONFIG_PROC_FS
+ struct proc_dir_entry *pProcEntry;
+#endif
+};
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+static inline struct musb *gadget_to_musb(struct usb_gadget *g)
+{
+ return container_of(g, struct musb, g);
+}
+#endif
+
+
+/***************************** Glue it together *****************************/
+
+extern const char musb_driver_name[];
+
+extern void musb_start(struct musb *pThis);
+extern void musb_stop(struct musb *pThis);
+
+extern void musb_write_fifo(struct musb_hw_ep *ep,
+ u16 wCount, const u8 * pSource);
+extern void musb_read_fifo(struct musb_hw_ep *ep,
+ u16 wCount, u8 * pDest);
+
+extern irqreturn_t musb_interrupt(struct musb *);
+
+extern void musb_platform_enable(struct musb *musb);
+extern void musb_platform_disable(struct musb *musb);
+
+#ifdef CONFIG_USB_TUSB6010
+extern void musb_platform_try_idle(struct musb *musb);
+extern int musb_platform_get_vbus_status(struct musb *musb);
+#else
+#define musb_platform_try_idle(x) do {} while (0)
+#define musb_platform_get_vbus_status(x) 0
+#endif
+
+extern int __devinit musb_platform_init(struct musb *musb);
+extern int musb_platform_exit(struct musb *musb);
+
+/*-------------------------- ProcFS definitions ---------------------*/
+
+struct proc_dir_entry;
+
+#if (MUSB_DEBUG > 0) && defined(MUSB_CONFIG_PROC_FS)
+extern struct proc_dir_entry *musb_debug_create(char *name,
+ struct musb *data);
+extern void musb_debug_delete(char *name, struct musb *data);
+
+#else
+static inline struct proc_dir_entry *musb_debug_create(char *name,
+ struct musb *data)
+{
+ return NULL;
+}
+static inline void musb_debug_delete(char *name, struct musb *data)
+{
+}
+#endif
+
+#endif /* __MUSB_MUSBDEFS_H__ */
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS. THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+#ifndef __MUSB_HDRC_DEFS_H__
+#define __MUSB_HDRC_DEFS_H__
+
+/*
+ * HDRC-specific definitions
+ */
+
+#define MGC_MAX_USB_ENDS 16
+
+#define MGC_END0_FIFOSIZE 64 /* this is non-configurable */
+
+/*
+ * MUSBMHDRC Register map
+ */
+
+/* Common USB registers */
+
+#define MGC_O_HDRC_FADDR 0x00 /* 8-bit */
+#define MGC_O_HDRC_POWER 0x01 /* 8-bit */
+
+#define MGC_O_HDRC_INTRTX 0x02 /* 16-bit */
+#define MGC_O_HDRC_INTRRX 0x04
+#define MGC_O_HDRC_INTRTXE 0x06
+#define MGC_O_HDRC_INTRRXE 0x08
+#define MGC_O_HDRC_INTRUSB 0x0A /* 8 bit */
+#define MGC_O_HDRC_INTRUSBE 0x0B /* 8 bit */
+#define MGC_O_HDRC_FRAME 0x0C
+#define MGC_O_HDRC_INDEX 0x0E /* 8 bit */
+#define MGC_O_HDRC_TESTMODE 0x0F /* 8 bit */
+
+/* Get offset for a given FIFO from musb->pRegs */
+#ifdef CONFIG_USB_TUSB6010
+#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
+#else
+#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
+#endif
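+/* e.g. in the non-TUSB map, endpoint 1's FIFO sits at 0x20 + 1*4 = 0x24 */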
+
+/* Additional Control Registers */
+
+#define MGC_O_HDRC_DEVCTL 0x60 /* 8 bit */
+// vctrl/vstatus: optional vendor utmi+phy register at 0x68
+#define MGC_O_HDRC_HWVERS 0x6C /* 8 bit */
+
+/* These are always controlled through the INDEX register */
+#define MGC_O_HDRC_TXFIFOSZ 0x62 /* 8-bit (see masks) */
+#define MGC_O_HDRC_RXFIFOSZ 0x63 /* 8-bit (see masks) */
+#define MGC_O_HDRC_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */
+#define MGC_O_HDRC_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */
+
+/* offsets to endpoint registers */
+#define MGC_O_HDRC_TXMAXP 0x00
+#define MGC_O_HDRC_TXCSR 0x02
+#define MGC_O_HDRC_CSR0 MGC_O_HDRC_TXCSR /* re-used for EP0 */
+#define MGC_O_HDRC_RXMAXP 0x04
+#define MGC_O_HDRC_RXCSR 0x06
+#define MGC_O_HDRC_RXCOUNT 0x08
+#define MGC_O_HDRC_COUNT0 MGC_O_HDRC_RXCOUNT /* re-used for EP0 */
+#define MGC_O_HDRC_TXTYPE 0x0A
+#define MGC_O_HDRC_TYPE0 MGC_O_HDRC_TXTYPE /* re-used for EP0 */
+#define MGC_O_HDRC_TXINTERVAL 0x0B
+#define MGC_O_HDRC_NAKLIMIT0 MGC_O_HDRC_TXINTERVAL /* re-used for EP0 */
+#define MGC_O_HDRC_RXTYPE 0x0C
+#define MGC_O_HDRC_RXINTERVAL 0x0D
+#define MGC_O_HDRC_FIFOSIZE 0x0F
+#define MGC_O_HDRC_CONFIGDATA MGC_O_HDRC_FIFOSIZE /* re-used for EP0 */
+
+/* offsets to endpoint registers in indexed model (using INDEX register) */
+#define MGC_INDEXED_OFFSET(_bEnd, _bOffset) \
+ (0x10 + (_bOffset))
+
+/* offsets to endpoint registers in flat models */
+#define MGC_FLAT_OFFSET(_bEnd, _bOffset) \
+ (0x100 + (0x10*(_bEnd)) + (_bOffset))
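+/* e.g. flat-model TXCSR for endpoint 2: 0x100 + 0x10*2 + 0x02 = 0x122 */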
+
+#ifdef CONFIG_USB_TUSB6010
+/* TUSB6010 EP0 configuration register is special */
+#define MGC_TUSB_OFFSET(_bEnd, _bOffset) \
+ (_bEnd ? (0x400 + (((_bEnd - 1) & 0xf) << 2) + (_bOffset)) : \
+ ((_bEnd - 0x400) + TUSB_EP0_CONF + (_bOffset)))
+#include "tusb6010.h" /* needed "only" for TUSB_EP0_CONF */
+#endif
+
+/* "bus control" registers */
+#define MGC_O_HDRC_TXFUNCADDR 0x00
+#define MGC_O_HDRC_TXHUBADDR 0x02
+#define MGC_O_HDRC_TXHUBPORT 0x03
+
+#define MGC_O_HDRC_RXFUNCADDR 0x04
+#define MGC_O_HDRC_RXHUBADDR 0x06
+#define MGC_O_HDRC_RXHUBPORT 0x07
+
+#define MGC_BUSCTL_OFFSET(_bEnd, _bOffset) \
+ (0x80 + (8*(_bEnd)) + (_bOffset))
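+/* e.g. RXHUBADDR for endpoint 3: 0x80 + 8*3 + 0x06 = 0x9e */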
+
+/*
+ * MUSBHDRC Register bit masks
+ */
+
+/* POWER */
+
+#define MGC_M_POWER_ISOUPDATE 0x80
+#define MGC_M_POWER_SOFTCONN 0x40
+#define MGC_M_POWER_HSENAB 0x20
+#define MGC_M_POWER_HSMODE 0x10
+#define MGC_M_POWER_RESET 0x08
+#define MGC_M_POWER_RESUME 0x04
+#define MGC_M_POWER_SUSPENDM 0x02
+#define MGC_M_POWER_ENSUSPEND 0x01
+
+/* INTRUSB */
+#define MGC_M_INTR_SUSPEND 0x01
+#define MGC_M_INTR_RESUME 0x02
+#define MGC_M_INTR_RESET 0x04 /* peripheral mode */
+#define MGC_M_INTR_BABBLE 0x04 /* host mode; shares bit 2 with RESET */
+#define MGC_M_INTR_SOF 0x08
+#define MGC_M_INTR_CONNECT 0x10
+#define MGC_M_INTR_DISCONNECT 0x20
+#define MGC_M_INTR_SESSREQ 0x40
+#define MGC_M_INTR_VBUSERROR 0x80 /* FOR SESSION END */
+#define MGC_M_INTR_EP0 0x01 /* FOR EP0 INTERRUPT */
+
+/* DEVCTL */
+#define MGC_M_DEVCTL_BDEVICE 0x80
+#define MGC_M_DEVCTL_FSDEV 0x40
+#define MGC_M_DEVCTL_LSDEV 0x20
+#define MGC_M_DEVCTL_VBUS 0x18
+#define MGC_S_DEVCTL_VBUS 3
+#define MGC_M_DEVCTL_HM 0x04
+#define MGC_M_DEVCTL_HR 0x02
+#define MGC_M_DEVCTL_SESSION 0x01
+
+/* TESTMODE */
+
+#define MGC_M_TEST_FORCE_HOST 0x80
+#define MGC_M_TEST_FIFO_ACCESS 0x40
+#define MGC_M_TEST_FORCE_FS 0x20
+#define MGC_M_TEST_FORCE_HS 0x10
+#define MGC_M_TEST_PACKET 0x08
+#define MGC_M_TEST_K 0x04
+#define MGC_M_TEST_J 0x02
+#define MGC_M_TEST_SE0_NAK 0x01
+
+/* allocate for double-packet buffering (effectively doubles assigned _SIZE) */
+#define MGC_M_FIFOSZ_DPB 0x10
+/* allocation size (8, 16, 32, ... 4096) */
+#define MGC_M_FIFOSZ_SIZE 0x0f
+
+/* CSR0 */
+#define MGC_M_CSR0_FLUSHFIFO 0x0100
+#define MGC_M_CSR0_TXPKTRDY 0x0002
+#define MGC_M_CSR0_RXPKTRDY 0x0001
+
+/* CSR0 in Peripheral mode */
+#define MGC_M_CSR0_P_SVDSETUPEND 0x0080
+#define MGC_M_CSR0_P_SVDRXPKTRDY 0x0040
+#define MGC_M_CSR0_P_SENDSTALL 0x0020
+#define MGC_M_CSR0_P_SETUPEND 0x0010
+#define MGC_M_CSR0_P_DATAEND 0x0008
+#define MGC_M_CSR0_P_SENTSTALL 0x0004
+
+/* CSR0 in Host mode */
+#define MGC_M_CSR0_H_WR_DATATOGGLE 0x0400 /* set to allow setting: */
+#define MGC_M_CSR0_H_DATATOGGLE 0x0200 /* data toggle control */
+#define MGC_M_CSR0_H_NAKTIMEOUT 0x0080
+#define MGC_M_CSR0_H_STATUSPKT 0x0040
+#define MGC_M_CSR0_H_REQPKT 0x0020
+#define MGC_M_CSR0_H_ERROR 0x0010
+#define MGC_M_CSR0_H_SETUPPKT 0x0008
+#define MGC_M_CSR0_H_RXSTALL 0x0004
+
+/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MGC_M_CSR0_P_WZC_BITS \
+ ( MGC_M_CSR0_P_SENTSTALL )
+#define MGC_M_CSR0_H_WZC_BITS \
+ ( MGC_M_CSR0_H_NAKTIMEOUT | MGC_M_CSR0_H_RXSTALL \
+ | MGC_M_CSR0_RXPKTRDY )
+
+
+/* TxType/RxType */
+#define MGC_M_TYPE_SPEED 0xc0
+#define MGC_S_TYPE_SPEED 6
+#define MGC_TYPE_SPEED_HIGH 1
+#define MGC_TYPE_SPEED_FULL 2
+#define MGC_TYPE_SPEED_LOW 3
+#define MGC_M_TYPE_PROTO 0x30
+#define MGC_S_TYPE_PROTO 4
+#define MGC_M_TYPE_REMOTE_END 0xf
+
+/* CONFIGDATA */
+
+#define MGC_M_CONFIGDATA_MPRXE 0x80 /* auto bulk pkt combining */
+#define MGC_M_CONFIGDATA_MPTXE 0x40 /* auto bulk pkt splitting */
+#define MGC_M_CONFIGDATA_BIGENDIAN 0x20
+#define MGC_M_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */
+#define MGC_M_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */
+#define MGC_M_CONFIGDATA_DYNFIFO 0x04 /* dynamic FIFO sizing */
+#define MGC_M_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */
+#define MGC_M_CONFIGDATA_UTMIDW 0x01 /* data width 0/1 => 8/16bits */
+
+/* TXCSR in Peripheral and Host mode */
+
+#define MGC_M_TXCSR_AUTOSET 0x8000
+#define MGC_M_TXCSR_MODE 0x2000
+#define MGC_M_TXCSR_DMAENAB 0x1000
+#define MGC_M_TXCSR_FRCDATATOG 0x0800
+#define MGC_M_TXCSR_DMAMODE 0x0400
+#define MGC_M_TXCSR_CLRDATATOG 0x0040
+#define MGC_M_TXCSR_FLUSHFIFO 0x0008
+#define MGC_M_TXCSR_FIFONOTEMPTY 0x0002
+#define MGC_M_TXCSR_TXPKTRDY 0x0001
+
+/* TXCSR in Peripheral mode */
+
+#define MGC_M_TXCSR_P_ISO 0x4000
+#define MGC_M_TXCSR_P_INCOMPTX 0x0080
+#define MGC_M_TXCSR_P_SENTSTALL 0x0020
+#define MGC_M_TXCSR_P_SENDSTALL 0x0010
+#define MGC_M_TXCSR_P_UNDERRUN 0x0004
+
+/* TXCSR in Host mode */
+
+#define MGC_M_TXCSR_H_WR_DATATOGGLE 0x0200
+#define MGC_M_TXCSR_H_DATATOGGLE 0x0100
+#define MGC_M_TXCSR_H_NAKTIMEOUT 0x0080
+#define MGC_M_TXCSR_H_RXSTALL 0x0020
+#define MGC_M_TXCSR_H_ERROR 0x0004
+
+/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MGC_M_TXCSR_P_WZC_BITS \
+ ( MGC_M_TXCSR_P_INCOMPTX | MGC_M_TXCSR_P_SENTSTALL \
+ | MGC_M_TXCSR_P_UNDERRUN | MGC_M_TXCSR_FIFONOTEMPTY )
+#define MGC_M_TXCSR_H_WZC_BITS \
+ ( MGC_M_TXCSR_H_NAKTIMEOUT | MGC_M_TXCSR_H_RXSTALL \
+ | MGC_M_TXCSR_H_ERROR | MGC_M_TXCSR_FIFONOTEMPTY )
+
+
+/* RXCSR in Peripheral and Host mode */
+
+#define MGC_M_RXCSR_AUTOCLEAR 0x8000
+#define MGC_M_RXCSR_DMAENAB 0x2000
+#define MGC_M_RXCSR_DISNYET 0x1000
+#define MGC_M_RXCSR_DMAMODE 0x0800
+#define MGC_M_RXCSR_INCOMPRX 0x0100
+#define MGC_M_RXCSR_CLRDATATOG 0x0080
+#define MGC_M_RXCSR_FLUSHFIFO 0x0010
+#define MGC_M_RXCSR_DATAERROR 0x0008
+#define MGC_M_RXCSR_FIFOFULL 0x0002
+#define MGC_M_RXCSR_RXPKTRDY 0x0001
+
+/* RXCSR in Peripheral mode */
+
+#define MGC_M_RXCSR_P_ISO 0x4000
+#define MGC_M_RXCSR_P_SENTSTALL 0x0040
+#define MGC_M_RXCSR_P_SENDSTALL 0x0020
+#define MGC_M_RXCSR_P_OVERRUN 0x0004
+
+/* RXCSR in Host mode */
+
+#define MGC_M_RXCSR_H_AUTOREQ 0x4000
+#define MGC_M_RXCSR_H_WR_DATATOGGLE 0x0400
+#define MGC_M_RXCSR_H_DATATOGGLE 0x0200
+#define MGC_M_RXCSR_H_RXSTALL 0x0040
+#define MGC_M_RXCSR_H_REQPKT 0x0020
+#define MGC_M_RXCSR_H_ERROR 0x0004
+
+/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MGC_M_RXCSR_P_WZC_BITS \
+ ( MGC_M_RXCSR_P_SENTSTALL | MGC_M_RXCSR_P_OVERRUN \
+ | MGC_M_RXCSR_RXPKTRDY )
+#define MGC_M_RXCSR_H_WZC_BITS \
+ ( MGC_M_RXCSR_H_RXSTALL | MGC_M_RXCSR_H_ERROR \
+ | MGC_M_RXCSR_DATAERROR | MGC_M_RXCSR_RXPKTRDY )
+
+
+/* HUBADDR */
+#define MGC_M_HUBADDR_MULTI_TT 0x80
+
+
+#endif /* __MUSB_HDRC_DEFS_H__ */
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS. THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+/*
+ * DMA implementation for high-speed controllers.
+ */
+
+#include "musbdefs.h"
+
+
+/****************************** CONSTANTS ********************************/
+
+#define MGC_O_HSDMA_BASE 0x200
+#define MGC_O_HSDMA_INTR 0x200
+
+#define MGC_O_HSDMA_CONTROL 4
+#define MGC_O_HSDMA_ADDRESS 8
+#define MGC_O_HSDMA_COUNT 0xc
+
+#define MGC_HSDMA_CHANNEL_OFFSET(_bChannel, _bOffset) \
+ (MGC_O_HSDMA_BASE + ((_bChannel) << 4) + (_bOffset))
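+/* e.g. channel 2's CONTROL register: 0x200 + (2 << 4) + 4 = 0x224 */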
+
+/* control register (16-bit): */
+#define MGC_S_HSDMA_ENABLE 0
+#define MGC_S_HSDMA_TRANSMIT 1
+#define MGC_S_HSDMA_MODE1 2
+#define MGC_S_HSDMA_IRQENABLE 3
+#define MGC_S_HSDMA_ENDPOINT 4
+#define MGC_S_HSDMA_BUSERROR 8
+#define MGC_S_HSDMA_BURSTMODE 9
+#define MGC_M_HSDMA_BURSTMODE (3 << MGC_S_HSDMA_BURSTMODE)
+#define MGC_HSDMA_BURSTMODE_UNSPEC 0
+#define MGC_HSDMA_BURSTMODE_INCR4 1
+#define MGC_HSDMA_BURSTMODE_INCR8 2
+#define MGC_HSDMA_BURSTMODE_INCR16 3
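+
+/* Example control word (sketch): a mode-1 TX transfer on endpoint 1 with
+ * the IRQ enabled and INCR16 bursts would be
+ * (1 << MGC_S_HSDMA_ENABLE) | (1 << MGC_S_HSDMA_TRANSMIT)
+ * | (1 << MGC_S_HSDMA_MODE1) | (1 << MGC_S_HSDMA_IRQENABLE)
+ * | (1 << MGC_S_HSDMA_ENDPOINT)
+ * | (MGC_HSDMA_BURSTMODE_INCR16 << MGC_S_HSDMA_BURSTMODE) == 0x061f;
+ * configure_channel() below builds values of exactly this form.
+ */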
+
+#define MGC_HSDMA_CHANNELS 8
+
+/******************************* Types ********************************/
+
+struct _MGC_HsDmaController;
+
+typedef struct {
+ struct dma_channel Channel;
+ struct _MGC_HsDmaController *pController;
+ u32 dwStartAddress;
+ u32 dwCount;
+ u8 bIndex;
+ u8 bEnd;
+ u8 bTransmit;
+} MGC_HsDmaChannel;
+
+struct hsdma {
+ struct dma_controller Controller;
+ MGC_HsDmaChannel aChannel[MGC_HSDMA_CHANNELS];
+ void *pDmaPrivate;
+ void __iomem *pCoreBase;
+ u8 bChannelCount;
+ u8 bmUsedChannels;
+};
+
+/* FIXME remove typedef noise */
+typedef struct hsdma MGC_HsDmaController;
+
+/****************************** FUNCTIONS ********************************/
+
+static int MGC_HsDmaStartController(struct dma_controller *c)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static int MGC_HsDmaStopController(struct dma_controller *c)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static struct dma_channel *MGC_HsDmaAllocateChannel(
+ struct dma_controller *c,
+ struct musb_hw_ep *hw_ep,
+ u8 bTransmit)
+{
+ u8 bBit;
+ struct dma_channel *pChannel = NULL;
+ MGC_HsDmaChannel *pImplChannel = NULL;
+ MGC_HsDmaController *pController;
+
+ pController = container_of(c, struct hsdma, Controller);
+ for (bBit = 0; bBit < MGC_HSDMA_CHANNELS; bBit++) {
+ if (!(pController->bmUsedChannels & (1 << bBit))) {
+ pController->bmUsedChannels |= (1 << bBit);
+ pImplChannel = &(pController->aChannel[bBit]);
+ pImplChannel->pController = pController;
+ pImplChannel->bIndex = bBit;
+ pImplChannel->bEnd = hw_ep->bLocalEnd;
+ pImplChannel->bTransmit = bTransmit;
+ pChannel = &(pImplChannel->Channel);
+ pChannel->pPrivateData = pImplChannel;
+ pChannel->bStatus = MGC_DMA_STATUS_FREE;
+ pChannel->dwMaxLength = 0x10000;
+ /* Tx => mode 1; Rx => mode 0 */
+ pChannel->bDesiredMode = bTransmit;
+ pChannel->dwActualLength = 0;
+ break;
+ }
+ }
+ return pChannel;
+}
+
+static void MGC_HsDmaReleaseChannel(struct dma_channel *pChannel)
+{
+ MGC_HsDmaChannel *pImplChannel =
+ (MGC_HsDmaChannel *) pChannel->pPrivateData;
+
+ pImplChannel->pController->bmUsedChannels &=
+ ~(1 << pImplChannel->bIndex);
+ pChannel->bStatus = MGC_DMA_STATUS_FREE;
+}
+
+static void clear_state(struct dma_channel *pChannel)
+{
+ MGC_HsDmaChannel *pImplChannel =
+ (MGC_HsDmaChannel *) pChannel->pPrivateData;
+ MGC_HsDmaController *pController = pImplChannel->pController;
+ u8 *pBase = pController->pCoreBase;
+ u8 bChannel = pImplChannel->bIndex;
+
+ musb_writew(pBase,
+ MGC_HSDMA_CHANNEL_OFFSET(bChannel, MGC_O_HSDMA_CONTROL),
+ 0);
+ musb_writel(pBase,
+ MGC_HSDMA_CHANNEL_OFFSET(bChannel, MGC_O_HSDMA_ADDRESS),
+ 0);
+ musb_writel(pBase,
+ MGC_HSDMA_CHANNEL_OFFSET(bChannel, MGC_O_HSDMA_COUNT),
+ 0);
+
+ pChannel->dwActualLength = 0L;
+ pImplChannel->dwStartAddress = 0;
+ pImplChannel->dwCount = 0;
+}
+
+static u8 configure_channel(struct dma_channel *pChannel,
+ u16 wPacketSize, u8 bMode,
+ dma_addr_t dma_addr, u32 dwLength)
+{
+ MGC_HsDmaChannel *pImplChannel =
+ (MGC_HsDmaChannel *) pChannel->pPrivateData;
+ MGC_HsDmaController *pController = pImplChannel->pController;
+ u8 *pBase = pController->pCoreBase;
+ u8 bChannel = pImplChannel->bIndex;
+ u16 wCsr = 0;
+
+ DBG(2, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
+ pChannel, wPacketSize, dma_addr, dwLength, bMode);
+
+ if (bMode) {
+ wCsr |= 1 << MGC_S_HSDMA_MODE1;
+ if (dwLength < wPacketSize) {
+ return FALSE;
+ }
+ if (wPacketSize >= 64) {
+ wCsr |=
+ MGC_HSDMA_BURSTMODE_INCR16 << MGC_S_HSDMA_BURSTMODE;
+ } else if (wPacketSize >= 32) {
+ wCsr |=
+ MGC_HSDMA_BURSTMODE_INCR8 << MGC_S_HSDMA_BURSTMODE;
+ } else if (wPacketSize >= 16) {
+ wCsr |=
+ MGC_HSDMA_BURSTMODE_INCR4 << MGC_S_HSDMA_BURSTMODE;
+ }
+ }
+
+ wCsr |= (pImplChannel->bEnd << MGC_S_HSDMA_ENDPOINT)
+ | (1 << MGC_S_HSDMA_ENABLE)
+ | (1 << MGC_S_HSDMA_IRQENABLE)
+ | (pImplChannel->bTransmit ? (1 << MGC_S_HSDMA_TRANSMIT) : 0);
+
+ /* address/count */
+ musb_writel(pBase,
+ MGC_HSDMA_CHANNEL_OFFSET(bChannel, MGC_O_HSDMA_ADDRESS),
+ dma_addr);
+ musb_writel(pBase,
+ MGC_HSDMA_CHANNEL_OFFSET(bChannel, MGC_O_HSDMA_COUNT),
+ dwLength);
+
+ /* control (this should start things) */
+ musb_writew(pBase,
+ MGC_HSDMA_CHANNEL_OFFSET(bChannel, MGC_O_HSDMA_CONTROL),
+ wCsr);
+
+ return TRUE;
+}
+
+static int MGC_HsDmaProgramChannel(struct dma_channel * pChannel,
+ u16 wPacketSize, u8 bMode,
+ dma_addr_t dma_addr, u32 dwLength)
+{
+ MGC_HsDmaChannel *pImplChannel =
+ (MGC_HsDmaChannel *) pChannel->pPrivateData;
+
+ DBG(2, "pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
+ wPacketSize, dma_addr, dwLength, bMode);
+
+ BUG_ON(pChannel->bStatus != MGC_DMA_STATUS_FREE);
+
+ pChannel->dwActualLength = 0L;
+ pImplChannel->dwStartAddress = dma_addr;
+ pImplChannel->dwCount = dwLength;
+
+ pChannel->bStatus = MGC_DMA_STATUS_BUSY;
+
+ if ((bMode == 1) && (dwLength >= wPacketSize)) {
+
+#if 0
+ /* mode 1 sends an extra IN token at the end of
+ * full packet transfer in host Rx
+ */
+ if (dwLength % wPacketSize == 0)
+ dwLength -= wPacketSize;
+
+ /* mode 1 doesn't give an interrupt on short packet */
+ configure_channel(pChannel, wPacketSize, 1, dma_addr,
+ dwLength & ~(wPacketSize - 1));
+ /* the rest (<= pkt_size) will be transferred in mode 0 */
+#endif
+
+ configure_channel(pChannel, wPacketSize, 1, dma_addr,
+ dwLength);
+
+ } else
+ configure_channel(pChannel, wPacketSize, 0, dma_addr,
+ dwLength);
+
+ return TRUE;
+}
+
+// REVISIT...
+static int MGC_HsDmaAbortChannel(struct dma_channel *pChannel)
+{
+ clear_state(pChannel);
+ pChannel->bStatus = MGC_DMA_STATUS_FREE;
+ return 0;
+}
+
+static irqreturn_t
+hsdma_irq(int irq, void *pPrivateData, struct pt_regs *regs)
+{
+ u8 bChannel;
+ u16 wCsr;
+ u32 dwAddress;
+ MGC_HsDmaChannel *pImplChannel;
+ MGC_HsDmaController *pController = pPrivateData;
+ u8 *pBase = pController->pCoreBase;
+ struct dma_channel *pChannel;
+ u8 bIntr = musb_readb(pBase, MGC_O_HSDMA_INTR);
+
+ if (!bIntr)
+ return IRQ_NONE;
+
+ for (bChannel = 0; bChannel < MGC_HSDMA_CHANNELS; bChannel++) {
+ if (bIntr & (1 << bChannel)) {
+
+ pImplChannel = (MGC_HsDmaChannel *)
+ &(pController->aChannel[bChannel]);
+ pChannel = &pImplChannel->Channel;
+
+ wCsr = musb_readw(pBase,
+ MGC_HSDMA_CHANNEL_OFFSET(bChannel,
+ MGC_O_HSDMA_CONTROL));
+
+ if (wCsr & (1 << MGC_S_HSDMA_BUSERROR)) {
+ pImplChannel->Channel.bStatus =
+ MGC_DMA_STATUS_BUS_ABORT;
+ } else {
+ dwAddress = musb_readl(pBase,
+ MGC_HSDMA_CHANNEL_OFFSET
+ (bChannel,
+ MGC_O_HSDMA_ADDRESS));
+ pChannel->dwActualLength =
+ dwAddress - pImplChannel->dwStartAddress;
+
+ DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
+ pChannel, pImplChannel->dwStartAddress,
+ dwAddress, pChannel->dwActualLength,
+ pImplChannel->dwCount,
+ (pChannel->dwActualLength <
+ pImplChannel->dwCount) ?
+ "=> reconfig 0": "=> complete");
+#if 0
+ if (pChannel->dwActualLength <
+ pImplChannel->dwCount) {
+ /* mode 1 sends an extra IN request if
+ the last packet is a complete packet */
+ u16 newcsr = MGC_ReadCsr16(pBase,
+ MGC_O_HDRC_RXCSR,
+ pImplChannel->bEnd);
+ newcsr &= ~(MGC_M_RXCSR_H_AUTOREQ |
+ MGC_M_RXCSR_H_REQPKT);
+ MGC_WriteCsr16(pBase, MGC_O_HDRC_RXCSR,
+ pImplChannel->bEnd,
+ MGC_M_RXCSR_H_WZC_BITS |
+ newcsr);
+
+ configure_channel(pChannel,
+ pImplChannel->wMaxPacketSize,
+ 0, dwAddress,
+ pImplChannel->dwCount -
+ pChannel->dwActualLength);
+ }
+ else
+#endif
+ {
+ pChannel->bStatus = MGC_DMA_STATUS_FREE;
+ /* completed */
+ musb_dma_completion(
+ pController->pDmaPrivate,
+ pImplChannel->bEnd,
+ pImplChannel->bTransmit);
+ }
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void hsdma_controller_destroy(struct dma_controller *pController)
+{
+ MGC_HsDmaController *pHsController = pController->pPrivateData;
+
+ if (pHsController) {
+ pHsController->Controller.pPrivateData = NULL;
+ kfree(pHsController);
+ }
+}
+
+static struct dma_controller *
+hsdma_controller_new(struct musb *pThis, void __iomem *pCoreBase)
+{
+ MGC_HsDmaController *pController;
+ struct device *dev = pThis->controller;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq = platform_get_irq(pdev, 1);
+
+ if (irq <= 0) {
+ dev_err(dev, "No DMA interrupt line!\n");
+ return NULL;
+ }
+
+ if (!(pController = kzalloc(sizeof(MGC_HsDmaController), GFP_KERNEL)))
+ return NULL;
+
+ pController->bChannelCount = MGC_HSDMA_CHANNELS;
+ pController->pDmaPrivate = pThis;
+ pController->pCoreBase = pCoreBase;
+
+ pController->Controller.pPrivateData = pController;
+ pController->Controller.start = MGC_HsDmaStartController;
+ pController->Controller.stop = MGC_HsDmaStopController;
+ pController->Controller.channel_alloc = MGC_HsDmaAllocateChannel;
+ pController->Controller.channel_release = MGC_HsDmaReleaseChannel;
+ pController->Controller.channel_program = MGC_HsDmaProgramChannel;
+ pController->Controller.channel_abort = MGC_HsDmaAbortChannel;
+
+ if (request_irq(irq, hsdma_irq, SA_INTERRUPT,
+ pThis->controller->bus_id, &pController->Controller)) {
+ dev_err(dev, "request_irq %d failed!\n", irq);
+ hsdma_controller_destroy(&pController->Controller);
+ return NULL;
+ }
+
+ return &pController->Controller;
+}
+
+const struct dma_controller_factory dma_controller_factory = {
+ .create = hsdma_controller_new,
+ .destroy = hsdma_controller_destroy,
+};
--- /dev/null
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+#include <asm/mach-types.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/mux.h>
+
+#include "musbdefs.h"
+#include "omap2430.h"
+
+
+static int dma_off;
+
+void musb_platform_enable(struct musb *musb)
+{
+ if (is_dma_capable() && dma_off)
+ printk(KERN_WARNING "%s %s: dma not reactivated\n",
+ __FILE__, __FUNCTION__);
+ else
+ dma_off = 1;
+}
+
+void musb_platform_disable(struct musb *musb)
+{
+ if (is_dma_capable()) {
+ printk(KERN_WARNING "%s %s: dma still active\n",
+ __FILE__, __FUNCTION__);
+ dma_off = 1;
+ }
+}
+
+static void omap_vbus_power(struct musb *musb, int is_on, int sleeping)
+{
+}
+
+int __devinit musb_platform_init(struct musb *musb)
+{
+ /* Erratum - reset value of STP has pull-down.
+ Change it to pull-up. */
+ omap2_cfg_reg(AE5_2430_USB0HS_STP);
+
+ /* start clock */
+ musb->clock = clk_get((struct device *)musb->controller, "usbhs_ick");
+ clk_use(musb->clock);
+
+ omap_writel(omap_readl(OTG_INTERFSEL) | (1<<0), OTG_INTERFSEL);
+ omap_writel(omap_readl(OTG_SYSCONFIG) |
+ ((1 << 12) | (1 << 3) | (1 << 2)),
+ OTG_SYSCONFIG);
+
+ pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
+ "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
+ omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG),
+ omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL),
+ omap_readl(OTG_SIMENABLE));
+
+ omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
+
+ return 0;
+}
+
+int __exit musb_platform_exit(struct musb *musb)
+{
+ omap_vbus_power(musb, 0 /*off*/, 1);
+
+ /* REVISIT older omap trees need "unuse", more current
+ * ones just have disable()
+ */
+ clk_unuse(musb->clock);
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_OMAP243X_H__
+#define __MUSB_OMAP243X_H__
+
+#ifdef CONFIG_ARCH_OMAP243X
+/*
+ * OMAP2430-specific definitions
+ */
+
+#define MENTOR_BASE_OFFSET 0
+#define HS_OTG(offset) (OMAP243X_HS_BASE + (offset))
+#define OTG_REVISION HS_OTG(0x400)
+#define OTG_SYSCONFIG HS_OTG(0x404)
+#define OTG_SYSSTATUS HS_OTG(0x408)
+#define OTG_INTERFSEL HS_OTG(0x40c)
+#define OTG_SIMENABLE HS_OTG(0x410)
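+
+/* For example, OTG_SYSCONFIG names the register at byte address
+ * OMAP243X_HS_BASE + 0x404; omap2430.c accesses these registers
+ * through omap_readl()/omap_writel().
+ */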
+
+#endif /* CONFIG_ARCH_OMAP243X */
+
+#endif /* __MUSB_OMAP243X_H__ */
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS. THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+/* OTG state machine status 8-mar:
+ *
+ * - on DaVinci
+ * + EVM gamma boards have troublesome C133, preventing
+ * conformant timings for A_WAIT_VFALL transitions
+ * + ID-pin based role initialization and VBUS switching
+ * seems partly functional ... seems to bypass this code.
+ * + haven't tried HNP or SRP.
+ *
+ * - needs updating along the lines of <linux/usb_otg.h>
+ *
+ * - doesn't yet use all the linux 2.6.10 usbcore hooks for OTG, but
+ * some of the conversion (and consequent shrinkage) has begun.
+ *
+ * - it's not clear whether any version of this code has ever passed
+ * the USB-IF OTG tests even at full speed; presumably not.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+
+#include "musbdefs.h"
+#include "otg.h"
+
+
+static void otg_set_session(struct musb *musb, u8 bSession)
+{
+ void __iomem *pBase = musb->pRegs;
+ u8 devctl = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
+
+ DBG(2, "<==\n");
+
+ /* REVISIT unclear what this should do, but this looks
+ * like the wrong thing ... the OTG machine should never
+ * shut down so long as both host and peripheral drivers
+ * are active. you'd think the docs would help...
+ */
+ if (bSession) {
+ devctl |= MGC_M_DEVCTL_SESSION;
+ musb_writeb(pBase, MGC_O_HDRC_DEVCTL, devctl);
+ } else {
+ //devctl &= ~MGC_M_DEVCTL_SESSION;
+ musb_root_disconnect(musb);
+ //musb_writeb(pBase, MGC_O_HDRC_DEVCTL, devctl);
+ }
+}
+
+#if 0
+static void otg_request_session(struct musb *musb)
+{
+ void __iomem *pBase = musb->pRegs;
+ u8 devctl;
+
+ DBG(2, "<==\n");
+ devctl = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
+ devctl |= MGC_M_DEVCTL_SESSION;
+ musb_writeb(pBase, MGC_O_HDRC_DEVCTL, devctl);
+}
+#endif
+
+/* caller has irqlocked musb,
+ * and if host or peripheral needs to be shut down, already did that.
+ */
+static void otg_state_changed(struct musb *musb, enum usb_otg_state state)
+{
+ /* caller should pass the timeout here */
+ unsigned long timer = 0;
+
+ if (state == musb->OtgMachine.bState)
+ return;
+
+ DBG(1, "%d --> %d\n", musb->OtgMachine.bState, state);
+ musb->OtgMachine.bState = state;
+
+ /* OTG timeouts the hardware doesn't handle:
+ * - ...
+ */
+
+ switch (state) {
+ case OTG_STATE_A_IDLE:
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_B_HOST:
+ MUSB_HST_MODE(musb);
+ break;
+
+ case OTG_STATE_B_IDLE:
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_A_PERIPHERAL:
+ MUSB_DEV_MODE(musb);
+ break;
+
+ default:
+ DBG(1, "state change to %d?\n", state);
+ /* REVISIT this "otg" mode is goofy; just switch between
+ * default-A and default-B state machines, they already
+ * include disconnect-equivalent states (IDLE).
+ */
+ MUSB_OTG_MODE(musb);
+ break;
+ }
+
+ if (timer)
+ mod_timer(&musb->OtgMachine.Timer, jiffies + timer);
+ else
+ del_timer(&musb->OtgMachine.Timer);
+
+ /* FIXME the otg state implies MUSB_MODE(). Properly track
+ * xceiv.state, then remove OtgMachine.bState and MUSB_MODE...
+ */
+ DBG(2, "==> OTG state %d(%d), mode %s\n",
+ state, musb->xceiv.state,
+ MUSB_MODE(musb));
+}
+
+
+/**
+ * Timer expiration function for OTG protocol timeouts
+ * @param ptr standard expiration param (otg_machine pointer)
+ */
+static void otg_timeout(unsigned long ptr)
+{
+ struct otg_machine *pMachine = (void *) ptr;
+ void __iomem *mregs;
+ u8 devctl;
+ struct musb *musb = pMachine->musb;
+ unsigned long flags;
+
+ DBG(0, "** TIMEOUT ** state %d(%d)\n",
+ pMachine->bState, pMachine->musb->xceiv.state);
+
+ /* REVISIT: a few of these cases _require_ (per the OTG spec)
+ * some sort of user notification, such as turning on an LED
+ * or displaying a message on the screen; INFO() not enough.
+ */
+
+ spin_lock_irqsave(&musb->Lock, flags);
+ switch (pMachine->bState) {
+ case OTG_STATE_B_SRP_INIT:
+ INFO("SRP failed\n");
+ otg_set_session(pMachine->musb, FALSE);
+ otg_state_changed(pMachine->musb, OTG_STATE_B_IDLE);
+ break;
+
+ case OTG_STATE_B_WAIT_ACON:
+ INFO("No response from A-device\n");
+ mregs = pMachine->musb->pRegs;
+ devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
+ musb_writeb(mregs, MGC_O_HDRC_DEVCTL,
+ devctl & ~MGC_M_DEVCTL_HR);
+ otg_set_session(pMachine->musb, TRUE);
+ otg_state_changed(pMachine->musb, OTG_STATE_B_PERIPHERAL);
+ break;
+
+ case OTG_STATE_A_WAIT_BCON:
+ /* REVISIT we'd like to force the VBUS-off path here... */
+ INFO("No response from B-device\n");
+ otg_set_session(pMachine->musb, FALSE);
+ /* transition via OTG_STATE_A_WAIT_VFALL */
+ otg_state_changed(pMachine->musb, OTG_STATE_A_IDLE);
+ break;
+
+ case OTG_STATE_A_SUSPEND:
+ /* FIXME b-dev HNP is _optional_ so this is no error */
+ INFO("No B-device HNP response\n");
+ otg_set_session(pMachine->musb, FALSE);
+ /* transition via OTG_STATE_A_WAIT_VFALL */
+ otg_state_changed(pMachine->musb, OTG_STATE_A_IDLE);
+ break;
+
+ default:
+ WARN("timeout in state %d, now what?\n", pMachine->bState);
+ }
+ spin_unlock_irqrestore(&musb->Lock, flags);
+}
+
+void MGC_OtgMachineInit(struct otg_machine *pMachine, struct musb *musb)
+{
+ memset(pMachine, 0, sizeof *pMachine);
+ spin_lock_init(&pMachine->Lock);
+ pMachine->musb = musb;
+
+ init_timer(&pMachine->Timer);
+ pMachine->Timer.function = otg_timeout;
+ pMachine->Timer.data = (unsigned long)pMachine;
+
+ pMachine->bState = OTG_STATE_B_IDLE;
+ pMachine->bRequest = MGC_OTG_REQUEST_UNKNOWN;
+}
+
+void MGC_OtgMachineDestroy(struct otg_machine *pMachine)
+{
+ /* stop timer */
+ del_timer_sync(&pMachine->Timer);
+}
+
+/* caller has irqlocked musb */
+void MGC_OtgMachineInputsChanged(struct otg_machine *pMachine,
+ const MGC_OtgMachineInputs * pInputs)
+{
+
+ DBG(2, "<== bState %d(%d)%s%s%s%s%s%s\n",
+ pMachine->bState, pMachine->musb->xceiv.state,
+ pInputs->bSession ? ", sess" : "",
+ pInputs->bSuspend ? ", susp" : "",
+ pInputs->bConnection ? ", bcon" : "",
+ pInputs->bReset ? ", reset" : "",
+ pInputs->bConnectorId ? ", B-Dev" : ", A-Dev",
+ pInputs->bVbusError ? ", vbus_error" : "");
+
+ if (pInputs->bVbusError) {
+ /* transition via OTG_STATE_VBUS_ERR and
+ * then OTG_STATE_A_WAIT_VFALL
+ */
+ otg_state_changed(pMachine->musb, OTG_STATE_A_IDLE);
+ return;
+ }
+
+ switch (pMachine->bState) {
+ case OTG_STATE_B_IDLE:
+ if (pInputs->bSession && pInputs->bConnectorId) {
+ /* WRONG: if VBUS is below session threshold,
+ * it's still B_IDLE
+ */
+ otg_state_changed(pMachine->musb,
+ OTG_STATE_B_PERIPHERAL);
+ }
+ break;
+ case OTG_STATE_A_IDLE:
+ if (pInputs->bConnection) {
+ /*
+ * SKIP a state because connect IRQ comes so quickly
+ * after setting session,
+ * and only happens in host mode
+ */
+ otg_state_changed(pMachine->musb, OTG_STATE_A_HOST);
+ } else if (pInputs->bSession) {
+ otg_state_changed(pMachine->musb,
+ OTG_STATE_A_WAIT_BCON);
+ mod_timer(&pMachine->Timer, jiffies
+ + msecs_to_jiffies(MGC_OTG_T_A_WAIT_BCON));
+ }
+ break;
+
+ case OTG_STATE_B_SRP_INIT:
+ if (pInputs->bReset) {
+ otg_state_changed(pMachine->musb,
+ OTG_STATE_B_PERIPHERAL);
+ } else if (pInputs->bConnection) {
+ /* FIXME bogus: there is no such transition!!! */
+ otg_state_changed(pMachine->musb,
+ pInputs->bConnectorId
+ ? OTG_STATE_B_HOST
+ : OTG_STATE_A_HOST);
+ }
+ break;
+
+ case OTG_STATE_B_PERIPHERAL:
+ if (!pInputs->bSession) {
+ otg_state_changed(pMachine->musb, OTG_STATE_B_IDLE);
+ }
+
+ /* FIXME nothing ever sets bRequest ... */
+ if ((MGC_OTG_REQUEST_START_BUS == pMachine->bRequest)
+ && pMachine->musb->g.b_hnp_enable) {
+ otg_state_changed(pMachine->musb,
+ OTG_STATE_B_WAIT_ACON);
+ //mdelay(10);
+ //otg_set_session(pMachine->musb, FALSE);
+ mod_timer(&pMachine->Timer, jiffies
+ + msecs_to_jiffies(MGC_OTG_T_B_ASE0_BRST));
+ }
+ break;
+
+ case OTG_STATE_B_WAIT_ACON:
+ if (pInputs->bConnection) {
+ otg_state_changed(pMachine->musb, OTG_STATE_B_HOST);
+ } else if (!pInputs->bSession) {
+ otg_state_changed(pMachine->musb, OTG_STATE_B_IDLE);
+ } else if (!pInputs->bSuspend) {
+ otg_state_changed(pMachine->musb,
+ OTG_STATE_B_PERIPHERAL);
+ }
+ break;
+
+ case OTG_STATE_B_HOST:
+ if (!pInputs->bConnection) {
+ otg_state_changed(pMachine->musb, OTG_STATE_B_IDLE);
+ } else if (pInputs->bConnection && !pInputs->bReset) {
+ /* REVISIT seems incomplete */
+ }
+ break;
+
+ case OTG_STATE_A_WAIT_BCON:
+ if (pInputs->bConnection) {
+ otg_state_changed(pMachine->musb, OTG_STATE_A_HOST);
+ } else if (pInputs->bReset) {
+ /* FIXME there is no such transition */
+ otg_state_changed(pMachine->musb,
+ OTG_STATE_A_PERIPHERAL);
+ }
+ break;
+
+ case OTG_STATE_A_HOST:
+ if (!pInputs->bConnection) {
+ otg_state_changed(pMachine->musb,
+ OTG_STATE_A_WAIT_BCON);
+ mod_timer(&pMachine->Timer, jiffies
+ + msecs_to_jiffies(MGC_OTG_T_A_WAIT_BCON));
+ } else if (pInputs->bConnection && !pInputs->bReset) {
+ /* REVISIT seems incomplete */
+ }
+ break;
+
+ case OTG_STATE_A_SUSPEND:
+ if (!pInputs->bSuspend) {
+ otg_state_changed(pMachine->musb, OTG_STATE_A_HOST);
+ } else if (!pInputs->bConnection) {
+ if (musb_to_hcd(pMachine->musb)->self.b_hnp_enable) {
+ otg_state_changed(pMachine->musb,
+ OTG_STATE_A_PERIPHERAL);
+ } else {
+ otg_state_changed(pMachine->musb,
+ OTG_STATE_A_WAIT_BCON);
+ mod_timer(&pMachine->Timer, jiffies
+ + msecs_to_jiffies(MGC_OTG_T_A_WAIT_BCON));
+ }
+ }
+ break;
+
+ case OTG_STATE_A_PERIPHERAL:
+ if (!pInputs->bSession) {
+ /* transition via OTG_STATE_A_WAIT_VFALL */
+ otg_state_changed(pMachine->musb, OTG_STATE_A_IDLE);
+ } else if (pInputs->bSuspend) {
+ otg_state_changed(pMachine->musb,
+ OTG_STATE_A_WAIT_BCON);
+ mod_timer(&pMachine->Timer, jiffies
+ + msecs_to_jiffies(MGC_OTG_T_A_WAIT_BCON));
+ }
+ break;
+
+ default:
+ WARN("event in state %d, now what?\n", pMachine->bState);
+ }
+}
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS. THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+/*
+ * Interface to a generic OTG state machine for use by an OTG controller.
+ *
+ * FIXME most of this must vanish; usbcore handles some of it, and
+ * the OTG parts of a peripheral controller (and its driver) handle
+ * other things. Package it as an "otg transceiver".
+ */
+
+#ifndef __MUSB_LINUX_OTG_H__
+#define __MUSB_LINUX_OTG_H__
+
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+
+/**
+ * Introduction.
+ * An OTG state machine for use by the driver of an OTG controller that
+ * wishes to be OTG-aware.
+ * The state machine requires relevant inputs and a couple of services
+ * from the controller driver, and calls the controller driver to inform
+ * it of the current state and errors.
+ * Finally, it provides the necessary bus control service.
+ */
+
+/****************************** CONSTANTS ********************************/
+
+/*
+ * Define this (in milliseconds) to a target-specific value to override default.
+ * The OTG-spec minimum is 5000, and maximum is 6000 (see OTG spec errata).
+ */
+#ifndef MGC_OTG_T_B_SRP_FAIL
+#define MGC_OTG_T_B_SRP_FAIL 5000
+#endif
+
+/*
+ * Define this (in milliseconds) to a target-specific value to override default.
+ * This is the time an A-device should wait for a B-device to connect.
+ * The OTG-spec minimum is 1000.
+ * As a special case, for normal host-like behavior, you can set this to 0.
+ */
+#ifndef MGC_OTG_T_A_WAIT_BCON
+#define MGC_OTG_T_A_WAIT_BCON 1000
+#endif
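+
+/*
+ * Illustrative sketch, not part of this patch: a board-specific header
+ * (hypothetical) could override these defaults before this file is
+ * included, e.g. for plain host-like behavior:
+ *
+ *	#define MGC_OTG_T_A_WAIT_BCON	0	// wait forever for the B-device
+ *	#include "otg.h"
+ */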
+
+/*
+ * Define this (in milliseconds) to a target-specific value to override default.
+ * The OTG-spec minimum is 250.
+ */
+#ifndef MGC_OTG_T_AIDL_BDIS
+#define MGC_OTG_T_AIDL_BDIS 250
+#endif
+
+//#define MGC_OTG_T_B_ASE0_BRST 4
+#define MGC_OTG_T_B_ASE0_BRST 100
+
+/*
+ * MGC_OtgRequest.
+ * A software request for the OTG state machine
+ */
+typedef enum {
+ MGC_OTG_REQUEST_UNKNOWN,
+ /** Request the bus */
+ MGC_OTG_REQUEST_START_BUS,
+ /** Drop the bus */
+ MGC_OTG_REQUEST_DROP_BUS,
+ /** Suspend the bus */
+ MGC_OTG_REQUEST_SUSPEND_BUS,
+ /** Reset the state machine */
+ MGC_OTG_REQUEST_RESET
+} MGC_OtgRequest;
+
+
+/******************************** TYPES **********************************/
+
+/*
+ * MGC_OtgMachineInputs.
+ * The set of inputs which drives the state machine
+ * @field bSession TRUE when a session is in progress; FALSE when not
+ * @field bConnectorId TRUE for B-device; FALSE for A-device
+ * (assumed valid only when bSession is TRUE)
+ * @field bReset TRUE when reset is detected (peripheral role only)
+ * @field bConnection TRUE when connection is detected (host role only)
+ * @field bSuspend TRUE when bus suspend is detected
+ * @field bVbusError TRUE when a Vbus error is detected
+ */
+typedef struct {
+ u8 bSession;
+ u8 bConnectorId;
+ u8 bReset;
+ u8 bConnection;
+ u8 bSuspend;
+ u8 bVbusError;
+} MGC_OtgMachineInputs;
+
+/*
+ * OTG state machine instance data.
+ * @field Lock spinlock
+ * @field musb the controller instance
+ * @field bState current state (one of the OTG_STATE_* constants)
+ * @field Timer interval timer for OTG protocol timeouts
+ * @field bRequest current pending request
+ */
+struct otg_machine {
+ spinlock_t Lock;
+ struct musb *musb;
+ enum usb_otg_state bState;
+ struct timer_list Timer;
+ MGC_OtgRequest bRequest;
+
+ /* FIXME standard Linux-USB host and peripheral code includes
+ * OTG support ... most of this "otg machine" must vanish
+ */
+
+};
+
+/****************************** FUNCTIONS ********************************/
+
+/*
+ * Initialize an OTG state machine.
+ */
+extern void MGC_OtgMachineInit(struct otg_machine * pMachine,
+ struct musb *musb);
+
+/*
+ * Destroy an OTG state machine
+ * @param pMachine machine pointer
+ * @see #MGC_OtgMachineInit
+ */
+extern void MGC_OtgMachineDestroy(struct otg_machine * pMachine);
+
+/*
+ * OTG inputs have changed.
+ * A controller driver calls this when anything in the
+ * MGC_OtgMachineInputs has changed
+ * @param pMachine machine pointer
+ * @param pInputs current inputs
+ * @see #MGC_OtgMachineInit
+ */
+extern void MGC_OtgMachineInputsChanged(struct otg_machine * pMachine,
+ const MGC_OtgMachineInputs * pInputs);
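+
+/*
+ * Illustrative sketch, not part of this patch: a controller driver's
+ * irq path reports edges by filling in the whole set of inputs and
+ * calling this with the musb lock held, roughly:
+ *
+ *	MGC_OtgMachineInputs inputs;
+ *
+ *	memset(&inputs, 0, sizeof inputs);
+ *	inputs.bSession = TRUE;
+ *	inputs.bConnectorId = FALSE;	// A-device
+ *	MGC_OtgMachineInputsChanged(&musb->OtgMachine, &inputs);
+ */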
+
+#endif /* multiple inclusion protection */
--- /dev/null
+/******************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS. THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+/*
+ * Linux-specific architecture definitions
+ */
+
+#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__
+#define __MUSB_LINUX_PLATFORM_ARCH_H__
+
+#include <asm/io.h>
+
+#ifndef CONFIG_ARM
+static inline void readsl(const void __iomem *addr, void *buf, int len)
+ { insl((unsigned long)addr, buf, len); }
+static inline void readsw(const void __iomem *addr, void *buf, int len)
+ { insw((unsigned long)addr, buf, len); }
+static inline void readsb(const void __iomem *addr, void *buf, int len)
+ { insb((unsigned long)addr, buf, len); }
+
+static inline void writesl(const void __iomem *addr, const void *buf, int len)
+ { outsl((unsigned long)addr, buf, len); }
+static inline void writesw(const void __iomem *addr, const void *buf, int len)
+ { outsw((unsigned long)addr, buf, len); }
+static inline void writesb(const void __iomem *addr, const void *buf, int len)
+ { outsb((unsigned long)addr, buf, len); }
+
+#endif
+
+/* NOTE: these offsets are all in bytes */
+
+static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
+ { return __raw_readw(addr + offset); }
+
+static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
+ { return __raw_readl(addr + offset); }
+
+
+static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
+ { __raw_writew(data, addr + offset); }
+
+static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+ { __raw_writel(data, addr + offset); }
+
+
+#ifdef CONFIG_USB_TUSB6010
+
+/*
+ * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
+ */
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+{
+ u16 tmp;
+ u8 val;
+
+ tmp = __raw_readw(addr + (offset & ~1));
+ if (offset & 1)
+ val = (tmp >> 8);
+ else
+ val = tmp & 0xff;
+
+ return val;
+}
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+ u16 tmp;
+
+ tmp = __raw_readw(addr + (offset & ~1));
+ if (offset & 1)
+ tmp = (data << 8) | (tmp & 0xff);
+ else
+ tmp = (tmp & 0xff00) | data;
+
+ __raw_writew(tmp, addr + (offset & ~1));
+}
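+
+/*
+ * Worked example (comment only): with the 16-bit constraint above,
+ * musb_writeb(addr, 5, 0xab) reads the u16 at byte offset 4, merges
+ * 0xab into its high byte (offset 5 is odd), and writes the u16 back;
+ * musb_readb(addr, 5) likewise returns the high byte of that word.
+ */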
+
+#else
+
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+ { return __raw_readb(addr + offset); }
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+ { __raw_writeb(data, addr + offset); }
+
+#endif /* CONFIG_USB_TUSB6010 */
+
+#endif
--- /dev/null
+/*****************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006 by Nokia Corporation
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS. THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+/*
+ * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
+ *
+ * This consists of a Host Controller Driver (HCD) and a peripheral
+ * controller driver implementing the "Gadget" API; OTG support is
+ * in the works. These are normal Linux-USB controller drivers which
+ * use IRQs and have no dedicated thread.
+ *
+ * This version of the driver has only been used with products from
+ * Texas Instruments. Those products integrate the Inventra logic
+ * with other DMA, IRQ, and bus modules, as well as other logic that
+ * needs to be reflected in this driver.
+ *
+ *
+ * NOTE: the original Mentor code here was pretty much a collection
+ * of mechanisms that don't seem to have been fully integrated/working
+ * for *any* Linux kernel version. This version aims at Linux 2.6.now.
+ * Key open issues include:
+ *
+ * - Lack of host-side transaction scheduling, for all transfer types.
+ * The hardware doesn't do it; instead, software must.
+ *
+ * This is not an issue for OTG devices that don't support external
+ * hubs, but for more "normal" USB hosts it's a user issue that the
+ * "multipoint" support doesn't scale in the expected ways. That
+ * includes DaVinci EVM in a common non-OTG mode.
+ *
+ * * Control and bulk use dedicated endpoints, and there's as
+ * yet no mechanism to either (a) reclaim the hardware when
+ * peripherals are NAKing, which gets complicated with bulk
+ * endpoints, or (b) use more than a single bulk endpoint in
+ * each direction.
+ *
+ * RESULT: one device may be perceived as blocking another one.
+ *
+ * * Interrupt and isochronous will dynamically allocate endpoint
+ * hardware, but (a) there's no record keeping for bandwidth;
+ * (b) in the common case that few endpoints are available, there
+ * is no mechanism to reuse endpoints to talk to multiple devices.
+ *
+ * RESULT: At one extreme, bandwidth can be overcommitted in
+ * some hardware configurations, and no faults will be reported.
+ * At the other extreme, the bandwidth capabilities which do
+ * exist tend to be severely undercommitted. You can't yet hook
+ * up both a keyboard and a mouse to an external USB hub.
+ *
+ * * Host side doesn't understand that hardware endpoints have two
+ * directions, so it uses only half the resources available on
+ * chips like DaVinci or TUSB 6010.
+ *
+ * +++ PARTIALLY RESOLVED +++
+ *
+ * RESULT: On DaVinci (and TUSB 6010), only one external device may
+ * use periodic transfers, other than the hub used to connect it.
+ * (And if it were to understand, there would still be limitations
+ * because of the lack of periodic endpoint scheduling.)
+ *
+ * - Host-side doesn't use the HCD framework, even the older version in
+ * the 2.6.10 kernel, which doesn't provide per-endpoint URB queues.
+ *
+ * +++ PARTIALLY RESOLVED +++
+ *
+ * RESULT: code bloat, because it provides its own root hub;
+ * correctness issues.
+ *
+ * - Provides its own OTG bits. These are untested, and many of them
+ * seem to be superfluous code bloat given what usbcore does. (They
+ * have now been partially removed.)
+ */
+
+/*
+ * This gets many kinds of configuration information:
+ * - Kconfig for everything user-configurable
+ * - <asm/arch/hdrc_cnf.h> for SOC or family details
+ * - platform_device for addressing, irq, and platform_data
+ * - platform_data is mostly for board-specific information
+ *
+ * Most of the conditional compilation will (someday) vanish.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+
+#ifdef CONFIG_ARM
+#include <asm/arch/hardware.h>
+#include <asm/arch/memory.h>
+#include <asm/mach-types.h>
+#endif
+
+#include "musbdefs.h"
+// #ifdef CONFIG_USB_MUSB_HDRC_HCD
+#define VBUSERR_RETRY_COUNT 2 /* is this too few? */
+// #endif
+
+
+#ifdef CONFIG_ARCH_DAVINCI
+#include "davinci.h"
+#endif
+
+
+
+#if MUSB_DEBUG > 0
+unsigned debug = MUSB_DEBUG;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "initial debug message level");
+
+#define MUSB_VERSION_SUFFIX "/dbg"
+#endif
+
+#define DRIVER_AUTHOR "Mentor Graphics Corp. and Texas Instruments"
+#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
+
+#define MUSB_VERSION_BASE "2.2a/db-0.5.1"
+
+#ifndef MUSB_VERSION_SUFFIX
+#define MUSB_VERSION_SUFFIX ""
+#endif
+#define MUSB_VERSION MUSB_VERSION_BASE MUSB_VERSION_SUFFIX
+
+#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
+
+const char musb_driver_name[] = "musb_hdrc";
+
+/* this module is always GPL, the gadget might not... */
+MODULE_DESCRIPTION(DRIVER_INFO);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/* time (milliseconds) to wait before a restart */
+#define MUSB_RESTART_TIME 5000
+
+/* how many babbles to allow before giving up */
+#define MUSB_MAX_BABBLE_COUNT 10
+
+
+/*-------------------------------------------------------------------------*/
+
+static inline struct musb *dev_to_musb(struct device *dev)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* usbcore insists dev->driver_data is a "struct hcd *" */
+ return hcd_to_musb(dev_get_drvdata(dev));
+#else
+ return dev_get_drvdata(dev);
+#endif
+}
+
+static void otg_input_changed(struct musb * pThis, u8 devctl, u8 reset,
+ u8 connection, u8 suspend)
+{
+#ifdef CONFIG_USB_MUSB_OTG
+ struct otg_machine *otgm = &pThis->OtgMachine;
+ MGC_OtgMachineInputs Inputs;
+
+ /* reading suspend state from Power register does NOT work */
+ memset(&Inputs, 0, sizeof(Inputs));
+
+ Inputs.bSession = (devctl & MGC_M_DEVCTL_SESSION) ? TRUE : FALSE;
+ Inputs.bSuspend = suspend;
+ Inputs.bConnection = connection;
+ Inputs.bReset = reset;
+ Inputs.bConnectorId = (devctl & MGC_M_DEVCTL_BDEVICE) ? TRUE : FALSE;
+
+ MGC_OtgMachineInputsChanged(otgm, &Inputs);
+#endif
+}
+
+static void otg_input_changed_X(struct musb * pThis, u8 bVbusError, u8 bConnect)
+{
+#ifdef CONFIG_USB_MUSB_OTG
+ MGC_OtgMachineInputs Inputs;
+ void __iomem *pBase = pThis->pRegs;
+ u8 devctl = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
+ u8 power = musb_readb(pBase, MGC_O_HDRC_POWER);
+
+ DBG(2, "<== power %02x, devctl %02x%s%s\n", power, devctl,
+ bConnect ? ", bcon" : "",
+ bVbusError ? ", vbus_error" : "");
+
+ /* speculative */
+ memset(&Inputs, 0, sizeof(Inputs));
+ Inputs.bSession = (devctl & MGC_M_DEVCTL_SESSION) ? TRUE : FALSE;
+ Inputs.bConnectorId = (devctl & MGC_M_DEVCTL_BDEVICE) ? TRUE : FALSE;
+ Inputs.bReset = (power & MGC_M_POWER_RESET) ? TRUE : FALSE;
+ Inputs.bConnection = bConnect;
+ Inputs.bVbusError = bVbusError;
+ Inputs.bSuspend = (power & MGC_M_POWER_SUSPENDM) ? TRUE : FALSE;
+ MGC_OtgMachineInputsChanged(&(pThis->OtgMachine), &Inputs);
+#endif /* CONFIG_USB_MUSB_OTG */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+#ifndef CONFIG_USB_TUSB6010
+/*
+ * Load an endpoint's FIFO
+ */
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 wCount, const u8 *pSource)
+{
+ void __iomem *fifo = hw_ep->fifo;
+
+ prefetch((u8 *)pSource);
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'T', hw_ep->bLocalEnd, fifo, wCount, pSource);
+
+ /* we can't assume unaligned reads work */
+ if (likely((0x01 & (unsigned long) pSource) == 0)) {
+ u16 index = 0;
+
+ /* best case is 32bit-aligned source address */
+ if ((0x02 & (unsigned long) pSource) == 0) {
+ if (wCount >= 4) {
+ writesl(fifo, pSource + index, wCount >> 2);
+ index += wCount & ~0x03;
+ }
+ if (wCount & 0x02) {
+ musb_writew(fifo, 0, *(u16*)&pSource[index]);
+ index += 2;
+ }
+ } else {
+ if (wCount >= 2) {
+ writesw(fifo, pSource + index, wCount >> 1);
+ index += wCount & ~0x01;
+ }
+ }
+ if (wCount & 0x01)
+ musb_writeb(fifo, 0, pSource[index]);
+ } else {
+ /* byte aligned */
+ writesb(fifo, pSource, wCount);
+ }
+}
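+
+/*
+ * Worked example (comment only): for a 32-bit aligned pSource and
+ * wCount == 7, the code above issues one writesl() of one word
+ * (4 bytes, 7 >> 2 == 1), one musb_writew() for the next two bytes
+ * (7 & 0x02), and one musb_writeb() for the final byte (7 & 0x01);
+ * musb_read_fifo() below unloads with the mirror-image logic.
+ */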
+
+/*
+ * Unload an endpoint's FIFO
+ */
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 wCount, u8 *pDest)
+{
+ void __iomem *fifo = hw_ep->fifo;
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'R', hw_ep->bLocalEnd, fifo, wCount, pDest);
+
+ /* we can't assume unaligned writes work */
+ if (likely((0x01 & (unsigned long) pDest) == 0)) {
+ u16 index = 0;
+
+ /* best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) pDest) == 0) {
+ if (wCount >= 4) {
+ readsl(fifo, pDest, wCount >> 2);
+ index = wCount & ~0x03;
+ }
+ if (wCount & 0x02) {
+ *(u16*)&pDest[index] = musb_readw(fifo, 0);
+ index += 2;
+ }
+ } else {
+ if (wCount >= 2) {
+ readsw(fifo, pDest, wCount >> 1);
+ index = wCount & ~0x01;
+ }
+ }
+ if (wCount & 0x01)
+ pDest[index] = musb_readb(fifo, 0);
+ } else {
+ /* byte aligned */
+ readsb(fifo, pDest, wCount);
+ }
+}
+
+#endif /* normal PIO */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Interrupt Service Routine to record USB "global" interrupts.
+ * Since these do not happen often and signify things of
+ * paramount importance, it seems OK to check them individually;
+ * the order of the tests is specified in the manual.
+ *
+ * @param pThis instance pointer
+ * @param bIntrUSB IntrUSB register contents
+ * @param devctl DevCtl register contents
+ * @param power Power register contents
+ */
+
+#define STAGE0_MASK (MGC_M_INTR_RESUME | MGC_M_INTR_SESSREQ \
+ | MGC_M_INTR_VBUSERROR | MGC_M_INTR_CONNECT \
+ | MGC_M_INTR_RESET )
+
+static irqreturn_t musb_stage0_irq(struct musb * pThis, u8 bIntrUSB,
+ u8 devctl, u8 power)
+{
+ irqreturn_t handled = IRQ_NONE;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ void __iomem *pBase = pThis->pRegs;
+#endif
+
+ DBG(3, "<== Power=%02x, DevCtl=%02x, bIntrUSB=0x%x\n", power, devctl,
+ bIntrUSB);
+
+ /* in host mode, a device resumed me (from power save);
+ * in device mode, the host resumed me; either way it should
+ * not change "identity".
+ */
+ if (bIntrUSB & MGC_M_INTR_RESUME) {
+ handled = IRQ_HANDLED;
+ DBG(3, "RESUME\n");
+
+ if (devctl & MGC_M_DEVCTL_HM) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* REVISIT: this is where SRP kicks in, yes? */
+ MUSB_HST_MODE(pThis); /* unnecessary */
+ power &= ~MGC_M_POWER_SUSPENDM;
+ musb_writeb(pBase, MGC_O_HDRC_POWER,
+ power | MGC_M_POWER_RESUME);
+
+ /* should now be A_SUSPEND */
+ pThis->xceiv.state = OTG_STATE_A_HOST;
+#endif
+ } else {
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ MUSB_DEV_MODE(pThis); /* unnecessary */
+#endif
+ musb_g_resume(pThis);
+ }
+ }
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* see manual for the order of the tests */
+ if (bIntrUSB & MGC_M_INTR_SESSREQ) {
+ DBG(1, "SESSION_REQUEST (%d)\n", pThis->xceiv.state);
+
+ /* IRQ arrives from ID pin sense or (later, if VBUS power
+ * is removed) SRP. responses are time critical:
+ * - turn on VBUS (with silicon-specific mechanism)
+ * - go through A_WAIT_VRISE
+ * - ... to A_WAIT_BCON.
+ * a_wait_vrise_tmout triggers VBUS_ERROR transitions
+ */
+ musb_writeb(pBase, MGC_O_HDRC_DEVCTL, MGC_M_DEVCTL_SESSION);
+ pThis->bEnd0Stage = MGC_END0_START;
+ pThis->xceiv.state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(pThis);
+
+ handled = IRQ_HANDLED;
+
+#ifdef CONFIG_USB_MUSB_OTG
+ {
+ MGC_OtgMachineInputs Inputs;
+ memset(&Inputs, 0, sizeof(Inputs));
+ Inputs.bSession = TRUE;
+ Inputs.bConnectorId = FALSE;
+ Inputs.bReset = FALSE;
+ Inputs.bConnection = FALSE;
+ Inputs.bSuspend = FALSE;
+ MGC_OtgMachineInputsChanged(&(pThis->OtgMachine), &Inputs);
+ }
+#endif
+ }
+
+ if (bIntrUSB & MGC_M_INTR_VBUSERROR) {
+
+ // MGC_OtgMachineInputsChanged(otgm, &Inputs);
+ // ... may need to abort otg timer ...
+
+ DBG(1, "VBUS_ERROR (%02x)\n", devctl);
+
+ /* after hw goes to A_IDLE, try connecting again */
+ pThis->xceiv.state = OTG_STATE_A_IDLE;
+ if (pThis->vbuserr_retry--)
+ musb_writeb(pBase, MGC_O_HDRC_DEVCTL,
+ MGC_M_DEVCTL_SESSION);
+ return IRQ_HANDLED;
+ } else
+ pThis->vbuserr_retry = VBUSERR_RETRY_COUNT;
+
+ if (bIntrUSB & MGC_M_INTR_CONNECT) {
+ handled = IRQ_HANDLED;
+
+ pThis->bEnd0Stage = MGC_END0_START;
+
+#ifdef CONFIG_USB_MUSB_OTG
+ /* flush endpoints when transitioning from Device Mode */
+ if (is_peripheral_active(pThis)) {
+ // REVISIT HNP; just force disconnect
+ }
+ pThis->bDelayPortPowerOff = FALSE;
+#endif
+ pThis->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
+ |USB_PORT_STAT_HIGH_SPEED
+ |USB_PORT_STAT_ENABLE
+ );
+ pThis->port1_status |= USB_PORT_STAT_CONNECTION
+ |(USB_PORT_STAT_C_CONNECTION << 16);
+
+ /* high vs full speed is just a guess until after reset */
+ if (devctl & MGC_M_DEVCTL_LSDEV)
+ pThis->port1_status |= USB_PORT_STAT_LOW_SPEED;
+
+ usb_hcd_poll_rh_status(musb_to_hcd(pThis));
+
+ MUSB_HST_MODE(pThis);
+
+ /* indicate new connection to OTG machine */
+ switch (pThis->xceiv.state) {
+ case OTG_STATE_B_WAIT_ACON:
+ pThis->xceiv.state = OTG_STATE_B_HOST;
+ break;
+ default:
+ DBG(2, "connect in state %d\n", pThis->xceiv.state);
+ /* FALLTHROUGH */
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_WAIT_VRISE:
+ pThis->xceiv.state = OTG_STATE_A_HOST;
+ break;
+ }
+ DBG(1, "CONNECT (host state %d)\n", pThis->xceiv.state);
+ otg_input_changed(pThis, devctl, FALSE, TRUE, FALSE);
+ }
+#endif /* CONFIG_USB_MUSB_HDRC_HCD */
+
+ /* saved one bit: bus reset and babble share the same bit;
+ * if I am the host, it's babble, since I must be the only
+ * one allowed to reset the bus; in OTG mode it means I have
+ * to switch to the device role
+ */
+ if (bIntrUSB & MGC_M_INTR_RESET) {
+ if (devctl & MGC_M_DEVCTL_HM) {
+ DBG(1, "BABBLE\n");
+
+ /* REVISIT it's unclear how to handle this. Mentor's
+ * code stopped the whole USB host, which is clearly
+ * very wrong. For now, just expect the hardware is
+ * sane, so babbling devices also trigger a normal
+ * endpoint i/o fault (with automatic recovery).
+ * (A "babble" IRQ seems quite pointless...)
+ */
+
+ } else {
+ DBG(1, "BUS RESET\n");
+
+ musb_g_reset(pThis);
+
+ /* reading state from Power register doesn't work */
+ otg_input_changed(pThis, devctl, TRUE, FALSE,
+ (power & MGC_M_POWER_SUSPENDM)
+ ? TRUE : FALSE);
+ }
+
+ handled = IRQ_HANDLED;
+ }
+
+ return handled;
+}
+
+/*
+ * Second-stage Interrupt Service Routine for "global" interrupts
+ * that are less time-critical: disconnect, and suspend (plus SOF,
+ * in the disabled block below).
+ *
+ * @param pThis instance pointer
+ * @param bIntrUSB IntrUSB register contents
+ * @param devctl DevCtl register contents
+ * @param power Power register contents
+ */
+static irqreturn_t musb_stage2_irq(struct musb * pThis, u8 bIntrUSB,
+ u8 devctl, u8 power)
+{
+ irqreturn_t handled = IRQ_NONE;
+
+#if 0
+/* REVISIT ... this would be for multiplexing periodic endpoints, or
+ * supporting transfer phasing to prevent exceeding ISO bandwidth
+ * limits of a given frame or microframe.
+ *
+ * It's not needed for peripheral side, which dedicates endpoints;
+ * though it _might_ use SOF irqs for other purposes.
+ *
+ * And it's not currently needed for host side, which also dedicates
+ * endpoints, relies on TX/RX interval registers, and isn't claimed
+ * to support ISO transfers yet.
+ */
+ if (bIntrUSB & MGC_M_INTR_SOF) {
+ void __iomem *pBase = pThis->pRegs;
+ struct musb_hw_ep *ep;
+ u8 bEnd;
+ u16 wFrame;
+
+ DBG(6, "START_OF_FRAME\n");
+ handled = IRQ_HANDLED;
+
+ /* start any periodic Tx transfers waiting for current frame */
+ wFrame = musb_readw(pBase, MGC_O_HDRC_FRAME);
+ ep = pThis->aLocalEnd;
+ for (bEnd = 1; (bEnd < pThis->bEndCount)
+ && (pThis->wEndMask >= (1 << bEnd));
+ bEnd++, ep++) {
+ // FIXME handle framecounter wraps (12 bits)
+ // eliminate duplicated StartUrb logic
+ if (ep->dwWaitFrame >= wFrame) {
+ ep->dwWaitFrame = 0;
+ printk("SOF --> periodic TX%s on %d\n",
+ ep->tx_channel ? " DMA" : "",
+ bEnd);
+ if (!ep->tx_channel)
+ musb_h_tx_start(pThis, bEnd);
+ else
+ cppi_hostdma_start(pThis, bEnd);
+ }
+ } /* end of for loop */
+ }
+#endif
+
+ if ((bIntrUSB & MGC_M_INTR_DISCONNECT) && !pThis->bIgnoreDisconnect) {
+ DBG(1, "DISCONNECT as %s, devctl %02x\n",
+ MUSB_MODE(pThis), devctl);
+ handled = IRQ_HANDLED;
+
+ /* need to check it against pThis, because devctl is going
+ * to report ID low as soon as the device gets disconnected
+ */
+ if (is_host_active(pThis))
+ musb_root_disconnect(pThis);
+ else
+ musb_g_disconnect(pThis);
+
+ /* REVISIT all OTG state machine transitions */
+ otg_input_changed_X(pThis, FALSE, FALSE);
+ }
+
+ if (bIntrUSB & MGC_M_INTR_SUSPEND) {
+ DBG(1, "SUSPEND, devctl %02x\n", devctl);
+ handled = IRQ_HANDLED;
+
+ /* peripheral suspend, may trigger HNP */
+ if (!(devctl & MGC_M_DEVCTL_HM)) {
+ musb_g_suspend(pThis);
+ otg_input_changed(pThis, devctl, FALSE, FALSE, TRUE);
+ musb_platform_try_idle(pThis);
+ }
+ }
+
+ return handled;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+* Program the HDRC to start (enable interrupts, dma, etc.).
+*/
+void musb_start(struct musb * pThis)
+{
+ void __iomem *pBase = pThis->pRegs;
+ u8 state;
+
+ DBG(2, "<==\n");
+
+ /* TODO: always set ISOUPDATE in POWER (periph mode) and leave it on! */
+
+ /* Set INT enable registers, enable interrupts */
+ musb_writew(pBase, MGC_O_HDRC_INTRTXE, pThis->wEndMask);
+ musb_writew(pBase, MGC_O_HDRC_INTRRXE, pThis->wEndMask & 0xfffe);
+ musb_writeb(pBase, MGC_O_HDRC_INTRUSBE, 0xf7);
+
+ musb_platform_enable(pThis);
+
+ musb_writeb(pBase, MGC_O_HDRC_TESTMODE, 0);
+
+ /* enable high-speed/low-power and start session */
+ musb_writeb(pBase, MGC_O_HDRC_POWER,
+ MGC_M_POWER_SOFTCONN | MGC_M_POWER_HSENAB);
+
+ switch (pThis->board_mode) {
+ case MUSB_HOST:
+ case MUSB_OTG:
+ musb_writeb(pBase, MGC_O_HDRC_DEVCTL, MGC_M_DEVCTL_SESSION);
+ break;
+ case MUSB_PERIPHERAL:
+ state = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
+ musb_writeb(pBase, MGC_O_HDRC_DEVCTL,
+ state & ~MGC_M_DEVCTL_SESSION);
+ break;
+ }
+}
+
+
+static void musb_generic_disable(struct musb *pThis)
+{
+ void __iomem *pBase = pThis->pRegs;
+ u16 temp;
+
+ /* disable interrupts */
+ musb_writeb(pBase, MGC_O_HDRC_INTRUSBE, 0);
+ musb_writew(pBase, MGC_O_HDRC_INTRTX, 0);
+ musb_writew(pBase, MGC_O_HDRC_INTRRX, 0);
+
+ /* off */
+ musb_writeb(pBase, MGC_O_HDRC_DEVCTL, 0);
+
+ /* flush pending interrupts */
+ temp = musb_readb(pBase, MGC_O_HDRC_INTRUSB);
+ temp = musb_readw(pBase, MGC_O_HDRC_INTRTX);
+ temp = musb_readw(pBase, MGC_O_HDRC_INTRRX);
+}
+
+/*
+ * Make the HDRC stop (disable interrupts, etc.);
+ * reversible by musb_start
+ * called on gadget driver unregister
+ * with controller locked, irqs blocked
+ * acts as a NOP unless some role activated the hardware
+ */
+void musb_stop(struct musb * pThis)
+{
+ /* stop IRQs, timers, ... */
+ musb_platform_disable(pThis);
+ musb_generic_disable(pThis);
+ DBG(3, "HDRC disabled\n");
+
+#ifdef CONFIG_USB_MUSB_OTG
+ if (is_otg_enabled(pThis))
+ MGC_OtgMachineDestroy(&pThis->OtgMachine);
+#endif
+
+ /* FIXME
+ * - mark host and/or peripheral drivers unusable/inactive
+ * - disable DMA (and enable it in HdrcStart)
+ * - make sure we can musb_start() after musb_stop(); OTG-mode
+ * gadget driver module rmmod/modprobe cycles need that
+ * - ...
+ */
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (is_host_enabled(pThis)) {
+ /* REVISIT aren't there some paths where this is wrong? */
+ dev_warn(pThis->controller, "%s, root hub still active\n",
+ __FUNCTION__);
+ }
+#endif
+}
+
+static void musb_shutdown(struct platform_device *pdev)
+{
+ struct musb *musb = dev_to_musb(&pdev->dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+ MUSB_ERR_MODE(musb, MUSB_ERR_SHUTDOWN);
+ spin_unlock_irqrestore(&musb->Lock, flags);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The silicon either has hard-wired endpoint configurations, or else
+ * "dynamic fifo" sizing. The driver has support for both, though at this
+ * writing only the dynamic sizing is very well tested. We use normal
+ * idioms so both modes are compile-tested, but dead code elimination
+ * leaves only the relevant one in the object file.
+ *
+ * We don't currently use dynamic fifo setup capability to do anything
+ * more than selecting one of a bunch of predefined configurations.
+ */
+#ifdef MUSB_C_DYNFIFO_DEF
+#define can_dynfifo() 1
+#else
+#define can_dynfifo() 0
+#endif
+
+static ushort __devinitdata fifo_mode = 2;
+
+/* "modprobe ... fifo_mode=1" etc */
+module_param(fifo_mode, ushort, 0);
+MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
+
+
+#define DYN_FIFO_SIZE (1<<(MUSB_C_RAM_BITS+2))
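+
+/*
+ * For example, assuming MUSB_C_RAM_BITS == 10 (hypothetical value),
+ * DYN_FIFO_SIZE is 1 << 12 == 4096 bytes of FIFO RAM; the fifo_mode
+ * 1/2/3 tables below are sized to fit exactly that 4KB.
+ */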
+
+enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed));
+enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed));
+
+struct fifo_cfg {
+ u8 hw_ep_num;
+ enum fifo_style style;
+ enum buf_mode mode;
+ u16 maxpacket;
+};
+
+/*
+ * tables defining fifo_mode values. define more if you like.
+ * for host side, make sure both halves of ep1 are set up.
+ */
+
+/* mode 0 - fits in 2KB */
+static const struct fifo_cfg __devinitdata mode_0_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 1 - fits in 4KB */
+static const struct fifo_cfg __devinitdata mode_1_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 2 - fits in 4KB */
+static const struct fifo_cfg __devinitdata mode_2_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 3 - fits in 4KB */
+static const struct fifo_cfg __devinitdata mode_3_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
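+
+/*
+ * Illustrative sketch, not enabled here: another table would follow
+ * the same pattern and be wired up with a matching case in
+ * ep_config_from_table(). A hypothetical "mode 4" biased toward
+ * small interrupt endpoints might read:
+ *
+ *	static const struct fifo_cfg __devinitdata mode_4_cfg[] = {
+ *	{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
+ *	{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
+ *	{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 64, },
+ *	{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 64, },
+ *	{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 64, },
+ *	};
+ */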
+
+
+/*
+ * configure a fifo; for non-shared endpoints, this may be called
+ * once for a tx fifo and once for an rx fifo.
+ *
+ * returns negative errno or offset for next fifo.
+ */
+static int __devinit
+fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
+ const struct fifo_cfg *cfg, u16 offset)
+{
+ void __iomem *mbase = musb->pRegs;
+ int size = 0;
+ u16 maxpacket = cfg->maxpacket;
+ u16 c_off = offset >> 3;
+ u8 c_size;
+
+ /* expect hw_ep has already been zero-initialized */
+
+ size = ffs(max(maxpacket, (u16) 8)) - 1;
+ maxpacket = 1 << size;
+
+ c_size = size - 3;
+ if (cfg->mode == BUF_DOUBLE) {
+ if ((offset + (maxpacket << 1)) > DYN_FIFO_SIZE)
+ return -EMSGSIZE;
+ c_size |= MGC_M_FIFOSZ_DPB;
+ } else {
+ if ((offset + maxpacket) > DYN_FIFO_SIZE)
+ return -EMSGSIZE;
+ }
+
+ /* configure the FIFO */
+ musb_writeb(mbase, MGC_O_HDRC_INDEX, hw_ep->bLocalEnd);
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* EP0 reserved endpoint for control, bidirectional;
+ * EP1 reserved for bulk, two unidirectional halves.
+ */
+ if (hw_ep->bLocalEnd == 1)
+ musb->bulk_ep = hw_ep;
+ /* REVISIT error check: be sure ep0 can both rx and tx ... */
+#endif
+ switch (cfg->style) {
+ case FIFO_TX:
+ musb_writeb(mbase, MGC_O_HDRC_TXFIFOSZ, c_size);
+ musb_writew(mbase, MGC_O_HDRC_TXFIFOADD, c_off);
+ hw_ep->tx_double_buffered = !!(c_size & MGC_M_FIFOSZ_DPB);
+ hw_ep->wMaxPacketSizeTx = maxpacket;
+ break;
+ case FIFO_RX:
+ musb_writeb(mbase, MGC_O_HDRC_RXFIFOSZ, c_size);
+ musb_writew(mbase, MGC_O_HDRC_RXFIFOADD, c_off);
+ hw_ep->rx_double_buffered = !!(c_size & MGC_M_FIFOSZ_DPB);
+ hw_ep->wMaxPacketSizeRx = maxpacket;
+ break;
+ case FIFO_RXTX:
+ musb_writeb(mbase, MGC_O_HDRC_TXFIFOSZ, c_size);
+ musb_writew(mbase, MGC_O_HDRC_TXFIFOADD, c_off);
+ hw_ep->rx_double_buffered = !!(c_size & MGC_M_FIFOSZ_DPB);
+ hw_ep->wMaxPacketSizeRx = maxpacket;
+
+ musb_writeb(mbase, MGC_O_HDRC_RXFIFOSZ, c_size);
+ musb_writew(mbase, MGC_O_HDRC_RXFIFOADD, c_off);
+ hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
+ hw_ep->wMaxPacketSizeTx = maxpacket;
+
+ hw_ep->bIsSharedFifo = TRUE;
+ break;
+ }
+
+ /* NOTE rx and tx endpoint irqs aren't managed separately,
+ * which happens to be ok
+ */
+ musb->wEndMask |= (1 << hw_ep->bLocalEnd);
+
+ return offset + (maxpacket << ((c_size & MGC_M_FIFOSZ_DPB) ? 1 : 0));
+}
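+
+/*
+ * Worked example (comment only): for cfg->maxpacket == 512, size is
+ * ffs(512) - 1 == 9, so c_size == 6; BUF_DOUBLE sets MGC_M_FIFOSZ_DPB
+ * and the endpoint consumes 2 * 512 bytes of FIFO RAM, so fifo_setup()
+ * returns offset + 1024 (offset + 512 with single buffering).
+ */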
+
+static const struct fifo_cfg __devinitdata ep0_cfg = {
+ .style = FIFO_RXTX, .maxpacket = 64,
+};
+
+static int __devinit ep_config_from_table(struct musb *musb)
+{
+ const struct fifo_cfg *cfg;
+ unsigned n;
+ int offset;
+ struct musb_hw_ep *hw_ep = musb->aLocalEnd;
+
+ switch (fifo_mode) {
+ default:
+ fifo_mode = 0;
+ /* FALLTHROUGH */
+ case 0:
+ cfg = mode_0_cfg;
+ n = ARRAY_SIZE(mode_0_cfg);
+ break;
+ case 1:
+ cfg = mode_1_cfg;
+ n = ARRAY_SIZE(mode_1_cfg);
+ break;
+ case 2:
+ cfg = mode_2_cfg;
+ n = ARRAY_SIZE(mode_2_cfg);
+ break;
+ case 3:
+ cfg = mode_3_cfg;
+ n = ARRAY_SIZE(mode_3_cfg);
+ break;
+ }
+
+ printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
+ musb_driver_name, fifo_mode);
+
+
+ offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
+ // assert(offset > 0)
+
+ while (n--) {
+ u8 epn = cfg->hw_ep_num;
+
+ if (epn >= MUSB_C_NUM_EPS) {
+ pr_debug( "%s: invalid ep %d\n",
+ musb_driver_name, epn);
+ return -EINVAL;
+ }
+ offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
+ if (offset < 0) {
+ pr_debug( "%s: mem overrun, ep %d\n",
+ musb_driver_name, epn);
+ return -EINVAL;
+ }
+ epn++;
+ musb->bEndCount = max(epn, musb->bEndCount);
+ }
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (!musb->bulk_ep) {
+ pr_debug( "%s: missing bulk\n", musb_driver_name);
+ return -EINVAL;
+ }
+#endif
+
+ return 0;
+}
+
+
+/*
+ * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
+ * @param musb the controller
+ */
+static int __devinit ep_config_from_hw(struct musb *musb)
+{
+ u8 bEnd = 0, reg;
+ struct musb_hw_ep *pEnd;
+ void __iomem *pBase = musb->pRegs;
+
+ DBG(2, "<== static silicon ep config\n");
+
+ /* FIXME pick up ep0 maxpacket size */
+
+ for (bEnd = 1; bEnd < MUSB_C_NUM_EPS; bEnd++) {
+ MGC_SelectEnd(pBase, bEnd);
+ pEnd = musb->aLocalEnd + bEnd;
+
+ /* read from core using indexed model */
+ reg = musb_readb(pBase, 0x10 + MGC_O_HDRC_FIFOSIZE);
+ if (!reg) {
+ /* 0's returned when no more endpoints */
+ break;
+ }
+ musb->bEndCount++;
+ musb->wEndMask |= (1 << bEnd);
+
+ pEnd->wMaxPacketSizeTx = 1 << (reg & 0x0f);
+
+ /* shared TX/RX FIFO? */
+ if ((reg & 0xf0) == 0xf0) {
+ pEnd->wMaxPacketSizeRx = pEnd->wMaxPacketSizeTx;
+ pEnd->bIsSharedFifo = TRUE;
+ continue;
+ } else {
+ pEnd->wMaxPacketSizeRx = 1 << ((reg & 0xf0) >> 4);
+ pEnd->bIsSharedFifo = FALSE;
+ }
+
+ /* FIXME set up pEnd->{rx,tx}_double_buffered */
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* pick an RX/TX endpoint for bulk */
+ if (pEnd->wMaxPacketSizeTx < 512
+ || pEnd->wMaxPacketSizeRx < 512)
+ continue;
+
+ /* REVISIT: this algorithm is lazy, we should at least
+ * try to pick a double buffered endpoint.
+ */
+ if (musb->bulk_ep)
+ continue;
+ musb->bulk_ep = pEnd;
+#endif
+ }
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (!musb->bulk_ep) {
+ pr_debug( "%s: missing bulk\n", musb_driver_name);
+ return -EINVAL;
+ }
+#endif
+
+ return 0;
+}
+
+enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
+
+/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
+ * configure endpoints, or take their config from silicon
+ */
+static int __devinit musb_core_init(u16 wType, struct musb *pThis)
+{
+#ifdef MUSB_AHB_ID
+ u32 dwData;
+#endif
+ u8 reg;
+ char *type;
+ u16 wRelease, wRelMajor, wRelMinor;
+ char aInfo[78], aRevision[32], aDate[12];
+ void __iomem *pBase = pThis->pRegs;
+ int status = 0;
+ int i;
+
+ /* log core options (read using indexed model) */
+ MGC_SelectEnd(pBase, 0);
+ reg = musb_readb(pBase, 0x10 + MGC_O_HDRC_CONFIGDATA);
+
+ strcpy(aInfo, (reg & MGC_M_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
+ if (reg & MGC_M_CONFIGDATA_DYNFIFO) {
+ strcat(aInfo, ", dyn FIFOs");
+ }
+ if (reg & MGC_M_CONFIGDATA_MPRXE) {
+ strcat(aInfo, ", bulk combine");
+#ifdef C_MP_RX
+ pThis->bBulkCombine = TRUE;
+#else
+ strcat(aInfo, " (X)"); /* no driver support */
+#endif
+ }
+ if (reg & MGC_M_CONFIGDATA_MPTXE) {
+ strcat(aInfo, ", bulk split");
+#ifdef C_MP_TX
+ pThis->bBulkSplit = TRUE;
+#else
+ strcat(aInfo, " (X)"); /* no driver support */
+#endif
+ }
+ if (reg & MGC_M_CONFIGDATA_HBRXE) {
+ strcat(aInfo, ", HB-ISO Rx");
+ strcat(aInfo, " (X)"); /* no driver support */
+ }
+ if (reg & MGC_M_CONFIGDATA_HBTXE) {
+ strcat(aInfo, ", HB-ISO Tx");
+ strcat(aInfo, " (X)"); /* no driver support */
+ }
+ if (reg & MGC_M_CONFIGDATA_SOFTCONE) {
+ strcat(aInfo, ", SoftConn");
+ }
+
+ printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
+ musb_driver_name, reg, aInfo);
+
+#ifdef MUSB_AHB_ID
+ dwData = musb_readl(pBase, 0x404);
+ sprintf(aDate, "%04d-%02x-%02x", (dwData & 0xffff),
+ (dwData >> 16) & 0xff, (dwData >> 24) & 0xff);
+ /* FIXME ID2 and ID3 are unused */
+ dwData = musb_readl(pBase, 0x408);
+ printk("ID2=%lx\n", (long unsigned)dwData);
+ dwData = musb_readl(pBase, 0x40c);
+ printk("ID3=%lx\n", (long unsigned)dwData);
+ reg = musb_readb(pBase, 0x400);
+ wType = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
+#else
+ aDate[0] = 0;
+#endif
+ if (MUSB_CONTROLLER_MHDRC == wType) {
+ pThis->bIsMultipoint = 1;
+ type = "M";
+ } else {
+ pThis->bIsMultipoint = 0;
+ type = "";
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
+ printk(KERN_ERR
+ "%s: kernel must blacklist external hubs\n",
+ musb_driver_name);
+#endif
+#endif
+ }
+
+ /* log release info */
+ wRelease = musb_readw(pBase, MGC_O_HDRC_HWVERS);
+ wRelMajor = (wRelease >> 10) & 0x1f;
+ wRelMinor = wRelease & 0x3ff;
+ snprintf(aRevision, 32, "%d.%d%s", wRelMajor,
+ wRelMinor, (wRelease & 0x8000) ? "RC" : "");
+ printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
+ musb_driver_name, type, aRevision, aDate);
+
+ /* configure ep0 */
+ pThis->aLocalEnd[0].wMaxPacketSizeTx = MGC_END0_FIFOSIZE;
+ pThis->aLocalEnd[0].wMaxPacketSizeRx = MGC_END0_FIFOSIZE;
+
+ /* discover endpoint configuration */
+ pThis->bEndCount = 1;
+ pThis->wEndMask = 1;
+
+ if (reg & MGC_M_CONFIGDATA_DYNFIFO) {
+ if (can_dynfifo())
+ status = ep_config_from_table(pThis);
+ else {
+ ERR("reconfigure software for Dynamic FIFOs\n");
+ status = -ENODEV;
+ }
+ } else {
+ if (!can_dynfifo())
+ status = ep_config_from_hw(pThis);
+ else {
+ ERR("reconfigure software for static FIFOs\n");
+ status = -ENODEV;
+ }
+ }
+
+ if (status < 0)
+ return status;
+
+ /* finish init, and print endpoint config */
+ for (i = 0; i < pThis->bEndCount; i++) {
+ struct musb_hw_ep *hw_ep = pThis->aLocalEnd + i;
+
+ hw_ep->fifo = MUSB_FIFO_OFFSET(i) + pBase;
+#ifdef CONFIG_USB_TUSB6010
+ hw_ep->fifo_async = pThis->async + 0x400 + MUSB_FIFO_OFFSET(i);
+ hw_ep->fifo_sync = pThis->sync + 0x400 + MUSB_FIFO_OFFSET(i);
+#endif
+
+ hw_ep->regs = MGC_END_OFFSET(i, 0) + pBase;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ hw_ep->target_regs = MGC_BUSCTL_OFFSET(i, 0) + pBase;
+ hw_ep->rx_reinit = 1;
+ hw_ep->tx_reinit = 1;
+#endif
+
+ if (hw_ep->wMaxPacketSizeTx) {
+ printk(KERN_DEBUG
+ "%s: hw_ep %d%s, %smax %d\n",
+ musb_driver_name, i,
+ hw_ep->bIsSharedFifo ? "shared" : "tx",
+ hw_ep->tx_double_buffered
+ ? "doublebuffer, " : "",
+ hw_ep->wMaxPacketSizeTx);
+ }
+ if (hw_ep->wMaxPacketSizeRx && !hw_ep->bIsSharedFifo) {
+ printk(KERN_DEBUG
+ "%s: hw_ep %d%s, %smax %d\n",
+ musb_driver_name, i,
+ "rx",
+ hw_ep->rx_double_buffered
+ ? "doublebuffer, " : "",
+ hw_ep->wMaxPacketSizeRx);
+ }
+ if (!(hw_ep->wMaxPacketSizeTx || hw_ep->wMaxPacketSizeRx))
+ DBG(1, "hw_ep %d not configured\n", i);
+ }
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_ARCH_OMAP243X
+
+static irqreturn_t generic_interrupt(int irq, void *__hci, struct pt_regs *r)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+
+ musb->int_usb = musb_readb(musb->pRegs, MGC_O_HDRC_INTRUSB);
+ musb->int_tx = musb_readw(musb->pRegs, MGC_O_HDRC_INTRTX);
+ musb->int_rx = musb_readw(musb->pRegs, MGC_O_HDRC_INTRRX);
+ musb->int_regs = r;
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->Lock, flags);
+
+ /* REVISIT we sometimes get spurious IRQs on g_ep0
+ * not clear why...
+ */
+ if (retval != IRQ_HANDLED)
+ DBG(5, "spurious?\n");
+
+ return IRQ_HANDLED;
+}
+
+#else
+#define generic_interrupt NULL
+#endif
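+
+/* when generic_interrupt stays NULL, musb_platform_init() must install
+ * a board-specific isr instead (tusb_interrupt() in tusb6010.c does
+ * this); musb_init_controller() fails with -ENODEV if none gets set
+ */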
+
+/*
+ * handle all the irqs defined by the HDRC core. for now we expect: other
+ * irq sources (phy, dma, etc) will be handled first, musb->int_* values
+ * will be assigned, and the irq will already have been acked.
+ *
+ * called in irq context with spinlock held, irqs blocked
+ */
+irqreturn_t musb_interrupt(struct musb *musb)
+{
+ irqreturn_t retval = IRQ_NONE;
+ u8 devctl, power;
+ int ep_num;
+ u32 reg;
+
+ devctl = musb_readb(musb->pRegs, MGC_O_HDRC_DEVCTL);
+ power = musb_readb(musb->pRegs, MGC_O_HDRC_POWER);
+
+ DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n",
+ (devctl & MGC_M_DEVCTL_HM) ? "host" : "peripheral",
+ musb->int_usb, musb->int_tx, musb->int_rx);
+
+ /* ignore requests when in error */
+ if (MUSB_IS_ERR(musb)) {
+ WARN("irq in error\n");
+ musb_platform_disable(musb);
+ return IRQ_NONE;
+ }
+
+ /* the core can interrupt us for multiple reasons; docs have
+ * a generic interrupt flowchart to follow
+ */
+ if (musb->int_usb & STAGE0_MASK)
+ retval |= musb_stage0_irq(musb, musb->int_usb,
+ devctl, power);
+ else
+ musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+
+ /* "stage 1" is handling endpoint irqs */
+
+ /* handle endpoint 0 first */
+ if (musb->int_tx & 1) {
+ if (devctl & MGC_M_DEVCTL_HM)
+ retval |= musb_h_ep0_irq(musb);
+ else
+ retval |= musb_g_ep0_irq(musb);
+ }
+
+ /* RX on endpoints 1-15 */
+ reg = musb->int_rx >> 1;
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+ // MGC_SelectEnd(musb->pRegs, ep_num);
+ /* REVISIT just retval = ep->rx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (devctl & MGC_M_DEVCTL_HM)
+ musb_host_rx(musb, ep_num);
+ else
+ musb_g_rx(musb, ep_num);
+ }
+
+ reg >>= 1;
+ ep_num++;
+ }
+
+ /* TX on endpoints 1-15 */
+ reg = musb->int_tx >> 1;
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+ // MGC_SelectEnd(musb->pRegs, ep_num);
+ /* REVISIT just retval |= ep->tx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (devctl & MGC_M_DEVCTL_HM)
+ musb_host_tx(musb, ep_num);
+ else
+ musb_g_tx(musb, ep_num);
+ }
+ reg >>= 1;
+ ep_num++;
+ }
+
+ /* finish handling "global" interrupts after handling fifos */
+ if (musb->int_usb)
+ retval |= musb_stage2_irq(musb,
+ musb->int_usb, devctl, power);
+
+ return retval;
+}
+
+
+#ifndef CONFIG_USB_INVENTRA_FIFO
+static int __devinitdata use_dma = is_dma_capable();
+
+/* "modprobe ... use_dma=0" etc */
+module_param(use_dma, bool, 0);
+MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
+
+void musb_dma_completion(struct musb *musb, u8 bLocalEnd, u8 bTransmit)
+{
+ u8 devctl = musb_readb(musb->pRegs, MGC_O_HDRC_DEVCTL);
+
+ /* called with controller lock already held */
+
+ if (!bLocalEnd) {
+#if !(defined(CONFIG_USB_TI_CPPI_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA))
+ /* endpoint 0 */
+ if (devctl & MGC_M_DEVCTL_HM)
+ musb_h_ep0_irq(musb);
+ else
+ musb_g_ep0_irq(musb);
+#endif
+ } else {
+ /* endpoints 1..15 */
+ if (bTransmit) {
+ if (devctl & MGC_M_DEVCTL_HM)
+ musb_host_tx(musb, bLocalEnd);
+ else
+ musb_g_tx(musb, bLocalEnd);
+ } else {
+ /* receive */
+ if (devctl & MGC_M_DEVCTL_HM)
+ musb_host_rx(musb, bLocalEnd);
+ else
+ musb_g_rx(musb, bLocalEnd);
+ }
+ }
+}
+
+#else
+#define use_dma is_dma_capable()
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_SYSFS
+
+static ssize_t musb_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+ switch (musb->board_mode) {
+ case MUSB_HOST:
+ ret = sprintf(buf, "host\n");
+ break;
+ case MUSB_PERIPHERAL:
+ ret = sprintf(buf, "peripheral\n");
+ break;
+ case MUSB_OTG:
+ ret = sprintf(buf, "otg\n");
+ break;
+ }
+ spin_unlock_irqrestore(&musb->Lock, flags);
+
+ return ret;
+}
+static DEVICE_ATTR(mode, S_IRUGO, musb_mode_show, NULL);
+
+static ssize_t musb_cable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct musb *musb = dev_to_musb(dev);
+ char *v1= "", *v2 = "?";
+ unsigned long flags;
+ int vbus;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+#ifdef CONFIG_USB_TUSB6010
+ /* REVISIT: connect-A != connect-B ... */
+ vbus = musb_platform_get_vbus_status(musb);
+ if (vbus)
+ v2 = "connected";
+ else
+ v2 = "disconnected";
+ musb_platform_try_idle(musb);
+#else
+ /* NOTE: board-specific issues, like too-big capacitors keeping
+ * VBUS high for a long time after power has been removed, can
+ * cause temporary false indications of a connection.
+ */
+ vbus = musb_readb(musb->pRegs, MGC_O_HDRC_DEVCTL);
+ if (vbus & 0x10) {
+ /* REVISIT retest on real OTG hardware */
+ switch (musb->board_mode) {
+ case MUSB_HOST:
+ v2 = "A";
+ break;
+ case MUSB_PERIPHERAL:
+ v2 = "B";
+ break;
+ case MUSB_OTG:
+ v1 = "Mini-";
+ v2 = (vbus & MGC_M_DEVCTL_BDEVICE) ? "A" : "B";
+ break;
+ }
+ } else /* VBUS level below A-Valid */
+ v2 = "disconnected";
+#endif
+ spin_unlock_irqrestore(&musb->Lock, flags);
+
+ return sprintf(buf, "%s%s\n", v1, v2);
+}
+static DEVICE_ATTR(cable, S_IRUGO, musb_cable_show, NULL);
+
+#endif
+
+static void musb_irq_work(void *data)
+{
+ struct musb *musb = (struct musb *)data;
+ unsigned long flags;
+ u8 event = 0;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+ if (musb->status & MUSB_VBUS_STATUS_CHG) {
+ musb->status &= ~MUSB_VBUS_STATUS_CHG;
+ event = 1;
+ }
+ musb_platform_try_idle(musb);
+ spin_unlock_irqrestore(&musb->Lock, flags);
+
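+ /* outside the lock: sysfs_notify() wakes any poll(2) waiting on
+ * the "cable" attribute (see musb_cable_show above)
+ */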
+#ifdef CONFIG_SYSFS
+ if (event)
+ sysfs_notify(&musb->controller->kobj, NULL, "cable");
+#endif
+}
+
+/* --------------------------------------------------------------------------
+ * Init support
+ */
+
+static struct musb *__devinit
+allocate_instance(struct device *dev, void __iomem *mbase)
+{
+ struct musb *musb;
+ struct musb_hw_ep *ep;
+ int epnum;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ struct usb_hcd *hcd;
+
+ hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id);
+ if (!hcd)
+ return NULL;
+ /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
+
+ musb = hcd_to_musb(hcd);
+ INIT_LIST_HEAD(&musb->control);
+ INIT_LIST_HEAD(&musb->in_bulk);
+ INIT_LIST_HEAD(&musb->out_bulk);
+
+ hcd->uses_new_polling = 1;
+
+#else
+ musb = kzalloc(sizeof *musb, GFP_KERNEL);
+ if (!musb)
+ return NULL;
+ dev_set_drvdata(dev, musb);
+
+#endif
+
+ musb->pRegs = mbase;
+ musb->ctrl_base = mbase;
+ musb->nIrq = -ENODEV;
+ for (epnum = 0, ep = musb->aLocalEnd;
+ epnum < MUSB_C_NUM_EPS;
+ epnum++, ep++) {
+
+ ep->musb = musb;
+ ep->bLocalEnd = epnum;
+ }
+
+ musb->controller = dev;
+ return musb;
+}
+
+static void musb_free(struct musb *musb)
+{
+ /* this has multiple entry modes. it handles fault cleanup after
+ * probe(), where things may be partially set up, as well as rmmod
+ * cleanup after everything's been de-activated.
+ */
+
+#ifdef CONFIG_SYSFS
+ device_remove_file(musb->controller, &dev_attr_mode);
+ device_remove_file(musb->controller, &dev_attr_cable);
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ musb_gadget_cleanup(musb);
+#endif
+
+ if (musb->nIrq >= 0)
+ free_irq(musb->nIrq, musb);
+ if (is_dma_capable() && musb->pDmaController) {
+ struct dma_controller *c = musb->pDmaController;
+
+ (void) c->stop(c->pPrivateData);
+ dma_controller_factory.destroy(c);
+ }
+ musb_platform_exit(musb);
+ if (musb->clock) {
+ clk_disable(musb->clock);
+ clk_put(musb->clock);
+ }
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ usb_put_hcd(musb_to_hcd(musb));
+#else
+ kfree(musb);
+#endif
+}
+
+/*
+ * Perform generic per-controller initialization.
+ *
+ * @dev: the controller (already clocked, etc)
+ * @nIrq: irq
+ * @ctrl: virtual address of controller registers,
+ * not yet corrected for platform-specific offsets
+ */
+static int __devinit
+musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+{
+ int status;
+ struct musb *pThis;
+ struct musb_hdrc_platform_data *plat = dev->platform_data;
+
+ /* The driver might handle more features than the board; OK.
+ * Fail when the board needs a feature that's not enabled.
+ */
+ if (!plat) {
+ dev_dbg(dev, "no platform_data?\n");
+ return -ENODEV;
+ }
+ switch (plat->mode) {
+ case MUSB_HOST:
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ break;
+#else
+ goto bad_config;
+#endif
+ case MUSB_PERIPHERAL:
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ break;
+#else
+ goto bad_config;
+#endif
+ case MUSB_OTG:
+#ifdef CONFIG_USB_MUSB_OTG
+ break;
+#else
+ bad_config:
+#endif
+ default:
+ dev_dbg(dev, "incompatible Kconfig role setting\n");
+ return -EINVAL;
+ }
+
+ /* allocate */
+ pThis = allocate_instance(dev, ctrl);
+ if (!pThis)
+ return -ENOMEM;
+
+ spin_lock_init(&pThis->Lock);
+ pThis->board_mode = plat->mode;
+ pThis->board_set_power = plat->set_power;
+
+ /* assume vbus is off */
+
+ /* platform adjusts pThis->pRegs and pThis->isr if needed,
+ * and activates clocks
+ */
+ pThis->isr = generic_interrupt;
+ status = musb_platform_init(pThis);
+
+ if (status < 0)
+ goto fail;
+ if (!pThis->isr) {
+ status = -ENODEV;
+ goto fail2;
+ }
+
+#ifndef CONFIG_USB_INVENTRA_FIFO
+ if (use_dma && dev->dma_mask) {
+ struct dma_controller *c;
+
+ c = dma_controller_factory.create(pThis, pThis->pRegs);
+ pThis->pDmaController = c;
+ if (c)
+ (void) c->start(c->pPrivateData);
+ }
+#endif
+ /* ideally this would be abstracted in platform setup */
+ if (!is_dma_capable() || !pThis->pDmaController)
+ dev->dma_mask = NULL;
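+ /* usbcore checks controller->dma_mask to decide whether urb
+ * buffers get DMA-mapped; clearing it here steers transfers
+ * through the PIO paths
+ */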
+
+ /* be sure interrupts are disabled before connecting ISR */
+ musb_platform_disable(pThis);
+
+ /* setup musb parts of the core (especially endpoints) */
+ status = musb_core_init(plat->multipoint
+ ? MUSB_CONTROLLER_MHDRC
+ : MUSB_CONTROLLER_HDRC, pThis);
+ if (status < 0)
+ goto fail2;
+
+ /* attach to the IRQ */
+ if (request_irq(nIrq, pThis->isr, 0, dev->bus_id, pThis)) {
+ dev_err(dev, "request_irq %d failed!\n", nIrq);
+ status = -ENODEV;
+ goto fail2;
+ }
+ pThis->nIrq = nIrq;
+
+ pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
+ musb_driver_name,
+ ({char *s;
+ switch (pThis->board_mode) {
+ case MUSB_HOST: s = "Host"; break;
+ case MUSB_PERIPHERAL: s = "Peripheral"; break;
+ default: s = "OTG"; break;
+ }; s; }),
+ ctrl,
+ (is_dma_capable() && pThis->pDmaController)
+ ? "DMA" : "PIO",
+ pThis->nIrq);
+
+// FIXME:
+// - convert to the HCD framework
+// - if (board_mode == MUSB_OTG) do startup with peripheral
+// - ... involves refcounting updates
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* host side needs more setup, except for no-host modes */
+ if (pThis->board_mode != MUSB_PERIPHERAL) {
+ struct usb_hcd *hcd = musb_to_hcd(pThis);
+
+ if (pThis->board_mode == MUSB_OTG)
+ hcd->self.otg_port = 1;
+ pThis->xceiv.host = &hcd->self;
+ hcd->power_budget = 2 * (plat->power ? : 250);
+ }
+#endif /* CONFIG_USB_MUSB_HDRC_HCD */
+
+#ifdef CONFIG_USB_MUSB_OTG
+ /* if present, this gets used even on non-otg boards */
+ MGC_OtgMachineInit(&pThis->OtgMachine, pThis);
+#endif
+
+ /* For the host-only role, we can activate right away.
+ * Otherwise, wait till the gadget driver hooks up.
+ *
+ * REVISIT switch to compile-time is_role_host() etc
+ * to get rid of #ifdeffery
+ */
+ switch (pThis->board_mode) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case MUSB_HOST:
+ MUSB_HST_MODE(pThis);
+ pThis->xceiv.state = OTG_STATE_A_IDLE;
+ status = usb_add_hcd(musb_to_hcd(pThis), -1, 0);
+
+ DBG(1, "%s mode, status %d, devctl %02x %c\n",
+ "HOST", status,
+ musb_readb(pThis->pRegs, MGC_O_HDRC_DEVCTL),
+ (musb_readb(pThis->pRegs, MGC_O_HDRC_DEVCTL)
+ & MGC_M_DEVCTL_BDEVICE
+ ? 'B' : 'A'));
+ break;
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case MUSB_PERIPHERAL:
+ MUSB_DEV_MODE(pThis);
+ status = musb_gadget_setup(pThis);
+
+ DBG(1, "%s mode, status %d, dev%02x\n",
+ "PERIPHERAL", status,
+ musb_readb(pThis->pRegs, MGC_O_HDRC_DEVCTL));
+ break;
+#endif
+#ifdef CONFIG_USB_MUSB_OTG
+ case MUSB_OTG:
+ MUSB_OTG_MODE(pThis);
+ status = musb_gadget_setup(pThis);
+
+ DBG(1, "%s mode, status %d, dev%02x\n",
+ "OTG", status,
+ musb_readb(pThis->pRegs, MGC_O_HDRC_DEVCTL));
+#endif
+ break;
+ }
+
+ if (status == 0)
+ musb_debug_create("driver/musb_hdrc", pThis);
+ else {
+fail:
+ musb_free(pThis);
+ return status;
+ }
+
+ INIT_WORK(&pThis->irq_work, musb_irq_work, pThis);
+
+#ifdef CONFIG_SYSFS
+ device_create_file(dev, &dev_attr_mode);
+ device_create_file(dev, &dev_attr_cable);
+#endif
+
+ return status;
+
+fail2:
+ musb_platform_exit(pThis);
+ goto fail;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
+ * bridge to a platform device; this driver then suffices.
+ */
+
+static int __devinit musb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq = platform_get_irq(pdev, 0);
+ struct resource *iomem;
+ void __iomem *base;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem || irq == 0)
+ return -ENODEV;
+
+ base = ioremap(iomem->start, iomem->end - iomem->start + 1);
+ if (!base) {
+ dev_err(dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ return musb_init_controller(dev, irq, base);
+}
+
+static int __devexit musb_remove(struct platform_device *pdev)
+{
+ struct musb *musb = dev_to_musb(&pdev->dev);
+
+ /* this gets called on rmmod.
+ * - Host mode: host may still be active
+ * - Peripheral mode: peripheral is deactivated (or never-activated)
+ * - OTG mode: both roles are deactivated (or never-activated)
+ */
+ musb_shutdown(pdev);
+ musb_debug_delete("driver/musb_hdrc", musb);
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (musb->board_mode == MUSB_HOST)
+ usb_remove_hcd(musb_to_hcd(musb));
+#endif
+ musb_free(musb);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+/* REVISIT when power savings matter on DaVinci, look at turning
+ * off its phy clock during system suspend iff wakeup is disabled
+ */
+
+static int musb_suspend(struct platform_device *pdev, pm_message_t message)
+{
+ unsigned long flags;
+ struct musb *musb = dev_to_musb(&pdev->dev);
+
+ if (!musb->clock)
+ return 0;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+
+ if (is_peripheral_active(musb)) {
+ /* FIXME force disconnect unless we know USB will wake
+ * the system up quickly enough to respond ...
+ */
+ } else if (is_host_active(musb)) {
+ /* we know all the children are suspended; sometimes
+ * they will even be wakeup-enabled
+ */
+ }
+
+ musb_platform_try_idle(musb);
+ clk_disable(musb->clock);
+ spin_unlock_irqrestore(&musb->Lock, flags);
+ return 0;
+}
+
+static int musb_resume(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct musb *musb = dev_to_musb(&pdev->dev);
+
+ if (!musb->clock)
+ return 0;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+ clk_enable(musb->clock);
+ /* for static cmos like DaVinci, register values were preserved
+ * unless for some reason the whole soc powered down and we're
+ * not treating that as a whole-system restart (e.g. swsusp)
+ */
+ spin_unlock_irqrestore(&musb->Lock, flags);
+ return 0;
+}
+
+#else
+#define musb_suspend NULL
+#define musb_resume NULL
+#endif
+
+static struct platform_driver musb_driver = {
+ .driver = {
+ .name = (char *)musb_driver_name,
+ .bus = &platform_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = musb_probe,
+ .remove = __devexit_p(musb_remove),
+ .shutdown = musb_shutdown,
+ .suspend = musb_suspend,
+ .resume = musb_resume,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init musb_init(void)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (usb_disabled())
+ return 0;
+#endif
+
+ pr_info("%s: version " MUSB_VERSION ", "
+#ifdef CONFIG_USB_INVENTRA_FIFO
+ "pio"
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+ "cppi-dma"
+#elif defined(CONFIG_USB_INVENTRA_DMA)
+ "musb-dma"
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+ "tusb-omap-dma"
+#else
+ "?dma?"
+#endif
+ ", "
+#ifdef CONFIG_USB_MUSB_OTG
+ "otg (peripheral+host)"
+#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
+ "peripheral"
+#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+ "host"
+#endif
+ ", debug=%d\n",
+ musb_driver_name, debug);
+ return platform_driver_register(&musb_driver);
+}
+
+/* make us init after usbcore and before usb
+ * gadget and host-side drivers start to register
+ */
+subsys_initcall(musb_init);
+
+static void __exit musb_cleanup(void)
+{
+ platform_driver_unregister(&musb_driver);
+}
+module_exit(musb_cleanup);
--- /dev/null
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Notes:
+ * - Driver assumes that interface to external host (main CPU) is
+ * configured for NOR FLASH interface instead of VLYNQ serial
+ * interface.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+
+#include "musbdefs.h"
+
+
+/*
+ * TUSB 6010 may use a parallel bus that doesn't support byte ops;
+ * so both loading and unloading FIFOs need explicit byte counts.
+ */
+
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
+{
+ void __iomem *ep_conf = hw_ep->regs;
+ void __iomem *fifo = hw_ep->fifo;
+ u8 epnum = hw_ep->bLocalEnd;
+ u8 *bufp = (u8 *)buf;
+ int i, remain;
+ u32 val;
+
+ prefetch(bufp);
+
+ DBG(3, "%cX ep%d count %d bufp %p\n", 'T', epnum, len, bufp);
+
+ if (epnum)
+ musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(len));
+ else
+ musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX |
+ TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+ /* Write 32-bit blocks from buffer to FIFO
+ * REVISIT: Optimize for burst ... writesl/writesw
+ */
+ if (len >= 4) {
+ if (((unsigned long)bufp & 0x3) == 0) {
+ for (i = 0; i < (len / 4); i++) {
+ val = *(u32 *)bufp;
+ bufp += 4;
+ musb_writel(fifo, 0, val);
+ }
+ } else if (((unsigned long)bufp & 0x2) == 0x2) {
+ for (i = 0; i < (len / 4); i++) {
+ val = (u32)(*(u16 *)bufp);
+ bufp += 2;
+ val |= (*(u16 *)bufp) << 16;
+ bufp += 2;
+ musb_writel(fifo, 0, val);
+ }
+ } else {
+ for (i = 0; i < (len / 4); i++) {
+ memcpy(&val, bufp, 4);
+ bufp += 4;
+ musb_writel(fifo, 0, val);
+ }
+ }
+ remain = len - (i * 4);
+ } else
+ remain = len;
+
+ if (remain) {
+ /* Write rest of 1-3 bytes from buffer into FIFO */
+ memcpy(&val, bufp, remain);
+ musb_writel(fifo, 0, val);
+ }
+}
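+
+/* Worked example for the loading logic above: a 7 byte write from a
+ * 2-byte-aligned buffer takes the halfword path (one 32-bit FIFO write
+ * assembled from two 16-bit loads), then the 3 byte remainder goes out
+ * via the trailing memcpy and final FIFO write.
+ */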
+
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
+{
+ void __iomem *ep_conf = hw_ep->regs;
+ void __iomem *fifo = hw_ep->fifo;
+ u8 epnum = hw_ep->bLocalEnd;
+ u8 *bufp = (u8 *)buf;
+ int i, remain;
+ u32 val;
+
+ DBG(3, "%cX ep%d count %d buf %p\n", 'R', epnum, len, bufp);
+
+ if (epnum)
+ musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(len));
+ else
+ musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+ /* Read 32-bit blocks from FIFO to buffer
+ * REVISIT: Optimize for burst ... readsl/readsw
+ */
+ if (len >= 4) {
+ if (((unsigned long)bufp & 0x3) == 0) {
+ for (i = 0; i < (len / 4); i++) {
+ val = musb_readl(fifo, 0);
+ *(u32 *)bufp = val;
+ bufp += 4;
+ }
+ } else if (((unsigned long)bufp & 0x2) == 0x2) {
+ for (i = 0; i < (len / 4); i++) {
+ val = musb_readl(fifo, 0);
+ *(u16 *)bufp = (u16)(val & 0xffff);
+ bufp += 2;
+ *(u16 *)bufp = (u16)(val >> 16);
+ bufp += 2;
+ }
+ } else {
+ for (i = 0; i < (len / 4); i++) {
+ val = musb_readl(fifo, 0);
+ memcpy(bufp, &val, 4);
+ bufp += 4;
+ }
+ }
+ remain = len - (i * 4);
+ } else
+ remain = len;
+
+ if (remain) {
+ /* Read rest of 1-3 bytes from FIFO */
+ val = musb_readl(fifo, 0);
+ memcpy(bufp, &val, remain);
+ }
+}
+
+/*
+ * Enables TUSB6010 to use VBUS as power source in peripheral mode.
+ */
+static inline void tusb_enable_vbus_charge(struct musb *musb)
+{
+ void __iomem *base = musb->ctrl_base;
+ u32 reg;
+
+ musb_writel(base, TUSB_PRCM_WAKEUP_MASK, 0xffff);
+ reg = musb_readl(base, TUSB_PRCM_MNGMT);
+ reg &= ~TUSB_PRCM_MNGMT_SUSPEND_MASK;
+ reg |= TUSB_PRCM_MNGMT_CPEN_MASK;
+ musb_writel(base, TUSB_PRCM_MNGMT, reg);
+}
+
+/*
+ * Idles TUSB6010 until next wake-up event interrupt. Use all wake-up
+ * events for now. Note that TUSB will not respond if NOR chip select
+ * wake-up event is masked. Also note that any access to TUSB will wake
+ * it up from idle.
+ */
+static inline void tusb_allow_idle(struct musb *musb, int wakeup_mask)
+{
+ void __iomem *base = musb->ctrl_base;
+ u32 reg;
+
+ musb_writel(base, TUSB_PRCM_WAKEUP_MASK, wakeup_mask);
+ reg = musb_readl(base, TUSB_PRCM_MNGMT);
+ reg &= ~TUSB_PRCM_MNGMT_CPEN_MASK;
+ reg |= TUSB_PRCM_MNGMT_SUSPEND_MASK;
+ musb_writel(base, TUSB_PRCM_MNGMT, reg);
+}
+
+/*
+ * Updates cable VBUS status. Caller must take care of locking.
+ */
+int musb_platform_get_vbus_status(struct musb *musb)
+{
+ void __iomem *base = musb->ctrl_base;
+ u32 otg_stat, prcm_mngmt;
+ int ret = 0;
+
+ otg_stat = musb_readl(base, TUSB_DEV_OTG_STAT);
+ prcm_mngmt = musb_readl(base, TUSB_PRCM_MNGMT);
+
+ /* Temporarily enable VBUS detection if it was disabled for
+ * suspend mode. Unless it's enabled, otg_stat and devctl will
+ * not show the correct VBUS state.
+ */
+ if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) {
+ u32 tmp = prcm_mngmt;
+ tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ musb_writel(base, TUSB_PRCM_MNGMT, tmp);
+ otg_stat = musb_readl(base, TUSB_DEV_OTG_STAT);
+ musb_writel(base, TUSB_PRCM_MNGMT, prcm_mngmt);
+ }
+
+ if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_SENSE)
+ ret = 1;
+
+ return ret;
+}
+
+/*
+ * Sets the TUSB6010 idle mode in peripheral mode depending on the
+ * gadget driver state and cable VBUS status. Because of the NOR flash
+ * wake-up capability, this needs to be the last function called
+ * wherever TUSB6010 registers are accessed.
+ * Caller must take care of locking.
+ */
+void musb_platform_try_idle(struct musb *musb)
+{
+ u32 wakeup_mask = 0;
+
+ /* Suspend with only NOR flash wake-up event enabled if no
+ * gadget driver is active.
+ */
+ if (musb->xceiv.state == OTG_STATE_UNDEFINED) {
+ wakeup_mask = 0xffff & ~TUSB_PRCM_WNORCS;
+ tusb_allow_idle(musb, wakeup_mask);
+ return;
+ }
+
+ /* Use VBUS as power source if available, otherwise suspend
+ * with all wake-up events enabled.
+ *
+ * FIXME only B-device state machine ever _consumes_ VBUS.
+ */
+ if (musb_platform_get_vbus_status(musb))
+ tusb_enable_vbus_charge(musb);
+ else {
+ wakeup_mask = TUSB_PRCM_WLD;
+ tusb_allow_idle(musb, wakeup_mask);
+ }
+}
+
+irqreturn_t tusb_interrupt(int irq, void *__hci, struct pt_regs *r)
+{
+ struct musb *musb = __hci;
+ void __iomem *base = musb->ctrl_base;
+ unsigned long flags;
+ u32 dma_src, int_src, otg_stat, musb_src = 0;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+
+ dma_src = musb_readl(base, TUSB_DMA_INT_SRC);
+ int_src = musb_readl(base, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS;
+ otg_stat = musb_readl(base, TUSB_DEV_OTG_STAT);
+
+ DBG(3, "TUSB interrupt dma: %08x int: %08x otg: %08x\n",
+ dma_src, int_src, otg_stat);
+
+ musb->int_usb = 0;
+ musb->int_rx = 0;
+ musb->int_tx = 0;
+ musb->int_regs = r;
+
+ if (otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS) {
+ /* ID pin is up. Either A-plug was removed or TUSB6010
+ * is in peripheral mode */
+
+ /* Still in peripheral mode? */
+ if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) {
+ DBG(3, "tusb: Status change\n");
+ //goto out;
+ }
+ }
+
+ /* Peripheral suspend. Cable may be disconnected, try to idle */
+ if (int_src & TUSB_INT_SRC_USB_IP_SUSPEND) {
+ musb->status |= MUSB_VBUS_STATUS_CHG;
+ schedule_work(&musb->irq_work);
+ }
+
+ /* Connect and disconnect for host mode */
+ if (int_src & TUSB_INT_SRC_USB_IP_CONN) {
+ DBG(3, "tusb: Connected\n");
+ } else if (int_src & TUSB_INT_SRC_USB_IP_DISCON) {
+ DBG(3, "tusb: Disconnected\n");
+ }
+
+ /* VBUS state change */
+ if ((int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) ||
+ (int_src & TUSB_INT_SRC_USB_IP_VBUS_ERR)) {
+ musb->status |= MUSB_VBUS_STATUS_CHG;
+ schedule_work(&musb->irq_work);
+
+#if 0
+ DBG(3, "tusb: VBUS changed. VBUS state %d\n",
+ (otg_stat & TUSB_DEV_OTG_STAT_VBUS_SENSE) ? 1 : 0);
+ if (!(otg_stat & TUSB_DEV_OTG_STAT_VBUS_SENSE) &&
+ !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
+ /* VBUS went off and ID pin is down */
+ DBG(3, "tusb: No VBUS, starting session\n");
+ /* Start session again, VBUS will be enabled */
+ musb_writeb(musb_base, MGC_O_HDRC_DEVCTL,
+ MGC_M_DEVCTL_SESSION);
+ }
+#endif
+ }
+
+ /* ID pin change */
+ if (int_src & TUSB_INT_SRC_ID_STATUS_CHNG) {
+ DBG(3, "tusb: ID pin changed. State is %d\n",
+ (musb_readl(base, TUSB_DEV_OTG_STAT) &
+ TUSB_DEV_OTG_STAT_ID_STATUS) ? 1 : 0);
+ }
+
+ /* OTG timer expiration */
+ if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) {
+ DBG(3, "tusb: OTG timer expired\n");
+ musb_writel(base, TUSB_DEV_OTG_TIMER,
+ musb_readl(base, TUSB_DEV_OTG_TIMER) |
+ TUSB_DEV_OTG_TIMER_ENABLE);
+ }
+
+ /* TX dma callback must be handled here, RX dma callback is
+ * handled in tusb_omap_dma_cb.
+ */
+ if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE) && dma_src) {
+ u32 real_dma_src = musb_readl(base, TUSB_DMA_INT_MASK);
+ real_dma_src = ~real_dma_src & dma_src;
+ if (tusb_dma_omap()) {
+ int tx_source = (real_dma_src & 0xffff);
+ int i;
+
+ for (i = 1; i <= 15; i++) {
+ if (tx_source & (1 << i)) {
+ DBG(1, "completing ep%i %s\n", i, "tx");
+ musb_dma_completion(musb, i, 1);
+ }
+ }
+ }
+ musb_writel(base, TUSB_DMA_INT_CLEAR, dma_src);
+ }
+
+ /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */
+ if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) {
+ musb_src = musb_readl(base, TUSB_USBIP_INT_SRC);
+ musb_writel(base, TUSB_USBIP_INT_CLEAR, musb_src);
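+ /* rx endpoint interrupts sit in the upper halfword with no ep0
+ * bit, so shift left once to match the MUSB INTRRX layout where
+ * bit 0 is ep0
+ */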
+ musb->int_rx = (((musb_src >> 16) & 0xffff) << 1);
+ musb->int_tx = (musb_src & 0xffff);
+ }
+ musb->int_usb = (int_src & 0xff);
+ if (musb->int_usb || musb->int_rx || musb->int_tx)
+ musb_interrupt(musb);
+
+ /* Acknowledge wake-up source interrupts */
+ if (int_src & TUSB_INT_SRC_DEV_WAKEUP) {
+ u32 reg = musb_readl(base, TUSB_PRCM_WAKEUP_SOURCE);
+ musb_writel(base, TUSB_PRCM_WAKEUP_CLEAR, reg);
+ schedule_work(&musb->irq_work);
+ }
+
+ /* Acknowledge TUSB interrupts. Clear only non-reserved bits */
+ if (int_src)
+ musb_writel(base, TUSB_INT_SRC_CLEAR,
+ int_src & ~TUSB_INT_MASK_RESERVED_BITS);
+
+ spin_unlock_irqrestore(&musb->Lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int dma_off;
+
+/*
+ * Enables TUSB6010. Caller must take care of locking.
+ * REVISIT:
+ * - Check what is unnecessary in MGC_HdrcStart()
+ * - Interrupt should really be IRQT_FALLING level sensitive
+ */
+void musb_platform_enable(struct musb * musb)
+{
+ void __iomem *base = musb->ctrl_base;
+
+ /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF.
+ * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */
+ musb_writel(base, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF);
+
+ /* Setup TUSB interrupt, disable DMA and GPIO interrupts */
+ musb_writel(base, TUSB_USBIP_INT_MASK, 0);
+ musb_writel(base, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(base, TUSB_GPIO_INT_MASK, 0x1ff);
+
+ /* Clear all subsystem interrupts */
+ musb_writel(base, TUSB_USBIP_INT_CLEAR, 0x7fffffff);
+ musb_writel(base, TUSB_DMA_INT_CLEAR, 0x7fffffff);
+ musb_writel(base, TUSB_GPIO_INT_CLEAR, 0x1ff);
+
+ /* Acknowledge pending interrupt(s) */
+ musb_writel(base, TUSB_INT_SRC_CLEAR,
+ ~TUSB_INT_MASK_RESERVED_BITS);
+
+#if 0
+ /* Set OTG timer for about one second */
+ musb_writel(base, TUSB_DEV_OTG_TIMER,
+ TUSB_DEV_OTG_TIMER_ENABLE |
+ TUSB_DEV_OTG_TIMER_VAL(0x3c00000));
+#endif
+
+ /* Only 0 clock cycles for minimum interrupt de-assertion time and
+ * interrupt polarity active low seems to work reliably here */
+ musb_writel(base, TUSB_INT_CTRL_CONF,
+ TUSB_INT_CTRL_CONF_INT_RELCYC(0));
+
+ set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
+
+ if (is_dma_capable() && dma_off)
+ printk(KERN_WARNING "%s %s: dma not reactivated\n",
+ __FILE__, __FUNCTION__);
+ else
+ dma_off = 1;
+}
+
+/*
+ * Disables TUSB6010. Caller must take care of locking.
+ */
+void musb_platform_disable(struct musb *musb)
+{
+ if (is_dma_capable()) {
+ printk(KERN_WARNING "%s %s: dma still active\n",
+ __FILE__, __FUNCTION__);
+ dma_off = 1;
+ }
+}
+
+/*
+ * Sets up TUSB6010 CPU interface specific signals and registers
+ * Note: Settings optimized for OMAP24xx
+ */
+static void tusb_setup_cpu_interface(struct musb *musb)
+{
+ void __iomem *base = musb->ctrl_base;
+
+ /* Disable GPIO[7:0] pullups (used as output DMA requests) */
+ musb_writel(base, TUSB_PULLUP_1_CTRL, 0x000000FF);
+ /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */
+ musb_writel(base, TUSB_PULLUP_2_CTRL, 0x01FFFFFF);
+
+ /* Turn GPIO[5:0] to DMAREQ[5:0] signals */
+ musb_writel(base, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f));
+
+ /* Burst size 16x16 bits, all six DMA requests enabled, DMA request
+ * de-assertion time 2 system clocks p 62 */
+ musb_writel(base, TUSB_DMA_REQ_CONF,
+ TUSB_DMA_REQ_CONF_BURST_SIZE(2) |
+ TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) |
+ TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+ /* Set 0 wait count for synchronous burst access */
+ musb_writel(base, TUSB_WAIT_COUNT, 1);
+}
+
+#define TUSB_REV_MAJOR(reg_val) (((reg_val) >> 4) & 0xf)
+#define TUSB_REV_MINOR(reg_val) ((reg_val) & 0xf)
+
+static int tusb_print_revision(struct musb *musb)
+{
+ void __iomem *base = musb->ctrl_base;
+
+ pr_info("tusb: Revisions: %s%i.%i %s%i.%i %s%i.%i %s%i.%i\n",
+ "prcm",
+ TUSB_REV_MAJOR(musb_readl(base, TUSB_PRCM_REV)),
+ TUSB_REV_MINOR(musb_readl(base, TUSB_PRCM_REV)),
+ "int",
+ TUSB_REV_MAJOR(musb_readl(base, TUSB_INT_CTRL_REV)),
+ TUSB_REV_MINOR(musb_readl(base, TUSB_INT_CTRL_REV)),
+ "gpio",
+ TUSB_REV_MAJOR(musb_readl(base, TUSB_GPIO_REV)),
+ TUSB_REV_MINOR(musb_readl(base, TUSB_GPIO_REV)),
+ "dma",
+ TUSB_REV_MAJOR(musb_readl(base, TUSB_DMA_CTRL_REV)),
+ TUSB_REV_MINOR(musb_readl(base, TUSB_DMA_CTRL_REV)));
+
+ return TUSB_REV_MAJOR(musb_readl(base, TUSB_INT_CTRL_REV));
+}
+
+static int tusb_start(struct musb *musb)
+{
+ void __iomem *base = musb->ctrl_base;
+ int ret = -1;
+ unsigned long flags;
+
+ if (musb->board_set_power)
+ ret = musb->board_set_power(1);
+ if (ret != 0) {
+ printk(KERN_ERR "tusb: Cannot enable TUSB6010\n");
+ goto err;
+ }
+
+ spin_lock_irqsave(&musb->Lock, flags);
+
+ if (musb_readl(base, TUSB_PROD_TEST_RESET) !=
+ TUSB_PROD_TEST_RESET_VAL) {
+ printk(KERN_ERR "tusb: Unable to detect TUSB6010\n");
+ goto err_unlock;
+ }
+
+ ret = tusb_print_revision(musb);
+ if (ret < 2) {
+ printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n",
+ ret);
+ goto err_unlock;
+ }
+
+ /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when
+ * NOR FLASH interface is used */
+ musb_writel(base, TUSB_VLYNQ_CTRL, 8);
+
+ /* Select PHY free running 60MHz as a system clock */
+ musb_writel(base, TUSB_PRCM_CONF, //FIXME: CPEN should not be needed!
+ TUSB_PRCM_CONF_SFW_CPEN | TUSB_PRCM_CONF_SYS_CLKSEL(1));
+
+ /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for
+ * power saving, enable VBus detect and session end comparators,
+ * enable IDpullup, enable VBus charging */
+ musb_writel(base, TUSB_PRCM_MNGMT,
+ TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) |
+ TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN |
+ TUSB_PRCM_MNGMT_DFT_CLK_DIS |
+ TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS |
+ TUSB_PRCM_MNGMT_OTG_SESS_END_EN |
+ TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN |
+ TUSB_PRCM_MNGMT_OTG_ID_PULLUP);
+#if 0
+ musb_writel(base, TUSB_PHY_OTG_CTRL_ENABLE,
+ musb_readl(base, TUSB_PHY_OTG_CTRL_ENABLE) |
+ TUSB_PHY_OTG_CTRL_WRPROTECT |
+ TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP |
+ TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN |
+ TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN);
+ musb_writel(base, TUSB_PHY_OTG_CTRL,
+ musb_readl(base, TUSB_PHY_OTG_CTRL) |
+ TUSB_PHY_OTG_CTRL_WRPROTECT |
+ TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP |
+ TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN |
+ TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN);
+#endif
+ tusb_setup_cpu_interface(musb);
+
+ spin_unlock_irqrestore(&musb->Lock, flags);
+
+ return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&musb->Lock, flags);
+err:
+ if (musb->board_set_power)
+ musb->board_set_power(0);
+
+ return -ENODEV;
+}
+
+int __devinit musb_platform_init(struct musb *musb)
+{
+ struct platform_device *pdev;
+ struct resource *mem;
+ int ret;
+
+ pdev = to_platform_device(musb->controller);
+
+ /* dma address for async dma */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ musb->async = mem->start;
+
+ /* dma address for sync dma */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem) {
+ pr_debug("no sync dma resource?\n");
+ return -ENODEV;
+ }
+ musb->sync = mem->start;
+
+ /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400,
+ * FIFOs at 0x600, TUSB at 0x800
+ */
+ musb->pRegs += TUSB_BASE_OFFSET;
+
+ ret = tusb_start(musb);
+ if (ret) {
+ printk(KERN_ERR "Could not start tusb6010 (%d)\n",
+ ret);
+ return -ENODEV;
+ }
+ musb->isr = tusb_interrupt;
+
+ musb_platform_try_idle(musb);
+
+ return ret;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+ if (musb->board_set_power)
+ musb->board_set_power(0);
+
+ return 0;
+}
--- /dev/null
+/*
+ * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __TUSB6010_H__
+#define __TUSB6010_H__
+
+#ifdef CONFIG_USB_TUSB6010
+#define musb_in_tusb() 1
+#else
+#define musb_in_tusb() 0
+#endif
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+#define tusb_dma_omap() 1
+#else
+#define tusb_dma_omap() 0
+#endif
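+
+/* these constant 0/1 predicates let shared code use plain C conditionals
+ * instead of #ifdef, keeping both branches compile-checked while the
+ * optimizer drops the dead one; a minimal sketch (the helpers named
+ * here are hypothetical):
+ *
+ * if (tusb_dma_omap())
+ * program_omap_dma_channel();
+ * else
+ * fall_back_to_pio();
+ */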
+
+/* VLYNQ control register. 32-bit at offset 0x000 */
+#define TUSB_VLYNQ_CTRL 0x004
+
+/* Mentor Graphics OTG core registers. 8-, 16- and 32-bit at offset 0x400 */
+#define TUSB_BASE_OFFSET 0x400
+
+/* FIFO registers 32-bit at offset 0x600 */
+#define TUSB_FIFO_BASE 0x600
+
+/* Device System & Control registers. 32-bit at offset 0x800 */
+#define TUSB_SYS_REG_BASE 0x800
+
+#define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000)
+#define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16)
+#define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15)
+#define TUSB_DEV_CONF_SOFT_ID (1 << 1)
+#define TUSB_DEV_CONF_ID_SEL (1 << 0)
+
+#define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004)
+#define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008)
+#define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24)
+#define TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP (1 << 23)
+#define TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN (1 << 19)
+#define TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN (1 << 18)
+#define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17)
+#define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16)
+#define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15)
+#define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14)
+#define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13)
+#define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12)
+#define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11)
+#define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10)
+#define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9)
+#define TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v) (((v) & 3) << 7)
+#define TUSB_PHY_OTG_CTRL_PD (1 << 6)
+#define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5)
+#define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4)
+#define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3)
+#define TUSB_PHY_OTG_CTRL_RESET (1 << 2)
+#define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1)
+#define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0)
+
+/* OTG status register */
+#define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c)
+#define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8)
+#define TUSB_DEV_OTG_STAT_SESS_END (1 << 7)
+#define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6)
+#define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5)
+#define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4)
+#define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3)
+#define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0)
+#define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1)
+#define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0)
+
+#define TUSB_DEV_OTG_TIMER (TUSB_SYS_REG_BASE + 0x010)
+#define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014)
+
+/* PRCM configuration register */
+#define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE + 0x018)
+#define TUSB_PRCM_CONF_SFW_CPEN (1 << 24)
+#define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16)
+
+/* PRCM management register */
+#define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c)
+#define TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v) (((v) & 0xf) << 25)
+#define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24)
+#define TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v) (((v) & 0xf) << 20)
+#define TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN (1 << 19)
+#define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18)
+#define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17)
+#define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10)
+#define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9)
+#define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8)
+#define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4)
+#define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3)
+#define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2)
+#define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1)
+#define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0)
+#define TUSB_PRCM_MNGMT_PM_CLEAR_MASK ((0x3 << 3) | (0x3 << 0))
+#define TUSB_PRCM_MNGMT_CPEN_MASK ((1 << 9) | (0x3 << 3))
+#define TUSB_PRCM_MNGMT_SUSPEND_MASK ((1 << 10) | (0x3 << 0))
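+/* i.e. CPEN_MASK = OTG_VBUS_DET_EN | 15_SW_EN | 33_SW_EN, and
+ * SUSPEND_MASK = OTG_SESS_END_EN | PM_IDLE | DEV_IDLE
+ */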
+
+/* Wake-up source clear and mask registers */
+#define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020)
+#define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028)
+#define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c)
+#define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13)
+#define TUSB_PRCM_WGPIO_7 (1 << 12)
+#define TUSB_PRCM_WGPIO_6 (1 << 11)
+#define TUSB_PRCM_WGPIO_5 (1 << 10)
+#define TUSB_PRCM_WGPIO_4 (1 << 9)
+#define TUSB_PRCM_WGPIO_3 (1 << 8)
+#define TUSB_PRCM_WGPIO_2 (1 << 7)
+#define TUSB_PRCM_WGPIO_1 (1 << 6)
+#define TUSB_PRCM_WGPIO_0 (1 << 5)
+#define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */
+#define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */
+#define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */
+#define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */
+#define TUSB_PRCM_WLD (1 << 0) /* OTG PHY ID detect */
+
+#define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030)
+#define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034)
+#define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038)
+#define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c)
+#define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040)
+#define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044)
+#define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048)
+#define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c)
+#define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050)
+#define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054)
+#define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058)
+#define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c)
+#define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060)
+#define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064)
+#define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068)
+#define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c)
+
+/* NOR flash interrupt source registers */
+#define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070)
+#define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074)
+#define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078)
+#define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c)
+#define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24)
+#define TUSB_INT_SRC_USB_IP_CORE (1 << 17)
+#define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16)
+#define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15)
+#define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14)
+#define TUSB_INT_SRC_DEV_WAKEUP (1 << 13)
+#define TUSB_INT_SRC_DEV_READY (1 << 12)
+#define TUSB_INT_SRC_USB_IP_TX (1 << 9)
+#define TUSB_INT_SRC_USB_IP_RX (1 << 8)
+#define TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7)
+#define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6)
+#define TUSB_INT_SRC_USB_IP_DISCON (1 << 5)
+#define TUSB_INT_SRC_USB_IP_CONN (1 << 4)
+#define TUSB_INT_SRC_USB_IP_SOF (1 << 3)
+#define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2)
+#define TUSB_INT_SRC_USB_IP_RESUME (1 << 1)
+#define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0)
+
+/* NOR flash interrupt registers reserved bits. Must be written as 0 */
+#define TUSB_INT_MASK_RESERVED_17 (0x3fff << 17)
+#define TUSB_INT_MASK_RESERVED_13 (1 << 13)
+#define TUSB_INT_MASK_RESERVED_8 (0xf << 8)
+#define TUSB_INT_SRC_RESERVED_26 (0x1f << 26)
+#define TUSB_INT_SRC_RESERVED_18 (0x3f << 18)
+#define TUSB_INT_SRC_RESERVED_10 (0x03 << 10)
+
+/* Reserved bits for NOR flash interrupt mask and clear register */
+#define TUSB_INT_MASK_RESERVED_BITS (TUSB_INT_MASK_RESERVED_17 | \
+ TUSB_INT_MASK_RESERVED_13 | \
+ TUSB_INT_MASK_RESERVED_8)
+
+/* Reserved bits for NOR flash interrupt status register */
+#define TUSB_INT_SRC_RESERVED_BITS (TUSB_INT_SRC_RESERVED_26 | \
+ TUSB_INT_SRC_RESERVED_18 | \
+ TUSB_INT_SRC_RESERVED_10)
+
+#define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080)
+#define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084)
+#define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100)
+#define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104)
+#define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108)
+#define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148)
+
+/* Offsets from each ep base register */
+#define TUSB_EP_TX_OFFSET 0x10c /* EP_IN in docs */
+#define TUSB_EP_RX_OFFSET 0x14c /* EP_OUT in docs */
+#define TUSB_EP_MAX_PACKET_SIZE_OFFSET 0x188
+
+#define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8)
+#define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4)
+#define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8)
+
+/* Device System & Control register bitfields */
+#define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31)
+#define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff)
+#define TUSB_INT_CTRL_CONF_INT_RELCYC(v) (((v) & 0x7) << 18)
+#define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17)
+#define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16)
+#define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24)
+#define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v) (((v) & 0x3f) << 20)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v) (((v) & 0xf) << 16)
+#define TUSB_EP0_CONFIG_SW_EN (1 << 8)
+#define TUSB_EP0_CONFIG_DIR_TX (1 << 7)
+#define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f)
+#define TUSB_EP_CONFIG_SW_EN (1 << 31)
+#define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff)
+#define TUSB_PROD_TEST_RESET_VAL 0xa596
+#define TUSB_EP_FIFO(ep) (TUSB_FIFO_BASE + (ep) * 0x20)
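+/* e.g. TUSB_EP_FIFO(1) == 0x620; each endpoint gets a 0x20 byte window */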
+
+/*----------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_TUSB6010
+
+/* configuration parameters specific to this silicon */
+
+/* Number of Tx endpoints. Legal values are 1 - 16 (this value includes EP0) */
+#define MUSB_C_NUM_EPT 5
+
+/* Number of Rx endpoints. Legal values are 1 - 16 (this value includes EP0) */
+#define MUSB_C_NUM_EPR 5
+
+/* Endpoint 1 to 15 direction types. C_EP1_DEF is defined if either Tx endpoint
+ * 1 or Rx endpoint 1 is used.
+ */
+#define MUSB_C_EP1_DEF
+
+/* C_EP1_TX_DEF is defined if Tx endpoint 1 is used */
+#define MUSB_C_EP1_TX_DEF
+
+/* C_EP1_RX_DEF is defined if Rx endpoint 1 is used */
+#define MUSB_C_EP1_RX_DEF
+
+/* C_EP1_TOR_DEF is defined if Tx endpoint 1 and Rx endpoint 1 share a FIFO */
+/* #define C_EP1_TOR_DEF */
+
+/* C_EP1_TAR_DEF is defined if both Tx endpoint 1 and Rx endpoint 1 are used
+ * and do not share a FIFO.
+ */
+#define MUSB_C_EP1_TAR_DEF
+
+/* Similarly for all other used endpoints */
+#define MUSB_C_EP2_DEF
+#define MUSB_C_EP2_TX_DEF
+#define MUSB_C_EP2_RX_DEF
+#define MUSB_C_EP2_TAR_DEF
+#define MUSB_C_EP3_DEF
+#define MUSB_C_EP3_TX_DEF
+#define MUSB_C_EP3_RX_DEF
+#define MUSB_C_EP3_TAR_DEF
+#define MUSB_C_EP4_DEF
+#define MUSB_C_EP4_TX_DEF
+#define MUSB_C_EP4_RX_DEF
+#define MUSB_C_EP4_TAR_DEF
+
+/* Endpoint 1 to 15 FIFO address bits. Legal values are 3 to 13 - corresponding
+ * to FIFO sizes of 8 to 8192 bytes. If a Tx endpoint shares a FIFO with an Rx
+ * endpoint then the Rx FIFO size must be the same as the Tx FIFO size. All
+ * endpoints 1 to 15 must be defined, unused endpoints should be set to 2.
+ */
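+/* e.g. 5 address bits = 2^5 = 32 byte FIFO, 13 bits = 8192 bytes */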
+#define MUSB_C_EP1T_BITS 5
+#define MUSB_C_EP1R_BITS 5
+#define MUSB_C_EP2T_BITS 5
+#define MUSB_C_EP2R_BITS 5
+#define MUSB_C_EP3T_BITS 3
+#define MUSB_C_EP3R_BITS 3
+#define MUSB_C_EP4T_BITS 3
+#define MUSB_C_EP4R_BITS 3
+
+#define MUSB_C_EP5T_BITS 2
+#define MUSB_C_EP5R_BITS 2
+#define MUSB_C_EP6T_BITS 2
+#define MUSB_C_EP6R_BITS 2
+#define MUSB_C_EP7T_BITS 2
+#define MUSB_C_EP7R_BITS 2
+#define MUSB_C_EP8T_BITS 2
+#define MUSB_C_EP8R_BITS 2
+#define MUSB_C_EP9T_BITS 2
+#define MUSB_C_EP9R_BITS 2
+#define MUSB_C_EP10T_BITS 2
+#define MUSB_C_EP10R_BITS 2
+#define MUSB_C_EP11T_BITS 2
+#define MUSB_C_EP11R_BITS 2
+#define MUSB_C_EP12T_BITS 2
+#define MUSB_C_EP12R_BITS 2
+#define MUSB_C_EP13T_BITS 2
+#define MUSB_C_EP13R_BITS 2
+#define MUSB_C_EP14T_BITS 2
+#define MUSB_C_EP14R_BITS 2
+#define MUSB_C_EP15T_BITS 2
+#define MUSB_C_EP15R_BITS 2
+
+/* Define the following constant if the USB2.0 Transceiver Macrocell data width
+ * is 16-bits.
+ */
+/* #define C_UTM_16 */
+
+/* Define this constant if the CPU uses big-endian byte ordering. */
+/* #define C_BIGEND */
+
+/* Define the following constant if any Tx endpoint is required to support
+ * multiple bulk packets.
+ */
+/* #define C_MP_TX */
+
+/* Define the following constant if any Rx endpoint is required to support
+ * multiple bulk packets.
+ */
+/* #define C_MP_RX */
+
+/* Define the following constant if any Tx endpoint is required to support high
+ * bandwidth ISO.
+ */
+/* #define C_HB_TX */
+
+/* Define the following constant if any Rx endpoint is required to support high
+ * bandwidth ISO.
+ */
+/* #define C_HB_RX */
+
+/* Define the following constant if software connect/disconnect control is
+ * required.
+ */
+#define MUSB_C_SOFT_CON
+
+/* Define the following constant if Vendor Control Registers are required. */
+/* #define C_VEND_REG */
+
+/* Vendor control register widths. */
+#define MUSB_C_VCTL_BITS 4
+#define MUSB_C_VSTAT_BITS 8
+
+/* Define the following constant to include a DMA controller. */
+/* #define C_DMA */
+
+/* Define the following constant if 2 or more DMA channels are required. */
+/* #define C_DMA2 */
+
+/* Define the following constant if 3 or more DMA channels are required. */
+/* #define C_DMA3 */
+
+/* Define the following constant if 4 or more DMA channels are required. */
+/* #define C_DMA4 */
+
+/* Define the following constant if 5 or more DMA channels are required. */
+/* #define C_DMA5 */
+
+/* Define the following constant if 6 or more DMA channels are required. */
+/* #define C_DMA6 */
+
+/* Define the following constant if 7 or more DMA channels are required. */
+/* #define C_DMA7 */
+
+/* Define the following constant if 8 or more DMA channels are required. */
+/* #define C_DMA8 */
+
+/* Enable Dynamic FIFO Sizing */
+#define MUSB_C_DYNFIFO_DEF
+
+/* Derived constants. The following constants are derived from the previous
+ * configuration constants
+ */
+
+/* Total number of endpoints. Legal values are 2 - 16. This must be equal to
+ * the larger of C_NUM_EPT, C_NUM_EPR
+ */
+/* #define MUSB_C_NUM_EPS 5 */
+
+/* C_EPMAX_BITS is equal to the largest endpoint FIFO word address bits */
+#define MUSB_C_EPMAX_BITS 11
+
+/* C_RAM_BITS is the number of address bits required to address the RAM (32-bit
+ * addresses). It is defined as log2 of the sum of 2** of all the endpoint FIFO
+ * dword address bits (rounded up).
+ */
+#define MUSB_C_RAM_BITS 10
+
+#endif /* CONFIG_USB_TUSB6010 */
+
+#endif /* __TUSB6010_H__ */
--- /dev/null
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/mux.h>
+
+#include "musbdefs.h"
+
+/*
+ * REVISIT: With TUSB2.0 only one dmareq line can be used at a time.
+ * This should get fixed in hardware at some point.
+ */
+#define BROKEN_DMAREQ
+
+#ifdef BROKEN_DMAREQ
+#define dmareq_works() 0
+#else
+#define dmareq_works() 1
+#endif
+
+#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->pPrivateData)
+
+#define MAX_DMAREQ 5 /* REVISIT: Really 6, but req5 not OK */
+
+struct tusb_omap_dma_ch {
+ struct musb *musb;
+ void __iomem *tusb_base;
+ unsigned long phys_offset;
+ int epnum;
+ u8 tx;
+ struct musb_hw_ep *hw_ep;
+
+ int ch;
+ s8 dmareq;
+ s8 sync_dev;
+
+ struct tusb_omap_dma *tusb_dma;
+
+ void __iomem *dma_addr;
+
+ unsigned long packet_sz;
+ unsigned long len;
+ unsigned long transfer_len;
+ unsigned long completed_len;
+};
+
+struct tusb_omap_dma {
+ struct dma_controller controller;
+ struct musb *musb;
+ void __iomem *tusb_base;
+
+ int ch;
+ s8 dmareq;
+ s8 sync_dev;
+};
+
+static int tusb_omap_dma_start(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+
+ // DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch);
+
+ return 0;
+}
+
+static int tusb_omap_dma_stop(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+
+ // DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch);
+
+ return 0;
+}
+
+#ifdef BROKEN_DMAREQ
+
+/*
+ * Allocate dmareq0 to the current channel unless it's already taken
+ */
+static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tusb_base, TUSB_DMA_EP_MAP);
+ if (reg != 0) {
+ DBG(1, "ep%i dmareq0 is busy for ep%i\n",
+ chdat->epnum, reg & 0xf);
+ return -EAGAIN;
+ }
+
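+ /* TUSB_DMA_EP_MAP encoding as used here: bit 4 is the direction
+ * (1 = tx), bits 3:0 the endpoint number owning dmareq0
+ */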
+ if (chdat->tx)
+ reg = (1 << 4) | chdat->epnum;
+ else
+ reg = chdat->epnum;
+
+ musb_writel(chdat->tusb_base, TUSB_DMA_EP_MAP, reg);
+
+ return 0;
+}
+
+static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tusb_base, TUSB_DMA_EP_MAP);
+
+ if ((reg & 0xf) != chdat->epnum) {
+ printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
+ chdat->epnum, reg & 0xf);
+ return;
+ }
+ musb_writel(chdat->tusb_base, TUSB_DMA_EP_MAP, 0);
+}
+
+#else
+#define tusb_omap_use_shared_dmareq(chdat) 0
+#define tusb_omap_free_shared_dmareq(chdat) do {} while (0)
+#endif
+
+/*
+ * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
+ * musb_gadget.c.
+ */
+static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
+{
+ struct dma_channel *channel = (struct dma_channel *)data;
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+ struct musb *musb = chdat->musb;
+ struct musb_hw_ep *hw_ep = chdat->hw_ep;
+ void __iomem *ep_conf = hw_ep->regs;
+ void __iomem *musb_base = musb->pRegs;
+ unsigned long transferred, flags;
+ int ch;
+
+ spin_lock_irqsave(&musb->Lock, flags);
+
+ if (dmareq_works())
+ ch = chdat->ch;
+ else
+ ch = tusb_dma->ch;
+
+ if (ch_status != OMAP_DMA_BLOCK_IRQ)
+ printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status);
+
+ DBG(3, "ep%i %s dma callback ch: %i status: %x\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ ch, ch_status);
+
+ if (chdat->tx)
+ transferred = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+ else
+ transferred = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+ transferred = TUSB_EP_CONFIG_XFR_SIZE(transferred);
+ channel->dwActualLength = chdat->transfer_len - transferred;
+
+ if (!dmareq_works())
+ tusb_omap_free_shared_dmareq(chdat);
+
+ channel->bStatus = MGC_DMA_STATUS_FREE;
+
+ /* Handle only RX callbacks here. TX callbacks must be handled based
+ * on the TUSB DMA status interrupt.
+ * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback
+ * interrupt for RX and TX.
+ */
+ if (!chdat->tx)
+ musb_dma_completion(musb, chdat->epnum, chdat->tx);
+
+ /* We must terminate short tx transfers manually by setting TXPKTRDY.
+ * REVISIT: This same problem may occur with other MUSB dma as well.
+ * Easy to test with g_ether by pinging the MUSB board with ping -s54.
+ */
+ if ((chdat->transfer_len < chdat->packet_sz) ||
+ (chdat->transfer_len % chdat->packet_sz != 0)) {
+ u16 csr;
+
+ if (chdat->tx) {
+ DBG(1, "terminating short tx packet\n");
+ MGC_SelectEnd(musb_base, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MGC_O_HDRC_TXCSR);
+ csr |= MGC_M_TXCSR_MODE | MGC_M_TXCSR_TXPKTRDY;
+ musb_writew(hw_ep->regs, MGC_O_HDRC_TXCSR, csr);
+ }
+ }
+
+ spin_unlock_irqrestore(&musb->Lock, flags);
+}
+
+static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
+ u8 rndis_mode, dma_addr_t dma_addr, u32 len)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+ struct musb *musb = chdat->musb;
+ struct musb_hw_ep *hw_ep = chdat->hw_ep;
+ void __iomem *musb_base = musb->pRegs;
+ void __iomem *ep_conf = hw_ep->regs;
+ dma_addr_t fifo = hw_ep->fifo_sync;
+ struct omap_dma_channel_params dma_params;
+ int src_burst, dst_burst;
+ u32 transfer_len;
+ u16 csr;
+ int ch;
+ s8 dmareq;
+ s8 sync_dev;
+
+ if (len < 32) {
+ DBG(3, "dma too short for ep%i %s dma_addr: %08x len: %u\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx", dma_addr, len);
+ return FALSE;
+ }
+
+#if 0
+ if ((len % 32 != 0)) {
+ transfer_len = len / 32;
+ transfer_len *= 32;
+ DBG(3, "ep%i short %s dma: %lu/%lu %lu remainder\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ transfer_len, len, len - transfer_len);
+ } else
+ transfer_len = len;
+#else
+ if ((len % 32) != 0) {
+ DBG(3, "bad dma length for ep%i %s dma_addr: %08x len: %u\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx", dma_addr, len);
+ return FALSE;
+ } else
+ transfer_len = len;
+#endif
+
+ if (dma_addr & 0x1) {
+ DBG(3, "unaligned dma address for ep%i %s: %08x\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx", dma_addr);
+ return FALSE;
+ }
+
+ if (dmareq_works()) {
+
+ /* FIXME: Check for allocated dma ch */
+ ch = chdat->ch;
+
+ dmareq = chdat->dmareq;
+ sync_dev = chdat->sync_dev;
+ } else {
+ if (tusb_omap_use_shared_dmareq(chdat) != 0)
+ return FALSE;
+
+ /* FIXME: Check for allocated dma ch */
+ ch = tusb_dma->ch;
+
+ dmareq = tusb_dma->dmareq;
+ sync_dev = tusb_dma->sync_dev;
+
+ omap_set_dma_callback(ch, tusb_omap_dma_cb, channel);
+ }
+
+ chdat->packet_sz = packet_sz;
+ chdat->len = len;
+ chdat->transfer_len = transfer_len;
+ channel->dwActualLength = 0;
+ chdat->dma_addr = (void __iomem *)dma_addr;
+ channel->bStatus = MGC_DMA_STATUS_BUSY;
+
+ DBG(1, "ep%i %s dma ch%i dma: %08x len: %u packet_sz: %i rndis: %d\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ ch, dma_addr, transfer_len, packet_sz, rndis_mode);
+
+ /* Since we're recycling dma areas, we need to clean or invalidate */
+ if (chdat->tx) {
+ consistent_sync(phys_to_virt(dma_addr), len,
+ DMA_TO_DEVICE);
+ } else {
+ consistent_sync(phys_to_virt(dma_addr), len,
+ DMA_FROM_DEVICE);
+ }
+
+ /*
+ * Prepare omap DMA for transfer
+ */
+ if (chdat->tx) {
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ dma_params.elem_count = 8; /* 8x32-bit burst */
+ dma_params.frame_count = transfer_len / 32; /* one frame per 32-byte burst */
+
+ dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
+ dma_params.src_start = (unsigned long)dma_addr;
+ dma_params.src_ei = 0;
+ dma_params.src_fi = 0;
+
+ dma_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ dma_params.dst_start = (unsigned long)fifo;
+ dma_params.dst_ei = 1;
+ dma_params.dst_fi = -31; /* Loop 32 byte window */
+
+ dma_params.trigger = sync_dev;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+ dma_params.src_or_dst_synch = 0; /* Dest sync */
+
+ src_burst = OMAP_DMA_DATA_BURST_16;
+ dst_burst = OMAP_DMA_DATA_BURST_8;
+ } else {
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ dma_params.elem_count = 8; /* 8x32-bit burst */
+ dma_params.frame_count = transfer_len / 32; /* one frame per 32-byte burst */
+
+ dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ dma_params.src_start = (unsigned long)fifo;
+ dma_params.src_ei = 1;
+ dma_params.src_fi = -31; /* Loop 32 byte window */
+
+ dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
+ dma_params.dst_start = (unsigned long)dma_addr;
+ dma_params.dst_ei = 0;
+ dma_params.dst_fi = 0;
+
+ dma_params.trigger = sync_dev;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+ dma_params.src_or_dst_synch = 1; /* Source sync */
+
+ src_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 read */
+ dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */
+ }
+
+ /* Use 16x16 transfer if addresses not 32-bit aligned */
+ if ((dma_params.src_start & 0x2) || (dma_params.dst_start & 0x2)) {
+ DBG(1, "using 16x16 async dma from 0x%08lx to 0x%08lx\n",
+ dma_params.src_start, dma_params.dst_start);
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
+ dma_params.elem_count = 16; /* 16x16-bit burst */
+
+ fifo = hw_ep->fifo_async;
+
+ /* REVISIT: Check if 16x16 sync dma might also work */
+ if (chdat->tx)
+ dma_params.dst_start = (unsigned long)fifo;
+ else
+ dma_params.src_start = (unsigned long)fifo;
+ } else {
+ DBG(1, "ep%i %s using 16x32 sync dma from 0x%08lx to 0x%08lx\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ dma_params.src_start, dma_params.dst_start);
+ }
+
+ omap_set_dma_params(ch, &dma_params);
+ omap_set_dma_src_burst_mode(ch, src_burst);
+ omap_set_dma_dest_burst_mode(ch, dst_burst);
+ omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED);
+
+ /*
+ * Prepare MUSB for DMA transfer
+ */
+ if (chdat->tx) {
+ MGC_SelectEnd(musb_base, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MGC_O_HDRC_TXCSR);
+ csr |= (MGC_M_TXCSR_AUTOSET | MGC_M_TXCSR_DMAENAB
+ | MGC_M_TXCSR_DMAMODE | MGC_M_TXCSR_MODE);
+ csr &= ~MGC_M_TXCSR_P_UNDERRUN;
+ musb_writew(hw_ep->regs, MGC_O_HDRC_TXCSR, csr);
+ } else {
+ MGC_SelectEnd(musb_base, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MGC_O_HDRC_RXCSR);
+ csr |= MGC_M_RXCSR_DMAENAB;
+ csr &= ~(MGC_M_RXCSR_AUTOCLEAR | MGC_M_RXCSR_DMAMODE);
+ musb_writew(hw_ep->regs, MGC_O_HDRC_RXCSR,
+ csr | MGC_M_RXCSR_P_WZC_BITS);
+ }
+
+ /*
+ * Start DMA transfer
+ */
+ omap_start_dma(ch);
+
+ if (chdat->tx) {
+ /* Send packet_sz packets at a time */
+ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, packet_sz);
+
+ musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(transfer_len));
+ } else {
+ /* Receive packet_sz packets at a time */
+ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
+ packet_sz << 16);
+
+ musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(transfer_len));
+ }
+
+ return TRUE;
+}
+
+static int tusb_omap_dma_abort(struct dma_channel *channel)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+
+ if (!dmareq_works()) {
+ if (tusb_dma->ch >= 0) {
+ omap_stop_dma(tusb_dma->ch);
+ omap_free_dma(tusb_dma->ch);
+ tusb_dma->ch = -1;
+ }
+
+ tusb_dma->dmareq = -1;
+ tusb_dma->sync_dev = -1;
+ }
+
+ channel->bStatus = MGC_DMA_STATUS_FREE;
+
+ return 0;
+}
+
+static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tusb_base, TUSB_DMA_EP_MAP);
+ int i, dmareq_nr = -1;
+
+ const int sync_dev[6] = {
+ OMAP24XX_DMA_EXT_DMAREQ0,
+ OMAP24XX_DMA_EXT_DMAREQ1,
+ OMAP24XX_DMA_EXT_DMAREQ2,
+ OMAP24XX_DMA_EXT_DMAREQ3,
+ OMAP24XX_DMA_EXT_DMAREQ4,
+ OMAP24XX_DMA_EXT_DMAREQ5,
+ };
+
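+ /* Each dmareq line has a five-bit field in TUSB_DMA_EP_MAP:
+ * bits 0..3 hold the endpoint number (0 means unused) and bit 4
+ * the direction (1 = tx). E.g. mapping ep2 tx onto dmareq1 sets
+ * bits 5..9 of the register to 0x12.
+ */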
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ int cur = (reg & (0xf << (i * 5))) >> (i * 5);
+ if (cur == 0) {
+ dmareq_nr = i;
+ break;
+ }
+ }
+
+ if (dmareq_nr == -1)
+ return -EAGAIN;
+
+ reg |= (chdat->epnum << (dmareq_nr * 5));
+ if (chdat->tx)
+ reg |= ((1 << 4) << (dmareq_nr * 5));
+ musb_writel(chdat->tusb_base, TUSB_DMA_EP_MAP, reg);
+
+ chdat->dmareq = dmareq_nr;
+ chdat->sync_dev = sync_dev[chdat->dmareq];
+
+ return 0;
+}
+
+static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg;
+
+ if (!chdat || chdat->dmareq < 0)
+ return;
+
+ reg = musb_readl(chdat->tusb_base, TUSB_DMA_EP_MAP);
+ reg &= ~(0x1f << (chdat->dmareq * 5));
+ musb_writel(chdat->tusb_base, TUSB_DMA_EP_MAP, reg);
+
+ chdat->dmareq = -1;
+ chdat->sync_dev = -1;
+}
+
+static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
+
+static struct dma_channel *
+tusb_omap_dma_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep,
+ u8 tx)
+{
+ int ret, i;
+ const char *dev_name;
+ struct tusb_omap_dma *tusb_dma;
+ struct musb *musb;
+ void __iomem *tusb_base;
+ struct dma_channel *channel = NULL;
+ struct tusb_omap_dma_ch *chdat = NULL;
+ u32 reg;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+ musb = tusb_dma->musb;
+ tusb_base = musb->ctrl_base;
+
+ reg = musb_readl(tusb_base, TUSB_DMA_INT_MASK);
+ if (tx)
+ reg &= ~(1 << hw_ep->bLocalEnd);
+ else
+ reg &= ~(1 << (hw_ep->bLocalEnd + 15));
+ musb_writel(tusb_base, TUSB_DMA_INT_MASK, reg);
+
+ /* REVISIT: Why does dmareq5 not work? */
+ if (hw_ep->bLocalEnd == 0) {
+ DBG(1, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
+ return NULL;
+ }
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch = dma_channel_pool[i];
+ if (ch->bStatus == MGC_DMA_STATUS_UNKNOWN) {
+ ch->bStatus = MGC_DMA_STATUS_FREE;
+ channel = ch;
+ chdat = ch->pPrivateData;
+ break;
+ }
+ }
+
+ if (!channel)
+ return NULL;
+
+ if (tx) {
+ chdat->tx = 1;
+ dev_name = "TUSB transmit";
+ } else {
+ chdat->tx = 0;
+ dev_name = "TUSB receive";
+ }
+
+ chdat->musb = tusb_dma->musb;
+ chdat->tusb_base = tusb_dma->tusb_base;
+ chdat->hw_ep = hw_ep;
+ chdat->epnum = hw_ep->bLocalEnd;
+ chdat->dmareq = -1;
+ chdat->completed_len = 0;
+ chdat->tusb_dma = tusb_dma;
+
+ channel->dwMaxLength = 0x7fffffff;
+ channel->bDesiredMode = 0;
+ channel->dwActualLength = 0;
+
+ if (dmareq_works()) {
+ ret = tusb_omap_dma_allocate_dmareq(chdat);
+ if (ret != 0)
+ goto free_dmareq;
+
+ ret = omap_request_dma(chdat->sync_dev, dev_name,
+ tusb_omap_dma_cb,
+ channel, &chdat->ch);
+ if (ret != 0)
+ goto free_dmareq;
+ } else if (tusb_dma->ch == -1) {
+ tusb_dma->dmareq = 0;
+ tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0;
+
+ /* Callback data gets set later in the shared dmareq case */
+ ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared",
+ tusb_omap_dma_cb,
+ NULL, &tusb_dma->ch);
+ if (ret != 0)
+ goto free_dmareq;
+
+ chdat->dmareq = -1;
+ chdat->ch = -1;
+ }
+
+ DBG(1, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
+ chdat->epnum,
+ chdat->tx ? "tx" : "rx",
+ chdat->ch >= 0 ? "dedicated" : "shared",
+ chdat->ch >= 0 ? chdat->ch : tusb_dma->ch,
+ chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq,
+ chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev);
+
+ return channel;
+
+free_dmareq:
+ tusb_omap_dma_free_dmareq(chdat);
+
+ DBG(1, "ep%i: Could not get a DMA channel\n", chdat->epnum);
+ channel->bStatus = MGC_DMA_STATUS_UNKNOWN;
+
+ return NULL;
+}
+
+static void tusb_omap_dma_release(struct dma_channel *channel)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct musb *musb = chdat->musb;
+ void __iomem *tusb_base = musb->ctrl_base;
+ u32 reg;
+
+ DBG(1, "ep%i ch%i\n", chdat->epnum, chdat->ch);
+
+ reg = musb_readl(tusb_base, TUSB_DMA_INT_MASK);
+ if (chdat->tx)
+ reg |= (1 << chdat->epnum);
+ else
+ reg |= (1 << (chdat->epnum + 15));
+ musb_writel(tusb_base, TUSB_DMA_INT_MASK, reg);
+
+ reg = musb_readl(tusb_base, TUSB_DMA_INT_CLEAR);
+ if (chdat->tx)
+ reg |= (1 << chdat->epnum);
+ else
+ reg |= (1 << (chdat->epnum + 15));
+ musb_writel(tusb_base, TUSB_DMA_INT_CLEAR, reg);
+
+ channel->bStatus = MGC_DMA_STATUS_UNKNOWN;
+
+ if (chdat->ch >= 0) {
+ omap_stop_dma(chdat->ch);
+ omap_free_dma(chdat->ch);
+ chdat->ch = -1;
+ }
+
+ if (chdat->dmareq >= 0)
+ tusb_omap_dma_free_dmareq(chdat);
+}
+
+static void tusb_omap_dma_cleanup(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+ int i;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch = dma_channel_pool[i];
+ if (ch) {
+ kfree(ch->pPrivateData);
+ kfree(ch);
+ }
+ }
+
+ if (!dmareq_works() && tusb_dma && tusb_dma->ch >= 0)
+ omap_free_dma(tusb_dma->ch);
+
+ kfree(tusb_dma);
+}
+
+static struct dma_controller *
+tusb_omap_dma_init(struct musb *musb, void __iomem *base)
+{
+ void __iomem *tusb_base = musb->ctrl_base;
+ struct tusb_omap_dma *tusb_dma;
+ int i;
+
+ /* REVISIT: Get dmareq lines used from board-*.c */
+#ifdef CONFIG_ARCH_OMAP2
+ omap_cfg_reg(AA10_242X_DMAREQ0);
+ omap_cfg_reg(AA6_242X_DMAREQ1);
+ omap_cfg_reg(E4_242X_DMAREQ2);
+ omap_cfg_reg(G4_242X_DMAREQ3);
+ omap_cfg_reg(D3_242X_DMAREQ4);
+ omap_cfg_reg(E3_242X_DMAREQ5);
+#endif
+
+ musb_writel(tusb_base, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(tusb_base, TUSB_DMA_EP_MAP, 0);
+
+ musb_writel(tusb_base, TUSB_DMA_REQ_CONF,
+ TUSB_DMA_REQ_CONF_BURST_SIZE(2) |
+ TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) |
+ TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+ tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
+ if (!tusb_dma)
+ goto cleanup;
+
+ tusb_dma->musb = musb;
+ tusb_dma->tusb_base = musb->ctrl_base;
+
+ tusb_dma->ch = -1;
+ tusb_dma->dmareq = -1;
+ tusb_dma->sync_dev = -1;
+
+ tusb_dma->controller.start = tusb_omap_dma_start;
+ tusb_dma->controller.stop = tusb_omap_dma_stop;
+ tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
+ tusb_dma->controller.channel_release = tusb_omap_dma_release;
+ tusb_dma->controller.channel_program = tusb_omap_dma_program;
+ tusb_dma->controller.channel_abort = tusb_omap_dma_abort;
+ tusb_dma->controller.pPrivateData = tusb_dma;
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch;
+ struct tusb_omap_dma_ch *chdat;
+
+ ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
+ if (!ch)
+ goto cleanup;
+
+ dma_channel_pool[i] = ch;
+
+ chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
+ if (!chdat)
+ goto cleanup;
+
+ ch->bStatus = MGC_DMA_STATUS_UNKNOWN;
+ ch->pPrivateData = chdat;
+ }
+
+ return &tusb_dma->controller;
+
+cleanup:
+ tusb_omap_dma_cleanup(&tusb_dma->controller);
+
+ return NULL;
+}
+
+const struct dma_controller_factory dma_controller_factory = {
+ .create = tusb_omap_dma_init,
+ .destroy = tusb_omap_dma_cleanup,
+};
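A minimal sketch (not part of the patch) of the call sequence these hooks
are designed for, assuming hypothetical musb, hw_ep, buf_dma, packet_sz
and len variables on the caller's side:

	struct dma_controller *c;
	struct dma_channel *channel;

	/* probe time: build the controller and its channel pool */
	c = dma_controller_factory.create(musb, musb->pRegs);

	/* per-transfer: grab a channel and program it */
	channel = c->channel_alloc(c, hw_ep, 1);	/* 1 = tx */
	if (channel && c->channel_program(channel, packet_sz,
				0 /* no rndis */, buf_dma, len)) {
		/* transfer is running; tusb_omap_dma_cb() later fills
		 * in channel->dwActualLength and marks the channel free
		 */
	} else if (channel) {
		c->channel_release(channel);	/* fall back to PIO */
	}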
--- /dev/null
+/*****************************************************************
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006 by Nokia Corporation
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
+ * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
+ * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
+ * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
+ * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
+ * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
+ * GRAPHICS SUPPORT CUSTOMER.
+ ******************************************************************/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+
+#include "musbdefs.h"
+
+
+
+static void musb_port_suspend(struct musb *musb, u8 bSuspend)
+{
+ u8 power;
+ void __iomem *pBase = musb->pRegs;
+
+ power = musb_readb(pBase, MGC_O_HDRC_POWER);
+
+ if (bSuspend) {
+ DBG(3, "Root port suspended\n");
+ musb_writeb(pBase, MGC_O_HDRC_POWER,
+ power | MGC_M_POWER_SUSPENDM);
+ musb->port1_status |= USB_PORT_STAT_SUSPEND;
+ } else if (power & MGC_M_POWER_SUSPENDM) {
+ DBG(3, "Root port resumed\n");
+ power &= ~(MGC_M_POWER_SUSPENDM | MGC_M_POWER_RESUME);
+ musb_writeb(pBase, MGC_O_HDRC_POWER,
+ power | MGC_M_POWER_RESUME);
+
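+ /* REVISIT: the USB spec calls for resume signaling to be
+ * driven for at least 20 msec before it is stopped; here it
+ * is cleared again immediately
+ */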
+ musb_writeb(pBase, MGC_O_HDRC_POWER, power);
+ musb->port1_status &= ~USB_PORT_STAT_SUSPEND;
+ musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ }
+}
+
+static void musb_port_reset(struct musb *musb, u8 bReset)
+{
+ u8 power;
+ void __iomem *pBase = musb->pRegs;
+
+#ifdef CONFIG_USB_MUSB_OTG
+ /* REVISIT this looks wrong for HNP */
+ u8 devctl = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
+
+ if (musb->bDelayPortPowerOff || !(devctl & MGC_M_DEVCTL_HM)) {
+ /* REVISIT: should this return without starting the reset? */
+ DBG(1, "port reset while not in host mode\n");
+ }
+#endif
+
+ if (!is_host_active(musb))
+ return;
+
+ /* NOTE: caller guarantees it will turn off the reset when
+ * the appropriate amount of time has passed
+ */
+ power = musb_readb(pBase, MGC_O_HDRC_POWER);
+ if (bReset) {
+ musb->bIgnoreDisconnect = TRUE;
+ power &= 0xf0;
+ musb_writeb(pBase, MGC_O_HDRC_POWER,
+ power | MGC_M_POWER_RESET);
+
+ musb->port1_status |= USB_PORT_STAT_RESET;
+ musb->port1_status &= ~USB_PORT_STAT_ENABLE;
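+ /* GetPortStatus ends the reset once this 50 msec
+ * minimum reset width has elapsed
+ */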
+ musb->rh_timer = jiffies + msecs_to_jiffies(50);
+ } else {
+ DBG(4, "root port reset stopped\n");
+ musb_writeb(pBase, MGC_O_HDRC_POWER,
+ power & ~MGC_M_POWER_RESET);
+
+ musb->bIgnoreDisconnect = FALSE;
+
+ power = musb_readb(pBase, MGC_O_HDRC_POWER);
+ if (power & MGC_M_POWER_HSMODE) {
+ DBG(4, "high-speed device connected\n");
+ musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
+ }
+
+ musb->port1_status &= ~USB_PORT_STAT_RESET;
+ musb->port1_status |= USB_PORT_STAT_ENABLE
+ | (USB_PORT_STAT_C_RESET << 16)
+ | (USB_PORT_STAT_C_ENABLE << 16);
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+
+ }
+}
+
+void musb_root_disconnect(struct musb *musb)
+{
+ musb->port1_status &=
+ ~(USB_PORT_STAT_CONNECTION
+ | USB_PORT_STAT_ENABLE
+ | USB_PORT_STAT_LOW_SPEED
+ | USB_PORT_STAT_HIGH_SPEED
+ | USB_PORT_STAT_TEST
+ );
+ musb->port1_status |= USB_PORT_STAT_C_CONNECTION << 16;
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_HOST:
+ musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ break;
+ default:
+ DBG(1, "host disconnect, state %d\n", musb->xceiv.state);
+ }
+}
+
+
+/*---------------------------------------------------------------------*/
+
+int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ int retval = 0;
+
+ /* called in_irq() via usb_hcd_poll_rh_status() */
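+ /* the change bitmap has one bit per port plus one for the
+ * hub itself; bit 1 (0x02) means "port 1 changed"
+ */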
+ if (musb->port1_status & 0xffff0000) {
+ *buf = 0x02;
+ retval = 1;
+ }
+ return retval;
+}
+
+int musb_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ u32 temp;
+ int retval = 0;
+ unsigned long flags;
+
+ if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)
+ || !is_host_active(musb)))
+ return -ESHUTDOWN;
+
+ /* hub features: always zero, setting is a NOP
+ * port features: reported, sometimes updated
+ * no indicators
+ */
+ spin_lock_irqsave(&musb->Lock, flags);
+ switch (typeReq) {
+ case ClearHubFeature:
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ case C_HUB_LOCAL_POWER:
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if (wIndex != 1)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ musb_port_suspend(musb, FALSE);
+ break;
+ case USB_PORT_FEAT_POWER:
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_SUSPEND:
+ break;
+ default:
+ goto error;
+ }
+ DBG(5, "clear feature %d\n", wValue);
+ musb->port1_status &= ~(1 << wValue);
+ break;
+ case GetHubDescriptor:
+ {
+ struct usb_hub_descriptor *desc = (void *)buf;
+
+ desc->bDescLength = 9;
+ desc->bDescriptorType = 0x29;
+ desc->bNbrPorts = 1;
+ desc->wHubCharacteristics = __constant_cpu_to_le16(
+ 0x0001 /* per-port power switching */
+ | 0x0010 /* no overcurrent reporting */
+ );
+ desc->bPwrOn2PwrGood = 5; /* msec/2 */
+ desc->bHubContrCurrent = 0;
+
+ /* workaround bogus struct definition */
+ desc->DeviceRemovable[0] = 0x02; /* port 1 */
+ desc->DeviceRemovable[1] = 0xff;
+ }
+ break;
+ case GetHubStatus:
+ temp = 0;
+ *(__le32 *) buf = cpu_to_le32(temp);
+ break;
+ case GetPortStatus:
+ if (wIndex != 1)
+ goto error;
+
+ if ((musb->port1_status & USB_PORT_STAT_RESET)
+ && time_after(jiffies, musb->rh_timer))
+ musb_port_reset(musb, FALSE);
+
+ *(__le32 *) buf = cpu_to_le32(musb->port1_status);
+ /* port change status is more interesting */
+ DBG((*(u16*)(buf+2)) ? 2 : 5, "port status %08x\n",
+ musb->port1_status);
+ break;
+ case SetPortFeature:
+ if (wIndex != 1)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ /* NOTE: this controller has a strange state machine
+ * that involves "requesting sessions" according to
+ * magic side effects from incompletely-described
+ * rules about startup...
+ *
+ * This call is what really starts the host mode; be
+ * very careful about side effects if you reorder any
+ * initialization logic, e.g. for OTG, or change any
+ * logic relating to VBUS power-up.
+ */
+ musb_start(musb);
+ musb->port1_status |= USB_PORT_STAT_POWER;
+ break;
+ case USB_PORT_FEAT_RESET:
+ musb_port_reset(musb, TRUE);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ musb_port_suspend(musb, TRUE);
+ break;
+ case USB_PORT_FEAT_TEST:
+ break;
+ default:
+ goto error;
+ }
+ DBG(5, "set feature %d\n", wValue);
+ break;
+
+ default:
+error:
+ /* "protocol stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&musb->Lock, flags);
+ return retval;
+}
--- /dev/null
+/*
+ * This is used by both the host and peripheral modes of the driver for
+ * Inventra (Multipoint) Highspeed Dual-Role Controllers: (M)HDRC.
+ *
+ * Board initialization should put one of these into dev->platform_data,
+ * probably on some platform_device named "musb_hdrc". It encapsulates
+ * key configuration differences between boards.
+ */
+
+/* The USB role is defined by the connector used on the board, so long as
+ * standards are being followed. (Developer boards sometimes won't.)
+ */
+enum musb_mode {
+ MUSB_UNDEFINED = 0,
+ MUSB_HOST, /* A or Mini-A connector */
+ MUSB_PERIPHERAL, /* B or Mini-B connector */
+ MUSB_OTG /* Mini-AB connector */
+};
+
+struct musb_hdrc_platform_data {
+ /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */
+ u8 mode;
+
+ /* (HOST or OTG) switch VBUS on/off */
+ int (*set_vbus)(struct device *dev, int is_on);
+
+ /* (HOST or OTG) VBUS current supplied, in 2 mA units (default = 8 mA) */
+ u8 power;
+
+ /* (HOST or OTG) delay from VBUS on until power good, in 2 msec units */
+ u8 potpgt;
+
+ /* TBD: chip defaults should probably go someplace else,
+ * e.g. number of tx/rx endpoints, etc
+ */
+ unsigned multipoint:1;
+
+ /* Power the device on or off */
+ int (*set_power)(int state);
+};
+