#endif
}
-static inline struct cppi_descriptor *
-cppi_bd_alloc(struct cppi_channel *c)
+static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
struct cppi_descriptor *bd = c->bdPoolHead;
/* do whatever is necessary to start controller */
for (i = 0; i < ARRAY_SIZE(controller->txCppi); i++) {
- controller->txCppi[i].transmit = TRUE;
+ controller->txCppi[i].transmit = true;
controller->txCppi[i].chNo = i;
}
for (i = 0; i < ARRAY_SIZE(controller->rxCppi); i++) {
- controller->rxCppi[i].transmit = FALSE;
+ controller->rxCppi[i].transmit = false;
controller->rxCppi[i].chNo = i;
}
else
cppi_next_rx_segment(musb, otgChannel, mode);
- return TRUE;
+ return true;
}
static int cppi_rx_scan(struct cppi *cppi, unsigned ch)
struct cppi_descriptor {
/* Hardware Overlay */
- u32 hNext; /**< Next(hardware) Buffer Descriptor Pointer */
- u32 buffPtr; /**<Buffer Pointer (dma_addr_t) */
- u32 bOffBLen; /**<Buffer_offset16,buffer_length16 */
- u32 hOptions; /**<Option fields for SOP,EOP etc*/
+ u32 hNext; /* Next(hardware) Buffer Descriptor Pointer */
+ u32 buffPtr; /* Buffer Pointer (dma_addr_t) */
+ u32 bOffBLen; /* Buffer_offset16,buffer_length16 */
+ u32 hOptions; /* Option fields for SOP,EOP etc*/
struct cppi_descriptor *next;
dma_addr_t dma; /* address of this descriptor */
} __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN)));
-/* forward declaration for CppiDmaController structure */
struct cppi;
/**
/* which direction of which endpoint? */
struct musb_hw_ep *hw_ep;
- u8 transmit;
+ bool transmit;
u8 chNo;
/* DMA modes: RNDIS or "transparent" */
spin_lock_irqsave(&musb->lock, flags);
if (musb->xceiv.state == OTG_STATE_B_WAIT_ACON) {
- DBG(1, "HNP: B_WAIT_ACON timeout, going back to B_PERIPHERAL\n");
+ DBG(1, "HNP: B_WAIT_ACON timeout; back to B_PERIPHERAL\n");
musb_g_disconnect(musb);
musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
musb->is_active = 0;
* not get a disconnect irq...
*/
if ((devctl & MUSB_DEVCTL_VBUS)
- != (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
+ != (3 << MUSB_DEVCTL_VBUS_SHIFT)
+ ) {
musb->int_usb |= MUSB_INTR_DISCONNECT;
musb->int_usb &= ~MUSB_INTR_SUSPEND;
break;
switch (musb->xceiv.state) {
case OTG_STATE_B_PERIPHERAL:
if (int_usb & MUSB_INTR_SUSPEND) {
- DBG(1, "HNP: SUSPEND and CONNECT, now b_host\n");
+ DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
musb->xceiv.state = OTG_STATE_B_HOST;
hcd->self.is_b_host = 1;
int_usb &= ~MUSB_INTR_SUSPEND;
/*
* Looks like non-HS BABBLE can be ignored, but
* HS BABBLE is an error condition. For HS the solution
- * is to avoid babble in the first place and fix whatever
- * causes BABBLE. When HS BABBLE happens we can only stop
- * the session.
+ * is to avoid babble in the first place and fix what
+ * caused BABBLE. When HS BABBLE happens we can only
+ * stop the session.
*/
if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
DBG(1, "BABBLE devctl: %02x\n", devctl);
else {
- ERR("Stopping host session because of babble\n");
+ ERR("Stopping host session -- babble\n");
musb_writeb(mbase, MUSB_DEVCTL, 0);
}
} else if (is_peripheral_capable()) {
hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
hw_ep->max_packet_sz_tx = maxpacket;
- hw_ep->is_shared_fifo = TRUE;
+ hw_ep->is_shared_fifo = true;
break;
}
/* shared TX/RX FIFO? */
if ((reg & 0xf0) == 0xf0) {
hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
- hw_ep->is_shared_fifo = TRUE;
+ hw_ep->is_shared_fifo = true;
continue;
} else {
hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
- hw_ep->is_shared_fifo = FALSE;
+ hw_ep->is_shared_fifo = false;
}
/* FIXME set up hw_ep->{rx,tx}_double_buffered */
if (reg & MUSB_CONFIGDATA_MPRXE) {
strcat(aInfo, ", bulk combine");
#ifdef C_MP_RX
- musb->bulk_combine = TRUE;
+ musb->bulk_combine = true;
#else
strcat(aInfo, " (X)"); /* no driver support */
#endif
if (reg & MUSB_CONFIGDATA_MPTXE) {
strcat(aInfo, ", bulk split");
#ifdef C_MP_TX
- musb->bulk_split = TRUE;
+ musb->bulk_split = true;
#else
strcat(aInfo, " (X)"); /* no driver support */
#endif
/****************************** CONSTANTS ********************************/
-#ifndef TRUE
-#define TRUE 1
-#endif
-#ifndef FALSE
-#define FALSE 0
-#endif
-
#ifndef MUSB_C_NUM_EPS
#define MUSB_C_NUM_EPS ((u8)16)
#endif
* directly with the "flat" model, or after setting up an index register.
*/
-#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) || \
- defined(CONFIG_ARCH_OMAP3430)
+#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
+ || defined(CONFIG_ARCH_OMAP3430)
/* REVISIT indexed access seemed to
* misbehave (on DaVinci) for at least peripheral IN ...
*/
/****************************** FUNCTIONS ********************************/
#define MUSB_HST_MODE(_musb)\
- { (_musb)->is_host=TRUE; }
+ { (_musb)->is_host = true; }
#define MUSB_DEV_MODE(_musb) \
- { (_musb)->is_host=FALSE; }
+ { (_musb)->is_host = false; }
#define test_devctl_hst_mode(_x) \
(musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM)
u8 epnum;
/* hardware configuration, possibly dynamic */
- u8 is_shared_fifo;
- u8 tx_double_buffered;
- u8 rx_double_buffered;
+ bool is_shared_fifo;
+ bool tx_double_buffered;
+ bool rx_double_buffered;
u16 max_packet_sz_tx;
u16 max_packet_sz_rx;
u8 min_power; /* vbus for periph, in mA/2 */
+ bool is_host;
+
/* active means connected and not suspended */
unsigned is_active:1;
unsigned is_multipoint:1;
- unsigned is_host:1;
unsigned ignore_disconnect:1; /* during bus resets */
int a_wait_bcon; /* VBUS timeout in msecs */
#ifdef C_MP_RX
unsigned bulk_combine:1;
- /* REVISIT allegedly doesn't work reliably */
-#if 0
#define can_bulk_combine(musb,type) \
(((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
#else
#define can_bulk_combine(musb,type) 0
#endif
-#else
-#define can_bulk_combine(musb,type) 0
-#endif
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
/* is_suspended means USB B_PERIPHERAL suspend */
unsigned is_self_powered:1;
unsigned is_bus_powered:1;
- unsigned set_address:1;
- unsigned test_mode:1;
- unsigned softconnect:1;
+ unsigned set_address:1;
+ unsigned test_mode:1;
+ unsigned softconnect:1;
enum musb_g_ep0_state ep0_state;
u8 address;
extern void musb_start(struct musb *musb);
extern void musb_stop(struct musb *musb);
-extern void musb_write_fifo(struct musb_hw_ep *ep,
- u16 len, const u8 * src);
-extern void musb_read_fifo(struct musb_hw_ep *ep,
- u16 len, u8 * dst);
+extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 * src);
+extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 * dst);
extern void musb_load_testpacket(struct musb *);
struct proc_dir_entry;
#if (MUSB_DEBUG > 0) && defined(MUSB_CONFIG_PROC_FS)
-extern struct proc_dir_entry *musb_debug_create(char *name,
- struct musb *data);
+extern struct proc_dir_entry *musb_debug_create(char *name, struct musb *data);
extern void musb_debug_delete(char *name, struct musb *data);
#else
-static inline struct proc_dir_entry *musb_debug_create(char *name,
- struct musb *data)
+static inline struct proc_dir_entry *
+musb_debug_create(char *name, struct musb *data)
{
return NULL;
}
* transaction (typically representing many USB maximum-sized packets)
* @actual_len: how many bytes have been transferred
* @status: current channel status (updated e.g. on interrupt)
- * @desired_mode: TRUE if mode 1 is desired; FALSE if mode 0 is desired
+ * @desired_mode: true if mode 1 is desired; false if mode 0 is desired
*
* channels are associated with an endpoint for the duration of at least
* one usb transfer.
size_t max_len;
size_t actual_len;
enum dma_channel_status status;
- u8 desired_mode;
+ bool desired_mode;
};
/*
void musb_g_giveback(
struct musb_ep *ep,
struct usb_request *request,
- int status)
+ int status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
: NULL;
if (!request) {
DBG(4, "%s idle now\n",
- musb_ep->end_point.name);
+ musb_ep->end_point.name);
break;
}
}
 * that last packet should trigger an overflow fault.) But in mode 1,
 * we don't get DMA completion interrupt for short packets.
*
- * Theoretically, we could enable DMAReq interrupt (MUSB_RXCSR_DMAMODE = 1),
+ * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
* to get endpoint interrupt on every DMA req, but that didn't seem
* to work reliably.
*
// csr |= MUSB_RXCSR_DMAMODE;
/* this special sequence (enabling and then
- disabling MUSB_RXCSR_DMAMODE) is required
- to get DMAReq to activate
+ * disabling MUSB_RXCSR_DMAMODE) is required
+ * to get DMAReq to activate
*/
musb_writew(epio, MUSB_RXCSR,
csr | MUSB_RXCSR_DMAMODE);
#endif
- musb_writew(epio, MUSB_RXCSR,
- csr);
+ musb_writew(epio, MUSB_RXCSR, csr);
if (request->actual < request->length) {
int transfer_size = 0;
if (use_dma)
return;
}
-#endif /* Mentor's USB */
+#endif /* Mentor's DMA */
fifo_count = request->length - request->actual;
DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
channel->desired_mode,
dma_addr,
fifo_count);
- if (ret == TRUE)
+ if (ret)
return;
}
#endif
return -EINVAL;
}
-
-static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
-{
- /* FIXME -- delegate to otg_transciever logic */
-
- DBG(2, "<= vbus_draw %u =>\n", mA);
- return 0;
-}
#endif
static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
}
EXPORT_SYMBOL(usb_gadget_register_driver);
-static void
-stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
+static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
int i;
struct musb_hw_ep *hw_ep;
* Context: caller holds controller lock
*/
static int
-service_in_request(struct musb *musb,
- const struct usb_ctrlrequest *ctrlrequest)
+service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
{
int handled = 0; /* not handled */
switch (ctrlrequest->bRequest) {
case USB_REQ_SET_ADDRESS:
/* change it after the status stage */
- musb->set_address = TRUE;
+ musb->set_address = true;
musb->address = (u8) (ctrlrequest->wValue & 0x7f);
handled = 1;
break;
/* enter test mode after irq */
if (handled > 0)
- musb->test_mode = TRUE;
+ musb->test_mode = true;
break;
#ifdef CONFIG_USB_MUSB_OTG
case USB_DEVICE_B_HNP_ENABLE:
* the TX FIFO right away, and give the controller a moment
* to switch modes...
*/
- musb->set_address = FALSE;
+ musb->set_address = false;
musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
if (req->wLength == 0) {
if (req->bRequestType & USB_DIR_IN)
}
static int
-forward_to_driver(struct musb *musb,
- const struct usb_ctrlrequest *ctrlrequest)
+forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
__releases(musb->lock)
__acquires(musb->lock)
{
* is done we won't see the next packet.
*/
if (musb->set_address) {
- musb->set_address = FALSE;
+ musb->set_address = false;
musb_writeb(mbase, MUSB_FADDR, musb->address);
}
return status;
}
-static int
-musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
+static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
{
/* we just won't support this */
return -EINVAL;
}
/* for bulk/interrupt endpoints only */
-static inline void musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+static inline void
+musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
{
struct usb_device *udev = urb->dev;
u16 csr;
/*
* PIO RX for a packet (or part of it).
*/
-static u8 musb_host_packet_rx(struct musb *musb, struct urb *urb,
- u8 epnum, u8 iso_err)
+static bool
+musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
- u16 rx_count;
- u8 *buf;
- u16 csr;
- u8 done = FALSE;
+ u16 rx_count;
+ u8 *buf;
+ u16 csr;
+ bool done = false;
u32 length;
int do_flush = 0;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
/*
* Service the default endpoint (ep0) as host.
- * Return TRUE until it's time to start the status stage.
+ * Return true until it's time to start the status stage.
*/
-static int musb_h_ep0_continue(struct musb *musb,
- u16 len, struct urb *urb)
+static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
- int more = FALSE;
- u8 *fifo_dest = NULL;
- u16 fifo_count = 0;
+ bool more = false;
+ u8 *fifo_dest = NULL;
+ u16 fifo_count = 0;
struct musb_hw_ep *hw_ep = musb->control_ep;
struct musb_qh *qh = hw_ep->in_qh;
struct usb_ctrlrequest *request;
*/
} else if (urb->actual_length <
urb->transfer_buffer_length)
- more = TRUE;
+ more = true;
break;
case MUSB_EP0_START:
request = (struct usb_ctrlrequest *) urb->setup_packet;
} else if (request->bRequestType & USB_DIR_IN) {
DBG(4, "start IN-DATA\n");
musb->ep0_stage = MUSB_EP0_IN;
- more = TRUE;
+ more = true;
break;
} else {
DBG(4, "start OUT-DATA\n");
musb->ep0_stage = MUSB_EP0_OUT;
- more = TRUE;
+ more = true;
}
/* FALLTHROUGH */
case MUSB_EP0_OUT:
musb_write_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
- more = TRUE;
+ more = true;
}
break;
default:
struct musb_hw_ep *hw_ep = musb->control_ep;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->in_qh;
- u8 complete = FALSE;
+ bool complete = false;
irqreturn_t retval = IRQ_NONE;
/* ep0 only has one queue, "in" */
/* if we just did status stage, we are done */
if (MUSB_EP0_STATUS == musb->ep0_stage) {
retval = IRQ_HANDLED;
- complete = TRUE;
+ complete = true;
}
/* prepare status */
retval = IRQ_HANDLED;
if (urb)
urb->status = status;
- complete = TRUE;
+ complete = true;
/* use the proper sequence to abort the transfer */
if (csr & MUSB_CSR0_H_REQPKT) {
void musb_host_tx(struct musb *musb, u8 epnum)
{
int pipe;
- u8 done = FALSE;
+ bool done = false;
u16 tx_csr;
size_t wLength = 0;
u8 *buf = NULL;
musb_writew(epio, MUSB_TXCSR, tx_csr);
musb_writeb(epio, MUSB_TXINTERVAL, 0);
- done = TRUE;
+ done = true;
}
/* second cppi case */
d = urb->iso_frame_desc + qh->iso_idx;
d->actual_length = qh->segsize;
if (++qh->iso_idx >= urb->number_of_packets) {
- done = TRUE;
+ done = true;
} else if (!dma) {
d++;
buf = urb->transfer_buffer + d->offset;
wLength = d->length;
}
} else if (dma) {
- done = TRUE;
+ done = true;
} else {
/* see if we need to send more data, or ZLP */
if (qh->segsize < qh->maxpacket)
- done = TRUE;
+ done = true;
else if (qh->offset == urb->transfer_buffer_length
&& !(urb-> transfer_flags
& URB_ZERO_PACKET))
- done = TRUE;
+ done = true;
if (!done) {
buf = urb->transfer_buffer
+ qh->offset;
* so we must abort this transfer after cleanup
*/
if (urb->status != -EINPROGRESS) {
- done = TRUE;
+ done = true;
if (status == 0)
status = urb->status;
}
void __iomem *mbase = musb->mregs;
int pipe;
u16 rx_csr, val;
- u8 iso_err = FALSE;
- u8 done = FALSE;
+ bool iso_err = false;
+ bool done = false;
u32 status;
struct dma_channel *dma;
} else {
DBG(4, "RX end %d ISO data error\n", epnum);
/* packet error reported later */
- iso_err = TRUE;
+ iso_err = true;
}
}
}
musb_h_flush_rxfifo(hw_ep, 0);
musb_writeb(epio, MUSB_RXINTERVAL, 0);
- done = TRUE;
+ done = true;
goto finish;
}
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
(void) musb->dma_controller->channel_abort(dma);
xfer_len = dma->actual_len;
- done = TRUE;
+ done = true;
}
DBG(2, "RXCSR%d %04x, reqpkt, len %zd%s\n", epnum, rx_csr,
musb_readw(epio, MUSB_RXCSR),
musb_readw(epio, MUSB_RXCOUNT));
#else
- done = TRUE;
+ done = true;
#endif
} else if (urb->status == -EINPROGRESS) {
/* if no errors, be sure a packet is ready for unloading */
#include "musb_core.h"
-static void musb_port_suspend(struct musb *musb, u8 bSuspend)
+static void musb_port_suspend(struct musb *musb, bool do_suspend)
{
u8 power;
void __iomem *mbase = musb->mregs;
* SE0 changing to connect (J) or wakeup (K) states.
*/
power = musb_readb(mbase, MUSB_POWER);
- if (bSuspend) {
+ if (do_suspend) {
int retries = 10000;
power &= ~MUSB_POWER_RESUME;
}
}
-static void musb_port_reset(struct musb *musb, u8 bReset)
+static void musb_port_reset(struct musb *musb, bool do_reset)
{
u8 power;
void __iomem *mbase = musb->mregs;
#ifdef CONFIG_USB_MUSB_OTG
if (musb->xceiv.state == OTG_STATE_B_IDLE) {
- DBG(2, "HNP: Returning from HNP, not resetting hub as b_idle\n");
+ DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
musb->port1_status &= ~USB_PORT_STAT_RESET;
return;
}
* the appropriate amount of time has passed
*/
power = musb_readb(mbase, MUSB_POWER);
- if (bReset) {
+ if (do_reset) {
/*
* If RESUME is set, we must make sure it stays minimum 20 ms.
msleep(1);
}
- musb->ignore_disconnect = TRUE;
+ musb->ignore_disconnect = true;
power &= 0xf0;
musb_writeb(mbase, MUSB_POWER,
power | MUSB_POWER_RESET);
musb_writeb(mbase, MUSB_POWER,
power & ~MUSB_POWER_RESET);
- musb->ignore_disconnect = FALSE;
+ musb->ignore_disconnect = false;
power = musb_readb(mbase, MUSB_POWER);
if (power & MUSB_POWER_HSMODE) {
case USB_PORT_FEAT_ENABLE:
break;
case USB_PORT_FEAT_SUSPEND:
- musb_port_suspend(musb, FALSE);
+ musb_port_suspend(musb, false);
break;
case USB_PORT_FEAT_POWER:
if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
/* finish RESET signaling? */
if ((musb->port1_status & USB_PORT_STAT_RESET)
&& time_after(jiffies, musb->rh_timer))
- musb_port_reset(musb, FALSE);
+ musb_port_reset(musb, false);
/* finish RESUME signaling? */
if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
musb->xceiv.state = OTG_STATE_A_HOST;
}
- put_unaligned(cpu_to_le32(musb->port1_status & ~MUSB_PORT_STAT_RESUME),
+ put_unaligned(cpu_to_le32(musb->port1_status
+ & ~MUSB_PORT_STAT_RESUME),
(__le32 *) buf);
/* port change status is more interesting */
musb_start(musb);
break;
case USB_PORT_FEAT_RESET:
- musb_port_reset(musb, TRUE);
+ musb_port_reset(musb, true);
break;
case USB_PORT_FEAT_SUSPEND:
- musb_port_suspend(musb, TRUE);
+ musb_port_suspend(musb, true);
break;
case USB_PORT_FEAT_TEST:
if (unlikely(is_host_active(musb)))
temp = MUSB_TEST_FORCE_HOST
| MUSB_TEST_FORCE_HS;
- musb_writeb(musb->mregs, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+ musb_writeb(musb->mregs, MUSB_DEVCTL,
+ MUSB_DEVCTL_SESSION);
break;
case 6:
pr_debug("TEST_FIFO_ACCESS\n");
struct musb_dma_controller {
struct dma_controller Controller;
struct musb_dma_channel aChannel[MUSB_HSDMA_CHANNELS];
- void *pDmaPrivate;
- void __iomem *pCoreBase;
- u8 bChannelCount;
- u8 bmUsedChannels;
+ void *pDmaPrivate;
+ void __iomem *pCoreBase;
+ u8 bChannelCount;
+ u8 bmUsedChannels;
u8 irq;
};
for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
if (controller->bmUsedChannels & (1 << bBit)) {
- pChannel = &(controller->aChannel[bBit].Channel);
+ pChannel = &controller->aChannel[bBit].Channel;
dma_channel_release(pChannel);
if (!controller->bmUsedChannels)
return 0;
}
-static struct dma_channel* dma_channel_allocate(struct dma_controller *c,
+static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
struct musb_hw_ep *hw_ep, u8 transmit)
{
u8 bBit;
u16 csr = 0;
DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
- pChannel, packet_sz, dma_addr, len, mode);
+ pChannel, packet_sz, dma_addr, len, mode);
if (mode) {
csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
- if (len < packet_sz) {
- return FALSE;
- }
+ BUG_ON(len < packet_sz);
+
if (packet_sz >= 64) {
- csr |=
- MUSB_HSDMA_BURSTMODE_INCR16 << MUSB_HSDMA_BURSTMODE_SHIFT;
+ csr |= MUSB_HSDMA_BURSTMODE_INCR16
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
} else if (packet_sz >= 32) {
- csr |=
- MUSB_HSDMA_BURSTMODE_INCR8 << MUSB_HSDMA_BURSTMODE_SHIFT;
+ csr |= MUSB_HSDMA_BURSTMODE_INCR8
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
} else if (packet_sz >= 16) {
- csr |=
- MUSB_HSDMA_BURSTMODE_INCR4 << MUSB_HSDMA_BURSTMODE_SHIFT;
+ csr |= MUSB_HSDMA_BURSTMODE_INCR4
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
}
}
csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
| (1 << MUSB_HSDMA_ENABLE_SHIFT)
| (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
- | (pImplChannel->transmit ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT) : 0);
+ | (pImplChannel->transmit
+ ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
+ : 0);
/* address/count */
musb_writel(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
- dma_addr);
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+ dma_addr);
musb_writel(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
- len);
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+ len);
/* control (this should start things) */
musb_writew(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
- csr);
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+ csr);
}
static int dma_channel_program(struct dma_channel * pChannel,
pImplChannel->wMaxPacketSize = packet_sz;
pChannel->status = MUSB_DMA_STATUS_BUSY;
- if ((mode == 1) && (len >= packet_sz)) {
- configure_channel(pChannel, packet_sz, 1, dma_addr,
- len);
- } else
- configure_channel(pChannel, packet_sz, 0, dma_addr,
- len);
+ if ((mode == 1) && (len >= packet_sz))
+ configure_channel(pChannel, packet_sz, 1, dma_addr, len);
+ else
+ configure_channel(pChannel, packet_sz, 0, dma_addr, len);
- return TRUE;
+ return true;
}
static int dma_channel_abort(struct dma_channel *pChannel)
MUSB_TXCSR_DMAENAB |
MUSB_TXCSR_DMAMODE);
musb_writew(mbase,
- MUSB_EP_OFFSET(pImplChannel->epnum,MUSB_TXCSR),
- csr);
+ MUSB_EP_OFFSET(pImplChannel->epnum,MUSB_TXCSR),
+ csr);
}
else {
csr = musb_readw(mbase,
MUSB_RXCSR_DMAENAB |
MUSB_RXCSR_DMAMODE);
musb_writew(mbase,
- MUSB_EP_OFFSET(pImplChannel->epnum,MUSB_RXCSR),
- csr);
+ MUSB_EP_OFFSET(pImplChannel->epnum,MUSB_RXCSR),
+ csr);
}
musb_writew(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL), 0);
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+ 0);
musb_writel(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS), 0);
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+ 0);
musb_writel(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT), 0);
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+ 0);
pChannel->status = MUSB_DMA_STATUS_FREE;
}
pChannel = &pImplChannel->Channel;
csr = musb_readw(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bChannel,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel,
MUSB_HSDMA_CONTROL));
- if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT)) {
+ if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT))
pImplChannel->Channel.status =
- MUSB_DMA_STATUS_BUS_ABORT;
- } else {
+ MUSB_DMA_STATUS_BUS_ABORT;
+ else {
dwAddress = musb_readl(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(
bChannel,
MUSB_HSDMA_ADDRESS));
- pChannel->actual_len =
- dwAddress - pImplChannel->dwStartAddress;
+ pChannel->actual_len = dwAddress
+ - pImplChannel->dwStartAddress;
DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
- pChannel, pImplChannel->dwStartAddress,
- dwAddress, pChannel->actual_len,
- pImplChannel->len,
- (pChannel->actual_len <
- pImplChannel->len) ?
+ pChannel, pImplChannel->dwStartAddress,
+ dwAddress, pChannel->actual_len,
+ pImplChannel->len,
+ (pChannel->actual_len
+ < pImplChannel->len) ?
"=> reconfig 0": "=> complete");
u8 devctl = musb_readb(mbase,
/* completed */
if ((devctl & MUSB_DEVCTL_HM)
- && (pImplChannel->transmit)
- && ((pChannel->desired_mode == 0)
- || (pChannel->actual_len &
+ && (pImplChannel->transmit)
+ && ((pChannel->desired_mode == 0)
+ || (pChannel->actual_len &
(pImplChannel->wMaxPacketSize - 1)))
- ) {
+ ) {
/* Send out the packet */
musb_ep_select(mbase,
pImplChannel->epnum);
- musb_writew(mbase,
- MUSB_EP_OFFSET(pImplChannel->epnum,MUSB_TXCSR),
+ musb_writew(mbase, MUSB_EP_OFFSET(
+ pImplChannel->epnum,
+ MUSB_TXCSR),
MUSB_TXCSR_TXPKTRDY);
} else
musb_dma_completion(
# define UTMI_8BIT (0 << PHYSEL)
# define ULPI_12PIN (1 << PHYSEL)
# define ULPI_8PIN (2 << PHYSEL)
-#define OTG_SIMENABLE_REG OMAP_HSOTG(0x10)
+#define OTG_SIMENABLE_REG OMAP_HSOTG(0x10)
# define TM1 (1 << 0)
#define OTG_FORCESTDBY_REG OMAP_HSOTG(0x14)
# define ENABLEFORCE (1 << 0)
s8 sync_dev;
if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
- return FALSE;
+ return false;
/*
* HW issue #10: Async dma will eventually corrupt the XFR_SIZE
* register is corrupt, and we won't know if the DMA worked.
*/
if (dma_addr & 0x2)
- return FALSE;
+ return false;
chdat->transfer_len = len & ~0x1f;
} else {
if (tusb_omap_use_shared_dmareq(chdat) != 0) {
DBG(3, "could not get dma for ep%i\n", chdat->epnum);
- return FALSE;
+ return false;
}
if (tusb_dma->ch < 0) {
/* REVISIT: This should get blocked earlier, happens
* with MSC ErrorRecoveryTest
*/
WARN_ON(1);
- return FALSE;
+ return false;
}
ch = tusb_dma->ch;
TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
}
- return TRUE;
+ return true;
}
static int tusb_omap_dma_abort(struct dma_channel *channel)