dma_addr_t rx_desc_dma;
int rx_desc_area_size;
struct sk_buff **rx_skb;
-
- struct timer_list rx_oom;
};
struct tx_queue {
u8 rxq_mask;
int rxq_primary;
struct napi_struct napi;
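+ /* One oom retry timer per port, replacing the old per-rxq rx_oom timers. */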
+ struct timer_list rx_oom;
struct rx_queue rxq[8];
/*
/* rx ***********************************************************************/
static void txq_reclaim(struct tx_queue *txq, int force);
-static void rxq_refill(struct rx_queue *rxq)
+static int rxq_refill(struct rx_queue *rxq, int budget, int *oom)
{
- struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
- unsigned long flags;
+ int skb_size;
+ int refilled;
- spin_lock_irqsave(&mp->lock, flags);
+ /*
+ * Reserve 2+14 bytes for an ethernet header (the hardware
+ * automatically prepends 2 bytes of dummy data to each
+ * received packet), 16 bytes for up to four VLAN tags, and
+ * 4 bytes for the trailing FCS -- 36 bytes total.
+ */
+ skb_size = rxq_to_mp(rxq)->dev->mtu + 36;
+
+ /*
+ * Make sure that the skb size is a multiple of 8 bytes, as
+ * the lower three bits of the receive descriptor's buffer
+ * size field are ignored by the hardware.
+ */
+ skb_size = (skb_size + 7) & ~7;
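+ /* E.g. the default 1500-byte MTU gives 1500 + 36 = 1536, already a multiple of 8. */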
- while (rxq->rx_desc_count < rxq->rx_ring_size) {
- int skb_size;
+ refilled = 0;
+ while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
struct sk_buff *skb;
int unaligned;
int rx;
- /*
- * Reserve 2+14 bytes for an ethernet header (the
- * hardware automatically prepends 2 bytes of dummy
- * data to each received packet), 16 bytes for up to
- * four VLAN tags, and 4 bytes for the trailing FCS
- * -- 36 bytes total.
- */
- skb_size = mp->dev->mtu + 36;
-
- /*
- * Make sure that the skb size is a multiple of 8
- * bytes, as the lower three bits of the receive
- * descriptor's buffer size field are ignored by
- * the hardware.
- */
- skb_size = (skb_size + 7) & ~7;
-
skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
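+ /*
+ * If the allocation fails, flag out-of-memory and stop refilling;
+ * the caller is responsible for arming the oom retry timer.
+ */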
- if (skb == NULL)
+ if (skb == NULL) {
+ *oom = 1;
break;
+ }
unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
if (unaligned)
skb_reserve(skb, dma_get_cache_alignment() - unaligned);
+ refilled++;
rxq->rx_desc_count++;
rx = rxq->rx_used_desc++;
skb_reserve(skb, 2);
}
- if (rxq->rx_desc_count != rxq->rx_ring_size)
- mod_timer(&rxq->rx_oom, jiffies + (HZ / 10));
-
- spin_unlock_irqrestore(&mp->lock, flags);
-}
-
-static inline void rxq_refill_timer_wrapper(unsigned long data)
-{
- rxq_refill((struct rx_queue *)data);
+ return refilled;
}
static int rxq_process(struct rx_queue *rxq, int budget)
struct rx_desc *rx_desc;
unsigned int cmd_sts;
struct sk_buff *skb;
- unsigned long flags;
-
- spin_lock_irqsave(&mp->lock, flags);
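+ /*
+ * The rx ring is now only touched from napi poll context (and from
+ * ->open() before rx is started), so the mp->lock protection that
+ * used to be taken here is no longer needed.
+ */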
rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
cmd_sts = rx_desc->cmd_sts;
- if (cmd_sts & BUFFER_OWNED_BY_DMA) {
- spin_unlock_irqrestore(&mp->lock, flags);
+ if (cmd_sts & BUFFER_OWNED_BY_DMA)
break;
- }
rmb();
skb = rxq->rx_skb[rxq->rx_curr_desc];
if (rxq->rx_curr_desc == rxq->rx_ring_size)
rxq->rx_curr_desc = 0;
- spin_unlock_irqrestore(&mp->lock, flags);
-
dma_unmap_single(NULL, rx_desc->buf_ptr,
rx_desc->buf_size, DMA_FROM_DEVICE);
rxq->rx_desc_count--;
mp->dev->last_rx = jiffies;
}
- rxq_refill(rxq);
-
return rx;
}
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
struct mv643xx_eth_private *mp;
- int rx;
+ int work_done;
+ int oom;
int i;
mp = container_of(napi, struct mv643xx_eth_private, napi);
}
#endif
- rx = 0;
- for (i = 7; rx < budget && i >= 0; i--)
- if (mp->rxq_mask & (1 << i))
- rx += rxq_process(mp->rxq + i, budget - rx);
+ work_done = 0;
+ oom = 0;
+ for (i = 7; work_done < budget && i >= 0; i--) {
+ if (mp->rxq_mask & (1 << i)) {
+ struct rx_queue *rxq = mp->rxq + i;
- if (rx < budget) {
+ work_done += rxq_process(rxq, budget - work_done);
+ work_done += rxq_refill(rxq, budget - work_done, &oom);
+ }
+ }
+
+ if (work_done < budget) {
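+ /*
+ * If a refill ran out of memory, arm the oom timer so that
+ * polling (and thus refilling) is retried in ~100ms (HZ / 10).
+ */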
+ if (oom)
+ mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
netif_rx_complete(mp->dev, napi);
wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
}
- return rx;
+ return work_done;
+}
+
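+/*
+ * Out-of-memory retry timer handler: reschedule the napi poll,
+ * which will attempt the rx refill again.
+ */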
+static inline void oom_timer_wrapper(unsigned long data)
+{
+ struct mv643xx_eth_private *mp = (void *)data;
+
+ napi_schedule(&mp->napi);
}
nexti * sizeof(struct rx_desc);
}
- init_timer(&rxq->rx_oom);
- rxq->rx_oom.data = (unsigned long)rxq;
- rxq->rx_oom.function = rxq_refill_timer_wrapper;
-
return 0;
rxq_disable(rxq);
- del_timer_sync(&rxq->rx_oom);
-
for (i = 0; i < rxq->rx_ring_size; i++) {
if (rxq->rx_skb[i]) {
dev_kfree_skb(rxq->rx_skb[i]);
wrl(mp, INT_MASK(mp->port_num), 0x00000000);
rdl(mp, INT_MASK(mp->port_num));
- netif_rx_schedule(dev, &mp->napi);
+ napi_schedule(&mp->napi);
}
/*
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
int err;
+ int oom;
int i;
wrl(mp, INT_CAUSE(mp->port_num), 0);
init_mac_tables(mp);
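+ /*
+ * napi is enabled before the initial ring refill below: if that
+ * refill hits an out-of-memory condition, the oom timer is armed,
+ * and its handler calls napi_schedule().
+ */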
+ napi_enable(&mp->napi);
+
+ oom = 0;
for (i = 0; i < 8; i++) {
if ((mp->rxq_mask & (1 << i)) == 0)
continue;
goto out;
}
- rxq_refill(mp->rxq + i);
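+ /* No budget limit here: try to fill the entire ring up front. */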
+ rxq_refill(mp->rxq + i, INT_MAX, &oom);
+ }
+
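+ /*
+ * If the rings could not be filled completely, arm the oom timer
+ * so that the refill is retried shortly after the port comes up.
+ */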
+ if (oom) {
+ mp->rx_oom.expires = jiffies + (HZ / 10);
+ add_timer(&mp->rx_oom);
}
for (i = 0; i < 8; i++) {
}
}
- napi_enable(&mp->napi);
-
netif_carrier_off(dev);
netif_stop_queue(dev);
napi_disable(&mp->napi);
+ del_timer_sync(&mp->rx_oom);
+
netif_carrier_off(dev);
netif_stop_queue(dev);
mp->dev = dev;
- netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
-
set_params(mp, pd);
spin_lock_init(&mp->lock);
}
init_pscr(mp, pd->speed, pd->duplex);
+ netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
+
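+ /* The oom timer is set up here; it is only armed when an rx refill runs out of memory. */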
+ init_timer(&mp->rx_oom);
+ mp->rx_oom.data = (unsigned long)mp;
+ mp->rx_oom.function = oom_timer_wrapper;
+
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
BUG_ON(!res);