pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
netdev: The ingress_lock member is no longer needed.
author     David S. Miller <davem@davemloft.net>    Wed, 9 Jul 2008 00:33:13 +0000 (17:33 -0700)
committer  David S. Miller <davem@davemloft.net>    Wed, 9 Jul 2008 00:33:13 +0000 (17:33 -0700)
Every qdisc is associated with a queue, and in the case of ingress
qdiscs that queue will now be netdev->rx_queue, so using that queue's
lock is the thing to do.

Signed-off-by: David S. Miller <davem@davemloft.net>
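
In miniature, what the patch does on the ingress path: the per-device rx_queue's
spinlock takes over the role of the removed dev->ingress_lock. A condensed sketch
of the resulting locking pattern (boiled down from the net/core/dev.c hunk below;
the RED-loop check and the rest of ing_filter() are unchanged and omitted here):

	/* Sketch only: how the ingress qdisc enqueue is serialized after this patch. */
	static int ing_filter_sketch(struct sk_buff *skb)
	{
		struct net_device *dev = skb->dev;
		struct netdev_queue *rxq = &dev->rx_queue;	/* queue backing the ingress qdisc */
		int result = TC_ACT_OK;
		struct Qdisc *q;

		spin_lock(&rxq->lock);				/* was: spin_lock(&dev->ingress_lock) */
		if ((q = dev->qdisc_ingress) != NULL)
			result = q->enqueue(skb, q);
		spin_unlock(&rxq->lock);

		return result;
	}

Correspondingly, register_netdevice() no longer needs its
spin_lock_init(&dev->ingress_lock), and qdisc_lock_tree()/qdisc_unlock_tree()
now nest dev->rx_queue.lock inside dev->tx_queue.lock instead of the old
ingress lock.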
drivers/net/ifb.c
include/linux/netdevice.h
net/core/dev.c
net/sched/sch_api.c
net/sched/sch_generic.c

diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index bc3de272a8295193172bbc111eeb537d234bcb92..ccbd6554f6eb922e1211f12fdd74bf028287e13d 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -229,13 +229,13 @@ module_param(numifbs, int, 0);
 MODULE_PARM_DESC(numifbs, "Number of ifb devices");
 
 /*
- * dev_ifb->tx_queue.lock is usually taken after dev->ingress_lock,
+ * dev_ifb->tx_queue.lock is usually taken after dev->rx_queue.lock,
  * reversely to e.g. qdisc_lock_tree(). It should be safe until
- * ifb doesn't take dev->tx_queue.lock with dev_ifb->ingress_lock.
+ * ifb doesn't take dev->tx_queue.lock with dev_ifb->rx_queue.lock.
  * But lockdep should know that ifb has different locks from dev.
  */
 static struct lock_class_key ifb_tx_queue_lock_key;
-static struct lock_class_key ifb_ingress_lock_key;
+static struct lock_class_key ifb_rx_queue_lock_key;
 
 
 static int __init ifb_init_one(int index)
@@ -259,7 +259,7 @@ static int __init ifb_init_one(int index)
                goto err;
 
        lockdep_set_class(&dev_ifb->tx_queue.lock, &ifb_tx_queue_lock_key);
-       lockdep_set_class(&dev_ifb->ingress_lock, &ifb_ingress_lock_key);
+       lockdep_set_class(&dev_ifb->rx_queue.lock, &ifb_rx_queue_lock_key);
 
        return 0;
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e835acacb4793ac02207c2195dbb6fc1ae944b99..633a44c6fa5e24cc0cc7153f00a7d85fce2a854a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -632,8 +632,6 @@ struct net_device
        struct netdev_queue     rx_queue;
        struct netdev_queue     tx_queue ____cacheline_aligned_in_smp;
 
-       /* ingress path synchronizer */
-       spinlock_t              ingress_lock;
        struct Qdisc            *qdisc_ingress;
 
 /*
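
For context, the rx_queue and tx_queue members shown above are both struct
netdev_queue. At this point in the series that structure is essentially just a
lock plus a back-pointer to its device (trimmed sketch; the authoritative
definition lives earlier in include/linux/netdevice.h):

	struct netdev_queue {
		spinlock_t		lock;	/* serializes the qdisc attached to this queue;
						 * for rx_queue this is what replaces ingress_lock */
		struct net_device	*dev;	/* owning device */
	};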
diff --git a/net/core/dev.c b/net/core/dev.c
index 05011048b86c9c3e22484ea072ba5ae4ea30cd8b..2322fb69fd532142dfcd77374c901da9fb6a7387 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2014,10 +2014,11 @@ static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
  */
 static int ing_filter(struct sk_buff *skb)
 {
-       struct Qdisc *q;
        struct net_device *dev = skb->dev;
-       int result = TC_ACT_OK;
        u32 ttl = G_TC_RTTL(skb->tc_verd);
+       struct netdev_queue *rxq;
+       int result = TC_ACT_OK;
+       struct Qdisc *q;
 
        if (MAX_RED_LOOP < ttl++) {
                printk(KERN_WARNING
@@ -2029,10 +2030,12 @@ static int ing_filter(struct sk_buff *skb)
        skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 
-       spin_lock(&dev->ingress_lock);
+       rxq = &dev->rx_queue;
+
+       spin_lock(&rxq->lock);
        if ((q = dev->qdisc_ingress) != NULL)
                result = q->enqueue(skb, q);
-       spin_unlock(&dev->ingress_lock);
+       spin_unlock(&rxq->lock);
 
        return result;
 }
@@ -3795,7 +3798,6 @@ int register_netdevice(struct net_device *dev)
        spin_lock_init(&dev->_xmit_lock);
        netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
        dev->xmit_lock_owner = -1;
-       spin_lock_init(&dev->ingress_lock);
 
        dev->iflink = -1;
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2a1834f8c7d8db8b8194db6f96fd40a9e42c1956..570cef2a9c5f9e22d935ce5641703f89bbac0726 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -601,12 +601,11 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 
        sch->parent = parent;
 
+       sch->stats_lock = &dev_queue->lock;
        if (handle == TC_H_INGRESS) {
                sch->flags |= TCQ_F_INGRESS;
-               sch->stats_lock = &dev->ingress_lock;
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
        } else {
-               sch->stats_lock = &dev_queue->lock;
                if (handle == 0) {
                        handle = qdisc_alloc_handle(dev);
                        err = -ENOMEM;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ee8f9f78a095231f5fdd67341a84150f9c897d4e..804d44b003488fee4cfd8f9e79cfe017df9d792e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
  * - enqueue, dequeue are serialized via top level device
  *   spinlock queue->lock.
  * - ingress filtering is serialized via top level device
- *   spinlock dev->ingress_lock.
+ *   spinlock dev->rx_queue.lock.
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
 void qdisc_lock_tree(struct net_device *dev)
        __acquires(dev->tx_queue.lock)
-       __acquires(dev->ingress_lock)
+       __acquires(dev->rx_queue.lock)
 {
        spin_lock_bh(&dev->tx_queue.lock);
-       spin_lock(&dev->ingress_lock);
+       spin_lock(&dev->rx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_lock_tree);
 
 void qdisc_unlock_tree(struct net_device *dev)
-       __releases(dev->ingress_lock)
+       __releases(dev->rx_queue.lock)
        __releases(dev->tx_queue.lock)
 {
-       spin_unlock(&dev->ingress_lock);
+       spin_unlock(&dev->rx_queue.lock);
        spin_unlock_bh(&dev->tx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_unlock_tree);