[IEEE80211]: Fix softmac lockdep reports.
author Johannes Berg <johannes@sipsolutions.net>
Thu, 4 Oct 2007 01:14:23 +0000 (18:14 -0700)
committer David S. Miller <davem@sunset.davemloft.net>
Wed, 10 Oct 2007 23:52:22 +0000 (16:52 -0700)
It seems I was actually able to hit this deadlock; on my quad G5, softmac
locks up more often than not. This fixes it by using its own workqueue,
which can safely be flushed under the RTNL.

I'm not sure the workqueue naming in this patch is correct. And don't
assume that with the patch it no longer continually locks up. It still
does; it just no longer triggers lockdep warnings all the time.

Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/ieee80211softmac.h
net/ieee80211/softmac/ieee80211softmac_assoc.c
net/ieee80211/softmac/ieee80211softmac_auth.c
net/ieee80211/softmac/ieee80211softmac_event.c
net/ieee80211/softmac/ieee80211softmac_module.c
net/ieee80211/softmac/ieee80211softmac_scan.c
net/ieee80211/softmac/ieee80211softmac_wx.c

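For context, here is a minimal, hypothetical sketch (not part of the patch) of the pattern being introduced below: a driver-private workqueue created at init, delayed work queued to it instead of the shared events workqueue, and the private queue flushed and destroyed at teardown. The names example_wq, example_work and example_work_fn are invented for illustration; the workqueue calls follow the 2.6.23-era API that the patch itself uses.

/*
 * Hypothetical example module; all "example_*" names are illustrative.
 * It shows the same pattern the patch applies to softmac: work items go
 * onto a private workqueue, so flushing them cannot deadlock against
 * unrelated work on the shared workqueue that itself takes the RTNL.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct delayed_work example_work;

static void example_work_fn(struct work_struct *work)
{
	/* Runs on the private "example" workqueue thread. */
	printk(KERN_INFO "example: delayed work ran\n");
}

static int __init example_init(void)
{
	/* Freezeable, like the "softmac" queue created in the patch. */
	example_wq = create_freezeable_workqueue("example");
	if (!example_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&example_work, example_work_fn);
	/* Counterpart of queue_delayed_work(mac->wq, ...) in the patch. */
	queue_delayed_work(example_wq, &example_work, 5 * HZ);
	return 0;
}

static void __exit example_exit(void)
{
	cancel_delayed_work(&example_work);
	/* Only our own work items can run here, so this flush is
	 * deadlock-safe even if the caller held the RTNL. */
	flush_workqueue(example_wq);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
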
index 89119277553dbb8dbe90085088d4242c27f3c023..1ef6282fdded66520c5a15956848a77dfaaecbfa 100644 (file)
@@ -229,6 +229,8 @@ struct ieee80211softmac_device {
        /* this lock protects this structure */
        spinlock_t lock;
 
+       struct workqueue_struct *wq;
+
        u8 running; /* SoftMAC started? */
        u8 scanning;
 
index 4c0feb2dacd89cd92977635af6ad9f19e2d5d20c..c4d122ddd72c76d76397c1ce244200df368670be 100644 (file)
@@ -53,7 +53,7 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
        /* Set a timer for timeout */
        /* FIXME: make timeout configurable */
        if (likely(mac->running))
-               schedule_delayed_work(&mac->associnfo.timeout, 5 * HZ);
+               queue_delayed_work(mac->wq, &mac->associnfo.timeout, 5 * HZ);
        spin_unlock_irqrestore(&mac->lock, flags);
 }
 
@@ -419,7 +419,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
                                network->authenticated = 0;
                                /* we don't want to do this more than once ... */
                                network->auth_desynced_once = 1;
-                               schedule_delayed_work(&mac->associnfo.work, 0);
+                               queue_delayed_work(mac->wq, &mac->associnfo.work, 0);
                                break;
                        }
                default:
@@ -441,7 +441,7 @@ ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac)
 
        spin_lock_irqsave(&mac->lock, flags);
        mac->associnfo.associating = 1;
-       schedule_delayed_work(&mac->associnfo.work, 0);
+       queue_delayed_work(mac->wq, &mac->associnfo.work, 0);
        spin_unlock_irqrestore(&mac->lock, flags);
 }
 
@@ -483,7 +483,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev,
                dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
                return 0;
        }
-       schedule_delayed_work(&mac->associnfo.work, 0);
+       queue_delayed_work(mac->wq, &mac->associnfo.work, 0);
 
        return 0;
 }
index 855fa0fe641bc8bbe65854f824523490992daf0b..a53a751d07025e54613175af93d01c62bb179248 100644 (file)
@@ -62,7 +62,7 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
 
        /* add to list */
        list_add_tail(&auth->list, &mac->auth_queue);
-       schedule_delayed_work(&auth->work, 0);
+       queue_delayed_work(mac->wq, &auth->work, 0);
        spin_unlock_irqrestore(&mac->lock, flags);
 
        return 0;
@@ -97,7 +97,7 @@ ieee80211softmac_auth_queue(struct work_struct *work)
                }
                net->authenticated = 0;
                /* add a timeout call so we eventually give up waiting for an auth reply */
-               schedule_delayed_work(&auth->work, IEEE80211SOFTMAC_AUTH_TIMEOUT);
+               queue_delayed_work(mac->wq, &auth->work, IEEE80211SOFTMAC_AUTH_TIMEOUT);
                auth->retry--;
                spin_unlock_irqrestore(&mac->lock, flags);
                if (ieee80211softmac_send_mgt_frame(mac, auth->net, IEEE80211_STYPE_AUTH, auth->state))
@@ -242,7 +242,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
                         * request. */
                        cancel_delayed_work(&aq->work);
                        INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
-                       schedule_delayed_work(&aq->work, 0);
+                       queue_delayed_work(mac->wq, &aq->work, 0);
                        spin_unlock_irqrestore(&mac->lock, flags);
                        return 0;
                case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@ -408,6 +408,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
        ieee80211softmac_deauth_from_net(mac, net);
 
        /* let's try to re-associate */
-       schedule_delayed_work(&mac->associnfo.work, 0);
+       queue_delayed_work(mac->wq, &mac->associnfo.work, 0);
        return 0;
 }
index b3e33a4d48691f7ee3400ff9e22e612ebd49ffe4..8cef05b60f16403dc9f3c9b3cfca1436683edf1c 100644 (file)
@@ -172,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
                                /* User may have subscribed to ANY event, so
                                 * we tell them which event triggered it. */
                                eventptr->event_type = event;
-                               schedule_delayed_work(&eventptr->work, 0);
+                               queue_delayed_work(mac->wq, &eventptr->work, 0);
                        }
                }
 }
index 6398e6e674936af9be7124f897da0478a681e724..07505ca859af7f7c91f69c54b39a731a24d75cbd 100644 (file)
@@ -36,8 +36,13 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
        dev = alloc_ieee80211(sizeof(*softmac) + sizeof_priv);
        if (!dev)
                return NULL;
-
        softmac = ieee80211_priv(dev);
+       softmac->wq = create_freezeable_workqueue("softmac");
+       if (!softmac->wq) {
+               free_ieee80211(dev);
+               return NULL;
+       }
+
        softmac->dev = dev;
        softmac->ieee = netdev_priv(dev);
        spin_lock_init(&softmac->lock);
@@ -105,7 +110,7 @@ ieee80211softmac_clear_pending_work(struct ieee80211softmac_device *sm)
                cancel_delayed_work(&eventptr->work);
 
        spin_unlock_irqrestore(&sm->lock, flags);
-       flush_scheduled_work();
+       flush_workqueue(sm->wq);
 
        /* now we should be save and no longer need locking... */
        spin_lock_irqsave(&sm->lock, flags);
@@ -139,6 +144,7 @@ void free_ieee80211softmac(struct net_device *dev)
        ieee80211softmac_clear_pending_work(sm);
        kfree(sm->scaninfo);
        kfree(sm->wpa.IE);
+       destroy_workqueue(sm->wq);
        free_ieee80211(dev);
 }
 EXPORT_SYMBOL_GPL(free_ieee80211softmac);
index abea3648680ebf02b978931fa5c20b3a2e0101fc..bfab8d7db88f8426bf260c8f7d4d823814afa9d9 100644 (file)
@@ -123,7 +123,7 @@ void ieee80211softmac_scan(struct work_struct *work)
                                spin_unlock_irqrestore(&sm->lock, flags);
                                break;
                        }
-                       schedule_delayed_work(&si->softmac_scan, IEEE80211SOFTMAC_PROBE_DELAY);
+                       queue_delayed_work(sm->wq, &si->softmac_scan, IEEE80211SOFTMAC_PROBE_DELAY);
                        spin_unlock_irqrestore(&sm->lock, flags);
                        return;
                } else {
@@ -190,7 +190,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev)
        sm->scaninfo->started = 1;
        sm->scaninfo->stop = 0;
        INIT_COMPLETION(sm->scaninfo->finished);
-       schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
+       queue_delayed_work(sm->wq, &sm->scaninfo->softmac_scan, 0);
        spin_unlock_irqrestore(&sm->lock, flags);
        return 0;
 }
index 8e8ad08a411c16da17995132f41331a2e7660773..ac36767b56e892c63d7eb0c0684b4d1b088f835e 100644 (file)
@@ -91,7 +91,7 @@ check_assoc_again:
                /* We must unlock to avoid deadlocks with the assoc workqueue
                 * on the associnfo.mutex */
                mutex_unlock(&sm->associnfo.mutex);
-               flush_scheduled_work();
+               flush_workqueue(sm->wq);
                /* Avoid race! Check assoc status again. Maybe someone started an
                 * association while we flushed. */
                goto check_assoc_again;
@@ -114,7 +114,7 @@ check_assoc_again:
 
        sm->associnfo.associating = 1;
        /* queue lower level code to do work (if necessary) */
-       schedule_delayed_work(&sm->associnfo.work, 0);
+       queue_delayed_work(sm->wq, &sm->associnfo.work, 0);
 
        mutex_unlock(&sm->associnfo.mutex);
 
@@ -349,7 +349,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
                /* force reassociation */
                mac->associnfo.bssvalid = 0;
                if (mac->associnfo.associated)
-                       schedule_delayed_work(&mac->associnfo.work, 0);
+                       queue_delayed_work(mac->wq, &mac->associnfo.work, 0);
        } else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
                /* the bssid we have is no longer fixed */
                mac->associnfo.bssfixed = 0;
@@ -366,7 +366,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
                /* tell the other code that this bssid should be used no matter what */
                mac->associnfo.bssfixed = 1;
                /* queue associate if new bssid or (old one again and not associated) */
-               schedule_delayed_work(&mac->associnfo.work, 0);
+               queue_delayed_work(mac->wq, &mac->associnfo.work, 0);
        }
 
  out: