Fix up for make allyesconfig.

Convert the remaining work function callbacks from the old void-pointer
prototype to ones that take a struct work_struct pointer, recovering the
owning object with container_of(), and switch work items that are rearmed
with a delay over to struct delayed_work.

Signed-off-by: David Howells <dhowells@redhat.com>
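---

For reviewers, a minimal sketch of the conversion pattern applied
throughout this patch; struct foo and the foo_*() names are
illustrative, not taken from the tree:

	#include <linux/workqueue.h>

	struct foo {
		int payload;
		struct work_struct work;	/* immediate work */
		struct delayed_work dwork;	/* rearmable/delayed work */
	};

	/* Old API: the callback took an opaque context pointer, set up
	 * with INIT_WORK(&foo->work, foo_work_fn, foo).  New API: the
	 * callback receives the work item itself and recovers the
	 * containing object with container_of(). */
	static void foo_work_fn(struct work_struct *work)
	{
		struct foo *foo = container_of(work, struct foo, work);

		foo->payload++;
	}

	/* For a delayed_work the work_struct is embedded one level
	 * deeper, hence the ".work" member in the container_of() path. */
	static void foo_dwork_fn(struct work_struct *work)
	{
		struct foo *foo = container_of(work, struct foo, dwork.work);

		foo->payload++;
	}

	static void foo_setup(struct foo *foo)
	{
		INIT_WORK(&foo->work, foo_work_fn);
		INIT_DELAYED_WORK(&foo->dwork, foo_dwork_fn);

		schedule_work(&foo->work);
		/* what used to be schedule_work() on a rearmable item
		 * becomes schedule_delayed_work(..., 0) */
		schedule_delayed_work(&foo->dwork, 0);
	}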
        }
 }
 
-static void mce_work_fn(void *data);
-static DECLARE_WORK(mce_work, mce_work_fn, NULL);
+static void mce_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
-static void mce_work_fn(void *data)
+static void mce_work_fn(struct work_struct *work)
 { 
        on_each_cpu(mce_checkregs, NULL, 1, 1);
        schedule_delayed_work(&mce_work, MCE_RATE);
 
 
 struct warm_boot_cpu_info {
        struct completion *complete;
+       struct work_struct task;
        int apicid;
        int cpu;
 };
 
-static void __cpuinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
 {
-       struct warm_boot_cpu_info *info = p;
+       struct warm_boot_cpu_info *info =
+               container_of(work, struct warm_boot_cpu_info, task);
        do_boot_cpu(info->apicid, info->cpu);
        complete(info->complete);
 }
 {
        DECLARE_COMPLETION_ONSTACK(done);
        struct warm_boot_cpu_info info;
-       struct work_struct task;
        int     apicid, ret;
        struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
        info.complete = &done;
        info.apicid = apicid;
        info.cpu = cpu;
-       INIT_WORK(&task, do_warm_boot_cpu, &info);
+       INIT_WORK(&info.task, do_warm_boot_cpu);
 
        tsc_sync_disabled = 1;
 
        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
                        KERNEL_PGD_PTRS);
        flush_tlb_all();
-       schedule_work(&task);
+       schedule_work(&info.task);
        wait_for_completion(&done);
 
        tsc_sync_disabled = 0;
 
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *work)
 {
        unsigned int cpu;
 
 {
        int ret;
 
-       INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+       INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
        ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
        if (!ret)
 
 /* EEH event workqueue setup. */
 static DEFINE_SPINLOCK(eeh_eventlist_lock);
 LIST_HEAD(eeh_eventlist);
-static void eeh_thread_launcher(void *);
-DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+static void eeh_thread_launcher(struct work_struct *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
 
 /* Serialize reset sequences for a given pci device */
 DEFINE_MUTEX(eeh_event_mutex);
  * eeh_thread_launcher
  * @dummy - unused
  */
-static void eeh_thread_launcher(void *dummy)
+static void eeh_thread_launcher(struct work_struct *dummy)
 {
        if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
                printk(KERN_ERR "Failed to start EEH daemon\n");
 
                               int flags);
 static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
                              char *page);
-static void idt77252_softint(void *dev_id);
+static void idt77252_softint(struct work_struct *work);
 
 
 static struct atmdev_ops idt77252_ops =
 }
 
 static void
-idt77252_softint(void *dev_id)
+idt77252_softint(struct work_struct *work)
 {
-       struct idt77252_dev *card = dev_id;
+       struct idt77252_dev *card =
+               container_of(work, struct idt77252_dev, tqueue);
        u32 stat;
        int done;
 
        card->pcidev = pcidev;
        sprintf(card->name, "idt77252-%d", card->index);
 
-       INIT_WORK(&card->tqueue, idt77252_softint, (void *)card);
+       INIT_WORK(&card->tqueue, idt77252_softint);
 
        membase = pci_resource_start(pcidev, 1);
        srambase = pci_resource_start(pcidev, 2);
 
 void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
 void aoecmd_ata_rsp(struct sk_buff *);
 void aoecmd_cfg_rsp(struct sk_buff *);
-void aoecmd_sleepwork(void *vp);
+void aoecmd_sleepwork(struct work_struct *);
 struct sk_buff *new_skb(ulong);
 
 int aoedev_init(void);
 
 /* this function performs work that has been deferred until sleeping is OK
  */
 void
-aoecmd_sleepwork(void *vp)
+aoecmd_sleepwork(struct work_struct *work)
 {
-       struct aoedev *d = (struct aoedev *) vp;
+       struct aoedev *d = container_of(work, struct aoedev, work);
 
        if (d->flags & DEVFL_GDALLOC)
                aoeblk_gdalloc(d);
 
                        kfree(d);
                return NULL;
        }
-       INIT_WORK(&d->work, aoecmd_sleepwork, d);
+       INIT_WORK(&d->work, aoecmd_sleepwork);
        spin_lock_init(&d->lock);
        init_timer(&d->timer);
        d->timer.data = (ulong) d;
 
 
 static void run_fsm(void);
 
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
 
-static DECLARE_WORK(fsm_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);
 
 static void schedule_fsm(void)
 {
        if (!nice)
-               schedule_work(&fsm_tq);
+               schedule_delayed_work(&fsm_tq, 0);
        else
                schedule_delayed_work(&fsm_tq, nice-1);
 }
 
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
 {
        run_fsm();
 }
 
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
 
 static void (* ps_continuation)(void);
 static int (* ps_ready)(void);
 
 static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
 
-static DECLARE_WORK(ps_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);
 
 static void ps_set_intr(void (*continuation)(void), 
                        int (*ready)(void),
        if (!ps_tq_active) {
                ps_tq_active = 1;
                if (!ps_nice)
-                       schedule_work(&ps_tq);
+                       schedule_delayed_work(&ps_tq, 0);
                else
                        schedule_delayed_work(&ps_tq, ps_nice-1);
        }
        spin_unlock_irqrestore(&ps_spinlock,flags);
 }
 
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
 {
        void (*con)(void);
        unsigned long flags;
        }
        ps_tq_active = 1;
        if (!ps_nice)
-               schedule_work(&ps_tq);
+               schedule_delayed_work(&ps_tq, 0);
        else
                schedule_delayed_work(&ps_tq, ps_nice-1);
        spin_unlock_irqrestore(&ps_spinlock,flags);
 
        return IRQ_RETVAL(handled);
 }
 
-static void carm_fsm_task (void *_data)
+static void carm_fsm_task (struct work_struct *work)
 {
-       struct carm_host *host = _data;
+       struct carm_host *host =
+               container_of(work, struct carm_host, fsm_task);
        unsigned long flags;
        unsigned int state;
        int rc, i, next_dev;
        host->pdev = pdev;
        host->flags = pci_dac ? FL_DAC : 0;
        spin_lock_init(&host->lock);
-       INIT_WORK(&host->fsm_task, carm_fsm_task, host);
+       INIT_WORK(&host->fsm_task, carm_fsm_task);
        init_completion(&host->probe_comp);
 
        for (i = 0; i < ARRAY_SIZE(host->req); i++)
 
     int stalled_pipe);
 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
 static void ub_reset_enter(struct ub_dev *sc, int try);
-static void ub_reset_task(void *arg);
+static void ub_reset_task(struct work_struct *work);
 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_capacity *ret);
        schedule_work(&sc->reset_work);
 }
 
-static void ub_reset_task(void *arg)
+static void ub_reset_task(struct work_struct *work)
 {
-       struct ub_dev *sc = arg;
+       struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
        unsigned long flags;
        struct list_head *p;
        struct ub_lun *lun;
        usb_init_urb(&sc->work_urb);
        tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
        atomic_set(&sc->poison, 0);
-       INIT_WORK(&sc->reset_work, ub_reset_task, sc);
+       INIT_WORK(&sc->reset_work, ub_reset_task);
        init_waitqueue_head(&sc->reset_wait);
 
        init_timer(&sc->work_timer);
 
        }
 }
 
-static void bcm203x_work(void *user_data)
+static void bcm203x_work(struct work_struct *work)
 {
-       struct bcm203x_data *data = user_data;
+       struct bcm203x_data *data =
+               container_of(work, struct bcm203x_data, work);
 
        if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
                BT_ERR("Can't submit URB");
 
        release_firmware(firmware);
 
-       INIT_WORK(&data->work, bcm203x_work, (void *) data);
+       INIT_WORK(&data->work, bcm203x_work);
 
        usb_set_intfdata(intf, data);
 
 
  * had to poll every port to see if that port needed servicing.
  */
 static void
-do_softint(void *private_)
+do_softint(struct work_struct *work)
 {
-  struct cyclades_port *info = (struct cyclades_port *) private_;
+       struct cyclades_port *info =
+               container_of(work, struct cyclades_port, tqueue);
   struct tty_struct    *tty;
 
     tty = info->tty;
                     info->blocked_open = 0;
                     info->default_threshold = 0;
                     info->default_timeout = 0;
-                   INIT_WORK(&info->tqueue, do_softint, info);
+                   INIT_WORK(&info->tqueue, do_softint);
                    init_waitqueue_head(&info->open_wait);
                    init_waitqueue_head(&info->close_wait);
                    init_waitqueue_head(&info->shutdown_wait);
                     info->blocked_open = 0;
                     info->default_threshold = 0;
                     info->default_timeout = 0;
-                   INIT_WORK(&info->tqueue, do_softint, info);
+                   INIT_WORK(&info->tqueue, do_softint);
                    init_waitqueue_head(&info->open_wait);
                    init_waitqueue_head(&info->close_wait);
                    init_waitqueue_head(&info->shutdown_wait);
 
 
 
 static void 
-via_dmablit_workqueue(void *data)
+via_dmablit_workqueue(struct work_struct *work)
 {
-       drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+       drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
        drm_device_t *dev = blitq->dev;
        unsigned long irqsave;
        drm_via_sg_info_t *cur_sg;
                        DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
                }
                DRM_INIT_WAITQUEUE(&blitq->busy_queue);
-               INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
+               INIT_WORK(&blitq->wq, via_dmablit_workqueue);
                init_timer(&blitq->poll_timer);
                blitq->poll_timer.function = &via_dmablit_timer;
                blitq->poll_timer.data = (unsigned long) blitq;
 
 static int info_ioctl(struct tty_struct *, struct file *,
                     unsigned int, unsigned long);
 static void pc_set_termios(struct tty_struct *, struct termios *);
-static void do_softint(void *);
+static void do_softint(struct work_struct *work);
 static void pc_stop(struct tty_struct *);
 static void pc_start(struct tty_struct *);
 static void pc_throttle(struct tty_struct * tty);
 
                ch->brdchan        = bc;
                ch->mailbox        = gd; 
-               INIT_WORK(&ch->tqueue, do_softint, ch);
+               INIT_WORK(&ch->tqueue, do_softint);
                ch->board          = &boards[crd];
 
                spin_lock_irqsave(&epca_lock, flags);
 
 /* --------------------- Begin do_softint  ----------------------- */
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 { /* Begin do_softint */
-       struct channel *ch = (struct channel *) private_;
+       struct channel *ch = container_of(work, struct channel, tqueue);
        /* Called in response to a modem change event */
        if (ch && ch->magic == EPCA_MAGIC)  { /* Begin EPCA_MAGIC */
                struct tty_struct *tty = ch->tty;
 
  * -------------------------------------------------------------------
  */
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 {
-       struct esp_struct       *info = (struct esp_struct *) private_;
+       struct esp_struct       *info =
+               container_of(work, struct esp_struct, tqueue);
        struct tty_struct       *tty;
        
        tty = info->tty;
  *     do_serial_hangup() -> tty->hangup() -> esp_hangup()
  * 
  */
-static void do_serial_hangup(void *private_)
+static void do_serial_hangup(struct work_struct *work)
 {
-       struct esp_struct       *info = (struct esp_struct *) private_;
+       struct esp_struct       *info =
+               container_of(work, struct esp_struct, tqueue_hangup);
        struct tty_struct       *tty;
        
        tty = info->tty;
                info->magic = ESP_MAGIC;
                info->close_delay = 5*HZ/10;
                info->closing_wait = 30*HZ;
-               INIT_WORK(&info->tqueue, do_softint, info);
-               INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info);
+               INIT_WORK(&info->tqueue, do_softint);
+               INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
                info->config.rx_timeout = rx_timeout;
                info->config.flow_on = flow_on;
                info->config.flow_off = flow_off;
 
  * Routine to poll RTC seconds field for change as often as possible,
  * after first RTC_UIE use timer to reduce polling
  */
-static void genrtc_troutine(void *data)
+static void genrtc_troutine(struct work_struct *work)
 {
        unsigned int tmp = get_rtc_ss();
        
                irq_active = 1;
                stop_rtc_timers = 0;
                lostint = 0;
-               INIT_WORK(&genrtc_task, genrtc_troutine, NULL);
+               INIT_WORK(&genrtc_task, genrtc_troutine);
                oldsecs = get_rtc_ss();
                init_timer(&timer_task);
 
 
 #define __ALIGNED__    __attribute__((__aligned__(sizeof(long))))
 
 struct hvsi_struct {
-       struct work_struct writer;
+       struct delayed_work writer;
        struct work_struct handshaker;
        wait_queue_head_t emptyq; /* woken when outbuf is emptied */
        wait_queue_head_t stateq; /* woken when HVSI state changes */
        return 0;
 }
 
-static void hvsi_handshaker(void *arg)
+static void hvsi_handshaker(struct work_struct *work)
 {
-       struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+       struct hvsi_struct *hp =
+               container_of(work, struct hvsi_struct, handshaker);
 
        if (hvsi_handshake(hp) >= 0)
                return;
 }
 
 /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
-static void hvsi_write_worker(void *arg)
+static void hvsi_write_worker(struct work_struct *work)
 {
-       struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+       struct hvsi_struct *hp =
+               container_of(work, struct hvsi_struct, writer.work);
        unsigned long flags;
 #ifdef DEBUG
        static long start_j = 0;
                }
 
                hp = &hvsi_ports[hvsi_count];
-               INIT_WORK(&hp->writer, hvsi_write_worker, hp);
-               INIT_WORK(&hp->handshaker, hvsi_handshaker, hp);
+               INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
+               INIT_WORK(&hp->handshaker, hvsi_handshaker);
                init_waitqueue_head(&hp->emptyq);
                init_waitqueue_head(&hp->stateq);
                spin_lock_init(&hp->lock);
 
 static void serviceOutgoingFifo(i2eBordStrPtr);
 
 // Functions defined in ip2.c as part of interrupt handling
-static void do_input(void *);
-static void do_status(void *);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
 
 //***************
 //* Debug  Data *
                pCh->ClosingWaitTime  = 30*HZ;
 
                // Initialize task queue objects
-               INIT_WORK(&pCh->tqueue_input, do_input, pCh);
-               INIT_WORK(&pCh->tqueue_status, do_status, pCh);
+               INIT_WORK(&pCh->tqueue_input, do_input);
+               INIT_WORK(&pCh->tqueue_status, do_status);
 
 #ifdef IP2DEBUG_TRACE
                pCh->trace = ip2trace;
 #ifdef USE_IQ
                        schedule_work(&pCh->tqueue_input);
 #else
-                       do_input(pCh);
+                       do_input(&pCh->tqueue_input);
 #endif
 
                        // Note we do not need to maintain any flow-control credits at this
 #ifdef USE_IQ
                                                schedule_work(&pCh->tqueue_status);
 #else
-                                               do_status(pCh);
+                                               do_status(&pCh->tqueue_status);
 #endif
                                        }
                                }
 
                         unsigned int set, unsigned int clear);
 
 static void set_irq(int, int);
-static void ip2_interrupt_bh(i2eBordStrPtr pB);
+static void ip2_interrupt_bh(struct work_struct *work);
 static irqreturn_t ip2_interrupt(int irq, void *dev_id);
 static void ip2_poll(unsigned long arg);
 static inline void service_all_boards(void);
-static void do_input(void *p);
-static void do_status(void *p);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
 
 static void ip2_wait_until_sent(PTTY,int);
 
                pCh++;
        }
 ex_exit:
-       INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB);
+       INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh);
        return;
 
 err_release_region:
 
 
 /******************************************************************************/
-/* Function:   ip2_interrupt_bh(pB)                                           */
-/* Parameters: pB - pointer to the board structure                            */
+/* Function:   ip2_interrupt_bh(work)                                         */
+/* Parameters: work - pointer to the work item in the board structure         */
 /* Returns:    Nothing                                                        */
 /*                                                                            */
 /* Description:                                                               */
 /*                                                                            */
 /******************************************************************************/
 static void
-ip2_interrupt_bh(i2eBordStrPtr pB)
+ip2_interrupt_bh(struct work_struct *work)
 {
+       i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt);
 //     pB better well be set or we have a problem!  We can only get
 //     here from the IMMEDIATE queue.  Here, we process the boards.
 //     Checking pB doesn't cost much and it saves us from the sanity checkers.
        ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 );
 }
 
-static void do_input(void *p)
+static void do_input(struct work_struct *work)
 {
-       i2ChanStrPtr pCh = p;
+       i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input);
        unsigned long flags;
 
        ip2trace(CHANN, ITRC_INPUT, 21, 0 );
        }
 }
 
-static void do_status(void *p)
+static void do_status(struct work_struct *work)
 {
-       i2ChanStrPtr pCh = p;
+       i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status);
        int status;
 
        status =  i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) );
 
 /*     Interrupt handlers      */
 
 
-static void isicom_bottomhalf(void *data)
+static void isicom_bottomhalf(struct work_struct *work)
 {
-       struct isi_port *port = (struct isi_port *) data;
+       struct isi_port *port = container_of(work, struct isi_port, bh_tqueue);
        struct tty_struct *tty = port->tty;
 
        if (!tty)
 }
 
 /* hangup et all */
-static void do_isicom_hangup(void *data)
+static void do_isicom_hangup(struct work_struct *work)
 {
-       struct isi_port *port = data;
+       struct isi_port *port = container_of(work, struct isi_port, hangup_tq);
        struct tty_struct *tty;
 
        tty = port->tty;
                        port->channel = channel;
                        port->close_delay = 50 * HZ/100;
                        port->closing_wait = 3000 * HZ/100;
-                       INIT_WORK(&port->hangup_tq, do_isicom_hangup, port);
-                       INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port);
+                       INIT_WORK(&port->hangup_tq, do_isicom_hangup);
+                       INIT_WORK(&port->bh_tqueue, isicom_bottomhalf);
                        port->status = 0;
                        init_waitqueue_head(&port->open_wait);
                        init_waitqueue_head(&port->close_wait);
 
 /*
  * static functions:
  */
-static void do_moxa_softint(void *);
+static void do_moxa_softint(struct work_struct *);
 static int moxa_open(struct tty_struct *, struct file *);
 static void moxa_close(struct tty_struct *, struct file *);
 static int moxa_write(struct tty_struct *, const unsigned char *, int);
        for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) {
                ch->type = PORT_16550A;
                ch->port = i;
-               INIT_WORK(&ch->tqueue, do_moxa_softint, ch);
+               INIT_WORK(&ch->tqueue, do_moxa_softint);
                ch->tty = NULL;
                ch->close_delay = 5 * HZ / 10;
                ch->closing_wait = 30 * HZ;
 module_init(moxa_init);
 module_exit(moxa_exit);
 
-static void do_moxa_softint(void *private_)
+static void do_moxa_softint(struct work_struct *work)
 {
-       struct moxa_str *ch = (struct moxa_str *) private_;
+       struct moxa_str *ch = container_of(work, struct moxa_str, tqueue);
        struct tty_struct *tty;
 
        if (ch && (tty = ch->tty)) {
 
 /* static void   mxser_poll(unsigned long); */
 static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
 static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *);
-static void mxser_do_softint(void *);
+static void mxser_do_softint(struct work_struct *);
 static int mxser_open(struct tty_struct *, struct file *);
 static void mxser_close(struct tty_struct *, struct file *);
 static int mxser_write(struct tty_struct *, const unsigned char *, int);
                info->custom_divisor = hwconf->baud_base[i] * 16;
                info->close_delay = 5 * HZ / 10;
                info->closing_wait = 30 * HZ;
-               INIT_WORK(&info->tqueue, mxser_do_softint, info);
+               INIT_WORK(&info->tqueue, mxser_do_softint);
                info->normal_termios = mxvar_sdriver->init_termios;
                init_waitqueue_head(&info->open_wait);
                init_waitqueue_head(&info->close_wait);
        return 0;
 }
 
-static void mxser_do_softint(void *private_)
+static void mxser_do_softint(struct work_struct *work)
 {
-       struct mxser_struct *info = private_;
+       struct mxser_struct *info =
+               container_of(work, struct mxser_struct, tqueue);
        struct tty_struct *tty;
 
        tty = info->tty;
 
 /*
  * Bottom half interrupt handlers
  */
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
 static void bh_transmit(MGSLPC_INFO *info);
 static void bh_status(MGSLPC_INFO *info);
 
 
     memset(info, 0, sizeof(MGSLPC_INFO));
     info->magic = MGSLPC_MAGIC;
-    INIT_WORK(&info->task, bh_handler, info);
+    INIT_WORK(&info->task, bh_handler);
     info->max_frame_size = 4096;
     info->close_delay = 5*HZ/10;
     info->closing_wait = 30*HZ;
        return rc;
 }
 
-static void bh_handler(void* Context)
+static void bh_handler(struct work_struct *work)
 {
-       MGSLPC_INFO *info = (MGSLPC_INFO*)Context;
+       MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
        int action;
 
        if (!info)
 
        sonypi_device.bluetooth_power = state;
 }
 
-static void input_keyrelease(void *data)
+static void input_keyrelease(struct work_struct *work)
 {
        struct sonypi_keypress kp;
 
                        goto err_inpdev_unregister;
                }
 
-               INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL);
+               INIT_WORK(&sonypi_device.input_work, input_keyrelease);
        }
 
        sonypi_enable(0);
 
  *     do_sx_hangup() -> tty->hangup() -> sx_hangup()
  *
  */
-static void do_sx_hangup(void *private_)
+static void do_sx_hangup(struct work_struct *work)
 {
-       struct specialix_port   *port = (struct specialix_port *) private_;
+       struct specialix_port   *port =
+               container_of(work, struct specialix_port, tqueue_hangup);
        struct tty_struct       *tty;
 
        func_enter();
 }
 
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 {
-       struct specialix_port   *port = (struct specialix_port *) private_;
+       struct specialix_port   *port =
+               container_of(work, struct specialix_port, tqueue);
        struct tty_struct       *tty;
 
        func_enter();
        memset(sx_port, 0, sizeof(sx_port));
        for (i = 0; i < SX_NPORT * SX_NBOARD; i++) {
                sx_port[i].magic = SPECIALIX_MAGIC;
-               INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]);
-               INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]);
+               INIT_WORK(&sx_port[i].tqueue, do_softint);
+               INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup);
                sx_port[i].close_delay = 50 * HZ/100;
                sx_port[i].closing_wait = 3000 * HZ/100;
                init_waitqueue_head(&sx_port[i].open_wait);
 
 /*
  * Bottom half interrupt handlers
  */
-static void mgsl_bh_handler(void* Context);
+static void mgsl_bh_handler(struct work_struct *work);
 static void mgsl_bh_receive(struct mgsl_struct *info);
 static void mgsl_bh_transmit(struct mgsl_struct *info);
 static void mgsl_bh_status(struct mgsl_struct *info);
 /*
  *     Perform bottom half processing of work items queued by ISR.
  */
-static void mgsl_bh_handler(void* Context)
+static void mgsl_bh_handler(struct work_struct *work)
 {
-       struct mgsl_struct *info = (struct mgsl_struct*)Context;
+       struct mgsl_struct *info =
+               container_of(work, struct mgsl_struct, task);
        int action;
 
        if (!info)
        } else {
                memset(info, 0, sizeof(struct mgsl_struct));
                info->magic = MGSL_MAGIC;
-               INIT_WORK(&info->task, mgsl_bh_handler, info);
+               INIT_WORK(&info->task, mgsl_bh_handler);
                info->max_frame_size = 4096;
                info->close_delay = 5*HZ/10;
                info->closing_wait = 30*HZ;
 
 static void set_rate(struct slgt_info *info, u32 data_rate);
 
 static int  bh_action(struct slgt_info *info);
-static void bh_handler(void* context);
+static void bh_handler(struct work_struct *work);
 static void bh_transmit(struct slgt_info *info);
 static void isr_serial(struct slgt_info *info);
 static void isr_rdma(struct slgt_info *info);
 /*
  * perform bottom half processing
  */
-static void bh_handler(void* context)
+static void bh_handler(struct work_struct *work)
 {
-       struct slgt_info *info = context;
+       struct slgt_info *info = container_of(work, struct slgt_info, task);
        int action;
 
        if (!info)
        } else {
                memset(info, 0, sizeof(struct slgt_info));
                info->magic = MGSL_MAGIC;
-               INIT_WORK(&info->task, bh_handler, info);
+               INIT_WORK(&info->task, bh_handler);
                info->max_frame_size = 4096;
                info->raw_rx_size = DMABUFSIZE;
                info->close_delay = 5*HZ/10;
        spin_lock_irqsave(&info->lock, flags);
        info->pending_bh |= BH_RECEIVE;
        spin_unlock_irqrestore(&info->lock, flags);
-       bh_handler(info);
+       bh_handler(&info->task);
 }
 
 
 static void set_rate(SLMP_INFO *info, u32 data_rate);
 
 static int  bh_action(SLMP_INFO *info);
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
 static void bh_receive(SLMP_INFO *info);
 static void bh_transmit(SLMP_INFO *info);
 static void bh_status(SLMP_INFO *info);
 
 /* Perform bottom half processing of work items queued by ISR.
  */
-void bh_handler(void* Context)
+void bh_handler(struct work_struct *work)
 {
-       SLMP_INFO *info = (SLMP_INFO*)Context;
+       SLMP_INFO *info = container_of(work, SLMP_INFO, task);
        int action;
 
        if (!info)
        } else {
                memset(info, 0, sizeof(SLMP_INFO));
                info->magic = MGSL_MAGIC;
-               INIT_WORK(&info->task, bh_handler, info);
+               INIT_WORK(&info->task, bh_handler);
                info->max_frame_size = 4096;
                info->close_delay = 5*HZ/10;
                info->closing_wait = 30*HZ;
 
        schedule_work(&chip->work);
 }
 
-static void timeout_work(void *ptr)
+static void timeout_work(struct work_struct *work)
 {
-       struct tpm_chip *chip = ptr;
+       struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
 
        down(&chip->buffer_mutex);
        atomic_set(&chip->data_pending, 0);
        init_MUTEX(&chip->tpm_mutex);
        INIT_LIST_HEAD(&chip->list);
 
-       INIT_WORK(&chip->work, timeout_work, chip);
+       INIT_WORK(&chip->work, timeout_work);
 
        init_timer(&chip->user_read_timer);
        chip->user_read_timer.function = user_reader_timeout;
 
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-void cn_queue_wrapper(void *data)
+void cn_queue_wrapper(struct work_struct *work)
 {
-       struct cn_callback_data *d = data;
+       struct cn_callback_entry *cbq =
+               container_of(work, struct cn_callback_entry, work.work);
+       struct cn_callback_data *d = &cbq->data;
 
        d->callback(d->callback_priv);
 
        memcpy(&cbq->id.id, id, sizeof(struct cb_id));
        cbq->data.callback = callback;
        
-       INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
+       INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper);
        return cbq;
 }
 
 
        spin_lock_bh(&dev->cbdev->queue_lock);
        list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
                if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
-                       if (likely(!test_bit(0, &__cbq->work.pending) &&
+                       if (likely(!test_bit(WORK_STRUCT_PENDING,
+                                            &__cbq->work.work.management) &&
                                        __cbq->data.ddata == NULL)) {
                                __cbq->data.callback_priv = msg;
 
                                __cbq->data.ddata = data;
                                __cbq->data.destruct_data = destruct_data;
 
-                               if (queue_work(dev->cbdev->cn_queue,
-                                               &__cbq->work))
+                               if (queue_delayed_work(
+                                           dev->cbdev->cn_queue,
+                                           &__cbq->work, 0))
                                        err = 0;
                        } else {
-				struct work_struct *w;
 				struct cn_callback_data *d;
+				struct cn_callback_entry *__new_cbq;
 				
-				w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
-				if (w) {
-					d = (struct cn_callback_data *)(w+1);
-
+				/* Allocate a new entry rather than reusing
+				 * __cbq, which is still the list iterator and
+				 * the source of the callback copied below. */
+				__new_cbq = kzalloc(sizeof(*__new_cbq), GFP_ATOMIC);
+				if (__new_cbq) {
+					d = &__new_cbq->data;
 					d->callback_priv = msg;
 					d->callback = __cbq->data.callback;
 					d->ddata = data;
 					d->destruct_data = destruct_data;
-					d->free = w;
+					d->free = __new_cbq;
 
-					INIT_LIST_HEAD(&w->entry);
-					w->pending = 0;
-					w->func = &cn_queue_wrapper;
-					w->data = d;
-					init_timer(&w->timer);
+					INIT_DELAYED_WORK(&__new_cbq->work,
+							  &cn_queue_wrapper);
 					
-					if (queue_work(dev->cbdev->cn_queue, w))
+					if (queue_delayed_work(
+						    dev->cbdev->cn_queue,
+						    &__new_cbq->work, 0))
 						err = 0;
 					else {
-						kfree(w);
+						kfree(__new_cbq);
                                                err = -EINVAL;
                                        }
                                } else
 
 #define MAX_SAMPLING_DOWN_FACTOR               (10)
 #define TRANSITION_LATENCY_LIMIT               (10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
        struct cpufreq_policy   *cur_policy;
  * is recursive for the same process. -Venki
  */
 static DEFINE_MUTEX    (dbs_mutex);
-static DECLARE_WORK    (dbs_work, do_dbs_timer, NULL);
+static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
 struct dbs_tuners {
        unsigned int            sampling_rate;
        }
 }
 
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 { 
        int i;
        lock_cpu_hotplug();
 
 static inline void dbs_timer_init(void)
 {
-       INIT_WORK(&dbs_work, do_dbs_timer, NULL);
        schedule_delayed_work(&dbs_work,
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        return;
 
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER   (1000)
 #define TRANSITION_LATENCY_LIMIT               (10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
+
+/* Sampling types */
+enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
        cputime64_t prev_cpu_idle;
        cputime64_t prev_cpu_wall;
        struct cpufreq_policy *cur_policy;
-       struct work_struct work;
+       struct delayed_work work;
+       enum dbs_sample sample_type;
        unsigned int enable;
        struct cpufreq_frequency_table *freq_table;
        unsigned int freq_lo;
        }
 }
 
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 {
        unsigned int cpu = smp_processor_id();
        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+       enum dbs_sample sample_type = dbs_info->sample_type;
        /* We want all CPUs to do sampling nearly on same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+       /* Permit rescheduling of this work item */
+       work_release(work);
+
        delay -= jiffies % delay;
 
        if (!dbs_info->enable)
                return;
        /* Common NORMAL_SAMPLE setup */
-       INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
+       dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        if (!dbs_tuners_ins.powersave_bias ||
-           (unsigned long) data == DBS_NORMAL_SAMPLE) {
+           sample_type == DBS_NORMAL_SAMPLE) {
                lock_cpu_hotplug();
                dbs_check_cpu(dbs_info);
                unlock_cpu_hotplug();
                if (dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
-                       INIT_WORK(&dbs_info->work, do_dbs_timer,
-                                       (void *)DBS_SUB_SAMPLE);
+                       dbs_info->sample_type = DBS_SUB_SAMPLE;
                        delay = dbs_info->freq_hi_jiffies;
                }
        } else {
        delay -= jiffies % delay;
 
        ondemand_powersave_bias_init();
-       INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
+       INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+       dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
 
 
        return t1;
 }
 
-static void ds1374_set_work(void *arg)
+static ulong new_time;
+
+static void ds1374_set_work(struct work_struct *work)
 {
        ulong t1, t2;
        int limit = 10;         /* arbitrary retry limit */
 
-       t1 = *(ulong *) arg;
+       t1 = new_time;
 
        mutex_lock(&ds1374_mutex);
 
                         "can't confirm time set from rtc chip\n");
 }
 
-static ulong new_time;
-
 static struct workqueue_struct *ds1374_workqueue;
 
-static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time);
+static DECLARE_WORK(ds1374_work, ds1374_set_work);
 
 int ds1374_set_rtc_time(ulong nowtime)
 {
        if (in_interrupt())
                queue_work(ds1374_workqueue, &ds1374_work);
        else
-               ds1374_set_work(&new_time);
+               ds1374_set_work(NULL);
 
        return 0;
 }
 
 #include "config_roms.h"
 
 
-static void delayed_reset_bus(void * __reset_info)
+static void delayed_reset_bus(struct work_struct *work)
 {
-       struct hpsb_host *host = (struct hpsb_host*)__reset_info;
+       struct hpsb_host *host =
+               container_of(work, struct hpsb_host, delayed_reset.work);
        int generation = host->csr.generation + 1;
 
        /* The generation field rolls over to 2 rather than 0 per IEEE
 
        atomic_set(&h->generation, 0);
 
-       INIT_WORK(&h->delayed_reset, delayed_reset_bus, h);
+       INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus);
        
        init_timer(&h->timeout);
        h->timeout.data = (unsigned long) h;
                 * Config ROM in the near future. */
                reset_delay = HZ;
 
-       PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host);
+       PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus);
        schedule_delayed_work(&host->delayed_reset, reset_delay);
 
        return 0;
 
        struct class_device class_dev;
 
        int update_config_rom;
-       struct work_struct delayed_reset;
+       struct delayed_work delayed_reset;
        unsigned int config_roms;
 
        struct list_head addr_space;
 
                scsi_unblock_requests(scsi_id->scsi_host);
 }
 
-static void sbp2util_write_orb_pointer(void *p)
+static void sbp2util_write_orb_pointer(struct work_struct *work)
 {
+       struct scsi_id_instance_data *scsi_id =
+               container_of(work, struct scsi_id_instance_data,
+                            protocol_work.work);
        quadlet_t data[2];
 
-       data[0] = ORB_SET_NODE_ID(
-                       ((struct scsi_id_instance_data *)p)->hi->host->node_id);
-       data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma;
+       data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id);
+       data[1] = scsi_id->last_orb_dma;
        sbp2util_cpu_to_be32_buffer(data, 8);
-       sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8);
+       sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8);
 }
 
-static void sbp2util_write_doorbell(void *p)
+static void sbp2util_write_doorbell(struct work_struct *work)
 {
-       sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4);
+       struct scsi_id_instance_data *scsi_id =
+               container_of(work, struct scsi_id_instance_data,
+                            protocol_work.work);
+       sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4);
 }
 
 /*
        INIT_LIST_HEAD(&scsi_id->scsi_list);
        spin_lock_init(&scsi_id->sbp2_command_orb_lock);
        atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
-       INIT_WORK(&scsi_id->protocol_work, NULL, NULL);
+       INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL);
 
        ud->device.driver_data = scsi_id;
 
                 * We do not accept new commands until the job is over.
                 */
                scsi_block_requests(scsi_id->scsi_host);
-               PREPARE_WORK(&scsi_id->protocol_work,
+               PREPARE_DELAYED_WORK(&scsi_id->protocol_work,
                             last_orb ? sbp2util_write_doorbell:
-                                       sbp2util_write_orb_pointer,
-                            scsi_id);
-               schedule_work(&scsi_id->protocol_work);
+                                       sbp2util_write_orb_pointer);
+               schedule_delayed_work(&scsi_id->protocol_work, 0);
        }
 }
 
 
        unsigned workarounds;
 
        atomic_t state;
-       struct work_struct protocol_work;
+       struct delayed_work protocol_work;
 };
 
 /* For use in scsi_id_instance_data.state */
 
        int status;
 };
 
-static void process_req(void *data);
+static void process_req(struct work_struct *work);
 
 static DEFINE_MUTEX(lock);
 static LIST_HEAD(req_list);
-static DECLARE_WORK(work, process_req, NULL);
+static DECLARE_DELAYED_WORK(work, process_req);
 static struct workqueue_struct *addr_wq;
 
 void rdma_addr_register_client(struct rdma_addr_client *client)
        return ret;
 }
 
-static void process_req(void *data)
+static void process_req(struct work_struct *work)
 {
        struct addr_req *req, *temp_req;
        struct sockaddr_in *src_in, *dst_in;
 
        kfree(tprops);
 }
 
-static void ib_cache_task(void *work_ptr)
+static void ib_cache_task(struct work_struct *_work)
 {
-       struct ib_update_work *work = work_ptr;
+       struct ib_update_work *work =
+               container_of(_work, struct ib_update_work, work);
 
        ib_cache_update(work->device, work->port_num);
        kfree(work);
            event->event == IB_EVENT_CLIENT_REREGISTER) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
-                       INIT_WORK(&work->work, ib_cache_task, work);
+                       INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        schedule_work(&work->work);
 
 };
 
 struct cm_work {
-       struct work_struct work;
+       struct delayed_work work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        atomic_t work_count;
 };
 
-static void cm_work_handler(void *data);
+static void cm_work_handler(struct work_struct *work);
 
 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 {
                return ERR_PTR(-ENOMEM);
 
        timewait_info->work.local_id = local_id;
-       INIT_WORK(&timewait_info->work.work, cm_work_handler,
-                 &timewait_info->work);
+       INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
 }
        }
 }
 
-static void cm_work_handler(void *data)
+static void cm_work_handler(struct work_struct *_work)
 {
-       struct cm_work *work = data;
+       struct cm_work *work = container_of(_work, struct cm_work, work.work);
        int ret;
 
        switch (work->cm_event.event) {
         * we need to find the cm_id once we're in the context of the
         * worker thread, rather than holding a reference on it.
         */
-       INIT_WORK(&work->work, cm_work_handler, work);
+       INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->local_id = cm_id->local_id;
        work->remote_id = cm_id->remote_id;
        work->mad_recv_wc = NULL;
        work->cm_event.event = IB_CM_USER_ESTABLISHED;
-       queue_work(cm.wq, &work->work);
+       queue_delayed_work(cm.wq, &work->work, 0);
 out:
        return ret;
 }
                return;
        }
 
-       INIT_WORK(&work->work, cm_work_handler, work);
+       INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->cm_event.event = event;
        work->mad_recv_wc = mad_recv_wc;
        work->port = (struct cm_port *)mad_agent->context;
-       queue_work(cm.wq, &work->work);
+       queue_delayed_work(cm.wq, &work->work, 0);
 }
 
 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
 
        return (id_priv->query_id < 0) ? id_priv->query_id : 0;
 }
 
-static void cma_work_handler(void *data)
+static void cma_work_handler(struct work_struct *_work)
 {
-       struct cma_work *work = data;
+       struct cma_work *work = container_of(_work, struct cma_work, work);
        struct rdma_id_private *id_priv = work->id;
        int destroy = 0;
 
                return -ENOMEM;
 
        work->id = id_priv;
-       INIT_WORK(&work->work, cma_work_handler, work);
+       INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
                return -ENOMEM;
 
        work->id = id_priv;
-       INIT_WORK(&work->work, cma_work_handler, work);
+       INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        }
 
        work->id = id_priv;
-       INIT_WORK(&work->work, cma_work_handler, work);
+       INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ADDR_QUERY;
        work->new_state = CMA_ADDR_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
 
  * thread asleep on the destroy_comp list vs. an object destroyed
  * here synchronously when the last reference is removed.
  */
-static void cm_work_handler(void *arg)
+static void cm_work_handler(struct work_struct *_work)
 {
-       struct iwcm_work *work = arg, lwork;
+       struct iwcm_work lwork, *work =
+               container_of(_work, struct iwcm_work, work);
        struct iwcm_id_private *cm_id_priv = work->cm_id;
        unsigned long flags;
        int empty;
                goto out;
        }
 
-       INIT_WORK(&work->work, cm_work_handler, work);
+       INIT_WORK(&work->work, cm_work_handler);
        work->cm_id = cm_id_priv;
        work->event = *iw_event;
 
 
 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad);
 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void timeout_sends(void *data);
-static void local_completions(void *data);
+static void timeout_sends(struct work_struct *work);
+static void local_completions(struct work_struct *work);
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
                              u8 mgmt_class);
        INIT_LIST_HEAD(&mad_agent_priv->wait_list);
        INIT_LIST_HEAD(&mad_agent_priv->done_list);
        INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
-       INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
+       INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
        INIT_LIST_HEAD(&mad_agent_priv->local_list);
-       INIT_WORK(&mad_agent_priv->local_work, local_completions,
-                  mad_agent_priv);
+       INIT_WORK(&mad_agent_priv->local_work, local_completions);
        atomic_set(&mad_agent_priv->refcount, 1);
        init_completion(&mad_agent_priv->comp);
 
 /*
  * IB MAD completion callback
  */
-static void ib_mad_completion_handler(void *data)
+static void ib_mad_completion_handler(struct work_struct *work)
 {
        struct ib_mad_port_private *port_priv;
        struct ib_wc wc;
 
-       port_priv = (struct ib_mad_port_private *)data;
+       port_priv = container_of(work, struct ib_mad_port_private, work);
        ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
 
        while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
 }
 EXPORT_SYMBOL(ib_cancel_mad);
 
-static void local_completions(void *data)
+static void local_completions(struct work_struct *work)
 {
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_local_private *local;
        struct ib_wc wc;
        struct ib_mad_send_wc mad_send_wc;
 
-       mad_agent_priv = (struct ib_mad_agent_private *)data;
+       mad_agent_priv =
+               container_of(work, struct ib_mad_agent_private, local_work);
 
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        while (!list_empty(&mad_agent_priv->local_list)) {
        return ret;
 }
 
-static void timeout_sends(void *data)
+static void timeout_sends(struct work_struct *work)
 {
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        unsigned long flags, delay;
 
-       mad_agent_priv = (struct ib_mad_agent_private *)data;
+       mad_agent_priv = container_of(work, struct ib_mad_agent_private,
+                                     timed_work.work);
        mad_send_wc.vendor_err = 0;
 
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
                ret = -ENOMEM;
                goto error8;
        }
-       INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
+       INIT_WORK(&port_priv->work, ib_mad_completion_handler);
 
        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_add_tail(&port_priv->port_list, &ib_mad_port_list);
 
        struct list_head send_list;
        struct list_head wait_list;
        struct list_head done_list;
-       struct work_struct timed_work;
+       struct delayed_work timed_work;
        unsigned long timeout;
        struct list_head local_list;
        struct work_struct local_work;
 
 struct mad_rmpp_recv {
        struct ib_mad_agent_private *agent;
        struct list_head list;
-       struct work_struct timeout_work;
-       struct work_struct cleanup_work;
+       struct delayed_work timeout_work;
+       struct delayed_work cleanup_work;
        struct completion comp;
        enum rmpp_state state;
        spinlock_t lock;
        }
 }
 
-static void recv_timeout_handler(void *data)
+static void recv_timeout_handler(struct work_struct *work)
 {
-       struct mad_rmpp_recv *rmpp_recv = data;
+       struct mad_rmpp_recv *rmpp_recv =
+               container_of(work, struct mad_rmpp_recv, timeout_work.work);
        struct ib_mad_recv_wc *rmpp_wc;
        unsigned long flags;
 
        ib_free_recv_mad(rmpp_wc);
 }
 
-static void recv_cleanup_handler(void *data)
+static void recv_cleanup_handler(struct work_struct *work)
 {
-       struct mad_rmpp_recv *rmpp_recv = data;
+       struct mad_rmpp_recv *rmpp_recv =
+               container_of(work, struct mad_rmpp_recv, cleanup_work.work);
        unsigned long flags;
 
        spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
 
        rmpp_recv->agent = agent;
        init_completion(&rmpp_recv->comp);
-       INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
-       INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
+       INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
+       INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
        spin_lock_init(&rmpp_recv->lock);
        rmpp_recv->state = RMPP_STATE_ACTIVE;
        atomic_set(&rmpp_recv->refcount, 1);
 
        kfree(sm_ah);
 }
 
-static void update_sm_ah(void *port_ptr)
+static void update_sm_ah(struct work_struct *work)
 {
-       struct ib_sa_port *port = port_ptr;
+       struct ib_sa_port *port =
+               container_of(work, struct ib_sa_port, update_task);
        struct ib_sa_sm_ah *new_ah, *old_ah;
        struct ib_port_attr port_attr;
        struct ib_ah_attr   ah_attr;
                if (IS_ERR(sa_dev->port[i].agent))
                        goto err;
 
-               INIT_WORK(&sa_dev->port[i].update_task,
-                         update_sm_ah, &sa_dev->port[i]);
+               INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
        }
 
        ib_set_client_data(device, &sa_client, sa_dev);
                goto err;
 
        for (i = 0; i <= e - s; ++i)
-               update_sm_ah(&sa_dev->port[i]);
+               update_sm_ah(&sa_dev->port[i].update_task);
 
        return;
 
 
 	up_write(&current->mm->mmap_sem);
 }
 
-static void ib_umem_account(void *work_ptr)
+static void ib_umem_account(struct work_struct *_work)
 {
-       struct ib_umem_account_work *work = work_ptr;
+       struct ib_umem_account_work *work =
+               container_of(_work, struct ib_umem_account_work, work);
 
        down_write(&work->mm->mmap_sem);
        work->mm->locked_vm -= work->diff;
                return;
        }
 
-       INIT_WORK(&work->work, ib_umem_account, work);
+       INIT_WORK(&work->work, ib_umem_account);
        work->mm   = mm;
        work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
 
 
        unsigned long num_pages;
 };
 
-static void user_pages_account(void *ptr)
+static void user_pages_account(struct work_struct *_work)
 {
-       struct ipath_user_pages_work *work = ptr;
+       struct ipath_user_pages_work *work =
+               container_of(_work, struct ipath_user_pages_work, work);
 
        down_write(&work->mm->mmap_sem);
        work->mm->locked_vm -= work->num_pages;
 
        goto bail;
 
-       INIT_WORK(&work->work, user_pages_account, work);
+       INIT_WORK(&work->work, user_pages_account);
        work->mm = mm;
        work->num_pages = num_pages;
 
 
 module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
 MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
 
-static void catas_reset(void *work_ptr)
+static void catas_reset(struct work_struct *work)
 {
        struct mthca_dev *dev, *tmpdev;
        LIST_HEAD(tlist);
 
 int __init mthca_catas_init(void)
 {
-       INIT_WORK(&catas_work, catas_reset, NULL);
+       INIT_WORK(&catas_work, catas_reset);
 
        catas_wq = create_singlethread_workqueue("mthca_catas");
        if (!catas_wq)
 
        struct list_head multicast_list;
        struct rb_root multicast_tree;
 
-       struct work_struct pkey_task;
-       struct work_struct mcast_task;
+       struct delayed_work pkey_task;
+       struct delayed_work mcast_task;
        struct work_struct flush_task;
        struct work_struct restart_task;
-       struct work_struct ah_reap_task;
+       struct delayed_work ah_reap_task;
 
        struct ib_device *ca;
        u8                port;
 
 void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn);
-void ipoib_reap_ah(void *dev_ptr);
+void ipoib_reap_ah(struct work_struct *work);
 
 void ipoib_flush_paths(struct net_device *dev);
 struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
 
 int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_ib_dev_flush(void *dev);
+void ipoib_ib_dev_flush(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
 int ipoib_ib_dev_open(struct net_device *dev);
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_dev_cleanup(struct net_device *dev);
 
-void ipoib_mcast_join_task(void *dev_ptr);
+void ipoib_mcast_join_task(struct work_struct *work);
 void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
 
-void ipoib_mcast_restart_task(void *dev_ptr);
+void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
 int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
 int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
 int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 
-void ipoib_pkey_poll(void *dev);
+void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 
        spin_unlock_irq(&priv->tx_lock);
 }
 
-void ipoib_reap_ah(void *dev_ptr)
+void ipoib_reap_ah(struct work_struct *work)
 {
-       struct net_device *dev = dev_ptr;
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_dev_priv *priv =
+               container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
+       struct net_device *dev = priv->dev;
 
        __ipoib_reap_ah(dev);
 
        return 0;
 }
 
-void ipoib_ib_dev_flush(void *_dev)
+void ipoib_ib_dev_flush(struct work_struct *work)
 {
-       struct net_device *dev = (struct net_device *)_dev;
-       struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
+       struct ipoib_dev_priv *cpriv, *priv =
+               container_of(work, struct ipoib_dev_priv, flush_task);
+       struct net_device *dev = priv->dev;
 
        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                ipoib_ib_dev_up(dev);
-               ipoib_mcast_restart_task(dev);
+               ipoib_mcast_restart_task(&priv->restart_task);
        }
 
        mutex_lock(&priv->vlan_mutex);
 
        /* Flush any child interfaces too */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
-               ipoib_ib_dev_flush(cpriv->dev);
+               ipoib_ib_dev_flush(&cpriv->flush_task);
 
        mutex_unlock(&priv->vlan_mutex);
 }
  * change async notification is available.
  */
 
-void ipoib_pkey_poll(void *dev_ptr)
+void ipoib_pkey_poll(struct work_struct *work)
 {
-       struct net_device *dev = dev_ptr;
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_dev_priv *priv =
+               container_of(work, struct ipoib_dev_priv, pkey_task.work);
+       struct net_device *dev = priv->dev;
 
        ipoib_pkey_dev_check_presence(dev);
 
 
        INIT_LIST_HEAD(&priv->dead_ahs);
        INIT_LIST_HEAD(&priv->multicast_list);
 
-       INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
-       INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
-       INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
-       INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
-       INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
+       INIT_DELAYED_WORK(&priv->pkey_task,    ipoib_pkey_poll);
+       INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
+       INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush);
+       INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+       INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
 }
 
 struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
 
                mcast->backoff = 1;
                mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                       queue_work(ipoib_workqueue, &priv->mcast_task);
+                       queue_delayed_work(ipoib_workqueue,
+                                          &priv->mcast_task, 0);
                mutex_unlock(&mcast_mutex);
                complete(&mcast->done);
                return;
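
With only the delayed variant left for these items, "run now" becomes a zero
delay.  In this kernel a delay of 0 makes queue_delayed_work() fall through
to queue_work() rather than arming the timer, so behaviour is unchanged:

	/* was: queue_work(ipoib_workqueue, &priv->mcast_task); */
	queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);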
 
        if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
                if (status == -ETIMEDOUT)
-                       queue_work(ipoib_workqueue, &priv->mcast_task);
+                       queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+                                          0);
                else
                        queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
                                           mcast->backoff * HZ);
                mcast->query_id = ret;
 }
 
-void ipoib_mcast_join_task(void *dev_ptr)
+void ipoib_mcast_join_task(struct work_struct *work)
 {
-       struct net_device *dev = dev_ptr;
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_dev_priv *priv =
+               container_of(work, struct ipoib_dev_priv, mcast_task.work);
+       struct net_device *dev = priv->dev;
 
        if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
                return;
 
        mutex_lock(&mcast_mutex);
        if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-               queue_work(ipoib_workqueue, &priv->mcast_task);
+               queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
        mutex_unlock(&mcast_mutex);
 
        spin_lock_irq(&priv->lock);
        }
 }
 
-void ipoib_mcast_restart_task(void *dev_ptr)
+void ipoib_mcast_restart_task(struct work_struct *work)
 {
-       struct net_device *dev = dev_ptr;
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_dev_priv *priv =
+               container_of(work, struct ipoib_dev_priv, restart_task);
+       struct net_device *dev = priv->dev;
        struct dev_mc_list *mclist;
        struct ipoib_mcast *mcast, *tmcast;
        LIST_HEAD(remove_list);
 
 
 static void iser_cq_tasklet_fn(unsigned long data);
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static void iser_comp_error_worker(void *data);
+static void iser_comp_error_worker(struct work_struct *work);
 
 static void iser_cq_event_callback(struct ib_event *cause, void *context)
 {
        init_waitqueue_head(&ib_conn->wait);
        atomic_set(&ib_conn->post_recv_buf_count, 0);
        atomic_set(&ib_conn->post_send_buf_count, 0);
-       INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker,
-                 ib_conn);
+       INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
        INIT_LIST_HEAD(&ib_conn->conn_list);
        spin_lock_init(&ib_conn->lock);
 
        return ret_val;
 }
 
-static void iser_comp_error_worker(void *data)
+static void iser_comp_error_worker(struct work_struct *work)
 {
-       struct iser_conn *ib_conn = data;
+       struct iser_conn *ib_conn =
+               container_of(work, struct iser_conn, comperror_work);
 
        /* getting here when the state is UP means that the conn is being *
         * terminated asynchronously from the iSCSI layer's perspective.  */
 
        wait_for_completion(&target->done);
 }
 
-static void srp_remove_work(void *target_ptr)
+static void srp_remove_work(struct work_struct *work)
 {
-       struct srp_target_port *target = target_ptr;
+       struct srp_target_port *target =
+               container_of(work, struct srp_target_port, work);
 
        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state != SRP_TARGET_DEAD) {
        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state == SRP_TARGET_CONNECTING) {
                target->state = SRP_TARGET_DEAD;
-               INIT_WORK(&target->work, srp_remove_work, target);
+               INIT_WORK(&target->work, srp_remove_work);
                schedule_work(&target->work);
        }
        spin_unlock_irq(target->scsi_host->host_lock);
 
  * were in.
  */
 static void
-lkkbd_reinit (void *data)
+lkkbd_reinit (struct work_struct *work)
 {
-       struct lkkbd *lk = data;
+       struct lkkbd *lk = container_of(work, struct lkkbd, tq);
        int division;
        unsigned char leds_on = 0;
        unsigned char leds_off = 0;
 
        lk->serio = serio;
        lk->dev = input_dev;
-       INIT_WORK (&lk->tq, lkkbd_reinit, lk);
+       INIT_WORK (&lk->tq, lkkbd_reinit);
        lk->bell_volume = bell_volume;
        lk->keyclick_volume = keyclick_volume;
        lk->ctrlclick_volume = ctrlclick_volume;
 
  * were in.
  */
 
-static void sunkbd_reinit(void *data)
+static void sunkbd_reinit(struct work_struct *work)
 {
-       struct sunkbd *sunkbd = data;
+       struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
 
        wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);
 
        sunkbd->serio = serio;
        sunkbd->dev = input_dev;
        init_waitqueue_head(&sunkbd->wait);
-       INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd);
+       INIT_WORK(&sunkbd->tq, sunkbd_reinit);
        snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys);
 
        serio_set_drvdata(serio, sunkbd);
 
  * psmouse_resync() attempts to re-validate current protocol.
  */
 
-static void psmouse_resync(void *p)
+static void psmouse_resync(struct work_struct *work)
 {
-       struct psmouse *psmouse = p, *parent = NULL;
+       struct psmouse *parent = NULL, *psmouse =
+               container_of(work, struct psmouse, resync_work);
        struct serio *serio = psmouse->ps2dev.serio;
        psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
        int failed = 0, enabled = 0;
                goto out;
 
        ps2_init(&psmouse->ps2dev, serio);
-       INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse);
+       INIT_WORK(&psmouse->resync_work, psmouse_resync);
        psmouse->dev = input_dev;
        snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);
 
 
 }
 
 void
-actcapi_dispatch(act2000_card *card)
+actcapi_dispatch(struct work_struct *work)
 {
+       struct act2000_card *card =
+               container_of(work, struct act2000_card, rcv_tq);
        struct sk_buff *skb;
        actcapi_msg *msg;
        __u16 ccmd;
 
 extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *);
 extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *);
 extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8);
-extern void actcapi_dispatch(act2000_card *);
+extern void actcapi_dispatch(struct work_struct *);
 #ifdef DEBUG_MSG
 extern void actcapi_debug_msg(struct sk_buff *skb, int);
 #else
 
 }
 
 static void
-act2000_transmit(struct act2000_card *card)
+act2000_transmit(struct work_struct *work)
 {
+       struct act2000_card *card =
+               container_of(work, struct act2000_card, snd_tq);
+
        switch (card->bus) {
                case ACT2000_BUS_ISA:
                        act2000_isa_send(card);
 }
 
 static void
-act2000_receive(struct act2000_card *card)
+act2000_receive(struct work_struct *work)
 {
+       struct act2000_card *card =
+               container_of(work, struct act2000_card, poll_tq);
+
        switch (card->bus) {
                case ACT2000_BUS_ISA:
                        act2000_isa_receive(card);
        act2000_card * card = (act2000_card *)data;
        unsigned long flags;
 
-       act2000_receive(card);
+       act2000_receive(&card->poll_tq);
        spin_lock_irqsave(&card->lock, flags);
        mod_timer(&card->ptimer, jiffies+3);
        spin_unlock_irqrestore(&card->lock, flags);
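
Converted handlers that are also invoked synchronously (here from a timer)
now take the address of the embedded work item, so container_of() inside
them still resolves to the right object; a one-line sketch of the call site:

	/* was: act2000_receive(card); */
	act2000_receive(&card->poll_tq);
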
        skb_queue_head_init(&card->sndq);
        skb_queue_head_init(&card->rcvq);
        skb_queue_head_init(&card->ackq);
-       INIT_WORK(&card->snd_tq, (void *) (void *) act2000_transmit, card);
-       INIT_WORK(&card->rcv_tq, (void *) (void *) actcapi_dispatch, card);
-       INIT_WORK(&card->poll_tq, (void *) (void *) act2000_receive, card);
+       INIT_WORK(&card->snd_tq, act2000_transmit);
+       INIT_WORK(&card->rcv_tq, actcapi_dispatch);
+       INIT_WORK(&card->poll_tq, act2000_receive);
        init_timer(&card->ptimer);
        card->interface.owner = THIS_MODULE;
         card->interface.channels = ACT2000_BCH;
 
        }
 }
 
-static void notify_handler(void *data)
+static void notify_handler(struct work_struct *work)
 {
-       struct capi_notifier *np = data;
+       struct capi_notifier *np =
+               container_of(work, struct capi_notifier, work);
 
        switch (np->cmd) {
        case KCI_CONTRUP:
        if (!np)
                return -ENOMEM;
 
-       INIT_WORK(&np->work, notify_handler, np);
+       INIT_WORK(&np->work, notify_handler);
        np->cmd = cmd;
        np->controller = controller;
        np->applid = applid;
        
 /* -------- Receiver ------------------------------------------ */
 
-static void recv_handler(void *_ap)
+static void recv_handler(struct work_struct *work)
 {
        struct sk_buff *skb;
-       struct capi20_appl *ap = (struct capi20_appl *) _ap;
+       struct capi20_appl *ap =
+               container_of(work, struct capi20_appl, recv_work);
 
        if ((!ap) || (ap->release_in_progress))
                return;
        ap->callback = NULL;
        init_MUTEX(&ap->recv_sem);
        skb_queue_head_init(&ap->recv_queue);
-       INIT_WORK(&ap->recv_work, recv_handler, (void *)ap);
+       INIT_WORK(&ap->recv_work, recv_handler);
        ap->release_in_progress = 0;
 
        write_unlock_irqrestore(&application_lock, flags);
 
 
 
 static void
-Amd7930_bh(struct IsdnCardState *cs)
+Amd7930_bh(struct work_struct *work)
 {
-
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
         struct PStack *stptr;
 
        if (!cs)
 void __devinit
 setup_Amd7930(struct IsdnCardState *cs)
 {
-        INIT_WORK(&cs->tqueue, (void *)(void *) Amd7930_bh, cs);
+        INIT_WORK(&cs->tqueue, Amd7930_bh);
        cs->dbusytimer.function = (void *) dbusy_timer_handler;
        cs->dbusytimer.data = (long) cs;
        init_timer(&cs->dbusytimer);
 
        cs->tx_skb = NULL;
        cs->tx_cnt = 0;
        cs->event = 0;
-       cs->tqueue.data = cs;
 
        skb_queue_head_init(&cs->rq);
        skb_queue_head_init(&cs->sq);
 static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg);
 static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs);
 static void hisax_bc_close(struct BCState *bcs);
-static void hisax_bh(struct IsdnCardState *cs);
+static void hisax_bh(struct work_struct *work);
 static void EChannel_proc_rcv(struct hisax_d_if *d_if);
 
 int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
        hisax_d_if->cs = cs;
        cs->hw.hisax_d_if = hisax_d_if;
        cs->cardmsg = hisax_cardmsg;
-       INIT_WORK(&cs->tqueue, (void *)(void *)hisax_bh, cs);
+       INIT_WORK(&cs->tqueue, hisax_bh);
        cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1;
        for (i = 0; i < 2; i++) {
                cs->bcs[i].BC_SetStack = hisax_bc_setstack;
        schedule_work(&cs->tqueue);
 }
 
-static void hisax_bh(struct IsdnCardState *cs)
+static void hisax_bh(struct work_struct *work)
 {
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
        struct PStack *st;
        int pr;
 
 
 /* bottom half handler for interrupt */
 /*************************************/
 static void
-hfc4s8s_bh(hfc4s8s_hw * hw)
+hfc4s8s_bh(struct work_struct *work)
 {
+       hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue);
        u_char b;
        struct hfc4s8s_l1 *l1p;
        volatile u_char *fifo_stat;
                goto out;
        }
 
-       INIT_WORK(&hw->tqueue, (void *) (void *) hfc4s8s_bh, hw);
+       INIT_WORK(&hw->tqueue, hfc4s8s_bh);
 
        if (request_irq
            (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) {
 
 }
 
 static void
-hfcd_bh(struct IsdnCardState *cs)
+hfcd_bh(struct work_struct *work)
 {
-       if (!cs)
-               return;
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
+
        if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
                switch (cs->dc.hfcd.ph_state) {
                        case (0):
        cs->dbusytimer.function = (void *) hfc_dbusy_timer;
        cs->dbusytimer.data = (long) cs;
        init_timer(&cs->dbusytimer);
-       INIT_WORK(&cs->tqueue, (void *)(void *) hfcd_bh, cs);
+       INIT_WORK(&cs->tqueue, hfcd_bh);
 }
 
 /* handle L1 state changes */
 /***************************/
 static void
-hfcpci_bh(struct IsdnCardState *cs)
+hfcpci_bh(struct work_struct *work)
 {
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
        u_long  flags;
 //      struct PStack *stptr;
 
                Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
                /* At this point the needed PCI config is done */
                /* fifos are still not enabled */
-               INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs);
+               INIT_WORK(&cs->tqueue, hfcpci_bh);
                cs->setstack_d = setstack_hfcpci;
                cs->BC_Send_Data = &hfcpci_send_data;
                cs->readisac = NULL;
 
 /* handle L1 state changes */
 /***************************/
 static void
-hfcsx_bh(struct IsdnCardState *cs)
+hfcsx_bh(struct work_struct *work)
 {
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
        u_long flags;
 
        if (!cs)
        cs->dbusytimer.function = (void *) hfcsx_dbusy_timer;
        cs->dbusytimer.data = (long) cs;
        init_timer(&cs->dbusytimer);
-       INIT_WORK(&cs->tqueue, (void *)(void *) hfcsx_bh, cs);
+       INIT_WORK(&cs->tqueue, hfcsx_bh);
        cs->readisac = NULL;
        cs->writeisac = NULL;
        cs->readisacfifo = NULL;
 
 }
 
 static void
-icc_bh(struct IsdnCardState *cs)
+icc_bh(struct work_struct *work)
 {
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
        struct PStack *stptr;
        
        if (!cs)
 void __devinit
 setup_icc(struct IsdnCardState *cs)
 {
-       INIT_WORK(&cs->tqueue, (void *)(void *) icc_bh, cs);
+       INIT_WORK(&cs->tqueue, icc_bh);
        cs->dbusytimer.function = (void *) dbusy_timer_handler;
        cs->dbusytimer.data = (long) cs;
        init_timer(&cs->dbusytimer);
 
 }
 
 static void
-isac_bh(struct IsdnCardState *cs)
+isac_bh(struct work_struct *work)
 {
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
        struct PStack *stptr;
        
        if (!cs)
 void __devinit
 setup_isac(struct IsdnCardState *cs)
 {
-       INIT_WORK(&cs->tqueue, (void *)(void *) isac_bh, cs);
+       INIT_WORK(&cs->tqueue, isac_bh);
        cs->dbusytimer.function = (void *) dbusy_timer_handler;
        cs->dbusytimer.data = (long) cs;
        init_timer(&cs->dbusytimer);
 
 #define B_LL_OK                10
 
 static void
-isar_bh(struct BCState *bcs)
+isar_bh(struct work_struct *work)
 {
+       struct BCState *bcs = container_of(work, struct BCState, tqueue);
+
        BChannel_bh(bcs);
        if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event))
                ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR);
                cs->bcs[i].mode = 0;
                cs->bcs[i].hw.isar.dpath = i + 1;
                modeisar(&cs->bcs[i], 0, 0);
-               INIT_WORK(&cs->bcs[i].tqueue, (void *)(void *) isar_bh, &cs->bcs[i]);
+               INIT_WORK(&cs->bcs[i].tqueue, isar_bh);
        }
 }
 
 
 }
 
 void
-BChannel_bh(struct BCState *bcs)
+BChannel_bh(struct work_struct *work)
 {
+       struct BCState *bcs = container_of(work, struct BCState, tqueue);
+
        if (!bcs)
                return;
        if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event))
 
        bcs->cs = cs;
        bcs->channel = bc;
-       INIT_WORK(&bcs->tqueue, (void *)(void *) BChannel_bh, bcs);
+       INIT_WORK(&bcs->tqueue, BChannel_bh);
        spin_lock_init(&bcs->aclock);
        bcs->BC_SetStack = NULL;
        bcs->BC_Close = NULL;
 
 }
 
 static void
-W6692_bh(struct IsdnCardState *cs)
+W6692_bh(struct work_struct *work)
 {
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
        struct PStack *stptr;
 
        if (!cs)
               id_list[cs->subtyp].card_name, cs->irq,
               cs->hw.w6692.iobase);
 
-       INIT_WORK(&cs->tqueue, (void *)(void *) W6692_bh, cs);
+       INIT_WORK(&cs->tqueue, W6692_bh);
        cs->readW6692 = &ReadW6692;
        cs->writeW6692 = &WriteW6692;
        cs->readisacfifo = &ReadISACfifo;
 
 /*
  * called from tq_immediate
  */
-static void isdn_net_softint(void *private)
+static void isdn_net_softint(struct work_struct *work)
 {
-       isdn_net_local *lp = private;
+       isdn_net_local *lp = container_of(work, isdn_net_local, tqueue);
        struct sk_buff *skb;
 
        spin_lock_bh(&lp->xmit_lock);
        netdev->local->netdev = netdev;
        netdev->local->next = netdev->local;
 
-       INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local);
+       INIT_WORK(&netdev->local->tqueue, isdn_net_softint);
        spin_lock_init(&netdev->local->xmit_lock);
 
        netdev->local->isdn_device = -1;
 
 static int pcbit_check_msn(struct pcbit_dev *dev, char *msn);
 
 
-extern void pcbit_deliver(void * data);
-
 int pcbit_init_dev(int board, int mem_base, int irq)
 {
        struct pcbit_dev *dev;
        memset(dev->b2, 0, sizeof(struct pcbit_chan));
        dev->b2->id = 1;
 
-       INIT_WORK(&dev->qdelivery, pcbit_deliver, dev);
+       INIT_WORK(&dev->qdelivery, pcbit_deliver);
 
        /*
         *  interrupts
 
  *  Prototypes
  */
 
-void pcbit_deliver(void *data);
 static void pcbit_transmit(struct pcbit_dev *dev);
 
 static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack);
  */
 
 void
-pcbit_deliver(void *data)
+pcbit_deliver(struct work_struct *work)
 {
        struct frame_buf *frame;
        unsigned long flags, msg;
-       struct pcbit_dev *dev = (struct pcbit_dev *) data;
+       struct pcbit_dev *dev =
+               container_of(work, struct pcbit_dev, qdelivery);
 
        spin_lock_irqsave(&dev->lock, flags);
 
 
 #define L2_RUNNING  5
 #define L2_ERROR    6
 
+extern void pcbit_deliver(struct work_struct *work);
+
 #endif
 
  * sysfs visibility
  */
 
-static void smu_expose_childs(void *unused)
+static void smu_expose_childs(struct work_struct *unused)
 {
        struct device_node *np;
 
                                                  &smu->of_dev->dev);
 }
 
-static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL);
+static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
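
Statically defined items lose their context argument as well: DECLARE_WORK()
and DECLARE_DELAYED_WORK() now take just a name and a handler, and a handler
with no per-item state simply ignores its argument.  A sketch with
illustrative names:

	static void expose(struct work_struct *unused)
	{
		/* nothing to recover: the item carries no context */
	}
	static DECLARE_WORK(expose_work, expose);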
 
 static int smu_platform_probe(struct of_device* dev,
                              const struct of_device_id *match)
 
  * interrupt context.
  */
 static struct workqueue_struct *_kcryptd_workqueue;
-static void kcryptd_do_work(void *data);
+static void kcryptd_do_work(struct work_struct *work);
 
 static void kcryptd_queue_io(struct crypt_io *io)
 {
-       INIT_WORK(&io->work, kcryptd_do_work, io);
+       INIT_WORK(&io->work, kcryptd_do_work);
        queue_work(_kcryptd_workqueue, &io->work);
 }
 
        dec_pending(io, crypt_convert(cc, &ctx));
 }
 
-static void kcryptd_do_work(void *data)
+static void kcryptd_do_work(struct work_struct *work)
 {
-       struct crypt_io *io = data;
+       struct crypt_io *io = container_of(work, struct crypt_io, work);
 
        if (io->post_process)
                process_read_endio(io);
 
 static kmem_cache_t *_mpio_cache;
 
 struct workqueue_struct *kmultipathd;
-static void process_queued_ios(void *data);
-static void trigger_event(void *data);
+static void process_queued_ios(struct work_struct *work);
+static void trigger_event(struct work_struct *work);
 
 
 /*-----------------------------------------------
                INIT_LIST_HEAD(&m->priority_groups);
                spin_lock_init(&m->lock);
                m->queue_io = 1;
-               INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
-               INIT_WORK(&m->trigger_event, trigger_event, m);
+               INIT_WORK(&m->process_queued_ios, process_queued_ios);
+               INIT_WORK(&m->trigger_event, trigger_event);
                m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
                if (!m->mpio_pool) {
                        kfree(m);
        }
 }
 
-static void process_queued_ios(void *data)
+static void process_queued_ios(struct work_struct *work)
 {
-       struct multipath *m = (struct multipath *) data;
+       struct multipath *m =
+               container_of(work, struct multipath, process_queued_ios);
        struct hw_handler *hwh = &m->hw_handler;
        struct pgpath *pgpath = NULL;
        unsigned init_required = 0, must_queue = 1;
  * An event is triggered whenever a path is taken out of use.
  * Includes path failure and PG bypass.
  */
-static void trigger_event(void *data)
+static void trigger_event(struct work_struct *work)
 {
-       struct multipath *m = (struct multipath *) data;
+       struct multipath *m =
+               container_of(work, struct multipath, trigger_event);
 
        dm_table_event(m->ti->table);
 }
 
        do_writes(ms, &writes);
 }
 
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
 {
        struct mirror_set *ms;
 
                dm_dirty_log_exit();
                return r;
        }
-       INIT_WORK(&_kmirrord_work, do_work, NULL);
+       INIT_WORK(&_kmirrord_work, do_work);
 
        r = dm_register_target(&mirror_target);
        if (r < 0) {
 
 #define SNAPSHOT_PAGES 256
 
 struct workqueue_struct *ksnapd;
-static void flush_queued_bios(void *data);
+static void flush_queued_bios(struct work_struct *work);
 
 struct pending_exception {
        struct exception e;
        }
 
        bio_list_init(&s->queued_bios);
-       INIT_WORK(&s->queued_bios_work, flush_queued_bios, s);
+       INIT_WORK(&s->queued_bios_work, flush_queued_bios);
 
        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        }
 }
 
-static void flush_queued_bios(void *data)
+static void flush_queued_bios(struct work_struct *work)
 {
-       struct dm_snapshot *s = (struct dm_snapshot *) data;
+       struct dm_snapshot *s =
+               container_of(work, struct dm_snapshot, queued_bios_work);
        struct bio *queued_bios;
        unsigned long flags;
 
 
 /*
  * kcopyd does this every time it's woken up.
  */
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
 {
        /*
         * The order that these are called is *very* important.
        }
 
        kcopyd_clients++;
-       INIT_WORK(&_kcopyd_work, do_work, NULL);
+       INIT_WORK(&_kcopyd_work, do_work);
        mutex_unlock(&kcopyd_init_lock);
        return 0;
 }
 
 
        unsigned long last_irq;
 
-       struct work_struct irq_check_work;
+       struct delayed_work irq_check_work;
 
        struct flexcop_device *fc_dev;
 };
        return 0;
 }
 
-static void flexcop_pci_irq_check_work(void *data)
+static void flexcop_pci_irq_check_work(struct work_struct *work)
 {
-       struct flexcop_pci *fc_pci = data;
+       struct flexcop_pci *fc_pci =
+               container_of(work, struct flexcop_pci, irq_check_work.work);
        struct flexcop_device *fc = fc_pci->fc_dev;
 
        flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714);
        if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
                goto err_fc_exit;
 
-       INIT_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work, fc_pci);
+       INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
 
        return ret;
 
 
 
        struct dvbt_set_parameters_msg param;
        struct dvbt_get_status_msg status;
-       struct work_struct query_work;
+       struct delayed_work query_work;
 
        wait_queue_head_t poll_wq;
        int pending_fe_events;
 #ifdef ENABLE_RC
        struct input_dev *rc_input_dev;
        char phys[64];
-       struct work_struct rc_query_work;
+       struct delayed_work rc_query_work;
        int rc_input_event;
        u32 rc_last_code;
        unsigned long last_event_jiffies;
 
 #ifdef ENABLE_RC
 
-static void cinergyt2_query_rc (void *data)
+static void cinergyt2_query_rc (struct work_struct *work)
 {
-       struct cinergyt2 *cinergyt2 = data;
+       struct cinergyt2 *cinergyt2 =
+               container_of(work, struct cinergyt2, rc_query_work.work);
        char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS };
        struct cinergyt2_rc_event rc_events[12];
        int n, len, i;
        strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys));
        cinergyt2->rc_input_event = KEY_MAX;
        cinergyt2->rc_last_code = ~0;
-       INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2);
+       INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc);
 
        input_dev->name = DRIVER_NAME " remote control";
        input_dev->phys = cinergyt2->phys;
 
 #endif /* ENABLE_RC */
 
-static void cinergyt2_query (void *data)
+static void cinergyt2_query (struct work_struct *work)
 {
-       struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data;
+       struct cinergyt2 *cinergyt2 =
+               container_of(work, struct cinergyt2, query_work.work);
        char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS };
        struct dvbt_get_status_msg *s = &cinergyt2->status;
        uint8_t lock_bits;
 
        mutex_init(&cinergyt2->sem);
        init_waitqueue_head (&cinergyt2->poll_wq);
-       INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2);
+       INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query);
 
        cinergyt2->udev = interface_to_usbdev(intf);
        cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
 
        int in_use;
        struct net_device_stats stats;
        u16 pid;
+       struct net_device *net;
        struct dvb_net *host;
        struct dmx_demux *demux;
        struct dmx_section_feed *secfeed;
 }
 
 
-static void wq_set_multicast_list (void *data)
+static void wq_set_multicast_list (struct work_struct *work)
 {
-       struct net_device *dev = data;
-       struct dvb_net_priv *priv = dev->priv;
+       struct dvb_net_priv *priv =
+               container_of(work, struct dvb_net_priv, set_multicast_list_wq);
+       struct net_device *dev = priv->net;
 
        dvb_net_feed_stop(dev);
        priv->rx_mode = RX_MODE_UNI;
 }
 
 
-static void wq_restart_net_feed (void *data)
+static void wq_restart_net_feed (struct work_struct *work)
 {
-       struct net_device *dev = data;
+       struct dvb_net_priv *priv =
+               container_of(work, struct dvb_net_priv, restart_net_feed_wq);
+       struct net_device *dev = priv->net;
 
        if (netif_running(dev)) {
                dvb_net_feed_stop(dev);
        dvbnet->device[if_num] = net;
 
        priv = net->priv;
+       priv->net = net;
        priv->demux = dvbnet->demux;
        priv->pid = pid;
        priv->rx_mode = RX_MODE_UNI;
        priv->feedtype = feedtype;
        reset_ule(priv);
 
-       INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net);
-       INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net);
+       INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
+       INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
        mutex_init(&priv->mutex);
 
        net->base_addr = pid;
 
  *
  * TODO: Fix the repeat rate of the input device.
  */
-static void dvb_usb_read_remote_control(void *data)
+static void dvb_usb_read_remote_control(struct work_struct *work)
 {
-       struct dvb_usb_device *d = data;
+       struct dvb_usb_device *d =
+               container_of(work, struct dvb_usb_device, rc_query_work.work);
        u32 event;
        int state;
 
 
        input_register_device(d->rc_input_dev);
 
-       INIT_WORK(&d->rc_query_work, dvb_usb_read_remote_control, d);
+       INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control);
 
        info("schedule remote query interval to %d msecs.", d->props.rc_interval);
        schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval));
 
        /* remote control */
        struct input_dev *rc_input_dev;
        char rc_phys[64];
-       struct work_struct rc_query_work;
+       struct delayed_work rc_query_work;
        u32 last_event;
        int last_state;
 
 
        struct pardevice *pdev;
        struct parport *port;
        struct work_struct cb_task;
+       void (*cb_func)(void *cbdata);
+       void *cb_data;
        int open_count;
        wait_queue_head_t wq_stream;
        /* image state flags */
 #define PARPORT_CHUNK_SIZE     PAGE_SIZE
 
 
+static void cpia_pp_run_callback(struct work_struct *work)
+{
+       void (*cb_func)(void *cbdata);
+       void *cb_data;
+       struct pp_cam_entry *cam;
+
+       cam = container_of(work, struct pp_cam_entry, cb_task);
+       cb_func = cam->cb_func;
+       cb_data = cam->cb_data;
+       work_release(work);
+
+       cb_func(cb_data);
+}
+
 /****************************************************************************
  *
  *  CPiA-specific  low-level parport functions for nibble uploads
        int retval = 0;
 
        if(cam->port->irq != PARPORT_IRQ_NONE) {
-               INIT_WORK(&cam->cb_task, cb, cbdata);
+               cam->cb_func = cb;
+               cam->cb_data = cbdata;
+               INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback);
        } else {
                retval = -1;
        }
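
Where the old API smuggled an arbitrary function-and-argument pair through
INIT_WORK(), the pair now has to live in the containing object, with a fixed
trampoline as the handler.  The _NAR ("no auto-release") variant used above,
part of this same patch series, leaves the item marked pending until the
handler calls work_release(), which is why the trampoline copies its data
out first.  A minimal sketch with hypothetical names:

	struct oneshot {
		struct work_struct task;
		void (*fn)(void *arg);
		void *arg;
	};

	static void oneshot_run(struct work_struct *work)
	{
		struct oneshot *os = container_of(work, struct oneshot, task);
		void (*fn)(void *) = os->fn;
		void *arg = os->arg;

		work_release(work);	/* _NAR: handler releases explicitly */
		fn(arg);
	}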
 
        schedule_work(&ir->work);
 }
 
-static void cx88_ir_work(void *data)
+static void cx88_ir_work(struct work_struct *work)
 {
-       struct cx88_IR *ir = data;
+       struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
        unsigned long timeout;
 
        cx88_ir_handle_key(ir);
        core->ir = ir;
 
        if (ir->polling) {
-               INIT_WORK(&ir->work, cx88_ir_work, ir);
+               INIT_WORK(&ir->work, cx88_ir_work);
                init_timer(&ir->timer);
                ir->timer.function = ir_timer;
                ir->timer.data = (unsigned long)ir;
 
        schedule_work(&ir->work);
 }
 
-static void ir_work(void *data)
+static void ir_work(struct work_struct *work)
 {
-       struct IR_i2c *ir = data;
+       struct IR_i2c *ir = container_of(work, struct IR_i2c, work);
        ir_key_poll(ir);
        mod_timer(&ir->timer, jiffies+HZ/10);
 }
               ir->input->name,ir->input->phys,adap->name);
 
        /* start polling via eventd */
-       INIT_WORK(&ir->work, ir_work, ir);
+       INIT_WORK(&ir->work, ir_work);
        init_timer(&ir->timer);
        ir->timer.function = ir_timer;
        ir->timer.data     = (unsigned long)ir;
 
 }
 
 
-static void pvr2_context_poll(struct pvr2_context *mp)
+static void pvr2_context_poll(struct work_struct *work)
 {
+       struct pvr2_context *mp =
+               container_of(work, struct pvr2_context, workpoll);
        pvr2_context_enter(mp); do {
                pvr2_hdw_poll(mp->hdw);
        } while (0); pvr2_context_exit(mp);
 }
 
 
-static void pvr2_context_setup(struct pvr2_context *mp)
+static void pvr2_context_setup(struct work_struct *work)
 {
+       struct pvr2_context *mp =
+               container_of(work, struct pvr2_context, workinit);
+
        pvr2_context_enter(mp); do {
                if (!pvr2_hdw_dev_ok(mp->hdw)) break;
                pvr2_hdw_setup(mp->hdw);
        }
 
        mp->workqueue = create_singlethread_workqueue("pvrusb2");
-       INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp);
-       INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp);
+       INIT_WORK(&mp->workinit, pvr2_context_setup);
+       INIT_WORK(&mp->workpoll, pvr2_context_poll);
        queue_work(mp->workqueue,&mp->workinit);
  done:
        return mp;
 
        schedule_work(&s->work);
 }
 
-static void saa6588_work(void *data)
+static void saa6588_work(struct work_struct *work)
 {
-       struct saa6588 *s = (struct saa6588 *)data;
+       struct saa6588 *s = container_of(work, struct saa6588, work);
 
        saa6588_i2c_poll(s);
        mod_timer(&s->timer, jiffies + msecs_to_jiffies(20));
        saa6588_configure(s);
 
        /* start polling via eventd */
-       INIT_WORK(&s->work, saa6588_work, s);
+       INIT_WORK(&s->work, saa6588_work);
        init_timer(&s->timer);
        s->timer.function = saa6588_timer;
        s->timer.data = (unsigned long)s;
 
        .minor         = -1,
 };
 
-static void empress_signal_update(void* data)
+static void empress_signal_update(struct work_struct *work)
 {
-       struct saa7134_dev* dev = (struct saa7134_dev*) data;
+       struct saa7134_dev* dev =
+               container_of(work, struct saa7134_dev, empress_workqueue);
 
        if (dev->nosignal) {
                dprintk("no video signal\n");
                 "%s empress (%s)", dev->name,
                 saa7134_boards[dev->board].name);
 
-       INIT_WORK(&dev->empress_workqueue, empress_signal_update, (void*) dev);
+       INIT_WORK(&dev->empress_workqueue, empress_signal_update);
 
        err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
                                    empress_nr[dev->nr]);
                            sizeof(struct saa7134_buf),
                            dev);
 
-       empress_signal_update(dev);
+       empress_signal_update(&dev->empress_workqueue);
        return 0;
 }
 
 
 }
 
 static void
-mptfc_setup_reset(void *arg)
+mptfc_setup_reset(struct work_struct *work)
 {
-       MPT_ADAPTER             *ioc = (MPT_ADAPTER *)arg;
+       MPT_ADAPTER             *ioc =
+               container_of(work, MPT_ADAPTER, fc_setup_reset_work);
        u64                     pn;
        struct mptfc_rport_info *ri;
 
 }
 
 static void
-mptfc_rescan_devices(void *arg)
+mptfc_rescan_devices(struct work_struct *work)
 {
-       MPT_ADAPTER             *ioc = (MPT_ADAPTER *)arg;
+       MPT_ADAPTER             *ioc =
+               container_of(work, MPT_ADAPTER, fc_rescan_work);
        int                     ii;
        u64                     pn;
        struct mptfc_rport_info *ri;
         }
 
        spin_lock_init(&ioc->fc_rescan_work_lock);
-       INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc);
-       INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc);
+       INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices);
+       INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset);
 
        spin_lock_irqsave(&ioc->FreeQlock, flags);
 
 
        u32 total_received;
        struct net_device_stats stats;  /* Per device statistics */
 
-       struct work_struct post_buckets_task;
+       struct delayed_work post_buckets_task;
+       struct net_device *dev;
        unsigned long post_buckets_active;
 };
 
 static int  mpt_lan_open(struct net_device *dev);
 static int  mpt_lan_reset(struct net_device *dev);
 static int  mpt_lan_close(struct net_device *dev);
-static void mpt_lan_post_receive_buckets(void *dev_id);
+static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
                                           int priority);
 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
                        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
                spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
        } else {
-               mpt_lan_post_receive_buckets(dev);
+               mpt_lan_post_receive_buckets(priv);
                netif_wake_queue(dev);
        }
 
 
        dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
 
-       mpt_lan_post_receive_buckets(dev);
+       mpt_lan_post_receive_buckets(priv);
        printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev));
 
        
        if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
                if (priority) {
-                       schedule_work(&priv->post_buckets_task);
+                       schedule_delayed_work(&priv->post_buckets_task, 0);
                } else {
                        schedule_delayed_work(&priv->post_buckets_task, 1);
                        dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
 /* Simple SGE's only at the moment */
 
 static void
-mpt_lan_post_receive_buckets(void *dev_id)
+mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
 {
-       struct net_device *dev = dev_id;
-       struct mpt_lan_priv *priv = dev->priv;
+       struct net_device *dev = priv->dev;
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        MPT_FRAME_HDR *mf;
        LANReceivePostRequest_t *pRecvReq;
        clear_bit(0, &priv->post_buckets_active);
 }
 
+static void
+mpt_lan_post_receive_buckets_work(struct work_struct *work)
+{
+       mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
+                                                 post_buckets_task.work));
+}
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static struct net_device *
 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
 
        priv = netdev_priv(dev);
 
+       priv->dev = dev;
        priv->mpt_dev = mpt_dev;
        priv->pnum = pnum;
 
-       memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
-       INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
+       memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task));
+       INIT_DELAYED_WORK(&priv->post_buckets_task,
+                         mpt_lan_post_receive_buckets_work);
        priv->post_buckets_active = 0;
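
container_of() only works outward from an embedded member, so conversions
like this one add an explicit back-pointer (priv->dev here, priv->net in the
dvb_net hunk earlier) where the old code went the other way through
netdev_priv().  A routine still called directly from non-workqueue paths
keeps its natural argument and gains a thin adapter, sketched here with
shortened names:

	static void post_buckets(struct mpt_lan_priv *priv);	/* direct callers */

	static void post_buckets_work(struct work_struct *work)	/* workqueue entry */
	{
		post_buckets(container_of(work, struct mpt_lan_priv,
					  post_buckets_task.work));
	}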
 
        dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
 
  *(Mutex LOCKED)
  */
 static void
-mptsas_discovery_work(void * arg)
+mptsas_discovery_work(struct work_struct *work)
 {
-       struct mptsas_discovery_event *ev = arg;
+       struct mptsas_discovery_event *ev =
+               container_of(work, struct mptsas_discovery_event, work);
        MPT_ADAPTER *ioc = ev->ioc;
 
        mutex_lock(&ioc->sas_discovery_mutex);
  * Work queue thread to clear the persitency table
  */
 static void
-mptsas_persist_clear_table(void * arg)
+mptsas_persist_clear_table(struct work_struct *work)
 {
-       MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+       MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
 
        mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
 }
  * Work queue thread to handle SAS hotplug events
  */
 static void
-mptsas_hotplug_work(void *arg)
+mptsas_hotplug_work(struct work_struct *work)
 {
-       struct mptsas_hotplug_event *ev = arg;
+       struct mptsas_hotplug_event *ev =
+               container_of(work, struct mptsas_hotplug_event, work);
        MPT_ADAPTER *ioc = ev->ioc;
        struct mptsas_phyinfo *phy_info;
        struct sas_rphy *rphy;
                        break;
                }
 
-               INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+               INIT_WORK(&ev->work, mptsas_hotplug_work);
                ev->ioc = ioc;
                ev->handle = le16_to_cpu(sas_event_data->DevHandle);
                ev->parent_handle =
         * Persistent table is full.
         */
                INIT_WORK(&ioc->sas_persist_task,
-                   mptsas_persist_clear_table, (void *)ioc);
+                   mptsas_persist_clear_table);
                schedule_work(&ioc->sas_persist_task);
                break;
        case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
                return;
        }
 
-       INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+       INIT_WORK(&ev->work, mptsas_hotplug_work);
        ev->ioc = ioc;
        ev->id = raid_event_data->VolumeID;
        ev->event_type = MPTSAS_IGNORE_EVENT;
        ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
        if (!ev)
                return;
-       INIT_WORK(&ev->work, mptsas_discovery_work, ev);
+       INIT_WORK(&ev->work, mptsas_discovery_work);
        ev->ioc = ioc;
        schedule_work(&ev->work);
 };
                break;
        case MPI_EVENT_PERSISTENT_TABLE_FULL:
                INIT_WORK(&ioc->sas_persist_task,
-                   mptsas_persist_clear_table,
-                   (void *)ioc);
+                   mptsas_persist_clear_table);
                schedule_work(&ioc->sas_persist_task);
                break;
         case MPI_EVENT_SAS_DISCOVERY:
 
        int                     disk;
 };
 
-static void mpt_work_wrapper(void *data)
+static void mpt_work_wrapper(struct work_struct *work)
 {
-       struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+       struct work_queue_wrapper *wqw =
+               container_of(work, struct work_queue_wrapper, work);
        struct _MPT_SCSI_HOST *hd = wqw->hd;
        struct Scsi_Host *shost = hd->ioc->sh;
        struct scsi_device *sdev;
                           disk);
                return;
        }
-       INIT_WORK(&wqw->work, mpt_work_wrapper, wqw);
+       INIT_WORK(&wqw->work, mpt_work_wrapper);
        wqw->hd = hd;
        wqw->disk = disk;
 
  * renegotiate for a given target
  */
 static void
-mptspi_dv_renegotiate_work(void *data)
+mptspi_dv_renegotiate_work(struct work_struct *work)
 {
-       struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+       struct work_queue_wrapper *wqw =
+               container_of(work, struct work_queue_wrapper, work);
        struct _MPT_SCSI_HOST *hd = wqw->hd;
        struct scsi_device *sdev;
 
        if (!wqw)
                return;
 
-       INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw);
+       INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work);
        wqw->hd = hd;
 
        schedule_work(&wqw->work);
 
                        break;
                }
 
-               INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt);
+               INIT_WORK(&evt->work, drv->event);
                queue_work(drv->event_queue, &evt->work);
                return 1;
        }
 
  *     new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY
  *     again, otherwise send LCT NOTIFY to get informed on next LCT change.
  */
-static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work)
+static void i2o_exec_lct_modified(struct work_struct *_work)
 {
+       struct i2o_exec_lct_notify_work *work =
+               container_of(_work, struct i2o_exec_lct_notify_work, work);
        u32 change_ind = 0;
        struct i2o_controller *c = work->c;
 
 
                work->c = c;
 
-               INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified,
-                         work);
+               INIT_WORK(&work->work, i2o_exec_lct_modified);
                queue_work(i2o_exec_driver.event_queue, &work->work);
                return 1;
        }
 
 /**
  *     i2o_exec_event - Event handling function
- *     @evt: Event which occurs
+ *     @work: Work item of the event that occurred
  *
  *     Handles events send by the Executive device. At the moment does not do
  *     anything useful.
  */
-static void i2o_exec_event(struct i2o_event *evt)
+static void i2o_exec_event(struct work_struct *work)
 {
+       struct i2o_event *evt = container_of(work, struct i2o_event, work);
+
        if (likely(evt->i2o_dev))
                osm_debug("Event received from device: %d\n",
                          evt->i2o_dev->lct_data.tid);
 
 
 /**
  *     i2o_block_delayed_request_fn - delayed request queue function
- *     delayed_request: the delayed request with the queue to start
+ *     @work: the delayed request with the queue to start
  *
  *     If the request queue is stopped for a disk, and there is no open
  *     request, a new event is created, which calls this function to start
  *     the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
  *     be started again.
  */
-static void i2o_block_delayed_request_fn(void *delayed_request)
+static void i2o_block_delayed_request_fn(struct work_struct *work)
 {
-       struct i2o_block_delayed_request *dreq = delayed_request;
+       struct i2o_block_delayed_request *dreq =
+               container_of(work, struct i2o_block_delayed_request,
+                            work.work);
        struct request_queue *q = dreq->queue;
        unsigned long flags;
 
        return 1;
 };
 
-static void i2o_block_event(struct i2o_event *evt)
+static void i2o_block_event(struct work_struct *work)
 {
+       struct i2o_event *evt = container_of(work, struct i2o_event, work);
        osm_debug("event received\n");
        kfree(evt);
 };
                                continue;
 
                        dreq->queue = q;
-                       INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
-                                 dreq);
+                       INIT_DELAYED_WORK(&dreq->work,
+                                         i2o_block_delayed_request_fn);
 
                        if (!queue_delayed_work(i2o_block_driver.event_queue,
                                                &dreq->work,
 
 
 /* I2O Block device delayed request */
 struct i2o_block_delayed_request {
-       struct work_struct work;
+       struct delayed_work work;
        struct request_queue *queue;
 };
 
 
        spin_unlock_irqrestore(&fm->lock, flags);
 }
 
-static void tifm_7xx1_remove_media(void *adapter)
+static void tifm_7xx1_remove_media(struct work_struct *work)
 {
-       struct tifm_adapter *fm = adapter;
+       struct tifm_adapter *fm =
+               container_of(work, struct tifm_adapter, media_remover);
        unsigned long flags;
        int cnt;
        struct tifm_dev *sock;
        return base_addr + ((sock_num + 1) << 10);
 }
 
-static void tifm_7xx1_insert_media(void *adapter)
+static void tifm_7xx1_insert_media(struct work_struct *work)
 {
-       struct tifm_adapter *fm = adapter;
+       struct tifm_adapter *fm =
+               container_of(work, struct tifm_adapter, media_inserter);
        unsigned long flags;
        tifm_media_id media_id;
        char *card_name = "xx";
        spin_unlock_irqrestore(&fm->lock, flags);
        flush_workqueue(fm->wq);
 
-       tifm_7xx1_remove_media(fm);
+       tifm_7xx1_remove_media(&fm->media_remover);
 
        pci_set_power_state(dev, PCI_D3hot);
         pci_disable_device(dev);
        if (!fm->sockets)
                goto err_out_free;
 
-       INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm);
-       INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm);
+       INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media);
+       INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media);
        fm->eject = tifm_7xx1_eject;
        pci_set_drvdata(dev, fm);
 
 
        flush_workqueue(fm->wq);
 
-       tifm_7xx1_remove_media(fm);
+       tifm_7xx1_remove_media(&fm->media_remover);
 
        writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
        free_irq(dev->irq, fm);
 
  */
 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
 {
-       if (delay)
-               mmc_schedule_delayed_work(&host->detect, delay);
-       else
-               mmc_schedule_work(&host->detect);
+       mmc_schedule_delayed_work(&host->detect, delay);
 }
 
 EXPORT_SYMBOL(mmc_detect_change);
 
 
-static void mmc_rescan(void *data)
+static void mmc_rescan(struct work_struct *work)
 {
-       struct mmc_host *host = data;
+       struct mmc_host *host =
+               container_of(work, struct mmc_host, detect.work);
        struct list_head *l, *n;
        unsigned char power_mode;
 
                spin_lock_init(&host->lock);
                init_waitqueue_head(&host->wq);
                INIT_LIST_HEAD(&host->cards);
-               INIT_WORK(&host->detect, mmc_rescan, host);
+               INIT_DELAYED_WORK(&host->detect, mmc_rescan);
 
                /*
                 * By default, hosts do not support SGIO or large requests.
  */
 int mmc_resume_host(struct mmc_host *host)
 {
-       mmc_rescan(host);
+       mmc_rescan(&host->detect.work);
 
        return 0;
 }
 
 void mmc_free_host_sysfs(struct mmc_host *host);
 
 int mmc_schedule_work(struct work_struct *work);
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay);
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay);
 void mmc_flush_scheduled_work(void);
 #endif
 
 
 static struct workqueue_struct *workqueue;
 
-/*
- * Internal function. Schedule work in the MMC work queue.
- */
-int mmc_schedule_work(struct work_struct *work)
-{
-       return queue_work(workqueue, work);
-}
-
 /*
  * Internal function. Schedule delayed work in the MMC work queue.
  */
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay)
 {
        return queue_delayed_work(workqueue, work, delay);
 }
 
 
        struct mmc_request    *req;
        struct work_struct    cmd_handler;
-       struct work_struct    abort_handler;
+       struct delayed_work   abort_handler;
        wait_queue_head_t     can_eject;
 
        size_t                written_blocks;
        mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_end_cmd(void *data)
+static void tifm_sd_end_cmd(struct work_struct *work)
 {
-       struct tifm_sd *host = data;
+       struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
        struct tifm_dev *sock = host->dev;
        struct mmc_host *mmc = tifm_get_drvdata(sock);
        struct mmc_request *mrq;
        mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_end_cmd_nodma(void *data)
+static void tifm_sd_end_cmd_nodma(struct work_struct *work)
 {
-       struct tifm_sd *host = (struct tifm_sd*)data;
+       struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
        struct tifm_dev *sock = host->dev;
        struct mmc_host *mmc = tifm_get_drvdata(sock);
        struct mmc_request *mrq;
        mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_abort(void *data)
+static void tifm_sd_abort(struct work_struct *work)
 {
+       struct tifm_sd *host =
+               container_of(work, struct tifm_sd, abort_handler.work);
+
        printk(KERN_ERR DRIVER_NAME
                ": card failed to respond for a long period of time");
-       tifm_eject(((struct tifm_sd*)data)->dev);
+       tifm_eject(host->dev);
 }
 
 static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        .get_ro  = tifm_sd_ro
 };
 
-static void tifm_sd_register_host(void *data)
+static void tifm_sd_register_host(struct work_struct *work)
 {
-       struct tifm_sd *host = (struct tifm_sd*)data;
+       struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
        struct tifm_dev *sock = host->dev;
        struct mmc_host *mmc = tifm_get_drvdata(sock);
        unsigned long flags;
        spin_lock_irqsave(&sock->lock, flags);
        host->flags |= HOST_REG;
        PREPARE_WORK(&host->cmd_handler,
-                       no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd,
-                       data);
+                       no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd);
        spin_unlock_irqrestore(&sock->lock, flags);
        dev_dbg(&sock->dev, "adding host\n");
        mmc_add_host(mmc);
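
PREPARE_WORK() likewise drops its data argument: it only re-points the
handler, leaving the embedded item (and therefore the object container_of()
resolves to) untouched, which is what lets tifm_sd reuse the one cmd_handler
item first for registration and then for command completion:

	/* was: PREPARE_WORK(&host->cmd_handler, tifm_sd_end_cmd, data); */
	PREPARE_WORK(&host->cmd_handler,
		     no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd);
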
        host->dev = sock;
        host->clk_div = 61;
        init_waitqueue_head(&host->can_eject);
-       INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host);
-       INIT_WORK(&host->abort_handler, tifm_sd_abort, host);
+       INIT_WORK(&host->cmd_handler, tifm_sd_register_host);
+       INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort);
 
        tifm_set_drvdata(sock, mmc);
        sock->signal_irq = tifm_sd_signal_irq;
 
        u32 rx_config;
        struct rtl_extra_stats xstats;
 
-       struct work_struct thread;
+       struct delayed_work thread;
 
        struct mii_if_info mii;
        unsigned int regs_len;
 static void rtl8139_set_rx_mode (struct net_device *dev);
 static void __set_rx_mode (struct net_device *dev);
 static void rtl8139_hw_start (struct net_device *dev);
-static void rtl8139_thread (void *_data);
-static void rtl8139_tx_timeout_task(void *_data);
+static void rtl8139_thread (struct work_struct *work);
+static void rtl8139_tx_timeout_task(struct work_struct *work);
 static const struct ethtool_ops rtl8139_ethtool_ops;
 
 /* write MMIO register, with flush */
                (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
        spin_lock_init (&tp->lock);
        spin_lock_init (&tp->rx_lock);
-       INIT_WORK(&tp->thread, rtl8139_thread, dev);
+       INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
        tp->mii.dev = dev;
        tp->mii.mdio_read = mdio_read;
        tp->mii.mdio_write = mdio_write;
                 RTL_R8 (Config1));
 }
 
-static void rtl8139_thread (void *_data)
+static void rtl8139_thread (struct work_struct *work)
 {
-       struct net_device *dev = _data;
-       struct rtl8139_private *tp = netdev_priv(dev);
+       struct rtl8139_private *tp =
+               container_of(work, struct rtl8139_private, thread.work);
+       struct net_device *dev = tp->mii.dev;
        unsigned long thr_delay = next_tick;
 
        if (tp->watchdog_fired) {
                tp->watchdog_fired = 0;
-               rtl8139_tx_timeout_task(_data);
+               rtl8139_tx_timeout_task(work);
        } else if (rtnl_trylock()) {
                rtl8139_thread_iter (dev, tp, tp->mmio_addr);
                rtnl_unlock ();
        /* XXX account for unsent Tx packets in tp->stats.tx_dropped */
 }
 
-static void rtl8139_tx_timeout_task (void *_data)
+static void rtl8139_tx_timeout_task (struct work_struct *work)
 {
-       struct net_device *dev = _data;
-       struct rtl8139_private *tp = netdev_priv(dev);
+       struct rtl8139_private *tp =
+               container_of(work, struct rtl8139_private, thread.work);
+       struct net_device *dev = tp->mii.dev;
        void __iomem *ioaddr = tp->mmio_addr;
        int i;
        u8 tmp8;
        struct rtl8139_private *tp = netdev_priv(dev);
 
        if (!tp->have_thread) {
-               INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev);
+               INIT_DELAYED_WORK(&tp->thread, rtl8139_tx_timeout_task);
                schedule_delayed_work(&tp->thread, next_tick);
        } else
                tp->watchdog_fired = 1;
 
 }
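
Where the old callback received the net_device itself as its data pointer, the new one must find its way back from the private struct; rtl8139 can reuse the tp->mii.dev it already keeps, while drivers without such a field (several below) grow an explicit back pointer set at probe time. A minimal sketch of that idiom, names hypothetical:

	struct priv_like {
		struct net_device *dev;		/* back pointer, set at probe */
		struct delayed_work thread;
	};

	static void thread_fn(struct work_struct *work)
	{
		struct priv_like *tp =
			container_of(work, struct priv_like, thread.work);
		struct net_device *dev = tp->dev;  /* what data used to carry */

		netif_wake_queue(dev);		/* placeholder body */
	}

		/* at probe time: */
		tp->dev = dev;
		INIT_DELAYED_WORK(&tp->thread, thread_fn);
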
 
 static void
-bnx2_reset_task(void *data)
+bnx2_reset_task(struct work_struct *work)
 {
-       struct bnx2 *bp = data;
+       struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
 
        if (!netif_running(bp->dev))
                return;
        bp->pdev = pdev;
 
        spin_lock_init(&bp->phy_lock);
-       INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
+       INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
        dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
        mem_len = MB_GET_CID_ADDR(17);
 
        return 0;
 }
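
container_of() itself is only offsetof() arithmetic, which is why any embedded member can be walked back to its container. A self-contained userspace analogue (not driver code, with a local copy of the macro) that compiles and runs:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work_struct { int pending; };

	struct bnx2_like {
		int unit;
		struct work_struct reset_task;
	};

	static void reset_task(struct work_struct *work)
	{
		struct bnx2_like *bp =
			container_of(work, struct bnx2_like, reset_task);
		printf("reset unit %d\n", bp->unit);
	}

	int main(void)
	{
		struct bnx2_like b = { .unit = 2 };
		reset_task(&b.reset_task);  /* recovers &b from &b.reset_task */
		return 0;
	}
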
 
-static void cas_reset_task(void *data)
+static void cas_reset_task(struct work_struct *work)
 {
-       struct cas *cp = (struct cas *) data;
+       struct cas *cp = container_of(work, struct cas, reset_task);
 #if 0
        int pending = atomic_read(&cp->reset_task_pending);
 #else
        atomic_set(&cp->reset_task_pending_spare, 0);
        atomic_set(&cp->reset_task_pending_mtu, 0);
 #endif
-       INIT_WORK(&cp->reset_task, cas_reset_task, cp);
+       INIT_WORK(&cp->reset_task, cas_reset_task);
 
        /* Default link parameters */
        if (link_mode >= 0 && link_mode <= 6)
 
        struct peespi *espi;
 
        struct port_info port[MAX_NPORTS];
-       struct work_struct stats_update_task;
+       struct delayed_work stats_update_task;
        struct timer_list stats_update_timer;
 
        struct semaphore mib_mutex;
 
  * Periodic accumulation of MAC statistics.  This is used only if the MAC
  * does not have any other way to prevent stats counter overflow.
  */
-static void mac_stats_task(void *data)
+static void mac_stats_task(struct work_struct *work)
 {
        int i;
-       struct adapter *adapter = data;
+       struct adapter *adapter =
+               container_of(work, struct adapter, stats_update_task.work);
 
        for_each_port(adapter, i) {
                struct port_info *p = &adapter->port[i];
 /*
  * Processes elmer0 external interrupts in process context.
  */
-static void ext_intr_task(void *data)
+static void ext_intr_task(struct work_struct *work)
 {
-       struct adapter *adapter = data;
+       struct adapter *adapter =
+               container_of(work, struct adapter, ext_intr_handler_task);
 
        elmer0_ext_intr_handler(adapter);
 
                        spin_lock_init(&adapter->async_lock);
 
                        INIT_WORK(&adapter->ext_intr_handler_task,
-                                 ext_intr_task, adapter);
-                       INIT_WORK(&adapter->stats_update_task, mac_stats_task,
-                                 adapter);
+                                 ext_intr_task);
+                       INIT_DELAYED_WORK(&adapter->stats_update_task,
+                                         mac_stats_task);
 #ifdef work_struct
                        init_timer(&adapter->stats_update_timer);
                        adapter->stats_update_timer.function = mac_stats_timer;
 
        schedule_work(&nic->tx_timeout_task);
 }
 
-static void e100_tx_timeout_task(struct net_device *netdev)
+static void e100_tx_timeout_task(struct work_struct *work)
 {
-       struct nic *nic = netdev_priv(netdev);
+       struct nic *nic = container_of(work, struct nic, tx_timeout_task);
+       struct net_device *netdev = nic->netdev;
 
        DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
                readb(&nic->csr->scb.status));
        nic->blink_timer.function = e100_blink_led;
        nic->blink_timer.data = (unsigned long)nic;
 
-       INIT_WORK(&nic->tx_timeout_task,
-               (void (*)(void *))e100_tx_timeout_task, netdev);
+       INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
 
        if((err = e100_alloc(nic))) {
                DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
 
        return ret;
 }
 
-static void ehea_reset_port(void *data)
+static void ehea_reset_port(struct work_struct *work)
 {
        int ret;
-       struct net_device *dev = data;
-       struct ehea_port *port = netdev_priv(dev);
+       struct ehea_port *port =
+               container_of(work, struct ehea_port, reset_task);
+       struct net_device *dev = port->netdev;
 
        port->resets++;
        down(&port->port_lock);
        dev->tx_timeout = &ehea_tx_watchdog;
        dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
-       INIT_WORK(&port->reset_task, ehea_reset_port, dev);
+       INIT_WORK(&port->reset_task, ehea_reset_port);
 
        ehea_set_ethtool_ops(dev);
 
 
        int magic;
 
         struct pardevice *pdev;
+       struct net_device *dev;
        unsigned int work_running;
-       struct work_struct run_work;
+       struct delayed_work run_work;
        unsigned int modem;
        unsigned int bitrate;
        unsigned char stat;
 #define GETTICK(x)
 #endif /* __i386__ */
 
-static void epp_bh(struct net_device *dev)
+static void epp_bh(struct work_struct *work)
 {
+       struct net_device *dev;
        struct baycom_state *bc;
        struct parport *pp;
        unsigned char stat;
        unsigned char tmp[2];
        unsigned int time1 = 0, time2 = 0, time3 = 0;
        int cnt, cnt2;
-       
-       bc = netdev_priv(dev);
+
+       bc = container_of(work, struct baycom_state, run_work.work);
+       dev = bc->dev;
        if (!bc->work_running)
                return;
        baycom_int_freq(bc);
                 return -EBUSY;
         }
         dev->irq = /*pp->irq*/ 0;
-       INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev);
+       INIT_DELAYED_WORK(&bc->run_work, epp_bh);
        bc->work_running = 1;
        bc->modem = EPP_CONVENTIONAL;
        if (eppconfig(bc))
        /*
         * initialize part of the baycom_state struct
         */
+       bc->dev = dev;
        bc->magic = BAYCOM_MAGIC;
        bc->cfg.fclk = 19666600;
        bc->cfg.bps = 9600;
 
        return ret;
 }
 
-static void mcs_speed_work(void *arg)
+static void mcs_speed_work(struct work_struct *work)
 {
-       struct mcs_cb *mcs = arg;
+       struct mcs_cb *mcs = container_of(work, struct mcs_cb, work);
        struct net_device *netdev = mcs->netdev;
 
        mcs_speed_change(mcs);
        irda_qos_bits_to_value(&mcs->qos);
 
        /* Speed change work initialisation*/
-       INIT_WORK(&mcs->work, mcs_speed_work, mcs);
+       INIT_WORK(&mcs->work, mcs_speed_work);
 
        /* Override the network functions we need to use */
        ndev->hard_start_xmit = mcs_hard_xmit;
 
 
 struct sir_fsm {
        struct semaphore        sem;
-       struct work_struct      work;
+       struct delayed_work     work;
        unsigned                state, substate;
        int                     param;
        int                     result;
 
  * Both must be unlocked/restarted on completion - but only on final exit.
  */
 
-static void sirdev_config_fsm(void *data)
+static void sirdev_config_fsm(struct work_struct *work)
 {
-       struct sir_dev *dev = data;
+       struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
        struct sir_fsm *fsm = &dev->fsm;
        int next_state;
        int ret = -1;
        fsm->param = param;
        fsm->result = 0;
 
-       INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
-       queue_work(irda_sir_wq, &fsm->work);
+       INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
+       queue_delayed_work(irda_sir_wq, &fsm->work, 0);
        return 0;
 }
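
sir_dev buries its delayed work one level down, in dev->fsm, so the member path handed to container_of() is fsm.work.work: the delayed_work inside the fsm, then the work_struct inside the delayed_work. Reduced to its skeleton (struct names hypothetical):

	struct fsm_like { struct delayed_work work; };
	struct dev_like { struct fsm_like fsm; };

	static void config_fsm(struct work_struct *work)
	{
		struct dev_like *dev =
			container_of(work, struct dev_like, fsm.work.work);
		/* offsetof() accepts the nested member path, so this
		 * is still plain pointer arithmetic */
	}
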
 
 
 
 struct veth_lpar_connection {
        HvLpIndex remote_lp;
-       struct work_struct statemachine_wq;
+       struct delayed_work statemachine_wq;
        struct veth_msg *msgs;
        int num_events;
        struct veth_cap_data local_caps;
 
 static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
 {
-       schedule_work(&cnx->statemachine_wq);
+       schedule_delayed_work(&cnx->statemachine_wq, 0);
 }
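
A struct delayed_work can no longer be fed to schedule_work(), so call sites that want the old immediate behaviour pass a zero delay; in the 2.6.20 implementation queue_delayed_work() short-circuits delay == 0 straight to queue_work(), so no timer is armed. Side by side:

	/* before: plain work_struct, queued immediately */
	schedule_work(&cnx->statemachine_wq);

	/* after: delayed work; delay 0 still queues immediately */
	schedule_delayed_work(&cnx->statemachine_wq, 0);
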
 
 static void veth_take_cap(struct veth_lpar_connection *cnx,
 }
 
 /* FIXME: The gotos here are a bit dubious */
-static void veth_statemachine(void *p)
+static void veth_statemachine(struct work_struct *work)
 {
-       struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p;
+       struct veth_lpar_connection *cnx =
+               container_of(work, struct veth_lpar_connection,
+                            statemachine_wq.work);
        int rlp = cnx->remote_lp;
        int rc;
 
 
        cnx->remote_lp = rlp;
        spin_lock_init(&cnx->lock);
-       INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx);
+       INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine);
 
        init_timer(&cnx->ack_timer);
        cnx->ack_timer.function = veth_timed_ack;
 
 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
 void ixgb_set_ethtool_ops(struct net_device *netdev);
 static void ixgb_tx_timeout(struct net_device *dev);
-static void ixgb_tx_timeout_task(struct net_device *dev);
+static void ixgb_tx_timeout_task(struct work_struct *work);
 static void ixgb_vlan_rx_register(struct net_device *netdev,
                                  struct vlan_group *grp);
 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
        adapter->watchdog_timer.function = &ixgb_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;
 
-       INIT_WORK(&adapter->tx_timeout_task,
-                 (void (*)(void *))ixgb_tx_timeout_task, netdev);
+       INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
 
        strcpy(netdev->name, "eth%d");
        if((err = register_netdev(netdev)))
 }
 
 static void
-ixgb_tx_timeout_task(struct net_device *netdev)
+ixgb_tx_timeout_task(struct work_struct *work)
 {
-       struct ixgb_adapter *adapter = netdev_priv(netdev);
+       struct ixgb_adapter *adapter =
+               container_of(work, struct ixgb_adapter, tx_timeout_task);
 
        adapter->tx_timeout_count++;
        ixgb_down(adapter, TRUE);
 
  * This watchdog is used to check whether the board has suffered
  * from a parity error and needs to be recovered.
  */
-static void myri10ge_watchdog(void *arg)
+static void myri10ge_watchdog(struct work_struct *work)
 {
-       struct myri10ge_priv *mgp = arg;
+       struct myri10ge_priv *mgp =
+               container_of(work, struct myri10ge_priv, watchdog_work);
        u32 reboot;
        int status;
        u16 cmd, vendor;
                    (unsigned long)mgp);
 
        SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
-       INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp);
+       INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
        status = register_netdev(netdev);
        if (status != 0) {
                dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
 
        u8                      __iomem *base;
 
        struct pci_dev          *pci_dev;
+       struct net_device       *ndev;
 
 #ifdef NS83820_VLAN_ACCEL_SUPPORT
        struct vlan_group       *vlgrp;
 }
 
 /* REFILL */
-static inline void queue_refill(void *_dev)
+static inline void queue_refill(struct work_struct *work)
 {
-       struct net_device *ndev = _dev;
-       struct ns83820 *dev = PRIV(ndev);
+       struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
+       struct net_device *ndev = dev->ndev;
 
        rx_refill(ndev, GFP_KERNEL);
        if (dev->rx_info.up)
 
        ndev = alloc_etherdev(sizeof(struct ns83820));
        dev = PRIV(ndev);
        err = -ENOMEM;
        if (!dev)
                goto out;
+       dev->ndev = ndev;
        SET_MODULE_OWNER(ndev);
        SET_NETDEV_DEV(ndev, &pci_dev->dev);
 
-       INIT_WORK(&dev->tq_refill, queue_refill, ndev);
+       INIT_WORK(&dev->tq_refill, queue_refill);
        tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
 
        err = pci_enable_device(pci_dev);
 
  */
 
 typedef struct local_info_t {
+       struct net_device       *dev;
        struct pcmcia_device    *p_dev;
     dev_node_t node;
     struct net_device_stats stats;
  */
 static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void do_tx_timeout(struct net_device *dev);
-static void xirc2ps_tx_timeout_task(void *data);
+static void xirc2ps_tx_timeout_task(struct work_struct *work);
 static struct net_device_stats *do_get_stats(struct net_device *dev);
 static void set_addresses(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
     if (!dev)
            return -ENOMEM;
     local = netdev_priv(dev);
+    local->dev = dev;
     local->p_dev = link;
     link->priv = dev;
 
 #ifdef HAVE_TX_TIMEOUT
     dev->tx_timeout = do_tx_timeout;
     dev->watchdog_timeo = TX_TIMEOUT;
-    INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev);
+    INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task);
 #endif
 
     return xirc2ps_config(link);
 /*====================================================================*/
 
 static void
-xirc2ps_tx_timeout_task(void *data)
+xirc2ps_tx_timeout_task(struct work_struct *work)
 {
-    struct net_device *dev = data;
+    local_info_t *local = container_of(work, local_info_t, tx_timeout_task);
+    struct net_device *dev = local->dev;
     /* reset the card */
     do_reset(dev,1);
     dev->trans_start = jiffies;
 
 EXPORT_SYMBOL(phy_start_aneg);
 
 
-static void phy_change(void *data);
+static void phy_change(struct work_struct *work);
 static void phy_timer(unsigned long data);
 
 /* phy_start_machine:
 {
        int err = 0;
 
-       INIT_WORK(&phydev->phy_queue, phy_change, phydev);
+       INIT_WORK(&phydev->phy_queue, phy_change);
 
        if (request_irq(phydev->irq, phy_interrupt,
                                IRQF_SHARED,
 
 
 /* Scheduled by the phy_interrupt/timer to handle PHY changes */
-static void phy_change(void *data)
+static void phy_change(struct work_struct *work)
 {
        int err;
-       struct phy_device *phydev = data;
+       struct phy_device *phydev =
+               container_of(work, struct phy_device, phy_queue);
 
        err = phy_disable_interrupts(phydev);
 
 
 #define PLIP_NIBBLE_WAIT        3000
 
 /* Bottom halves */
-static void plip_kick_bh(struct net_device *dev);
-static void plip_bh(struct net_device *dev);
-static void plip_timer_bh(struct net_device *dev);
+static void plip_kick_bh(struct work_struct *work);
+static void plip_bh(struct work_struct *work);
+static void plip_timer_bh(struct work_struct *work);
 
 /* Interrupt handler */
 static void plip_interrupt(int irq, void *dev_id);
 
 struct net_local {
        struct net_device_stats enet_stats;
+       struct net_device *dev;
        struct work_struct immediate;
-       struct work_struct deferred;
-       struct work_struct timer;
+       struct delayed_work deferred;
+       struct delayed_work timer;
        struct plip_local snd_data;
        struct plip_local rcv_data;
        struct pardevice *pardev;
        nl->nibble      = PLIP_NIBBLE_WAIT;
 
        /* Initialize task queue structures */
-       INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev);
-       INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev);
+       INIT_WORK(&nl->immediate, plip_bh);
+       INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
 
        if (dev->irq == -1)
-               INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev);
+               INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
 
        spin_lock_init(&nl->lock);
 }
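
plip ends up with both flavours in one struct: immediate stays a plain work_struct while deferred and timer become delayed, so the three handlers differ only in the member path they give container_of(). In isolation:

	struct net_local *nl;

	nl = container_of(work, struct net_local, immediate);      /* plip_bh       */
	nl = container_of(work, struct net_local, deferred.work);  /* plip_kick_bh  */
	nl = container_of(work, struct net_local, timer.work);     /* plip_timer_bh */
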
    This routine is kicked by do_timer().
    Request `plip_bh' to be invoked. */
 static void
-plip_kick_bh(struct net_device *dev)
+plip_kick_bh(struct work_struct *work)
 {
-       struct net_local *nl = netdev_priv(dev);
+       struct net_local *nl =
+               container_of(work, struct net_local, deferred.work);
 
        if (nl->is_deferred)
                schedule_work(&nl->immediate);
 
 /* Bottom half handler of PLIP. */
 static void
-plip_bh(struct net_device *dev)
+plip_bh(struct work_struct *work)
 {
-       struct net_local *nl = netdev_priv(dev);
+       struct net_local *nl = container_of(work, struct net_local, immediate);
        struct plip_local *snd = &nl->snd_data;
        struct plip_local *rcv = &nl->rcv_data;
        plip_func f;
 
        nl->is_deferred = 0;
        f = connection_state_table[nl->connection];
-       if ((r = (*f)(dev, nl, snd, rcv)) != OK
-           && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
+       if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
+           && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
                nl->is_deferred = 1;
                schedule_delayed_work(&nl->deferred, 1);
        }
 }
 
 static void
-plip_timer_bh(struct net_device *dev)
+plip_timer_bh(struct work_struct *work)
 {
-       struct net_local *nl = netdev_priv(dev);
+       struct net_local *nl =
+               container_of(work, struct net_local, timer.work);
 
        if (!(atomic_read (&nl->kill_timer))) {
-               plip_interrupt (-1, dev);
+               plip_interrupt (-1, nl->dev);
 
                schedule_delayed_work(&nl->timer, 1);
        }
                }
 
                nl = netdev_priv(dev);
+               nl->dev = dev;
                nl->pardev = parport_register_device(port, name, plip_preempt,
                                                 plip_wakeup, plip_interrupt,
                                                 0, dev);
 
                               "%s: Another function issued a reset to the "
                               "chip. ISR value = %x.\n", ndev->name, value);
                }
-               queue_work(qdev->workqueue, &qdev->reset_work);
+               queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
                spin_unlock(&qdev->adapter_lock);
        } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
                ql_disable_interrupts(qdev);
        /*
         * Wake up the worker to process this event.
         */
-       queue_work(qdev->workqueue, &qdev->tx_timeout_work);
+       queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
 }
 
-static void ql_reset_work(struct ql3_adapter *qdev)
+static void ql_reset_work(struct work_struct *work)
 {
+       struct ql3_adapter *qdev =
+               container_of(work, struct ql3_adapter, reset_work.work);
        struct net_device *ndev = qdev->ndev;
        u32 value;
        struct ql_tx_buf_cb *tx_cb;
        }
 }
 
-static void ql_tx_timeout_work(struct ql3_adapter *qdev)
+static void ql_tx_timeout_work(struct work_struct *work)
 {
-       ql_cycle_adapter(qdev,QL_DO_RESET);
+       struct ql3_adapter *qdev =
+               container_of(work, struct ql3_adapter, tx_timeout_work.work);
+
+       ql_cycle_adapter(qdev, QL_DO_RESET);
 }
 
 static void ql_get_board_info(struct ql3_adapter *qdev)
        netif_stop_queue(ndev);
 
        qdev->workqueue = create_singlethread_workqueue(ndev->name);
-       INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
-       INIT_WORK(&qdev->tx_timeout_work,
-                 (void (*)(void *))ql_tx_timeout_work, qdev);
+       INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
+       INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
 
        init_timer(&qdev->adapter_timer);
        qdev->adapter_timer.function = ql3xxx_timer;
 
        u32 numPorts;
        struct net_device_stats stats;
        struct workqueue_struct *workqueue;
-       struct work_struct reset_work;
-       struct work_struct tx_timeout_work;
+       struct delayed_work reset_work;
+       struct delayed_work tx_timeout_work;
        u32 max_frame_size;
 };
 
 
 struct rtl8169_private {
        void __iomem *mmio_addr;        /* memory map physical address */
        struct pci_dev *pci_dev;        /* Index of PCI device */
+       struct net_device *dev;
        struct net_device_stats stats;  /* statistics of net device */
        spinlock_t lock;                /* spin lock flag */
        u32 msg_enable;
        void (*phy_reset_enable)(void __iomem *);
        unsigned int (*phy_reset_pending)(void __iomem *);
        unsigned int (*link_ok)(void __iomem *);
-       struct work_struct task;
+       struct delayed_work task;
        unsigned wol_enabled : 1;
 };
 
        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
        tp = netdev_priv(dev);
+       tp->dev = dev;
        tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
 
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
        if (retval < 0)
                goto err_free_rx;
 
-       INIT_WORK(&tp->task, NULL, dev);
+       INIT_DELAYED_WORK(&tp->task, NULL);
 
        rtl8169_hw_start(dev);
 
        tp->cur_tx = tp->dirty_tx = 0;
 }
 
-static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
+static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       PREPARE_WORK(&tp->task, task, dev);
+       PREPARE_DELAYED_WORK(&tp->task, task);
        schedule_delayed_work(&tp->task, 4);
 }
 
        netif_poll_enable(dev);
 }
 
-static void rtl8169_reinit_task(void *_data)
+static void rtl8169_reinit_task(struct work_struct *work)
 {
-       struct net_device *dev = _data;
+       struct rtl8169_private *tp =
+               container_of(work, struct rtl8169_private, task.work);
+       struct net_device *dev = tp->dev;
        int ret;
 
        if (netif_running(dev)) {
        }
 }
 
-static void rtl8169_reset_task(void *_data)
+static void rtl8169_reset_task(struct work_struct *work)
 {
-       struct net_device *dev = _data;
-       struct rtl8169_private *tp = netdev_priv(dev);
+       struct rtl8169_private *tp =
+               container_of(work, struct rtl8169_private, task.work);
+       struct net_device *dev = tp->dev;
 
        if (!netif_running(dev))
                return;
 
  * Description: Sets the link status for the adapter
  */
 
-static void s2io_set_link(unsigned long data)
+static void s2io_set_link(struct work_struct *work)
 {
-       nic_t *nic = (nic_t *) data;
+       nic_t *nic = container_of(work, nic_t, set_link_task);
        struct net_device *dev = nic->dev;
        XENA_dev_config_t __iomem *bar0 = nic->bar0;
        register u64 val64;
  * spin lock.
  */
 
-static void s2io_restart_nic(unsigned long data)
+static void s2io_restart_nic(struct work_struct *work)
 {
-       struct net_device *dev = (struct net_device *) data;
-       nic_t *sp = dev->priv;
+       nic_t *sp = container_of(work, nic_t, rst_timer_task);
+       struct net_device *dev = sp->dev;
 
        s2io_card_down(sp);
        if (s2io_card_up(sp)) {
 
        dev->tx_timeout = &s2io_tx_watchdog;
        dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
-       INIT_WORK(&sp->rst_timer_task,
-                 (void (*)(void *)) s2io_restart_nic, dev);
-       INIT_WORK(&sp->set_link_task,
-                 (void (*)(void *)) s2io_set_link, sp);
+       INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
+       INIT_WORK(&sp->set_link_task, s2io_set_link);
 
        pci_save_state(sp->pdev);
 
 
 static irqreturn_t s2io_isr(int irq, void *dev_id);
 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
 static const struct ethtool_ops netdev_ethtool_ops;
-static void s2io_set_link(unsigned long data);
+static void s2io_set_link(struct work_struct *work);
 static int s2io_set_swapper(nic_t * sp);
 static void s2io_card_down(nic_t *nic);
 static int s2io_card_up(nic_t *nic);
 
 struct sis190_private {
        void __iomem *mmio_addr;
        struct pci_dev *pci_dev;
+       struct net_device *dev;
        struct net_device_stats stats;
        spinlock_t lock;
        u32 rx_buf_sz;
        netif_start_queue(dev);
 }
 
-static void sis190_phy_task(void * data)
+static void sis190_phy_task(struct work_struct *work)
 {
-       struct net_device *dev = data;
-       struct sis190_private *tp = netdev_priv(dev);
+       struct sis190_private *tp =
+               container_of(work, struct sis190_private, phy_task);
+       struct net_device *dev = tp->dev;
        void __iomem *ioaddr = tp->mmio_addr;
        int phy_id = tp->mii_if.phy_id;
        u16 val;
        if (rc < 0)
                goto err_free_rx_1;
 
-       INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+       INIT_WORK(&tp->phy_task, sis190_phy_task);
 
        sis190_request_timer(dev);
 
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        tp = netdev_priv(dev);
+       tp->dev = dev;
        tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
 
        rc = pci_enable_device(pdev);
 
        sis190_init_rxfilter(dev);
 
-       INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+       INIT_WORK(&tp->phy_task, sis190_phy_task);
 
        dev->open = sis190_open;
        dev->stop = sis190_close;
 
  * Since internal PHY is wired to a level triggered pin, can't
  * get an interrupt when carrier is detected.
  */
-static void xm_link_timer(void *arg)
+static void xm_link_timer(struct work_struct *work)
 {
-       struct net_device *dev = arg;
-       struct skge_port *skge = netdev_priv(arg);
+       struct skge_port *skge =
+               container_of(work, struct skge_port, link_thread.work);
+       struct net_device *dev = skge->netdev;
        struct skge_hw *hw = skge->hw;
        int port = skge->port;
 
  * because accessing phy registers requires spin wait which might
  * cause excess interrupt latency.
  */
-static void skge_extirq(void *arg)
+static void skge_extirq(struct work_struct *work)
 {
-       struct skge_hw *hw = arg;
+       struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
        int port;
 
        mutex_lock(&hw->phy_mutex);
        skge->port = port;
 
        /* Only used for Genesis XMAC */
-       INIT_WORK(&skge->link_thread, xm_link_timer, dev);
+       INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
 
        if (hw->chip_id != CHIP_ID_GENESIS) {
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
 
        hw->pdev = pdev;
        mutex_init(&hw->phy_mutex);
-       INIT_WORK(&hw->phy_work, skge_extirq, hw);
+       INIT_WORK(&hw->phy_work, skge_extirq);
        spin_lock_init(&hw->hw_lock);
 
        hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 
 
        struct net_device_stats net_stats;
 
-       struct work_struct   link_thread;
+       struct delayed_work  link_thread;
        enum pause_control   flow_control;
        enum pause_status    flow_status;
        u8                   rx_csum;
 
  * called as task when tx hangs, resets interface (if interface is up)
  */
 static void
-spider_net_tx_timeout_task(void *data)
+spider_net_tx_timeout_task(struct work_struct *work)
 {
-       struct net_device *netdev = data;
-       struct spider_net_card *card = netdev_priv(netdev);
+       struct spider_net_card *card =
+               container_of(work, struct spider_net_card, tx_timeout_task);
+       struct net_device *netdev = card->netdev;
 
        if (!(netdev->flags & IFF_UP))
                goto out;
        card = netdev_priv(netdev);
        card->netdev = netdev;
        card->msg_enable = SPIDER_NET_DEFAULT_MSG;
-       INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev);
+       INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
        init_waitqueue_head(&card->waitq);
        atomic_set(&card->tx_timeout_task_counter, 0);
 
 
        }
 }
 
-static void gem_reset_task(void *data)
+static void gem_reset_task(struct work_struct *work)
 {
-       struct gem *gp = (struct gem *) data;
+       struct gem *gp = container_of(work, struct gem, reset_task);
 
        mutex_lock(&gp->pm_mutex);
 
        gp->link_timer.function = gem_link_timer;
        gp->link_timer.data = (unsigned long) gp;
 
-       INIT_WORK(&gp->reset_task, gem_reset_task, gp);
+       INIT_WORK(&gp->reset_task, gem_reset_task);
 
        gp->lstate = link_down;
        gp->timer_ticks = 0;
 
 }
 #endif
 
-static void tg3_reset_task(void *_data)
+static void tg3_reset_task(struct work_struct *work)
 {
-       struct tg3 *tp = _data;
+       struct tg3 *tp = container_of(work, struct tg3, reset_task);
        unsigned int restart_timer;
 
        tg3_full_lock(tp, 0);
 #endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->indirect_lock);
-       INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
+       INIT_WORK(&tp->reset_task, tg3_reset_task);
 
        tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
        if (tp->regs == 0UL) {
 
 static int     TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
 static int      TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent);
 static void    TLan_tx_timeout( struct net_device *dev);
+static void    TLan_tx_timeout_work(struct work_struct *work);
 static int     tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
 
 static u32     TLan_HandleInvalid( struct net_device *, u16 );
        priv = netdev_priv(dev);
 
        priv->pciDev = pdev;
+       priv->dev = dev;
 
        /* Is this a PCI device? */
        if (pdev) {
 
        /* This will be used when we get an adapter error from
         * within our irq handler */
-       INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev);
+       INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
 
        spin_lock_init(&priv->lock);
 
 }
 
 
+       /***************************************************************
+        *      TLan_tx_timeout_work
+        *
+        *      Returns: nothing
+        *
+        *      Params:
+        *              work    work item of device which timed out
+        *
+        **************************************************************/
+
+static void TLan_tx_timeout_work(struct work_struct *work)
+{
+       TLanPrivateInfo *priv =
+               container_of(work, TLanPrivateInfo, tlan_tqueue);
+
+       TLan_tx_timeout(priv->dev);
+}
+
+
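
The old TLan code forced TLan_tx_timeout(), a net_device handler, into the work slot with a double cast; under the typed work_func_t prototype that no longer compiles (and calling through a mismatched function type was undefined behaviour all along), hence the shim above. Its generic shape, with hypothetical names:

	static void foo_tx_timeout_work(struct work_struct *work)
	{
		struct foo_priv *priv =
			container_of(work, struct foo_priv, tqueue);

		foo_tx_timeout(priv->dev);  /* reuse the netdev-based handler */
	}
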
 
        /***************************************************************
         *      TLan_StartTx
 
 typedef struct tlan_private_tag {
        struct net_device       *nextDevice;
        struct pci_dev          *pciDev;
+       struct net_device       *dev;
        void                    *dmaStorage;
        dma_addr_t              dmaStorageDMA;
        unsigned int            dmaSize;
 
 
 /* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
    of available transceivers.  */
-void t21142_media_task(void *data)
+void t21142_media_task(struct work_struct *work)
 {
-       struct net_device *dev = data;
-       struct tulip_private *tp = netdev_priv(dev);
+       struct tulip_private *tp =
+               container_of(work, struct tulip_private, media_work);
+       struct net_device *dev = tp->dev;
        void __iomem *ioaddr = tp->base_addr;
        int csr12 = ioread32(ioaddr + CSR12);
        int next_tick = 60*HZ;
 
 #include "tulip.h"
 
 
-void tulip_media_task(void *data)
+void tulip_media_task(struct work_struct *work)
 {
-       struct net_device *dev = data;
-       struct tulip_private *tp = netdev_priv(dev);
+       struct tulip_private *tp =
+               container_of(work, struct tulip_private, media_work);
+       struct net_device *dev = tp->dev;
        void __iomem *ioaddr = tp->base_addr;
        u32 csr12 = ioread32(ioaddr + CSR12);
        int next_tick = 2*HZ;
 
        int valid_intrs;        /* CSR7 interrupt enable settings */
        int flags;
        void (*media_timer) (unsigned long);
-       void (*media_task) (void *);
+       work_func_t media_task;
 };
 
 
        int csr12_shadow;
        int pad0;               /* Used for 8-byte alignment */
        struct work_struct media_work;
+       struct net_device *dev;
 };
 
 
 
 /* 21142.c */
 extern u16 t21142_csr14[];
-void t21142_media_task(void *data);
+void t21142_media_task(struct work_struct *work);
 void t21142_start_nway(struct net_device *dev);
 void t21142_lnk_change(struct net_device *dev, int csr5);
 
 void pnic_timer(unsigned long data);
 
 /* timer.c */
-void tulip_media_task(void *data);
+void tulip_media_task(struct work_struct *work);
 void mxic_timer(unsigned long data);
 void comet_timer(unsigned long data);
 
 
         * it is zeroed and aligned in alloc_etherdev
         */
        tp = netdev_priv(dev);
+       tp->dev = dev;
 
        tp->rx_ring = pci_alloc_consistent(pdev,
                                           sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
        tp->timer.data = (unsigned long)dev;
        tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
 
-       INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev);
+       INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
 
        dev->base_addr = (unsigned long)ioaddr;
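
tulip selects its media task per chip from tulip_tbl[], so the table slot changes type from void (*)(void *) to work_func_t and INIT_WORK takes the entry directly; each task then recovers its tulip_private with container_of(). The table side, reduced to a sketch (entries hypothetical):

	struct chip_table_like {
		work_func_t media_task;	/* typed slot, was void (*)(void *) */
	};

	static const struct chip_table_like tbl[] = {
		{ .media_task = tulip_media_task },
		{ .media_task = t21142_media_task },
	};

		INIT_WORK(&tp->media_work, tbl[tp->chip_id].media_task);
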
 
 
 static int cpc_tty_chars_in_buffer(struct tty_struct *tty);
 static void cpc_tty_flush_buffer(struct tty_struct *tty);
 static void cpc_tty_hangup(struct tty_struct *tty);
-static void cpc_tty_rx_work(void *data);
-static void cpc_tty_tx_work(void *data);
+static void cpc_tty_rx_work(struct work_struct *work);
+static void cpc_tty_tx_work(struct work_struct *work);
 static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len);
 static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
 static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
        cpc_tty->tty_minor = port + CPC_TTY_MINOR_START;
        cpc_tty->pc300dev = pc300dev; 
 
-       INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty);
-       INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port);
+       INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work);
+       INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work);
        
        cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL;
 
  * o call the line disc. read
  * o free memory
  */
-static void cpc_tty_rx_work(void * data)
+static void cpc_tty_rx_work(struct work_struct *work)
 {
+       st_cpc_tty_area *cpc_tty;
        unsigned long port;
        int i, j;
-       st_cpc_tty_area *cpc_tty; 
        volatile st_cpc_rx_buf *buf;
        char flags=0,flg_rx=1; 
        struct tty_ldisc *ld;
 
        if (cpc_tty_cnt == 0) return;
-
        
        for (i=0; (i < 4) && flg_rx ; i++) {
                flg_rx = 0;
-               port = (unsigned long)data;
+
+               cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work);
+               port = cpc_tty - cpc_tty_area;
+
                for (j=0; j < CPC_TTY_NPORTS; j++) {
                        cpc_tty = &cpc_tty_area[port];
                
  * o if need call line discipline wakeup
  * o call wake_up_interruptible
  */
-static void cpc_tty_tx_work(void *data)
+static void cpc_tty_tx_work(struct work_struct *work)
 {
-       st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data; 
+       st_cpc_tty_area *cpc_tty =
+               container_of(work, st_cpc_tty_area, tty_tx_work);
        struct tty_struct *tty; 
 
        CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name);
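
In the rx path above the worker no longer gets the port number through the data pointer; cpc_tty_area is a static array, so the index falls out of pointer arithmetic on the container_of() result. The two lines in isolation:

	st_cpc_tty_area *cpc_tty =
		container_of(work, st_cpc_tty_area, tty_rx_work);
	unsigned long port = cpc_tty - cpc_tty_area;	/* element index */
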
 
        struct tasklet_struct isr_tasklet;
 
        /* Periodic tasks */
-       struct work_struct periodic_work;
+       struct delayed_work periodic_work;
        unsigned int periodic_state;
 
        struct work_struct restart_work;
 
        return badness;
 }
 
-static void bcm43xx_periodic_work_handler(void *d)
+static void bcm43xx_periodic_work_handler(struct work_struct *work)
 {
-       struct bcm43xx_private *bcm = d;
+       struct bcm43xx_private *bcm =
+               container_of(work, struct bcm43xx_private, periodic_work.work);
        struct net_device *net_dev = bcm->net_dev;
        unsigned long flags;
        u32 savedirqs = 0;
 
 void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
 {
-       struct work_struct *work = &(bcm->periodic_work);
+       struct delayed_work *work = &bcm->periodic_work;
 
        assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
-       INIT_WORK(work, bcm43xx_periodic_work_handler, bcm);
-       schedule_work(work);
+       INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler);
+       schedule_delayed_work(work, 0);
 }
 
 static void bcm43xx_security_init(struct bcm43xx_private *bcm)
        bcm43xx_periodic_tasks_setup(bcm);
 
        /*FIXME: This should be handled by softmac instead. */
-       schedule_work(&bcm->softmac->associnfo.work);
+       schedule_delayed_work(&bcm->softmac->associnfo.work, 0);
 
 out:
        mutex_unlock(&(bcm)->mutex);
 /* Hard-reset the chip. Do not call this directly.
  * Use bcm43xx_controller_restart()
  */
-static void bcm43xx_chip_reset(void *_bcm)
+static void bcm43xx_chip_reset(struct work_struct *work)
 {
-       struct bcm43xx_private *bcm = _bcm;
+       struct bcm43xx_private *bcm =
+               container_of(work, struct bcm43xx_private, restart_work);
        struct bcm43xx_phyinfo *phy;
        int err = -ENODEV;
 
        if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
                return;
        printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
-       INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm);
+       INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset);
        schedule_work(&bcm->restart_work);
 }
 
 
 struct net_device_stats *hostap_get_stats(struct net_device *dev);
 void hostap_setup_dev(struct net_device *dev, local_info_t *local,
                      int main_dev);
-void hostap_set_multicast_list_queue(void *data);
+void hostap_set_multicast_list_queue(struct work_struct *work);
 int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked);
 int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked);
 void hostap_cleanup(local_info_t *local);
 
 static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta);
 static void hostap_event_expired_sta(struct net_device *dev,
                                     struct sta_info *sta);
-static void handle_add_proc_queue(void *data);
+static void handle_add_proc_queue(struct work_struct *work);
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void handle_wds_oper_queue(void *data);
+static void handle_wds_oper_queue(struct work_struct *work);
 static void prism2_send_mgmt(struct net_device *dev,
                             u16 type_subtype, char *body,
                             int body_len, u8 *addr, u16 tx_cb_idx);
        INIT_LIST_HEAD(&ap->sta_list);
 
        /* Initialize task queue structure for AP management */
-       INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap);
+       INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue);
 
        ap->tx_callback_idx =
                hostap_tx_callback_register(local, hostap_ap_tx_cb, ap);
                printk(KERN_WARNING "%s: failed to register TX callback for "
                       "AP\n", local->dev->name);
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-       INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local);
+       INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue);
 
        ap->tx_callback_auth =
                hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap);
 }
 
 
-static void handle_add_proc_queue(void *data)
+static void handle_add_proc_queue(struct work_struct *work)
 {
-       struct ap_data *ap = (struct ap_data *) data;
+       struct ap_data *ap = container_of(work, struct ap_data,
+                                         add_sta_proc_queue);
        struct sta_info *sta;
        char name[20];
        struct add_sta_proc_data *entry, *prev;
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
 
-static void handle_wds_oper_queue(void *data)
+static void handle_wds_oper_queue(struct work_struct *work)
 {
-       local_info_t *local = data;
+       struct ap_data *ap = container_of(work, struct ap_data,
+                                         wds_oper_queue);
+       local_info_t *local = ap->local;
        struct wds_oper_data *entry, *prev;
 
        spin_lock_bh(&local->lock);
 
 
 /* Called only as scheduled task after noticing card timeout in interrupt
  * context */
-static void handle_reset_queue(void *data)
+static void handle_reset_queue(struct work_struct *work)
 {
-       local_info_t *local = (local_info_t *) data;
+       local_info_t *local = container_of(work, local_info_t, reset_queue);
 
        printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name);
        prism2_hw_reset(local->dev);
 
 /* Called only as a scheduled task when communications quality values should
  * be updated. */
-static void handle_comms_qual_update(void *data)
+static void handle_comms_qual_update(struct work_struct *work)
 {
-       local_info_t *local = data;
+       local_info_t *local =
+               container_of(work, local_info_t, comms_qual_update);
        prism2_update_comms_qual(local->dev);
 }
 
 }
 
 
-static void handle_set_tim_queue(void *data)
+static void handle_set_tim_queue(struct work_struct *work)
 {
-       local_info_t *local = (local_info_t *) data;
+       local_info_t *local = container_of(work, local_info_t, set_tim_queue);
        struct set_tim_data *entry;
        u16 val;
 
        local->scan_channel_mask = 0xffff;
 
        /* Initialize task queue structures */
-       INIT_WORK(&local->reset_queue, handle_reset_queue, local);
+       INIT_WORK(&local->reset_queue, handle_reset_queue);
        INIT_WORK(&local->set_multicast_list_queue,
-                 hostap_set_multicast_list_queue, local->dev);
+                 hostap_set_multicast_list_queue);
 
-       INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local);
+       INIT_WORK(&local->set_tim_queue, handle_set_tim_queue);
        INIT_LIST_HEAD(&local->set_tim_list);
        spin_lock_init(&local->set_tim_lock);
 
-       INIT_WORK(&local->comms_qual_update, handle_comms_qual_update, local);
+       INIT_WORK(&local->comms_qual_update, handle_comms_qual_update);
 
        /* Initialize tasklets for handling hardware IRQ related operations
         * outside hw IRQ handler */
 
 
 /* Called only as scheduled task after receiving info frames (used to avoid
  * pending too much time in HW IRQ handler). */
-static void handle_info_queue(void *data)
+static void handle_info_queue(struct work_struct *work)
 {
-       local_info_t *local = (local_info_t *) data;
+       local_info_t *local = container_of(work, local_info_t, info_queue);
 
        if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS,
                               &local->pending_info))
 {
        skb_queue_head_init(&local->info_list);
 #ifndef PRISM2_NO_STATION_MODES
-       INIT_WORK(&local->info_queue, handle_info_queue, local);
+       INIT_WORK(&local->info_queue, handle_info_queue);
 #endif /* PRISM2_NO_STATION_MODES */
 }
 
 
 
 /* TODO: to be further implemented as soon as Prism2 fully supports
  *       GroupAddresses and correct documentation is available */
-void hostap_set_multicast_list_queue(void *data)
+void hostap_set_multicast_list_queue(struct work_struct *work)
 {
-       struct net_device *dev = (struct net_device *) data;
+       local_info_t *local =
+               container_of(work, local_info_t, set_multicast_list_queue);
+       struct net_device *dev = local->dev;
        struct hostap_interface *iface;
-       local_info_t *local;
 
        iface = netdev_priv(dev);
-       local = iface->local;
        if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
                            local->is_promisc)) {
                printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
 
                                     struct ipw2100_fw *fw);
 static int ipw2100_ucode_download(struct ipw2100_priv *priv,
                                  struct ipw2100_fw *fw);
-static void ipw2100_wx_event_work(struct ipw2100_priv *priv);
+static void ipw2100_wx_event_work(struct work_struct *work);
 static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev);
 static struct iw_handler_def ipw2100_wx_handler_def;
 
                        queue_delayed_work(priv->workqueue, &priv->reset_work,
                                           priv->reset_backoff * HZ);
                else
-                       queue_work(priv->workqueue, &priv->reset_work);
+                       queue_delayed_work(priv->workqueue, &priv->reset_work,
+                                          0);
 
                if (priv->reset_backoff < MAX_RESET_BACKOFF)
                        priv->reset_backoff++;
        netif_stop_queue(priv->net_dev);
 }
 
-static void ipw2100_reset_adapter(struct ipw2100_priv *priv)
+static void ipw2100_reset_adapter(struct work_struct *work)
 {
+       struct ipw2100_priv *priv =
+               container_of(work, struct ipw2100_priv, reset_work.work);
        unsigned long flags;
        union iwreq_data wrqu = {
                .ap_addr = {
                return;
 
        if (priv->status & STATUS_SECURITY_UPDATED)
-               queue_work(priv->workqueue, &priv->security_work);
+               queue_delayed_work(priv->workqueue, &priv->security_work, 0);
 
-       queue_work(priv->workqueue, &priv->wx_event_work);
+       queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
 }
 
 static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
        return err;
 }
 
-static void ipw2100_security_work(struct ipw2100_priv *priv)
+static void ipw2100_security_work(struct work_struct *work)
 {
+       struct ipw2100_priv *priv =
+               container_of(work, struct ipw2100_priv, security_work.work);
+
        /* If we happen to have reconnected before we get a chance to
         * process this, then update the security settings--which causes
         * a disassociation to occur */
 
        priv->reset_backoff = 0;
        mutex_unlock(&priv->action_mutex);
-       ipw2100_reset_adapter(priv);
+       ipw2100_reset_adapter(&priv->reset_work.work);
        return 0;
 
       done:
        .get_drvinfo = ipw_ethtool_get_drvinfo,
 };
 
-static void ipw2100_hang_check(void *adapter)
+static void ipw2100_hang_check(struct work_struct *work)
 {
-       struct ipw2100_priv *priv = adapter;
+       struct ipw2100_priv *priv =
+               container_of(work, struct ipw2100_priv, hang_check.work);
        unsigned long flags;
        u32 rtc = 0xa5a5a5a5;
        u32 len = sizeof(rtc);
        spin_unlock_irqrestore(&priv->low_lock, flags);
 }
 
-static void ipw2100_rf_kill(void *adapter)
+static void ipw2100_rf_kill(struct work_struct *work)
 {
-       struct ipw2100_priv *priv = adapter;
+       struct ipw2100_priv *priv =
+               container_of(work, struct ipw2100_priv, rf_kill.work);
        unsigned long flags;
 
        spin_lock_irqsave(&priv->low_lock, flags);
 
        priv->workqueue = create_workqueue(DRV_NAME);
 
-       INIT_WORK(&priv->reset_work,
-                 (void (*)(void *))ipw2100_reset_adapter, priv);
-       INIT_WORK(&priv->security_work,
-                 (void (*)(void *))ipw2100_security_work, priv);
-       INIT_WORK(&priv->wx_event_work,
-                 (void (*)(void *))ipw2100_wx_event_work, priv);
-       INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv);
-       INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv);
+       INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
+       INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
+       INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
+       INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
+       INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
 
        tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
                     ipw2100_irq_tasklet, (unsigned long)priv);
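
Making all five ipw2100 items delayed has two knock-on effects visible above: every queue_work() on them becomes queue_delayed_work(..., 0), and the one synchronous call in the resume path must pass the embedded member so the handler's container_of() still resolves to priv. Both, in isolation:

	/* asynchronous: zero delay queues without arming the timer */
	queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);

	/* synchronous: hand the handler the embedded work_struct */
	ipw2100_reset_adapter(&priv->reset_work.work);
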
        .get_wireless_stats = ipw2100_wx_wireless_stats,
 };
 
-static void ipw2100_wx_event_work(struct ipw2100_priv *priv)
+static void ipw2100_wx_event_work(struct work_struct *work)
 {
+       struct ipw2100_priv *priv =
+               container_of(work, struct ipw2100_priv, wx_event_work.work);
        union iwreq_data wrqu;
        int len = ETH_ALEN;
 
 
        struct tasklet_struct irq_tasklet;
 
        struct workqueue_struct *workqueue;
-       struct work_struct reset_work;
-       struct work_struct security_work;
-       struct work_struct wx_event_work;
-       struct work_struct hang_check;
-       struct work_struct rf_kill;
+       struct delayed_work reset_work;
+       struct delayed_work security_work;
+       struct delayed_work wx_event_work;
+       struct delayed_work hang_check;
+       struct delayed_work rf_kill;
 
        u32 interrupts;
        int tx_interrupts;
 
 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
 static void ipw_rx_queue_replenish(void *);
 static int ipw_up(struct ipw_priv *);
-static void ipw_bg_up(void *);
+static void ipw_bg_up(struct work_struct *work);
 static void ipw_down(struct ipw_priv *);
-static void ipw_bg_down(void *);
+static void ipw_bg_down(struct work_struct *work);
 static int ipw_config(struct ipw_priv *);
 static int init_supported_rates(struct ipw_priv *priv,
                                struct ipw_supported_rates *prates);
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_led_link_on(void *data)
+static void ipw_bg_led_link_on(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, led_link_on.work);
        mutex_lock(&priv->mutex);
-       ipw_led_link_on(data);
+       ipw_led_link_on(priv);
        mutex_unlock(&priv->mutex);
 }
 
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_led_link_off(void *data)
+static void ipw_bg_led_link_off(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, led_link_off.work);
        mutex_lock(&priv->mutex);
-       ipw_led_link_off(data);
+       ipw_led_link_off(priv);
        mutex_unlock(&priv->mutex);
 }
 
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_led_activity_off(void *data)
+static void ipw_bg_led_activity_off(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, led_act_off.work);
        mutex_lock(&priv->mutex);
-       ipw_led_activity_off(data);
+       ipw_led_activity_off(priv);
        mutex_unlock(&priv->mutex);
 }
 
        }
 }
 
-static void ipw_bg_adapter_restart(void *data)
+static void ipw_bg_adapter_restart(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, adapter_restart);
        mutex_lock(&priv->mutex);
-       ipw_adapter_restart(data);
+       ipw_adapter_restart(priv);
        mutex_unlock(&priv->mutex);
 }
 
        }
 }
 
-static void ipw_bg_scan_check(void *data)
+static void ipw_bg_scan_check(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, scan_check.work);
        mutex_lock(&priv->mutex);
-       ipw_scan_check(data);
+       ipw_scan_check(priv);
        mutex_unlock(&priv->mutex);
 }
 
        return 1;
 }
 
-static void ipw_bg_disassociate(void *data)
+static void ipw_bg_disassociate(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, disassociate);
        mutex_lock(&priv->mutex);
-       ipw_disassociate(data);
+       ipw_disassociate(priv);
        mutex_unlock(&priv->mutex);
 }
 
-static void ipw_system_config(void *data)
+static void ipw_system_config(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, system_config);
 
 #ifdef CONFIG_IPW2200_PROMISCUOUS
        if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
                           IPW_STATS_INTERVAL);
 }
 
-static void ipw_bg_gather_stats(void *data)
+static void ipw_bg_gather_stats(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, gather_stats.work);
        mutex_lock(&priv->mutex);
-       ipw_gather_stats(data);
+       ipw_gather_stats(priv);
        mutex_unlock(&priv->mutex);
 }
 
                if (!(priv->status & STATUS_ROAMING)) {
                        priv->status |= STATUS_ROAMING;
                        if (!(priv->status & STATUS_SCANNING))
-                               queue_work(priv->workqueue,
-                                          &priv->request_scan);
+                               queue_delayed_work(priv->workqueue,
+                                                  &priv->request_scan, 0);
                }
                return;
        }
 #ifdef CONFIG_IPW2200_MONITOR
                        if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
                                priv->status |= STATUS_SCAN_FORCED;
-                               queue_work(priv->workqueue,
-                                          &priv->request_scan);
+                               queue_delayed_work(priv->workqueue,
+                                                  &priv->request_scan, 0);
                                break;
                        }
                        priv->status &= ~STATUS_SCAN_FORCED;
                                        /* Don't schedule if we aborted the scan */
                                        priv->status &= ~STATUS_ROAMING;
                        } else if (priv->status & STATUS_SCAN_PENDING)
-                               queue_work(priv->workqueue,
-                                          &priv->request_scan);
+                               queue_delayed_work(priv->workqueue,
+                                                  &priv->request_scan, 0);
                        else if (priv->config & CFG_BACKGROUND_SCAN
                                 && priv->status & STATUS_ASSOCIATED)
                                queue_delayed_work(priv->workqueue,
        ipw_rx_queue_restock(priv);
 }
 
-static void ipw_bg_rx_queue_replenish(void *data)
+static void ipw_bg_rx_queue_replenish(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, rx_replenish);
        mutex_lock(&priv->mutex);
-       ipw_rx_queue_replenish(data);
+       ipw_rx_queue_replenish(priv);
        mutex_unlock(&priv->mutex);
 }
 
        return 1;
 }
 
-static void ipw_merge_adhoc_network(void *data)
+static void ipw_merge_adhoc_network(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, merge_networks);
        struct ieee80211_network *network = NULL;
        struct ipw_network_match match = {
                .network = priv->assoc_network
                           priv->assoc_request.beacon_interval);
 }
 
-static void ipw_bg_adhoc_check(void *data)
+static void ipw_bg_adhoc_check(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, adhoc_check.work);
        mutex_lock(&priv->mutex);
-       ipw_adhoc_check(data);
+       ipw_adhoc_check(priv);
        mutex_unlock(&priv->mutex);
 }
 
        return err;
 }
 
-static int ipw_request_passive_scan(struct ipw_priv *priv) {
-       return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
+static void ipw_request_passive_scan(struct work_struct *work)
+{
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, request_passive_scan);
+       ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
 }
 
-static int ipw_request_scan(struct ipw_priv *priv) {
-       return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
+static void ipw_request_scan(struct work_struct *work)
+{
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, request_scan.work);
+       ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
 }
 
-static void ipw_bg_abort_scan(void *data)
+static void ipw_bg_abort_scan(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, abort_scan);
        mutex_lock(&priv->mutex);
-       ipw_abort_scan(data);
+       ipw_abort_scan(priv);
        mutex_unlock(&priv->mutex);
 }
 
 /*
 * background support to run the QoS activate functionality
 */
-static void ipw_bg_qos_activate(void *data)
+static void ipw_bg_qos_activate(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, qos_activate);
 
        if (priv == NULL)
                return;
        priv->status &= ~STATUS_ROAMING;
 }
 
-static void ipw_bg_roam(void *data)
+static void ipw_bg_roam(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, roam);
        mutex_lock(&priv->mutex);
-       ipw_roam(data);
+       ipw_roam(priv);
        mutex_unlock(&priv->mutex);
 }
 
                                                   &priv->request_scan,
                                                   SCAN_INTERVAL);
                        else
-                               queue_work(priv->workqueue,
-                                          &priv->request_scan);
+                               queue_delayed_work(priv->workqueue,
+                                                  &priv->request_scan, 0);
                }
 
                return 0;
        return 1;
 }
 
-static void ipw_bg_associate(void *data)
+static void ipw_bg_associate(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, associate);
        mutex_lock(&priv->mutex);
-       ipw_associate(data);
+       ipw_associate(priv);
        mutex_unlock(&priv->mutex);
 }
 
 
        IPW_DEBUG_WX("Start scan\n");
 
-       queue_work(priv->workqueue, &priv->request_scan);
+       queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
 
        return 0;
 }
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_rf_kill(void *data)
+static void ipw_bg_rf_kill(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, rf_kill.work);
        mutex_lock(&priv->mutex);
-       ipw_rf_kill(data);
+       ipw_rf_kill(priv);
        mutex_unlock(&priv->mutex);
 }
 
                queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
 }
 
-static void ipw_bg_link_up(void *data)
+static void ipw_bg_link_up(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, link_up);
        mutex_lock(&priv->mutex);
-       ipw_link_up(data);
+       ipw_link_up(priv);
        mutex_unlock(&priv->mutex);
 }
 
 
        if (!(priv->status & STATUS_EXIT_PENDING)) {
                /* Queue up another scan... */
-               queue_work(priv->workqueue, &priv->request_scan);
+               queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
        }
 }
 
-static void ipw_bg_link_down(void *data)
+static void ipw_bg_link_down(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, link_down);
        mutex_lock(&priv->mutex);
-       ipw_link_down(data);
+       ipw_link_down(priv);
        mutex_unlock(&priv->mutex);
 }
 
        init_waitqueue_head(&priv->wait_command_queue);
        init_waitqueue_head(&priv->wait_state);
 
-       INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
-       INIT_WORK(&priv->associate, ipw_bg_associate, priv);
-       INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
-       INIT_WORK(&priv->system_config, ipw_system_config, priv);
-       INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
-       INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
-       INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
-       INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
-       INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
-       INIT_WORK(&priv->request_scan,
-                 (void (*)(void *))ipw_request_scan, priv);
-       INIT_WORK(&priv->request_passive_scan,
-                 (void (*)(void *))ipw_request_passive_scan, priv);
-       INIT_WORK(&priv->gather_stats,
-                 (void (*)(void *))ipw_bg_gather_stats, priv);
-       INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
-       INIT_WORK(&priv->roam, ipw_bg_roam, priv);
-       INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
-       INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
-       INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
-       INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
-                 priv);
-       INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
-                 priv);
-       INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
-                 priv);
-       INIT_WORK(&priv->merge_networks,
-                 (void (*)(void *))ipw_merge_adhoc_network, priv);
+       INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
+       INIT_WORK(&priv->associate, ipw_bg_associate);
+       INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
+       INIT_WORK(&priv->system_config, ipw_system_config);
+       INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
+       INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
+       INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
+       INIT_WORK(&priv->up, ipw_bg_up);
+       INIT_WORK(&priv->down, ipw_bg_down);
+       INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
+       INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
+       INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
+       INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
+       INIT_WORK(&priv->roam, ipw_bg_roam);
+       INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
+       INIT_WORK(&priv->link_up, ipw_bg_link_up);
+       INIT_WORK(&priv->link_down, ipw_bg_link_down);
+       INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
+       INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
+       INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
+       INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
 
 #ifdef CONFIG_IPW2200_QOS
-       INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
-                 priv);
+       INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
 #endif                         /* CONFIG_IPW2200_QOS */
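
Note that all of the (void (*)(void *)) casts in the old INIT_WORK() block
disappear: INIT_WORK() now expects the kernel's work_func_t, so a handler
with the wrong prototype is a compile-time diagnostic rather than a latent
runtime crash. Illustrative sketch:

    #include <linux/workqueue.h>

    /* work_func_t is: typedef void (*work_func_t)(struct work_struct *); */

    static void right_handler(struct work_struct *work)
    {
    }

    static void setup(struct work_struct *w)
    {
            INIT_WORK(w, right_handler);    /* signature checked by compiler */
    }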
 
        tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
 
                        /* If configure to try and auto-associate, kick
                         * off a scan. */
-                       queue_work(priv->workqueue, &priv->request_scan);
+                       queue_delayed_work(priv->workqueue,
+                                          &priv->request_scan, 0);
 
                        return 0;
                }
        return -EIO;
 }
 
-static void ipw_bg_up(void *data)
+static void ipw_bg_up(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, up);
        mutex_lock(&priv->mutex);
-       ipw_up(data);
+       ipw_up(priv);
        mutex_unlock(&priv->mutex);
 }
 
        ipw_led_radio_off(priv);
 }
 
-static void ipw_bg_down(void *data)
+static void ipw_bg_down(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, down);
        mutex_lock(&priv->mutex);
-       ipw_down(data);
+       ipw_down(priv);
        mutex_unlock(&priv->mutex);
 }
 
 
 
        struct workqueue_struct *workqueue;
 
-       struct work_struct adhoc_check;
+       struct delayed_work adhoc_check;
        struct work_struct associate;
        struct work_struct disassociate;
        struct work_struct system_config;
        struct work_struct rx_replenish;
-       struct work_struct request_scan;
+       struct delayed_work request_scan;
        struct work_struct request_passive_scan;
        struct work_struct adapter_restart;
-       struct work_struct rf_kill;
+       struct delayed_work rf_kill;
        struct work_struct up;
        struct work_struct down;
-       struct work_struct gather_stats;
+       struct delayed_work gather_stats;
        struct work_struct abort_scan;
        struct work_struct roam;
-       struct work_struct scan_check;
+       struct delayed_work scan_check;
        struct work_struct link_up;
        struct work_struct link_down;
 
        u32 led_ofdm_on;
        u32 led_ofdm_off;
 
-       struct work_struct led_link_on;
-       struct work_struct led_link_off;
-       struct work_struct led_act_off;
+       struct delayed_work led_link_on;
+       struct delayed_work led_link_off;
+       struct delayed_work led_act_off;
        struct work_struct merge_networks;
 
        struct ipw_cmd_log *cmdlog;
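
The field conversions in struct ipw_priv above follow one rule, repeated in
every driver below: any work item that is ever queued with a delay becomes a
struct delayed_work, which simply pairs the work item with the timer that
drives it (simplified from <linux/workqueue.h>):

    struct delayed_work {
            struct work_struct work;        /* the actual work item */
            struct timer_list timer;        /* fires to queue 'work' */
    };

This embedding is why handlers for delayed items use container_of() with a
".work" member path, and why a few drivers below touch the ".timer" member
directly.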
 
 }
 
 /* Search scan results for requested BSSID, join it if found */
-static void orinoco_join_ap(struct net_device *dev)
+static void orinoco_join_ap(struct work_struct *work)
 {
-       struct orinoco_private *priv = netdev_priv(dev);
+       struct orinoco_private *priv =
+               container_of(work, struct orinoco_private, join_work);
+       struct net_device *dev = priv->ndev;
        struct hermes *hw = &priv->hw;
        int err;
        unsigned long flags;
 }
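
In the orinoco handlers the old data cookie was the net_device, not the
private struct that embeds the work items, so each converted handler does
container_of() to the private struct and then follows a back-pointer
(priv->ndev, set up elsewhere in the driver) to recover the net_device. The
general shape, with hypothetical names:

    #include <linux/netdevice.h>
    #include <linux/workqueue.h>

    struct wifi_priv {
            struct net_device *ndev;        /* back-pointer, set at probe */
            struct work_struct reset_work;
    };

    static void wifi_reset(struct work_struct *work)
    {
            struct wifi_priv *priv =
                    container_of(work, struct wifi_priv, reset_work);
            struct net_device *dev = priv->ndev;    /* the old 'data' value */

            netif_carrier_off(dev);         /* illustrative use */
    }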
 
 /* Send new BSSID to userspace */
-static void orinoco_send_wevents(struct net_device *dev)
+static void orinoco_send_wevents(struct work_struct *work)
 {
-       struct orinoco_private *priv = netdev_priv(dev);
+       struct orinoco_private *priv =
+               container_of(work, struct orinoco_private, wevent_work);
+       struct net_device *dev = priv->ndev;
        struct hermes *hw = &priv->hw;
        union iwreq_data wrqu;
        int err;
 
 /* This must be called from user context, without locks held - use
  * schedule_work() */
-static void orinoco_reset(struct net_device *dev)
+static void orinoco_reset(struct work_struct *work)
 {
-       struct orinoco_private *priv = netdev_priv(dev);
+       struct orinoco_private *priv =
+               container_of(work, struct orinoco_private, reset_work);
+       struct net_device *dev = priv->ndev;
        struct hermes *hw = &priv->hw;
        int err;
        unsigned long flags;
        priv->hw_unavailable = 1; /* orinoco_init() must clear this
                                   * before anything else touches the
                                   * hardware */
-       INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev);
-       INIT_WORK(&priv->join_work, (void (*)(void *))orinoco_join_ap, dev);
-       INIT_WORK(&priv->wevent_work, (void (*)(void *))orinoco_send_wevents, dev);
+       INIT_WORK(&priv->reset_work, orinoco_reset);
+       INIT_WORK(&priv->join_work, orinoco_join_ap);
+       INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
 
        netif_carrier_off(dev);
        priv->last_linkstatus = 0xffff;
                printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name);
 
                /* Firmware reset */
-               orinoco_reset(dev);
+               orinoco_reset(&priv->reset_work);
        } else {
                printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
 
                return 0;
 
        if (priv->broken_disableport) {
-               orinoco_reset(dev);
+               orinoco_reset(&priv->reset_work);
                return 0;
        }
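
Where a work handler is also invoked synchronously, as orinoco_reset() is
here, the caller now passes the address of the embedded work item and lets
the handler do its own container_of(). Continuing the hypothetical wifi_priv
sketch above:

    static void force_reset(struct wifi_priv *priv)
    {
            /* Same function the workqueue would run, called directly;
             * it recovers 'priv' from the embedded work item itself. */
            wifi_reset(&priv->reset_work);
    }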
 
 
  * schedule_work(), thus we can as well use sleeping semaphore
  * locking */
 void
-prism54_update_stats(islpci_private *priv)
+prism54_update_stats(struct work_struct *work)
 {
+       islpci_private *priv = container_of(work, islpci_private, stats_work);
        char *data;
        int j;
        struct obj_bss bss, *bss2;
  * interrupt context, no locks held.
  */
 void
-prism54_process_trap(void *data)
+prism54_process_trap(struct work_struct *work)
 {
-       struct islpci_mgmtframe *frame = data;
+       struct islpci_mgmtframe *frame =
+               container_of(work, struct islpci_mgmtframe, ws);
        struct net_device *ndev = frame->ndev;
        enum oid_num_t n = mgt_oidtonum(frame->header->oid);
 
 
 void prism54_mib_init(islpci_private *);
 
 struct iw_statistics *prism54_get_wireless_stats(struct net_device *);
-void prism54_update_stats(islpci_private *);
+void prism54_update_stats(struct work_struct *);
 
 void prism54_acl_init(struct islpci_acl *);
 void prism54_acl_clean(struct islpci_acl *);
 
-void prism54_process_trap(void *);
+void prism54_process_trap(struct work_struct *);
 
 void prism54_wpa_bss_ie_init(islpci_private *priv);
 void prism54_wpa_bss_ie_clean(islpci_private *priv);
 
        priv->state_off = 1;
 
        /* initialize workqueues */
-       INIT_WORK(&priv->stats_work,
-                 (void (*)(void *)) prism54_update_stats, priv);
+       INIT_WORK(&priv->stats_work, prism54_update_stats);
        priv->stats_timestamp = 0;
 
-       INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv);
+       INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
        priv->reset_task_pending = 0;
 
        /* allocate various memory areas */
 
 }
 
 void
-islpci_do_reset_and_wake(void *data)
+islpci_do_reset_and_wake(struct work_struct *work)
 {
-       islpci_private *priv = (islpci_private *) data;
+       islpci_private *priv = container_of(work, islpci_private, reset_task);
        islpci_reset(priv, 1);
        netif_wake_queue(priv->ndev);
        priv->reset_task_pending = 0;
 
 int islpci_eth_transmit(struct sk_buff *, struct net_device *);
 int islpci_eth_receive(islpci_private *);
 void islpci_eth_tx_timeout(struct net_device *);
-void islpci_do_reset_and_wake(void *data);
+void islpci_do_reset_and_wake(struct work_struct *);
 
 #endif                         /* _ISL_GEN_H */
 
 
                        /* Create work to handle trap out of interrupt
                         * context. */
-                       INIT_WORK(&frame->ws, prism54_process_trap, frame);
+                       INIT_WORK(&frame->ws, prism54_process_trap);
                        schedule_work(&frame->ws);
 
                } else {
 
 
 #define LINK_LED_WORK_DELAY HZ
 
-static void link_led_handler(void *p)
+static void link_led_handler(struct work_struct *work)
 {
-       struct zd_mac *mac = p;
+       struct zd_mac *mac =
+               container_of(work, struct zd_mac, housekeeping.link_led_work.work);
        struct zd_chip *chip = &mac->chip;
        struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev);
        int is_associated;
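
link_led_handler() shows that container_of()'s member argument can be an
arbitrarily deep member path: the delayed work sits inside the housekeeping
sub-struct of zd_mac, giving "housekeeping.link_led_work.work". Reduced to
its essentials (hypothetical names):

    #include <linux/workqueue.h>

    struct hk {
            struct delayed_work led_work;
    };

    struct mac_dev {
            struct hk housekeeping;
    };

    static void led_handler(struct work_struct *work)
    {
            /* Walk back through both levels of embedding in one step. */
            struct mac_dev *m =
                    container_of(work, struct mac_dev,
                                 housekeeping.led_work.work);
            (void)m;
    }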
 
 static void housekeeping_init(struct zd_mac *mac)
 {
-       INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac);
+       INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler);
 }
 
 static void housekeeping_enable(struct zd_mac *mac)
 
 };
 
 struct housekeeping {
-       struct work_struct link_led_work;
+       struct delayed_work link_led_work;
 };
 
 #define ZD_MAC_STATS_BUFFER_SIZE 16
 
 
 struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
 
-static void wq_sync_buffer(void *);
+static void wq_sync_buffer(struct work_struct *work);
 
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
 static int work_enabled;
                b->sample_received = 0;
                b->sample_lost_overflow = 0;
                b->cpu = i;
-               INIT_WORK(&b->work, wq_sync_buffer, b);
+               INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
        }
        return 0;
 
  * By using schedule_delayed_work_on and then schedule_delayed_work
  * we guarantee this will stay on the correct cpu
  */
-static void wq_sync_buffer(void * data)
+static void wq_sync_buffer(struct work_struct *work)
 {
-       struct oprofile_cpu_buffer * b = data;
+       struct oprofile_cpu_buffer * b =
+               container_of(work, struct oprofile_cpu_buffer, work.work);
        if (b->cpu != smp_processor_id()) {
                printk("WQ on CPU%d, prefer CPU%d\n",
                       smp_processor_id(), b->cpu);
 
        unsigned long sample_lost_overflow;
        unsigned long backtrace_aborted;
        int cpu;
-       struct work_struct work;
+       struct delayed_work work;
 } ____cacheline_aligned;
 
 extern struct oprofile_cpu_buffer cpu_buffer[];
 
        struct hotplug_slot *hotplug_slot;
        struct list_head        slot_list;
        char name[SLOT_NAME_SIZE];
-       struct work_struct work;        /* work for button event */
+       struct delayed_work work;       /* work for button event */
        struct mutex lock;
 };
 
 extern int     shpchp_unconfigure_device(struct slot *p_slot);
 extern void    shpchp_remove_ctrl_files(struct controller *ctrl);
 extern void    cleanup_slots(struct controller *ctrl);
-extern void    queue_pushbutton_work(void *data);
+extern void    queue_pushbutton_work(struct work_struct *work);
 
 
 #ifdef CONFIG_ACPI
 
                        goto error_info;
 
                slot->number = sun;
-               INIT_WORK(&slot->work, queue_pushbutton_work, slot);
+               INIT_DELAYED_WORK(&slot->work, queue_pushbutton_work);
 
                /* register this slot with the hotplug pci core */
                hotplug_slot->private = slot;
 
 #include "../pci.h"
 #include "shpchp.h"
 
-static void interrupt_event_handler(void *data);
+static void interrupt_event_handler(struct work_struct *work);
 static int shpchp_enable_slot(struct slot *p_slot);
 static int shpchp_disable_slot(struct slot *p_slot);
 
 
        info->event_type = event_type;
        info->p_slot = p_slot;
-       INIT_WORK(&info->work, interrupt_event_handler, info);
+       INIT_WORK(&info->work, interrupt_event_handler);
 
        schedule_work(&info->work);
 
  * Handles all pending events and exits.
  *
  */
-static void shpchp_pushbutton_thread(void *data)
+static void shpchp_pushbutton_thread(struct work_struct *work)
 {
-       struct pushbutton_work_info *info = data;
+       struct pushbutton_work_info *info =
+               container_of(work, struct pushbutton_work_info, work);
        struct slot *p_slot = info->p_slot;
 
        mutex_lock(&p_slot->lock);
        kfree(info);
 }
 
-void queue_pushbutton_work(void *data)
+void queue_pushbutton_work(struct work_struct *work)
 {
-       struct slot *p_slot = data;
+       struct slot *p_slot = container_of(work, struct slot, work.work);
        struct pushbutton_work_info *info;
 
        info = kmalloc(sizeof(*info), GFP_KERNEL);
                return;
        }
        info->p_slot = p_slot;
-       INIT_WORK(&info->work, shpchp_pushbutton_thread, info);
+       INIT_WORK(&info->work, shpchp_pushbutton_thread);
 
        mutex_lock(&p_slot->lock);
        switch (p_slot->state) {
        }
 }
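
shpchp combines two idioms: the slot's own delayed_work identifies the slot
via container_of(), and each event then gets a freshly kmalloc()ed wrapper
whose handler frees it when done. A sketch of the one-shot wrapper pattern
(hypothetical names):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct one_shot {
            struct work_struct work;
            int what;                       /* payload for this event */
    };

    static void one_shot_fn(struct work_struct *work)
    {
            struct one_shot *ev = container_of(work, struct one_shot, work);

            /* ... handle ev->what ... */
            kfree(ev);                      /* handler owns the wrapper */
    }

    static int fire_one_shot(int what)
    {
            struct one_shot *ev = kmalloc(sizeof(*ev), GFP_KERNEL);

            if (!ev)
                    return -ENOMEM;
            ev->what = what;
            INIT_WORK(&ev->work, one_shot_fn);
            schedule_work(&ev->work);
            return 0;
    }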
 
-static void interrupt_event_handler(void *data)
+static void interrupt_event_handler(struct work_struct *work)
 {
-       struct event_info *info = data;
+       struct event_info *info = container_of(work, struct event_info, work);
        struct slot *p_slot = info->p_slot;
 
        mutex_lock(&p_slot->lock);
 
 }
 
 
-static void pcmcia_delayed_add_pseudo_device(void *data)
+static void pcmcia_delayed_add_pseudo_device(struct work_struct *work)
 {
-       struct pcmcia_socket *s = data;
+       struct pcmcia_socket *s =
+               container_of(work, struct pcmcia_socket, device_add);
        pcmcia_device_add(s, 0);
        s->pcmcia_state.device_add_pending = 0;
 }
        init_waitqueue_head(&socket->queue);
 #endif
        INIT_LIST_HEAD(&socket->devices_list);
-       INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device, socket);
+       INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device);
        memset(&socket->pcmcia_state, 0, sizeof(u8));
        socket->device_count = 0;
 
 
  * Routine to poll the RTC seconds field for change as often as possible;
  * after the first RTC_UIE, use a timer to reduce polling
  */
-static void rtc_uie_task(void *data)
+static void rtc_uie_task(struct work_struct *work)
 {
-       struct rtc_device *rtc = data;
+       struct rtc_device *rtc =
+               container_of(work, struct rtc_device, uie_task);
        struct rtc_time tm;
        int num = 0;
        int err;
        spin_lock_init(&rtc->irq_lock);
        init_waitqueue_head(&rtc->irq_queue);
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-       INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc);
+       INIT_WORK(&rtc->uie_task, rtc_uie_task);
        setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
 #endif
 
 
        hostdata->issue_queue = NULL;
        hostdata->disconnected_queue = NULL;
        
-       INIT_WORK(&hostdata->coroutine, NCR5380_main, hostdata);
+       INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main);
        
 #ifdef NCR5380_STATS
        for (i = 0; i < 8; ++i) {
 
        /* Run the coroutine if it isn't already running. */
        /* Kick off command processing */
-       schedule_work(&hostdata->coroutine);
+       schedule_delayed_work(&hostdata->coroutine, 0);
        return 0;
 }
 
  *     host lock and called routines may take the isa dma lock.
  */
 
-static void NCR5380_main(void *p)
+static void NCR5380_main(struct work_struct *work)
 {
-       struct NCR5380_hostdata *hostdata = p;
+       struct NCR5380_hostdata *hostdata =
+               container_of(work, struct NCR5380_hostdata, coroutine.work);
        struct Scsi_Host *instance = hostdata->host;
        Scsi_Cmnd *tmp, *prev;
        int done;
                }       /* if BASR_IRQ */
                spin_unlock_irqrestore(instance->host_lock, flags);
                if(!done)
-                       schedule_work(&hostdata->coroutine);
+                       schedule_delayed_work(&hostdata->coroutine, 0);
        } while (!done);
        return IRQ_HANDLED;
 }
 
        unsigned long time_expires;             /* in jiffies, set prior to sleeping */
        int select_time;                        /* timer in select for target response */
        volatile Scsi_Cmnd *selecting;
-       struct work_struct coroutine;           /* our co-routine */
+       struct delayed_work coroutine;          /* our co-routine */
 #ifdef NCR5380_STATS
        unsigned timebase;                      /* Base for time calcs */
        long time_read[8];                      /* time to do reads */
 #ifndef DONT_USE_INTR
 static irqreturn_t NCR5380_intr(int irq, void *dev_id);
 #endif
-static void NCR5380_main(void *ptr);
+static void NCR5380_main(struct work_struct *work);
 static void NCR5380_print_options(struct Scsi_Host *instance);
 #ifdef NDEBUG
 static void NCR5380_print_phase(struct Scsi_Host *instance);
 
  * Run service completions on the card with interrupts enabled.
  *
  */
-static void run(void)
+static void run(struct work_struct *work)
 {
        struct aha152x_hostdata *hd;
 
                HOSTDATA(shpnt)->service=1;
 
                /* Poke the BH handler */
-               INIT_WORK(&aha152x_tq, (void *) run, NULL);
+               INIT_WORK(&aha152x_tq, run);
                schedule_work(&aha152x_tq);
        }
        DO_UNLOCK(flags);
 
        int base_hi;            /* Hi Base address for ECP-ISA chipset */
        int mode;               /* Transfer mode                */
        struct scsi_cmnd *cur_cmd;      /* Current queued command       */
-       struct work_struct imm_tq;      /* Polling interrupt stuff       */
+       struct delayed_work imm_tq;     /* Polling interrupt stuff       */
        unsigned long jstart;   /* Jiffies at start             */
        unsigned failed:1;      /* Failure flag                 */
        unsigned dp:1;          /* Data phase present           */
  * the scheduler's task queue to generate a stream of call-backs and
  * complete the request when the drive is ready.
  */
-static void imm_interrupt(void *data)
+static void imm_interrupt(struct work_struct *work)
 {
-       imm_struct *dev = (imm_struct *) data;
+       imm_struct *dev = container_of(work, imm_struct, imm_tq.work);
        struct scsi_cmnd *cmd = dev->cur_cmd;
        struct Scsi_Host *host = cmd->device->host;
        unsigned long flags;
                return;
        }
        if (imm_engine(dev, cmd)) {
-               INIT_WORK(&dev->imm_tq, imm_interrupt, (void *) dev);
                schedule_delayed_work(&dev->imm_tq, 1);
                return;
        }
        cmd->result = DID_ERROR << 16;  /* default return code */
        cmd->SCp.phase = 0;     /* bus free */
 
-       INIT_WORK(&dev->imm_tq, imm_interrupt, dev);
-       schedule_work(&dev->imm_tq);
+       schedule_delayed_work(&dev->imm_tq, 0);
 
        imm_pb_claim(dev);
 
        else
                ports = 8;
 
-       INIT_WORK(&dev->imm_tq, imm_interrupt, dev);
+       INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt);
 
        err = -ENOMEM;
        host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
 
 
 /**
  * ipr_worker_thread - Worker thread
- * @data:              ioa config struct
+ * @work:              ioa config struct
  *
  * Called at task level from a work thread. This function takes care
  * of adding and removing device from the mid-layer as configuration
  * Return value:
  *     nothing
  **/
-static void ipr_worker_thread(void *data)
+static void ipr_worker_thread(struct work_struct *work)
 {
        unsigned long lock_flags;
        struct ipr_resource_entry *res;
        struct scsi_device *sdev;
        struct ipr_dump *dump;
-       struct ipr_ioa_cfg *ioa_cfg = data;
+       struct ipr_ioa_cfg *ioa_cfg =
+               container_of(work, struct ipr_ioa_cfg, work_q);
        u8 bus, target, lun;
        int did_work;
 
        INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
        INIT_LIST_HEAD(&ioa_cfg->free_res_q);
        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
-       INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
+       INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
        init_waitqueue_head(&ioa_cfg->reset_wait_q);
        ioa_cfg->sdt_state = INACTIVE;
        if (ipr_enable_cache)
 
        return rc;
 }
 
-static void iscsi_xmitworker(void *data)
+static void iscsi_xmitworker(struct work_struct *work)
 {
-       struct iscsi_conn *conn = data;
+       struct iscsi_conn *conn =
+               container_of(work, struct iscsi_conn, xmitwork);
        int rc;
        /*
         * serialize Xmit worker on a per-connection basis.
        if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
                goto mgmtqueue_alloc_fail;
 
-       INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn);
+       INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
        /* allocate login_mtask used for the login/text sequences */
        spin_lock_bh(&session->lock);
 
  * Discover process only interrogates devices in order to discover the
  * domain.
  */
-static void sas_discover_domain(void *data)
+static void sas_discover_domain(struct work_struct *work)
 {
        int error = 0;
-       struct asd_sas_port *port = data;
+       struct sas_discovery_event *ev =
+               container_of(work, struct sas_discovery_event, work);
+       struct asd_sas_port *port = ev->port;
 
        sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
                        &port->disc.pending);
                    current->pid, error);
 }
 
-static void sas_revalidate_domain(void *data)
+static void sas_revalidate_domain(struct work_struct *work)
 {
        int res = 0;
-       struct asd_sas_port *port = data;
+       struct sas_discovery_event *ev =
+               container_of(work, struct sas_discovery_event, work);
+       struct asd_sas_port *port = ev->port;
 
        sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
                        &port->disc.pending);
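
libsas needs arrays of work items that all point back at one object (port,
phy, or HA), so the patch wraps each work_struct together with the needed
pointer in a small event struct (sas_discovery_event and friends below); the
pointer is filled in right next to INIT_WORK(), taking over from the old
data argument. The pattern in isolation (hypothetical names):

    #include <linux/workqueue.h>

    struct my_port;                         /* used only via a pointer */

    struct my_event {
            struct work_struct work;
            struct my_port *port;           /* carries what 'data' used to */
    };

    static void my_event_fn(struct work_struct *work)
    {
            struct my_event *ev = container_of(work, struct my_event, work);
            struct my_port *port = ev->port;

            (void)port;
    }

    static void my_event_init(struct my_event *ev, struct my_port *port)
    {
            INIT_WORK(&ev->work, my_event_fn);
            ev->port = port;                /* set alongside INIT_WORK() */
    }
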
        BUG_ON(ev >= DISC_NUM_EVENTS);
 
        sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
-                       &disc->disc_work[ev], port->ha->core.shost);
+                       &disc->disc_work[ev].work, port->ha->core.shost);
 
        return 0;
 }
 {
        int i;
 
-       static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = {
+       static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
                [DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
                [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
        };
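
Handler tables change type along with the handlers: arrays of
void (*)(void *) become arrays of work_func_t, and since INIT_WORK() only
reads them they can also become const. Sketch:

    #include <linux/workqueue.h>

    static void on_discover(struct work_struct *work) { }
    static void on_revalidate(struct work_struct *work) { }

    static const work_func_t event_fns[] = {
            on_discover,
            on_revalidate,
    };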
 
        spin_lock_init(&disc->disc_event_lock);
        disc->pending = 0;
-       for (i = 0; i < DISC_NUM_EVENTS; i++)
-               INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port);
+       for (i = 0; i < DISC_NUM_EVENTS; i++) {
+               INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
+               disc->disc_work[i].port = port;
+       }
 }
 
        BUG_ON(event >= HA_NUM_EVENTS);
 
        sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
-                       &sas_ha->ha_events[event], sas_ha->core.shost);
+                       &sas_ha->ha_events[event].work, sas_ha->core.shost);
 }
 
 static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
        BUG_ON(event >= PORT_NUM_EVENTS);
 
        sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
-                       &phy->port_events[event], ha->core.shost);
+                       &phy->port_events[event].work, ha->core.shost);
 }
 
 static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
        BUG_ON(event >= PHY_NUM_EVENTS);
 
        sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
-                       &phy->phy_events[event], ha->core.shost);
+                       &phy->phy_events[event].work, ha->core.shost);
 }
 
 int sas_init_events(struct sas_ha_struct *sas_ha)
 {
-       static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = {
+       static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
                [HAE_RESET] = sas_hae_reset,
        };
 
 
        spin_lock_init(&sas_ha->event_lock);
 
-       for (i = 0; i < HA_NUM_EVENTS; i++)
-               INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha);
+       for (i = 0; i < HA_NUM_EVENTS; i++) {
+               INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
+               sas_ha->ha_events[i].ha = sas_ha;
+       }
 
        sas_ha->notify_ha_event = notify_ha_event;
        sas_ha->notify_port_event = notify_port_event;
 
 
 /* ---------- HA events ---------- */
 
-void sas_hae_reset(void *data)
+void sas_hae_reset(struct work_struct *work)
 {
-       struct sas_ha_struct *ha = data;
+       struct sas_ha_event *ev =
+               container_of(work, struct sas_ha_event, work);
+       struct sas_ha_struct *ha = ev->ha;
 
        sas_begin_event(HAE_RESET, &ha->event_lock,
                        &ha->pending);
 
 
 void sas_deform_port(struct asd_sas_phy *phy);
 
-void sas_porte_bytes_dmaed(void *);
-void sas_porte_broadcast_rcvd(void *);
-void sas_porte_link_reset_err(void *);
-void sas_porte_timer_event(void *);
-void sas_porte_hard_reset(void *);
+void sas_porte_bytes_dmaed(struct work_struct *work);
+void sas_porte_broadcast_rcvd(struct work_struct *work);
+void sas_porte_link_reset_err(struct work_struct *work);
+void sas_porte_timer_event(struct work_struct *work);
+void sas_porte_hard_reset(struct work_struct *work);
 
 int sas_notify_lldd_dev_found(struct domain_device *);
 void sas_notify_lldd_dev_gone(struct domain_device *);
 
 struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
 
-void sas_hae_reset(void *);
+void sas_hae_reset(struct work_struct *work);
 
 static inline void sas_queue_event(int event, spinlock_t *lock,
                                   unsigned long *pending,
 
 
 /* ---------- Phy events ---------- */
 
-static void sas_phye_loss_of_signal(void *data)
+static void sas_phye_loss_of_signal(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
 
        sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
                        &phy->phy_events_pending);
        sas_deform_port(phy);
 }
 
-static void sas_phye_oob_done(void *data)
+static void sas_phye_oob_done(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
 
        sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
                        &phy->phy_events_pending);
        phy->error = 0;
 }
 
-static void sas_phye_oob_error(void *data)
+static void sas_phye_oob_error(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
        struct sas_ha_struct *sas_ha = phy->ha;
        struct asd_sas_port *port = phy->port;
        struct sas_internal *i =
        }
 }
 
-static void sas_phye_spinup_hold(void *data)
+static void sas_phye_spinup_hold(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
        struct sas_ha_struct *sas_ha = phy->ha;
        struct sas_internal *i =
                to_sas_internal(sas_ha->core.shost->transportt);
 {
        int i;
 
-       static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = {
+       static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
                [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
                [PHYE_OOB_DONE] = sas_phye_oob_done,
                [PHYE_OOB_ERROR] = sas_phye_oob_error,
                [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
        };
 
-       static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = {
+       static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
                [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
                [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
                [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
 
                phy->error = 0;
                INIT_LIST_HEAD(&phy->port_phy_el);
-               for (k = 0; k < PORT_NUM_EVENTS; k++)
-                       INIT_WORK(&phy->port_events[k], sas_port_event_fns[k],
-                                 phy);
+               for (k = 0; k < PORT_NUM_EVENTS; k++) {
+                       INIT_WORK(&phy->port_events[k].work,
+                                 sas_port_event_fns[k]);
+                       phy->port_events[k].phy = phy;
+               }
+
+               for (k = 0; k < PHY_NUM_EVENTS; k++) {
+                       INIT_WORK(&phy->phy_events[k].work,
+                                 sas_phy_event_fns[k]);
+                       phy->phy_events[k].phy = phy;
+               }
 
-               for (k = 0; k < PHY_NUM_EVENTS; k++)
-                       INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k],
-                                 phy);
                phy->port = NULL;
                phy->ha = sas_ha;
                spin_lock_init(&phy->frame_rcvd_lock);
 
 
 /* ---------- SAS port events ---------- */
 
-void sas_porte_bytes_dmaed(void *data)
+void sas_porte_bytes_dmaed(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
 
        sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
                        &phy->port_events_pending);
        sas_form_port(phy);
 }
 
-void sas_porte_broadcast_rcvd(void *data)
+void sas_porte_broadcast_rcvd(struct work_struct *work)
 {
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
        unsigned long flags;
        u32 prim;
-       struct asd_sas_phy *phy = data;
 
        sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
                        &phy->port_events_pending);
        sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
 }
 
-void sas_porte_link_reset_err(void *data)
+void sas_porte_link_reset_err(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
 
        sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
                        &phy->port_events_pending);
        sas_deform_port(phy);
 }
 
-void sas_porte_timer_event(void *data)
+void sas_porte_timer_event(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
 
        sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
                        &phy->port_events_pending);
        sas_deform_port(phy);
 }
 
-void sas_porte_hard_reset(void *data)
+void sas_porte_hard_reset(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
 
        sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
                        &phy->port_events_pending);
 
        int base;               /* Actual port address          */
        int mode;               /* Transfer mode                */
        struct scsi_cmnd *cur_cmd;      /* Current queued command       */
-       struct work_struct ppa_tq;      /* Polling interrupt stuff       */
+       struct delayed_work ppa_tq;     /* Polling interrupt stuff       */
        unsigned long jstart;   /* Jiffies at start             */
        unsigned long recon_tmo;        /* How many usecs to wait for reconnection (6th bit) */
        unsigned int failed:1;  /* Failure flag                 */
  * the scheduler's task queue to generate a stream of call-backs and
  * complete the request when the drive is ready.
  */
-static void ppa_interrupt(void *data)
+static void ppa_interrupt(struct work_struct *work)
 {
-       ppa_struct *dev = (ppa_struct *) data;
+       ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work);
        struct scsi_cmnd *cmd = dev->cur_cmd;
 
        if (!cmd) {
                return;
        }
        if (ppa_engine(dev, cmd)) {
-               dev->ppa_tq.data = (void *) dev;
                schedule_delayed_work(&dev->ppa_tq, 1);
                return;
        }
        cmd->result = DID_ERROR << 16;  /* default return code */
        cmd->SCp.phase = 0;     /* bus free */
 
-       dev->ppa_tq.data = dev;
-       schedule_work(&dev->ppa_tq);
+       schedule_delayed_work(&dev->ppa_tq, 0);
 
        ppa_pb_claim(dev);
 
        else
                ports = 8;
 
-       INIT_WORK(&dev->ppa_tq, ppa_interrupt, dev);
+       INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt);
 
        err = -ENOMEM;
        host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
 
  * the mid-level tries to sleep when it reaches the driver threshold
  * "host->can_queue". This can cause a panic if we were in our interrupt code.
  **/
-static void qla4xxx_do_dpc(void *data)
+static void qla4xxx_do_dpc(struct work_struct *work)
 {
-       struct scsi_qla_host *ha = (struct scsi_qla_host *) data;
+       struct scsi_qla_host *ha =
+               container_of(work, struct scsi_qla_host, dpc_work);
        struct ddb_entry *ddb_entry, *dtemp;
 
        DEBUG2(printk("scsi%ld: %s: DPC handler waking up.\n",
                ret = -ENODEV;
                goto probe_failed;
        }
-       INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha);
+       INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
 
        ret = request_irq(pdev->irq, qla4xxx_intr_handler,
                          SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha);
 
 #define FC_MGMTSRVR_PORTID             0x00000a
 
 
-static void fc_timeout_deleted_rport(void *data);
-static void fc_timeout_fail_rport_io(void *data);
-static void fc_scsi_scan_rport(void *data);
+static void fc_timeout_deleted_rport(struct work_struct *work);
+static void fc_timeout_fail_rport_io(struct work_struct *work);
+static void fc_scsi_scan_rport(struct work_struct *work);
 
 /*
  * Attribute counts per object type...
  *     1 on success / 0 already queued / < 0 for error
  **/
 static int
-fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
+fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
                                unsigned long delay)
 {
        if (unlikely(!fc_host_devloss_work_q(shost))) {
                return -EINVAL;
        }
 
-       if (delay == 0)
-               return queue_work(fc_host_devloss_work_q(shost), work);
-
        return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
 }
 
  * fc_starget_delete - called to delete the scsi descendants of an rport
  *                  (target and all sdevs)
  *
- * @data:      remote port to be operated on.
+ * @work:      remote port to be operated on.
  **/
 static void
-fc_starget_delete(void *data)
+fc_starget_delete(struct work_struct *work)
 {
-       struct fc_rport *rport = (struct fc_rport *)data;
+       struct fc_rport *rport =
+               container_of(work, struct fc_rport, stgt_delete_work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        unsigned long flags;
        struct fc_internal *i = to_fc_internal(shost->transportt);
 /**
  * fc_rport_final_delete - finish rport termination and delete it.
  *
- * @data:      remote port to be deleted.
+ * @work:      remote port to be deleted.
  **/
 static void
-fc_rport_final_delete(void *data)
+fc_rport_final_delete(struct work_struct *work)
 {
-       struct fc_rport *rport = (struct fc_rport *)data;
+       struct fc_rport *rport =
+               container_of(work, struct fc_rport, rport_delete_work);
        struct device *dev = &rport->dev;
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct fc_internal *i = to_fc_internal(shost->transportt);
 
        /* Delete SCSI target and sdevs */
        if (rport->scsi_target_id != -1)
-               fc_starget_delete(data);
+               fc_starget_delete(&rport->stgt_delete_work);
        else if (i->f->dev_loss_tmo_callbk)
                i->f->dev_loss_tmo_callbk(rport);
        else if (i->f->terminate_rport_io)
        rport->channel = channel;
        rport->fast_io_fail_tmo = -1;
 
-       INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport);
-       INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport);
-       INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport);
-       INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport);
-       INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport);
+       INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
+       INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
+       INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
+       INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
+       INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
 
        spin_lock_irqsave(shost->host_lock, flags);
 
                        }
 
                        if (match) {
-                               struct work_struct *work = 
+                               struct delayed_work *work =
                                                        &rport->dev_loss_work;
 
                                memcpy(&rport->node_name, &ids->node_name,
  *                       was a SCSI target (thus was blocked), and failed
  *                       to return in the allotted time.
  * 
- * @data:      rport target that failed to reappear in the alloted time.
+ * @work:      rport target that failed to reappear in the allotted time.
  **/
 static void
-fc_timeout_deleted_rport(void  *data)
+fc_timeout_deleted_rport(struct work_struct *work)
 {
-       struct fc_rport *rport = (struct fc_rport *)data;
+       struct fc_rport *rport =
+               container_of(work, struct fc_rport, dev_loss_work.work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
        unsigned long flags;
  * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a
  *                       disconnected SCSI target.
  *
- * @data:      rport to terminate io on.
+ * @work:      rport to terminate io on.
  *
  * Notes: Only requests the failure of the io, not that all are flushed
  *    prior to returning.
  **/
 static void
-fc_timeout_fail_rport_io(void  *data)
+fc_timeout_fail_rport_io(struct work_struct *work)
 {
-       struct fc_rport *rport = (struct fc_rport *)data;
+       struct fc_rport *rport =
+               container_of(work, struct fc_rport, fail_io_work.work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct fc_internal *i = to_fc_internal(shost->transportt);
 
 /**
  * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
  *
- * @data:      remote port to be scanned.
+ * @work:      remote port to be scanned.
  **/
 static void
-fc_scsi_scan_rport(void *data)
+fc_scsi_scan_rport(struct work_struct *work)
 {
-       struct fc_rport *rport = (struct fc_rport *)data;
+       struct fc_rport *rport =
+               container_of(work, struct fc_rport, scan_work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        unsigned long flags;
 
 
        return 0;
 }
 
-static void session_recovery_timedout(void *data)
+static void session_recovery_timedout(struct work_struct *work)
 {
-       struct iscsi_cls_session *session = data;
+       struct iscsi_cls_session *session =
+               container_of(work, struct iscsi_cls_session,
+                            recovery_work.work);
 
        dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
                  "out after %d secs\n", session->recovery_tmo);
 
        session->transport = transport;
        session->recovery_tmo = 120;
-       INIT_WORK(&session->recovery_work, session_recovery_timedout, session);
+       INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
        INIT_LIST_HEAD(&session->host_list);
        INIT_LIST_HEAD(&session->sess_list);
 
 
 };
 
 static void
-spi_dv_device_work_wrapper(void *data)
+spi_dv_device_work_wrapper(struct work_struct *work)
 {
-       struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+       struct work_queue_wrapper *wqw =
+               container_of(work, struct work_queue_wrapper, work);
        struct scsi_device *sdev = wqw->sdev;
 
        kfree(wqw);
                return;
        }
 
-       INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw);
+       INIT_WORK(&wqw->work, spi_dv_device_work_wrapper);
        wqw->sdev = sdev;
 
        schedule_work(&wqw->work);
 
  * Drivers can provide word-at-a-time i/o primitives, or provide
  * transfer-at-a-time ones to leverage dma or fifo hardware.
  */
-static void bitbang_work(void *_bitbang)
+static void bitbang_work(struct work_struct *work)
 {
-       struct spi_bitbang      *bitbang = _bitbang;
+       struct spi_bitbang      *bitbang =
+               container_of(work, struct spi_bitbang, work);
        unsigned long           flags;
 
        spin_lock_irqsave(&bitbang->lock, flags);
        if (!bitbang->master || !bitbang->chipselect)
                return -EINVAL;
 
-       INIT_WORK(&bitbang->work, bitbang_work, bitbang);
+       INIT_WORK(&bitbang->work, bitbang_work);
        spin_lock_init(&bitbang->lock);
        INIT_LIST_HEAD(&bitbang->queue);
 
 
        const struct cxacru_modem_type *modem_type;
 
        int line_status;
-       struct work_struct poll_work;
+       struct delayed_work poll_work;
 
        /* control handles */
        struct mutex cm_serialize;
        return 0;
 }
 
-static void cxacru_poll_status(struct cxacru_data *instance);
+static void cxacru_poll_status(struct work_struct *work);
 
 static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
                struct atm_dev *atm_dev)
        }
 
        /* Start status polling */
-       cxacru_poll_status(instance);
+       cxacru_poll_status(&instance->poll_work.work);
        return 0;
 }
 
-static void cxacru_poll_status(struct cxacru_data *instance)
+static void cxacru_poll_status(struct work_struct *work)
 {
+       struct cxacru_data *instance =
+               container_of(work, struct cxacru_data, poll_work.work);
        u32 buf[CXINF_MAX] = {};
        struct usbatm_data *usbatm = instance->usbatm;
        struct atm_dev *atm_dev = usbatm->atm_dev;
 
        mutex_init(&instance->cm_serialize);
 
-       INIT_WORK(&instance->poll_work, (void *)cxacru_poll_status, instance);
+       INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status);
 
        usbatm_instance->driver_data = instance;
 
 
 
        struct speedtch_params params; /* set in probe, constant afterwards */
 
-       struct work_struct status_checker;
+       struct delayed_work status_checker;
 
        unsigned char last_status;
 
        return ret;
 }
 
-static void speedtch_check_status(struct speedtch_instance_data *instance)
+static void speedtch_check_status(struct work_struct *work)
 {
+       struct speedtch_instance_data *instance =
+               container_of(work, struct speedtch_instance_data,
+                            status_checker.work);
        struct usbatm_data *usbatm = instance->usbatm;
        struct atm_dev *atm_dev = usbatm->atm_dev;
        unsigned char *buf = instance->scratch_buffer;
 {
        struct speedtch_instance_data *instance = (void *)data;
 
-       schedule_work(&instance->status_checker);
+       schedule_delayed_work(&instance->status_checker, 0);
 
        /* The following check is racy, but the race is harmless */
        if (instance->poll_delay < MAX_POLL_DELAY)
        if (int_urb) {
                ret = usb_submit_urb(int_urb, GFP_ATOMIC);
                if (!ret)
-                       schedule_work(&instance->status_checker);
+                       schedule_delayed_work(&instance->status_checker, 0);
                else {
                        atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
                        mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
 
        if ((int_urb = instance->int_urb)) {
                ret = usb_submit_urb(int_urb, GFP_ATOMIC);
-               schedule_work(&instance->status_checker);
+               schedule_delayed_work(&instance->status_checker, 0);
                if (ret < 0) {
                        atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
                        goto fail;
 
        usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
 
-       INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance);
+       INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status);
 
        instance->status_checker.timer.function = speedtch_status_poll;
        instance->status_checker.timer.data = (unsigned long)instance;
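
speedtch goes a step further and repoints the timer embedded in its
delayed_work (see the simplified struct definition earlier) at its own poll
routine; the timer callback then re-enters the workqueue path with
schedule_delayed_work(..., 0), which queues immediately without re-arming
that timer. A hedged sketch of the arrangement, with hypothetical names
(timer callbacks take an unsigned long in this era's API):

    #include <linux/jiffies.h>
    #include <linux/timer.h>
    #include <linux/workqueue.h>

    static void check_status(struct work_struct *work);
    static DECLARE_DELAYED_WORK(checker, check_status);

    /* Timer callback: bounce into process context via the workqueue. */
    static void status_poll(unsigned long data)
    {
            schedule_delayed_work(&checker, 0);     /* delay 0: timer untouched */
    }

    static void check_status(struct work_struct *work)
    {
            /* ... read device state, then re-arm the poll timer ... */
            mod_timer(&checker.timer, jiffies + HZ);
    }

    static void start_polling(void)
    {
            checker.timer.function = status_poll;   /* reuse embedded timer */
            mod_timer(&checker.timer, jiffies + HZ);
    }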
 
 /*
  * The uea_load_page() function must be called within a process context
  */
-static void uea_load_page(void *xsc)
+static void uea_load_page(struct work_struct *work)
 {
-       struct uea_softc *sc = xsc;
+       struct uea_softc *sc = container_of(work, struct uea_softc, task);
        u16 pageno = sc->pageno;
        u16 ovl = sc->ovl;
        struct block_info bi;
 
        uea_enters(INS_TO_USBDEV(sc));
 
-       INIT_WORK(&sc->task, uea_load_page, sc);
+       INIT_WORK(&sc->task, uea_load_page);
        init_waitqueue_head(&sc->sync_q);
        init_waitqueue_head(&sc->cmv_ack_wait);
 
 
                schedule_work(&acm->work);
 }
 
-static void acm_softint(void *private)
+static void acm_softint(struct work_struct *work)
 {
-       struct acm *acm = private;
+       struct acm *acm = container_of(work, struct acm, work);
        dbg("Entering acm_softint.");
        
        if (!ACM_READY(acm))
        acm->rx_buflimit = num_rx_buf;
        acm->urb_task.func = acm_rx_tasklet;
        acm->urb_task.data = (unsigned long) acm;
-       INIT_WORK(&acm->work, acm_softint, acm);
+       INIT_WORK(&acm->work, acm_softint);
        spin_lock_init(&acm->throttle_lock);
        spin_lock_init(&acm->write_lock);
        spin_lock_init(&acm->read_lock);
 
 
 #define        LED_CYCLE_PERIOD        ((2*HZ)/3)
 
-static void led_work (void *__hub)
+static void led_work (struct work_struct *work)
 {
-       struct usb_hub          *hub = __hub;
+       struct usb_hub          *hub =
+               container_of(work, struct usb_hub, leds.work);
        struct usb_device       *hdev = hub->hdev;
        unsigned                i;
        unsigned                changed = 0;
  * talking to TTs must queue control transfers (not just bulk and iso), so
  * both can talk to the same hub concurrently.
  */
-static void hub_tt_kevent (void *arg)
+static void hub_tt_kevent (struct work_struct *work)
 {
-       struct usb_hub          *hub = arg;
+       struct usb_hub          *hub =
+               container_of(work, struct usb_hub, tt.kevent);
        unsigned long           flags;
 
        spin_lock_irqsave (&hub->tt.lock, flags);
 
        spin_lock_init (&hub->tt.lock);
        INIT_LIST_HEAD (&hub->tt.clear_list);
-       INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub);
+       INIT_WORK (&hub->tt.kevent, hub_tt_kevent);
        switch (hdev->descriptor.bDeviceProtocol) {
                case 0:
                        break;
        INIT_LIST_HEAD(&hub->event_list);
        hub->intfdev = &intf->dev;
        hub->hdev = hdev;
-       INIT_WORK(&hub->leds, led_work, hub);
+       INIT_DELAYED_WORK(&hub->leds, led_work);
 
        usb_set_intfdata (intf, hub);
 
                /* hub LEDs are probably harder to miss than syslog */
                if (hub->has_indicators) {
                        hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
-                       schedule_work (&hub->leds);
+                       schedule_delayed_work (&hub->leds, 0);
                }
        }
        kfree(qual);
                                if (hub->has_indicators) {
                                        hub->indicator[port1-1] =
                                                INDICATOR_AMBER_BLINK;
-                                       schedule_work (&hub->leds);
+                                       schedule_delayed_work (&hub->leds, 0);
                                }
                                status = -ENOTCONN;     /* Don't retry */
                                goto loop_disable;
 
 
        unsigned                has_indicators:1;
        enum hub_led_mode       indicator[USB_MAXCHILDREN];
-       struct work_struct      leds;
+       struct delayed_work     leds;
 };
 
 #endif /* __LINUX_HUB_H */
 
 };
 
 /* Worker routine for usb_driver_set_configuration() */
-static void driver_set_config_work(void *_req)
+static void driver_set_config_work(struct work_struct *work)
 {
-       struct set_config_request *req = _req;
+       struct set_config_request *req =
+               container_of(work, struct set_config_request, work);
 
        usb_lock_device(req->udev);
        usb_set_configuration(req->udev, req->config);
                return -ENOMEM;
        req->udev = udev;
        req->config = config;
-       INIT_WORK(&req->work, driver_set_config_work, req);
+       INIT_WORK(&req->work, driver_set_config_work);
 
        usb_get_dev(udev);
        if (!schedule_work(&req->work)) {
 
 #ifdef CONFIG_USB_SUSPEND
 
 /* usb_autosuspend_work - callback routine to autosuspend a USB device */
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
 {
-       struct usb_device       *udev = _udev;
+       struct usb_device *udev =
+               container_of(work, struct usb_device, autosuspend.work);
 
        usb_pm_lock(udev);
        udev->auto_pm = 1;
 
 #else
 
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
 {}
 
 #endif
 
 #ifdef CONFIG_PM
        mutex_init(&dev->pm_mutex);
-       INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev);
+       INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
 #endif
        return dev;
 }
 
        spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
-static void eth_work (void *_dev)
+static void eth_work (struct work_struct *work)
 {
-       struct eth_dev          *dev = _dev;
+       struct eth_dev  *dev = container_of(work, struct eth_dev, work);
 
        if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) {
                if (netif_running (dev->net))
        dev = netdev_priv(net);
        spin_lock_init (&dev->lock);
        spin_lock_init (&dev->req_lock);
-       INIT_WORK (&dev->work, eth_work, dev);
+       INIT_WORK (&dev->work, eth_work);
        INIT_LIST_HEAD (&dev->tx_reqs);
        INIT_LIST_HEAD (&dev->rx_reqs);
 
 
         u16 queue_next;
         struct urb *urb_list[ENDP_QUEUE_SIZE];
         struct list_head urb_more;
-        struct work_struct scheduler;
+        struct delayed_work scheduler;
 };
 struct u132_ring {
         unsigned in_use:1;
         u8 number;
         struct u132 *u132;
         struct u132_endp *curr_endp;
-        struct work_struct scheduler;
+        struct delayed_work scheduler;
 };
 #define OHCI_QUIRK_AMD756 0x01
 #define OHCI_QUIRK_SUPERIO 0x02
         u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
         int flags;
         unsigned long next_statechange;
-        struct work_struct monitor;
+        struct delayed_work monitor;
         int num_endpoints;
         struct u132_addr addr[MAX_U132_ADDRS];
         struct u132_udev udev[MAX_U132_UDEVS];
         if (delta > 0) {
                 if (queue_delayed_work(workqueue, &ring->scheduler, delta))
                         return;
-        } else if (queue_work(workqueue, &ring->scheduler))
+        } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
                 return;
         kref_put(&u132->kref, u132_hcd_delete);
         return;
 static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
         unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &endp->scheduler, delta))
-                        kref_get(&endp->kref);
-        } else if (queue_work(workqueue, &endp->scheduler))
-                kref_get(&endp->kref);
-        return;
+       if (queue_delayed_work(workqueue, &endp->scheduler, delta))
+               kref_get(&endp->kref);
 }
 
 static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
 
 static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &u132->monitor, delta)) {
-                        kref_get(&u132->kref);
-                }
-        } else if (queue_work(workqueue, &u132->monitor))
-                kref_get(&u132->kref);
-        return;
+       if (queue_delayed_work(workqueue, &u132->monitor, delta))
+               kref_get(&u132->kref);
 }
 
 static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &u132->monitor, delta))
-                        return;
-        } else if (queue_work(workqueue, &u132->monitor))
-                return;
-        kref_put(&u132->kref, u132_hcd_delete);
-        return;
+       if (!queue_delayed_work(workqueue, &u132->monitor, delta))
+               kref_put(&u132->kref, u132_hcd_delete);
 }
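
Both helpers above lean on queue_delayed_work()'s return value: nonzero means
a new pending item was queued, zero means one was already pending.  The queue
path takes a reference only on success; the requeue path runs from the handler,
which already holds the reference taken at queue time, and must drop it if the
work could not be queued again.  Restated as a sketch (wq, obj and obj_delete
hypothetical):

	/* queue: pin the object only if we really queued new work */
	if (queue_delayed_work(wq, &obj->dwork, delta))
		kref_get(&obj->kref);

	/* requeue from the handler: on failure, release the reference
	 * that was taken when this execution was queued */
	if (!queue_delayed_work(wq, &obj->dwork, delta))
		kref_put(&obj->kref, obj_delete);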
 
 static void u132_monitor_cancel_work(struct u132 *u132)
         return 0;
 }
 
-static void u132_hcd_monitor_work(void *data)
+static void u132_hcd_monitor_work(struct work_struct *work)
 {
-        struct u132 *u132 = data;
+        struct u132 *u132 = container_of(work, struct u132, monitor.work);
         if (u132->going > 1) {
                 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
                         , u132->going);
         }
 }
 
-static void u132_hcd_ring_work_scheduler(void *data);
-static void u132_hcd_endp_work_scheduler(void *data);
 /*
 * this work function is only executed from the work queue
 *
 */
-static void u132_hcd_ring_work_scheduler(void *data)
+static void u132_hcd_ring_work_scheduler(struct work_struct *work)
 {
-        struct u132_ring *ring = data;
+        struct u132_ring *ring =
+               container_of(work, struct u132_ring, scheduler.work);
         struct u132 *u132 = ring->u132;
         down(&u132->scheduler_lock);
         if (ring->in_use) {
         }
 }
 
-static void u132_hcd_endp_work_scheduler(void *data)
+static void u132_hcd_endp_work_scheduler(struct work_struct *work)
 {
         struct u132_ring *ring;
-        struct u132_endp *endp = data;
+        struct u132_endp *endp =
+               container_of(work, struct u132_endp, scheduler.work);
         struct u132 *u132 = endp->u132;
         down(&u132->scheduler_lock);
         ring = endp->ring;
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         ring = endp->ring = &u132->ring[0];
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         endp->dequeueing = 0;
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         ring = endp->ring = &u132->ring[0];
                 ring->number = rings + 1;
                 ring->length = 0;
                 ring->curr_endp = NULL;
-                INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler,
-                        (void *)ring);
+                INIT_DELAYED_WORK(&ring->scheduler,
+                                 u132_hcd_ring_work_scheduler);
         } down(&u132->sw_lock);
-        INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132);
+        INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
         while (ports-- > 0) {
                 struct u132_port *port = &u132->port[ports];
                 port->u132 = u132;
 
 }
 
 /* Workqueue routine to reset the device */
-static void hid_reset(void *_hid)
+static void hid_reset(struct work_struct *work)
 {
-       struct hid_device *hid = (struct hid_device *) _hid;
+       struct hid_device *hid =
+               container_of(work, struct hid_device, reset_work);
        int rc_lock, rc;
 
        dev_dbg(&hid->intf->dev, "resetting device\n");
 
        init_waitqueue_head(&hid->wait);
 
-       INIT_WORK(&hid->reset_work, hid_reset, hid);
+       INIT_WORK(&hid->reset_work, hid_reset);
        setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid);
 
        spin_lock_init(&hid->inlock);
 
         struct usb_device *udev;
         struct usb_interface *interface;
         struct usb_class_driver *class;
-        struct work_struct status_work;
-        struct work_struct command_work;
-        struct work_struct respond_work;
+        struct delayed_work status_work;
+        struct delayed_work command_work;
+        struct delayed_work respond_work;
         struct u132_platform_data platform_data;
         struct resource resources[0];
         struct platform_device platform_dev;
 
 static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
-                        return;
-        } else if (queue_work(status_queue, &ftdi->status_work))
-                return;
-        kref_put(&ftdi->kref, ftdi_elan_delete);
-        return;
+       if (!queue_delayed_work(status_queue, &ftdi->status_work, delta))
+               kref_put(&ftdi->kref, ftdi_elan_delete);
 }
 
 static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
-                        kref_get(&ftdi->kref);
-        } else if (queue_work(status_queue, &ftdi->status_work))
-                kref_get(&ftdi->kref);
-        return;
+       if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
+               kref_get(&ftdi->kref);
 }
 
 static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
 
 static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(command_queue, &ftdi->command_work,
-                        delta))
-                        return;
-        } else if (queue_work(command_queue, &ftdi->command_work))
-                return;
-        kref_put(&ftdi->kref, ftdi_elan_delete);
-        return;
+       if (!queue_delayed_work(command_queue, &ftdi->command_work, delta))
+               kref_put(&ftdi->kref, ftdi_elan_delete);
 }
 
 static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(command_queue, &ftdi->command_work,
-                        delta))
-                        kref_get(&ftdi->kref);
-        } else if (queue_work(command_queue, &ftdi->command_work))
-                kref_get(&ftdi->kref);
-        return;
+       if (queue_delayed_work(command_queue, &ftdi->command_work, delta))
+               kref_get(&ftdi->kref);
 }
 
 static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
 static void ftdi_response_requeue_work(struct usb_ftdi *ftdi,
         unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(respond_queue, &ftdi->respond_work,
-                        delta))
-                        return;
-        } else if (queue_work(respond_queue, &ftdi->respond_work))
-                return;
-        kref_put(&ftdi->kref, ftdi_elan_delete);
-        return;
+       if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+               kref_put(&ftdi->kref, ftdi_elan_delete);
 }
 
 static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(respond_queue, &ftdi->respond_work,
-                        delta))
-                        kref_get(&ftdi->kref);
-        } else if (queue_work(respond_queue, &ftdi->respond_work))
-                kref_get(&ftdi->kref);
-        return;
+       if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+               kref_get(&ftdi->kref);
 }
 
 static void ftdi_response_cancel_work(struct usb_ftdi *ftdi)
         return;
 }
 
-static void ftdi_elan_command_work(void *data)
+static void ftdi_elan_command_work(struct work_struct *work)
 {
-        struct usb_ftdi *ftdi = data;
+        struct usb_ftdi *ftdi =
+               container_of(work, struct usb_ftdi, command_work.work);
+
         if (ftdi->disconnected > 0) {
                 ftdi_elan_put_kref(ftdi);
                 return;
         return;
 }
 
-static void ftdi_elan_respond_work(void *data)
+static void ftdi_elan_respond_work(struct work_struct *work)
 {
-        struct usb_ftdi *ftdi = data;
+        struct usb_ftdi *ftdi =
+               container_of(work, struct usb_ftdi, respond_work.work);
         if (ftdi->disconnected > 0) {
                 ftdi_elan_put_kref(ftdi);
                 return;
 * after the FTDI has been synchronized
 *
 */
-static void ftdi_elan_status_work(void *data)
+static void ftdi_elan_status_work(struct work_struct *work)
 {
-        struct usb_ftdi *ftdi = data;
+        struct usb_ftdi *ftdi =
+               container_of(work, struct usb_ftdi, status_work.work);
         int work_delay_in_msec = 0;
         if (ftdi->disconnected > 0) {
                 ftdi_elan_put_kref(ftdi);
                 ftdi->class = NULL;
                 dev_info(&ftdi->udev->dev, "USB FTDI=%p ELAN interface %d now a"
                         "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber);
-                INIT_WORK(&ftdi->status_work, ftdi_elan_status_work,
-                        (void *)ftdi);
-                INIT_WORK(&ftdi->command_work, ftdi_elan_command_work,
-                        (void *)ftdi);
-                INIT_WORK(&ftdi->respond_work, ftdi_elan_respond_work,
-                        (void *)ftdi);
+                INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work);
+                INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work);
+                INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work);
                 ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000));
                 return 0;
         } else {
 
        unsigned char *data;
        dma_addr_t data_dma;
 
-       struct work_struct do_notify;
-       struct work_struct do_resubmit;
+       struct delayed_work do_notify;
+       struct delayed_work do_resubmit;
        unsigned long input_events;
        unsigned long sensor_events;
 };
        }
 
        if (kit->input_events || kit->sensor_events)
-               schedule_work(&kit->do_notify);
+               schedule_delayed_work(&kit->do_notify, 0);
 
 resubmit:
        status = usb_submit_urb(urb, SLAB_ATOMIC);
                        kit->udev->devpath, status);
 }
 
-static void do_notify(void *data)
+static void do_notify(struct work_struct *work)
 {
-       struct interfacekit *kit = data;
+       struct interfacekit *kit =
+               container_of(work, struct interfacekit, do_notify.work);
        int i;
        char sysfs_file[8];
 
        }
 }
 
-static void do_resubmit(void *data)
+static void do_resubmit(struct work_struct *work)
 {
-       set_outputs(data);
+       struct interfacekit *kit =
+               container_of(work, struct interfacekit, do_resubmit.work);
+       set_outputs(kit);
 }
 
 #define show_set_output(value)         \
 
        kit->udev = usb_get_dev(dev);
        kit->intf = intf;
-       INIT_WORK(&kit->do_notify, do_notify, kit);
-       INIT_WORK(&kit->do_resubmit, do_resubmit, kit);
+       INIT_DELAYED_WORK(&kit->do_notify, do_notify);
+       INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit);
        usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data,
                        maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
                        interfacekit_irq, kit, endpoint->bInterval);
 
        unsigned char *data;
        dma_addr_t data_dma;
 
-       struct work_struct do_notify;
+       struct delayed_work do_notify;
        unsigned long input_events;
        unsigned long speed_events;
        unsigned long exceed_events;
                set_bit(1, &mc->exceed_events);
 
        if (mc->input_events || mc->exceed_events || mc->speed_events)
-               schedule_work(&mc->do_notify);
+               schedule_delayed_work(&mc->do_notify, 0);
 
 resubmit:
        status = usb_submit_urb(urb, SLAB_ATOMIC);
                        mc->udev->devpath, status);
 }
 
-static void do_notify(void *data)
+static void do_notify(struct work_struct *work)
 {
-       struct motorcontrol *mc = data;
+       struct motorcontrol *mc =
+               container_of(work, struct motorcontrol, do_notify.work);
        int i;
        char sysfs_file[8];
 
        mc->udev = usb_get_dev(dev);
        mc->intf = intf;
        mc->acceleration[0] = mc->acceleration[1] = 10;
-       INIT_WORK(&mc->do_notify, do_notify, mc);
+       INIT_DELAYED_WORK(&mc->do_notify, do_notify);
        usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data,
                        maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
                        motorcontrol_irq, mc, endpoint->bInterval);
 
        int suspend_lowmem_ctrl;
        int linkstate;
        int opened;
-       struct work_struct lowmem_work;
+       struct delayed_work lowmem_work;
 
        struct usb_device *dev;
        struct net_device *net;
        kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC);
 }
 
-static void kaweth_resubmit_tl(void *d)
+static void kaweth_resubmit_tl(struct work_struct *work)
 {
-       struct kaweth_device *kaweth = (struct kaweth_device *)d;
+       struct kaweth_device *kaweth =
+               container_of(work, struct kaweth_device, lowmem_work.work);
 
        if (IS_BLOCKED(kaweth->status))
                return;
 
        /* kaweth is zeroed as part of alloc_netdev */
 
-       INIT_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl, (void *)kaweth);
+       INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
 
        SET_MODULE_OWNER(netdev);
 
 
 static struct workqueue_struct *pegasus_workqueue = NULL;
 #define CARRIER_CHECK_DELAY (2 * HZ)
 
-static void check_carrier(void *data)
+static void check_carrier(struct work_struct *work)
 {
-       pegasus_t *pegasus = data;
+       pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
        set_carrier(pegasus->net);
        if (!(pegasus->flags & PEGASUS_UNPLUG)) {
                queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
 
        tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
 
-       INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus);
+       INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier);
 
        pegasus->intf = intf;
        pegasus->usb = dev;
 
        int                     dev_index;
        int                     intr_interval;
        struct tasklet_struct   rx_tl;
-       struct work_struct      carrier_check;
+       struct delayed_work     carrier_check;
        struct urb              *ctrl_urb, *rx_urb, *tx_urb, *intr_urb;
        struct sk_buff          *rx_pool[RX_SKBS];
        struct sk_buff          *rx_skb;
 
  * especially now that control transfers can be queued.
  */
 static void
-kevent (void *data)
+kevent (struct work_struct *work)
 {
-       struct usbnet           *dev = data;
+       struct usbnet           *dev =
+               container_of(work, struct usbnet, kevent);
        int                     status;
 
        /* usb_clear_halt() needs a thread context */
        skb_queue_head_init (&dev->done);
        dev->bh.func = usbnet_bh;
        dev->bh.data = (unsigned long) dev;
-       INIT_WORK (&dev->kevent, kevent, dev);
+       INIT_WORK (&dev->kevent, kevent);
        dev->delay.function = usbnet_bh;
        dev->delay.data = (unsigned long) dev;
        init_timer (&dev->delay);
 
        struct circ_buf *rx_buf;        /* read buffer */
        int rx_flags;                   /* for throttling */
        struct work_struct rx_work;     /* work cue for the receiving line */
+       struct usb_serial_port *port;   /* USB port with which associated */
 };
 
 /* Private methods */
        schedule_work(&port->work);
 }
 
-static void aircable_read(void *params)
+static void aircable_read(struct work_struct *work)
 {
-       struct usb_serial_port *port = params;
-       struct aircable_private *priv = usb_get_serial_port_data(port);
+       struct aircable_private *priv =
+               container_of(work, struct aircable_private, rx_work);
+       struct usb_serial_port *port = priv->port;
        struct tty_struct *tty;
        unsigned char *data;
        int count;
        }
 
        priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
-       INIT_WORK(&priv->rx_work, aircable_read, port);
+       priv->port = port;
+       INIT_WORK(&priv->rx_work, aircable_read);
 
        usb_set_serial_port_data(serial->port[0], priv);
 
                                        package_length - shift);
                        }
                }
-               aircable_read(port);
+               aircable_read(&priv->rx_work);
        }
 
        /* Schedule the next read _if_ we are still open */
 
        int dp_in_close;                        /* close in progress */
        wait_queue_head_t dp_close_wait;        /* wait queue for close */
        struct work_struct dp_wakeup_work;
+       struct usb_serial_port *dp_port;
 };
 
 
 /* Local Function Declarations */
 
 static void digi_wakeup_write( struct usb_serial_port *port );
-static void digi_wakeup_write_lock(void *);
+static void digi_wakeup_write_lock(struct work_struct *work);
 static int digi_write_oob_command( struct usb_serial_port *port,
        unsigned char *buf, int count, int interruptible );
 static int digi_write_inb_command( struct usb_serial_port *port,
 *  on writes.
 */
 
-static void digi_wakeup_write_lock(void *arg)
+static void digi_wakeup_write_lock(struct work_struct *work)
 {
-       struct usb_serial_port *port = arg;
+       struct digi_port *priv =
+               container_of(work, struct digi_port, dp_wakeup_work);
+       struct usb_serial_port *port = priv->dp_port;
        unsigned long flags;
-       struct digi_port *priv = usb_get_serial_port_data(port);
 
 
        spin_lock_irqsave( &priv->dp_port_lock, flags );
                init_waitqueue_head( &priv->dp_flush_wait );
                priv->dp_in_close = 0;
                init_waitqueue_head( &priv->dp_close_wait );
-               INIT_WORK(&priv->dp_wakeup_work,
-                               digi_wakeup_write_lock, serial->port[i]);
+               INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
+               priv->dp_port = serial->port[i];
 
                /* initialize write wait queue for this port */
                init_waitqueue_head( &serial->port[i]->write_wait );
 
        char prev_status, diff_status;        /* Used for TIOCMIWAIT */
        __u8 rx_flags;          /* receive state flags (throttling) */
        spinlock_t rx_lock;     /* spinlock for receive state */
-       struct work_struct rx_work;
+       struct delayed_work rx_work;
+       struct usb_serial_port *port;
        int rx_processed;
        unsigned long rx_bytes;
 
 static int  ftdi_chars_in_buffer       (struct usb_serial_port *port);
 static void ftdi_write_bulk_callback   (struct urb *urb);
 static void ftdi_read_bulk_callback    (struct urb *urb);
-static void ftdi_process_read          (void *param);
+static void ftdi_process_read          (struct work_struct *work);
 static void ftdi_set_termios           (struct usb_serial_port *port, struct termios * old);
 static int  ftdi_tiocmget               (struct usb_serial_port *port, struct file *file);
 static int  ftdi_tiocmset              (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear);
                port->read_urb->transfer_buffer_length = BUFSZ;
        }
 
-       INIT_WORK(&priv->rx_work, ftdi_process_read, port);
+       INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read);
+       priv->port = port;
 
        /* Free port's existing write urb and transfer buffer. */
        if (port->write_urb) {
        priv->rx_bytes += countread;
        spin_unlock_irqrestore(&priv->rx_lock, flags);
 
-       ftdi_process_read(port);
+       ftdi_process_read(&priv->rx_work.work);
 
 } /* ftdi_read_bulk_callback */
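
Note the idiom when a converted handler must also be called synchronously, as
ftdi_process_read() is above: the caller hands over the address of the
embedded work member so the handler's container_of() still resolves.  A
sketch, with hypothetical names:

	foo_work_fn(&f->dwork.work);	/* handler of a delayed_work */
	bar_work_fn(&b->work);		/* handler of a plain work_struct */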
 
 
-static void ftdi_process_read (void *param)
+static void ftdi_process_read (struct work_struct *work)
 { /* ftdi_process_read */
-       struct usb_serial_port *port = (struct usb_serial_port*)param;
+       struct ftdi_private *priv =
+               container_of(work, struct ftdi_private, rx_work.work);
+       struct usb_serial_port *port = priv->port;
        struct urb *urb;
        struct tty_struct *tty;
-       struct ftdi_private *priv;
        char error_flag;
        unsigned char *data;
 
        spin_unlock_irqrestore(&priv->rx_lock, flags);
 
        if (actually_throttled)
-               schedule_work(&priv->rx_work);
+               schedule_delayed_work(&priv->rx_work, 0);
 }
 
 static int __init ftdi_init (void)
 
        int                     tx_throttled;
        struct work_struct                      wakeup_work;
        struct work_struct                      unthrottle_work;
+       struct usb_serial       *serial;
+       struct usb_serial_port  *port;
 };
 
 
 };
 #endif
 
-static void keyspan_pda_wakeup_write( struct usb_serial_port *port )
+static void keyspan_pda_wakeup_write(struct work_struct *work)
 {
-
+       struct keyspan_pda_private *priv =
+               container_of(work, struct keyspan_pda_private, wakeup_work);
+       struct usb_serial_port *port = priv->port;
        struct tty_struct *tty = port->tty;
 
        /* wake up port processes */
        tty_wakeup(tty);
 }
 
-static void keyspan_pda_request_unthrottle( struct usb_serial *serial )
+static void keyspan_pda_request_unthrottle(struct work_struct *work)
 {
+       struct keyspan_pda_private *priv =
+               container_of(work, struct keyspan_pda_private, unthrottle_work);
+       struct usb_serial *serial = priv->serial;
        int result;
 
        dbg(" request_unthrottle");
                return (1); /* error */
        usb_set_serial_port_data(serial->port[0], priv);
        init_waitqueue_head(&serial->port[0]->write_wait);
-       INIT_WORK(&priv->wakeup_work, (void *)keyspan_pda_wakeup_write,
-                       (void *)(serial->port[0]));
-       INIT_WORK(&priv->unthrottle_work,
-                       (void *)keyspan_pda_request_unthrottle,
-                       (void *)(serial));
+       INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write);
+       INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
+       priv->serial = serial;
+       priv->port = serial->port[0];
        return (0);
 }
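
keyspan_pda illustrates the other recurring move in this fix-up: when the old
INIT_WORK() data argument was not the structure the work item is embedded in,
the private struct grows a back-pointer, set at init time and followed by the
handler after container_of().  A rough sketch with hypothetical names:

	struct foo_private {
		struct work_struct work;
		struct foo_port *port;	/* back-pointer for the handler */
	};

	static void foo_work_fn(struct work_struct *work)
	{
		struct foo_private *priv =
			container_of(work, struct foo_private, work);
		struct foo_port *port = priv->port;
		/* ... operate on port ... */
	}

	INIT_WORK(&priv->work, foo_work_fn);
	priv->port = port;	/* was INIT_WORK()'s third argument */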
 
 
        schedule_work(&port->work);
 }
 
-static void usb_serial_port_work(void *private)
+static void usb_serial_port_work(struct work_struct *work)
 {
-       struct usb_serial_port *port = private;
+       struct usb_serial_port *port =
+               container_of(work, struct usb_serial_port, work);
        struct tty_struct *tty;
 
        dbg("%s - port %d", __FUNCTION__, port->number);
                port->serial = serial;
                spin_lock_init(&port->lock);
                mutex_init(&port->mutex);
-               INIT_WORK(&port->work, usb_serial_port_work, port);
+               INIT_WORK(&port->work, usb_serial_port_work);
                serial->port[i] = port;
        }
 
 
        struct list_head        rx_urbs_submitted;
        struct list_head        rx_urb_q;
        struct work_struct      rx_work;
+       struct usb_serial_port  *port;
        struct list_head        tx_urbs_free;
        struct list_head        tx_urbs_submitted;
 };
 static int start_port_read(struct usb_serial_port *port);
 static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head);
 static struct list_head *list_first(struct list_head *head);
-static void rx_data_softint(void *private);
+static void rx_data_softint(struct work_struct *work);
 
 static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize);
 static int firm_open(struct usb_serial_port *port);
                spin_lock_init(&info->lock);
                info->flags = 0;
                info->mcr = 0;
-               INIT_WORK(&info->rx_work, rx_data_softint, port);
+               INIT_WORK(&info->rx_work, rx_data_softint);
+               info->port = port;
 
                INIT_LIST_HEAD(&info->rx_urbs_free);
                INIT_LIST_HEAD(&info->rx_urbs_submitted);
        spin_unlock_irqrestore(&info->lock, flags);
 
        if (actually_throttled)
-               rx_data_softint(port);
+               rx_data_softint(&info->rx_work);
 
        return;
 }
 }
 
 
-static void rx_data_softint(void *private)
+static void rx_data_softint(struct work_struct *work)
 {
-       struct usb_serial_port *port = (struct usb_serial_port *)private;
-       struct whiteheat_private *info = usb_get_serial_port_data(port);
+       struct whiteheat_private *info =
+               container_of(work, struct whiteheat_private, rx_work);
+       struct usb_serial_port *port = info->port;
        struct tty_struct *tty = port->tty;
        struct whiteheat_urb_wrap *wrap;
        struct urb *urb;
 
                softback_top = 0;
 }
 
-static void fb_flashcursor(void *private)
+static void fb_flashcursor(struct work_struct *work)
 {
-       struct fb_info *info = private;
+       struct fb_info *info = container_of(work, struct fb_info, queue);
        struct fbcon_ops *ops = info->fbcon_par;
        struct display *p;
        struct vc_data *vc = NULL;
        if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
            !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) {
                if (!info->queue.func)
-                       INIT_WORK(&info->queue, fb_flashcursor, info);
+                       INIT_WORK(&info->queue, fb_flashcursor);
 
                init_timer(&ops->cursor_timer);
                ops->cursor_timer.function = cursor_timer_handler;
 
 };
 
 static int v9fs_poll_proc(void *);
-static void v9fs_read_work(void *);
-static void v9fs_write_work(void *);
+static void v9fs_read_work(struct work_struct *work);
+static void v9fs_write_work(struct work_struct *work);
 static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
                          poll_table * p);
 static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
        m->rbuf = NULL;
        m->wpos = m->wsize = 0;
        m->wbuf = NULL;
-       INIT_WORK(&m->rq, v9fs_read_work, m);
-       INIT_WORK(&m->wq, v9fs_write_work, m);
+       INIT_WORK(&m->rq, v9fs_read_work);
+       INIT_WORK(&m->wq, v9fs_write_work);
        m->wsched = 0;
        memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
        m->poll_task = NULL;
 /**
  * v9fs_write_work - called when a transport can send some data
  */
-static void v9fs_write_work(void *a)
+static void v9fs_write_work(struct work_struct *work)
 {
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_req *req;
 
-       m = a;
+       m = container_of(work, struct v9fs_mux_data, wq);
 
        if (m->err < 0) {
                clear_bit(Wworksched, &m->wsched);
 /**
  * v9fs_read_work - called when there is some data to be read from a transport
  */
-static void v9fs_read_work(void *a)
+static void v9fs_read_work(struct work_struct *work)
 {
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_fcall *rcall;
        char *rbuf;
 
-       m = a;
+       m = container_of(work, struct v9fs_mux_data, rq);
 
        if (m->err < 0)
                return;
 
 
 struct greedy {
        struct gfs2_holder gr_gh;
-       struct work_struct gr_work;
+       struct delayed_work gr_work;
 };
 
 struct gfs2_gl_hash_bucket {
        glops->go_xmote_th(gl, state, flags);
 }
 
-static void greedy_work(void *data)
+static void greedy_work(struct work_struct *work)
 {
-       struct greedy *gr = data;
+       struct greedy *gr = container_of(work, struct greedy, gr_work.work);
        struct gfs2_holder *gh = &gr->gr_gh;
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
 
        gfs2_holder_init(gl, 0, 0, gh);
        set_bit(HIF_GREEDY, &gh->gh_iflags);
-       INIT_WORK(&gr->gr_work, greedy_work, gr);
+       INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
 
        set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
        schedule_delayed_work(&gr->gr_work, time);
 
                server->rcv.ptr = (unsigned char*)&server->rcv.buf;
                server->rcv.len = 10;
                server->rcv.state = 0;
-               INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server);
-               INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server);
+               INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
+               INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
                sock->sk->sk_write_space = ncp_tcp_write_space;
        } else {
-               INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server);
-               INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server);
+               INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
+               INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
                server->timeout_tm.data = (unsigned long)server;
                server->timeout_tm.function = ncpdgram_timeout_call;
        }
 
        }
 }
 
-void ncpdgram_rcv_proc(void *s)
+void ncpdgram_rcv_proc(struct work_struct *work)
 {
-       struct ncp_server *server = s;
+       struct ncp_server *server =
+               container_of(work, struct ncp_server, rcv.tq);
        struct socket* sock;
        
        sock = server->ncp_sock;
        }
 }
 
-void ncpdgram_timeout_proc(void *s)
+void ncpdgram_timeout_proc(struct work_struct *work)
 {
-       struct ncp_server *server = s;
+       struct ncp_server *server =
+               container_of(work, struct ncp_server, timeout_tq);
        mutex_lock(&server->rcv.creq_mutex);
        __ncpdgram_timeout_proc(server);
        mutex_unlock(&server->rcv.creq_mutex);
        }
 }
 
-void ncp_tcp_rcv_proc(void *s)
+void ncp_tcp_rcv_proc(struct work_struct *work)
 {
-       struct ncp_server *server = s;
+       struct ncp_server *server =
+               container_of(work, struct ncp_server, rcv.tq);
 
        mutex_lock(&server->rcv.creq_mutex);
        __ncptcp_rcv_proc(server);
        mutex_unlock(&server->rcv.creq_mutex);
 }
 
-void ncp_tcp_tx_proc(void *s)
+void ncp_tcp_tx_proc(struct work_struct *work)
 {
-       struct ncp_server *server = s;
+       struct ncp_server *server =
+               container_of(work, struct ncp_server, tx.tq);
        
        mutex_lock(&server->rcv.creq_mutex);
        __ncptcp_try_send(server);
 
 }
 
 static struct workqueue_struct *laundry_wq;
-static struct work_struct laundromat_work;
-static void laundromat_main(void *);
-static DECLARE_WORK(laundromat_work, laundromat_main, NULL);
+static void laundromat_main(struct work_struct *);
+static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
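
Statically declared work items follow the same rule: DECLARE_WORK() loses its
data argument, and anything rearmed through schedule_delayed_work(), like the
laundromat above, must be declared with DECLARE_DELAYED_WORK().  Roughly:

	static void my_fn(struct work_struct *work);
	static DECLARE_WORK(my_work, my_fn);
	static DECLARE_DELAYED_WORK(my_dwork, my_fn);

	schedule_work(&my_work);
	schedule_delayed_work(&my_dwork, HZ);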
 
 __be32
 nfsd4_renew(clientid_t *clid)
 }
 
 void
-laundromat_main(void *not_used)
+laundromat_main(struct work_struct *not_used)
 {
        time_t t;
 
 
        return status;
 }
 
-static void ocfs2_truncate_log_worker(void *data)
+static void ocfs2_truncate_log_worker(struct work_struct *work)
 {
        int status;
-       struct ocfs2_super *osb = data;
+       struct ocfs2_super *osb =
+               container_of(work, struct ocfs2_super,
+                            osb_truncate_log_wq.work);
 
        mlog_entry_void();
 
        /* ocfs2_truncate_log_shutdown keys on the existence of
         * osb->osb_tl_inode so we don't set any of the osb variables
         * until we're sure all is well. */
-       INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb);
+       INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
+                         ocfs2_truncate_log_worker);
        osb->osb_tl_bh    = tl_bh;
        osb->osb_tl_inode = tl_inode;
 
 
         * recognizes a node going up and down in one iteration */
        u64                     hr_generation;
 
-       struct work_struct      hr_write_timeout_work;
+       struct delayed_work     hr_write_timeout_work;
        unsigned long           hr_last_timeout_start;
 
        /* Used during o2hb_check_slot to hold a copy of the block
        int               wc_error;
 };
 
-static void o2hb_write_timeout(void *arg)
+static void o2hb_write_timeout(struct work_struct *work)
 {
-       struct o2hb_region *reg = arg;
+       struct o2hb_region *reg =
+               container_of(work, struct o2hb_region,
+                            hr_write_timeout_work.work);
 
        mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
             "milliseconds\n", reg->hr_dev_name,
                goto out;
        }
 
-        INIT_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout, reg);
+        INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
 
        /*
         * A node is considered live after it has beat LIVE_THRESHOLD
 
        o2quo_fence_self();
 }
 
-static void o2quo_make_decision(void *arg)
+static void o2quo_make_decision(struct work_struct *work)
 {
        int quorum;
        int lowest_hb, lowest_reachable = 0, fence = 0;
        struct o2quo_state *qs = &o2quo_state;
 
        spin_lock_init(&qs->qs_lock);
-       INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL);
+       INIT_WORK(&qs->qs_work, o2quo_make_decision);
 }
 
 void o2quo_exit(void)
 
                 [O2NET_ERR_DIED]       = -EHOSTDOWN,};
 
 /* can't quite avoid *all* internal declarations :/ */
-static void o2net_sc_connect_completed(void *arg);
-static void o2net_rx_until_empty(void *arg);
-static void o2net_shutdown_sc(void *arg);
+static void o2net_sc_connect_completed(struct work_struct *work);
+static void o2net_rx_until_empty(struct work_struct *work);
+static void o2net_shutdown_sc(struct work_struct *work);
 static void o2net_listen_data_ready(struct sock *sk, int bytes);
-static void o2net_sc_send_keep_req(void *arg);
+static void o2net_sc_send_keep_req(struct work_struct *work);
 static void o2net_idle_timer(unsigned long data);
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
 
        o2nm_node_get(node);
        sc->sc_node = node;
 
-       INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc);
-       INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc);
-       INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc);
-       INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc);
+       INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
+       INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
+       INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
+       INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
 
        init_timer(&sc->sc_idle_timeout);
        sc->sc_idle_timeout.function = o2net_idle_timer;
                sc_put(sc);
 }
 static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
-                                       struct work_struct *work,
+                                       struct delayed_work *work,
                                        int delay)
 {
        sc_get(sc);
                sc_put(sc);
 }
 static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
-                                        struct work_struct *work)
+                                        struct delayed_work *work)
 {
        if (cancel_delayed_work(work))
                sc_put(sc);
  * ourselves as state_change couldn't get the nn_lock and call set_nn_state
  * itself.
  */
-static void o2net_shutdown_sc(void *arg)
+static void o2net_shutdown_sc(struct work_struct *work)
 {
-       struct o2net_sock_container *sc = arg;
+       struct o2net_sock_container *sc =
+               container_of(work, struct o2net_sock_container,
+                            sc_shutdown_work);
        struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
 
        sclog(sc, "shutting down\n");
 /* this work func is triggered by data ready.  it reads until it can read no
  * more.  it interprets 0, eof, as fatal.  if data_ready hits while we're doing
  * our work the work struct will be marked and we'll be called again. */
-static void o2net_rx_until_empty(void *arg)
+static void o2net_rx_until_empty(struct work_struct *work)
 {
-       struct o2net_sock_container *sc = arg;
+       struct o2net_sock_container *sc =
+               container_of(work, struct o2net_sock_container, sc_rx_work);
        int ret;
 
        do {
 
 /* called when a connect completes and after a sock is accepted.  the
  * rx path will see the response and mark the sc valid */
-static void o2net_sc_connect_completed(void *arg)
+static void o2net_sc_connect_completed(struct work_struct *work)
 {
-       struct o2net_sock_container *sc = arg;
+       struct o2net_sock_container *sc =
+               container_of(work, struct o2net_sock_container,
+                            sc_connect_work);
 
        mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
               (unsigned long long)O2NET_PROTOCOL_VERSION,
 }
 
 /* this is called as a work_struct func. */
-static void o2net_sc_send_keep_req(void *arg)
+static void o2net_sc_send_keep_req(struct work_struct *work)
 {
-       struct o2net_sock_container *sc = arg;
+       struct o2net_sock_container *sc =
+               container_of(work, struct o2net_sock_container,
+                            sc_keepalive_work.work);
 
        o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
        sc_put(sc);
  * having a connect attempt fail, etc. This centralizes the logic which decides
  * if a connect attempt should be made or if we should give up and all future
  * transmit attempts should fail */
-static void o2net_start_connect(void *arg)
+static void o2net_start_connect(struct work_struct *work)
 {
-       struct o2net_node *nn = arg;
+       struct o2net_node *nn =
+               container_of(work, struct o2net_node, nn_connect_work.work);
        struct o2net_sock_container *sc = NULL;
        struct o2nm_node *node = NULL, *mynode = NULL;
        struct socket *sock = NULL;
        struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
-       int ret = 0;
+       int ret = 0, stop;
 
        /* if we're greater we initiate tx, otherwise we accept */
        if (o2nm_this_node() <= o2net_num_from_nn(nn))
 
        spin_lock(&nn->nn_lock);
        /* see if we already have one pending or have given up */
-       if (nn->nn_sc || nn->nn_persistent_error)
-               arg = NULL;
+       stop = (nn->nn_sc || nn->nn_persistent_error);
        spin_unlock(&nn->nn_lock);
-       if (arg == NULL) /* *shrug*, needed some indicator */
+       if (stop)
                goto out;
 
        nn->nn_last_connect_attempt = jiffies;
        return;
 }
 
-static void o2net_connect_expired(void *arg)
+static void o2net_connect_expired(struct work_struct *work)
 {
-       struct o2net_node *nn = arg;
+       struct o2net_node *nn =
+               container_of(work, struct o2net_node, nn_connect_expired.work);
 
        spin_lock(&nn->nn_lock);
        if (!nn->nn_sc_valid) {
        spin_unlock(&nn->nn_lock);
 }
 
-static void o2net_still_up(void *arg)
+static void o2net_still_up(struct work_struct *work)
 {
-       struct o2net_node *nn = arg;
+       struct o2net_node *nn =
+               container_of(work, struct o2net_node, nn_still_up.work);
 
        o2quo_hb_still_up(o2net_num_from_nn(nn));
 }
        return ret;
 }
 
-static void o2net_accept_many(void *arg)
+static void o2net_accept_many(struct work_struct *work)
 {
-       struct socket *sock = arg;
+       struct socket *sock = o2net_listen_sock;
        while (o2net_accept_one(sock) == 0)
                cond_resched();
 }
        write_unlock_bh(&sock->sk->sk_callback_lock);
 
        o2net_listen_sock = sock;
-       INIT_WORK(&o2net_listen_work, o2net_accept_many, sock);
+       INIT_WORK(&o2net_listen_work, o2net_accept_many);
 
        sock->sk->sk_reuse = 1;
        ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
                struct o2net_node *nn = o2net_nn_from_num(i);
 
                spin_lock_init(&nn->nn_lock);
-               INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn);
-               INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn);
-               INIT_WORK(&nn->nn_still_up, o2net_still_up, nn);
+               INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
+               INIT_DELAYED_WORK(&nn->nn_connect_expired,
+                                 o2net_connect_expired);
+               INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
                /* until we see hb from a node we'll return einval */
                nn->nn_persistent_error = -ENOTCONN;
                init_waitqueue_head(&nn->nn_sc_wq);
 
         * connect attempt fails and so can be self-arming.  shutdown is
         * careful to first mark the nn such that no connects will be attempted
         * before canceling delayed connect work and flushing the queue. */
-       struct work_struct              nn_connect_work;
+       struct delayed_work             nn_connect_work;
        unsigned long                   nn_last_connect_attempt;
 
        /* this is queued as nodes come up and is canceled when a connection is
         * established.  this expiring gives up on the node and errors out
         * transmits */
-       struct work_struct              nn_connect_expired;
+       struct delayed_work             nn_connect_expired;
 
        /* after we give up on a socket we wait a while before deciding
         * that it is still heartbeating and that we should do some
         * quorum work */
-       struct work_struct              nn_still_up;
+       struct delayed_work             nn_still_up;
 };
 
 struct o2net_sock_container {
        struct work_struct      sc_shutdown_work;
 
        struct timer_list       sc_idle_timeout;
-       struct work_struct      sc_keepalive_work;
+       struct delayed_work     sc_keepalive_work;
 
        unsigned                sc_handshake_ok:1;
 
 
  * called functions that cannot be directly called from the
  * net message handlers for some reason, usually because
  * they need to send net messages of their own. */
-void dlm_dispatch_work(void *data);
+void dlm_dispatch_work(struct work_struct *work);
 
 struct dlm_lock_resource;
 struct dlm_work_item;
 
 
        spin_lock_init(&dlm->work_lock);
        INIT_LIST_HEAD(&dlm->work_list);
-       INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm);
+       INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
 
        kref_init(&dlm->dlm_refs);
        dlm->dlm_state = DLM_CTXT_NEW;
 
 }
 
 /* Worker function used during recovery. */
-void dlm_dispatch_work(void *data)
+void dlm_dispatch_work(struct work_struct *work)
 {
-       struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
+       struct dlm_ctxt *dlm =
+               container_of(work, struct dlm_ctxt, dispatched_work);
        LIST_HEAD(tmp_list);
        struct list_head *iter, *iter2;
        struct dlm_work_item *item;
 
                BUG();
 }
 
-static void user_dlm_unblock_lock(void *opaque);
+static void user_dlm_unblock_lock(struct work_struct *work);
 
 static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
 {
        if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
                user_dlm_grab_inode_ref(lockres);
 
-               INIT_WORK(&lockres->l_work, user_dlm_unblock_lock,
-                         lockres);
+               INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
 
                queue_work(user_dlm_worker, &lockres->l_work);
                lockres->l_flags |= USER_LOCK_QUEUED;
        iput(inode);
 }
 
-static void user_dlm_unblock_lock(void *opaque)
+static void user_dlm_unblock_lock(struct work_struct *work)
 {
        int new_level, status;
-       struct user_lock_res *lockres = (struct user_lock_res *) opaque;
+       struct user_lock_res *lockres =
+               container_of(work, struct user_lock_res, l_work);
        struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
 
        mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
 
  * NOTE: This function can and will sleep on recovery of other nodes
  * during cluster locking, just like any other ocfs2 process.
  */
-void ocfs2_complete_recovery(void *data)
+void ocfs2_complete_recovery(struct work_struct *work)
 {
        int ret;
-       struct ocfs2_super *osb = data;
-       struct ocfs2_journal *journal = osb->journal;
+       struct ocfs2_journal *journal =
+               container_of(work, struct ocfs2_journal, j_recovery_work);
+       struct ocfs2_super *osb = journal->j_osb;
        struct ocfs2_dinode *la_dinode, *tl_dinode;
        struct ocfs2_la_recovery_item *item;
        struct list_head *p, *n;
 
 }
 
 /* Exported only for the journal struct init code in super.c. Do not call. */
-void ocfs2_complete_recovery(void *data);
+void ocfs2_complete_recovery(struct work_struct *work);
 
 /*
  *  Journal Control:
 
        /* Truncate log info */
        struct inode                    *osb_tl_inode;
        struct buffer_head              *osb_tl_bh;
-       struct work_struct              osb_truncate_log_wq;
+       struct delayed_work             osb_truncate_log_wq;
 
        struct ocfs2_node_map           osb_recovering_orphan_dirs;
        unsigned int                    *osb_orphan_wipes;
 
        spin_lock_init(&journal->j_lock);
        journal->j_trans_id = (unsigned long) 1;
        INIT_LIST_HEAD(&journal->j_la_cleanups);
-       INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb);
+       INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
        journal->j_state = OCFS2_JOURNAL_FREE;
 
        /* get some pseudo constants for clustersize bits */
 
                               struct reiserfs_journal *journal);
 static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl);
-static void flush_async_commits(void *p);
+static void flush_async_commits(struct work_struct *work);
 static void queue_log_writer(struct super_block *s);
 
 /* values for join in do_journal_begin_r */
        if (reiserfs_mounted_fs_count <= 1)
                commit_wq = create_workqueue("reiserfs");
 
-       INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
+       INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
+       journal->j_work_sb = p_s_sb;
        return 0;
       free_and_return:
        free_journal_ram(p_s_sb);
 /*
 ** writeback the pending async commits to disk
 */
-static void flush_async_commits(void *p)
+static void flush_async_commits(struct work_struct *work)
 {
-       struct super_block *p_s_sb = p;
-       struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+       struct reiserfs_journal *journal =
+               container_of(work, struct reiserfs_journal, j_work.work);
+       struct super_block *p_s_sb = journal->j_work_sb;
        struct reiserfs_journal_list *jl;
        struct list_head *entry;
 
 
  */
 STATIC void
 xfs_end_bio_delalloc(
-       void                    *data)
+       struct work_struct      *work)
 {
-       xfs_ioend_t             *ioend = data;
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
 
        xfs_destroy_ioend(ioend);
 }
  */
 STATIC void
 xfs_end_bio_written(
-       void                    *data)
+       struct work_struct      *work)
 {
-       xfs_ioend_t             *ioend = data;
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
 
        xfs_destroy_ioend(ioend);
 }
  */
 STATIC void
 xfs_end_bio_unwritten(
-       void                    *data)
+       struct work_struct      *work)
 {
-       xfs_ioend_t             *ioend = data;
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
        bhv_vnode_t             *vp = ioend->io_vnode;
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
        ioend->io_size = 0;
 
        if (type == IOMAP_UNWRITTEN)
-               INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+               INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
        else if (type == IOMAP_DELAY)
-               INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+               INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
        else
-               INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
+               INIT_WORK(&ioend->io_work, xfs_end_bio_written);
 
        return ioend;
 }
 
 
 STATIC void
 xfs_buf_iodone_work(
-       void                    *v)
+       struct work_struct      *work)
 {
-       xfs_buf_t               *bp = (xfs_buf_t *)v;
+       xfs_buf_t               *bp =
+               container_of(work, xfs_buf_t, b_iodone_work);
 
        if (bp->b_iodone)
                (*(bp->b_iodone))(bp);
 
        if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
                if (schedule) {
-                       INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
+                       INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
                        queue_work(xfslogd_workqueue, &bp->b_iodone_work);
                } else {
-                       xfs_buf_iodone_work(bp);
+                       xfs_buf_iodone_work(&bp->b_iodone_work);
                }
        } else {
                up(&bp->b_iodonesema);
 
 struct cn_callback_entry {
        struct list_head callback_entry;
        struct cn_callback *cb;
-       struct work_struct work;
+       struct delayed_work work;
        struct cn_queue_dev *pdev;
 
        struct cn_callback_id id;
 
 int cn_cb_equal(struct cb_id *, struct cb_id *);
 
-void cn_queue_wrapper(void *data);
+void cn_queue_wrapper(struct work_struct *work);
 
 extern int cn_already_initialized;
 
 
        int (*reply) (struct i2o_controller *, u32, struct i2o_message *);
 
        /* Event handler */
-       void (*event) (struct i2o_event *);
+       work_func_t event;
 
        struct workqueue_struct *event_queue;   /* Event queue */
 
 
        struct mmc_card         *card_busy;     /* the MMC card claiming host */
        struct mmc_card         *card_selected; /* the selected MMC card */
 
-       struct work_struct      detect;
+       struct delayed_work     detect;
 
        unsigned long           private[0] ____cacheline_aligned;
 };
 
        } unexpected_packet;
 };
 
-extern void ncp_tcp_rcv_proc(void *server);
-extern void ncp_tcp_tx_proc(void *server);
-extern void ncpdgram_rcv_proc(void *server);
-extern void ncpdgram_timeout_proc(void *server);
+extern void ncp_tcp_rcv_proc(struct work_struct *work);
+extern void ncp_tcp_tx_proc(struct work_struct *work);
+extern void ncpdgram_rcv_proc(struct work_struct *work);
+extern void ncpdgram_timeout_proc(struct work_struct *work);
 extern void ncpdgram_timeout_call(unsigned long server);
 extern void ncp_tcp_data_ready(struct sock* sk, int len);
 extern void ncp_tcp_write_space(struct sock* sk);
 
        int j_errno;
 
        /* when flushing ordered buffers, throttle new ordered writers */
-       struct work_struct j_work;
+       struct delayed_work j_work;
+       struct super_block *j_work_sb;
        atomic_t j_async_throttle;
 };
 
 
        size_t subbufs_consumed;        /* count of sub-buffers consumed */
        struct rchan *chan;             /* associated channel */
        wait_queue_head_t read_wait;    /* reader wait queue */
-       struct work_struct wake_readers; /* reader wake-up work struct */
+       struct delayed_work wake_readers; /* reader wake-up work struct */
        struct dentry *dentry;          /* channel file dentry */
        struct kref kref;               /* channel buffer refcount */
        struct page **page_array;       /* array of current buffer pages */
 
 
        int pm_usage_cnt;               /* usage counter for autosuspend */
 #ifdef CONFIG_PM
-       struct work_struct autosuspend; /* for delayed autosuspends */
+       struct delayed_work autosuspend; /* for delayed autosuspends */
        struct mutex pm_mutex;          /* protects PM operations */
 
        unsigned auto_pm:1;             /* autosuspend/resume in progress */
 
        /* Scan retries remaining */
        int scan_retry;
 
-       struct work_struct work;
-       struct work_struct timeout;
+       struct delayed_work work;
+       struct delayed_work timeout;
 };
 
 struct ieee80211softmac_bss_info {
 
 void sctp_inq_free(struct sctp_inq *);
 void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
 struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
-void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
+void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t);
 
 /* This is the structure we use to hold outbound chunks.  You push
  * chunks in and they automatically pop out the other end as bundled
 
         void *lldd_dev;
 };
 
+struct sas_discovery_event {
+       struct work_struct work;
+       struct asd_sas_port *port;
+};
+
 struct sas_discovery {
        spinlock_t disc_event_lock;
-       struct work_struct disc_work[DISC_NUM_EVENTS];
+       struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
        unsigned long    pending;
        u8     fanout_sas_addr[8];
        u8     eeds_a[8];
        void *lldd_port;          /* not touched by the sas class code */
 };
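
With an array of work items, as in the discovery and event tables here,
container_of() can only find the array element, not the object the array is
embedded in, so each element is wrapped in a small struct that carries a
pointer back to its parent. Roughly, with hypothetical names:

    #include <linux/workqueue.h>

    struct my_phy;

    struct my_event {
            struct work_struct work;
            struct my_phy *phy;             /* back-pointer to the parent */
    };

    struct my_phy {
            struct my_event events[4];      /* one wrapper per event type */
    };

    static void my_event_fn(struct work_struct *work)
    {
            struct my_event *ev = container_of(work, struct my_event, work);
            struct my_phy *phy = ev->phy;   /* container_of() finds the slot;
                                             * the owner comes from the
                                             * back-pointer */
            /* ... */
    }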
 
+struct asd_sas_event {
+       struct work_struct work;
+       struct asd_sas_phy *phy;
+};
+
 /* The phy pretty much is controlled by the LLDD.
  * The class only reads those fields.
  */
 struct asd_sas_phy {
 /* private: */
        /* protected by ha->event_lock */
-       struct work_struct   port_events[PORT_NUM_EVENTS];
-       struct work_struct   phy_events[PHY_NUM_EVENTS];
+       struct asd_sas_event   port_events[PORT_NUM_EVENTS];
+       struct asd_sas_event   phy_events[PHY_NUM_EVENTS];
 
        unsigned long port_events_pending;
        unsigned long phy_events_pending;
        int               queue_thread_kill;
 };
 
+struct sas_ha_event {
+       struct work_struct work;
+       struct sas_ha_struct *ha;
+};
+
 struct sas_ha_struct {
 /* private: */
        spinlock_t       event_lock;
-       struct work_struct ha_events[HA_NUM_EVENTS];
+       struct sas_ha_event ha_events[HA_NUM_EVENTS];
        unsigned long    pending;
 
        struct scsi_core core;
 
        u8 flags;
        struct list_head peers;
        struct device dev;
-       struct work_struct dev_loss_work;
+       struct delayed_work dev_loss_work;
        struct work_struct scan_work;
-       struct work_struct fail_io_work;
+       struct delayed_work fail_io_work;
        struct work_struct stgt_delete_work;
        struct work_struct rport_delete_work;
 } __attribute__((aligned(sizeof(unsigned long))));
 
 
        /* recovery fields */
        int recovery_tmo;
-       struct work_struct recovery_work;
+       struct delayed_work recovery_work;
 
        int target_id;
 
 
 #ifdef CONFIG_SND_AC97_POWER_SAVE
        unsigned int power_up;  /* power states */
        struct workqueue_struct *power_workq;
-       struct work_struct power_work;
+       struct delayed_work power_work;
 #endif
        struct device dev;
 };
 
        unsigned char rcs0;
        unsigned char rcs1;
        struct workqueue_struct *workqueue;
-       struct work_struct work;
+       struct delayed_work work;
        void *change_callback_private;
        void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1);
 };
 
  *     reason waking is deferred is that calling directly from write
  *     causes problems if you're writing from say the scheduler.
  */
-static void wakeup_readers(void *private)
+static void wakeup_readers(struct work_struct *work)
 {
-       struct rchan_buf *buf = private;
+       struct rchan_buf *buf =
+               container_of(work, struct rchan_buf, wake_readers.work);
        wake_up_interruptible(&buf->read_wait);
 }
 
        if (init) {
                init_waitqueue_head(&buf->read_wait);
                kref_init(&buf->kref);
-               INIT_WORK(&buf->wake_readers, NULL, NULL);
+               INIT_DELAYED_WORK(&buf->wake_readers, NULL);
        } else {
                cancel_delayed_work(&buf->wake_readers);
                flush_scheduled_work();
                        buf->padding[old_subbuf];
                smp_mb();
                if (waitqueue_active(&buf->read_wait)) {
-                       PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf);
+                       PREPARE_DELAYED_WORK(&buf->wake_readers,
+                                            wakeup_readers);
                        schedule_delayed_work(&buf->wake_readers, 1);
                }
        }
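
relay initializes the item with no handler and only attaches one once there
is a reader to wake; under the new API the function is swapped in with
PREPARE_DELAYED_WORK(), which changes the handler and nothing else. A
condensed sketch of that flow, hypothetical names:

    #include <linux/workqueue.h>

    struct my_buf {
            struct delayed_work wake_readers;
    };

    static void my_wakeup(struct work_struct *work)
    {
            /* ... wake up waiting readers ... */
    }

    static void my_buf_init(struct my_buf *buf)
    {
            INIT_DELAYED_WORK(&buf->wake_readers, NULL);    /* no handler yet */
    }

    static void my_buf_kick(struct my_buf *buf)
    {
            /* attach the real handler late, then queue with a short delay */
            PREPARE_DELAYED_WORK(&buf->wake_readers, my_wakeup);
            schedule_delayed_work(&buf->wake_readers, 1);
    }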
 
 }
 
 #ifdef CONFIG_NUMA
-static void lru_add_drain_per_cpu(void *dummy)
+static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
        lru_add_drain();
 }
  */
 int lru_add_drain_all(void)
 {
-       return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+       return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
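
schedule_on_each_cpu() loses its data argument in the same series; handlers
that never needed context, like this one, just take and ignore the work
pointer. Minimal sketch:

    #include <linux/workqueue.h>

    static void per_cpu_fn(struct work_struct *unused)
    {
            /* runs once on each online CPU; no context needed */
    }

    static int run_on_all_cpus(void)
    {
            return schedule_on_each_cpu(per_cpu_fn);    /* was (fn, NULL) */
    }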
 
 #else
 
 
 #define LEC_ARP_REFRESH_INTERVAL (3*HZ)
 
-static void lec_arp_check_expire(void *data);
+static void lec_arp_check_expire(struct work_struct *work);
 static void lec_arp_expire_arp(unsigned long data);
 
 /* 
         INIT_HLIST_HEAD(&priv->lec_no_forward);
         INIT_HLIST_HEAD(&priv->mcast_fwds);
        spin_lock_init(&priv->lec_arp_lock);
-       INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
+       INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
        schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
 }
 
  *       to ESI_FORWARD_DIRECT. This causes the flush period to end
  *       regardless of the progress of the flush protocol.
  */
-static void lec_arp_check_expire(void *data)
+static void lec_arp_check_expire(struct work_struct *work)
 {
        unsigned long flags;
-       struct lec_priv *priv = data;
+       struct lec_priv *priv =
+               container_of(work, struct lec_priv, lec_arp_work.work);
        struct hlist_node *node, *next;
        struct lec_arp_table *entry;
        unsigned long now;
 
        spinlock_t lec_arp_lock;
        struct atm_vcc *mcast_vcc;              /* Default Multicast Send VCC */
        struct atm_vcc *lecd;
-       struct work_struct lec_arp_work;        /* C10 */
+       struct delayed_work lec_arp_work;       /* C10 */
        unsigned int maximum_unknown_frame_count;
                                                /*
                                                 * Within the period of time defined by this variable, the client will send
 
        kfree(data);
 }
 
-static void add_conn(void *data)
+static void add_conn(struct work_struct *work)
 {
-       struct hci_conn *conn = data;
+       struct hci_conn *conn = container_of(work, struct hci_conn, work);
        int i;
 
        if (device_register(&conn->dev) < 0) {
 
        dev_set_drvdata(&conn->dev, conn);
 
-       INIT_WORK(&conn->work, add_conn, (void *) conn);
+       INIT_WORK(&conn->work, add_conn);
 
        schedule_work(&conn->work);
 }
 
-static void del_conn(void *data)
+static void del_conn(struct work_struct *work)
 {
-       struct hci_conn *conn = data;
+       struct hci_conn *conn = container_of(work, struct hci_conn, work);
        device_del(&conn->dev);
 }
 
 {
        BT_DBG("conn %p", conn);
 
-       INIT_WORK(&conn->work, del_conn, (void *) conn);
+       INIT_WORK(&conn->work, del_conn);
 
        schedule_work(&conn->work);
 }
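
Items that are never queued with a delay, like these, are the simplest
case: the work_struct stays embedded in its object and the handler uses a
plain container_of(). Sketch with hypothetical names:

    #include <linux/workqueue.h>

    struct my_conn {
            struct work_struct work;
    };

    static void my_add_conn(struct work_struct *work)
    {
            struct my_conn *conn = container_of(work, struct my_conn, work);
            /* ... register conn with the driver core ... */
    }

    static void my_conn_add(struct my_conn *conn)
    {
            INIT_WORK(&conn->work, my_add_conn);    /* two arguments now */
            schedule_work(&conn->work);
    }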
 
  * Called from work queue to allow for calling functions that
  * might sleep (such as speed check), and to debounce.
  */
-static void port_carrier_check(void *arg)
+static void port_carrier_check(struct work_struct *work)
 {
-       struct net_device *dev = arg;
        struct net_bridge_port *p;
+       struct net_device *dev;
        struct net_bridge *br;
 
+       dev = container_of(work, struct net_bridge_port,
+                          carrier_check.work)->dev;
+       work_release(work);
+
        rtnl_lock();
        p = dev->br_port;
        if (!p)
        p->port_no = index;
        br_init_port(p);
        p->state = BR_STATE_DISABLED;
-       INIT_WORK(&p->carrier_check, port_carrier_check, dev);
+       INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
        br_stp_port_timer_init(p);
 
        kobject_init(&p->kobj);
 
        struct timer_list               hold_timer;
        struct timer_list               message_age_timer;
        struct kobject                  kobj;
-       struct work_struct              carrier_check;
+       struct delayed_work             carrier_check;
        struct rcu_head                 rcu;
 };
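
The bridge uses the _NAR ("no auto-release") variant from this API
generation: the item stays flagged pending until the handler itself calls
work_release(), so it cannot be requeued while the handler is still running
against its container. A sketch of the shape, assuming the 2.6.20-era _NAR
interface:

    #include <linux/workqueue.h>

    struct my_port {
            struct delayed_work carrier_check;
    };

    static void my_carrier_check(struct work_struct *work)
    {
            struct my_port *p =
                    container_of(work, struct my_port, carrier_check.work);

            work_release(work);     /* _NAR: the pending bit is cleared here,
                                     * by the handler, not by the core */
            /* ... recheck carrier state of p ... */
    }

    static void my_port_init(struct my_port *p)
    {
            INIT_DELAYED_WORK_NAR(&p->carrier_check, my_carrier_check);
    }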
 
 
 static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
 {
        unsigned long flags;
        struct sk_buff *skb;
        }
 }
 
-static DECLARE_WORK(send_queue, queue_process, NULL);
+static DECLARE_WORK(send_queue, queue_process);
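
File-scope items simply drop the trailing NULL from their DECLARE_WORK();
a handler with no per-item context ignores its argument:

    #include <linux/workqueue.h>

    static void my_queue_process(struct work_struct *unused)
    {
            /* drains a file-scope queue; there is no container to recover */
    }

    static DECLARE_WORK(my_send_queue, my_queue_process);  /* no data arg */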
 
 void netpoll_queue(struct sk_buff *skb)
 {
 
        .tw_timer       = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                                            (unsigned long)&dccp_death_row),
        .twkill_work    = __WORK_INITIALIZER(dccp_death_row.twkill_work,
-                                            inet_twdr_twkill_work,
-                                            &dccp_death_row),
+                                            inet_twdr_twkill_work),
 /* Short-time timewait calendar */
 
        .twcal_hand     = -1,
 
 }
 
 void
-ieee80211softmac_assoc_timeout(void *d)
+ieee80211softmac_assoc_timeout(struct work_struct *work)
 {
-       struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+       struct ieee80211softmac_device *mac =
+               container_of(work, struct ieee80211softmac_device,
+                            associnfo.timeout.work);
        struct ieee80211softmac_network *n;
 
        mutex_lock(&mac->associnfo.mutex);
 
 /* This function is called to handle userspace requests (asynchronously) */
 void
-ieee80211softmac_assoc_work(void *d)
+ieee80211softmac_assoc_work(struct work_struct *work)
 {
-       struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+       struct ieee80211softmac_device *mac =
+               container_of(work, struct ieee80211softmac_device,
+                            associnfo.work.work);
        struct ieee80211softmac_network *found = NULL;
        struct ieee80211_network *net = NULL, *best = NULL;
        int bssvalid;
                                network->authenticated = 0;
                                /* we don't want to do this more than once ... */
                                network->auth_desynced_once = 1;
-                               schedule_work(&mac->associnfo.work);
+                               schedule_delayed_work(&mac->associnfo.work, 0);
                                break;
                        }
                default:
        ieee80211softmac_disassoc(mac);
 
        /* try to reassociate */
-       schedule_work(&mac->associnfo.work);
+       schedule_delayed_work(&mac->associnfo.work, 0);
 
        return 0;
 }
                dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
                return 0;
        }
-       schedule_work(&mac->associnfo.work);
+       schedule_delayed_work(&mac->associnfo.work, 0);
 
        return 0;
 }
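
Since a delayed_work can no longer be passed to schedule_work(), every call
site that used to queue the associnfo work immediately now asks for a zero
delay instead; queue_delayed_work() treats a zero delay as an immediate
queue. Sketch:

    #include <linux/workqueue.h>

    struct my_mac {
            struct delayed_work assoc_work; /* queued both now and later */
    };

    static void my_kick_assoc(struct my_mac *mac)
    {
            /* was: schedule_work(&mac->assoc_work); */
            schedule_delayed_work(&mac->assoc_work, 0);
    }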
 
 
 #include "ieee80211softmac_priv.h"
 
-static void ieee80211softmac_auth_queue(void *data);
+static void ieee80211softmac_auth_queue(struct work_struct *work);
 
 /* Queues an auth request to the desired AP */
 int
        auth->mac = mac;
        auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
        auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
-       INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth);
+       INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
        
        /* Lock (for list) */
        spin_lock_irqsave(&mac->lock, flags);
 
        /* add to list */
        list_add_tail(&auth->list, &mac->auth_queue);
-       schedule_work(&auth->work);
+       schedule_delayed_work(&auth->work, 0);
        spin_unlock_irqrestore(&mac->lock, flags);
        
        return 0;
 
 /* Sends an auth request to the desired AP and handles timeouts */
 static void
-ieee80211softmac_auth_queue(void *data)
+ieee80211softmac_auth_queue(struct work_struct *work)
 {
        struct ieee80211softmac_device *mac;
        struct ieee80211softmac_auth_queue_item *auth;
        struct ieee80211softmac_network *net;
        unsigned long flags;
 
-       auth = (struct ieee80211softmac_auth_queue_item *)data;
+       auth = container_of(work, struct ieee80211softmac_auth_queue_item,
+                           work.work);
        net = auth->net;
        mac = auth->mac;
 
 
 /* Sends a response to an auth challenge (for shared key auth). */
 static void
-ieee80211softmac_auth_challenge_response(void *_aq)
+ieee80211softmac_auth_challenge_response(struct work_struct *work)
 {
-       struct ieee80211softmac_auth_queue_item *aq = _aq;
+       struct ieee80211softmac_auth_queue_item *aq =
+               container_of(work, struct ieee80211softmac_auth_queue_item,
+                            work.work);
 
        /* Send our response */
        ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
                         * we have obviously already sent the initial auth
                         * request. */
                        cancel_delayed_work(&aq->work);
-                       INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
-                       schedule_work(&aq->work);
+                       INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
+                       schedule_delayed_work(&aq->work, 0);
                        spin_unlock_irqrestore(&mac->lock, flags);
                        return 0;
                case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
        ieee80211softmac_deauth_from_net(mac, net);
 
        /* let's try to re-associate */
-       schedule_work(&mac->associnfo.work);
+       schedule_delayed_work(&mac->associnfo.work, 0);
        return 0;
 }
 
 
 
 static void
-ieee80211softmac_notify_callback(void *d)
+ieee80211softmac_notify_callback(struct work_struct *work)
 {
-       struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d;
-       kfree(d);
+       struct ieee80211softmac_event *pevent =
+               container_of(work, struct ieee80211softmac_event, work.work);
+       struct ieee80211softmac_event event = *pevent;
+       kfree(pevent);
        
        event.fun(event.mac->dev, event.event_type, event.context);
 }
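
When the handler frees the object its work item is embedded in, as the
notify callback above does, everything it needs afterwards has to be copied
out first, because the work pointer dangles once kfree() runs. Condensed
sketch, hypothetical names:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_event {
            struct delayed_work work;
            void (*fun)(void *context);
            void *context;
    };

    static void my_notify_fn(struct work_struct *work)
    {
            struct my_event *ev =
                    container_of(work, struct my_event, work.work);
            struct my_event copy = *ev;     /* copy before freeing the
                                             * container; only the plain
                                             * fields of the copy are used */

            kfree(ev);
            copy.fun(copy.context);
    }
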
                return -ENOMEM;
        
        eventptr->event_type = event;
-       INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr);
+       INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
        eventptr->fun = fun;
        eventptr->context = context;
        eventptr->mac = mac;
                                /* User may have subscribed to ANY event, so
                                 * we tell them which event triggered it. */
                                eventptr->event_type = event;
-                               schedule_work(&eventptr->work);
+                               schedule_delayed_work(&eventptr->work, 0);
                        }
                }
 }
 
        INIT_LIST_HEAD(&softmac->events);
 
        mutex_init(&softmac->associnfo.mutex);
-       INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac);
-       INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac);
+       INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
+       INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
        softmac->start_scan = ieee80211softmac_start_scan_implementation;
        softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
        softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
 
 /* private definitions and prototypes */
 
 /*** prototypes from _scan.c */
-void ieee80211softmac_scan(void *sm);
+void ieee80211softmac_scan(struct work_struct *work);
 /* for internal use if scanning is needed */
 int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
 void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
 int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
 
 /*** prototypes from _assoc.c */
-void ieee80211softmac_assoc_work(void *d);
+void ieee80211softmac_assoc_work(struct work_struct *work);
 int ieee80211softmac_handle_assoc_response(struct net_device * dev,
                                           struct ieee80211_assoc_response * resp,
                                           struct ieee80211_network * network);
                                     struct ieee80211_disassoc * disassoc);
 int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
                                        struct ieee80211_reassoc_request * reassoc);
-void ieee80211softmac_assoc_timeout(void *d);
+void ieee80211softmac_assoc_timeout(struct work_struct *work);
 void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
 void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
 
        struct ieee80211softmac_device  *mac;   /* SoftMAC device */
        u8 retry;                               /* Retry limit */
        u8 state;                               /* Auth State */
-       struct work_struct              work;   /* Work queue */
+       struct delayed_work             work;   /* Work queue */
 };
 
 /* scanning information */
           stop:1;
        u8 skip_flags;
        struct completion finished;
-       struct work_struct softmac_scan;
+       struct delayed_work softmac_scan;
+       struct ieee80211softmac_device *mac;
 };
 
 /* private event struct */
        struct list_head list;
        int event_type;
        void *event_context;
-       struct work_struct work;
+       struct delayed_work work;
        notify_function_ptr fun;
        void *context;
        struct ieee80211softmac_device *mac;
 
 
 
 /* internal scanning implementation follows */
-void ieee80211softmac_scan(void *d)
+void ieee80211softmac_scan(struct work_struct *work)
 {
        int invalid_channel;
        u8 current_channel_idx;
-       struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d;
-       struct ieee80211softmac_scaninfo *si = sm->scaninfo;
+       struct ieee80211softmac_scaninfo *si =
+               container_of(work, struct ieee80211softmac_scaninfo,
+                            softmac_scan.work);
+       struct ieee80211softmac_device *sm = si->mac;
        unsigned long flags;
 
        while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
        struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
        if (unlikely(!info))
                return NULL;
-       INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac);
+       INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
+       info->mac = mac;
        init_completion(&info->finished);
        return info;
 }
        sm->scaninfo->started = 1;
        sm->scaninfo->stop = 0;
        INIT_COMPLETION(sm->scaninfo->finished);
-       schedule_work(&sm->scaninfo->softmac_scan);
+       schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
        spin_unlock_irqrestore(&sm->lock, flags);
        return 0;
 }
 
 
        sm->associnfo.associating = 1;
        /* queue lower level code to do work (if necessary) */
-       schedule_work(&sm->associnfo.work);
+       schedule_delayed_work(&sm->associnfo.work, 0);
 out:
        mutex_unlock(&sm->associnfo.mutex);
 
                /* force reassociation */
                mac->associnfo.bssvalid = 0;
                if (mac->associnfo.associated)
-                       schedule_work(&mac->associnfo.work);
+                       schedule_delayed_work(&mac->associnfo.work, 0);
        } else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
                /* the bssid we have is no longer fixed */
                mac->associnfo.bssfixed = 0;
                /* tell the other code that this bssid should be used no matter what */
                mac->associnfo.bssfixed = 1;
                /* queue associate if new bssid or (old one again and not associated) */
-               schedule_work(&mac->associnfo.work);
+               schedule_delayed_work(&mac->associnfo.work, 0);
         }
 
  out:
 
  *     Timer for checking the defense
  */
 #define DEFENSE_TIMER_PERIOD   1*HZ
-static void defense_work_handler(void *data);
-static DECLARE_WORK(defense_work, defense_work_handler, NULL);
+static void defense_work_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
 
-static void defense_work_handler(void *data)
+static void defense_work_handler(struct work_struct *work)
 {
        update_defense_level();
        if (atomic_read(&ip_vs_dropentry))
 
 static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
 static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
 static void ircomm_tty_hangup(struct tty_struct *tty);
-static void ircomm_tty_do_softint(void *private_);
+static void ircomm_tty_do_softint(struct work_struct *work);
 static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
 static void ircomm_tty_stop(struct tty_struct *tty);
 
                self->flow = FLOW_STOP;
 
                self->line = line;
-               INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self);
+               INIT_WORK(&self->tqueue, ircomm_tty_do_softint);
                self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
                self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
                self->close_delay = 5*HZ/10;
 }
 
 /*
- * Function ircomm_tty_do_softint (private_)
+ * Function ircomm_tty_do_softint (work)
  *
 *    We use this routine to give the write wakeup to the user at a
 *    safe time (as fast as possible after the write has completed). This
  *    can be compared to the Tx interrupt.
  */
-static void ircomm_tty_do_softint(void *private_)
+static void ircomm_tty_do_softint(struct work_struct *work)
 {
-       struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_;
+       struct ircomm_tty_cb *self =
+               container_of(work, struct ircomm_tty_cb, tqueue);
        struct tty_struct *tty;
        unsigned long flags;
        struct sk_buff *skb, *ctrl_skb;
 
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal functions. */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc);
+static void sctp_assoc_bh_rcv(struct work_struct *work);
 
 
 /* 1st Level Abstractions. */
 
        /* Create an input queue.  */
        sctp_inq_init(&asoc->base.inqueue);
-       sctp_inq_set_th_handler(&asoc->base.inqueue,
-                                   (void (*)(void *))sctp_assoc_bh_rcv,
-                                   asoc);
+       sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
 
        /* Create an output queue.  */
        sctp_outq_init(asoc, &asoc->outqueue);
 }
 
 /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc)
+static void sctp_assoc_bh_rcv(struct work_struct *work)
 {
+       struct sctp_association *asoc =
+               container_of(work, struct sctp_association,
+                            base.inqueue.immediate);
        struct sctp_endpoint *ep;
        struct sctp_chunk *chunk;
        struct sock *sk;
 
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal helpers. */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep);
+static void sctp_endpoint_bh_rcv(struct work_struct *work);
 
 /*
  * Initialize the base fields of the endpoint structure.
        sctp_inq_init(&ep->base.inqueue);
 
        /* Set its top-half handler */
-       sctp_inq_set_th_handler(&ep->base.inqueue,
-                               (void (*)(void *))sctp_endpoint_bh_rcv, ep);
+       sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
 
        /* Initialize the bind addr area */
        sctp_bind_addr_init(&ep->base.bind_addr, 0);
 /* Do delayed input processing.  This is scheduled by sctp_rcv().
  * This may be called on BH or task time.
  */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep)
+static void sctp_endpoint_bh_rcv(struct work_struct *work)
 {
+       struct sctp_endpoint *ep =
+               container_of(work, struct sctp_endpoint,
+                            base.inqueue.immediate);
        struct sctp_association *asoc;
        struct sock *sk;
        struct sctp_transport *transport;
 
        queue->in_progress = NULL;
 
        /* Create a task for delivering data.  */
-       INIT_WORK(&queue->immediate, NULL, NULL);
+       INIT_WORK(&queue->immediate, NULL);
 
        queue->malloced = 0;
 }
         * on the BH related data structures.
         */
        list_add_tail(&chunk->list, &q->in_chunk_list);
-       q->immediate.func(q->immediate.data);
+       q->immediate.func(&q->immediate);
 }
 
 /* Extract a chunk from an SCTP inqueue.
  * The intent is that this routine will pull stuff out of the
  * inqueue and process it.
  */
-void sctp_inq_set_th_handler(struct sctp_inq *q,
-                                void (*callback)(void *), void *arg)
+void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
 {
-       INIT_WORK(&q->immediate, callback, arg);
+       INIT_WORK(&q->immediate, callback);
 }
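
Interfaces that used to take a handler plus a void * now take a bare
work_func_t (void (*)(struct work_struct *)); the context comes out of the
queue structure via container_of(), so both the extra argument and the
function-pointer casts above disappear. Sketch:

    #include <linux/workqueue.h>

    struct my_inq {
            struct work_struct immediate;
    };

    /* was: my_set_handler(struct my_inq *q, void (*cb)(void *), void *arg) */
    static void my_set_handler(struct my_inq *q, work_func_t cb)
    {
            INIT_WORK(&q->immediate, cb);
    }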
 
 
        xfrm_pol_put(policy);
 }
 
-static void xfrm_policy_gc_task(void *data)
+static void xfrm_policy_gc_task(struct work_struct *work)
 {
        struct xfrm_policy *policy;
        struct hlist_node *entry, *tmp;
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
        int dir, total;
 
        mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 /* Generate new index... KAME seems to generate them ordered by cost
  * of an absolute inpredictability of ordering of rules. This will not pass. */
                        panic("XFRM: failed to allocate bydst hash\n");
        }
 
-       INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
+       INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
        register_netdevice_notifier(&xfrm_dev_notifier);
 }
 
 
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
        struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
        unsigned long nsize, osize;
        mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
 EXPORT_SYMBOL(km_waitq);
        kfree(x);
 }
 
-static void xfrm_state_gc_task(void *data)
+static void xfrm_state_gc_task(struct work_struct *data)
 {
        struct xfrm_state *x;
        struct hlist_node *entry, *tmp;
                panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
        xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
 
-       INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
+       INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
 }
 
 
 };
 
 struct gpio_notification {
+       struct delayed_work work;
        notify_func_t notify;
        void *data;
        void *gpio_private;
-       struct work_struct work;
        struct mutex mutex;
 };
 
 
        ftr_gpio_set_lineout(rt, (s>>2)&1);
 }
 
-static void ftr_handle_notify(void *data)
+static void ftr_handle_notify(struct work_struct *work)
 {
-       struct gpio_notification *notif = data;
+       struct gpio_notification *notif =
+               container_of(work, struct gpio_notification, work.work);
 
        mutex_lock(&notif->mutex);
        if (notif->notify)
 
        ftr_gpio_all_amps_off(rt);
        rt->implementation_private = 0;
-       INIT_WORK(&rt->headphone_notify.work, ftr_handle_notify,
-                 &rt->headphone_notify);
-       INIT_WORK(&rt->line_in_notify.work, ftr_handle_notify,
-                 &rt->line_in_notify);
-       INIT_WORK(&rt->line_out_notify.work, ftr_handle_notify,
-                 &rt->line_out_notify);
+       INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify);
+       INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify);
+       INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify);
        mutex_init(&rt->headphone_notify.mutex);
        mutex_init(&rt->line_in_notify.mutex);
        mutex_init(&rt->line_out_notify.mutex);
 {
        struct gpio_notification *notif = data;
 
-       schedule_work(&notif->work);
+       schedule_delayed_work(&notif->work, 0);
 
        return IRQ_HANDLED;
 }
 
        pmf_gpio_set_lineout(rt, (s>>2)&1);
 }
 
-static void pmf_handle_notify(void *data)
+static void pmf_handle_notify(struct work_struct *work)
 {
-       struct gpio_notification *notif = data;
+       struct gpio_notification *notif =
+               container_of(work, struct gpio_notification, work.work);
 
        mutex_lock(&notif->mutex);
        if (notif->notify)
 {
        pmf_gpio_all_amps_off(rt);
        rt->implementation_private = 0;
-       INIT_WORK(&rt->headphone_notify.work, pmf_handle_notify,
-                 &rt->headphone_notify);
-       INIT_WORK(&rt->line_in_notify.work, pmf_handle_notify,
-                 &rt->line_in_notify);
-       INIT_WORK(&rt->line_out_notify.work, pmf_handle_notify,
-                 &rt->line_out_notify);
+       INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify);
+       INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify);
+       INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify);
        mutex_init(&rt->headphone_notify.mutex);
        mutex_init(&rt->line_in_notify.mutex);
        mutex_init(&rt->line_out_notify.mutex);
 {
        struct gpio_notification *notif = data;
 
-       schedule_work(&notif->work);
+       schedule_delayed_work(&notif->work, 0);
 }
 
 static int pmf_set_notify(struct gpio_runtime *rt,
 
 
 #define AK4114_ADDR                    0x00 /* fixed address */
 
-static void ak4114_stats(void *);
+static void ak4114_stats(struct work_struct *work);
 
 static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val)
 {
        reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN);
        /* bring up statistics / event queueing */
        chip->init = 0;
-       INIT_WORK(&chip->work, ak4114_stats, chip);
+       INIT_DELAYED_WORK(&chip->work, ak4114_stats);
        queue_delayed_work(chip->workqueue, &chip->work, HZ / 10);
 }
 
        return res;
 }
 
-static void ak4114_stats(void *data)
+static void ak4114_stats(struct work_struct *work)
 {
-       struct ak4114 *chip = (struct ak4114 *)data;
+       struct ak4114 *chip = container_of(work, struct ak4114, work.work);
 
        if (chip->init)
                return;
 
 static struct snd_ac97_build_ops null_build_ops;
 
 #ifdef CONFIG_SND_AC97_POWER_SAVE
-static void do_update_power(void *data)
+static void do_update_power(struct work_struct *work)
 {
-       update_power_regs(data);
+       update_power_regs(
+               container_of(work, struct snd_ac97, power_work.work));
 }
 #endif
 
        mutex_init(&ac97->page_mutex);
 #ifdef CONFIG_SND_AC97_POWER_SAVE
        ac97->power_workq = create_workqueue("ac97");
-       INIT_WORK(&ac97->power_work, do_update_power, ac97);
+       INIT_DELAYED_WORK(&ac97->power_work, do_update_power);
 #endif
 
 #ifdef CONFIG_PCI
 
 /*
 * process queued unsolicited events
  */
-static void process_unsol_events(void *data)
+static void process_unsol_events(struct work_struct *work)
 {
-       struct hda_bus *bus = data;
-       struct hda_bus_unsolicited *unsol = bus->unsol;
+       struct hda_bus_unsolicited *unsol =
+               container_of(work, struct hda_bus_unsolicited, work);
+       struct hda_bus *bus = unsol->bus;
        struct hda_codec *codec;
        unsigned int rp, caddr, res;
 
                kfree(unsol);
                return -ENOMEM;
        }
-       INIT_WORK(&unsol->work, process_unsol_events, bus);
+       INIT_WORK(&unsol->work, process_unsol_events);
+       unsol->bus = bus;
        bus->unsol = unsol;
        return 0;
 }
 
        /* workqueue */
        struct workqueue_struct *workq;
        struct work_struct work;
+       struct hda_bus *bus;
 };
 
 /*
 
 }
 
 static struct work_struct device_change;
+static struct snd_pmac *device_change_chip;
 
-static void device_change_handler(void *self)
+static void device_change_handler(struct work_struct *work)
 {
-       struct snd_pmac *chip = self;
+       struct snd_pmac *chip = device_change_chip;
        struct pmac_tumbler *mix;
        int headphone, lineout;
 
        chip->resume = tumbler_resume;
 #endif
 
-       INIT_WORK(&device_change, device_change_handler, (void *)chip);
+       INIT_WORK(&device_change, device_change_handler);
+       device_change_chip = chip;
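
Where the work item is a file-scope static rather than a member of the
device structure, as with device_change here, there is no container to
recover, so the context is parked in a file-scope pointer set at init time;
this is only safe because a single chip ever uses the item. The same shape,
hypothetical names:

    #include <linux/workqueue.h>

    struct my_chip;

    static struct work_struct my_change;
    static struct my_chip *my_change_chip;  /* stands in for the old data arg */

    static void my_change_handler(struct work_struct *unused)
    {
            struct my_chip *chip = my_change_chip;
            /* ... use chip ... */
    }

    static void my_init(struct my_chip *chip)
    {
            INIT_WORK(&my_change, my_change_handler);
            my_change_chip = chip;          /* safe only with one instance */
    }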
 
 #ifdef PMAC_SUPPORT_AUTOMUTE
        if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0)