WorkStruct: Separate delayable and non-delayable events.
author    David Howells <dhowells@redhat.com>
          Wed, 22 Nov 2006 14:54:01 +0000 (14:54 +0000)
committer David Howells <dhowells@redhat.com>
          Wed, 22 Nov 2006 14:54:01 +0000 (14:54 +0000)
Separate delayable work items from non-delayable work items by splitting them
into a separate structure (delayed_work), which incorporates a work_struct and
the timer_list removed from work_struct.

The work_struct struct is huge, and this limits its usefulness.  On a 64-bit
architecture it's nearly 100 bytes in size.  This reduces that by half for the
non-delayable type of event.

Signed-off-by: David Howells <dhowells@redhat.com>
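
To illustrate what the split means at a call site, here is a minimal sketch
(my_handler, my_work and my_dwork are hypothetical names; handlers keep the
void * argument convention of this API):

	/* Before: every work_struct carried a timer_list, delayed or not. */
	static void my_handler(void *data);
	static DECLARE_WORK(my_work, my_handler, NULL);

	/* After: only delayable items pay for the embedded timer. */
	static DECLARE_DELAYED_WORK(my_dwork, my_handler, NULL);

	static void kick(void)
	{
		schedule_work(&my_work);		/* timer-less work_struct */
		schedule_delayed_work(&my_dwork, HZ);	/* uses the embedded timer */
	}
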
22 files changed:
arch/x86_64/kernel/mce.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/char/random.c
drivers/char/tty_io.c
fs/aio.c
fs/nfs/client.c
fs/nfs/namespace.c
include/linux/aio.h
include/linux/kbd_kern.h
include/linux/libata.h
include/linux/nfs_fs_sb.h
include/linux/sunrpc/rpc_pipe_fs.h
include/linux/sunrpc/xprt.h
include/linux/tty.h
include/linux/workqueue.h
kernel/workqueue.c
mm/slab.c
net/core/link_watch.c
net/sunrpc/cache.c
net/sunrpc/rpc_pipe.c
net/sunrpc/xprtsock.c

index bbea88801d883cc8928c6a9c91fe5bf5efbf4c9f..5306f26309052e86756ce3d1102d2ae300eebf36 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -307,7 +307,7 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
 
 static int check_interval = 5 * 60; /* 5 minutes */
 static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer, NULL);
 
 static void mcheck_check_cpu(void *info)
 {
index 915a55a6cc14ea8dc230cefb31f15f8b9c48dc04..0bb4b4dced76d984a7b883f1d7be07ad1386c3b6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -937,12 +937,9 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
        if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
                return;
 
-       PREPARE_WORK(&ap->port_task, fn, data);
+       PREPARE_DELAYED_WORK(&ap->port_task, fn, data);
 
-       if (!delay)
-               rc = queue_work(ata_wq, &ap->port_task);
-       else
-               rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+       rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
 
        /* rc == 0 means that another user is using port task */
        WARN_ON(rc == 0);
@@ -5320,8 +5317,8 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
        ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
-       INIT_WORK(&ap->port_task, NULL, NULL);
-       INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
+       INIT_DELAYED_WORK(&ap->port_task, NULL, NULL);
+       INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
        INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
        INIT_LIST_HEAD(&ap->eh_done_q);
        init_waitqueue_head(&ap->eh_wait_q);
index 02b2b2787d9b85a33a420fc77b5dc18aac7e29ad..9f6b7cc74fd9159a241e661932211e71ed97e650 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -332,7 +332,7 @@ void ata_scsi_error(struct Scsi_Host *host)
        if (ap->pflags & ATA_PFLAG_LOADING)
                ap->pflags &= ~ATA_PFLAG_LOADING;
        else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
-               queue_work(ata_aux_wq, &ap->hotplug_task);
+               queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
 
        if (ap->pflags & ATA_PFLAG_RECOVERED)
                ata_port_printk(ap, KERN_INFO, "EH complete\n");
index eb6b13f4211aed23994734537ecc81a3919dfcb2..f2ab61f3e8aee10a6b64589d47d63f258cdd77b7 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1424,7 +1424,7 @@ static unsigned int ip_cnt;
 
 static void rekey_seq_generator(void *private_);
 
-static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator, NULL);
 
 /*
  * Lock avoidance:
index e90ea39c7c4b1112fd7d7db878b96d8531a0f162..7297acfe520ca3789a6dd8763985bda6ce3e3e53 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -3580,7 +3580,7 @@ static void initialize_tty_struct(struct tty_struct *tty)
        tty->overrun_time = jiffies;
        tty->buf.head = tty->buf.tail = NULL;
        tty_buffer_init(tty);
-       INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
+       INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc, tty);
        init_MUTEX(&tty->buf.pty_sem);
        mutex_init(&tty->termios_mutex);
        init_waitqueue_head(&tty->write_wait);
index 94766599db00d3ca530eb37da388e1e63b900d35..11a1a7100ad6bfb0c703007f77c5e95a928f7201 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
        INIT_LIST_HEAD(&ctx->active_reqs);
        INIT_LIST_HEAD(&ctx->run_list);
-       INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+       INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler, ctx);
 
        if (aio_setup_ring(ctx) < 0)
                goto out_freectx;
@@ -876,7 +876,7 @@ static void aio_kick_handler(void *data)
         * we're in a worker thread already, don't use queue_delayed_work,
         */
        if (requeue)
-               queue_work(aio_wq, &ctx->wq);
+               queue_delayed_work(aio_wq, &ctx->wq, 0);
 }
 
 
index 5fea638743e4107aadcd8cbd18cd46d78e331cf0..6f0487d6f44a3a48529213c0a2e9541033a713f5 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
        INIT_LIST_HEAD(&clp->cl_state_owners);
        INIT_LIST_HEAD(&clp->cl_unused);
        spin_lock_init(&clp->cl_lock);
-       INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+       INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
        rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
        clp->cl_boot_time = CURRENT_TIME;
        clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
index ec1114b33d8954a3d2436646d9d735112e240e0e..5ed798bc1cf7f7e75dad6f6387694631f5ee9497 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -21,7 +21,8 @@
 static void nfs_expire_automounts(void *list);
 
 LIST_HEAD(nfs_automount_list);
-static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts,
+                           &nfs_automount_list);
 int nfs_mountpoint_expiry_timeout = 500 * HZ;
 
 static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
index 0d71c0041f137e09eb48c06017af91fafb84c22d..9e350fd44d7787d061393d7512e65513d4cf960f 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -194,7 +194,7 @@ struct kioctx {
 
        struct aio_ring_info    ring_info;
 
-       struct work_struct      wq;
+       struct delayed_work     wq;
 };
 
 /* prototypes */
index efe0ee4cc80baee508567f08c72b3f7d03277e43..06c58c423fe17668987839f71695c8bf64983a49 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -158,7 +158,7 @@ static inline void con_schedule_flip(struct tty_struct *t)
        if (t->buf.tail != NULL)
                t->buf.tail->commit = t->buf.tail->used;
        spin_unlock_irqrestore(&t->buf.lock, flags);
-       schedule_work(&t->buf.work);
+       schedule_delayed_work(&t->buf.work, 0);
 }
 
 #endif
index abd2debebca2525a79395ac1d389388d9bc85e91..5f04006e8dd2ce018d02885408a6aa14fffb0f45 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -568,8 +568,8 @@ struct ata_port {
        struct ata_host         *host;
        struct device           *dev;
 
-       struct work_struct      port_task;
-       struct work_struct      hotplug_task;
+       struct delayed_work     port_task;
+       struct delayed_work     hotplug_task;
        struct work_struct      scsi_rescan_task;
 
        unsigned int            hsm_task_state;
index 7ccfc7ef0a83afd2a58193ddcaa103f442e051cb..95796e6924f1d10be492771595275b7c169dde3a 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -51,7 +51,7 @@ struct nfs_client {
 
        unsigned long           cl_lease_time;
        unsigned long           cl_last_renewal;
-       struct work_struct      cl_renewd;
+       struct delayed_work     cl_renewd;
 
        struct rpc_wait_queue   cl_rpcwaitq;
 
index a2eb9b4a9de32ef2f5ef2b7605b88ace1dfc316e..4a68125b6de6308a37b6133973088da421e08fd4 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -30,7 +30,7 @@ struct rpc_inode {
 #define RPC_PIPE_WAIT_FOR_OPEN 1
        int flags;
        struct rpc_pipe_ops *ops;
-       struct work_struct queue_timeout;
+       struct delayed_work queue_timeout;
 };
 
 static inline struct rpc_inode *
index 60394fbc4c704d62d05325c119f66da040416291..3e04c1512fc444f6a8330612165f3d4626aad0a9 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -177,7 +177,7 @@ struct rpc_xprt {
        unsigned long           connect_timeout,
                                bind_timeout,
                                reestablish_timeout;
-       struct work_struct      connect_worker;
+       struct delayed_work     connect_worker;
        unsigned short          port;
 
        /*
index 44091c0db0b46b473234172a91d983319648e7ce..c1f716446161282539591f6a86f8508e6fc45e1e 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -53,7 +53,7 @@ struct tty_buffer {
 };
 
 struct tty_bufhead {
-       struct work_struct              work;
+       struct delayed_work work;
        struct semaphore pty_sem;
        spinlock_t lock;
        struct tty_buffer *head;        /* Queue head */
index 9bca3539a1e5a965023bd20c148320d96f7d1e27..9faaccae570ee987f33fd2ce1a5438e5b7ef596a 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -17,6 +17,10 @@ struct work_struct {
        void (*func)(void *);
        void *data;
        void *wq_data;
+};
+
+struct delayed_work {
+       struct work_struct work;
        struct timer_list timer;
 };
 
@@ -28,32 +32,48 @@ struct execute_work {
         .entry = { &(n).entry, &(n).entry },                   \
        .func = (f),                                            \
        .data = (d),                                            \
+       }
+
+#define __DELAYED_WORK_INITIALIZER(n, f, d) {                  \
+       .work = __WORK_INITIALIZER((n).work, (f), (d)),         \
        .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
        }
 
 #define DECLARE_WORK(n, f, d)                                  \
        struct work_struct n = __WORK_INITIALIZER(n, f, d)
 
+#define DECLARE_DELAYED_WORK(n, f, d)                          \
+       struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, d)
+
 /*
- * initialize a work-struct's func and data pointers:
+ * initialize a work item's function and data pointers
  */
 #define PREPARE_WORK(_work, _func, _data)                      \
        do {                                                    \
-               (_work)->func = _func;                          \
-               (_work)->data = _data;                          \
+               (_work)->func = (_func);                        \
+               (_work)->data = (_data);                        \
        } while (0)
 
+#define PREPARE_DELAYED_WORK(_work, _func, _data)              \
+       PREPARE_WORK(&(_work)->work, (_func), (_data))
+
 /*
- * initialize all of a work-struct:
+ * initialize all of a work item in one go
  */
 #define INIT_WORK(_work, _func, _data)                         \
        do {                                                    \
                INIT_LIST_HEAD(&(_work)->entry);                \
                (_work)->pending = 0;                           \
                PREPARE_WORK((_work), (_func), (_data));        \
+       } while (0)
+
+#define INIT_DELAYED_WORK(_work, _func, _data)         \
+       do {                                                    \
+               INIT_WORK(&(_work)->work, (_func), (_data));    \
                init_timer(&(_work)->timer);                    \
        } while (0)
 
+
 extern struct workqueue_struct *__create_workqueue(const char *name,
                                                    int singlethread);
 #define create_workqueue(name) __create_workqueue((name), 0)
@@ -62,24 +82,24 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-       struct work_struct *work, unsigned long delay);
+       struct delayed_work *work, unsigned long delay);
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
 
-extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
 extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
 extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
 
 extern void init_workqueues(void);
-void cancel_rearming_delayed_work(struct work_struct *work);
+void cancel_rearming_delayed_work(struct delayed_work *work);
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
-                                      struct work_struct *);
+                                      struct delayed_work *);
 int execute_in_process_context(void (*fn)(void *), void *,
                               struct execute_work *);
 
@@ -88,13 +108,13 @@ int execute_in_process_context(void (*fn)(void *), void *,
  * function may still be running on return from cancel_delayed_work().  Run
  * flush_scheduled_work() to wait on it.
  */
-static inline int cancel_delayed_work(struct work_struct *work)
+static inline int cancel_delayed_work(struct delayed_work *work)
 {
        int ret;
 
        ret = del_timer_sync(&work->timer);
        if (ret)
-               clear_bit(0, &work->pending);
+               clear_bit(0, &work->work.pending);
        return ret;
 }
 
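
Taken together, the new header suggests the following usage pattern for a
delayed_work embedded in a driver structure (a sketch with hypothetical
my_dev/my_wq names; my_wq is assumed to come from create_workqueue()):

	struct my_dev {
		struct delayed_work dwork;	/* work_struct plus timer_list */
	};

	static struct workqueue_struct *my_wq;

	static void my_dev_poll(void *data)
	{
		struct my_dev *dev = data;
		/* ... one-shot work; a self-rearming handler needs the
		 * cancel_rearming_* helpers in kernel/workqueue.c below ... */
	}

	static int my_dev_start(struct my_dev *dev)
	{
		INIT_DELAYED_WORK(&dev->dwork, my_dev_poll, dev);
		/* returns 0 if already pending; a delay of 0 queues at once
		 * (see the delay == 0 short-circuit added below) */
		return queue_delayed_work(my_wq, &dev->dwork, 0);
	}

	static void my_dev_stop(struct my_dev *dev)
	{
		/* cancel_delayed_work() only kills the timer; the handler may
		 * still be running on return, so flush before freeing dev. */
		if (!cancel_delayed_work(&dev->dwork))
			flush_workqueue(my_wq);
	}
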
index 17c2f03d2c27f9928cf175db8b01a929426d130c..44fc54b7decf9d17ccef37ecdc05b985a0ac289d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -122,29 +122,33 @@ EXPORT_SYMBOL_GPL(queue_work);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
-       struct work_struct *work = (struct work_struct *)__data;
-       struct workqueue_struct *wq = work->wq_data;
+       struct delayed_work *dwork = (struct delayed_work *)__data;
+       struct workqueue_struct *wq = dwork->work.wq_data;
        int cpu = smp_processor_id();
 
        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;
 
-       __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+       __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }
 
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: work to queue
+ * @work: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-                       struct work_struct *work, unsigned long delay)
+                       struct delayed_work *dwork, unsigned long delay)
 {
        int ret = 0;
-       struct timer_list *timer = &work->timer;
+       struct timer_list *timer = &dwork->timer;
+       struct work_struct *work = &dwork->work;
+
+       if (delay == 0)
+               return queue_work(wq, work);
 
        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
@@ -153,7 +157,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
-               timer->data = (unsigned long)work;
+               timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
@@ -172,10 +176,11 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-                       struct work_struct *work, unsigned long delay)
+                       struct delayed_work *dwork, unsigned long delay)
 {
        int ret = 0;
-       struct timer_list *timer = &work->timer;
+       struct timer_list *timer = &dwork->timer;
+       struct work_struct *work = &dwork->work;
 
        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
@@ -184,7 +189,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
-               timer->data = (unsigned long)work;
+               timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
@@ -468,31 +473,31 @@ EXPORT_SYMBOL(schedule_work);
 
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-       return queue_delayed_work(keventd_wq, work, delay);
+       return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
  * @delay: number of jiffies to wait
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
 int schedule_delayed_work_on(int cpu,
-                       struct work_struct *work, unsigned long delay)
+                       struct delayed_work *dwork, unsigned long delay)
 {
-       return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+       return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
@@ -539,12 +544,12 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
  *                     work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-                                      struct work_struct *work)
+                                      struct delayed_work *dwork)
 {
-       while (!cancel_delayed_work(work))
+       while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
@@ -552,11 +557,11 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 /**
  * cancel_rearming_delayed_work - reliably kill off a delayed keventd
  *                     work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-       cancel_rearming_delayed_workqueue(keventd_wq, work);
+       cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
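
The two cancel_rearming_* helpers above exist because a handler that requeues
its own delayed work can race a bare cancel_delayed_work(): the timer may be
deleted just as a running instance rearms it.  A sketch of the pattern they
make safe to tear down (hypothetical names, my_wq as before):

	static void my_rearming_fn(void *data);
	static DECLARE_DELAYED_WORK(my_dwork, my_rearming_fn, NULL);

	static void my_rearming_fn(void *data)
	{
		/* ... periodic work ... */
		queue_delayed_work(my_wq, &my_dwork, HZ);	/* rearms itself */
	}

	static void my_teardown(void)
	{
		/* loops cancel + flush until no instance is pending or running */
		cancel_rearming_delayed_workqueue(my_wq, &my_dwork);
	}
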
index 3c4a7e34eddc4de763feea96ce1b654f06bd9052..a65bc5e992c3671bb255f8b723e15ed989e33168 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -753,7 +753,7 @@ int slab_is_available(void)
        return g_cpucache_up == FULL;
 }
 
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -916,16 +916,16 @@ static void next_reap_node(void)
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-       struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+       struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
 
        /*
         * When this gets called from do_initcalls via cpucache_init(),
         * init_workqueues() has already run, so keventd will be setup
         * at that time.
         */
-       if (keventd_up() && reap_work->func == NULL) {
+       if (keventd_up() && reap_work->work.func == NULL) {
                init_reap_node(cpu);
-               INIT_WORK(reap_work, cache_reap, NULL);
+               INIT_DELAYED_WORK(reap_work, cache_reap, NULL);
                schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
        }
 }
index 4b36114744c57f983c8af674ee45610f6e830ac1..f2ed09e25dfd63fdba8d1d9f8e9e16550dfab239 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -35,7 +35,7 @@ static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
 static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event, NULL);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
                        unsigned long delay = linkwatch_nextevent - jiffies;
 
                        /* If we wrap around we'll delay it by at most HZ. */
-                       if (!delay || delay > HZ)
-                               schedule_work(&linkwatch_work);
-                       else
-                               schedule_delayed_work(&linkwatch_work, delay);
+                       if (delay > HZ)
+                               delay = 0;
+                       schedule_delayed_work(&linkwatch_work, delay);
                }
        }
 }
index 00cb388ece032cec8aeba948bb97c0e653eede5e..d5725cb1491eee9555074496b6d31dcae5144fa2 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -285,7 +285,7 @@ static struct file_operations content_file_operations;
 static struct file_operations cache_flush_operations;
 
 static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean, NULL);
 
 void cache_register(struct cache_detail *cd)
 {
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
        spin_unlock(&cache_list_lock);
 
        /* start the cleaning process */
-       schedule_work(&cache_cleaner);
+       schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)
index 9a0b41a97f90764f84636811137a76cdfff5ac3b..97be3f7fed4481cba7d57c24d89d3338b2ad8030 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -837,7 +837,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
                INIT_LIST_HEAD(&rpci->pipe);
                rpci->pipelen = 0;
                init_waitqueue_head(&rpci->waitq);
-               INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+               INIT_DELAYED_WORK(&rpci->queue_timeout,
+                                   rpc_timeout_upcall_queue, rpci);
                rpci->ops = NULL;
        }
 }
index 757fc91ef25d8621e9af5f1e162bda754b90f0fa..3c7532cd009e27e7c56bd9f944d31d06cf90c984 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1262,7 +1262,7 @@ static void xs_connect(struct rpc_task *task)
                        xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
        } else {
                dprintk("RPC:      xs_connect scheduled xprt %p\n", xprt);
-               schedule_work(&xprt->connect_worker);
+               schedule_delayed_work(&xprt->connect_worker, 0);
 
                /* flush_scheduled_work can sleep... */
                if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1375,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
        /* XXX: header size can vary due to auth type, IPv6, etc. */
        xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-       INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+       INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
        xprt->bind_timeout = XS_BIND_TO;
        xprt->connect_timeout = XS_UDP_CONN_TO;
        xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1420,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
        xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
        xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-       INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+       INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
        xprt->bind_timeout = XS_BIND_TO;
        xprt->connect_timeout = XS_TCP_CONN_TO;
        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;