From aa85ea5b89c36c51200d795dd788139bd9b8cf50 Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Mon, 30 Mar 2009 22:05:15 -0600
Subject: [PATCH] cpumask: use new cpumask_ functions in core code.

Impact: cleanup

Time to clean up remaining laggards using the old cpu_ functions.

Signed-off-by: Rusty Russell
Cc: Greg Kroah-Hartman
Cc: Ingo Molnar
Cc: Trond.Myklebust@netapp.com
---
 drivers/base/cpu.c     | 2 +-
 include/linux/cpuset.h | 4 ++--
 kernel/workqueue.c     | 6 +++---
 mm/allocpercpu.c       | 2 +-
 mm/vmstat.c            | 2 +-
 net/sunrpc/svc.c       | 2 +-
 6 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 5b257a57bc5..e62a4ccea54 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -119,7 +119,7 @@ static ssize_t print_cpus_map(char *buf, const struct cpumask *map)
 #define print_cpus_func(type) \
 static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
 { \
-	return print_cpus_map(buf, &cpu_##type##_map); \
+	return print_cpus_map(buf, cpu_##type##_mask); \
 } \
 static struct sysdev_class_attribute attr_##type##_map = \
 	_SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 90c6074a36c..2e0d79678de 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -90,12 +90,12 @@ static inline void cpuset_init_smp(void) {}
 static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
 {
-	*mask = cpu_possible_map;
+	cpumask_copy(mask, cpu_possible_mask);
 }
 static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
					      struct cpumask *mask)
 {
-	*mask = cpu_possible_map;
+	cpumask_copy(mask, cpu_possible_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1f0c509b40d..9aedd9fd825 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -416,7 +416,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -547,7 +547,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
@@ -911,7 +911,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	cpu_maps_update_done();
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 1882923bc70..139d5b7b662 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -143,7 +143,7 @@ void free_percpu(void *__pdata)
 {
 	if (unlikely(!__pdata))
 		return;
-	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
+	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
 	kfree(__percpu_disguise(__pdata));
 }
 EXPORT_SYMBOL_GPL(free_percpu);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 91149746bb8..8cd81ea1ddc 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -27,7 +27,7 @@ static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 
 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 
-	for_each_cpu_mask_nr(cpu, *cpumask) {
+	for_each_cpu(cpu, cpumask) {
 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 
 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index c51fed4d1af..bb507e2bb94 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -312,7 +312,7 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 	switch (m->mode) {
 	case SVC_POOL_PERCPU:
 	{
-		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
+		set_cpus_allowed_ptr(task, cpumask_of(node));
 		break;
 	}
 	case SVC_POOL_PERNODE:
-- 
2.41.1
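
Note (illustrative only, not part of the patch): every hunk above applies the
same conversion, from operating on cpumask_t values (struct copies,
dereferenced iteration, &cpumask_of_cpu()) to operating on
"const struct cpumask *" pointers. A minimal sketch of the pattern follows;
the helper function example_conversion() and its arguments are hypothetical,
but the cpumask API calls are the ones used in the patch.

	#include <linux/kernel.h>
	#include <linux/cpumask.h>
	#include <linux/sched.h>

	static void example_conversion(struct task_struct *task,
				       struct cpumask *dst, int target)
	{
		int cpu;

		/* Old: *dst = cpu_possible_map;  (whole-struct copy) */
		cpumask_copy(dst, cpu_possible_mask);

		/* Old: for_each_cpu_mask_nr(cpu, *dst)  (iterates over a
		 * dereferenced value) */
		for_each_cpu(cpu, dst)
			pr_debug("cpu %d is in the mask\n", cpu);

		/* Old: set_cpus_allowed_ptr(task, &cpumask_of_cpu(target));
		 * cpumask_of() already yields a const struct cpumask *. */
		set_cpus_allowed_ptr(task, cpumask_of(target));
	}

Working through pointers rather than struct copies is what lets the cpumask
code allocate masks off-stack when NR_CPUS is large, which was the point of
the cpumask_ API conversion.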