From: Mike Travis
Date: Sat, 5 Apr 2008 01:11:10 +0000 (-0700)
Subject: nodemask: use new node_to_cpumask_ptr function
X-Git-Tag: v2.6.26-rc1~1132^2~31
X-Git-Url: http://pilppa.com/gitweb/?a=commitdiff_plain;h=c5f59f0833df945eef7ff35f3dc6ba61c5f293dd;p=linux-2.6-omap-h63xx.git

nodemask: use new node_to_cpumask_ptr function

  * Use new node_to_cpumask_ptr.  This creates a pointer to the
    cpumask for a given node.  This definition is in mm patch:

	asm-generic-add-node_to_cpumask_ptr-macro.patch

  * Use new set_cpus_allowed_ptr function.

Depends on:
	[mm-patch]: asm-generic-add-node_to_cpumask_ptr-macro.patch
	[sched-devel]: sched: add new set_cpus_allowed_ptr function
	[x86/latest]: x86: add cpus_scnprintf function

Cc: Greg Kroah-Hartman
Cc: Greg Banks
Cc: H. Peter Anvin
Signed-off-by: Mike Travis
Signed-off-by: Ingo Molnar
---
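
For readers who have not seen the new macros, the sketch below models the
calling convention being adopted.  This is an illustrative userspace
sketch, not the kernel's definitions: the table name, the
MAX_NUMNODES/NR_CPUS values, and the macro bodies are stand-ins.  What it
demonstrates is the point of the conversion: node_to_cpumask_ptr()
declares a pointer aimed at a per-node cpumask table instead of copying a
whole cpumask_t onto the stack, and node_to_cpumask_ptr_next() re-aims
that pointer inside a loop.

/*
 * Illustrative userspace sketch only, NOT the kernel's code.
 * Models the convention this patch converts to: instead of copying
 * a cpumask_t by value, declare a pointer into a per-node table.
 */
#include <stdio.h>

#define MAX_NUMNODES	4
#define NR_CPUS		64

typedef struct {
	unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} cpumask_t;

/* Stand-in for the kernel's per-node cpumask table. */
static cpumask_t node_to_cpumask_map[MAX_NUMNODES];

/* Old style: returns the whole mask by value (NR_CPUS/8 bytes copied). */
static cpumask_t node_to_cpumask(int node)
{
	return node_to_cpumask_map[node];
}

/* New style: declare a pointer local; no copy, no stack growth. */
#define node_to_cpumask_ptr(v, node)				\
	const cpumask_t *v = &node_to_cpumask_map[node]

/* Re-aim an already-declared pointer (useful inside loops). */
#define node_to_cpumask_ptr_next(v, node)			\
	v = &node_to_cpumask_map[node]

int main(void)
{
	cpumask_t copy = node_to_cpumask(0);	/* old: struct copy */
	node_to_cpumask_ptr(mask, 0);		/* new: pointer only */
	int node;

	for (node = 1; node < MAX_NUMNODES; node++) {
		node_to_cpumask_ptr_next(mask, node);
		printf("node %d: first word %#lx\n", node, mask->bits[0]);
	}
	printf("by-value copy is %zu bytes; pointer is %zu bytes\n",
	       sizeof(copy), sizeof(mask));
	return 0;
}

With a large configuration such as NR_CPUS=4096, a cpumask_t is 512
bytes, so each converted call site below also removes a sizable stack
copy.
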
diff --git a/drivers/base/node.c b/drivers/base/node.c
index e59861f18ce..8e3f25bb8f8 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -22,14 +22,15 @@ static struct sysdev_class node_class = {
 static ssize_t node_read_cpumap(struct sys_device * dev, char * buf)
 {
 	struct node *node_dev = to_node(dev);
-	cpumask_t mask = node_to_cpumask(node_dev->sysdev.id);
+	node_to_cpumask_ptr(mask, node_dev->sysdev.id);
 	int len;
 
 	/* 2004/06/03: buf currently PAGE_SIZE, need > 1 char per 4 bits. */
 	BUILD_BUG_ON(MAX_NUMNODES/4 > PAGE_SIZE/2);
 
-	len = cpumask_scnprintf(buf, PAGE_SIZE-1, mask);
-	len += sprintf(buf + len, "\n");
+	len = cpumask_scnprintf(buf, PAGE_SIZE-2, *mask);
+	buf[len++] = '\n';
+	buf[len] = '\0';
 	return len;
 }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 1a8252385c4..9f7980f8ec0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6448,7 +6448,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
  *
  * Should use nodemask_t.
  */
-static int find_next_best_node(int node, unsigned long *used_nodes)
+static int find_next_best_node(int node, nodemask_t *used_nodes)
 {
 	int i, n, val, min_val, best_node = 0;
 
@@ -6462,7 +6462,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
 			continue;
 
 		/* Skip already used nodes */
-		if (test_bit(n, used_nodes))
+		if (node_isset(n, *used_nodes))
 			continue;
 
 		/* Simple min distance search */
@@ -6474,14 +6474,13 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
 		}
 	}
 
-	set_bit(best_node, used_nodes);
+	node_set(best_node, *used_nodes);
 	return best_node;
 }
 
 /**
  * sched_domain_node_span - get a cpumask for a node's sched_domain
  * @node: node whose cpumask we're constructing
- * @size: number of nodes to include in this span
  *
  * Given a node, construct a good cpumask for its sched_domain to span. It
  * should be one that prevents unnecessary balancing, but also spreads tasks
@@ -6489,22 +6488,22 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
  */
 static cpumask_t sched_domain_node_span(int node)
 {
-	DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
-	cpumask_t span, nodemask;
+	nodemask_t used_nodes;
+	cpumask_t span;
+	node_to_cpumask_ptr(nodemask, node);
 	int i;
 
 	cpus_clear(span);
-	bitmap_zero(used_nodes, MAX_NUMNODES);
+	nodes_clear(used_nodes);
 
-	nodemask = node_to_cpumask(node);
-	cpus_or(span, span, nodemask);
-	set_bit(node, used_nodes);
+	cpus_or(span, span, *nodemask);
+	node_set(node, used_nodes);
 
 	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
-		int next_node = find_next_best_node(node, used_nodes);
+		int next_node = find_next_best_node(node, &used_nodes);
 
-		nodemask = node_to_cpumask(next_node);
-		cpus_or(span, span, nodemask);
+		node_to_cpumask_ptr_next(nodemask, next_node);
+		cpus_or(span, span, *nodemask);
 	}
 
 	return span;
@@ -6901,6 +6900,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		for (j = 0; j < MAX_NUMNODES; j++) {
 			cpumask_t tmp, notcovered;
 			int n = (i + j) % MAX_NUMNODES;
+			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(notcovered, covered);
 			cpus_and(tmp, notcovered, *cpu_map);
@@ -6908,8 +6908,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 			if (cpus_empty(tmp))
 				break;
 
-			nodemask = node_to_cpumask(n);
-			cpus_and(tmp, tmp, nodemask);
+			cpus_and(tmp, tmp, *pnodemask);
 			if (cpus_empty(tmp))
 				continue;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 402a504f122..32e796af12a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2029,6 +2029,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
+	node_to_cpumask_ptr(tmp, 0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2037,7 +2038,6 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	}
 
 	for_each_node_state(n, N_HIGH_MEMORY) {
-		cpumask_t tmp;
 
 		/* Don't want a node to appear more than once */
 		if (node_isset(n, *used_node_mask))
@@ -2050,8 +2050,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		tmp = node_to_cpumask(n);
-		if (!cpus_empty(tmp))
+		node_to_cpumask_ptr_next(tmp, n);
+		if (!cpus_empty(*tmp))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
diff --git a/mm/slab.c b/mm/slab.c
index 04b308c3bc5..03927cb5ec9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1160,14 +1160,13 @@ static void __cpuinit cpuup_canceled(long cpu)
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
+	node_to_cpumask_ptr(mask, node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
 		struct array_cache *nc;
 		struct array_cache *shared;
 		struct array_cache **alien;
-		cpumask_t mask;
 
-		mask = node_to_cpumask(node);
 		/* cpu is dead; no one can alloc from it. */
 		nc = cachep->array[cpu];
 		cachep->array[cpu] = NULL;
@@ -1183,7 +1182,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 		if (nc)
 			free_block(cachep, nc->entry, nc->avail, node);
 
-		if (!cpus_empty(mask)) {
+		if (!cpus_empty(*mask)) {
 			spin_unlock_irq(&l3->list_lock);
 			goto free_array_cache;
 		}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4046434046e..f80a5b7c057 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1647,11 +1647,10 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	cpumask_t cpumask;
+	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-	cpumask = node_to_cpumask(pgdat->node_id);
-	if (!cpus_empty(cpumask))
-		set_cpus_allowed(tsk, cpumask);
+	if (!cpus_empty(*cpumask))
+		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 
 	/*
@@ -1880,17 +1879,16 @@ out:
 static int __devinit cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
-	pg_data_t *pgdat;
-	cpumask_t mask;
 	int nid;
 
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_node_state(nid, N_HIGH_MEMORY) {
-			pgdat = NODE_DATA(nid);
-			mask = node_to_cpumask(pgdat->node_id);
-			if (any_online_cpu(mask) != NR_CPUS)
+			pg_data_t *pgdat = NODE_DATA(nid);
+			node_to_cpumask_ptr(mask, pgdat->node_id);
+
+			if (any_online_cpu(*mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
-				set_cpus_allowed(pgdat->kswapd, mask);
+				set_cpus_allowed_ptr(pgdat->kswapd, mask);
 		}
 	}
 	return NOTIFY_OK;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index a290e152329..090af78d68b 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -301,7 +301,6 @@ static inline int
 svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
 {
 	struct svc_pool_map *m = &svc_pool_map;
-	unsigned int node;		/* or cpu */
 
 	/*
 	 * The caller checks for sv_nrpools > 1, which
@@ -314,16 +313,23 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
 	default:
 		return 0;
 	case SVC_POOL_PERCPU:
-		node = m->pool_to[pidx];
+	{
+		unsigned int cpu = m->pool_to[pidx];
+
 		*oldmask = current->cpus_allowed;
-		set_cpus_allowed(current, cpumask_of_cpu(node));
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 		return 1;
+	}
 	case SVC_POOL_PERNODE:
-		node = m->pool_to[pidx];
+	{
+		unsigned int node = m->pool_to[pidx];
+		node_to_cpumask_ptr(nodecpumask, node);
+
 		*oldmask = current->cpus_allowed;
-		set_cpus_allowed(current, node_to_cpumask(node));
+		set_cpus_allowed_ptr(current, nodecpumask);
 		return 1;
 	}
+	}
 }
 
 /*
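
The set_cpus_allowed() to set_cpus_allowed_ptr() conversions in the
mm/vmscan.c and net/sunrpc/svc.c hunks above follow the same by-value
versus by-pointer reasoning.  The sketch below is again an illustrative
userspace model, not the kernel API: the real functions also take a
struct task_struct * and return int, both omitted here, and the types
and bodies are stand-ins.

/*
 * Illustrative userspace sketch only, NOT the kernel API.  Models
 * why set_cpus_allowed_ptr() is preferred: the old form copied the
 * whole cpumask through the call; the _ptr form passes one pointer.
 */
#include <stdio.h>
#include <string.h>

#define NR_CPUS	4096	/* large NUMA configs are why the copy hurts */

typedef struct {
	unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} cpumask_t;

/* Old form: the 512-byte (NR_CPUS/8) mask is copied per call. */
static size_t set_cpus_allowed(cpumask_t new_mask)
{
	return sizeof(new_mask);	/* stand-in for the real work */
}

/* New form: one pointer, independent of NR_CPUS. */
static size_t set_cpus_allowed_ptr(const cpumask_t *new_mask)
{
	return sizeof(new_mask);
}

int main(void)
{
	cpumask_t mask;

	memset(&mask, 0xff, sizeof(mask));
	printf("by value:   %zu bytes cross the call\n",
	       set_cpus_allowed(mask));
	printf("by pointer: %zu bytes cross the call\n",
	       set_cpus_allowed_ptr(&mask));
	return 0;
}
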