From 31a6b11fed6ceec07ec4bdfefae56b8252d450cf Mon Sep 17 00:00:00 2001
From: Xiantao Zhang <xiantao.zhang@intel.com>
Date: Thu, 3 Apr 2008 11:39:43 -0700
Subject: [PATCH] [IA64] Implement smp_call_function_mask for ia64

This interface provides more flexible functionality for the SMP
infrastructure; e.g. KVM frequently needs to operate on a subset of
cpus.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
---
 arch/ia64/kernel/smp.c | 82 ++++++++++++++++++++++++++++++++++++++++++
 include/asm-ia64/smp.h |  3 ++
 2 files changed, 85 insertions(+)

diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 4e446aa5f4a..9a9d4c48933 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -209,6 +209,19 @@ send_IPI_allbutself (int op)
 	}
 }
 
+/*
+ * Called with preemption disabled.
+ */
+static inline void
+send_IPI_mask(cpumask_t mask, int op)
+{
+	unsigned int cpu;
+
+	for_each_cpu_mask(cpu, mask) {
+		send_IPI_single(cpu, op);
+	}
+}
+
 /*
  * Called with preemption disabled.
  */
@@ -401,6 +414,75 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on.  Must not include the current cpu.
+ * @func: The function to run.  This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function
+ *        has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+			   void (*func)(void *), void *info,
+			   int wait)
+{
+	struct call_data_struct data;
+	cpumask_t allbutself;
+	int cpus;
+
+	spin_lock(&call_lock);
+	allbutself = cpu_online_map;
+	cpu_clear(smp_processor_id(), allbutself);
+
+	cpus_and(mask, mask, allbutself);
+	cpus = cpus_weight(mask);
+	if (!cpus) {
+		spin_unlock(&call_lock);
+		return 0;
+	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	call_data = &data;
+	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
+
+	/* Send a message to other CPUs */
+	if (cpus_equal(mask, allbutself))
+		send_IPI_allbutself(IPI_CALL_FUNC);
+	else
+		send_IPI_mask(mask, IPI_CALL_FUNC);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus)
+		cpu_relax();
+
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
+	call_data = NULL;
+
+	spin_unlock(&call_lock);
+	return 0;
+
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
 /*
  * this function sends a 'generic call function' IPI to all other CPUs
  * in the system.
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 4fa733dd417..ec5f355fb7e 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,6 +38,9 @@ ia64_get_lid (void)
 	return lid.f.id << 8 | lid.f.eid;
 }
 
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+				  void *info, int wait);
+
 #define hard_smp_processor_id()	ia64_get_lid()
 
 #ifdef CONFIG_SMP
-- 
2.41.1
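
For readers who want to see the interface in action, below is a minimal
caller sketch in the spirit of the KVM use case cited in the commit
message. It is illustrative only and not part of the patch; the names
ack_flush and flush_remote_cpus are hypothetical, and it assumes the
2008-era cpumask API (cpumask_t passed by value, CPU_MASK_NONE,
cpu_set(), for_each_online_cpu()).

/*
 * Illustrative only -- not part of this patch.  Modeled on the KVM
 * remote-flush pattern: an empty IPI handler plus a waited
 * smp_call_function_mask() guarantees that every cpu in the mask has
 * passed through an interrupt before we return.
 */
#include <linux/smp.h>
#include <linux/cpumask.h>

static void ack_flush(void *info)
{
	/* Runs in IPI context on each target cpu: must be fast,
	 * non-blocking, and must not sleep. */
}

static void flush_remote_cpus(void)
{
	cpumask_t cpus = CPU_MASK_NONE;
	int cpu;

	/* Target every online cpu except ourselves; the interface
	 * requires that the current cpu not be in the mask. */
	for_each_online_cpu(cpu)
		if (cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);

	/* wait=1: spin until ack_flush() has completed everywhere. */
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

One detail of the implementation worth noting here: the started counter
is polled even when wait is false, so the stack-resident
call_data_struct is guaranteed to have been read by every target cpu
before call_data is cleared and call_lock is dropped.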