#ifndef __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
send_IPI_mask_sequence(mask, vector);
}
static inline void send_IPI_allbutself(int vector)
{
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
-
- if (!cpus_empty(mask))
- send_IPI_mask(&mask, vector);
+ send_IPI_mask_allbutself(cpu_online_mask, vector);
}
static inline void send_IPI_all(int vector)
{
- send_IPI_mask(&cpu_online_map, vector);
+ send_IPI_mask(cpu_online_mask, vector);
}
#endif /* __ASM_MACH_IPI_H */
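The headers above all follow the same pattern: the old send_IPI_allbutself() copied cpu_online_map onto the stack (a full NR_CPUS-bit cpumask_t, 512 bytes at NR_CPUS=4096) just to clear the sender's bit, while the new code hands cpu_online_mask straight to the _allbutself variant, which skips the sender while iterating. A minimal sketch of that variant; example_send_one() is a hypothetical stand-in for the real per-destination senders (e.g. __send_IPI_dest_field()):

extern void example_send_one(unsigned int cpu, int vector);	/* hypothetical */

static inline void example_send_allbutself(const struct cpumask *mask,
					   int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_cpu(cpu, mask)		/* no on-stack copy of the mask */
		if (cpu != this_cpu)
			example_send_one(cpu, vector);
}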
#ifndef __ASM_ES7000_IPI_H
#define __ASM_ES7000_IPI_H
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
send_IPI_mask_sequence(mask, vector);
}
static inline void send_IPI_allbutself(int vector)
{
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
- if (!cpus_empty(mask))
- send_IPI_mask(&mask, vector);
+ send_IPI_mask_allbutself(cpu_online_mask, vector);
}
static inline void send_IPI_all(int vector)
{
- send_IPI_mask(&cpu_online_map, vector);
+ send_IPI_mask(cpu_online_mask, vector);
}
#endif /* __ASM_ES7000_IPI_H */
int (*probe)(void);
int (*apic_id_registered)(void);
- const cpumask_t *(*target_cpus)(void);
+ const struct cpumask *(*target_cpus)(void);
int int_delivery_mode;
int int_dest_mode;
int ESR_DISABLE;
unsigned (*get_apic_id)(unsigned long x);
unsigned long apic_id_mask;
- unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
+ unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask);
- void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
+ void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
#ifdef CONFIG_SMP
/* ipi */
- void (*send_IPI_mask)(const cpumask_t *mask, int vector);
- void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector);
+ void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+ void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+ int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
#endif
u32 int_delivery_mode;
u32 int_dest_mode;
int (*apic_id_registered)(void);
- const cpumask_t *(*target_cpus)(void);
- void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
+ const struct cpumask *(*target_cpus)(void);
+ void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
void (*init_apic_ldr)(void);
/* ipi */
- void (*send_IPI_mask)(const cpumask_t *mask, int vector);
- void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector);
+ void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+ void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+ int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
void (*send_IPI_self)(int vector);
/* */
- unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
+ unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask);
unsigned int (*phys_pkg_id)(int index_msb);
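The cpu_mask_to_apicid_and() hook takes two masks so callers can ask for the first CPU in their intersection without building a temporary cpumask. A sketch of a physical-mode implementation under that reading; the helper name is hypothetical, the iteration idiom is the one this series uses elsewhere:

static unsigned int example_cpu_mask_to_apicid_and(const struct cpumask *mask,
						   const struct cpumask *andmask)
{
	int cpu;

	/* First CPU present in both masks, with no temporary mask. */
	for_each_cpu_and(cpu, mask, andmask)
		break;
	if ((unsigned int)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	return BAD_APICID;	/* the intersection was empty */
}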
native_apic_mem_write(APIC_ICR, cfg);
}
-static inline void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask_sequence(const struct cpumask *mask,
+ int vector)
{
unsigned long flags;
unsigned long query_cpu;
* - mbligh
*/
local_irq_save(flags);
- for_each_cpu_mask_nr(query_cpu, *mask) {
+ for_each_cpu(query_cpu, mask) {
__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
vector, APIC_DEST_PHYSICAL);
}
local_irq_restore(flags);
}
-static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
+ int vector)
{
unsigned long flags;
unsigned int query_cpu;
/* See Hack comment above */
local_irq_save(flags);
- for_each_cpu_mask_nr(query_cpu, *mask)
+ for_each_cpu(query_cpu, mask)
if (query_cpu != this_cpu)
__send_IPI_dest_field(
per_cpu(x86_cpu_to_apicid, query_cpu),
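for_each_cpu() differs from the replaced for_each_cpu_mask_nr() in two ways that matter here: it takes a pointer instead of a dereferenced cpumask_t, and it iterates only up to nr_cpu_ids rather than the compile-time NR_CPUS. Trivial usage sketch:

{
	unsigned int cpu, n = 0;

	for_each_cpu(cpu, cpu_online_mask)	/* pointer, bounded by nr_cpu_ids */
		n++;
	/* n now equals num_online_cpus() */
}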
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-static inline const cpumask_t *target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
{
#ifdef CONFIG_SMP
- return &cpu_online_map;
+ return cpu_online_mask;
#else
- return &cpumask_of_cpu(0);
+ return cpumask_of(0);
#endif
}
return physid_isset(read_apic_id(), phys_cpu_present_map);
}
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
{
- return cpus_addr(*cpumask)[0];
+ return cpumask_bits(cpumask)[0];
}
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
#endif
}
-static inline void vector_allocation_domain(int cpu, cpumask_t *retmask)
+static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
/* Avoid include hell */
#define NMI_VECTOR 0x02
-void send_IPI_mask_bitmask(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
void __send_IPI_shortcut(unsigned int shortcut, int vector);
extern int no_broadcast;
#define send_IPI_mask (genapic->send_IPI_mask)
#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
#else
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
send_IPI_mask_bitmask(mask, vector);
}
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
#endif
static inline void __local_send_IPI_allbutself(int vector)
{
if (no_broadcast || vector == NMI_VECTOR)
- send_IPI_mask_allbutself(&cpu_online_map, vector);
+ send_IPI_mask_allbutself(cpu_online_mask, vector);
else
__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}
static inline void __local_send_IPI_all(int vector)
{
if (no_broadcast || vector == NMI_VECTOR)
- send_IPI_mask(&cpu_online_map, vector);
+ send_IPI_mask(cpu_online_mask, vector);
else
__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}
#ifndef __ASM_NUMAQ_IPI_H
#define __ASM_NUMAQ_IPI_H
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
send_IPI_mask_sequence(mask, vector);
}
static inline void send_IPI_allbutself(int vector)
{
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
-
- if (!cpus_empty(mask))
- send_IPI_mask(&mask, vector);
+ send_IPI_mask_allbutself(cpu_online_mask, vector);
}
static inline void send_IPI_all(int vector)
{
- send_IPI_mask(&cpu_online_map, vector);
+ send_IPI_mask(cpu_online_mask, vector);
}
#endif /* __ASM_NUMAQ_IPI_H */
void (*cpu_die)(unsigned int cpu);
void (*play_dead)(void);
- void (*send_call_func_ipi)(const cpumask_t *mask);
+ void (*send_call_func_ipi)(const struct cpumask *mask);
void (*send_call_func_single_ipi)(int cpu);
};
void native_play_dead(void);
void play_dead_common(void);
-void native_send_call_func_ipi(const cpumask_t *mask);
+void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
extern void prefill_possible_map(void);
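For context, a sketch of how generic code reaches these hooks: the arch wrapper forwards the caller's mask pointer through smp_ops without copying it (wrapper name and shape assumed here, not taken from this patch):

static inline void example_arch_send_call_function_ipi(const struct cpumask *mask)
{
	smp_ops.send_call_func_ipi(mask);	/* e.g. native_send_call_func_ipi */
}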
return 1;
}
-static const cpumask_t *flat_target_cpus(void)
+static const struct cpumask *flat_target_cpus(void)
{
- return &cpu_online_map;
+ return cpu_online_mask;
}
-static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt destination.
*/
- *retmask = (cpumask_t) { {[0] = APIC_ALL_CPUS, } };
+ cpumask_clear(retmask);
+ cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}
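The removed compound literal built and copied an entire NR_CPUS-bit cpumask_t through the pointer; the replacement writes only what it needs. A width-agnostic equivalent, sketched with generic helpers (flat logical mode addresses at most 8 CPUs, which is what APIC_ALL_CPUS encodes):

static void example_flat_allocation_domain(int cpu, struct cpumask *retmask)
{
	int i;

	cpumask_clear(retmask);
	for (i = 0; i < 8; i++)		/* bits 0-7 == APIC_ALL_CPUS */
		cpumask_set_cpu(i, retmask);
}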
/*
local_irq_restore(flags);
}
-static void flat_send_IPI_mask(const cpumask_t *cpumask, int vector)
+static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
- unsigned long mask = cpus_addr(*cpumask)[0];
+ unsigned long mask = cpumask_bits(cpumask)[0];
_flat_send_IPI_mask(mask, vector);
}
-static void flat_send_IPI_mask_allbutself(const cpumask_t *cpumask, int vector)
+static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
+ int vector)
{
- unsigned long mask = cpus_addr(*cpumask)[0];
+ unsigned long mask = cpumask_bits(cpumask)[0];
int cpu = smp_processor_id();
if (cpu < BITS_PER_LONG)
int hotplug = 0;
#endif
if (hotplug || vector == NMI_VECTOR) {
- if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu))) {
- unsigned long mask = cpus_addr(cpu_online_map)[0];
+ if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
+ unsigned long mask = cpumask_bits(cpu_online_mask)[0];
if (cpu < BITS_PER_LONG)
clear_bit(cpu, &mask);
static void flat_send_IPI_all(int vector)
{
if (vector == NMI_VECTOR)
- flat_send_IPI_mask(&cpu_online_map, vector);
+ flat_send_IPI_mask(cpu_online_mask, vector);
else
__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
}
return physid_isset(read_xapic_id(), phys_cpu_present_map);
}
-static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
- return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
+ return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
}
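In flat logical mode each CPU owns one bit of the 8-bit logical destination, so flat_cpu_mask_to_apicid() is a single word read. A worked example (sketch; uses the variable-mask API introduced alongside this series):

cpumask_var_t mask;

if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
	cpumask_clear(mask);
	cpumask_set_cpu(1, mask);
	cpumask_set_cpu(3, mask);
	/* flat_cpu_mask_to_apicid(mask) == 0x0a: one ICR write with
	 * logical destination 0x0a reaches CPUs 1 and 3 together. */
	free_cpumask_var(mask);
}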
static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
return 0;
}
-static const cpumask_t *physflat_target_cpus(void)
+static const struct cpumask *physflat_target_cpus(void)
{
- return &cpu_online_map;
+ return cpu_online_mask;
}
-static void physflat_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
- cpus_clear(*retmask);
- cpu_set(cpu, *retmask);
+ cpumask_clear(retmask);
+ cpumask_set_cpu(cpu, retmask);
}
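The clear-then-set pair above builds a single-CPU mask bit by bit; an equivalent formulation copies the constant mask that cpumask_of() yields:

static void example_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_copy(retmask, cpumask_of(cpu));	/* same single-bit result */
}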
-static void physflat_send_IPI_mask(const cpumask_t *cpumask, int vector)
+static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
send_IPI_mask_sequence(cpumask, vector);
}
-static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask,
+static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
int vector)
{
send_IPI_mask_allbutself(cpumask, vector);
static void physflat_send_IPI_allbutself(int vector)
{
- send_IPI_mask_allbutself(&cpu_online_map, vector);
+ send_IPI_mask_allbutself(cpu_online_mask, vector);
}
static void physflat_send_IPI_all(int vector)
{
- physflat_send_IPI_mask(&cpu_online_map, vector);
+ physflat_send_IPI_mask(cpu_online_mask, vector);
}
-static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
int cpu;
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- cpu = first_cpu(*cpumask);
+ cpu = cpumask_first(cpumask);
if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
else
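cpumask_first() returns nr_cpu_ids for an empty mask, so the "(unsigned)cpu < nr_cpu_ids" test above doubles as the empty-mask guard. The idiom in isolation:

int cpu = cpumask_first(cpumask);

if (cpu >= nr_cpu_ids)
	return BAD_APICID;	/* empty mask: nothing to target */
return per_cpu(x86_cpu_to_apicid, cpu);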
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-static const cpumask_t *x2apic_target_cpus(void)
+static const struct cpumask *x2apic_target_cpus(void)
{
- return &cpumask_of_cpu(0);
+ return cpumask_of(0);
}
/*
* for now each logical cpu is in its own vector allocation domain.
*/
-static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
- cpus_clear(*retmask);
- cpu_set(cpu, *retmask);
+ cpumask_clear(retmask);
+ cpumask_set_cpu(cpu, retmask);
}
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
* at once. We have 16 CPUs in a cluster. This will minimize IPI register
* writes.
*/
-static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
unsigned long flags;
unsigned long query_cpu;
local_irq_save(flags);
- for_each_cpu_mask_nr(query_cpu, *mask)
+ for_each_cpu(query_cpu, mask)
__x2apic_send_IPI_dest(
per_cpu(x86_cpu_to_logical_apicid, query_cpu),
vector, APIC_DEST_LOGICAL);
local_irq_restore(flags);
}
-static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
+ int vector)
{
unsigned long flags;
unsigned long query_cpu;
unsigned long this_cpu = smp_processor_id();
local_irq_save(flags);
- for_each_cpu_mask_nr(query_cpu, *mask)
+ for_each_cpu(query_cpu, mask)
if (query_cpu != this_cpu)
__x2apic_send_IPI_dest(
per_cpu(x86_cpu_to_logical_apicid, query_cpu),
static void x2apic_send_IPI_all(int vector)
{
- x2apic_send_IPI_mask(&cpu_online_map, vector);
+ x2apic_send_IPI_mask(cpu_online_mask, vector);
}
static int x2apic_apic_id_registered(void)
return 1;
}
-static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
int cpu;
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- cpu = first_cpu(*cpumask);
+ cpu = cpumask_first(cpumask);
if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_logical_apicid, cpu);
else
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-static const cpumask_t *x2apic_target_cpus(void)
+static const struct cpumask *x2apic_target_cpus(void)
{
- return &cpumask_of_cpu(0);
+ return cpumask_of(0);
}
-static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
- cpus_clear(*retmask);
- cpu_set(cpu, *retmask);
+ cpumask_clear(retmask);
+ cpumask_set_cpu(cpu, retmask);
}
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
x2apic_icr_write(cfg, apicid);
}
-static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
unsigned long flags;
unsigned long query_cpu;
local_irq_save(flags);
- for_each_cpu_mask_nr(query_cpu, *mask) {
+ for_each_cpu(query_cpu, mask) {
__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
vector, APIC_DEST_PHYSICAL);
}
local_irq_restore(flags);
}
-static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
+ int vector)
{
unsigned long flags;
unsigned long query_cpu;
unsigned long this_cpu = smp_processor_id();
local_irq_save(flags);
- for_each_cpu_mask_nr(query_cpu, *mask) {
+ for_each_cpu(query_cpu, mask) {
if (query_cpu != this_cpu)
__x2apic_send_IPI_dest(
per_cpu(x86_cpu_to_apicid, query_cpu),
static void x2apic_send_IPI_all(int vector)
{
- x2apic_send_IPI_mask(&cpu_online_map, vector);
+ x2apic_send_IPI_mask(cpu_online_mask, vector);
}
static int x2apic_apic_id_registered(void)
return 1;
}
-static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
int cpu;
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- cpu = first_cpu(*cpumask);
+ cpu = cpumask_first(cpumask);
if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
else
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-static const cpumask_t *uv_target_cpus(void)
+static const struct cpumask *uv_target_cpus(void)
{
- return &cpumask_of_cpu(0);
+ return cpumask_of(0);
}
-static void uv_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
- cpus_clear(*retmask);
- cpu_set(cpu, *retmask);
+ cpumask_clear(retmask);
+ cpumask_set_cpu(cpu, retmask);
}
int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}
-static void uv_send_IPI_mask(const cpumask_t *mask, int vector)
+static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
unsigned int cpu;
- for_each_cpu_mask_nr(cpu, *mask)
+ for_each_cpu(cpu, mask)
uv_send_IPI_one(cpu, vector);
}
-static void uv_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
unsigned int cpu;
unsigned int this_cpu = smp_processor_id();
- for_each_cpu_mask_nr(cpu, *mask)
+ for_each_cpu(cpu, mask)
if (cpu != this_cpu)
uv_send_IPI_one(cpu, vector);
}
static void uv_send_IPI_all(int vector)
{
- uv_send_IPI_mask(&cpu_online_map, vector);
+ uv_send_IPI_mask(cpu_online_mask, vector);
}
static int uv_apic_id_registered(void)
{
}
-static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
int cpu;
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- cpu = first_cpu(*cpumask);
+ cpu = cpumask_first(cpumask);
if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
else
/*
* This is only used on smaller machines.
*/
-void send_IPI_mask_bitmask(const cpumask_t *cpumask, int vector)
+void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
{
- unsigned long mask = cpus_addr(*cpumask)[0];
+ unsigned long mask = cpumask_bits(cpumask)[0];
unsigned long flags;
local_irq_save(flags);
- WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
+ WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
__send_IPI_dest_field(mask, vector);
local_irq_restore(flags);
}
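The open-coded word comparison in the WARN_ON() works here because this bitmask path only runs on machines whose CPUs all fit in one long; a width-agnostic spelling of the same sanity check would be:

WARN_ON(!cpumask_subset(cpumask, cpu_online_mask));	/* IPI to an offline CPU? */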
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
{
unsigned long flags;
unsigned int query_cpu;
*/
local_irq_save(flags);
- for_each_cpu_mask_nr(query_cpu, *mask)
+ for_each_cpu(query_cpu, mask)
__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
local_irq_restore(flags);
}
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
unsigned long flags;
unsigned int query_cpu;
/* See Hack comment above */
local_irq_save(flags);
- for_each_cpu_mask_nr(query_cpu, *mask)
+ for_each_cpu(query_cpu, mask)
if (query_cpu != this_cpu)
__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
vector);
WARN_ON(1);
return;
}
- send_IPI_mask(&cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+ send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
}
void native_send_call_func_single_ipi(int cpu)
{
- send_IPI_mask(&cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
+ send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}
-void native_send_call_func_ipi(const cpumask_t *mask)
+void native_send_call_func_ipi(const struct cpumask *mask)
{
cpumask_t allbutself;
xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
-static void xen_send_IPI_mask(const cpumask_t *mask, enum ipi_vector vector)
+static void xen_send_IPI_mask(const struct cpumask *mask,
+ enum ipi_vector vector)
{
unsigned cpu;
- for_each_cpu_and(cpu, mask, &cpu_online_map)
+ for_each_cpu_and(cpu, mask, cpu_online_mask)
xen_send_IPI_one(cpu, vector);
}
-static void xen_smp_send_call_function_ipi(const cpumask_t *mask)
+static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
int cpu;
xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
/* Make sure other vcpus get a chance to run if they need to. */
- for_each_cpu_mask_nr(cpu, *mask) {
+ for_each_cpu(cpu, mask) {
if (xen_vcpu_stolen(cpu)) {
HYPERVISOR_sched_op(SCHEDOP_yield, 0);
break;
static void xen_smp_send_call_function_single_ipi(int cpu)
{
- xen_send_IPI_mask(&cpumask_of_cpu(cpu),
+ xen_send_IPI_mask(cpumask_of(cpu),
XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
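for_each_cpu_and() walks the intersection of two masks without materializing it. Without the helper, the same loop would need a temporary mask, roughly (sketch, using the variable-mask API from this conversion series):

unsigned int cpu;
cpumask_var_t tmp;

if (alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
	cpumask_and(tmp, mask, cpu_online_mask);
	for_each_cpu(cpu, tmp)
		xen_send_IPI_one(cpu, vector);
	free_cpumask_var(tmp);
}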