return apicid;
}
-static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
- const cpumask_t *andmask)
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int cpu;
/*
 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 * May as well be the first.
 */
- while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
- if (cpu_isset(cpu, *andmask))
- return cpu_to_logical_apicid(cpu);
+ cpu = cpumask_any_and(cpumask, andmask);
+ if (cpu < nr_cpu_ids)
+ return cpu_to_logical_apicid(cpu);
return BAD_APICID;
}
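For reference, cpumask_any_and() returns the first CPU set in both masks, or a value >= nr_cpu_ids when the intersection is empty, which is why the open-coded next_cpu()/cpu_isset() walk collapses into one call plus a bounds check. A minimal sketch of the behaviour the converted hunk relies on, using only the documented cpumask API (first_common_apicid is an illustrative name, not an in-tree helper):

/*
 * Sketch: the observable behaviour behind the conversion above.
 * Illustrative only, not the in-tree cpumask_any_and() implementation.
 */
static inline unsigned int first_common_apicid(const struct cpumask *cpumask,
					       const struct cpumask *andmask)
{
	unsigned int cpu;

	for_each_cpu_and(cpu, cpumask, andmask)
		return cpu_to_logical_apicid(cpu);	/* first common CPU */

	return BAD_APICID;				/* empty intersection */
}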
return apicid;
}
-static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
- const cpumask_t *andmask)
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int num_bits_set;
int num_bits_set2;
int cpus_found = 0;
int cpu;
int apicid = 0;
- num_bits_set = cpus_weight(*cpumask);
- num_bits_set2 = cpus_weight(*andmask);
- num_bits_set = min_t(int, num_bits_set, num_bits_set2);
+ num_bits_set = cpumask_weight(cpumask);
+ num_bits_set2 = cpumask_weight(andmask);
+ num_bits_set = min(num_bits_set, num_bits_set2);
/* Return id to all */
if (num_bits_set >= nr_cpu_ids)
#if defined CONFIG_ES7000_CLUSTERED_APIC
	return 0xFF;
#else
	return cpu_to_logical_apicid(0);
#endif
/*
 * The cpus in the mask must all be on the same apic cluster.  If they
 * are not on the same apicid cluster return default value of TARGET_CPUS.
 */
- while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
- if (cpu_isset(cpu, *andmask)
- apicid = cpu_to_logical_apicid(cpu);
+ cpu = cpumask_first_and(cpumask, andmask);
+ apicid = cpu_to_logical_apicid(cpu);
+
while (cpus_found < num_bits_set) {
- if (cpu_isset(cpu, *cpumask) && cpu_isset(cpu, *andmask)) {
+ if (cpumask_test_cpu(cpu, cpumask) &&
+ cpumask_test_cpu(cpu, andmask)) {
int new_apicid = cpu_to_logical_apicid(cpu);
if (apicid_cluster(apicid) !=
apicid_cluster(new_apicid)) {
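The loop that follows (truncated here) falls back to the default destination when two CPUs in the mask land in different APIC clusters. The same invariant can be checked directly over the intersection; a hedged sketch, assuming the apicid_cluster() and cpu_to_logical_apicid() helpers used above (mask_spans_clusters is an illustrative name, not an in-tree function):

/*
 * Sketch: the invariant the truncated loop enforces.  Illustrative only.
 */
static inline int mask_spans_clusters(const struct cpumask *cpumask,
				      const struct cpumask *andmask)
{
	int cpu, cluster;
	int first = cpumask_first_and(cpumask, andmask);

	if (first >= nr_cpu_ids)
		return 0;	/* empty intersection: nothing to span */

	cluster = apicid_cluster(cpu_to_logical_apicid(first));
	for_each_cpu_and(cpu, cpumask, andmask)
		if (apicid_cluster(cpu_to_logical_apicid(cpu)) != cluster)
			return 1;	/* crosses a cluster boundary */
	return 0;
}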
unsigned (*get_apic_id)(unsigned long x);
unsigned long apic_id_mask;
unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
- unsigned int (*cpu_mask_to_apicid_and)(const cpumask_t *cpumask,
- const cpumask_t *andmask);
+ unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+ const struct cpumask *andmask);
void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
#ifdef CONFIG_SMP
void (*send_IPI_self)(int vector);
/* */
unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
- unsigned int (*cpu_mask_to_apicid_and)(const cpumask_t *cpumask,
- const cpumask_t *andmask);
+ unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+ const struct cpumask *andmask);
unsigned int (*phys_pkg_id)(int index_msb);
unsigned int (*get_apic_id)(unsigned long x);
unsigned long (*set_apic_id)(unsigned int id);
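Both genapic variants now take struct cpumask pointers in this slot. Each apic driver wires in its own helper; abridged from the flat driver's initializer pattern (all other fields omitted here for brevity):

/* Sketch: wiring the new-signature callback into a driver (abridged). */
struct genapic apic_flat = {
	.name			= "flat",
	.cpu_mask_to_apicid	= flat_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and	= flat_cpu_mask_to_apicid_and,
	/* ... remaining callbacks omitted ... */
};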
return cpus_addr(*cpumask)[0];
}
-static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
- const cpumask_t *andmask)
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
- unsigned long mask1 = cpus_addr(*cpumask)[0];
- unsigned long mask2 = cpus_addr(*andmask)[0];
+ unsigned long mask1 = cpumask_bits(cpumask)[0];
+ unsigned long mask2 = cpumask_bits(andmask)[0];
return (unsigned int)(mask1 & mask2);
}
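cpumask_bits() exposes the mask's underlying unsigned long array; reading only word 0 is valid here because mach-default destinations fit in the low word, i.e. the code presumes every possible CPU bit lives in one long. A sketch that makes the assumption explicit (low_word_dest and the BUILD_BUG_ON placement are illustrative):

/* Sketch: the word-0 shortcut is only safe while all CPUs fit in one long. */
static inline unsigned int low_word_dest(const struct cpumask *cpumask,
					 const struct cpumask *andmask)
{
	BUILD_BUG_ON(NR_CPUS > BITS_PER_LONG);	/* guards the shortcut */
	return (unsigned int)(cpumask_bits(cpumask)[0] &
			      cpumask_bits(andmask)[0]);
}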
return (int) 0xF;
}
-static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
- const cpumask_t *andmask)
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
return (int) 0xF;
}
return apicid;
}
-static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
- const cpumask_t *andmask)
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int num_bits_set;
int num_bits_set2;
int cpus_found = 0;
int cpu;
int apicid = 0;
- num_bits_set = cpus_weight(*cpumask);
- num_bits_set2 = cpus_weight(*andmask);
- num_bits_set = min_t(int, num_bits_set, num_bits_set2);
+ num_bits_set = cpumask_weight(cpumask);
+ num_bits_set2 = cpumask_weight(andmask);
+ num_bits_set = min(num_bits_set, num_bits_set2);
/* Return id to all */
if (num_bits_set >= nr_cpu_ids)
return 0xFF;
/*
 * The cpus in the mask must all be on the same apic cluster.  If they
 * are not on the same apicid cluster return default value of TARGET_CPUS.
 */
- while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
- if (cpu_isset(cpu, *andmask)
- apicid = cpu_to_logical_apicid(cpu);
+ cpu = cpumask_first_and(cpumask, andmask);
+ apicid = cpu_to_logical_apicid(cpu);
while (cpus_found < num_bits_set) {
- if (cpu_isset(cpu, *cpumask) && cpu_isset(cpu, *andmask)) {
+ if (cpumask_test_cpu(cpu, cpumask)
+ && cpumask_test_cpu(cpu, andmask)) {
int new_apicid = cpu_to_logical_apicid(cpu);
if (apicid_cluster(apicid) !=
apicid_cluster(new_apicid)) {
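As in the es7000 hunk, cpumask_first_and() stands in for the old next_cpu(-1, ...) probe and returns nr_cpu_ids when the masks share no CPU; note the converted code feeds the result straight to cpu_to_logical_apicid() and so assumes a non-empty intersection. The identity, spelled out with only the documented API (first_and_by_hand is an illustrative name):

/* Sketch: cpumask_first_and() == first bit set in (a & b). */
static inline unsigned int first_and_by_hand(const struct cpumask *a,
					     const struct cpumask *b)
{
	unsigned int cpu;

	for_each_cpu(cpu, a)
		if (cpumask_test_cpu(cpu, b))
			return cpu;
	return nr_cpu_ids;	/* matches cpumask_first_and() on no overlap */
}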
return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
}
-static unsigned int flat_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
- const cpumask_t *andmask)
+static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
- unsigned long mask1 = cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
- unsigned long mask2 = cpus_addr(*andmask)[0] & APIC_ALL_CPUS;
+ unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+ unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS;
- return (int)(mask1 & mask2);
+ return mask1 & mask2;
}
static unsigned int phys_pkg_id(int index_msb)
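A worked example of the flat path (values illustrative, assuming the usual 1 << cpu logical IDs): cpumask = {0,1,2} gives word 0 = 0x7 and andmask = {1,3} gives 0xA; after the APIC_ALL_CPUS (0xFF) clamp the result is 0x7 & 0xA = 0x2, the logical-ID bit of CPU 1, so the hardware sees exactly the CPUs common to both masks.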
return BAD_APICID;
}
-static unsigned int physflat_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
- const cpumask_t *andmask)
+static unsigned int
+physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int cpu;
/*
 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 * May as well be the first.
 */
- while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
- if (cpu_isset(cpu, *andmask))
- return per_cpu(x86_cpu_to_apicid, cpu);
+ cpu = cpumask_any_and(cpumask, andmask);
+ if (cpu < nr_cpu_ids)
+ return per_cpu(x86_cpu_to_apicid, cpu);
return BAD_APICID;
}
return BAD_APICID;
}
-static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
- const cpumask_t *andmask)
+static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int cpu;
/*
 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 * May as well be the first.
 */
- while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
- if (cpu_isset(cpu, *andmask))
- return per_cpu(x86_cpu_to_apicid, cpu);
+ cpu = cpumask_any_and(cpumask, andmask);
+ if (cpu < nr_cpu_ids)
+ return per_cpu(x86_cpu_to_apicid, cpu);
return BAD_APICID;
}
return BAD_APICID;
}
-static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
- const cpumask_t *andmask)
+static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int cpu;
/*
 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 * May as well be the first.
 */
- while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
- if (cpu_isset(cpu, *andmask))
- return per_cpu(x86_cpu_to_apicid, cpu);
+ cpu = cpumask_any_and(cpumask, andmask);
+ if (cpu < nr_cpu_ids)
+ return per_cpu(x86_cpu_to_apicid, cpu);
return BAD_APICID;
}
return BAD_APICID;
}
-static unsigned int uv_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
- const cpumask_t *andmask)
+static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int cpu;
/*
 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 * May as well be the first.
 */
- while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
- if (cpu_isset(cpu, *andmask))
- return per_cpu(x86_cpu_to_apicid, cpu);
+ cpu = cpumask_any_and(cpumask, andmask);
+ if (cpu < nr_cpu_ids)
+ return per_cpu(x86_cpu_to_apicid, cpu);
return BAD_APICID;
}
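All of the physical-delivery variants above share one calling convention: AND the requested affinity with a second mask and return a single destination, or BAD_APICID when the intersection is empty. A hypothetical caller, for illustration only (irq_dest_apicid is not a kernel symbol; the real consumers live in the io_apic code):

/*
 * Sketch: a set_affinity-style consumer of the hook.  Illustrative only.
 */
static unsigned int irq_dest_apicid(const struct cpumask *requested)
{
	unsigned int apicid;

	/* Only online CPUs can be interrupt destinations. */
	apicid = genapic->cpu_mask_to_apicid_and(requested, cpu_online_mask);
	if (apicid == BAD_APICID)
		apicid = genapic->cpu_mask_to_apicid(cpu_online_mask);
	return apicid;
}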