*/
int sis_apic_bug = -1;
+int first_free_entry = NR_IRQS;
/*
* # of IRQ routing registers
*/
#define MAX_PLUS_SHARED_IRQS NR_IRQS
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
+int pin_map_size = PIN_MAP_SIZE;
+
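/*
 * Illustrative sketch, not part of the patch: the layout these
 * definitions imply.  The first NR_IRQS slots of irq_2_pin are list
 * heads, one per IRQ; the remaining NR_IRQS slots are spare nodes
 * that add_pin_to_irq() hands out through first_free_entry whenever
 * an IRQ is wired to more than one I/O APIC pin.  The struct layout
 * is assumed from its use in this file.
 */
static struct irq_pin_list {
	int apic, pin, next;	/* next is an index back into irq_2_pin */
} irq_2_pin[PIN_MAP_SIZE];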
/*
* This is performance-critical, we want to do it O(1)
*
* the indexing order of this array favors 1:1 mappings
* between pins and IRQs.
*/
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
- static int first_free_entry = NR_IRQS;
struct irq_pin_list *entry = irq_2_pin + irq;
while (entry->next)
	entry = irq_2_pin + entry->next;
if (entry->pin != -1) {
entry->next = first_free_entry;
entry = irq_2_pin + entry->next;
- if (++first_free_entry >= PIN_MAP_SIZE)
+ if (++first_free_entry >= pin_map_size)
panic("io_apic.c: whoops");
}
entry->apic = apic;
int i, j;
for_each_online_cpu(i) {
- for (j = 0; j < NR_IRQS; j++) {
+ for (j = 0; j < nr_irqs; j++) {
if (!irq_desc[j].action)
continue;
/* Is it a significant load ? */
if (!cpu_online(i))
continue;
package_index = CPU_TO_PACKAGEINDEX(i);
- for (j = 0; j < NR_IRQS; j++) {
+ for (j = 0; j < nr_irqs; j++) {
unsigned long value_now, delta;
/* Is this an active IRQ or balancing disabled ? */
if (!irq_desc[j].action || irq_balancing_disabled(j))
*/
move_this_load = 0;
selected_irq = -1;
- for (j = 0; j < NR_IRQS; j++) {
+ for (j = 0; j < nr_irqs; j++) {
/* Is this an active IRQ? */
if (!irq_desc[j].action)
continue;
long time_remaining = balanced_irq_interval;
/* push everything to CPU 0 to give us a starting point. */
- for (i = 0 ; i < NR_IRQS ; i++) {
+ for (i = 0 ; i < nr_irqs ; i++) {
irq_desc[i].pending_mask = cpumask_of_cpu(0);
set_pending_irq(i, cpumask_of_cpu(0));
}
physical_balance = 1;
for_each_online_cpu(i) {
- irq_cpu_data[i].irq_delta = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
- irq_cpu_data[i].last_irq = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
+ irq_cpu_data[i].irq_delta = kzalloc(sizeof(unsigned long) * nr_irqs, GFP_KERNEL);
+ irq_cpu_data[i].last_irq = kzalloc(sizeof(unsigned long) * nr_irqs, GFP_KERNEL);
if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
printk(KERN_ERR "balanced_irq_init: out of memory");
goto failed;
}
}
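/*
 * Equivalent sketch, not part of the patch: now that the element
 * count is the runtime value nr_irqs, kcalloc() expresses the same
 * zeroed allocation while also checking the count * size
 * multiplication for overflow:
 *
 *	irq_cpu_data[i].irq_delta =
 *		kcalloc(nr_irqs, sizeof(unsigned long), GFP_KERNEL);
 *	irq_cpu_data[i].last_irq =
 *		kcalloc(nr_irqs, sizeof(unsigned long), GFP_KERNEL);
 */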
printk(KERN_DEBUG "IRQ to pin mappings:\n");
- for (i = 0; i < NR_IRQS; i++) {
+ for (i = 0; i < nr_irqs; i++) {
struct irq_pin_list *entry = irq_2_pin + i;
if (entry->pin < 0)
continue;
int i, apic;
unsigned long flags;
- for (i = 0; i < PIN_MAP_SIZE; i++) {
+ for (i = 0; i < pin_map_size; i++) {
irq_2_pin[i].pin = -1;
irq_2_pin[i].next = 0;
}
* Also, we've got to be careful not to trash gate
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
- for (irq = 0; irq < NR_IRQS ; irq++) {
+ for (irq = 0; irq < nr_irqs ; irq++) {
if (IO_APIC_IRQ(irq) && !irq_vector[irq]) {
/*
* Hmm.. We don't have an entry for this,
irq = -ENOSPC;
spin_lock_irqsave(&vector_lock, flags);
- for (new = (NR_IRQS - 1); new >= 0; new--) {
+ for (new = (nr_irqs - 1); new >= 0; new--) {
if (platform_legacy_irq(new))
continue;
if (irq_vector[new] != 0)
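/*
 * Usage sketch, not part of the patch: the scan runs downward from
 * nr_irqs - 1 so dynamically created IRQs are carved from the top of
 * the range, away from the platform-legacy numbers at the bottom.
 * A caller consumes the result roughly like this:
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;	(no free slot: -ENOSPC)
 */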
#define MAX_PLUS_SHARED_IRQS NR_IRQS
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
+int pin_map_size = PIN_MAP_SIZE;
/*
* This is performance-critical, we want to do it O(1)
*
int pin; \
struct irq_pin_list *entry = irq_2_pin + irq; \
\
- BUG_ON(irq >= NR_IRQS); \
+ BUG_ON(irq >= nr_irqs); \
for (;;) { \
unsigned int reg; \
pin = entry->pin; \
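/*
 * Instantiation sketch, not part of the patch: the macro above stamps
 * out small helpers that walk an IRQ's pin chain and read-modify-write
 * each I/O APIC redirection entry.  An assumed invocation, modeled on
 * the unpatched file, looks like:
 *
 *	DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED,
 *		  io_apic_sync(entry->apic))
 */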
int apic, pin;
struct irq_pin_list *entry = irq_2_pin + irq;
- BUG_ON(irq >= NR_IRQS);
+ BUG_ON(irq >= nr_irqs);
for (;;) {
unsigned int reg;
apic = entry->apic;
* shared ISA-space IRQs, so we have to support them. We are super
* fast in the common case, and fast for shared ISA-space IRQs.
*/
+int first_free_entry = NR_IRQS;
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
- static int first_free_entry = NR_IRQS;
struct irq_pin_list *entry = irq_2_pin + irq;
- BUG_ON(irq >= NR_IRQS);
+ BUG_ON(irq >= nr_irqs);
while (entry->next)
entry = irq_2_pin + entry->next;
if (entry->pin != -1) {
entry->next = first_free_entry;
entry = irq_2_pin + entry->next;
- if (++first_free_entry >= PIN_MAP_SIZE)
+ if (++first_free_entry >= pin_map_size)
panic("io_apic.c: ran out of irq_2_pin entries!");
}
entry->apic = apic;
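/*
 * Illustrative walk, not part of the patch: how a shared IRQ's chain
 * reads back.  After add_pin_to_irq(irq, apic0, pinA) and
 * add_pin_to_irq(irq, apic1, pinB), slot irq holds (apic0, pinA) and
 * its ->next indexes a spare slot (>= NR_IRQS) holding (apic1, pinB);
 * next == 0 terminates a chain, matching the initialization loop
 * further down.
 */
static void walk_irq_pins(unsigned int irq, void (*fn)(int apic, int pin))
{
	struct irq_pin_list *entry = irq_2_pin + irq;

	for (;;) {
		if (entry->pin != -1)
			fn(entry->apic, entry->pin);
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
}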
best_guess = irq;
}
}
- BUG_ON(best_guess >= NR_IRQS);
+ BUG_ON(best_guess >= nr_irqs);
return best_guess;
}
irq += nr_ioapic_registers[i++];
irq += pin;
}
- BUG_ON(irq >= NR_IRQS);
+ BUG_ON(irq >= nr_irqs);
return irq;
}
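/*
 * Worked example, not part of the patch: with two 24-pin I/O APICs
 * (nr_ioapic_registers[0] == nr_ioapic_registers[1] == 24), pin 3 of
 * APIC 1 maps to irq = 24 + 3 = 27.  The relaxed check only demands
 * that this stays below the runtime nr_irqs instead of the
 * compile-time NR_IRQS.
 */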
int cpu;
struct irq_cfg *cfg;
- BUG_ON((unsigned)irq >= NR_IRQS);
+ BUG_ON((unsigned)irq >= nr_irqs);
cfg = &irq_cfg[irq];
/* Only try and allocate irqs on cpus that are present */
cpumask_t mask;
int cpu, vector;
- BUG_ON((unsigned)irq >= NR_IRQS);
+ BUG_ON((unsigned)irq >= nr_irqs);
cfg = &irq_cfg[irq];
BUG_ON(!cfg->vector);
int irq, vector;
/* Mark the inuse vectors */
- for (irq = 0; irq < NR_IRQS; ++irq) {
+ for (irq = 0; irq < nr_irqs; ++irq) {
if (!cpu_isset(cpu, irq_cfg[irq].domain))
continue;
vector = irq_cfg[irq].vector;
}
}
printk(KERN_DEBUG "IRQ to pin mappings:\n");
- for (i = 0; i < NR_IRQS; i++) {
+ for (i = 0; i < nr_irqs; i++) {
struct irq_pin_list *entry = irq_2_pin + i;
if (entry->pin < 0)
continue;
int i, apic;
unsigned long flags;
- for (i = 0; i < PIN_MAP_SIZE; i++) {
+ for (i = 0; i < pin_map_size; i++) {
irq_2_pin[i].pin = -1;
irq_2_pin[i].next = 0;
}
{
int irq;
- for (irq = 0; irq < NR_IRQS; irq++) {
+ for (irq = 0; irq < nr_irqs; irq++) {
struct irq_desc *desc = irq_desc + irq;
if (desc->status & IRQ_MOVE_PENDING) {
unsigned long flags;
struct irq_desc *desc;
struct irq_cfg *cfg;
irq = __get_cpu_var(vector_irq)[vector];
- if (irq >= NR_IRQS)
+ if (irq >= nr_irqs)
continue;
desc = irq_desc + irq;
* Also, we've got to be careful not to trash gate
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
- for (irq = 0; irq < NR_IRQS ; irq++) {
+ for (irq = 0; irq < nr_irqs ; irq++) {
if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) {
/*
* Hmm.. We don't have an entry for this,
irq = -ENOSPC;
spin_lock_irqsave(&vector_lock, flags);
- for (new = (NR_IRQS - 1); new >= 0; new--) {
+ for (new = (nr_irqs - 1); new >= 0; new--) {
if (platform_legacy_irq(new))
continue;
if (irq_cfg[new].vector != 0)
int overflow, irq = ~regs->orig_ax;
struct irq_desc *desc = irq_desc + irq;
- if (unlikely((unsigned)irq >= NR_IRQS)) {
+ if (unlikely((unsigned)irq >= nr_irqs)) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
__func__, irq);
BUG();
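/*
 * Worked note, not part of the patch: the low-level entry stubs push
 * the IRQ number one's-complemented into orig_ax, so interrupt frames
 * can't be confused with syscall numbers; ~regs->orig_ax undoes it.
 * For example, orig_ax == 0xffffffef decodes to irq == 0x10.
 */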
seq_putc(p, '\n');
}
- if (i < NR_IRQS) {
+ if (i < nr_irqs) {
unsigned any_count = 0;
spin_lock_irqsave(&irq_desc[i].lock, flags);
seq_putc(p, '\n');
skip:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- } else if (i == NR_IRQS) {
+ } else if (i == nr_irqs) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", nmi_count(j));
unsigned int irq;
static int warned;
- for (irq = 0; irq < NR_IRQS; irq++) {
+ for (irq = 0; irq < nr_irqs; irq++) {
cpumask_t mask;
if (irq == 2)
continue;
seq_putc(p, '\n');
}
- if (i < NR_IRQS) {
+ if (i < nr_irqs) {
unsigned any_count = 0;
spin_lock_irqsave(&irq_desc[i].lock, flags);
seq_putc(p, '\n');
skip:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- } else if (i == NR_IRQS) {
+ } else if (i == nr_irqs) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
stack_overflow_check(regs);
#endif
- if (likely(irq < NR_IRQS))
+ if (likely(irq < nr_irqs))
generic_handle_irq(irq);
else {
if (!disable_apic)
unsigned int irq;
static int warned;
- for (irq = 0; irq < NR_IRQS; irq++) {
+ for (irq = 0; irq < nr_irqs; irq++) {
cpumask_t mask;
int break_affinity = 0;
int set_affinity = 1;
*/
for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
int vector = FIRST_EXTERNAL_VECTOR + i;
- if (i >= NR_IRQS)
+ if (i >= nr_irqs)
break;
/* SYSCALL_VECTOR was reserved in trap_init. */
if (!test_bit(vector, used_vectors))
init_bsp_APIC();
init_8259A(0);
- for (i = 0; i < NR_IRQS; i++) {
+ for (i = 0; i < nr_irqs; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
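/*
 * Note, not part of the patch: depth == 1 means the line starts out
 * nested-disabled; enable_irq() decrements the count and only unmasks
 * the IRQ once it reaches zero, consistent with the IRQ_DISABLED
 * status set just above.
 */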
#include <asm/apicdef.h>
#include <asm/irq_vectors.h>
+extern int pin_map_size;
+extern int first_free_entry;
+
static inline int irq_canonicalize(int irq)
{
return ((irq == 2) ? 9 : irq);
}
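/*
 * Usage sketch, not part of the patch: on PC/AT hardware IRQ 2 is the
 * slave 8259A cascade line, so a device that claims "IRQ 2" really
 * interrupts on IRQ 9.  Drivers remap before registering a handler:
 *
 *	dev_irq = irq_canonicalize(dev_irq);	(2 becomes 9)
 *	request_irq(dev_irq, handler, IRQF_SHARED, "mydev", dev);
 *
 * dev_irq, handler, "mydev" and dev are placeholders.
 */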