Patches contributed by Eötvös Loránd University
commit 4588c1f0354ac96a358b3f9e8e4331c51cf3336f
Author: Ingo Molnar <mingo@elte.hu>
Date: Sat Sep 6 14:19:17 2008 +0200
x86: HPET_MSI Basic HPET_MSI setup code, cleanups
small style cleanups.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 3f10d16a8348..03d3655734b4 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1,39 +1,39 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/sysdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hpet.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
-#include <linux/pm.h>
-#include <linux/interrupt.h>
#include <linux/cpu.h>
+#include <linux/pm.h>
+#include <linux/io.h>
#include <asm/fixmap.h>
-#include <asm/hpet.h>
#include <asm/i8253.h>
-#include <asm/io.h>
+#include <asm/hpet.h>
-#define HPET_MASK CLOCKSOURCE_MASK(32)
-#define HPET_SHIFT 22
+#define HPET_MASK CLOCKSOURCE_MASK(32)
+#define HPET_SHIFT 22
/* FSEC = 10^-15
NSEC = 10^-9 */
-#define FSEC_PER_NSEC 1000000L
+#define FSEC_PER_NSEC 1000000L
/*
* HPET address is set in acpi/boot.c, when an ACPI entry exists
*/
-unsigned long hpet_address;
-static void __iomem *hpet_virt_address;
+unsigned long hpet_address;
+static void __iomem *hpet_virt_address;
struct hpet_dev {
- struct clock_event_device evt;
- unsigned int num;
- int cpu;
- unsigned int irq;
- unsigned int flags;
- char name[10];
+ struct clock_event_device evt;
+ unsigned int num;
+ int cpu;
+ unsigned int irq;
+ unsigned int flags;
+ char name[10];
};
unsigned long hpet_readl(unsigned long a)
@@ -70,7 +70,7 @@ static inline void hpet_clear_mapping(void)
static int boot_hpet_disable;
int hpet_force_user;
-static int __init hpet_setup(char* str)
+static int __init hpet_setup(char *str)
{
if (str) {
if (!strncmp("disable", str, 7))
@@ -91,7 +91,7 @@ __setup("nohpet", disable_hpet);
static inline int is_hpet_capable(void)
{
- return (!boot_hpet_disable && hpet_address);
+ return !boot_hpet_disable && hpet_address;
}
/*
@@ -122,10 +122,10 @@ static void hpet_reserve_platform_timers(unsigned long id)
nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
- memset(&hd, 0, sizeof (hd));
- hd.hd_phys_address = hpet_address;
- hd.hd_address = hpet;
- hd.hd_nirqs = nrtimers;
+ memset(&hd, 0, sizeof(hd));
+ hd.hd_phys_address = hpet_address;
+ hd.hd_address = hpet;
+ hd.hd_nirqs = nrtimers;
hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
@@ -141,8 +141,8 @@ static void hpet_reserve_platform_timers(unsigned long id)
hd.hd_irq[1] = HPET_LEGACY_RTC;
for (i = 2; i < nrtimers; timer++, i++) {
- hd.hd_irq[i] = (readl(&timer->hpet_config) & Tn_INT_ROUTE_CNF_MASK) >>
- Tn_INT_ROUTE_CNF_SHIFT;
+ hd.hd_irq[i] = (readl(&timer->hpet_config) &
+ Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
}
hpet_alloc(&hd);
@@ -244,7 +244,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
unsigned long cfg, cmp, now;
uint64_t delta;
- switch(mode) {
+ switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
delta >>= evt->shift;
commit c59d85a7b7822b83fc9783314543eea0ca860480
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 28 08:56:33 2008 +0200
sparseirq: export nr_irqs on m68k/sparc/s390
Stephen Rothwell reported such build failures on m68k/sparc/s390:
> ERROR: "nr_irqs" [drivers/net/hamradio/baycom_ser_fdx.ko] undefined!
> ERROR: "nr_irqs" [drivers/net/3c59x.ko] undefined!
export nr_irqs on these architectures too.
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
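As a minimal sketch of why the export is needed (the symbol name below is illustrative, not taken from the patch): a loadable module can only resolve symbols that built-in code explicitly exports with EXPORT_SYMBOL(); without the export, the module build fails with exactly the "undefined!" errors quoted above.

    /* Illustrative only -- not part of the patch. */
    #include <linux/module.h>

    int nr_example_irqs = 64;        /* definition in built-in kernel code */
    EXPORT_SYMBOL(nr_example_irqs);  /* makes the symbol visible to modules */

    /* In a module, "extern int nr_example_irqs;" now resolves at load time. */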
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
index 74453d15692e..44169e4cd91d 100644
--- a/arch/m68k/kernel/ints.c
+++ b/arch/m68k/kernel/ints.c
@@ -47,6 +47,7 @@
#endif
int nr_irqs = NR_IRQS;
+EXPORT_SYMBOL(nr_irqs);
extern u32 auto_irqhandler_fixup[];
extern u32 user_irqhandler_fixup[];
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 14eb5496c8a8..3624c4a0037a 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -18,6 +18,7 @@
#include <linux/profile.h>
int nr_irqs = NR_IRQS;
+EXPORT_SYMBOL(nr_irqs);
/*
* show_interrupts is needed by /proc/interrupts.
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index 059598b7e0f0..4b99e3ce3916 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -57,6 +57,7 @@
#endif /* SMP */
int nr_irqs = NR_IRQS;
+EXPORT_SYMBOL(nr_irqs);
unsigned long __raw_local_irq_save(void)
{
commit 0c425cec64eb0c0d0dd7037c21a25585cbe3636c
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Aug 18 13:04:26 2008 +0200
warning: fix arch x86 kernel io_apic c
fix warning:
arch/x86/kernel/io_apic.c: In function ‘print_local_APIC’:
arch/x86/kernel/io_apic.c:1786: warning: format ‘%08x’ expects type ‘unsigned int’, but argument 2 has type ‘u64’
arch/x86/kernel/io_apic.c:1787: warning: format ‘%08x’ expects type ‘unsigned int’, but argument 2 has type ‘u64’
The fix creates uniform behavior on 32-bit and 64-bit by printing the ICR value as two 32-bit words.
Code has changed:
text data bss dec hex filename
22901 19650 17040 59591 e8c7 io_apic.o.before
22899 19650 17040 59589 e8c5 io_apic.o.after
The size difference is due to the 32-bit cast narrowing the printed value on 64-bit.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
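As a stand-alone sketch of the technique (user-space C with a made-up ICR value, not code from the patch): casting the 64-bit value to 32 bits before handing it to a "%08x" conversion silences the format warning and prints the low and high halves separately on both 32-bit and 64-bit builds.

    /* Illustrative only -- not part of the patch. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t icr = 0x000000fd000c4611ULL;                        /* example value */

            printf("... APIC ICR : %08x\n", (unsigned int)icr);         /* low 32 bits  */
            printf("... APIC ICR2: %08x\n", (unsigned int)(icr >> 32)); /* high 32 bits */
            return 0;
    }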
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index f7e80262cbbb..34c74cf5c244 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -1774,8 +1774,8 @@ __apicdebuginit(void) print_local_APIC(void *dummy)
}
icr = apic_icr_read();
- printk(KERN_DEBUG "... APIC ICR: %08x\n", icr);
- printk(KERN_DEBUG "... APIC ICR2: %08x\n", icr >> 32);
+ printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
+ printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
v = apic_read(APIC_LVTT);
printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
commit 54168ed7f2a4f3fc2780e645124ae952598da601
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Aug 20 09:07:45 2008 +0200
x86: make io_apic_32.c the same as io_apic_64.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 3ed36041c81e..fba6d6ee3480 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -35,7 +35,7 @@
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
-#include <linux/jiffies.h> /* time_after() */
+#include <linux/jiffies.h> /* time_after() */
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
@@ -64,8 +64,8 @@
#define __apicdebuginit(type) static type __init
/*
- * Is the SiS APIC rmw bug present ?
- * -1 = don't know, 0 = no, 1 = yes
+ * Is the SiS APIC rmw bug present ?
+ * -1 = don't know, 0 = no, 1 = yes
*/
int sis_apic_bug = -1;
@@ -102,7 +102,7 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
int skip_ioapic_setup;
-static int __init parse_noapic(char *arg)
+static int __init parse_noapic(char *str)
{
/* disable IO-APIC */
disable_ioapic_setup();
@@ -188,7 +188,7 @@ static void __init init_work(void *data)
irq_cfgx[legacy_count - 1].next = NULL;
}
-#define for_each_irq_cfg(cfg) \
+#define for_each_irq_cfg(cfg) \
for (cfg = irq_cfgx; cfg; cfg = cfg->next)
DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
@@ -262,7 +262,6 @@ static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
irq_cfgx = cfg;
cfg->irq = irq;
printk(KERN_DEBUG "found new irq_cfg for irq %d\n", cfg->irq);
-
#ifdef CONFIG_HAVE_SPARSE_IRQ_DEBUG
{
/* dump the results */
@@ -384,9 +383,9 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned i
*/
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
- volatile struct io_apic __iomem *io_apic = io_apic_base(apic);
- if (sis_apic_bug)
- writel(reg, &io_apic->index);
+ struct io_apic __iomem *io_apic = io_apic_base(apic);
+ if (sis_apic_bug)
+ writel(reg, &io_apic->index);
writel(value, &io_apic->data);
}
@@ -494,11 +493,20 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
apic = entry->apic;
pin = entry->pin;
+#ifdef CONFIG_INTR_REMAP
+ /*
+ * With interrupt-remapping, destination information comes
+ * from interrupt-remapping table entry.
+ */
+ if (!irq_remapped(irq))
+ io_apic_write(apic, 0x11 + pin*2, dest);
+#else
io_apic_write(apic, 0x11 + pin*2, dest);
+#endif
reg = io_apic_read(apic, 0x10 + pin*2);
reg &= ~IO_APIC_REDIR_VECTOR_MASK;
reg |= vector;
- io_apic_modify(apic, 0x10 + pin *2, reg);
+ io_apic_modify(apic, 0x10 + pin*2, reg);
if (!entry->next)
break;
entry = entry->next;
@@ -513,6 +521,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
unsigned long flags;
unsigned int dest;
cpumask_t tmp;
+ struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
@@ -529,12 +538,12 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
*/
dest = SET_APIC_LOGICAL_ID(dest);
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&ioapic_lock, flags);
__target_IO_APIC_irq(irq, dest, cfg->vector);
- irq_to_desc(irq)->affinity = mask;
+ desc->affinity = mask;
spin_unlock_irqrestore(&ioapic_lock, flags);
}
-
#endif /* CONFIG_SMP */
/*
@@ -699,7 +708,7 @@ static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
#endif
-static void mask_IO_APIC_irq(unsigned int irq)
+static void mask_IO_APIC_irq (unsigned int irq)
{
unsigned long flags;
@@ -708,7 +717,7 @@ static void mask_IO_APIC_irq(unsigned int irq)
spin_unlock_irqrestore(&ioapic_lock, flags);
}
-static void unmask_IO_APIC_irq(unsigned int irq)
+static void unmask_IO_APIC_irq (unsigned int irq)
{
unsigned long flags;
@@ -725,14 +734,13 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
entry = ioapic_read_entry(apic, pin);
if (entry.delivery_mode == dest_SMI)
return;
-
/*
* Disable it in the IO-APIC irq-routing table:
*/
ioapic_mask_entry(apic, pin);
}
-static void clear_IO_APIC(void)
+static void clear_IO_APIC (void)
{
int apic, pin;
@@ -741,7 +749,7 @@ static void clear_IO_APIC(void)
clear_IO_APIC_pin(apic, pin);
}
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
void send_IPI_self(int vector)
{
unsigned int cfg;
@@ -756,9 +764,9 @@ void send_IPI_self(int vector)
*/
apic_write(APIC_ICR, cfg);
}
-#endif /* !CONFIG_SMP */
-
+#endif /* !CONFIG_SMP && CONFIG_X86_32*/
+#ifdef CONFIG_X86_32
/*
* support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
* specific CPU-side IRQs.
@@ -797,6 +805,75 @@ static int __init ioapic_pirq_setup(char *str)
}
__setup("pirq=", ioapic_pirq_setup);
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_INTR_REMAP
+/* I/O APIC RTE contents at the OS boot up */
+static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
+
+/*
+ * Saves and masks all the unmasked IO-APIC RTE's
+ */
+int save_mask_IO_APIC_setup(void)
+{
+ union IO_APIC_reg_01 reg_01;
+ unsigned long flags;
+ int apic, pin;
+
+ /*
+ * The number of IO-APIC IRQ registers (== #pins):
+ */
+ for (apic = 0; apic < nr_ioapics; apic++) {
+ spin_lock_irqsave(&ioapic_lock, flags);
+ reg_01.raw = io_apic_read(apic, 1);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
+ }
+
+ for (apic = 0; apic < nr_ioapics; apic++) {
+ early_ioapic_entries[apic] =
+ kzalloc(sizeof(struct IO_APIC_route_entry) *
+ nr_ioapic_registers[apic], GFP_KERNEL);
+ if (!early_ioapic_entries[apic])
+ return -ENOMEM;
+ }
+
+ for (apic = 0; apic < nr_ioapics; apic++)
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+ struct IO_APIC_route_entry entry;
+
+ entry = early_ioapic_entries[apic][pin] =
+ ioapic_read_entry(apic, pin);
+ if (!entry.mask) {
+ entry.mask = 1;
+ ioapic_write_entry(apic, pin, entry);
+ }
+ }
+ return 0;
+}
+
+void restore_IO_APIC_setup(void)
+{
+ int apic, pin;
+
+ for (apic = 0; apic < nr_ioapics; apic++)
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
+ ioapic_write_entry(apic, pin,
+ early_ioapic_entries[apic][pin]);
+}
+
+void reinit_intr_remapped_IO_APIC(int intr_remapping)
+{
+ /*
+ * for now plain restore of previous settings.
+ * TBD: In the case of OS enabling interrupt-remapping,
+ * IO-APIC RTE's need to be setup to point to interrupt-remapping
+ * table entries. for now, do a plain restore, and wait for
+ * the setup_IO_APIC_irqs() to do proper initialization.
+ */
+ restore_IO_APIC_setup();
+}
+#endif
/*
* Find the IRQ entry number of a certain pin.
@@ -848,7 +925,7 @@ static int __init find_isa_irq_apic(int irq, int type)
}
if (i < mp_irq_entries) {
int apic;
- for (apic = 0; apic < nr_ioapics; apic++) {
+ for(apic = 0; apic < nr_ioapics; apic++) {
if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
return apic;
}
@@ -867,10 +944,10 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
int apic, i, best_guess = -1;
- apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
- "slot:%d, pin:%d.\n", bus, slot, pin);
+ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
+ bus, slot, pin);
if (test_bit(bus, mp_bus_not_pci)) {
- printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
+ apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
return -1;
}
for (i = 0; i < mp_irq_entries; i++) {
@@ -885,7 +962,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
!mp_irqs[i].mp_irqtype &&
(bus == lbus) &&
(slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
- int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);
+ int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
if (!(apic || IO_APIC_IRQ(irq)))
continue;
@@ -902,6 +979,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
}
return best_guess;
}
+
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
@@ -918,6 +996,7 @@ static int EISA_ELCR(unsigned int irq)
"Broken MPtable reports ISA irq %d\n", irq);
return 0;
}
+
#endif
/* ISA interrupts are always polarity zero edge triggered,
@@ -954,36 +1033,36 @@ static int MPBIOS_polarity(int idx)
/*
* Determine IRQ line polarity (high active or low active):
*/
- switch (mp_irqs[idx].mp_irqflag & 3) {
- case 0: /* conforms, ie. bus-type dependent polarity */
- {
- polarity = test_bit(bus, mp_bus_not_pci)?
- default_ISA_polarity(idx):
- default_PCI_polarity(idx);
- break;
- }
- case 1: /* high active */
- {
- polarity = 0;
- break;
- }
- case 2: /* reserved */
- {
- printk(KERN_WARNING "broken BIOS!!\n");
- polarity = 1;
- break;
- }
- case 3: /* low active */
- {
- polarity = 1;
- break;
- }
- default: /* invalid */
+ switch (mp_irqs[idx].mp_irqflag & 3)
{
- printk(KERN_WARNING "broken BIOS!!\n");
- polarity = 1;
- break;
- }
+ case 0: /* conforms, ie. bus-type dependent polarity */
+ if (test_bit(bus, mp_bus_not_pci))
+ polarity = default_ISA_polarity(idx);
+ else
+ polarity = default_PCI_polarity(idx);
+ break;
+ case 1: /* high active */
+ {
+ polarity = 0;
+ break;
+ }
+ case 2: /* reserved */
+ {
+ printk(KERN_WARNING "broken BIOS!!\n");
+ polarity = 1;
+ break;
+ }
+ case 3: /* low active */
+ {
+ polarity = 1;
+ break;
+ }
+ default: /* invalid */
+ {
+ printk(KERN_WARNING "broken BIOS!!\n");
+ polarity = 1;
+ break;
+ }
}
return polarity;
}
@@ -996,67 +1075,67 @@ static int MPBIOS_trigger(int idx)
/*
* Determine IRQ trigger mode (edge or level sensitive):
*/
- switch ((mp_irqs[idx].mp_irqflag>>2) & 3) {
- case 0: /* conforms, ie. bus-type dependent */
+ switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
{
- trigger = test_bit(bus, mp_bus_not_pci)?
- default_ISA_trigger(idx):
- default_PCI_trigger(idx);
+ case 0: /* conforms, ie. bus-type dependent */
+ if (test_bit(bus, mp_bus_not_pci))
+ trigger = default_ISA_trigger(idx);
+ else
+ trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
- switch (mp_bus_id_to_type[bus]) {
- case MP_BUS_ISA: /* ISA pin */
- {
- /* set before the switch */
+ switch (mp_bus_id_to_type[bus]) {
+ case MP_BUS_ISA: /* ISA pin */
+ {
+ /* set before the switch */
+ break;
+ }
+ case MP_BUS_EISA: /* EISA pin */
+ {
+ trigger = default_EISA_trigger(idx);
+ break;
+ }
+ case MP_BUS_PCI: /* PCI pin */
+ {
+ /* set before the switch */
+ break;
+ }
+ case MP_BUS_MCA: /* MCA pin */
+ {
+ trigger = default_MCA_trigger(idx);
+ break;
+ }
+ default:
+ {
+ printk(KERN_WARNING "broken BIOS!!\n");
+ trigger = 1;
+ break;
+ }
+ }
+#endif
break;
- }
- case MP_BUS_EISA: /* EISA pin */
+ case 1: /* edge */
{
- trigger = default_EISA_trigger(idx);
+ trigger = 0;
break;
}
- case MP_BUS_PCI: /* PCI pin */
+ case 2: /* reserved */
{
- /* set before the switch */
+ printk(KERN_WARNING "broken BIOS!!\n");
+ trigger = 1;
break;
}
- case MP_BUS_MCA: /* MCA pin */
+ case 3: /* level */
{
- trigger = default_MCA_trigger(idx);
+ trigger = 1;
break;
}
- default:
+ default: /* invalid */
{
printk(KERN_WARNING "broken BIOS!!\n");
- trigger = 1;
+ trigger = 0;
break;
}
}
-#endif
- break;
- }
- case 1: /* edge */
- {
- trigger = 0;
- break;
- }
- case 2: /* reserved */
- {
- printk(KERN_WARNING "broken BIOS!!\n");
- trigger = 1;
- break;
- }
- case 3: /* level */
- {
- trigger = 1;
- break;
- }
- default: /* invalid */
- {
- printk(KERN_WARNING "broken BIOS!!\n");
- trigger = 0;
- break;
- }
- }
return trigger;
}
@@ -1082,9 +1161,9 @@ static int pin_2_irq(int idx, int apic, int pin)
if (mp_irqs[idx].mp_dstirq != pin)
printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
- if (test_bit(bus, mp_bus_not_pci))
+ if (test_bit(bus, mp_bus_not_pci)) {
irq = mp_irqs[idx].mp_srcbusirq;
- else {
+ } else {
/*
* PCI IRQs are mapped in order
*/
@@ -1092,14 +1171,14 @@ static int pin_2_irq(int idx, int apic, int pin)
while (i < apic)
irq += nr_ioapic_registers[i++];
irq += pin;
-
- /*
- * For MPS mode, so far only needed by ES7000 platform
- */
- if (ioapic_renumber_irq)
- irq = ioapic_renumber_irq(apic, irq);
+ /*
+ * For MPS mode, so far only needed by ES7000 platform
+ */
+ if (ioapic_renumber_irq)
+ irq = ioapic_renumber_irq(apic, irq);
}
+#ifdef CONFIG_X86_32
/*
* PCI IRQ command line redirection. Yes, limits are hardcoded.
*/
@@ -1116,6 +1195,8 @@ static int pin_2_irq(int idx, int apic, int pin)
}
}
}
+#endif
+
return irq;
}
@@ -1145,74 +1226,70 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
* Also, we've got to be careful not to trash gate
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
- static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
- unsigned int old_vector;
- int cpu;
- struct irq_cfg *cfg;
+ static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
+ unsigned int old_vector;
+ int cpu;
+ struct irq_cfg *cfg;
- cfg = irq_cfg(irq);
+ cfg = irq_cfg(irq);
- /* Only try and allocate irqs on cpus that are present */
- cpus_and(mask, mask, cpu_online_map);
+ /* Only try and allocate irqs on cpus that are present */
+ cpus_and(mask, mask, cpu_online_map);
- if ((cfg->move_in_progress) || cfg->move_cleanup_count)
- return -EBUSY;
+ if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+ return -EBUSY;
- old_vector = cfg->vector;
- if (old_vector) {
- cpumask_t tmp;
- cpus_and(tmp, cfg->domain, mask);
- if (!cpus_empty(tmp))
- return 0;
- }
+ old_vector = cfg->vector;
+ if (old_vector) {
+ cpumask_t tmp;
+ cpus_and(tmp, cfg->domain, mask);
+ if (!cpus_empty(tmp))
+ return 0;
+ }
- for_each_cpu_mask_nr(cpu, mask) {
- cpumask_t domain, new_mask;
- int new_cpu;
- int vector, offset;
+ for_each_cpu_mask_nr(cpu, mask) {
+ cpumask_t domain, new_mask;
+ int new_cpu;
+ int vector, offset;
- domain = vector_allocation_domain(cpu);
- cpus_and(new_mask, domain, cpu_online_map);
+ domain = vector_allocation_domain(cpu);
+ cpus_and(new_mask, domain, cpu_online_map);
- vector = current_vector;
- offset = current_offset;
+ vector = current_vector;
+ offset = current_offset;
next:
- vector += 8;
- if (vector >= first_system_vector) {
- /* If we run out of vectors on large boxen, must share them. */
- offset = (offset + 1) % 8;
- vector = FIRST_DEVICE_VECTOR + offset;
- }
- if (unlikely(current_vector == vector))
- continue;
+ vector += 8;
+ if (vector >= first_system_vector) {
+ /* If we run out of vectors on large boxen, must share them. */
+ offset = (offset + 1) % 8;
+ vector = FIRST_DEVICE_VECTOR + offset;
+ }
+ if (unlikely(current_vector == vector))
+ continue;
#ifdef CONFIG_X86_64
- if (vector == IA32_SYSCALL_VECTOR)
- goto next;
+ if (vector == IA32_SYSCALL_VECTOR)
+ goto next;
#else
- if (vector == SYSCALL_VECTOR)
- goto next;
+ if (vector == SYSCALL_VECTOR)
+ goto next;
#endif
- for_each_cpu_mask_nr(new_cpu, new_mask)
- if (per_cpu(vector_irq, new_cpu)[vector] != -1)
- goto next;
- /* Found one! */
- current_vector = vector;
- current_offset = offset;
- if (old_vector) {
- cfg->move_in_progress = 1;
- cfg->old_domain = cfg->domain;
- }
- printk(KERN_DEBUG "assign_irq_vector: irq %d vector %#x cpu ", irq, vector);
- for_each_cpu_mask_nr(new_cpu, new_mask) {
- per_cpu(vector_irq, new_cpu)[vector] = irq;
- printk(KERN_CONT " %d ", new_cpu);
+ for_each_cpu_mask_nr(new_cpu, new_mask)
+ if (per_cpu(vector_irq, new_cpu)[vector] != -1)
+ goto next;
+ /* Found one! */
+ current_vector = vector;
+ current_offset = offset;
+ if (old_vector) {
+ cfg->move_in_progress = 1;
+ cfg->old_domain = cfg->domain;
}
- printk(KERN_CONT "\n");
- cfg->vector = vector;
- cfg->domain = domain;
- return 0;
- }
- return -ENOSPC;
+ for_each_cpu_mask_nr(new_cpu, new_mask)
+ per_cpu(vector_irq, new_cpu)[vector] = irq;
+ cfg->vector = vector;
+ cfg->domain = domain;
+ return 0;
+ }
+ return -ENOSPC;
}
static int assign_irq_vector(int irq, cpumask_t mask)
@@ -1223,7 +1300,6 @@ static int assign_irq_vector(int irq, cpumask_t mask)
spin_lock_irqsave(&vector_lock, flags);
err = __assign_irq_vector(irq, mask);
spin_unlock_irqrestore(&vector_lock, flags);
-
return err;
}
@@ -1269,36 +1345,39 @@ void __setup_vector_irq(int cpu)
cfg = irq_cfg(irq);
if (!cpu_isset(cpu, cfg->domain))
per_cpu(vector_irq, cpu)[vector] = -1;
- }
+ }
}
static struct irq_chip ioapic_chip;
+#ifdef CONFIG_INTR_REMAP
+static struct irq_chip ir_ioapic_chip;
+#endif
-#define IOAPIC_AUTO -1
-#define IOAPIC_EDGE 0
-#define IOAPIC_LEVEL 1
+#define IOAPIC_AUTO -1
+#define IOAPIC_EDGE 0
+#define IOAPIC_LEVEL 1
#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
- int apic, idx, pin;
+ int apic, idx, pin;
- for (apic = 0; apic < nr_ioapics; apic++) {
- for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
- idx = find_irq_entry(apic, pin, mp_INT);
- if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
- return irq_trigger(idx);
- }
- }
- /*
- * nonexistent IRQs are edge default
- */
- return 0;
+ for (apic = 0; apic < nr_ioapics; apic++) {
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+ idx = find_irq_entry(apic, pin, mp_INT);
+ if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
+ return irq_trigger(idx);
+ }
+ }
+ /*
+ * nonexistent IRQs are edge default
+ */
+ return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
- return 1;
+ return 1;
}
#endif
@@ -1318,13 +1397,27 @@ static void ioapic_register_intr(int irq, unsigned long trigger)
else
desc->status &= ~IRQ_LEVEL;
+#ifdef CONFIG_INTR_REMAP
+ if (irq_remapped(irq)) {
+ desc->status |= IRQ_MOVE_PCNTXT;
+ if (trigger)
+ set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
+ handle_fasteoi_irq,
+ "fasteoi");
+ else
+ set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
+ handle_edge_irq, "edge");
+ return;
+ }
+#endif
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL)
set_irq_chip_and_handler_name(irq, &ioapic_chip,
- handle_fasteoi_irq, "fasteoi");
+ handle_fasteoi_irq,
+ "fasteoi");
else
set_irq_chip_and_handler_name(irq, &ioapic_chip,
- handle_edge_irq, "edge");
+ handle_edge_irq, "edge");
}
static int setup_ioapic_entry(int apic, int irq,
@@ -1337,11 +1430,45 @@ static int setup_ioapic_entry(int apic, int irq,
*/
memset(entry,0,sizeof(*entry));
- entry->delivery_mode = INT_DELIVERY_MODE;
- entry->dest_mode = INT_DEST_MODE;
- entry->dest = destination;
+#ifdef CONFIG_INTR_REMAP
+ if (intr_remapping_enabled) {
+ struct intel_iommu *iommu = map_ioapic_to_ir(apic);
+ struct irte irte;
+ struct IR_IO_APIC_route_entry *ir_entry =
+ (struct IR_IO_APIC_route_entry *) entry;
+ int index;
+
+ if (!iommu)
+ panic("No mapping iommu for ioapic %d\n", apic);
+
+ index = alloc_irte(iommu, irq, 1);
+ if (index < 0)
+ panic("Failed to allocate IRTE for ioapic %d\n", apic);
+
+ memset(&irte, 0, sizeof(irte));
+
+ irte.present = 1;
+ irte.dst_mode = INT_DEST_MODE;
+ irte.trigger_mode = trigger;
+ irte.dlvry_mode = INT_DELIVERY_MODE;
+ irte.vector = vector;
+ irte.dest_id = IRTE_DEST(destination);
+
+ modify_irte(irq, &irte);
+
+ ir_entry->index2 = (index >> 15) & 0x1;
+ ir_entry->zero = 0;
+ ir_entry->format = 1;
+ ir_entry->index = (index & 0x7fff);
+ } else
+#endif
+ {
+ entry->delivery_mode = INT_DELIVERY_MODE;
+ entry->dest_mode = INT_DEST_MODE;
+ entry->dest = destination;
+ }
- entry->mask = 0; /* enable IRQ */
+ entry->mask = 0; /* enable IRQ */
entry->trigger = trigger;
entry->polarity = polarity;
entry->vector = vector;
@@ -1351,12 +1478,11 @@ static int setup_ioapic_entry(int apic, int irq,
*/
if (trigger)
entry->mask = 1;
-
return 0;
}
static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
- int trigger, int polarity)
+ int trigger, int polarity)
{
struct irq_cfg *cfg;
struct IO_APIC_route_entry entry;
@@ -1420,10 +1546,10 @@ static void __init setup_IO_APIC_irqs(void)
}
irq = pin_2_irq(idx, apic, pin);
-
+#ifdef CONFIG_X86_32
if (multi_timer_check(apic, irq))
continue;
-
+#endif
add_pin_to_irq(irq, apic, pin);
setup_IO_APIC_irq(apic, pin, irq,
@@ -1443,6 +1569,11 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
{
struct IO_APIC_route_entry entry;
+#ifdef CONFIG_INTR_REMAP
+ if (intr_remapping_enabled)
+ return;
+#endif
+
memset(&entry, 0, sizeof(entry));
/*
@@ -1461,7 +1592,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
* The timer IRQ doesn't have to know that behind the
* scene we may have a 8259A-master in AEOI mode ...
*/
- ioapic_register_intr(0, IOAPIC_EDGE);
+ set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
/*
* Add it to the IO-APIC irq-routing table:
@@ -1501,17 +1632,18 @@ __apicdebuginit(void) print_IO_APIC(void)
reg_01.raw = io_apic_read(apic, 1);
if (reg_01.bits.version >= 0x10)
reg_02.raw = io_apic_read(apic, 2);
- if (reg_01.bits.version >= 0x20)
- reg_03.raw = io_apic_read(apic, 3);
+ if (reg_01.bits.version >= 0x20)
+ reg_03.raw = io_apic_read(apic, 3);
spin_unlock_irqrestore(&ioapic_lock, flags);
+ printk("\n");
printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
- printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
+ printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
@@ -1548,7 +1680,10 @@ __apicdebuginit(void) print_IO_APIC(void)
entry = ioapic_read_entry(apic, i);
- printk(KERN_DEBUG " %02x %02X ", i, entry.dest);
+ printk(KERN_DEBUG " %02x %03X ",
+ i,
+ entry.dest
+ );
printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
entry.mask,
@@ -1567,7 +1702,7 @@ __apicdebuginit(void) print_IO_APIC(void)
struct irq_pin_list *entry = cfg->irq_2_pin;
if (!entry)
continue;
- printk(KERN_DEBUG "IRQ%d ", i);
+ printk(KERN_DEBUG "IRQ%d ", cfg->irq);
for (;;) {
printk("-> %d:%d", entry->apic, entry->pin);
if (!entry->next)
@@ -1614,8 +1749,7 @@ __apicdebuginit(void) print_local_APIC(void *dummy)
printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
smp_processor_id(), hard_smp_processor_id());
v = apic_read(APIC_ID);
- printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v,
- GET_APIC_ID(v));
+ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
v = apic_read(APIC_LVR);
printk(KERN_INFO "... APIC VERSION: %08x\n", v);
ver = GET_APIC_VERSION(v);
@@ -1624,7 +1758,7 @@ __apicdebuginit(void) print_local_APIC(void *dummy)
v = apic_read(APIC_TASKPRI);
printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
- if (APIC_INTEGRATED(ver)) { /* !82489DX */
+ if (APIC_INTEGRATED(ver)) { /* !82489DX */
v = apic_read(APIC_ARBPRI);
printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
v & APIC_ARBPRI_MASK);
@@ -1650,9 +1784,10 @@ __apicdebuginit(void) print_local_APIC(void *dummy)
printk(KERN_DEBUG "... APIC IRR field:\n");
print_APIC_bitfield(APIC_IRR);
- if (APIC_INTEGRATED(ver)) { /* !82489DX */
- if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
+ if (APIC_INTEGRATED(ver)) { /* !82489DX */
+ if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
apic_write(APIC_ESR, 0);
+
v = apic_read(APIC_ESR);
printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
}
@@ -1710,11 +1845,11 @@ __apicdebuginit(void) print_PIC(void)
v = inb(0xa0) << 8 | inb(0x20);
printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
- outb(0x0b, 0xa0);
- outb(0x0b, 0x20);
+ outb(0x0b,0xa0);
+ outb(0x0b,0x20);
v = inb(0xa0) << 8 | inb(0x20);
- outb(0x0a, 0xa0);
- outb(0x0a, 0x20);
+ outb(0x0a,0xa0);
+ outb(0x0a,0x20);
spin_unlock_irqrestore(&i8259A_lock, flags);
@@ -1739,16 +1874,19 @@ fs_initcall(print_all_ICs);
/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
-static void __init enable_IO_APIC(void)
+void __init enable_IO_APIC(void)
{
union IO_APIC_reg_01 reg_01;
int i8259_apic, i8259_pin;
- int i, apic;
+ int apic;
unsigned long flags;
+#ifdef CONFIG_X86_32
+ int i;
if (!pirqs_enabled)
for (i = 0; i < MAX_PIRQS; i++)
pirq_entries[i] = -1;
+#endif
/*
* The number of IO-APIC IRQ registers (== #pins):
@@ -1759,7 +1897,7 @@ static void __init enable_IO_APIC(void)
spin_unlock_irqrestore(&ioapic_lock, flags);
nr_ioapic_registers[apic] = reg_01.bits.entries+1;
}
- for (apic = 0; apic < nr_ioapics; apic++) {
+ for(apic = 0; apic < nr_ioapics; apic++) {
int pin;
/* See if any of the pins is in ExtINT mode */
for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
@@ -1830,16 +1968,18 @@ void disable_IO_APIC(void)
entry.dest_mode = 0; /* Physical */
entry.delivery_mode = dest_ExtINT; /* ExtInt */
entry.vector = 0;
- entry.dest = read_apic_id();
+ entry.dest = read_apic_id();
/*
* Add it to the IO-APIC irq-routing table:
*/
ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
}
+
disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
+#ifdef CONFIG_X86_32
/*
* function to set the IO-APIC physical IDs based on the
* values stored in the MPC table.
@@ -1940,8 +2080,6 @@ static void __init setup_ioapic_ids_from_mpc(void)
reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(apic, 0, reg_00.raw);
- spin_unlock_irqrestore(&ioapic_lock, flags);
/*
* Sanity check
@@ -1955,6 +2093,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
apic_printk(APIC_VERBOSE, " ok.\n");
}
}
+#endif
int no_timer_check __initdata;
@@ -1994,9 +2133,10 @@ static int __init timer_irq_works(void)
* might have cached one ExtINT interrupt. Finally, at
* least one tick may be lost due to delays.
*/
+
+ /* jiffies wrap? */
if (time_after(jiffies, t1 + 4))
return 1;
-
return 0;
}
@@ -2014,8 +2154,6 @@ static int __init timer_irq_works(void)
*/
/*
- * Startup quirk:
- *
* Starting up a edge-triggered IO-APIC interrupt is
* nasty - we need to make sure that we get the edge.
* If it is already asserted for some reason, we need
@@ -2023,9 +2161,8 @@ static int __init timer_irq_works(void)
*
* This is not complete - we should be able to fake
* an edge even if it isn't on the 8259A...
- *
- * (We do this for level-triggered IRQs too - it cannot hurt.)
*/
+
static unsigned int startup_ioapic_irq(unsigned int irq)
{
int was_pending = 0;
@@ -2043,70 +2180,254 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
return was_pending;
}
+#ifdef CONFIG_X86_64
static int ioapic_retrigger_irq(unsigned int irq)
{
- send_IPI_self(irq_cfg(irq)->vector);
+
+ struct irq_cfg *cfg = irq_cfg(irq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
+ spin_unlock_irqrestore(&vector_lock, flags);
return 1;
}
-
-#ifdef CONFIG_SMP
-asmlinkage void smp_irq_move_cleanup_interrupt(void)
+#else
+static int ioapic_retrigger_irq(unsigned int irq)
{
- unsigned vector, me;
- ack_APIC_irq();
- irq_enter();
-
- me = smp_processor_id();
- for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
- unsigned int irq;
- struct irq_desc *desc;
- struct irq_cfg *cfg;
- irq = __get_cpu_var(vector_irq)[vector];
+ send_IPI_self(irq_cfg(irq)->vector);
- desc = irq_to_desc(irq);
- if (!desc)
- continue;
+ return 1;
+}
+#endif
- cfg = irq_cfg(irq);
- spin_lock(&desc->lock);
- if (!cfg->move_cleanup_count)
- goto unlock;
+/*
+ * Level and edge triggered IO-APIC interrupts need different handling,
+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
+ * handled with the level-triggered descriptor, but that one has slightly
+ * more overhead. Level-triggered interrupts cannot be handled with the
+ * edge-triggered handler, without risking IRQ storms and other ugly
+ * races.
+ */
- if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
- goto unlock;
+#ifdef CONFIG_SMP
- __get_cpu_var(vector_irq)[vector] = -1;
- cfg->move_cleanup_count--;
-unlock:
- spin_unlock(&desc->lock);
- }
+#ifdef CONFIG_INTR_REMAP
+static void ir_irq_migration(struct work_struct *work);
- irq_exit();
-}
+static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
-static void irq_complete_move(unsigned int irq)
+/*
+ * Migrate the IO-APIC irq in the presence of intr-remapping.
+ *
+ * For edge triggered, irq migration is a simple atomic update(of vector
+ * and cpu destination) of IRTE and flush the hardware cache.
+ *
+ * For level triggered, we need to modify the io-apic RTE aswell with the update
+ * vector information, along with modifying IRTE with vector and destination.
+ * So irq migration for level triggered is little bit more complex compared to
+ * edge triggered migration. But the good news is, we use the same algorithm
+ * for level triggered migration as we have today, only difference being,
+ * we now initiate the irq migration from process context instead of the
+ * interrupt context.
+ *
+ * In future, when we do a directed EOI (combined with cpu EOI broadcast
+ * suppression) to the IO-APIC, level triggered irq migration will also be
+ * as simple as edge triggered migration and we can do the irq migration
+ * with a simple atomic update to IO-APIC RTE.
+ */
+static void migrate_ioapic_irq(int irq, cpumask_t mask)
{
- struct irq_cfg *cfg = irq_cfg(irq);
- unsigned vector, me;
+ struct irq_cfg *cfg;
+ struct irq_desc *desc;
+ cpumask_t tmp, cleanup_mask;
+ struct irte irte;
+ int modify_ioapic_rte;
+ unsigned int dest;
+ unsigned long flags;
- if (likely(!cfg->move_in_progress))
+ cpus_and(tmp, mask, cpu_online_map);
+ if (cpus_empty(tmp))
return;
- vector = ~get_irq_regs()->orig_ax;
- me = smp_processor_id();
- if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
- cpumask_t cleanup_mask;
+ if (get_irte(irq, &irte))
+ return;
- cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
- cfg->move_cleanup_count = cpus_weight(cleanup_mask);
- send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+ if (assign_irq_vector(irq, mask))
+ return;
+
+ cfg = irq_cfg(irq);
+ cpus_and(tmp, cfg->domain, mask);
+ dest = cpu_mask_to_apicid(tmp);
+
+ desc = irq_to_desc(irq);
+ modify_ioapic_rte = desc->status & IRQ_LEVEL;
+ if (modify_ioapic_rte) {
+ spin_lock_irqsave(&ioapic_lock, flags);
+ __target_IO_APIC_irq(irq, dest, cfg->vector);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+
+ irte.vector = cfg->vector;
+ irte.dest_id = IRTE_DEST(dest);
+
+ /*
+ * Modified the IRTE and flushes the Interrupt entry cache.
+ */
+ modify_irte(irq, &irte);
+
+ if (cfg->move_in_progress) {
+ cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+ cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+ send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+ cfg->move_in_progress = 0;
+ }
+
+ desc->affinity = mask;
+}
+
+static int migrate_irq_remapped_level(int irq)
+{
+ int ret = -1;
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ mask_IO_APIC_irq(irq);
+
+ if (io_apic_level_ack_pending(irq)) {
+ /*
+ * Interrupt in progress. Migrating irq now will change the
+ * vector information in the IO-APIC RTE and that will confuse
+ * the EOI broadcast performed by cpu.
+ * So, delay the irq migration to the next instance.
+ */
+ schedule_delayed_work(&ir_migration_work, 1);
+ goto unmask;
+ }
+
+ /* everthing is clear. we have right of way */
+ migrate_ioapic_irq(irq, desc->pending_mask);
+
+ ret = 0;
+ desc->status &= ~IRQ_MOVE_PENDING;
+ cpus_clear(desc->pending_mask);
+
+unmask:
+ unmask_IO_APIC_irq(irq);
+ return ret;
+}
+
+static void ir_irq_migration(struct work_struct *work)
+{
+ unsigned int irq;
+ struct irq_desc *desc;
+
+ for_each_irq_desc(irq, desc) {
+ if (desc->status & IRQ_MOVE_PENDING) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if (!desc->chip->set_affinity ||
+ !(desc->status & IRQ_MOVE_PENDING)) {
+ desc->status &= ~IRQ_MOVE_PENDING;
+ spin_unlock_irqrestore(&desc->lock, flags);
+ continue;
+ }
+
+ desc->chip->set_affinity(irq, desc->pending_mask);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ }
+ }
+}
+
+/*
+ * Migrates the IRQ destination in the process context.
+ */
+static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc->status & IRQ_LEVEL) {
+ desc->status |= IRQ_MOVE_PENDING;
+ desc->pending_mask = mask;
+ migrate_irq_remapped_level(irq);
+ return;
+ }
+
+ migrate_ioapic_irq(irq, mask);
+}
+#endif
+
+asmlinkage void smp_irq_move_cleanup_interrupt(void)
+{
+ unsigned vector, me;
+ ack_APIC_irq();
+#ifdef CONFIG_X86_64
+ exit_idle();
+#endif
+ irq_enter();
+
+ me = smp_processor_id();
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+ unsigned int irq;
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+ irq = __get_cpu_var(vector_irq)[vector];
+
+ desc = irq_to_desc(irq);
+ if (!desc)
+ continue;
+
+ cfg = irq_cfg(irq);
+ spin_lock(&desc->lock);
+ if (!cfg->move_cleanup_count)
+ goto unlock;
+
+ if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
+ goto unlock;
+
+ __get_cpu_var(vector_irq)[vector] = -1;
+ cfg->move_cleanup_count--;
+unlock:
+ spin_unlock(&desc->lock);
+ }
+
+ irq_exit();
+}
+
+static void irq_complete_move(unsigned int irq)
+{
+ struct irq_cfg *cfg = irq_cfg(irq);
+ unsigned vector, me;
+
+ if (likely(!cfg->move_in_progress))
+ return;
+
+ vector = ~get_irq_regs()->orig_ax;
+ me = smp_processor_id();
+ if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
+ cpumask_t cleanup_mask;
+
+ cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+ cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+ send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
cfg->move_in_progress = 0;
}
}
#else
static inline void irq_complete_move(unsigned int irq) {}
#endif
+#ifdef CONFIG_INTR_REMAP
+static void ack_x2apic_level(unsigned int irq)
+{
+ ack_x2APIC_irq();
+}
+
+static void ack_x2apic_edge(unsigned int irq)
+{
+ ack_x2APIC_irq();
+}
+#endif
static void ack_apic_edge(unsigned int irq)
{
@@ -2118,55 +2439,55 @@ static void ack_apic_edge(unsigned int irq)
#ifdef CONFIG_X86_64
static void ack_apic_level(unsigned int irq)
{
- int do_unmask_irq = 0;
+ int do_unmask_irq = 0;
- irq_complete_move(irq);
+ irq_complete_move(irq);
#ifdef CONFIG_GENERIC_PENDING_IRQ
- /* If we are moving the irq we need to mask it */
- if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
- do_unmask_irq = 1;
- mask_IO_APIC_irq(irq);
- }
+ /* If we are moving the irq we need to mask it */
+ if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
+ do_unmask_irq = 1;
+ mask_IO_APIC_irq(irq);
+ }
#endif
- /*
- * We must acknowledge the irq before we move it or the acknowledge will
- * not propagate properly.
- */
- ack_APIC_irq();
-
- /* Now we can move and renable the irq */
- if (unlikely(do_unmask_irq)) {
- /* Only migrate the irq if the ack has been received.
- *
- * On rare occasions the broadcast level triggered ack gets
- * delayed going to ioapics, and if we reprogram the
- * vector while Remote IRR is still set the irq will never
- * fire again.
- *
- * To prevent this scenario we read the Remote IRR bit
- * of the ioapic. This has two effects.
- * - On any sane system the read of the ioapic will
- * flush writes (and acks) going to the ioapic from
- * this cpu.
- * - We get to see if the ACK has actually been delivered.
- *
- * Based on failed experiments of reprogramming the
- * ioapic entry from outside of irq context starting
- * with masking the ioapic entry and then polling until
- * Remote IRR was clear before reprogramming the
- * ioapic I don't trust the Remote IRR bit to be
- * completey accurate.
- *
- * However there appears to be no other way to plug
- * this race, so if the Remote IRR bit is not
- * accurate and is causing problems then it is a hardware bug
- * and you can go talk to the chipset vendor about it.
- */
- if (!io_apic_level_ack_pending(irq))
- move_masked_irq(irq, desc);
- unmask_IO_APIC_irq(irq);
- }
+ /*
+ * We must acknowledge the irq before we move it or the acknowledge will
+ * not propagate properly.
+ */
+ ack_APIC_irq();
+
+ /* Now we can move and renable the irq */
+ if (unlikely(do_unmask_irq)) {
+ /* Only migrate the irq if the ack has been received.
+ *
+ * On rare occasions the broadcast level triggered ack gets
+ * delayed going to ioapics, and if we reprogram the
+ * vector while Remote IRR is still set the irq will never
+ * fire again.
+ *
+ * To prevent this scenario we read the Remote IRR bit
+ * of the ioapic. This has two effects.
+ * - On any sane system the read of the ioapic will
+ * flush writes (and acks) going to the ioapic from
+ * this cpu.
+ * - We get to see if the ACK has actually been delivered.
+ *
+ * Based on failed experiments of reprogramming the
+ * ioapic entry from outside of irq context starting
+ * with masking the ioapic entry and then polling until
+ * Remote IRR was clear before reprogramming the
+ * ioapic I don't trust the Remote IRR bit to be
+ * completey accurate.
+ *
+ * However there appears to be no other way to plug
+ * this race, so if the Remote IRR bit is not
+ * accurate and is causing problems then it is a hardware bug
+ * and you can go talk to the chipset vendor about it.
+ */
+ if (!io_apic_level_ack_pending(irq))
+ move_masked_irq(irq);
+ unmask_IO_APIC_irq(irq);
+ }
}
#else
atomic_t irq_mis_count;
@@ -2177,25 +2498,25 @@ static void ack_apic_level(unsigned int irq)
irq_complete_move(irq);
move_native_irq(irq);
-/*
- * It appears there is an erratum which affects at least version 0x11
- * of I/O APIC (that's the 82093AA and cores integrated into various
- * chipsets). Under certain conditions a level-triggered interrupt is
- * erroneously delivered as edge-triggered one but the respective IRR
- * bit gets set nevertheless. As a result the I/O unit expects an EOI
- * message but it will never arrive and further interrupts are blocked
- * from the source. The exact reason is so far unknown, but the
- * phenomenon was observed when two consecutive interrupt requests
- * from a given source get delivered to the same CPU and the source is
- * temporarily disabled in between.
- *
- * A workaround is to simulate an EOI message manually. We achieve it
- * by setting the trigger mode to edge and then to level when the edge
- * trigger mode gets detected in the TMR of a local APIC for a
- * level-triggered interrupt. We mask the source for the time of the
- * operation to prevent an edge-triggered interrupt escaping meanwhile.
- * The idea is from Manfred Spraul. --macro
- */
+ /*
+ * It appears there is an erratum which affects at least version 0x11
+ * of I/O APIC (that's the 82093AA and cores integrated into various
+ * chipsets). Under certain conditions a level-triggered interrupt is
+ * erroneously delivered as edge-triggered one but the respective IRR
+ * bit gets set nevertheless. As a result the I/O unit expects an EOI
+ * message but it will never arrive and further interrupts are blocked
+ * from the source. The exact reason is so far unknown, but the
+ * phenomenon was observed when two consecutive interrupt requests
+ * from a given source get delivered to the same CPU and the source is
+ * temporarily disabled in between.
+ *
+ * A workaround is to simulate an EOI message manually. We achieve it
+ * by setting the trigger mode to edge and then to level when the edge
+ * trigger mode gets detected in the TMR of a local APIC for a
+ * level-triggered interrupt. We mask the source for the time of the
+ * operation to prevent an edge-triggered interrupt escaping meanwhile.
+ * The idea is from Manfred Spraul. --macro
+ */
i = irq_cfg(irq)->vector;
v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
@@ -2225,6 +2546,20 @@ static struct irq_chip ioapic_chip __read_mostly = {
.retrigger = ioapic_retrigger_irq,
};
+#ifdef CONFIG_INTR_REMAP
+static struct irq_chip ir_ioapic_chip __read_mostly = {
+ .name = "IR-IO-APIC",
+ .startup = startup_ioapic_irq,
+ .mask = mask_IO_APIC_irq,
+ .unmask = unmask_IO_APIC_irq,
+ .ack = ack_x2apic_edge,
+ .eoi = ack_x2apic_level,
+#ifdef CONFIG_SMP
+ .set_affinity = set_ir_ioapic_affinity_irq,
+#endif
+ .retrigger = ioapic_retrigger_irq,
+};
+#endif
static inline void init_IO_APIC_traps(void)
{
@@ -2282,7 +2617,7 @@ static void unmask_lapic_irq(unsigned int irq)
apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}
-static void ack_lapic_irq(unsigned int irq)
+static void ack_lapic_irq (unsigned int irq)
{
ack_APIC_irq();
}
@@ -2383,12 +2718,12 @@ static inline void __init unlock_ExtINT_logic(void)
static int disable_timer_pin_1 __initdata;
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
-static int __init parse_disable_timer_pin_1(char *arg)
+static int __init disable_timer_pin_setup(char *arg)
{
disable_timer_pin_1 = 1;
return 0;
}
-early_param("disable_timer_pin_1", parse_disable_timer_pin_1);
+early_param("disable_timer_pin_1", disable_timer_pin_setup);
int timer_through_8259 __initdata;
@@ -2397,6 +2732,8 @@ int timer_through_8259 __initdata;
* a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
* is so screwy. Thanks to Brian Perkins for testing/hacking this beast
* fanatically on his truly buggy board.
+ *
+ * FIXME: really need to revamp this for all platforms.
*/
static inline void __init check_timer(void)
{
@@ -2408,8 +2745,8 @@ static inline void __init check_timer(void)
local_irq_save(flags);
- ver = apic_read(APIC_LVR);
- ver = GET_APIC_VERSION(ver);
+ ver = apic_read(APIC_LVR);
+ ver = GET_APIC_VERSION(ver);
/*
* get/set the timer IRQ vector:
@@ -2428,7 +2765,9 @@ static inline void __init check_timer(void)
*/
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
+#ifdef CONFIG_X86_32
timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
+#endif
pin1 = find_isa_irq_pin(0, mp_INT);
apic1 = find_isa_irq_apic(0, mp_INT);
@@ -2447,6 +2786,10 @@ static inline void __init check_timer(void)
* 8259A.
*/
if (pin1 == -1) {
+#ifdef CONFIG_INTR_REMAP
+ if (intr_remapping_enabled)
+ panic("BIOS bug: timer not connected to IO-APIC");
+#endif
pin1 = pin2;
apic1 = apic2;
no_pin1 = 1;
@@ -2473,6 +2816,10 @@ static inline void __init check_timer(void)
clear_IO_APIC_pin(0, pin1);
goto out;
}
+#ifdef CONFIG_INTR_REMAP
+ if (intr_remapping_enabled)
+ panic("timer doesn't work through Interrupt-remapped IO-APIC");
+#endif
clear_IO_APIC_pin(apic1, pin1);
if (!no_pin1)
apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
@@ -2512,7 +2859,9 @@ static inline void __init check_timer(void)
"through the IO-APIC - disabling NMI Watchdog!\n");
nmi_watchdog = NMI_NONE;
}
+#ifdef CONFIG_X86_32
timer_ack = 0;
+#endif
apic_printk(APIC_QUIET, KERN_INFO
"...trying to set up timer as Virtual Wire IRQ...\n");
@@ -2570,17 +2919,25 @@ static inline void __init check_timer(void)
void __init setup_IO_APIC(void)
{
+
+#ifdef CONFIG_X86_32
enable_IO_APIC();
+#else
+ /*
+ * calling enable_IO_APIC() is moved to setup_local_APIC for BP
+ */
+#endif
io_apic_irqs = ~PIC_IRQS;
- printk("ENABLING IO-APIC IRQs\n");
-
- /*
- * Set up IO-APIC IRQ routing.
- */
- if (!acpi_ioapic)
- setup_ioapic_ids_from_mpc();
+ apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
+ /*
+ * Set up IO-APIC IRQ routing.
+ */
+#ifdef CONFIG_X86_32
+ if (!acpi_ioapic)
+ setup_ioapic_ids_from_mpc();
+#endif
sync_Arb_IDs();
setup_IO_APIC_irqs();
init_IO_APIC_traps();
@@ -2588,15 +2945,15 @@ void __init setup_IO_APIC(void)
}
/*
- * Called after all the initialization is done. If we didnt find any
- * APIC bugs then we can allow the modify fast path
+ * Called after all the initialization is done. If we didnt find any
+ * APIC bugs then we can allow the modify fast path
*/
static int __init io_apic_bug_finalize(void)
{
- if (sis_apic_bug == -1)
- sis_apic_bug = 0;
- return 0;
+ if (sis_apic_bug == -1)
+ sis_apic_bug = 0;
+ return 0;
}
late_initcall(io_apic_bug_finalize);
@@ -2605,7 +2962,7 @@ struct sysfs_ioapic_data {
struct sys_device dev;
struct IO_APIC_route_entry entry[0];
};
-static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
@@ -2615,8 +2972,8 @@ static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
data = container_of(dev, struct sysfs_ioapic_data, dev);
entry = data->entry;
- for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
- entry[i] = ioapic_read_entry(dev->id, i);
+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ )
+ *entry = ioapic_read_entry(dev->id, i);
return 0;
}
@@ -2653,14 +3010,14 @@ static struct sysdev_class ioapic_sysdev_class = {
static int __init ioapic_init_sysfs(void)
{
- struct sys_device *dev;
- int i, size, error = 0;
+ struct sys_device * dev;
+ int i, size, error;
error = sysdev_class_register(&ioapic_sysdev_class);
if (error)
return error;
- for (i = 0; i < nr_ioapics; i++) {
+ for (i = 0; i < nr_ioapics; i++ ) {
size = sizeof(struct sys_device) + nr_ioapic_registers[i]
* sizeof(struct IO_APIC_route_entry);
mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
@@ -2691,18 +3048,18 @@ device_initcall(ioapic_init_sysfs);
unsigned int create_irq_nr(unsigned int irq_want)
{
/* Allocate an unused irq */
- unsigned int irq, new;
+ unsigned int irq;
+ unsigned int new;
unsigned long flags;
struct irq_cfg *cfg_new;
#ifndef CONFIG_HAVE_SPARSE_IRQ
- /* only can use bus/dev/fn.. when per_cpu vector is used */
irq_want = nr_irqs - 1;
#endif
irq = 0;
spin_lock_irqsave(&vector_lock, flags);
- for (new = (nr_irqs - 1); new > 0; new--) {
+ for (new = irq_want; new > 0; new--) {
if (platform_legacy_irq(new))
continue;
cfg_new = irq_cfg(new);
@@ -2725,7 +3082,14 @@ unsigned int create_irq_nr(unsigned int irq_want)
int create_irq(void)
{
- return create_irq_nr(nr_irqs - 1);
+ int irq;
+
+ irq = create_irq_nr(nr_irqs - 1);
+
+ if (irq == 0)
+ irq = -1;
+
+ return irq;
}
void destroy_irq(unsigned int irq)
@@ -2734,6 +3098,9 @@ void destroy_irq(unsigned int irq)
dynamic_irq_cleanup(irq);
+#ifdef CONFIG_INTR_REMAP
+ free_irte(irq);
+#endif
spin_lock_irqsave(&vector_lock, flags);
__clear_irq_vector(irq);
spin_unlock_irqrestore(&vector_lock, flags);
@@ -2759,25 +3126,54 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
cpus_and(tmp, cfg->domain, tmp);
dest = cpu_mask_to_apicid(tmp);
- msg->address_hi = MSI_ADDR_BASE_HI;
- msg->address_lo =
- MSI_ADDR_BASE_LO |
- ((INT_DEST_MODE == 0) ?
- MSI_ADDR_DEST_MODE_PHYSICAL:
- MSI_ADDR_DEST_MODE_LOGICAL) |
- ((INT_DELIVERY_MODE != dest_LowestPrio) ?
- MSI_ADDR_REDIRECTION_CPU:
- MSI_ADDR_REDIRECTION_LOWPRI) |
- MSI_ADDR_DEST_ID(dest);
-
- msg->data =
- MSI_DATA_TRIGGER_EDGE |
- MSI_DATA_LEVEL_ASSERT |
- ((INT_DELIVERY_MODE != dest_LowestPrio) ?
- MSI_DATA_DELIVERY_FIXED:
- MSI_DATA_DELIVERY_LOWPRI) |
- MSI_DATA_VECTOR(cfg->vector);
+#ifdef CONFIG_INTR_REMAP
+ if (irq_remapped(irq)) {
+ struct irte irte;
+ int ir_index;
+ u16 sub_handle;
+
+ ir_index = map_irq_to_irte_handle(irq, &sub_handle);
+ BUG_ON(ir_index == -1);
+
+ memset (&irte, 0, sizeof(irte));
+
+ irte.present = 1;
+ irte.dst_mode = INT_DEST_MODE;
+ irte.trigger_mode = 0; /* edge */
+ irte.dlvry_mode = INT_DELIVERY_MODE;
+ irte.vector = cfg->vector;
+ irte.dest_id = IRTE_DEST(dest);
+
+ modify_irte(irq, &irte);
+
+ msg->address_hi = MSI_ADDR_BASE_HI;
+ msg->data = sub_handle;
+ msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
+ MSI_ADDR_IR_SHV |
+ MSI_ADDR_IR_INDEX1(ir_index) |
+ MSI_ADDR_IR_INDEX2(ir_index);
+ } else
+#endif
+ {
+ msg->address_hi = MSI_ADDR_BASE_HI;
+ msg->address_lo =
+ MSI_ADDR_BASE_LO |
+ ((INT_DEST_MODE == 0) ?
+ MSI_ADDR_DEST_MODE_PHYSICAL:
+ MSI_ADDR_DEST_MODE_LOGICAL) |
+ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+ MSI_ADDR_REDIRECTION_CPU:
+ MSI_ADDR_REDIRECTION_LOWPRI) |
+ MSI_ADDR_DEST_ID(dest);
+ msg->data =
+ MSI_DATA_TRIGGER_EDGE |
+ MSI_DATA_LEVEL_ASSERT |
+ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+ MSI_DATA_DELIVERY_FIXED:
+ MSI_DATA_DELIVERY_LOWPRI) |
+ MSI_DATA_VECTOR(cfg->vector);
+ }
return err;
}
@@ -2788,6 +3184,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
struct msi_msg msg;
unsigned int dest;
cpumask_t tmp;
+ struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
@@ -2808,8 +3205,61 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
write_msi_msg(irq, &msg);
- irq_to_desc(irq)->affinity = mask;
+ desc = irq_to_desc(irq);
+ desc->affinity = mask;
}
+
+#ifdef CONFIG_INTR_REMAP
+/*
+ * Migrate the MSI irq to another cpumask. This migration is
+ * done in the process context using interrupt-remapping hardware.
+ */
+static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+{
+ struct irq_cfg *cfg;
+ unsigned int dest;
+ cpumask_t tmp, cleanup_mask;
+ struct irte irte;
+ struct irq_desc *desc;
+
+ cpus_and(tmp, mask, cpu_online_map);
+ if (cpus_empty(tmp))
+ return;
+
+ if (get_irte(irq, &irte))
+ return;
+
+ if (assign_irq_vector(irq, mask))
+ return;
+
+ cfg = irq_cfg(irq);
+ cpus_and(tmp, cfg->domain, mask);
+ dest = cpu_mask_to_apicid(tmp);
+
+ irte.vector = cfg->vector;
+ irte.dest_id = IRTE_DEST(dest);
+
+ /*
+ * atomically update the IRTE with the new destination and vector.
+ */
+ modify_irte(irq, &irte);
+
+ /*
+ * After this point, all the interrupts will start arriving
+ * at the new destination. So, time to cleanup the previous
+ * vector allocation.
+ */
+ if (cfg->move_in_progress) {
+ cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+ cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+ send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+ cfg->move_in_progress = 0;
+ }
+
+ desc = irq_to_desc(irq);
+ desc->affinity = mask;
+}
+#endif
#endif /* CONFIG_SMP */
/*
@@ -2827,6 +3277,45 @@ static struct irq_chip msi_chip = {
.retrigger = ioapic_retrigger_irq,
};
+#ifdef CONFIG_INTR_REMAP
+static struct irq_chip msi_ir_chip = {
+ .name = "IR-PCI-MSI",
+ .unmask = unmask_msi_irq,
+ .mask = mask_msi_irq,
+ .ack = ack_x2apic_edge,
+#ifdef CONFIG_SMP
+ .set_affinity = ir_set_msi_irq_affinity,
+#endif
+ .retrigger = ioapic_retrigger_irq,
+};
+
+/*
+ * Map the PCI dev to the corresponding remapping hardware unit
+ * and allocate 'nvec' consecutive interrupt-remapping table entries
+ * in it.
+ */
+static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
+{
+ struct intel_iommu *iommu;
+ int index;
+
+ iommu = map_dev_to_ir(dev);
+ if (!iommu) {
+ printk(KERN_ERR
+ "Unable to map PCI %s to iommu\n", pci_name(dev));
+ return -ENOENT;
+ }
+
+ index = alloc_irte(iommu, irq, nvec);
+ if (index < 0) {
+ printk(KERN_ERR
+ "Unable to allocate %d IRTE for PCI %s\n", nvec,
+ pci_name(dev));
+ return -ENOSPC;
+ }
+ return index;
+}
+#endif
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
{
@@ -2840,7 +3329,17 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
set_irq_msi(irq, desc);
write_msi_msg(irq, &msg);
- set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
+#ifdef CONFIG_INTR_REMAP
+ if (irq_remapped(irq)) {
+ struct irq_desc *desc = irq_to_desc(irq);
+ /*
+ * irq migration in process context
+ */
+ desc->status |= IRQ_MOVE_PCNTXT;
+ set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
+ } else
+#endif
+ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
return 0;
}
@@ -2859,59 +3358,164 @@ static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
- int irq, ret;
-
+ unsigned int irq;
+ int ret;
unsigned int irq_want;
irq_want = build_irq_for_pci_dev(dev) + 0x100;
irq = create_irq_nr(irq_want);
-
if (irq == 0)
return -1;
+#ifdef CONFIG_INTR_REMAP
+ if (!intr_remapping_enabled)
+ goto no_ir;
+
+ ret = msi_alloc_irte(dev, irq, 1);
+ if (ret < 0)
+ goto error;
+no_ir:
+#endif
ret = setup_msi_irq(dev, desc, irq);
if (ret < 0) {
destroy_irq(irq);
return ret;
- }
-
+ }
return 0;
+
+#ifdef CONFIG_INTR_REMAP
+error:
+ destroy_irq(irq);
+ return ret;
+#endif
}
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
- unsigned int irq;
- int ret, sub_handle;
- struct msi_desc *desc;
- unsigned int irq_want;
-
- irq_want = build_irq_for_pci_dev(dev) + 0x100;
- sub_handle = 0;
- list_for_each_entry(desc, &dev->msi_list, list) {
- irq = create_irq_nr(irq_want--);
- if (irq == 0)
- return -1;
- ret = setup_msi_irq(dev, desc, irq);
- if (ret < 0)
- goto error;
- sub_handle++;
- }
- return 0;
+ unsigned int irq;
+ int ret, sub_handle;
+ struct msi_desc *desc;
+ unsigned int irq_want;
+
+#ifdef CONFIG_INTR_REMAP
+ struct intel_iommu *iommu = 0;
+ int index = 0;
+#endif
+
+ irq_want = build_irq_for_pci_dev(dev) + 0x100;
+ sub_handle = 0;
+ list_for_each_entry(desc, &dev->msi_list, list) {
+ irq = create_irq_nr(irq_want--);
+ if (irq == 0)
+ return -1;
+#ifdef CONFIG_INTR_REMAP
+ if (!intr_remapping_enabled)
+ goto no_ir;
+
+ if (!sub_handle) {
+ /*
+ * allocate the consecutive block of IRTE's
+ * for 'nvec'
+ */
+ index = msi_alloc_irte(dev, irq, nvec);
+ if (index < 0) {
+ ret = index;
+ goto error;
+ }
+ } else {
+ iommu = map_dev_to_ir(dev);
+ if (!iommu) {
+ ret = -ENOENT;
+ goto error;
+ }
+ /*
+ * setup the mapping between the irq and the IRTE
+ * base index, the sub_handle pointing to the
+ * appropriate interrupt remap table entry.
+ */
+ set_irte_irq(irq, iommu, index, sub_handle);
+ }
+no_ir:
+#endif
+ ret = setup_msi_irq(dev, desc, irq);
+ if (ret < 0)
+ goto error;
+ sub_handle++;
+ }
+ return 0;
error:
- destroy_irq(irq);
- return ret;
+ destroy_irq(irq);
+ return ret;
}
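The multi-MSI loop above allocates one consecutive block of interrupt-remapping table entries per device: the first descriptor (sub_handle 0) grabs the block via msi_alloc_irte() and remembers its base index, and every later descriptor is wired to base + sub_handle through set_irte_irq(). A minimal standalone model of that bookkeeping (made-up names and a toy table, not the kernel's interrupt-remapping API):

/* Toy model of "allocate a consecutive IRTE block once, then index it
 * with sub_handle".  TABLE_SIZE and the allocator are stand-ins. */
#include <stdio.h>

#define TABLE_SIZE 16

static int table_used[TABLE_SIZE];

/* find nvec free consecutive slots, mark them used, return the base index */
static int sketch_alloc_irte_block(int nvec)
{
	for (int base = 0; base + nvec <= TABLE_SIZE; base++) {
		int free_run = 1;

		for (int i = 0; i < nvec; i++)
			if (table_used[base + i])
				free_run = 0;
		if (!free_run)
			continue;
		for (int i = 0; i < nvec; i++)
			table_used[base + i] = 1;
		return base;
	}
	return -1;	/* the real code returns -ENOSPC here */
}

int main(void)
{
	int nvec = 4;
	int base = sketch_alloc_irte_block(nvec);

	if (base < 0)
		return 1;
	for (int sub_handle = 0; sub_handle < nvec; sub_handle++)
		printf("irq %d -> IRTE slot %d\n", 100 + sub_handle, base + sub_handle);
	return 0;
}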
-
void arch_teardown_msi_irq(unsigned int irq)
{
destroy_irq(irq);
}
-#endif /* CONFIG_PCI_MSI */
+#ifdef CONFIG_DMAR
+#ifdef CONFIG_SMP
+static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+{
+ struct irq_cfg *cfg;
+ struct msi_msg msg;
+ unsigned int dest;
+ cpumask_t tmp;
+ struct irq_desc *desc;
+
+ cpus_and(tmp, mask, cpu_online_map);
+ if (cpus_empty(tmp))
+ return;
+
+ if (assign_irq_vector(irq, mask))
+ return;
+
+ cfg = irq_cfg(irq);
+ cpus_and(tmp, cfg->domain, mask);
+ dest = cpu_mask_to_apicid(tmp);
+
+ dmar_msi_read(irq, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+ dmar_msi_write(irq, &msg);
+ desc = irq_to_desc(irq);
+ desc->affinity = mask;
+}
+#endif /* CONFIG_SMP */
+
+struct irq_chip dmar_msi_type = {
+ .name = "DMAR_MSI",
+ .unmask = dmar_msi_unmask,
+ .mask = dmar_msi_mask,
+ .ack = ack_apic_edge,
+#ifdef CONFIG_SMP
+ .set_affinity = dmar_msi_set_affinity,
+#endif
+ .retrigger = ioapic_retrigger_irq,
+};
+
+int arch_setup_dmar_msi(unsigned int irq)
+{
+ int ret;
+ struct msi_msg msg;
+ ret = msi_compose_msg(NULL, irq, &msg);
+ if (ret < 0)
+ return ret;
+ dmar_msi_write(irq, &msg);
+ set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+ "edge");
+ return 0;
+}
+#endif
+
+#endif /* CONFIG_PCI_MSI */
/*
* Hypertransport interrupt support
*/
@@ -2938,6 +3542,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
struct irq_cfg *cfg;
unsigned int dest;
cpumask_t tmp;
+ struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
@@ -2951,7 +3556,8 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
dest = cpu_mask_to_apicid(tmp);
target_ht_irq(irq, dest, cfg->vector);
- irq_to_desc(irq)->affinity = mask;
+ desc = irq_to_desc(irq);
+ desc->affinity = mask;
}
#endif
@@ -2974,7 +3580,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
tmp = TARGET_CPUS;
err = assign_irq_vector(irq, tmp);
- if ( !err) {
+ if (!err) {
struct ht_irq_msg msg;
unsigned dest;
@@ -3007,11 +3613,12 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
#endif /* CONFIG_HT_IRQ */
/* --------------------------------------------------------------------------
- ACPI-based IOAPIC Configuration
+ ACPI-based IOAPIC Configuration
-------------------------------------------------------------------------- */
#ifdef CONFIG_ACPI
+#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
union IO_APIC_reg_00 reg_00;
@@ -3086,7 +3693,6 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
return apic_id;
}
-
int __init io_apic_get_version(int ioapic)
{
union IO_APIC_reg_01 reg_01;
@@ -3098,9 +3704,9 @@ int __init io_apic_get_version(int ioapic)
return reg_01.bits.version;
}
+#endif
-
-int __init io_apic_get_redir_entries(int ioapic)
+int __init io_apic_get_redir_entries (int ioapic)
{
union IO_APIC_reg_01 reg_01;
unsigned long flags;
@@ -3113,10 +3719,10 @@ int __init io_apic_get_redir_entries(int ioapic)
}
-int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
if (!IO_APIC_IRQ(irq)) {
- printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
+ apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
ioapic);
return -EINVAL;
}
@@ -3132,6 +3738,7 @@ int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int po
return 0;
}
+
int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
int i;
@@ -3163,7 +3770,6 @@ void __init setup_ioapic_dest(void)
{
int pin, ioapic, irq, irq_entry;
struct irq_cfg *cfg;
- struct irq_desc *desc;
if (skip_ioapic_setup == 1)
return;
@@ -3184,43 +3790,124 @@ void __init setup_ioapic_dest(void)
setup_IO_APIC_irq(ioapic, pin, irq,
irq_trigger(irq_entry),
irq_polarity(irq_entry));
- else {
- desc = irq_to_desc(irq);
+#ifdef CONFIG_INTR_REMAP
+ else if (intr_remapping_enabled)
+ set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
+#endif
+ else
set_ioapic_affinity_irq(irq, TARGET_CPUS);
- }
}
}
}
#endif
+#ifdef CONFIG_X86_64
+#define IOAPIC_RESOURCE_NAME_SIZE 11
+
+static struct resource *ioapic_resources;
+
+static struct resource * __init ioapic_setup_resources(void)
+{
+ unsigned long n;
+ struct resource *res;
+ char *mem;
+ int i;
+
+ if (nr_ioapics <= 0)
+ return NULL;
+
+ n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
+ n *= nr_ioapics;
+
+ mem = alloc_bootmem(n);
+ res = (void *)mem;
+
+ if (mem != NULL) {
+ mem += sizeof(struct resource) * nr_ioapics;
+
+ for (i = 0; i < nr_ioapics; i++) {
+ res[i].name = mem;
+ res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ sprintf(mem, "IOAPIC %u", i);
+ mem += IOAPIC_RESOURCE_NAME_SIZE;
+ }
+ }
+
+ ioapic_resources = res;
+
+ return res;
+}
+#endif
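ioapic_setup_resources() above carves one bootmem allocation into an array of resource records followed by a fixed-size name buffer per IO-APIC, so each name pointer aims back into the same block. A small userspace sketch of that layout (stand-in types and sizes, not the kernel's struct resource or allocator):

/* One allocation, two regions: records first, then NAME_SIZE bytes of
 * name storage per record. */
#include <stdio.h>
#include <stdlib.h>

#define NAME_SIZE 11	/* mirrors IOAPIC_RESOURCE_NAME_SIZE */

struct sketch_resource {
	const char *name;
	unsigned long start, end;
};

int main(void)
{
	int nr = 3;
	size_t n = (sizeof(struct sketch_resource) + NAME_SIZE) * nr;
	char *mem = calloc(1, n);
	struct sketch_resource *res = (void *)mem;

	if (!mem)
		return 1;

	/* the name buffers start right after the array of records */
	mem += sizeof(struct sketch_resource) * nr;
	for (int i = 0; i < nr; i++) {
		res[i].name = mem;
		snprintf(mem, NAME_SIZE, "IOAPIC %u", i);
		mem += NAME_SIZE;
	}

	for (int i = 0; i < nr; i++)
		printf("%s\n", res[i].name);
	free(res);
	return 0;
}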
+
void __init ioapic_init_mappings(void)
{
unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
int i;
+#ifdef CONFIG_X86_64
+ struct resource *ioapic_res;
+ ioapic_res = ioapic_setup_resources();
+#endif
for (i = 0; i < nr_ioapics; i++) {
if (smp_found_config) {
ioapic_phys = mp_ioapics[i].mp_apicaddr;
- if (!ioapic_phys) {
- printk(KERN_ERR
- "WARNING: bogus zero IO-APIC "
- "address found in MPTABLE, "
- "disabling IO/APIC support!\n");
- smp_found_config = 0;
- skip_ioapic_setup = 1;
- goto fake_ioapic_page;
- }
+#ifdef CONFIG_X86_32
+ if (!ioapic_phys) {
+ printk(KERN_ERR
+ "WARNING: bogus zero IO-APIC "
+ "address found in MPTABLE, "
+ "disabling IO/APIC support!\n");
+ smp_found_config = 0;
+ skip_ioapic_setup = 1;
+ goto fake_ioapic_page;
+ }
+#endif
} else {
+#ifdef CONFIG_X86_32
fake_ioapic_page:
+#endif
ioapic_phys = (unsigned long)
- alloc_bootmem_pages(PAGE_SIZE);
+ alloc_bootmem_pages(PAGE_SIZE);
ioapic_phys = __pa(ioapic_phys);
}
set_fixmap_nocache(idx, ioapic_phys);
- printk(KERN_DEBUG "mapped IOAPIC to %08lx (%08lx)\n",
- __fix_to_virt(idx), ioapic_phys);
+ apic_printk(APIC_VERBOSE,
+ "mapped IOAPIC to %08lx (%08lx)\n",
+ __fix_to_virt(idx), ioapic_phys);
idx++;
+
+#ifdef CONFIG_X86_64
+ if (ioapic_res != NULL) {
+ ioapic_res->start = ioapic_phys;
+ ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
+ ioapic_res++;
+ }
+#endif
}
}
+#ifdef CONFIG_X86_64
+static int __init ioapic_insert_resources(void)
+{
+ int i;
+ struct resource *r = ioapic_resources;
+
+ if (!r) {
+ printk(KERN_ERR
+ "IO APIC resources could be not be allocated.\n");
+ return -1;
+ }
+
+ for (i = 0; i < nr_ioapics; i++) {
+ insert_resource(&iomem_resource, r);
+ r++;
+ }
+
+ return 0;
+}
+
+/* Insert the IO APIC resources after PCI initialization has occurred to handle
+ * IO APICS that are mapped in on a BAR in PCI space. */
+late_initcall(ioapic_insert_resources);
+#endif
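The comment above is also why the insertion runs from a late_initcall() rather than from ioapic_init_mappings(): it has to execute after PCI enumeration has assigned BARs. The general shape of that pattern, with a hypothetical function name:

/* Sketch of the late_initcall() registration used above; the function
 * name is made up, only the ordering mechanism is the point. */
#include <linux/init.h>
#include <linux/kernel.h>

static int __init sketch_after_pci_init(void)
{
	printk(KERN_INFO "runs after the normal device initcalls, i.e. after PCI enumeration\n");
	return 0;
}
late_initcall(sketch_after_pci_init);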
commit e955b5398b660a204854bdff059d050b44090879
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Aug 19 20:50:37 2008 -0700
sparseirq: fix lockdep
-tip testing found this lockdep splat:
[ 0.000000] Initializing CPU#0
[ 0.000000] found new irq_desc for irq 0
[ 0.000000] INFO: trying to register non-static key.
[ 0.000000] the code is fine but needs lockdep annotation.
[ 0.000000] turning off the locking correctness validator.
[ 0.000000] Pid: 0, comm: swapper Not tainted 2.6.27-rc3-tip-00191-g98ccb89-dirty #1
[ 0.000000] [<c0153c22>] register_lock_class+0x3d2/0x400
[ 0.000000] [<c0104d87>] ? mcount_call+0x5/0xa
[ 0.000000] [<c0154f3a>] __lock_acquire+0x22a/0x5d0
[ 0.000000] [<c0104d87>] ? mcount_call+0x5/0xa
[ 0.000000] [<c0155351>] lock_acquire+0x71/0xa0
[ 0.000000] [<c016d61f>] ? set_irq_chip+0x3f/0x90
[ 0.000000] [<c070f148>] _spin_lock_irqsave+0x58/0x90
[ 0.000000] [<c016d61f>] ? set_irq_chip+0x3f/0x90
[ 0.000000] [<c016d61f>] set_irq_chip+0x3f/0x90
[ 0.000000] [<c016d7e0>] ? handle_level_irq+0x0/0xe0
[ 0.000000] [<c016da1a>] set_irq_chip_and_handler_name+0x1a/0x40
[ 0.000000] [<c0a396c1>] init_ISA_irqs+0x51/0xa0
[ 0.000000] [<c0a4a365>] pre_intr_init_hook+0x25/0x30
[ 0.000000] [<c0a39723>] native_init_IRQ+0x13/0x370
[ 0.000000] [<c015569c>] ? lock_release+0xcc/0x1d0
[ 0.000000] [<c0104d87>] ? mcount_call+0x5/0xa
[ 0.000000] [<c070dc22>] ? __mutex_unlock_slowpath+0x92/0x110
[ 0.000000] [<c070dcad>] ? mutex_unlock+0xd/0x10
[ 0.000000] [<c0135f62>] ? cpu_maps_update_done+0x12/0x20
[ 0.000000] [<c06c6743>] ? register_cpu_notifier+0x23/0x30
[ 0.000000] [<c011e8ae>] init_IRQ+0xe/0x10
[ 0.000000] [<c0a357a5>] start_kernel+0x1c5/0x340
[ 0.000000] [<c0a35280>] ? unknown_bootoption+0x0/0x210
[ 0.000000] [<c0a3506b>] i386_start_kernel+0x6b/0x80
[ 0.000000] =======================
[ 0.000000] found new irq_desc for irq 1
[ 0.000000] found new irq_desc for irq 2
[ 0.000000] found new irq_desc for irq 3
this:
static void init_one_irq_desc(struct irq_desc *desc)
{
memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
#ifdef CONFIG_TRACE_IRQFLAGS
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
#endif
}
should be unconditional.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 6d174390f3a0..24c83a3cee4d 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -18,13 +18,10 @@
#include "internals.h"
-#ifdef CONFIG_TRACE_IRQFLAGS
-
/*
* lockdep: we want to handle all irq_desc locks as a single lock-class:
*/
static struct lock_class_key irq_desc_lock_class;
-#endif
/**
* handle_bad_irq - handle spurious and unhandled irqs
@@ -75,9 +72,7 @@ static struct irq_desc irq_desc_init = {
static void init_one_irq_desc(struct irq_desc *desc)
{
memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-#ifdef CONFIG_TRACE_IRQFLAGS
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-#endif
}
extern int after_bootmem;
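The fix uses the standard lockdep idiom for locks created at run time from a template: one static lock_class_key shared by all of them, applied with lockdep_set_class() right after initialization. Sketch of that pattern on a made-up structure (not the irq core's code):

/* Every sketch_obj lock is mapped to the same lock class, so lockdep sees
 * one class instead of many copied, unannotated locks. */
#include <linux/spinlock.h>
#include <linux/lockdep.h>

static struct lock_class_key sketch_lock_class;

struct sketch_obj {
	spinlock_t lock;
};

static void sketch_obj_init(struct sketch_obj *obj)
{
	spin_lock_init(&obj->lock);
	/* unconditional, as in the fix above: locks memcpy()ed from a
	 * template otherwise trip "trying to register non-static key" */
	lockdep_set_class(&obj->lock, &sketch_lock_class);
}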
commit 3bf52a4df3ccd25d4154797977c556a2a8b3bc1e
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Aug 19 20:50:29 2008 -0700
irq: sparse irqs, fix IRQ auto-probe crash
fix:
[ 10.631533] calling yenta_socket_init+0x0/0x20
[ 10.631533] Yenta: CardBus bridge found at 0000:15:00.0 [17aa:2012]
[ 10.631533] Yenta: Using INTVAL to route CSC interrupts to PCI
[ 10.631533] Yenta: Routing CardBus interrupts to PCI
[ 10.631533] Yenta TI: socket 0000:15:00.0, mfunc 0x01d01002, devctl 0x64
[ 10.731599] BUG: unable to handle kernel NULL pointer dereference at 00000040
[ 10.731838] IP: [<c0c95b5f>] _spin_lock_irq+0xf/0x20
[ 10.732221] *pde = 00000000
[ 10.732741] Oops: 0002 [#1] SMP
[ 10.733453]
[ 10.734253] Pid: 1, comm: swapper Tainted: G W (2.6.27-rc3-tip-00173-gd7eaa4f-dirty #1)
[ 10.735188] EIP: 0060:[<c0c95b5f>] EFLAGS: 00010002 CPU: 0
[ 10.735523] EIP is at _spin_lock_irq+0xf/0x20
[ 10.735523] EAX: 00000040 EBX: 00000000 ECX: f6e04c90 EDX: 00000100
[ 10.735523] ESI: 000000df EDI: f6e04c90 EBP: f7867df0 ESP: f7867df0
[ 10.735523] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068
[ 10.735523] Process swapper (pid: 1, ti=f7867000 task=f7870000 task.ti=f7867000)
[ 10.735523] Stack: f7867e04 c0155fbd 00000000 00000000 f6e04c90 f7867e5c c0c6e319 c0f6a074
[ 10.735523] f6e04c90 000017aa 00002012 c112b648 f791f240 c112b5e0 f7867e44 c010440b
[ 10.735523] f791f240 f791f29c c112b8ec f791f240 00000000 f7867e5c c048f893 03c0b648
[ 10.735523] Call Trace:
[ 10.735523] [<c0155fbd>] ? probe_irq_on+0x3d/0x140
[ 10.735523] [<c0c6e319>] ? yenta_probe+0x529/0x640
[ 10.735523] [<c010440b>] ? mcount_call+0x5/0xa
[ 10.735523] [<c048f893>] ? pci_match_device+0xa3/0xb0
[ 10.735523] [<c048fc1e>] ? pci_device_probe+0x5e/0x80
[ 10.735523] [<c0515423>] ? driver_probe_device+0x83/0x180
[ 10.735523] [<c0515594>] ? __driver_attach+0x74/0x80
[ 10.735523] [<c0514b69>] ? bus_for_each_dev+0x49/0x70
[ 10.735523] [<c051528e>] ? driver_attach+0x1e/0x20
[ 10.735523] [<c0515520>] ? __driver_attach+0x0/0x80
[ 10.735523] [<c05150d3>] ? bus_add_driver+0x1a3/0x220
[ 10.735523] [<c048fb60>] ? pci_device_remove+0x0/0x40
[ 10.735523] [<c05157f4>] ? driver_register+0x54/0x130
[ 10.735523] [<c048fe2f>] ? __pci_register_driver+0x4f/0x90
[ 10.735523] [<c11e9419>] ? yenta_socket_init+0x19/0x20
[ 10.735523] [<c0101125>] ? do_one_initcall+0x35/0x160
[ 10.735523] [<c11e9400>] ? yenta_socket_init+0x0/0x20
[ 10.735523] [<c01391a6>] ? __queue_work+0x36/0x50
[ 10.735523] [<c013922d>] ? queue_work_on+0x3d/0x50
[ 10.735523] [<c11a2758>] ? kernel_init+0x148/0x210
[ 10.735523] [<c11a2610>] ? kernel_init+0x0/0x210
[ 10.735523] [<c01043f3>] ? kernel_thread_helper+0x7/0x10
[ 10.735523] =======================
[ 10.735523] Code: 10 38 f2 74 06 f3 90 8a 10 eb f6 5d 89 c8 c3 8d b6 00 00 00 00 8d bc 27 00 00 00 00 55 89 e5 e8 a4 e8 46 ff fa ba 00 01 00 00 90 <66> 0f c1 10 38 f2 74 06 f3 90 8a 10 eb f6 5d c3 90 55 89 e5 53
as auto-probing wants to iterate over existing irqs.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index c45ab718cf07..b3a5549ea81e 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -40,6 +40,8 @@ unsigned long probe_irq_on(void)
*/
for (i = nr_irqs-1; i > 0; i--) {
desc = irq_to_desc(i);
+ if (!desc)
+ continue;
spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
@@ -70,6 +72,8 @@ unsigned long probe_irq_on(void)
*/
for (i = nr_irqs-1; i > 0; i--) {
desc = irq_to_desc(i);
+ if (!desc)
+ continue;
spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
@@ -93,6 +97,8 @@ unsigned long probe_irq_on(void)
unsigned int status;
desc = irq_to_desc(i);
+ if (!desc)
+ continue;
spin_lock_irq(&desc->lock);
status = desc->status;
@@ -134,6 +140,8 @@ unsigned int probe_irq_mask(unsigned long val)
struct irq_desc *desc = irq_to_desc(i);
unsigned int status;
+ if (!desc)
+ continue;
spin_lock_irq(&desc->lock);
status = desc->status;
@@ -177,6 +185,8 @@ int probe_irq_off(unsigned long val)
struct irq_desc *desc = irq_to_desc(i);
unsigned int status;
+ if (!desc)
+ continue;
spin_lock_irq(&desc->lock);
status = desc->status;
commit a84488c213a8cfc29200344a6fb6357d48c8ed85
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Aug 19 20:50:31 2008 -0700
irq: sparse irqs, fix #3
fix non-APIC UP build:
arch/x86/kernel/built-in.o: In function `setup_arch':
: undefined reference to `pin_map_size'
arch/x86/kernel/built-in.o: In function `setup_arch':
: undefined reference to `first_free_entry'
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 558ec26b08e2..02e3a6697977 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1074,8 +1074,10 @@ void __init setup_arch(char **cmdline_p)
nr_irqs = 32 * nr_cpu_ids + 224;
init_cpu_to_node();
#endif
+#ifdef CONFIG_X86_IO_APIC
pin_map_size = nr_irqs * 2;
first_free_entry = nr_irqs;
+#endif
init_apic_mappings();
ioapic_init_mappings();
commit fa42d10dd5e1ff373061c0526f272106512301f9
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Aug 19 20:50:30 2008 -0700
irq: sparse irqs, export nr_irqs
fix:
Building modules, stage 2.
MODPOST 458 modules
ERROR: "nr_irqs" [drivers/serial/8250.ko] undefined!
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e94eeca09ea9..6ce3bcc2b8f7 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -48,6 +48,7 @@ handle_bad_irq(unsigned int irq, struct irq_desc *desc)
* Controller mappings for all interrupt sources:
*/
int nr_irqs = NR_IRQS;
+EXPORT_SYMBOL_GPL(nr_irqs);
#ifdef CONFIG_HAVE_DYN_ARRAY
static struct irq_desc irq_desc_init __initdata = {
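The export matters because loadable modules reference nr_irqs directly; without EXPORT_SYMBOL_GPL() the MODPOST stage fails as in the log above. A minimal, hypothetical consumer module would look roughly like this:

/* Hypothetical module that only reads nr_irqs; the extern is spelled out
 * here for the sketch, in-tree code gets it from the irq headers. */
#include <linux/module.h>
#include <linux/kernel.h>

extern int nr_irqs;

static int __init nr_irqs_demo_init(void)
{
	printk(KERN_INFO "nr_irqs_demo: this kernel has %d irq descriptors\n", nr_irqs);
	return 0;
}

static void __exit nr_irqs_demo_exit(void)
{
}

module_init(nr_irqs_demo_init);
module_exit(nr_irqs_demo_exit);
/* GPL, because nr_irqs is exported with EXPORT_SYMBOL_GPL() */
MODULE_LICENSE("GPL");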
commit 5fef06e8c8c52aa7170dbbb068aa996d83738d38
Merge: 0c5d1eb77a8b 278429cff880
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Oct 16 16:51:32 2008 +0200
Merge branch 'linus' into genirq
commit b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40
Merge: 4f962d4d6592 278429cff880
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Oct 15 13:46:29 2008 +0200
Merge branch 'linus' into stackprotector
Conflicts:
arch/x86/kernel/Makefile
include/asm-x86/pda.h
diff --cc arch/x86/kernel/Makefile
index d001739d8b06,0d41f0343dc0..50632e16d01c
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@@ -13,21 -20,21 +20,22 @@@ endi
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp)
CFLAGS_hpet.o := $(nostackp)
- CFLAGS_tsc_64.o := $(nostackp)
+ CFLAGS_tsc.o := $(nostackp)
+CFLAGS_paravirt.o := $(nostackp)
obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
- obj-y += traps_$(BITS).o irq_$(BITS).o
+ obj-y += traps.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time_$(BITS).o ioport.o ldt.o
- obj-y += setup_$(BITS).o i8259_$(BITS).o setup.o
+ obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
+ obj-$(CONFIG_X86_VISWS) += visws_quirks.o
+ obj-$(CONFIG_X86_32) += probe_roms_32.o
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
- obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o setup64.o
- obj-y += bootflag.o e820_$(BITS).o
+ obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
+ obj-y += bootflag.o e820.o
obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
obj-y += alternative.o i8253.o pci-nommu.o
- obj-$(CONFIG_X86_64) += bugs_64.o
- obj-y += tsc_$(BITS).o io_delay.o rtc.o
+ obj-y += tsc.o io_delay.o rtc.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-y += process.o
diff --cc arch/x86/kernel/process_64.c
index f73cfbc2c281,cd8c0ed02b7e..749d5f888d4d
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@@ -147,27 -102,13 +103,24 @@@ static inline void play_dead(void
void cpu_idle(void)
{
current_thread_info()->status |= TS_POLLING;
+
+ /*
+ * If we're the non-boot CPU, nothing set the PDA stack
+ * canary up for us - and if we are the boot CPU we have
+ * a 0 stack canary. This is a good place for updating
+ * it, as we wont ever return from this function (so the
+ * invalid canaries already on the stack wont ever
+ * trigger):
+ */
+ boot_init_stack_canary();
+
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick();
+ tick_nohz_stop_sched_tick(1);
while (!need_resched()) {
- void (*idle)(void);
rmb();
- idle = pm_idle;
- if (!idle)
- idle = default_idle;
+
if (cpu_is_offline(smp_processor_id()))
play_dead();
/*
@@@ -648,11 -634,13 +646,12 @@@ __switch_to(struct task_struct *prev_p
*/
prev->usersp = read_pda(oldrsp);
write_pda(oldrsp, next->usersp);
- write_pda(pcurrent, next_p);
+ write_pda(pcurrent, next_p);
write_pda(kernelstack,
- (unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
+ (unsigned long)task_stack_page(next_p) +
+ THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
- write_pda(stack_canary, next_p->stack_canary);
/*
* Build time only check to make sure the stack_canary is at
* offset 40 in the pda; this is a gcc ABI requirement
diff --cc include/asm-x86/pda.h
index a5ff5bb76299,80860afffbdb..45fd2aee8d6a
--- a/include/asm-x86/pda.h
+++ b/include/asm-x86/pda.h
@@@ -19,7 -19,11 +19,9 @@@ struct x8664_pda
unsigned long stack_canary; /* 40 stack canary value */
/* gcc-ABI: this canary MUST be at
offset 40!!! */
-#endif
char *irqstackptr;
+ short nodenumber; /* number of current node (32k max) */
+ short in_bootmem; /* pda lives in bootmem */
unsigned int __softirq_pending;
unsigned int __nmi_count; /* number of NMI on this CPUs */
short mmu_state;
@@@ -131,5 -134,4 +132,6 @@@ do {
#define PDA_STACKOFFSET (5*8)
+#define refresh_stack_canary() write_pda(stack_canary, current->stack_canary)
- #endif
++
+ #endif /* ASM_X86__PDA_H */
diff --cc include/linux/sched.h
index f0132f9ef4dd,c226c7b82946..1a7e8461db5a
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@@ -1968,21 -1965,15 +1964,28 @@@ static inline unsigned long *end_of_sta
#endif
+ static inline int object_is_on_stack(void *obj)
+ {
+ void *stack = task_stack_page(current);
+
+ return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+ }
+
extern void thread_info_cache_init(void);
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+ unsigned long *n = end_of_stack(p);
+
+ do { /* Skip over canary */
+ n++;
+ } while (!*n);
+
+ return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+
/* set thread flags in other task's structures
* - see asm/thread_info.h for TIF_xxxx flags available
*/