Patches contributed by Eötvös Loránd University
commit 9b03638963e4b239dff1b424b91062a710d4b2e9
Merge: 03b30d151a91 69507c065393
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 23 11:10:03 2009 +0100
Merge branch 'tracing/ftrace' into tracing/core
commit 99d0000f710f3432182761f65f9658f1cf0bf455
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 23 11:09:15 2009 +0100
x86, xen: fix hardirq.h merge fallout
Impact: build fix
This build error:
arch/x86/xen/suspend.c:22: error: implicit declaration of function 'fix_to_virt'
arch/x86/xen/suspend.c:22: error: 'FIX_PARAVIRT_BOOTMAP' undeclared (first use in this function)
arch/x86/xen/suspend.c:22: error: (Each undeclared identifier is reported only once
arch/x86/xen/suspend.c:22: error: for each function it appears in.)
triggers because the hardirq.h unification removed an implicit fixmap.h
include - on which arch/x86/xen/suspend.c depended. Add the fixmap.h
include explicitly.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 212ffe012b76..95be7b434724 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -6,6 +6,7 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
+#include <asm/fixmap.h>
#include "xen-ops.h"
#include "mmu.h"
commit 05e3423c8577126800841bc55de8a509f2433dca
Merge: bfe2a3c3b5bf 99d0000f710f
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 23 11:00:43 2009 +0100
Merge branch 'core/percpu' into perfcounters/core
commit bfe2a3c3b5bf479788d5d5c5561346be6b169043
Merge: 77835492ed48 35d266a24796
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 23 10:20:15 2009 +0100
Merge branch 'core/percpu' into perfcounters/core
Conflicts:
arch/x86/include/asm/hardirq_32.h
arch/x86/include/asm/hardirq_64.h
Semantic merge:
arch/x86/include/asm/hardirq.h
[ added apic_perf_irqs field. ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --cc arch/x86/include/asm/hardirq.h
index 000787df66e6,176f058e7159..46ebed797e4f
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@@ -1,8 -1,47 +1,48 @@@
- #ifdef CONFIG_X86_32
- # include "hardirq_32.h"
- #else
- # include "hardirq_64.h"
+ #ifndef _ASM_X86_HARDIRQ_H
+ #define _ASM_X86_HARDIRQ_H
+
+ #include <linux/threads.h>
+ #include <linux/irq.h>
+
+ typedef struct {
+ unsigned int __softirq_pending;
+ unsigned int __nmi_count; /* arch dependent */
+ unsigned int irq0_irqs;
+ #ifdef CONFIG_X86_LOCAL_APIC
+ unsigned int apic_timer_irqs; /* arch dependent */
+ unsigned int irq_spurious_count;
+ #endif
++ unsigned int apic_perf_irqs;
+ #ifdef CONFIG_SMP
+ unsigned int irq_resched_count;
+ unsigned int irq_call_count;
+ unsigned int irq_tlb_count;
+ #endif
+ #ifdef CONFIG_X86_MCE
+ unsigned int irq_thermal_count;
+ # ifdef CONFIG_X86_64
+ unsigned int irq_threshold_count;
+ # endif
#endif
+ } ____cacheline_aligned irq_cpustat_t;
+
+ DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+
+ /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
+ #define MAX_HARDIRQS_PER_CPU NR_VECTORS
+
+ #define __ARCH_IRQ_STAT
+
+ #define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
+
+ #define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
+
+ #define __ARCH_SET_SOFTIRQ_PENDING
+
+ #define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
+ #define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
+
+ extern void ack_bad_irq(unsigned int irq);
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu arch_irq_stat_cpu
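The apic_perf_irqs counter added by the semantic merge is meant to be bumped through the new percpu-based inc_irq_stat() helper defined above; a hedged sketch of such a call site (the wrapper function is illustrative):
/* Sketch: accounting a perf counter interrupt on the current CPU. */
static void perf_irq_accounting_sketch(void)
{
	/* inc_irq_stat() expands to percpu_add(irq_stat.member, 1), so this
	 * increments this CPU's apic_perf_irqs without any locking. */
	inc_irq_stat(apic_perf_irqs);
}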
commit 35d266a24796f02f63299cfe5009dfc0d5a0e820
Merge: 03b486322e99 2de3a5f7956e
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 23 10:06:18 2009 +0100
Merge branch 'tj-percpu' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into core/percpu
commit 03b30d151a918364c1c7d08bcb3e167be0a3746f
Merge: b43f70933e77 3690b5e6fd9d
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Jan 22 10:26:30 2009 +0100
Merge branch 'tracing/ftrace' into tracing/core
commit 77835492ed489c0b870f82f4c50687bd267acc0a
Merge: af37501c7921 1de9e8e70f5a
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 21 16:37:27 2009 +0100
Merge commit 'v2.6.29-rc2' into perfcounters/core
Conflicts:
include/linux/syscalls.h
diff --cc include/linux/syscalls.h
index a1d177ce0a08,16875f89e6a7..fc81937009f5
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@@ -54,7 -54,7 +54,8 @@@ struct compat_stat
struct compat_timeval;
struct robust_list_head;
struct getcpu_cache;
+ struct old_linux_dirent;
+struct perf_counter_hw_event;
#include <linux/types.h>
#include <linux/aio_abi.h>
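The forward declaration suffices because syscalls.h only refers to the type through pointers in prototypes; a sketch of the kind of prototype that needs it (the exact signature is an assumption, not part of this patch):
/* Sketch, assuming the syscalls.h context: an incomplete type can be used
 * through a pointer, so a forward declaration is all that is required. */
struct perf_counter_hw_event;
asmlinkage long sys_perf_counter_open(
		struct perf_counter_hw_event __user *hw_event_uptr,
		pid_t pid, int cpu, int group_fd, unsigned long flags);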
commit 5b221278d61e3907a5e4104a844b63bc8bb3d43a
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 21 11:30:07 2009 +0100
x86: uv cleanup, build fix #2
Fix more build-failure fallout from the UV cleanup - the UV drivers
were not updated to include <asm/uv/uv.h>.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h
index f93f03a9e6e9..1b5f579df15f 100644
--- a/drivers/misc/sgi-gru/gru.h
+++ b/drivers/misc/sgi-gru/gru.h
@@ -19,6 +19,8 @@
#ifndef __GRU_H__
#define __GRU_H__
+#include <asm/uv/uv.h>
+
/*
* GRU architectural definitions
*/
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 7b4cbd5e03e9..069ad3a1c2ac 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -15,6 +15,8 @@
#include <linux/mutex.h>
+#include <asm/uv/uv.h>
+
#ifdef CONFIG_IA64
#include <asm/system.h>
#include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */
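Roughly, what the drivers pull from <asm/uv/uv.h> is the UV platform check; a sketch under that assumption (the probe function is illustrative):
/* Sketch: the SGI GRU/XP code keys off UV hardware detection, declared in
 * <asm/uv/uv.h>; relying on an indirect include broke the build. */
#include <asm/uv/uv.h>		/* is_uv_system(), among others */
static int uv_probe_sketch(void)
{
	return is_uv_system();	/* non-zero on SGI UV systems */
}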
commit ace6c6c840878342f698f0da6588dd5ded755369
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 21 10:32:44 2009 +0100
x86: make x86_32 use tlb_64.c, build fix, clean up X86_L1_CACHE_BYTES
Fix:
arch/x86/mm/tlb.c:47: error: ‘CONFIG_X86_INTERNODE_CACHE_BYTES’ undeclared here (not in a function)
The CONFIG_X86_INTERNODE_CACHE_BYTES symbol is only defined on 64-bit,
because vsmp support is 64-bit only. Define it on 32-bit too - where it
will always be equal to X86_L1_CACHE_BYTES.
Also move the default of X86_L1_CACHE_BYTES (which is separate from the
more commonly used L1_CACHE_SHIFT kconfig symbol) from 128 bytes to
64 bytes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index cdf4a9623237..8eb50ba9161e 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -292,15 +292,13 @@ config X86_CPU
# Define implied options from the CPU selection here
config X86_L1_CACHE_BYTES
int
- default "128" if GENERIC_CPU || MPSC
- default "64" if MK8 || MCORE2
- depends on X86_64
+ default "128" if MPSC
+ default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32
config X86_INTERNODE_CACHE_BYTES
int
default "4096" if X86_VSMP
default X86_L1_CACHE_BYTES if !X86_VSMP
- depends on X86_64
config X86_CMPXCHG
def_bool X86_64 || (X86_32 && !M386)
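With the dependency removed, CONFIG_X86_INTERNODE_CACHE_BYTES exists on 32-bit as well, which is what the tlb.c padding later in this series relies on; a minimal sketch of that use (the union name is illustrative):
/* Sketch: pad a per-CPU structure out to the internode cache line so remote
 * readers on vSMP (4096-byte lines) don't cause false sharing.  On !X86_VSMP
 * this is just X86_L1_CACHE_BYTES, which now defaults to 64. */
union flush_state_sketch {
	struct {
		unsigned long flush_va;		/* hot data written by the sender */
	};
	char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;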
commit 198030782cedf25391e67e7c88b04f87a5eb6563
Merge: 4ec71fa2d2c3 92181f190b64
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 21 10:39:51 2009 +0100
Merge branch 'x86/mm' into core/percpu
Conflicts:
arch/x86/mm/fault.c
diff --cc arch/x86/mm/fault.c
index 37242c405f16,033292dc9e21..65709a6aa6ee
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@@ -430,6 -429,190 +430,196 @@@ static noinline void pgtable_bad(struc
}
#endif
+ static noinline void no_context(struct pt_regs *regs,
+ unsigned long error_code, unsigned long address)
+ {
+ struct task_struct *tsk = current;
++ unsigned long *stackend;
++
+ #ifdef CONFIG_X86_64
+ unsigned long flags;
+ int sig;
+ #endif
+
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
+
+ /*
+ * X86_32
+ * Valid to do another page fault here, because if this fault
+ * had been triggered by is_prefetch fixup_exception would have
+ * handled it.
+ *
+ * X86_64
+ * Hall of shame of CPU/BIOS bugs.
+ */
+ if (is_prefetch(regs, error_code, address))
+ return;
+
+ if (is_errata93(regs, address))
+ return;
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ #ifdef CONFIG_X86_32
+ bust_spinlocks(1);
+ #else
+ flags = oops_begin();
+ #endif
+
+ show_fault_oops(regs, error_code, address);
+
++ stackend = end_of_stack(tsk);
++ if (*stackend != STACK_END_MAGIC)
++ printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
++
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
+
+ #ifdef CONFIG_X86_32
+ die("Oops", regs, error_code);
+ bust_spinlocks(0);
+ do_exit(SIGKILL);
+ #else
+ sig = SIGKILL;
+ if (__die("Oops", regs, error_code))
+ sig = 0;
+ /* Executive summary in case the body of the oops scrolled away */
+ printk(KERN_EMERG "CR2: %016lx\n", address);
+ oops_end(flags, regs, sig);
+ #endif
+ }
+
+ static void __bad_area_nosemaphore(struct pt_regs *regs,
+ unsigned long error_code, unsigned long address,
+ int si_code)
+ {
+ struct task_struct *tsk = current;
+
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & PF_USER) {
+ /*
+ * It's possible to have interrupts off here.
+ */
+ local_irq_enable();
+
+ /*
+ * Valid to do another page fault here because this one came
+ * from user space.
+ */
+ if (is_prefetch(regs, error_code, address))
+ return;
+
+ if (is_errata100(regs, address))
+ return;
+
+ if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+ printk_ratelimit()) {
+ printk(
+ "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
+ task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
+ tsk->comm, task_pid_nr(tsk), address,
+ (void *) regs->ip, (void *) regs->sp, error_code);
+ print_vma_addr(" in ", regs->ip);
+ printk("\n");
+ }
+
+ tsk->thread.cr2 = address;
+ /* Kernel addresses are always protection faults */
+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+ tsk->thread.trap_no = 14;
+ force_sig_info_fault(SIGSEGV, si_code, address, tsk);
+ return;
+ }
+
+ if (is_f00f_bug(regs, address))
+ return;
+
+ no_context(regs, error_code, address);
+ }
+
+ static noinline void bad_area_nosemaphore(struct pt_regs *regs,
+ unsigned long error_code, unsigned long address)
+ {
+ __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
+ }
+
+ static void __bad_area(struct pt_regs *regs,
+ unsigned long error_code, unsigned long address,
+ int si_code)
+ {
+ struct mm_struct *mm = current->mm;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+ up_read(&mm->mmap_sem);
+
+ __bad_area_nosemaphore(regs, error_code, address, si_code);
+ }
+
+ static noinline void bad_area(struct pt_regs *regs,
+ unsigned long error_code, unsigned long address)
+ {
+ __bad_area(regs, error_code, address, SEGV_MAPERR);
+ }
+
+ static noinline void bad_area_access_error(struct pt_regs *regs,
+ unsigned long error_code, unsigned long address)
+ {
+ __bad_area(regs, error_code, address, SEGV_ACCERR);
+ }
+
+ /* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
+ static void out_of_memory(struct pt_regs *regs,
+ unsigned long error_code, unsigned long address)
+ {
+ /*
+ * We ran out of memory, call the OOM killer, and return the userspace
+ * (which will retry the fault, or kill us if we got oom-killed).
+ */
+ up_read(&current->mm->mmap_sem);
+ pagefault_out_of_memory();
+ }
+
+ static void do_sigbus(struct pt_regs *regs,
+ unsigned long error_code, unsigned long address)
+ {
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+
+ up_read(&mm->mmap_sem);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!(error_code & PF_USER))
+ no_context(regs, error_code, address);
+ #ifdef CONFIG_X86_32
+ /* User space => ok to do another page fault */
+ if (is_prefetch(regs, error_code, address))
+ return;
+ #endif
+ tsk->thread.cr2 = address;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 14;
+ force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
+ }
+
+ static noinline void mm_fault_error(struct pt_regs *regs,
+ unsigned long error_code, unsigned long address, unsigned int fault)
+ {
+ if (fault & VM_FAULT_OOM)
+ out_of_memory(regs, error_code, address);
+ else if (fault & VM_FAULT_SIGBUS)
+ do_sigbus(regs, error_code, address);
+ else
+ BUG();
+ }
+
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
if ((error_code & PF_WRITE) && !pte_write(*pte))
diff --cc arch/x86/mm/tlb.c
index b3ca1b940654,8cfea5d14517..72a6d4ebe34d
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@@ -42,10 -46,10 +42,10 @@@ union smp_flush_state
struct mm_struct *flush_mm;
unsigned long flush_va;
spinlock_t tlbstate_lock;
+ DECLARE_BITMAP(flush_cpumask, NR_CPUS);
};
- char pad[SMP_CACHE_BYTES];
- } ____cacheline_aligned;
+ char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
+ } ____cacheline_internodealigned_in_smp;
/* State is put into the per CPU data section, but padded
to a full cache line because other CPUs can access it and we don't
@@@ -135,9 -129,9 +135,9 @@@ void smp_invalidate_interrupt(struct pt
* Use that to determine where the sender put the data.
*/
sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
- f = &per_cpu(flush_state, sender);
+ f = &flush_state[sender];
- if (!cpu_isset(cpu, f->flush_cpumask))
+ if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
goto out;
/*
* This was a BUG() but until someone can quote me the