Patches contributed by Eötvös Loránd University
commit 74b6eb6b937df07d0757e8642b7538b07da4290f
Merge: 6a385db5ce7f 2d4d57db692e 8f6d86dc4178 b38b06659055 d5e397cb49b5 e56d0cfe7790 dbca1df48e89 fb746d0e1365 6522869c3466 d639bab8da86 042cbaf88ab4 5662a2f8e731 3b4b75700a24 30a0fb947a68
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 28 23:13:53 2009 +0100
Merge branches 'x86/asm', 'x86/cleanups', 'x86/cpudetect', 'x86/debug', 'x86/doc', 'x86/header-fixes', 'x86/mm', 'x86/paravirt', 'x86/pat', 'x86/setup-v2', 'x86/subarch', 'x86/uaccess' and 'x86/urgent' into x86/core
diff --cc arch/x86/include/asm/spinlock.h
index d17c91981da2,4d3dcc51cacd,2bd6b111a414,d17c91981da2,d17c91981da2,d17c91981da2,d17c91981da2,d17c91981da2,d17c91981da2,d17c91981da2,d17c91981da2,d17c91981da2,2bd6b111a414,d17c91981da2..139b4249a5ec
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@@@@@@@@@@@@@@ -172,70 -172,70 -172,8 -172,70 -172,70 -172,70 -172,70 -172,70 -172,70 -172,70 -172,70 -172,70 -172,8 -172,70 +172,8 @@@@@@@@@@@@@@@ static inline int __ticket_spin_is_cont
return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
-- --------- -#ifdef CONFIG_PARAVIRT
-- --------- -/*
-- --------- - * Define virtualization-friendly old-style lock byte lock, for use in
-- --------- - * pv_lock_ops if desired.
-- --------- - *
-- --------- - * This differs from the pre-2.6.24 spinlock by always using xchgb
-- --------- - * rather than decb to take the lock; this allows it to use a
-- --------- - * zero-initialized lock structure. It also maintains a 1-byte
-- --------- - * contention counter, so that we can implement
-- --------- - * __byte_spin_is_contended.
-- --------- - */
-- --------- -struct __byte_spinlock {
-- --------- - s8 lock;
-- --------- - s8 spinners;
-- --------- -};
-- --------- -
-- --------- -static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
-- --------- -{
-- --------- - struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-- --------- - return bl->lock != 0;
-- --------- -}
-- --------- -
-- --------- -static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
-- --------- -{
-- --------- - struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-- --------- - return bl->spinners != 0;
-- --------- -}
-- --------- -
-- --------- -static inline void __byte_spin_lock(raw_spinlock_t *lock)
-- --------- -{
-- --------- - struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-- --------- - s8 val = 1;
- --------- -
- --------- - asm("1: xchgb %1, %0\n"
- --------- - " test %1,%1\n"
- --------- - " jz 3f\n"
- --------- - " " LOCK_PREFIX "incb %2\n"
- --------- - "2: rep;nop\n"
- --------- - " cmpb $1, %0\n"
- --------- - " je 2b\n"
- --------- - " " LOCK_PREFIX "decb %2\n"
- --------- - " jmp 1b\n"
- --------- - "3:"
- --------- - : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
- --------- -}
- --------- -
- --------- -static inline int __byte_spin_trylock(raw_spinlock_t *lock)
- --------- -{
- --------- - struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
- --------- - u8 old = 1;
- --------- -
- --------- - asm("xchgb %1,%0"
- --------- - : "+m" (bl->lock), "+q" (old) : : "memory");
++ +++++++++ +#ifndef CONFIG_PARAVIRT
- asm("1: xchgb %1, %0\n"
- " test %1,%1\n"
- " jz 3f\n"
- " " LOCK_PREFIX "incb %2\n"
- "2: rep;nop\n"
- " cmpb $1, %0\n"
- " je 2b\n"
- " " LOCK_PREFIX "decb %2\n"
- " jmp 1b\n"
- "3:"
- : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
- }
-
- static inline int __byte_spin_trylock(raw_spinlock_t *lock)
- {
- struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
- u8 old = 1;
-
- asm("xchgb %1,%0"
- : "+m" (bl->lock), "+q" (old) : : "memory");
-
-- --------- - return old == 0;
-- --------- -}
-- --------- -
-- --------- -static inline void __byte_spin_unlock(raw_spinlock_t *lock)
-- --------- -{
-- --------- - struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-- --------- - smp_wmb();
-- --------- - bl->lock = 0;
-- --------- -}
-- --------- -#else /* !CONFIG_PARAVIRT */
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
diff --cc arch/x86/include/asm/system.h
index 2fcc70bc85f3,fa47b1e6a866,8e626ea33a1a,8e626ea33a1a,8e626ea33a1a,8e626ea33a1a,8e626ea33a1a,8e626ea33a1a,8e626ea33a1a,8e626ea33a1a,8e626ea33a1a,8e626ea33a1a,8e626ea33a1a,8e626ea33a1a..c22383743f36
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@@@@@@@@@@@@@@ -108,22 -94,19 -94,19 -94,19 -94,19 -94,19 -94,19 -94,19 -94,19 -94,19 -94,19 -94,19 -94,19 -94,19 +108,22 @@@@@@@@@@@@@@@ do {
"call __switch_to\n\t" \
".globl thread_return\n" \
"thread_return:\n\t" \
------------- "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
+++++++++++++ "movq "__percpu_arg([current_task])",%%rsi\n\t" \
+++++++++++++ __switch_canary \
"movq %P[thread_info](%%rsi),%%r8\n\t" \
- ------------ LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
"movq %%rax,%%rdi\n\t" \
- ------------ "jc ret_from_fork\n\t" \
+ ++++++++++++ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ ++++++++++++ "jnz ret_from_fork\n\t" \
RESTORE_CONTEXT \
: "=a" (last) \
+++++++++++++ __switch_canary_oparam \
: [next] "S" (next), [prev] "D" (prev), \
[threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
[ti_flags] "i" (offsetof(struct thread_info, flags)), \
- ------------ [tif_fork] "i" (TIF_FORK), \
+ ++++++++++++ [_tif_fork] "i" (_TIF_FORK), \
[thread_info] "i" (offsetof(struct task_struct, stack)), \
------------- [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
+++++++++++++ [current_task] "m" (per_cpu_var(current_task)) \
+++++++++++++ __switch_canary_iparam \
: "memory", "cc" __EXTRA_CLOBBER)
#endif
diff --cc arch/x86/kernel/cpu/intel.c
index 549f2ada55f5,8ea6929e974c,8ea6929e974c,20ce03acf04b,8ea6929e974c,8ea6929e974c,8ea6929e974c,8ea6929e974c,8ea6929e974c,8ea6929e974c,8ea6929e974c,8ea6929e974c,8ea6929e974c,430e5c38a544..5deefae9064d
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@@@@@@@@@@@@@@ -29,19 -29,6 -29,6 -29,6 -29,6 -29,6 -29,6 -29,6 -29,6 -29,6 -29,6 -29,6 -29,6 -29,19 +29,19 @@@@@@@@@@@@@@@
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
++++++++++++ /* Unmask CPUID levels if masked: */
- if (c->x86 == 6 && c->x86_model >= 15) {
+++++++++++++ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
++++++++++++ u64 misc_enable;
++++++++++++
++++++++++++ rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
++++++++++++
++++++++++++ if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
++++++++++++ misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
++++++++++++ wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
++++++++++++ c->cpuid_level = cpuid_eax(0);
++++++++++++ }
++++++++++++ }
++++++++++++
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
diff --cc arch/x86/mm/fault.c
index 65709a6aa6ee,9e268b6b204e,9e268b6b204e,90dfae511a41,90dfae511a41,90dfae511a41,9e268b6b204e,8f4b859a04b3,90dfae511a41,90dfae511a41,90dfae511a41,90dfae511a41,9e268b6b204e,90dfae511a41..d3eee74f830a
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@@@@@@@@@@@@@@ -415,12 -414,12 -414,12 -414,12 -414,12 -414,12 -414,12 -414,11 -414,12 -414,12 -414,12 -414,12 -414,12 -414,12 +415,11 @@@@@@@@@@@@@@@ static noinline void pgtable_bad(struc
{
unsigned long flags = oops_begin();
int sig = SIGKILL;
------ ------ struct task_struct *tsk;
++++++ ++++++ struct task_struct *tsk = current;
printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
------ ------ current->comm, address);
++++++ ++++++ tsk->comm, address);
dump_pagetable(address);
------- ------ tsk = current;
tsk->thread.cr2 = address;
tsk->thread.trap_no = 14;
tsk->thread.error_code = error_code;
@@@@@@@@@@@@@@@ -430,196 -429,6 -429,6 -429,6 -429,6 -429,6 -429,6 -428,190 -429,6 -429,6 -429,6 -429,6 -429,6 -429,6 +429,196 @@@@@@@@@@@@@@@
}
#endif
++++++ ++++++static noinline void no_context(struct pt_regs *regs,
++++++ ++++++ unsigned long error_code, unsigned long address)
++++++ ++++++{
++++++ ++++++ struct task_struct *tsk = current;
+++++++++++++ unsigned long *stackend;
+++++++++++++
++++++ ++++++#ifdef CONFIG_X86_64
++++++ ++++++ unsigned long flags;
++++++ ++++++ int sig;
++++++ ++++++#endif
++++++ ++++++
++++++ ++++++ /* Are we prepared to handle this kernel fault? */
++++++ ++++++ if (fixup_exception(regs))
++++++ ++++++ return;
++++++ ++++++
++++++ ++++++ /*
++++++ ++++++ * X86_32
++++++ ++++++ * Valid to do another page fault here, because if this fault
++++++ ++++++ * had been triggered by is_prefetch fixup_exception would have
++++++ ++++++ * handled it.
++++++ ++++++ *
++++++ ++++++ * X86_64
++++++ ++++++ * Hall of shame of CPU/BIOS bugs.
++++++ ++++++ */
++++++ ++++++ if (is_prefetch(regs, error_code, address))
++++++ ++++++ return;
++++++ ++++++
++++++ ++++++ if (is_errata93(regs, address))
++++++ ++++++ return;
++++++ ++++++
++++++ ++++++ /*
++++++ ++++++ * Oops. The kernel tried to access some bad page. We'll have to
++++++ ++++++ * terminate things with extreme prejudice.
++++++ ++++++ */
++++++ ++++++#ifdef CONFIG_X86_32
++++++ ++++++ bust_spinlocks(1);
++++++ ++++++#else
++++++ ++++++ flags = oops_begin();
++++++ ++++++#endif
++++++ ++++++
++++++ ++++++ show_fault_oops(regs, error_code, address);
++++++ ++++++
+++++++++++++ stackend = end_of_stack(tsk);
+++++++++++++ if (*stackend != STACK_END_MAGIC)
+++++++++++++ printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
+++++++++++++
++++++ ++++++ tsk->thread.cr2 = address;
++++++ ++++++ tsk->thread.trap_no = 14;
++++++ ++++++ tsk->thread.error_code = error_code;
++++++ ++++++
++++++ ++++++#ifdef CONFIG_X86_32
++++++ ++++++ die("Oops", regs, error_code);
++++++ ++++++ bust_spinlocks(0);
++++++ ++++++ do_exit(SIGKILL);
++++++ ++++++#else
++++++ ++++++ sig = SIGKILL;
++++++ ++++++ if (__die("Oops", regs, error_code))
++++++ ++++++ sig = 0;
++++++ ++++++ /* Executive summary in case the body of the oops scrolled away */
++++++ ++++++ printk(KERN_EMERG "CR2: %016lx\n", address);
++++++ ++++++ oops_end(flags, regs, sig);
++++++ ++++++#endif
++++++ ++++++}
++++++ ++++++
++++++ ++++++static void __bad_area_nosemaphore(struct pt_regs *regs,
++++++ ++++++ unsigned long error_code, unsigned long address,
++++++ ++++++ int si_code)
++++++ ++++++{
++++++ ++++++ struct task_struct *tsk = current;
++++++ ++++++
++++++ ++++++ /* User mode accesses just cause a SIGSEGV */
++++++ ++++++ if (error_code & PF_USER) {
++++++ ++++++ /*
++++++ ++++++ * It's possible to have interrupts off here.
++++++ ++++++ */
++++++ ++++++ local_irq_enable();
++++++ ++++++
++++++ ++++++ /*
++++++ ++++++ * Valid to do another page fault here because this one came
++++++ ++++++ * from user space.
++++++ ++++++ */
++++++ ++++++ if (is_prefetch(regs, error_code, address))
++++++ ++++++ return;
++++++ ++++++
++++++ ++++++ if (is_errata100(regs, address))
++++++ ++++++ return;
++++++ ++++++
++++++ ++++++ if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
++++++ ++++++ printk_ratelimit()) {
++++++ ++++++ printk(
++++++ ++++++ "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
++++++ ++++++ task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
++++++ ++++++ tsk->comm, task_pid_nr(tsk), address,
++++++ ++++++ (void *) regs->ip, (void *) regs->sp, error_code);
++++++ ++++++ print_vma_addr(" in ", regs->ip);
++++++ ++++++ printk("\n");
++++++ ++++++ }
++++++ ++++++
++++++ ++++++ tsk->thread.cr2 = address;
++++++ ++++++ /* Kernel addresses are always protection faults */
++++++ ++++++ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++++++ ++++++ tsk->thread.trap_no = 14;
++++++ ++++++ force_sig_info_fault(SIGSEGV, si_code, address, tsk);
++++++ ++++++ return;
++++++ ++++++ }
++++++ ++++++
++++++ ++++++ if (is_f00f_bug(regs, address))
++++++ ++++++ return;
++++++ ++++++
++++++ ++++++ no_context(regs, error_code, address);
++++++ ++++++}
++++++ ++++++
++++++ ++++++static noinline void bad_area_nosemaphore(struct pt_regs *regs,
++++++ ++++++ unsigned long error_code, unsigned long address)
++++++ ++++++{
++++++ ++++++ __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
++++++ ++++++}
++++++ ++++++
++++++ ++++++static void __bad_area(struct pt_regs *regs,
++++++ ++++++ unsigned long error_code, unsigned long address,
++++++ ++++++ int si_code)
++++++ ++++++{
++++++ ++++++ struct mm_struct *mm = current->mm;
++++++ ++++++
++++++ ++++++ /*
++++++ ++++++ * Something tried to access memory that isn't in our memory map..
++++++ ++++++ * Fix it, but check if it's kernel or user first..
++++++ ++++++ */
++++++ ++++++ up_read(&mm->mmap_sem);
++++++ ++++++
++++++ ++++++ __bad_area_nosemaphore(regs, error_code, address, si_code);
++++++ ++++++}
++++++ ++++++
++++++ ++++++static noinline void bad_area(struct pt_regs *regs,
++++++ ++++++ unsigned long error_code, unsigned long address)
++++++ ++++++{
++++++ ++++++ __bad_area(regs, error_code, address, SEGV_MAPERR);
++++++ ++++++}
++++++ ++++++
++++++ ++++++static noinline void bad_area_access_error(struct pt_regs *regs,
++++++ ++++++ unsigned long error_code, unsigned long address)
++++++ ++++++{
++++++ ++++++ __bad_area(regs, error_code, address, SEGV_ACCERR);
++++++ ++++++}
++++++ ++++++
++++++ ++++++/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
++++++ ++++++static void out_of_memory(struct pt_regs *regs,
++++++ ++++++ unsigned long error_code, unsigned long address)
++++++ ++++++{
++++++ ++++++ /*
++++++ ++++++ * We ran out of memory, call the OOM killer, and return the userspace
++++++ ++++++ * (which will retry the fault, or kill us if we got oom-killed).
++++++ ++++++ */
++++++ ++++++	up_read(&current->mm->mmap_sem);
++++++ ++++++ pagefault_out_of_memory();
++++++ ++++++}
++++++ ++++++
++++++ ++++++static void do_sigbus(struct pt_regs *regs,
++++++ ++++++ unsigned long error_code, unsigned long address)
++++++ ++++++{
++++++ ++++++ struct task_struct *tsk = current;
++++++ ++++++ struct mm_struct *mm = tsk->mm;
++++++ ++++++
++++++ ++++++ up_read(&mm->mmap_sem);
++++++ ++++++
++++++ ++++++ /* Kernel mode? Handle exceptions or die */
++++++ ++++++ if (!(error_code & PF_USER))
++++++ ++++++ no_context(regs, error_code, address);
++++++ ++++++#ifdef CONFIG_X86_32
++++++ ++++++ /* User space => ok to do another page fault */
++++++ ++++++ if (is_prefetch(regs, error_code, address))
++++++ ++++++ return;
++++++ ++++++#endif
++++++ ++++++ tsk->thread.cr2 = address;
++++++ ++++++ tsk->thread.error_code = error_code;
++++++ ++++++ tsk->thread.trap_no = 14;
++++++ ++++++ force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
++++++ ++++++}
++++++ ++++++
++++++ ++++++static noinline void mm_fault_error(struct pt_regs *regs,
++++++ ++++++ unsigned long error_code, unsigned long address, unsigned int fault)
++++++ ++++++{
++++++ ++++++ if (fault & VM_FAULT_OOM)
++++++ ++++++ out_of_memory(regs, error_code, address);
++++++ ++++++ else if (fault & VM_FAULT_SIGBUS)
++++++ ++++++ do_sigbus(regs, error_code, address);
++++++ ++++++ else
++++++ ++++++ BUG();
++++++ ++++++}
++++++ ++++++
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
if ((error_code & PF_WRITE) && !pte_write(*pte))
diff --cc arch/x86/mm/pat.c
index 7b61036427df,85cbd3cd3723,85cbd3cd3723,430cb44dd3f4,8b08fb955274,160c42d3eb8f,85cbd3cd3723,8b08fb955274,8b08fb955274,8b08fb955274,8b08fb955274,8b08fb955274,85cbd3cd3723,ffc88cc00fda..9127e31c7268
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@@@@@@@@@@@@@@ -618,13 -601,12 -601,12 -610,13 -601,13 -628,13 -601,12 -601,13 -601,13 -601,13 -601,13 -601,13 -601,12 -618,12 +627,13 @@@@@@@@@@@@@@@ void unmap_devmem(unsigned long pfn, un
* Reserved non RAM regions only and after successful reserve_memtype,
* this func also keeps identity mapping (if any) in sync with this new prot.
*/
-- - --static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
++ + ++static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
++ + ++ int strict_prot)
{
int is_ram = 0;
- int ret;
+ int id_sz, ret;
unsigned long flags;
-- - -- unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
++ + ++ unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
is_ram = pagerange_is_ram(paddr, paddr + size);
@@@@@@@@@@@@@@@ -643,35 -625,26 -625,26 -635,35 -626,35 -653,27 -625,26 -626,35 -626,35 -626,35 -626,35 -626,35 -625,26 -642,26 +652,35 @@@@@@@@@@@@@@@
return ret;
if (flags != want_flags) {
-- - -- free_memtype(paddr, paddr + size);
-- - -- printk(KERN_ERR
-- - -- "%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
-- - -- current->comm, current->pid,
-- - -- cattr_name(want_flags),
-- - -- (unsigned long long)paddr,
-- - -- (unsigned long long)(paddr + size),
-- - -- cattr_name(flags));
-- - -- return -EINVAL;
++ + ++ if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
++ + ++ free_memtype(paddr, paddr + size);
++ + ++ printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
++ + ++ " for %Lx-%Lx, got %s\n",
++ + ++ current->comm, current->pid,
++ + ++ cattr_name(want_flags),
++ + ++ (unsigned long long)paddr,
++ + ++ (unsigned long long)(paddr + size),
++ + ++ cattr_name(flags));
++ + ++ return -EINVAL;
++ + ++ }
++ + ++ /*
++ + ++ * We allow returning different type than the one requested in
++ + ++ * non strict case.
++ + ++ */
++ + ++ *vma_prot = __pgprot((pgprot_val(*vma_prot) &
++ + ++ (~_PAGE_CACHE_MASK)) |
++ + ++ flags);
}
- if (kernel_map_sync_memtype(paddr, size, flags)) {
+ /* Need to keep identity mapping in sync */
+ if (paddr >= __pa(high_memory))
+ return 0;
+
+ id_sz = (__pa(high_memory) < paddr + size) ?
+ __pa(high_memory) - paddr :
+ size;
+
+ if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
free_memtype(paddr, paddr + size);
printk(KERN_ERR
"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
commit 6a385db5ce7f1fd2c68ec511e44587b67dab8fca
Merge: 18e352e4a734 4369f1fb7cd4
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 28 23:12:55 2009 +0100
Merge branch 'core/percpu' into x86/core
Conflicts:
kernel/irq/handle.c
diff --cc kernel/irq/handle.c
index 3aba8d12f328,375d68cd5bf0..f51eaee921b6
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@@ -146,8 -133,10 +145,12 @@@ int __init early_irq_init(void
int legacy_count;
int i;
+ init_irq_default_affinity();
+
+ /* initialize nr_irqs based on nr_cpu_ids */
+ arch_probe_nr_irqs();
+ printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
desc = irq_desc_legacy;
legacy_count = ARRAY_SIZE(irq_desc_legacy);
@@@ -233,8 -229,8 +243,10 @@@ int __init early_irq_init(void
int count;
int i;
+ init_irq_default_affinity();
+
+ printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
desc = irq_desc;
count = ARRAY_SIZE(irq_desc);
commit 4a66a82be78e1b6f9f83074423bf42a52251414c
Merge: 32c0bd962411 cc2f6d90e950 ba2607fe9c1f
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Jan 27 14:30:57 2009 +0100
Merge branches 'tracing/blktrace', 'tracing/kmemtrace' and 'tracing/urgent' into tracing/core
commit 4369f1fb7cd4cf777312f43e1cb9aa5504fc4125
Merge: 3ddeb51d9c83 cf3997f50762
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Jan 27 12:03:24 2009 +0100
Merge branch 'tj-percpu' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into core/percpu
Conflicts:
arch/x86/kernel/setup_percpu.c
Semantic conflict:
arch/x86/kernel/cpu/common.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --cc arch/x86/kernel/cpu/common.c
index 99904f288d6a,0c766b80d915..652fdc9a757a
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@@ -52,6 -52,15 +52,15 @@@ cpumask_var_t cpu_initialized_mask
/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;
+ /* correctly size the local cpu masks */
-void setup_cpu_local_masks(void)
++void __init setup_cpu_local_masks(void)
+ {
+ alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+ alloc_bootmem_cpumask_var(&cpu_callin_mask);
+ alloc_bootmem_cpumask_var(&cpu_callout_mask);
+ alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+ }
+
#else /* CONFIG_X86_32 */
cpumask_t cpu_callin_map;
commit 3ddeb51d9c83931c1ca6abf76a38934c5a1ed918
Merge: 5a611268b69f 5ee810072175
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Jan 27 12:01:51 2009 +0100
Merge branch 'linus' into core/percpu
Conflicts:
arch/x86/kernel/setup_percpu.c
diff --cc arch/x86/kernel/setup_percpu.c
index 90b8e154bb53,01161077a49c..e553803cd2db
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@@ -77,26 -50,7 +77,26 @@@ static void __init setup_node_to_cpumas
static inline void setup_node_to_cpumask_map(void) { }
#endif
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
+#ifdef CONFIG_X86_64
+
+/* correctly size the local cpu masks */
- static void setup_cpu_local_masks(void)
++static void __init setup_cpu_local_masks(void)
+{
+ alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+ alloc_bootmem_cpumask_var(&cpu_callin_mask);
+ alloc_bootmem_cpumask_var(&cpu_callout_mask);
+ alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
+#else /* CONFIG_X86_32 */
+
+static inline void setup_cpu_local_masks(void)
+{
+}
+
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
* Copy data used in early init routines from the initial arrays to the
* per cpu data areas. These arrays then become expendable and the
commit d5e397cb49b53381e4c99a064ca733c665646de8
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jan 26 06:09:00 2009 +0100
x86: improve early fault/irq printout
Impact: add a stack dump to early IRQs/faults
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 9f1410711607..84d05a4d7fc4 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -577,6 +577,9 @@ ignore_int:
pushl 40(%esp)
pushl $int_msg
call printk
+
+ call dump_stack
+
addl $(5*4),%esp
popl %ds
popl %es
@@ -652,7 +655,7 @@ early_recursion_flag:
.long 0
int_msg:
- .asciz "Unknown interrupt or fault at EIP %p %p %p\n"
+ .asciz "Unknown interrupt or fault at: %p %p %p\n"
fault_msg:
/* fault info: */
commit 34707bcd0452aba644396767bc9fb61585bdab4f
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jan 26 14:18:05 2009 +0100
x86, debug: remove early_printk() #ifdefs from head_32.S
Impact: cleanup
Remove such constructs:
#ifdef CONFIG_EARLY_PRINTK
call early_printk
#else
call printk
#endif
Not only are they ugly, they are also pointless: a call to printk()
maps to early_printk during early bootup anyway, if CONFIG_EARLY_PRINTK
is enabled.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index e835b4eea70b..9f1410711607 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -548,11 +548,7 @@ early_fault:
pushl %eax
pushl %edx /* trapno */
pushl $fault_msg
-#ifdef CONFIG_EARLY_PRINTK
- call early_printk
-#else
call printk
-#endif
#endif
call dump_stack
hlt_loop:
@@ -580,11 +576,7 @@ ignore_int:
pushl 32(%esp)
pushl 40(%esp)
pushl $int_msg
-#ifdef CONFIG_EARLY_PRINTK
- call early_printk
-#else
call printk
-#endif
addl $(5*4),%esp
popl %ds
popl %es
commit 5ce1b1ed27d4ab1d81b8543a96f488bba2071576
Merge: 7e49fcce1bda 9005f3ebebfc
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jan 26 14:01:52 2009 +0100
Merge branches 'tracing/ftrace' and 'tracing/function-graph-tracer' into tracing/core
commit 99fb4d349db7e7dacb2099c5cc320a9e2d31c1ef
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jan 26 04:30:41 2009 +0100
x86: unmask CPUID levels on Intel CPUs, fix
Impact: fix boot hang on pre-model-15 Intel CPUs
rdmsrl_safe() does not work in very early bootup code yet, because we
dont have the pagefault handler installed yet so exception section
does not get parsed. rdmsr_safe() will just crash and hang the bootup.
So limit the MSR_IA32_MISC_ENABLE MSR read to those CPU types that
support it.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 43c1dcf0bec7..549f2ada55f5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -29,14 +29,17 @@
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
- u64 misc_enable;
-
- /* Unmask CPUID levels if masked */
- if (!rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_enable) &&
- (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID)) {
- misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
- wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
- c->cpuid_level = cpuid_eax(0);
+ /* Unmask CPUID levels if masked: */
+ if (c->x86 == 6 && c->x86_model >= 15) {
+ u64 misc_enable;
+
+ rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+ if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
+ misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
+ wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+ c->cpuid_level = cpuid_eax(0);
+ }
}
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
commit e1b4d1143651fb3838be1117785b6e0386fa151f
Author: Ingo Molnar <mingo@elte.hu>
Date: Sun Jan 25 16:57:00 2009 +0100
x86: use standard PIT frequency
the RDC and ELAN platforms use slighly different PIT clocks, resulting in
a timex.h hack that changes PIT_TICK_RATE during build time. But if a
tester enables any of these platform support .config options, the PIT
will be miscalibrated on standard PC platforms.
So use one frequency - in a subsequent patch we'll add a quirk to allow
x86 platforms to define different PIT frequencies.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h
index 1287dc1347d6..b5c9d45c981f 100644
--- a/arch/x86/include/asm/timex.h
+++ b/arch/x86/include/asm/timex.h
@@ -1,18 +1,13 @@
-/* x86 architecture timex specifications */
#ifndef _ASM_X86_TIMEX_H
#define _ASM_X86_TIMEX_H
#include <asm/processor.h>
#include <asm/tsc.h>
-#ifdef CONFIG_X86_ELAN
-# define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */
-#elif defined(CONFIG_X86_RDC321X)
-# define PIT_TICK_RATE 1041667 /* Underlying HZ for R8610 */
-#else
-# define PIT_TICK_RATE 1193182 /* Underlying HZ */
-#endif
-#define CLOCK_TICK_RATE PIT_TICK_RATE
+/* The PIT ticks at this frequency (in HZ): */
+#define PIT_TICK_RATE 1193182
+
+#define CLOCK_TICK_RATE PIT_TICK_RATE
#define ARCH_HAS_READ_CURRENT_TIMER