Patches contributed by Eötvös Loránd University
commit 3ec96783e3c1d21bf9a1fa3f238f8354c92827f6
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Apr 25 18:25:25 2008 +0200
x86: make clear_fixmap() available on 64-bit as well
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/include/asm-x86/fixmap.h b/include/asm-x86/fixmap.h
index 382eb271a892..5bd206973dca 100644
--- a/include/asm-x86/fixmap.h
+++ b/include/asm-x86/fixmap.h
@@ -1,5 +1,13 @@
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
#ifdef CONFIG_X86_32
# include "fixmap_32.h"
#else
# include "fixmap_64.h"
#endif
+
+#define clear_fixmap(idx) \
+ __set_fixmap(idx, 0, __pgprot(0))
+
+#endif
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index eb1665125c44..4b96148e90c1 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -10,8 +10,8 @@
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
+#ifndef _ASM_FIXMAP_32_H
+#define _ASM_FIXMAP_32_H
/* used by vmalloc.c, vsyscall.lds.S.
@@ -121,9 +121,6 @@ extern void reserve_top_address(unsigned long reserve);
#define set_fixmap_nocache(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-#define clear_fixmap(idx) \
- __set_fixmap(idx, 0, __pgprot(0))
-
#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h
index f3d76858c0e6..355d26a75a82 100644
--- a/include/asm-x86/fixmap_64.h
+++ b/include/asm-x86/fixmap_64.h
@@ -8,8 +8,8 @@
* Copyright (C) 1998 Ingo Molnar
*/
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
+#ifndef _ASM_FIXMAP_64_H
+#define _ASM_FIXMAP_64_H
#include <linux/kernel.h>
#include <asm/apicdef.h>
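With the macro now in the shared <asm-x86/fixmap.h>, 64-bit code can use the same map/unmap idiom that 32-bit code already had. The sketch below is illustrative only; FIX_APIC_BASE and mp_lapic_addr are merely examples of an existing fixmap slot and a physical address, not something this patch touches:
	/* Illustrative only -- not part of the patch. */
	static void __init fixmap_usage_sketch(void)
	{
		set_fixmap(FIX_APIC_BASE, mp_lapic_addr);	/* establish the mapping */
		/* ... access the page via fix_to_virt(FIX_APIC_BASE) while mapped ... */
		clear_fixmap(FIX_APIC_BASE);	/* i.e. __set_fixmap(idx, 0, __pgprot(0)) */
	}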
commit 126e01bf92dfc5f0ba91e88be02c473e1506d7d9
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Apr 25 00:25:08 2008 +0200
softlockup: fix NOHZ wakeup
David Miller reported:
|--------------->
the following commit:
| commit 27ec4407790d075c325e1f4da0a19c56953cce23
| Author: Ingo Molnar <mingo@elte.hu>
| Date: Thu Feb 28 21:00:21 2008 +0100
|
| sched: make cpu_clock() globally synchronous
|
| Alexey Zaytsev reported (and bisected) that the introduction of
| cpu_clock() in printk made the timestamps jump back and forth.
|
| Make cpu_clock() more reliable while still keeping it fast when it's
| called frequently.
|
| Signed-off-by: Ingo Molnar <mingo@elte.hu>
causes watchdog triggers when a cpu exits NOHZ state after it has been
there for >= the soft lockup threshold; for example, here are some
messages from a 128 cpu Niagara2 box:
[ 168.106406] BUG: soft lockup - CPU#11 stuck for 128s! [dd:3239]
[ 168.989592] BUG: soft lockup - CPU#21 stuck for 86s! [swapper:0]
[ 168.999587] BUG: soft lockup - CPU#29 stuck for 91s! [make:4511]
[ 168.999615] BUG: soft lockup - CPU#2 stuck for 85s! [swapper:0]
[ 169.020514] BUG: soft lockup - CPU#37 stuck for 91s! [swapper:0]
[ 169.020514] BUG: soft lockup - CPU#45 stuck for 91s! [sh:4515]
[ 169.020515] BUG: soft lockup - CPU#69 stuck for 92s! [swapper:0]
[ 169.020515] BUG: soft lockup - CPU#77 stuck for 92s! [swapper:0]
[ 169.020515] BUG: soft lockup - CPU#61 stuck for 92s! [swapper:0]
[ 169.112554] BUG: soft lockup - CPU#85 stuck for 92s! [swapper:0]
[ 169.112554] BUG: soft lockup - CPU#101 stuck for 92s! [swapper:0]
[ 169.112554] BUG: soft lockup - CPU#109 stuck for 92s! [swapper:0]
[ 169.112554] BUG: soft lockup - CPU#117 stuck for 92s! [swapper:0]
[ 169.171483] BUG: soft lockup - CPU#40 stuck for 80s! [dd:3239]
[ 169.331483] BUG: soft lockup - CPU#13 stuck for 86s! [swapper:0]
[ 169.351500] BUG: soft lockup - CPU#43 stuck for 101s! [dd:3239]
[ 169.531482] BUG: soft lockup - CPU#9 stuck for 129s! [mkdir:4565]
[ 169.595754] BUG: soft lockup - CPU#20 stuck for 93s! [swapper:0]
[ 169.626787] BUG: soft lockup - CPU#52 stuck for 93s! [swapper:0]
[ 169.626787] BUG: soft lockup - CPU#84 stuck for 92s! [swapper:0]
[ 169.636812] BUG: soft lockup - CPU#116 stuck for 94s! [swapper:0]
It's simple enough to trigger this by doing a 10 minute sleep after a
fresh bootup then starting a parallel kernel build.
I suspect this might be reintroducing a problem we've had and fixed
before, see the thread:
http://marc.info/?l=linux-kernel&m=119546414004065&w=2
<---------------|
touch the softlockup watchdog when exiting NOHZ state - we are
obviously not locked up.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d358d4e3a958..b854a895591e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -393,6 +393,7 @@ void tick_nohz_restart_sched_tick(void)
sub_preempt_count(HARDIRQ_OFFSET);
}
+ touch_softlockup_watchdog();
/*
* Cancel the scheduled timer and restore the tick
*/
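Why a single touch_softlockup_watchdog() call on NOHZ exit is enough becomes clear from a simplified sketch of the per-CPU check the softlockup code runs on every timer tick. This is an illustration of the idea only, not the literal kernel/softlockup.c code of that kernel version; names are simplified and the per-CPU handling is omitted:
	/*
	 * Simplified illustration (not actual kernel/softlockup.c code).  A CPU
	 * returning from a long NOHZ sleep would otherwise see "now - touch_ts"
	 * exceed the threshold even though it was merely idle, not locked up.
	 */
	static unsigned long touch_ts;			/* per-CPU in the real code */

	static void softlockup_tick_sketch(unsigned long now)
	{
		if (touch_ts == 0) {			/* watchdog was touched ... */
			touch_ts = now;			/* ... so just re-arm, no warning */
			return;
		}
		if (now > touch_ts + softlockup_thresh)
			printk(KERN_ERR "BUG: soft lockup detected\n");
	}

	static void touch_softlockup_watchdog_sketch(void)
	{
		touch_ts = 0;				/* "this CPU is not stuck" */
	}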
commit 88a411c07b6fedcfc97b8dc51ae18540bd2beda0
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Apr 3 09:06:13 2008 +0200
seqlock: livelock fix
Thomas Gleixner debugged a particularly ugly seqlock-related livelock:
do not process the seq-read section if we know beforehand that the
test at the end of the section will fail ...
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 26e4925bc35b..632205ccc25d 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -85,23 +85,29 @@ static inline int write_tryseqlock(seqlock_t *sl)
/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
- unsigned ret = sl->sequence;
+ unsigned ret;
+
+repeat:
+ ret = sl->sequence;
smp_rmb();
+ if (unlikely(ret & 1)) {
+ cpu_relax();
+ goto repeat;
+ }
+
return ret;
}
-/* Test if reader processed invalid data.
- * If initial values is odd,
- * then writer had already started when section was entered
- * If sequence value changed
- * then writer changed data while in section
- *
- * Using xor saves one conditional branch.
+/*
+ * Test if reader processed invalid data.
+ *
+ * If sequence value changed then writer changed data while in section.
*/
-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned iv)
+static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
smp_rmb();
- return (iv & 1) | (sl->sequence ^ iv);
+
+ return (sl->sequence != start);
}
@@ -122,20 +128,26 @@ typedef struct seqcount {
/* Start of read using pointer to a sequence counter only. */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
- unsigned ret = s->sequence;
+ unsigned ret;
+
+repeat:
+ ret = s->sequence;
smp_rmb();
+ if (unlikely(ret & 1)) {
+ cpu_relax();
+ goto repeat;
+ }
return ret;
}
-/* Test if reader processed invalid data.
- * Equivalent to: iv is odd or sequence number has changed.
- * (iv & 1) || (*s != iv)
- * Using xor saves one conditional branch.
+/*
+ * Test if reader processed invalid data because sequence number has changed.
*/
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned iv)
+static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
- return (iv & 1) | (s->sequence ^ iv);
+
+ return s->sequence != start;
}
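The livelock scenario, and why waiting inside read_seqbegin() fixes it, is easiest to see against the canonical reader loop. With the old read_seqbegin(), a reader that entered while a writer was active was guaranteed to fail read_seqretry() and could keep re-executing the read section indefinitely instead of simply waiting for the sequence to become even. The sketch below is illustrative and not part of the patch; the seqlock and the data it protects are made-up names:
	/* Illustrative reader -- made-up names, not part of the patch. */
	static DEFINE_SEQLOCK(foo_lock);
	static u64 foo_value;

	static u64 foo_read(void)
	{
		unsigned seq;
		u64 val;

		do {
			seq = read_seqbegin(&foo_lock);	/* now waits out an active writer */
			val = foo_value;		/* read-side critical section */
		} while (read_seqretry(&foo_lock, seq));/* retry only if a writer got in */

		return val;
	}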
commit 1ec1fe73dfb711f9ea5a0ef8a7e3af5b6ac8b653
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Mar 19 20:30:40 2008 +0100
x86: xen unify x86 add common mm pgtable c fix
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index d526b46ae188..ed16b7704a3c 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -200,6 +200,24 @@ static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
return 1;
}
+
+void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+{
+ paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
+
+ /* Note: almost everything apart from _PAGE_PRESENT is
+ reserved at the pmd (PDPT) level. */
+ set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
+
+ /*
+ * According to Intel App note "TLBs, Paging-Structure Caches,
+ * and Their Invalidation", April 2007, document 317080-001,
+ * section 8.1: in PAE mode we explicitly have to flush the
+ * TLB via cr3 if the top-level pgd is changed...
+ */
+ if (mm == current->active_mm)
+ write_cr3(read_cr3());
+}
#else /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
diff --git a/include/asm-x86/pgalloc_32.h b/include/asm-x86/pgalloc_32.h
index d60edb14f85e..aaa322cb4b6e 100644
--- a/include/asm-x86/pgalloc_32.h
+++ b/include/asm-x86/pgalloc_32.h
@@ -62,23 +62,8 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
-static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
-{
- paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
-
- /* Note: almost everything apart from _PAGE_PRESENT is
- reserved at the pmd (PDPT) level. */
- set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
+extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
- /*
- * According to Intel App note "TLBs, Paging-Structure Caches,
- * and Their Invalidation", April 2007, document 317080-001,
- * section 8.1: in PAE mode we explicitly have to flush the
- * TLB via cr3 if the top-level pgd is changed...
- */
- if (mm == current->active_mm)
- write_cr3(read_cr3());
-}
#endif /* CONFIG_X86_PAE */
#endif /* _I386_PGALLOC_H */
commit 1f56cf1c58c81f7ecf16f5e99ac4a333d9dc9aea
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Apr 18 21:42:36 2008 +0200
/dev/mem: make promisc the default
default to the old semantics.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 0c1890c41279..239fd9fba0a5 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -7,7 +7,6 @@ source "lib/Kconfig.debug"
config NONPROMISC_DEVMEM
bool "Disable promiscuous /dev/mem"
- default y
help
The /dev/mem file by default only allows userspace access to PCI
space and the BIOS code and data regions. This is sufficient for
commit 28eb559b5b0b9b51b9165a9b8faa75b0bb91ca8d
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Apr 3 10:14:33 2008 +0200
pat: cleanups
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 1489aafbfa71..ef8b64b89c7d 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -284,7 +284,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
struct memtype *saved_ptr;
if (parse->start >= end) {
- printk("New Entry\n");
+ pr_debug("New Entry\n");
list_add(&new_entry->nd, parse->nd.prev);
new_entry = NULL;
break;
@@ -386,7 +386,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
break;
}
- printk("Overlap at 0x%Lx-0x%Lx\n",
+ printk(KERN_INFO "Overlap at 0x%Lx-0x%Lx\n",
saved_ptr->start, saved_ptr->end);
/* No conflict. Go ahead and add this new entry */
list_add(&new_entry->nd, &saved_ptr->nd);
@@ -396,7 +396,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
}
if (err) {
- printk(
+ printk(KERN_INFO
"reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
start, end, cattr_name(new_entry->type),
cattr_name(req_type));
@@ -408,16 +408,16 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
if (new_entry) {
/* No conflict. Not yet added to the list. Add to the tail */
list_add_tail(&new_entry->nd, &memtype_list);
- printk("New Entry\n");
- }
+ pr_debug("New Entry\n");
+ }
if (ret_type) {
- printk(
+ pr_debug(
"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
start, end, cattr_name(actual_type),
cattr_name(req_type), cattr_name(*ret_type));
} else {
- printk(
+ pr_debug(
"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
start, end, cattr_name(actual_type),
cattr_name(req_type));
@@ -454,11 +454,11 @@ int free_memtype(u64 start, u64 end)
spin_unlock(&memtype_lock);
if (err) {
- printk(KERN_DEBUG "%s:%d freeing invalid memtype %Lx-%Lx\n",
+ printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
current->comm, current->pid, start, end);
}
- printk( "free_memtype request 0x%Lx-0x%Lx\n", start, end);
+ pr_debug("free_memtype request 0x%Lx-0x%Lx\n", start, end);
return err;
}
@@ -529,7 +529,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
if (pfn <= max_pfn_mapped &&
ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
free_memtype(offset, offset + size);
- printk(KERN_DEBUG
+ printk(KERN_INFO
"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
current->comm, current->pid,
cattr_name(flags),
@@ -550,7 +550,7 @@ void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
reserve_memtype(addr, addr + size, want_flags, &flags);
if (flags != want_flags) {
- printk(KERN_DEBUG
+ printk(KERN_INFO
"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
current->comm, current->pid,
cattr_name(want_flags),
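The net effect of these conversions is twofold: the purely diagnostic tracking messages ("New Entry", "reserve_memtype added ...", "free_memtype request ...") move to pr_debug() and therefore vanish from normal builds, while the messages that stay as printk() gain an explicit log level instead of falling back to the default. Roughly how pr_debug() compiled out in kernels of that era is sketched below; the exact definition in <linux/kernel.h> differs slightly (the !DEBUG case used an empty inline function):
	/* Rough sketch of the pr_debug() behaviour assumed above: */
	#ifdef DEBUG
	#define pr_debug(fmt, arg...)	printk(KERN_DEBUG fmt, ##arg)
	#else
	#define pr_debug(fmt, arg...)	do { } while (0)	/* compiled out */
	#endif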
commit a4928cffe6435caf427ae673131a633c1329dbf3
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Apr 23 13:20:56 2008 +0200
"make namespacecheck" fixes
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 687208190b06..8317401170b8 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -902,7 +902,7 @@ void __init init_bsp_APIC(void)
apic_write_around(APIC_LVT1, value);
}
-void __cpuinit lapic_setup_esr(void)
+static void __cpuinit lapic_setup_esr(void)
{
unsigned long oldvalue, value, maxlvt;
if (lapic_is_integrated() && !esr_disable) {
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 9e8e5c050c55..bf83157337e4 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -429,7 +429,7 @@ void __init setup_boot_APIC_clock(void)
* set the DUMMY flag again and force the broadcast mode in the
* clockevents layer.
*/
-void __cpuinit check_boot_apic_timer_broadcast(void)
+static void __cpuinit check_boot_apic_timer_broadcast(void)
{
if (!disable_apic_timer ||
(lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
@@ -834,7 +834,7 @@ void __cpuinit setup_local_APIC(void)
preempt_enable();
}
-void __cpuinit lapic_setup_esr(void)
+static void __cpuinit lapic_setup_esr(void)
{
unsigned maxlvt = lapic_get_maxlvt();
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 7adad088e373..77de848bd1fb 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -550,7 +550,7 @@ static void hard_enable_TSC(void)
write_cr4(read_cr4() & ~X86_CR4_TSD);
}
-void enable_TSC(void)
+static void enable_TSC(void)
{
preempt_disable();
if (test_and_clear_thread_flag(TIF_NOTSC))
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 891af1a1b48a..131c2ee7ac56 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -562,7 +562,7 @@ static void hard_enable_TSC(void)
write_cr4(read_cr4() & ~X86_CR4_TSD);
}
-void enable_TSC(void)
+static void enable_TSC(void)
{
preempt_disable();
if (test_and_clear_thread_flag(TIF_NOTSC))
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 78828b0f604f..455d3c80960b 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -442,7 +442,7 @@ static void __init reserve_ebda_region(void)
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
-void __init setup_bootmem_allocator(void);
+static void __init setup_bootmem_allocator(void);
static unsigned long __init setup_memory(void)
{
/*
@@ -477,7 +477,7 @@ static unsigned long __init setup_memory(void)
return max_low_pfn;
}
-void __init zone_sizes_init(void)
+static void __init zone_sizes_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6a925394bc7e..ade371f9663a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -184,7 +184,7 @@ static void unmap_cpu_to_node(int cpu)
u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = BAD_APICID };
-void map_cpu_to_logical_apicid(void)
+static void map_cpu_to_logical_apicid(void)
{
int cpu = smp_processor_id();
int apicid = logical_smp_processor_id();
@@ -197,7 +197,7 @@ void map_cpu_to_logical_apicid(void)
map_cpu_to_node(cpu, node);
}
-void unmap_cpu_to_logical_apicid(int cpu)
+static void unmap_cpu_to_logical_apicid(int cpu)
{
cpu_2_logical_apicid[cpu] = BAD_APICID;
unmap_cpu_to_node(cpu);
@@ -211,7 +211,7 @@ void unmap_cpu_to_logical_apicid(int cpu)
* Report back to the Boot Processor.
* Running on AP.
*/
-void __cpuinit smp_callin(void)
+static void __cpuinit smp_callin(void)
{
int cpuid, phys_id;
unsigned long timeout;
@@ -436,7 +436,7 @@ static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
#endif
}
-void __cpuinit smp_checks(void)
+static void __cpuinit smp_checks(void)
{
if (smp_b_stepping)
printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
@@ -565,7 +565,7 @@ void __init smp_alloc_memory(void)
}
#endif
-void impress_friends(void)
+static void impress_friends(void)
{
int cpu;
unsigned long bogosum = 0;
@@ -1287,7 +1287,7 @@ void cpu_exit_clear(void)
}
# endif /* CONFIG_X86_32 */
-void remove_siblinginfo(int cpu)
+static void remove_siblinginfo(int cpu)
{
int sibling;
struct cpuinfo_x86 *c = &cpu_data(cpu);
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index 1558e513757e..df224a8774cb 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -191,7 +191,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
spin_unlock(&f->tlbstate_lock);
}
-int __cpuinit init_smp_flush(void)
+static int __cpuinit init_smp_flush(void)
{
int i;
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index edff4c985485..61efa2f7d564 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -216,7 +216,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
return 0;
}
-long __vsyscall(3) venosys_1(void)
+static long __vsyscall(3) venosys_1(void)
{
return -ENOSYS;
}
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 6791b8334bc6..2c24bea92c66 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -324,7 +324,7 @@ static const struct file_operations ptdump_fops = {
.release = single_release,
};
-int pt_dump_init(void)
+static int pt_dump_init(void)
{
struct dentry *pe;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index f7823a172868..c29ebd037254 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -993,7 +993,7 @@ static const struct file_operations dpa_fops = {
.release = single_release,
};
-int __init debug_pagealloc_proc_init(void)
+static int __init debug_pagealloc_proc_init(void)
{
struct dentry *de;
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index fb43d89f46f3..3890234e5b26 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -163,7 +163,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
pxm, apic_id, node);
}
-int update_end_of_memory(unsigned long end) {return -1;}
+static int update_end_of_memory(unsigned long end) {return -1;}
static int hotadd_enough_memory(struct bootnode *nd) {return 1;}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static inline int save_add_info(void) {return 1;}
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 62ebdec394b9..1ebaa5cd3112 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -199,7 +199,6 @@ static inline int hard_smp_processor_id(void)
#ifdef CONFIG_HOTPLUG_CPU
extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
-extern void remove_siblinginfo(int cpu);
#endif
extern void smp_alloc_memory(void);
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
index 0434bd8349a7..d2d8eb5b55f5 100644
--- a/include/asm-x86/tsc.h
+++ b/include/asm-x86/tsc.h
@@ -18,7 +18,6 @@ extern unsigned int cpu_khz;
extern unsigned int tsc_khz;
extern void disable_TSC(void);
-extern void enable_TSC(void);
static inline cycles_t get_cycles(void)
{
commit fcbc04c0ab345f6e9cabc92a15f35031a10fde9f
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Apr 21 13:39:53 2008 +0200
x86: voyager fix
Reported-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 87a693cf2bb7..4d350b5cbc71 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -23,7 +23,7 @@ config X86
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
- select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KGDB if !X86_VOYAGER
config GENERIC_LOCKBREAK
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 96f60c7cd124..d05722121d24 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -206,11 +206,6 @@ static struct irq_chip vic_chip = {
/* used to count up as CPUs are brought on line (starts at 0) */
static int cpucount = 0;
-/* steal a page from the bottom of memory for the trampoline and
- * squirrel its address away here. This will be in kernel virtual
- * space */
-unsigned char *trampoline_base;
-
/* The per cpu profile stuff - used in smp_local_timer_interrupt */
static DEFINE_PER_CPU(int, prof_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
@@ -427,18 +422,6 @@ void __init smp_store_cpu_info(int id)
identify_secondary_cpu(c);
}
-/* set up the trampoline and return the physical address of the code */
-unsigned long __init setup_trampoline(void)
-{
- /* these two are global symbols in trampoline.S */
- extern const __u8 trampoline_end[];
- extern const __u8 trampoline_data[];
-
- memcpy(trampoline_base, trampoline_data,
- trampoline_end - trampoline_data);
- return virt_to_phys(trampoline_base);
-}
-
/* Routine initially called when a non-boot CPU is brought online */
static void __init start_secondary(void *unused)
{
commit 6865f0d19306daf3a3bf28cfcfe74639d1bc0df4
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Apr 22 11:09:04 2008 +0200
intel-iommu.c: dma ops fix
Stephen Rothwell noticed that:
Commit 2be621498d461b63ca6124f86e3b9582e1a8e722 ("x86: dma-ops on highmem
fix") in Linus' tree introduced a new warning (noticed in the x86_64
allmodconfig build of linux-next):
drivers/pci/intel-iommu.c:2240: warning: initialization from incompatible pointer type
Which points at an instance of map_single that needs updating.
Fix it to the new prototype.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 301c68fab03b..1fd8bb765702 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1905,32 +1905,31 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
return domain;
}
-static dma_addr_t intel_map_single(struct device *hwdev, void *addr,
- size_t size, int dir)
+static dma_addr_t
+intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
{
struct pci_dev *pdev = to_pci_dev(hwdev);
- int ret;
struct dmar_domain *domain;
- unsigned long start_addr;
+ unsigned long start_paddr;
struct iova *iova;
int prot = 0;
+ int ret;
BUG_ON(dir == DMA_NONE);
if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
- return virt_to_bus(addr);
+ return paddr;
domain = get_valid_domain_for_dev(pdev);
if (!domain)
return 0;
- addr = (void *)virt_to_phys(addr);
- size = aligned_size((u64)addr, size);
+ size = aligned_size((u64)paddr, size);
iova = __intel_alloc_iova(hwdev, domain, size);
if (!iova)
goto error;
- start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+ start_paddr = iova->pfn_lo << PAGE_SHIFT_4K;
/*
* Check if DMAR supports zero-length reads on write only
@@ -1942,33 +1941,33 @@ static dma_addr_t intel_map_single(struct device *hwdev, void *addr,
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
prot |= DMA_PTE_WRITE;
/*
- * addr - (addr + size) might be partial page, we should map the whole
+ * paddr - (paddr + size) might be partial page, we should map the whole
* page. Note: if two part of one page are separately mapped, we
- * might have two guest_addr mapping to the same host addr, but this
+ * might have two guest_addr mapping to the same host paddr, but this
* is not a big problem
*/
- ret = domain_page_mapping(domain, start_addr,
- ((u64)addr) & PAGE_MASK_4K, size, prot);
+ ret = domain_page_mapping(domain, start_paddr,
+ ((u64)paddr) & PAGE_MASK_4K, size, prot);
if (ret)
goto error;
pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
- pci_name(pdev), size, (u64)addr,
- size, (u64)start_addr, dir);
+ pci_name(pdev), size, (u64)paddr,
+ size, (u64)start_paddr, dir);
/* it's a non-present to present mapping */
ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
- start_addr, size >> PAGE_SHIFT_4K, 1);
+ start_paddr, size >> PAGE_SHIFT_4K, 1);
if (ret)
iommu_flush_write_buffer(domain->iommu);
- return (start_addr + ((u64)addr & (~PAGE_MASK_4K)));
+ return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K)));
error:
if (iova)
__free_iova(&domain->iovad, iova);
printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
- pci_name(pdev), size, (u64)addr, dir);
+ pci_name(pdev), size, (u64)paddr, dir);
return 0;
}
@@ -2082,7 +2081,7 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size,
return NULL;
memset(vaddr, 0, size);
- *dma_handle = intel_map_single(hwdev, vaddr, size, DMA_BIDIRECTIONAL);
+ *dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL);
if (*dma_handle)
return vaddr;
free_pages((unsigned long)vaddr, order);
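The incompatible-pointer warning came from the dma-ops structure that intel-iommu.c registers: after the referenced highmem fix, the map_single hook takes the physical address directly as a phys_addr_t rather than a kernel-virtual void *. The sketch below is abbreviated and illustrative; only the one hook is shown, and the real structure in the x86 dma-mapping header of that time has more members:
	/* Abbreviated, illustrative sketch -- not the full structure definition: */
	struct dma_mapping_ops {
		dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
					 size_t size, int direction);
		/* other hooks omitted */
	};

	static struct dma_mapping_ops intel_dma_ops_sketch = {
		.map_single = intel_map_single,	/* must match the phys_addr_t prototype */
	};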
commit 486fdae21458bd9f4e125099bb3c38a4064e450e
Author: Ingo Molnar <mingo@elte.hu>
Date: Sat Apr 19 12:11:10 2008 +0200
sched: build fix
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index be42548b67bb..f3f4af4b8b0f 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -119,7 +119,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
struct sched_entity *last;
unsigned long flags;
-#ifndef CONFIG_CGROUP_SCHED
+#if !defined(CONFIG_CGROUP_SCHED) || !defined(CONFIG_USER_SCHED)
SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#else
char path[128] = "";