Patches contributed by Eötvös Loránd University


commit 927222b102186a6cc3e43e25062fcd18c800435e
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:33:49 2008 +0100

    x86: fix EISA ioremap
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 8534cb53ff60..3cf72977d012 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -1142,11 +1142,11 @@ void __init trap_init(void)
 	int i;
 
 #ifdef CONFIG_EISA
-	void __iomem *p = ioremap(0x0FFFD9, 4);
+	void __iomem *p = early_ioremap(0x0FFFD9, 4);
 	if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
 		EISA_bus = 1;
 	}
-	iounmap(p);
+	early_iounmap(p, 4);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC

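At trap_init() time the regular ioremap() is not yet functional, which is why
the EISA signature probe must use the boot-time early_ioremap()/early_iounmap()
pair instead. The probe compares the 4 bytes at physical address 0x0FFFD9
against the string "EISA" read as a little-endian 32-bit value; a standalone
sketch of the constant being compared (assuming ASCII):

	/* 'E' = 0x45, 'I' = 0x49, 'S' = 0x53, 'A' = 0x41 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int sig = 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24);

		printf("0x%08x\n", sig);	/* prints 0x41534945 */
		return 0;
	}
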
commit d701fda8601fe267fbd3648f108f0e751305101b
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:33:48 2008 +0100

    x86: fix early_ioremap()/btmap
    
    fix a long-standing weakness of the early-ioremap allocator: it
    uses a single pgd entry for the boot mappings but was not properly
    protected against crossing a 2MB (4MB) boundary.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index 46bedb7b5426..fde140fd6d95 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -90,10 +90,18 @@ enum fixed_addresses {
 	FIX_PARAVIRT_BOOTMAP,
 #endif
 	__end_of_permanent_fixed_addresses,
-	/* temporary boot-time mappings, used before ioremap() is functional */
+	/*
+	 * 256 temporary boot-time mappings, used by early_ioremap(),
+	 * before ioremap() is functional.
+	 *
+	 * We round it up to the next 512 pages boundary so that we
+	 * can have a single pgd entry and a single pte table:
+	 */
 #define NR_FIX_BTMAPS		64
 #define FIX_BTMAPS_NESTING	4
-	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+	FIX_BTMAP_END =
+		__end_of_permanent_fixed_addresses + 512 -
+			(__end_of_permanent_fixed_addresses & 511),
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
 	FIX_WP_TEST,
 	__end_of_fixed_addresses

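The expression in FIX_BTMAP_END rounds __end_of_permanent_fixed_addresses up
to the next multiple of 512 fixmap slots, so the 256 btmap slots that follow
(NR_FIX_BTMAPS * FIX_BTMAPS_NESTING) can never straddle a pte table, and hence
never cross the 2MB (4MB) area covered by a single pgd entry. A small
userspace sketch of the arithmetic (constants taken from the patch, the
example input value is hypothetical):

	#include <assert.h>

	#define NR_FIX_BTMAPS		64
	#define FIX_BTMAPS_NESTING	4

	int main(void)
	{
		unsigned int end = 345;	/* example __end_of_permanent_... value */
		unsigned int btmap_end = end + 512 - (end & 511);
		unsigned int btmap_begin =
			btmap_end + NR_FIX_BTMAPS * FIX_BTMAPS_NESTING - 1;

		/* the whole btmap range fits in one 512-slot aligned window */
		assert(btmap_end % 512 == 0);
		assert(btmap_begin / 512 == btmap_end / 512);
		return 0;
	}
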
commit d690b2afd5a7a02816386aa704c8c0b1aca8d2de
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:33:47 2008 +0100

    x86: add early_ioremap() leak detection
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/ioremap_32.c b/arch/x86/mm/ioremap_32.c
index 7f7daf50efed..b743de841f68 100644
--- a/arch/x86/mm/ioremap_32.c
+++ b/arch/x86/mm/ioremap_32.c
@@ -311,6 +311,22 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 
 int __initdata early_ioremap_nested;
 
+static int __init check_early_ioremap_leak(void)
+{
+	if (!early_ioremap_nested)
+		return 0;
+
+	printk(KERN_WARNING
+		"Debug warning: early ioremap leak of %d areas detected.\n",
+			early_ioremap_nested);
+	printk(KERN_WARNING
+		"please boot with early_ioremap_debug and report the dmesg.\n");
+	WARN_ON(1);
+
+	return 1;
+}
+late_initcall(check_early_ioremap_leak);
+
 void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 {
 	unsigned long offset, last_addr;

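early_ioremap_nested is incremented by every early_ioremap() and decremented
by every early_iounmap(), so a nonzero value at late_initcall time means some
boot-time mapping was never torn down. A hypothetical unbalanced caller that
this check would flag:

	/* sketch only: the physical address and size are made up */
	void __init some_early_probe(void)
	{
		void *p = early_ioremap(0xf0000, 4096);

		/* ... use the mapping ...
		 * The matching early_iounmap(p, 4096) is missing, so
		 * early_ioremap_nested stays elevated and
		 * check_early_ioremap_leak() warns once boot completes.
		 */
	}
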
commit d18d6d65efc2f5a3ff9a41528fbcb716b2e32615
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:33:45 2008 +0100

    x86: early_ioremap(), debugging
    
    add early_ioremap() debug printouts via the early_ioremap_debug
    boot option.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/ioremap_32.c b/arch/x86/mm/ioremap_32.c
index 4bcd5e065df4..30855c44818d 100644
--- a/arch/x86/mm/ioremap_32.c
+++ b/arch/x86/mm/ioremap_32.c
@@ -208,6 +208,17 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
+
+int __initdata early_ioremap_debug;
+
+static int __init early_ioremap_debug_setup(char *str)
+{
+	early_ioremap_debug = 1;
+
+	return 1;
+}
+__setup("early_ioremap_debug", early_ioremap_debug_setup);
+
 static __initdata int after_paging_init;
 static __initdata unsigned long bm_pte[1024]
 				__attribute__((aligned(PAGE_SIZE)));
@@ -226,6 +237,9 @@ void __init early_ioremap_init(void)
 {
 	unsigned long *pgd;
 
+	if (early_ioremap_debug)
+		printk("early_ioremap_init()\n");
+
 	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
 	*pgd = __pa(bm_pte) | _PAGE_TABLE;
 	memset(bm_pte, 0, sizeof(bm_pte));
@@ -236,6 +250,9 @@ void __init early_ioremap_clear(void)
 {
 	unsigned long *pgd;
 
+	if (early_ioremap_debug)
+		printk("early_ioremap_clear()\n");
+
 	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
 	*pgd = 0;
 	__flush_tlb_all();
@@ -303,6 +320,11 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
 	nesting = early_ioremap_nested;
+	if (early_ioremap_debug) {
+		printk("early_ioremap(%08lx, %08lx) [%d] => ",
+				phys_addr, size, nesting);
+		dump_stack();
+	}
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -343,6 +365,8 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 		--idx;
 		--nrpages;
 	}
+	if (early_ioremap_debug)
+		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
 
 	return (void*) (offset + fix_to_virt(idx0));
 }
@@ -358,6 +382,11 @@ void __init early_iounmap(void *addr, unsigned long size)
 	nesting = --early_ioremap_nested;
 	WARN_ON(nesting < 0);
 
+	if (early_ioremap_debug) {
+		printk("early_iounmap(%p, %08lx) [%d]\n", addr, size, nesting);
+		dump_stack();
+	}
+
 	virt_addr = (unsigned long)addr;
 	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
 		WARN_ON(1);

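The early_ioremap_debug flag is wired up through the kernel's standard
__setup() hook, so the printouts are enabled simply by adding
"early_ioremap_debug" to the kernel command line. The same pattern, reduced
to a minimal sketch with a hypothetical option name:

	static int __initdata my_option;

	static int __init my_option_setup(char *str)
	{
		my_option = 1;

		return 1;	/* non-zero: the option has been handled */
	}
	__setup("my_option", my_option_setup);
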
commit bd796ed0232a036f5ab14ac68d0a05f791ebcc3b
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:33:45 2008 +0100

    x86: add debug warnings to early_ioremap()
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/ioremap_32.c b/arch/x86/mm/ioremap_32.c
index 63f76ecae44c..4bcd5e065df4 100644
--- a/arch/x86/mm/ioremap_32.c
+++ b/arch/x86/mm/ioremap_32.c
@@ -306,12 +306,15 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
-	if (!size || last_addr < phys_addr)
+	if (!size || last_addr < phys_addr) {
+		WARN_ON(1);
 		return NULL;
+	}
 
-	if (nesting >= FIX_BTMAPS_NESTING)
+	if (nesting >= FIX_BTMAPS_NESTING) {
+		WARN_ON(1);
 		return NULL;
-
+	}
 	early_ioremap_nested++;
 	/*
 	 * Mappings have to be page-aligned
@@ -324,8 +327,10 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 	 * Mappings have to fit in the FIX_BTMAP area.
 	 */
 	nrpages = size >> PAGE_SHIFT;
-	if (nrpages > NR_FIX_BTMAPS)
+	if (nrpages > NR_FIX_BTMAPS) {
+		WARN_ON(1);
 		return NULL;
+	}
 
 	/*
 	 * Ok, go for it..
@@ -351,11 +356,13 @@ void __init early_iounmap(void *addr, unsigned long size)
 	unsigned int nesting;
 
 	nesting = --early_ioremap_nested;
+	WARN_ON(nesting < 0);
 
 	virt_addr = (unsigned long)addr;
-	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
+	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
+		WARN_ON(1);
 		return;
-
+	}
 	offset = virt_addr & ~PAGE_MASK;
 	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
 

commit a8efa1cd51d6f6407df5f42d8f1a7e9fc7178d1a
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:33:45 2008 +0100

    x86: increase the number of boot-mappings
    
    increase max early_ioremap() remapping size from 64K to 256K.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index 0e990218a09c..46bedb7b5426 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -91,7 +91,7 @@ enum fixed_addresses {
 #endif
 	__end_of_permanent_fixed_addresses,
 	/* temporary boot-time mappings, used before ioremap() is functional */
-#define NR_FIX_BTMAPS		16
+#define NR_FIX_BTMAPS		64
 #define FIX_BTMAPS_NESTING	4
 	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,

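The sizes in the changelog follow directly from the page size: a single
early_ioremap() mapping may span at most NR_FIX_BTMAPS pages, i.e.
16 * 4KB = 64KB before this patch and 64 * 4KB = 256KB after it. The fixmap
space reserved for all nesting levels grows accordingly, to
NR_FIX_BTMAPS * FIX_BTMAPS_NESTING = 256 pages (1MB of virtual address space).
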
commit 1b42f51630c7eebce6fb780b480731eb81afd325
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:33:45 2008 +0100

    x86: enhance early_ioremap()
    
     - allow nesting of up to 4 levels
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/ioremap_32.c b/arch/x86/mm/ioremap_32.c
index bfd7b8b2fe60..63f76ecae44c 100644
--- a/arch/x86/mm/ioremap_32.c
+++ b/arch/x86/mm/ioremap_32.c
@@ -291,23 +291,28 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 		__early_set_fixmap(idx, 0, __pgprot(0));
 }
 
+
+int __initdata early_ioremap_nested;
+
 void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 {
 	unsigned long offset, last_addr;
-	unsigned int nrpages;
-	enum fixed_addresses idx;
+	unsigned int nrpages, nesting;
+	enum fixed_addresses idx0, idx;
+
+	WARN_ON(system_state != SYSTEM_BOOTING);
+
+	nesting = early_ioremap_nested;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
 	if (!size || last_addr < phys_addr)
 		return NULL;
 
-	/*
-	 * Don't remap the low PCI/ISA area, it's always mapped..
-	 */
-	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
-		return phys_to_virt(phys_addr);
+	if (nesting >= FIX_BTMAPS_NESTING)
+		return NULL;
 
+	early_ioremap_nested++;
 	/*
 	 * Mappings have to be page-aligned
 	 */
@@ -325,14 +330,16 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 	/*
 	 * Ok, go for it..
 	 */
-	idx = FIX_BTMAP_BEGIN;
+	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
+	idx = idx0;
 	while (nrpages > 0) {
 		early_set_fixmap(idx, phys_addr);
 		phys_addr += PAGE_SIZE;
 		--idx;
 		--nrpages;
 	}
-	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
+
+	return (void*) (offset + fix_to_virt(idx0));
 }
 
 void __init early_iounmap(void *addr, unsigned long size)
@@ -341,17 +348,26 @@ void __init early_iounmap(void *addr, unsigned long size)
 	unsigned long offset;
 	unsigned int nrpages;
 	enum fixed_addresses idx;
+	unsigned int nesting;
+
+	nesting = --early_ioremap_nested;
 
 	virt_addr = (unsigned long)addr;
 	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
 		return;
+
 	offset = virt_addr & ~PAGE_MASK;
 	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
 
-	idx = FIX_BTMAP_BEGIN;
+	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
 	while (nrpages > 0) {
 		early_clear_fixmap(idx);
 		--idx;
 		--nrpages;
 	}
 }
+
+void __this_fixmap_does_not_exist(void)
+{
+	WARN_ON(1);
+}
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index 249e753ac805..0e990218a09c 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -65,7 +65,7 @@ enum fixed_addresses {
 #endif
 #ifdef CONFIG_X86_VISWS_APIC
 	FIX_CO_CPU,	/* Cobalt timer */
-	FIX_CO_APIC,	/* Cobalt APIC Redirection Table */ 
+	FIX_CO_APIC,	/* Cobalt APIC Redirection Table */
 	FIX_LI_PCIA,	/* Lithium PCI Bridge A */
 	FIX_LI_PCIB,	/* Lithium PCI Bridge B */
 #endif
@@ -74,7 +74,7 @@ enum fixed_addresses {
 #endif
 #ifdef CONFIG_X86_CYCLONE_TIMER
 	FIX_CYCLONE_TIMER, /*cyclone timer register*/
-#endif 
+#endif
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
@@ -91,9 +91,10 @@ enum fixed_addresses {
 #endif
 	__end_of_permanent_fixed_addresses,
 	/* temporary boot-time mappings, used before ioremap() is functional */
-#define NR_FIX_BTMAPS	16
+#define NR_FIX_BTMAPS		16
+#define FIX_BTMAPS_NESTING	4
 	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
-	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
+	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
 	FIX_WP_TEST,
 	__end_of_fixed_addresses
 };

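Each nesting level now gets its own window of NR_FIX_BTMAPS fixmap slots
(idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS * nesting), and the counter is
decremented on unmap, so mappings must be released in LIFO order. A
hypothetical caller using two of the four levels:

	/* sketch only: the addresses are made up; up to
	 * FIX_BTMAPS_NESTING (4) mappings may be open at once */
	void __init nested_example(void)
	{
		void *outer = early_ioremap(0xe0000, PAGE_SIZE);	/* level 0 */
		void *inner = early_ioremap(0xf0000, PAGE_SIZE);	/* level 1 */

		/* ... use both mappings ... */

		early_iounmap(inner, PAGE_SIZE);	/* LIFO order */
		early_iounmap(outer, PAGE_SIZE);
	}
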
commit 851339b1ffc2b215c7c4e448465e78a4c2f643f2
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:33:43 2008 +0100

    x86: clean up arch/x86/mm/pageattr-test.c
    
    fix 15 checkpatch warnings.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 4b22eb47f23a..c58fab061762 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -4,36 +4,36 @@
  * Clears the global bit on random pages in the direct mapping, then reverts
  * and compares page tables forwards and afterwards.
  */
-
-#include <linux/mm.h>
+#include <linux/bootmem.h>
 #include <linux/random.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/mm.h>
+
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/kdebug.h>
 
 enum {
-	NTEST = 400,
+	NTEST			= 400,
 #ifdef CONFIG_X86_64
-	LOWEST_LEVEL = 4,
-	LPS = (1 << PMD_SHIFT),
+	LOWEST_LEVEL		= 4,
+	LPS			= (1 << PMD_SHIFT),
 #elif defined(CONFIG_X86_PAE)
-	LOWEST_LEVEL = 3,
-	LPS = (1 << PMD_SHIFT),
+	LOWEST_LEVEL		= 3,
+	LPS			= (1 << PMD_SHIFT),
 #else
-	LOWEST_LEVEL = 3, /* lookup_address lies here */
-	LPS = (1 << 22),
+	LOWEST_LEVEL		= 3, /* lookup_address lies here */
+	LPS			= (1 << 22),
 #endif
-	GPS = (1<<30)
+	GPS			= (1<<30)
 };
 
 #ifdef CONFIG_X86_64
-#include <asm/proto.h>
-#define max_mapped end_pfn_map
+# include <asm/proto.h>
+# define max_mapped		end_pfn_map
 #else
-#define max_mapped max_low_pfn
+# define max_mapped		max_low_pfn
 #endif
 
 struct split_state {
@@ -43,23 +43,24 @@ struct split_state {
 
 static __init int print_split(struct split_state *s)
 {
-	int printed = 0;
 	long i, expected, missed = 0;
+	int printed = 0;
 	int err = 0;
 
 	s->lpg = s->gpg = s->spg = s->exec = 0;
 	s->min_exec = ~0UL;
 	s->max_exec = 0;
 	for (i = 0; i < max_mapped; ) {
+		unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
 		int level;
 		pte_t *pte;
-		unsigned long adr = (unsigned long)__va(i << PAGE_SHIFT);
 
-		pte = lookup_address(adr, &level);
+		pte = lookup_address(addr, &level);
 		if (!pte) {
 			if (!printed) {
-				dump_pagetable(adr);
-				printk("CPA %lx no pte level %d\n", adr, level);
+				dump_pagetable(addr);
+				printk(KERN_INFO "CPA %lx no pte level %d\n",
+					addr, level);
 				printed = 1;
 			}
 			missed++;
@@ -72,8 +73,9 @@ static __init int print_split(struct split_state *s)
 			i += GPS/PAGE_SIZE;
 		} else if (level != LOWEST_LEVEL) {
 			if (!(pte_val(*pte) & _PAGE_PSE)) {
-				printk("%lx level %d but not PSE %Lx\n",
-					adr, level, (u64)pte_val(*pte));
+				printk(KERN_ERR
+					"%lx level %d but not PSE %Lx\n",
+					addr, level, (u64)pte_val(*pte));
 				err = 1;
 			}
 			s->lpg++;
@@ -84,18 +86,20 @@ static __init int print_split(struct split_state *s)
 		}
 		if (!(pte_val(*pte) & _PAGE_NX)) {
 			s->exec++;
-			if (adr < s->min_exec)
-				s->min_exec = adr;
-			if (adr > s->max_exec)
-				s->max_exec = adr;
+			if (addr < s->min_exec)
+				s->min_exec = addr;
+			if (addr > s->max_exec)
+				s->max_exec = addr;
 		}
 	}
-	printk("CPA mapping 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
+	printk(KERN_INFO
+		"CPA mapping 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
 		s->spg, s->lpg, s->gpg, s->exec,
 		s->min_exec != ~0UL ? s->min_exec : 0, s->max_exec, missed);
+
 	expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
 	if (expected != i) {
-		printk("CPA max_mapped %lu but expected %lu\n",
+		printk(KERN_ERR "CPA max_mapped %lu but expected %lu\n",
 			max_mapped, expected);
 		return 1;
 	}
@@ -105,56 +109,62 @@ static __init int print_split(struct split_state *s)
 static __init int state_same(struct split_state *a, struct split_state *b)
 {
 	return a->lpg == b->lpg && a->gpg == b->gpg && a->spg == b->spg &&
-			a->exec == b->exec;
+		a->exec == b->exec;
 }
 
-static unsigned long addr[NTEST] __initdata;
-static unsigned len[NTEST] __initdata;
+static unsigned long __initdata addr[NTEST];
+static unsigned int __initdata len[NTEST];
 
 /* Change the global bit on random pages in the direct mapping */
 static __init int exercise_pageattr(void)
 {
-	int i, k;
+	struct split_state sa, sb, sc;
+	unsigned long *bm;
 	pte_t *pte, pte0;
+	int failed = 0;
 	int level;
+	int i, k;
 	int err;
-	struct split_state sa, sb, sc;
-	int failed = 0;
-	unsigned long *bm;
 
-	printk("CPA exercising pageattr\n");
+	printk(KERN_INFO "CPA exercising pageattr\n");
 
 	bm = vmalloc((max_mapped + 7) / 8);
 	if (!bm) {
-		printk("CPA Cannot vmalloc bitmap\n");
+		printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
 		return -ENOMEM;
 	}
 	memset(bm, 0, (max_mapped + 7) / 8);
 
 	failed += print_split(&sa);
 	srandom32(100);
+
 	for (i = 0; i < NTEST; i++) {
 		unsigned long pfn = random32() % max_mapped;
+
 		addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
 		len[i] = random32() % 100;
 		len[i] = min_t(unsigned long, len[i], max_mapped - pfn - 1);
+
 		if (len[i] == 0)
 			len[i] = 1;
 
 		pte = NULL;
 		pte0 = pfn_pte(0, __pgprot(0)); /* shut gcc up */
+
 		for (k = 0; k < len[i]; k++) {
 			pte = lookup_address(addr[i] + k*PAGE_SIZE, &level);
 			if (!pte || pgprot_val(pte_pgprot(*pte)) == 0) {
 				addr[i] = 0;
 				break;
 			}
-			if (k == 0)
+			if (k == 0) {
 				pte0 = *pte;
-			else if (pgprot_val(pte_pgprot(*pte)) !=
+			} else {
+				if (pgprot_val(pte_pgprot(*pte)) !=
 					pgprot_val(pte_pgprot(pte0))) {
-				len[i] = k;
-				break;
+					len[i] = k;
+					break;
+				}
 			}
 			if (test_bit(pfn + k, bm)) {
 				len[i] = k;
@@ -170,19 +180,19 @@ static __init int exercise_pageattr(void)
 		err = change_page_attr(virt_to_page(addr[i]), len[i],
 			    pte_pgprot(pte_clrhuge(pte_clrglobal(pte0))));
 		if (err < 0) {
-			printk("CPA %d failed %d\n", i, err);
+			printk(KERN_ERR "CPA %d failed %d\n", i, err);
 			failed++;
 		}
 
 		pte = lookup_address(addr[i], &level);
 		if (!pte || pte_global(*pte) || pte_huge(*pte)) {
-			printk("CPA %lx: bad pte %Lx\n", addr[i],
+			printk(KERN_ERR "CPA %lx: bad pte %Lx\n", addr[i],
 				pte ? (u64)pte_val(*pte) : 0ULL);
 			failed++;
 		}
 		if (level != LOWEST_LEVEL) {
-			printk("CPA %lx: unexpected level %d\n", addr[i],
-					level);
+			printk(KERN_ERR "CPA %lx: unexpected level %d\n",
+				addr[i], level);
 			failed++;
 		}
 
@@ -192,26 +202,26 @@ static __init int exercise_pageattr(void)
 
 	failed += print_split(&sb);
 
-	printk("CPA reverting everything\n");
+	printk(KERN_INFO "CPA reverting everything\n");
 	for (i = 0; i < NTEST; i++) {
 		if (!addr[i])
 			continue;
 		pte = lookup_address(addr[i], &level);
 		if (!pte) {
-			printk("CPA lookup of %lx failed\n", addr[i]);
+			printk(KERN_ERR "CPA lookup of %lx failed\n", addr[i]);
 			failed++;
 			continue;
 		}
 		err = change_page_attr(virt_to_page(addr[i]), len[i],
 					  pte_pgprot(pte_mkglobal(*pte)));
 		if (err < 0) {
-			printk("CPA reverting failed: %d\n", err);
+			printk(KERN_ERR "CPA reverting failed: %d\n", err);
 			failed++;
 		}
 		pte = lookup_address(addr[i], &level);
 		if (!pte || !pte_global(*pte)) {
-			printk("CPA %lx: bad pte after revert %Lx\n", addr[i],
-			       pte ? (u64)pte_val(*pte) : 0ULL);
+			printk(KERN_ERR "CPA %lx: bad pte after revert %Lx\n",
+				addr[i], pte ? (u64)pte_val(*pte) : 0ULL);
 			failed++;
 		}
 
@@ -223,11 +233,10 @@ static __init int exercise_pageattr(void)
 		failed++;
 
 	if (failed)
-		printk("CPA selftests NOT PASSED. Please report.\n");
+		printk(KERN_ERR "CPA selftests NOT PASSED. Please report.\n");
 	else
-		printk("CPA selftests PASSED\n");
+		printk(KERN_INFO "CPA selftests PASSED\n");
 
 	return 0;
 }
-
 module_init(exercise_pageattr);

commit f0646e43acb18f0e00b00085dc88bc3f403e7930
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:33:43 2008 +0100

    x86: return the page table level in lookup_address()
    
    based on this patch from Andi Kleen:
    
    |  Subject: CPA: Return the page table level in lookup_address()
    |  From: Andi Kleen <ak@suse.de>
    |
    |  Needed for the next change.
    |
    |  And change all the callers.
    
    and ported it to x86.git.
    
    Signed-off-by: Andi Kleen <ak@suse.de>
    Acked-by: Jan Beulich <jbeulich@novell.com>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index f7972ae7da07..f4f8c324715f 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -613,7 +613,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 #ifdef CONFIG_X86_PAE
 		if (error_code & PF_INSTR) {
-			pte_t *pte = lookup_address(address);
+			int level;
+			pte_t *pte = lookup_address(address, &level);
 
 			if (pte && pte_present(*pte) && !pte_exec(*pte))
 				printk(KERN_CRIT "kernel tried to execute "
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 5080646da771..206e3f6800b9 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -535,11 +535,12 @@ int __init set_kernel_exec(unsigned long vaddr, int enable)
 {
 	pte_t *pte;
 	int ret = 1;
+	int level;
 
 	if (!nx_enabled)
 		goto out;
 
-	pte = lookup_address(vaddr);
+	pte = lookup_address(vaddr, &level);
 	BUG_ON(!pte);
 
 	if (!pte_exec(*pte))
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index be4656403d77..523fd5b37df9 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -18,7 +18,7 @@
 static DEFINE_SPINLOCK(cpa_lock);
 static struct list_head df_list = LIST_HEAD_INIT(df_list);
 
-pte_t *lookup_address(unsigned long address)
+pte_t *lookup_address(unsigned long address, int *level)
 {
 	pgd_t *pgd = pgd_offset_k(address);
 	pud_t *pud;
@@ -32,8 +32,10 @@ pte_t *lookup_address(unsigned long address)
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd))
 		return NULL;
+	*level = 2;
 	if (pmd_large(*pmd))
 		return (pte_t *)pmd;
+	*level = 3;
 
 	return pte_offset_kernel(pmd, address);
 }
@@ -156,11 +158,12 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
 	struct page *kpte_page;
 	unsigned long address;
 	pte_t *kpte;
+	int level;
 
 	BUG_ON(PageHighMem(page));
 	address = (unsigned long)page_address(page);
 
-	kpte = lookup_address(address);
+	kpte = lookup_address(address, &level);
 	if (!kpte)
 		return -EINVAL;
 
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 14ab327cde0c..59cd066f6741 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -14,7 +14,7 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
-pte_t *lookup_address(unsigned long address)
+pte_t *lookup_address(unsigned long address, int *level)
 {
 	pgd_t *pgd = pgd_offset_k(address);
 	pud_t *pud;
@@ -29,8 +29,10 @@ pte_t *lookup_address(unsigned long address)
 	pmd = pmd_offset(pud, address);
 	if (!pmd_present(*pmd))
 		return NULL;
+	*level = 3;
 	if (pmd_large(*pmd))
 		return (pte_t *)pmd;
+	*level = 4;
 
 	pte = pte_offset_kernel(pmd, address);
 	if (pte && !pte_present(*pte))
@@ -140,8 +142,9 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 	struct page *kpte_page;
 	pgprot_t ref_prot2;
 	pte_t *kpte;
+	int level;
 
-	kpte = lookup_address(address);
+	kpte = lookup_address(address, &level);
 	if (!kpte)
 		return 0;
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index e6184735545f..45aa771e73a9 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -58,7 +58,8 @@
 
 xmaddr_t arbitrary_virt_to_machine(unsigned long address)
 {
-	pte_t *pte = lookup_address(address);
+	int level;
+	pte_t *pte = lookup_address(address, &level);
 	unsigned offset = address & PAGE_MASK;
 
 	BUG_ON(pte == NULL);
@@ -70,8 +71,9 @@ void make_lowmem_page_readonly(void *vaddr)
 {
 	pte_t *pte, ptev;
 	unsigned long address = (unsigned long)vaddr;
+	int level;
 
-	pte = lookup_address(address);
+	pte = lookup_address(address, &level);
 	BUG_ON(pte == NULL);
 
 	ptev = pte_wrprotect(*pte);
@@ -84,8 +86,9 @@ void make_lowmem_page_readwrite(void *vaddr)
 {
 	pte_t *pte, ptev;
 	unsigned long address = (unsigned long)vaddr;
+	int level;
 
-	pte = lookup_address(address);
+	pte = lookup_address(address, &level);
 	BUG_ON(pte == NULL);
 
 	ptev = pte_mkwrite(*pte);
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index eb8cccfa6a49..9381bd37b9b1 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -182,7 +182,7 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
  * NOTE: the return type is pte_t but if the pmd is PSE then we return it
  * as a pte too.
  */
-extern pte_t *lookup_address(unsigned long address);
+extern pte_t *lookup_address(unsigned long address, int *level);
 
 /*
  * Make a given kernel text page executable/non-executable.
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 29fdeb8111bb..139da50cd510 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -240,7 +240,7 @@ extern struct list_head pgd_list;
 
 extern int kern_addr_valid(unsigned long addr); 
 
-pte_t *lookup_address(unsigned long addr);
+pte_t *lookup_address(unsigned long addr, int *level);
 
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
 		remap_pfn_range(vma, vaddr, pfn, size, prot)

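After this change a caller can tell whether the returned pointer refers to a
regular pte or to a large-page pmd: on 32-bit, *level is set to 2 for a
PSE/large mapping (the returned pte_t * actually points at the pmd) and to 3
for an ordinary 4KB pte; on 64-bit the corresponding values are 3 and 4. A
hypothetical 32-bit caller:

	int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte)
		return;

	if (level == 2) {
		/* large (PSE) mapping: the pointer is really the pmd */
	} else {
		/* level == 3: ordinary 4KB pte */
	}
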
commit b4416a1be86b0c7bdde4e6ba526715c1a055746f
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:33:41 2008 +0100

    x86: clean up arch/x86/mm/pageattr_64.c
    
    clean up arch/x86/mm/pageattr_64.c.
    
    no code changed:
    
       text    data     bss     dec     hex filename
       1751      16       0    1767     6e7 pageattr_64.o.before
       1751      16       0    1767     6e7 pageattr_64.o.after
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index c40afbaaf93d..14ab327cde0c 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -1,48 +1,54 @@
-/* 
- * Copyright 2002 Andi Kleen, SuSE Labs. 
+/*
+ * Copyright 2002 Andi Kleen, SuSE Labs.
  * Thanks to Ben LaHaise for precious feedback.
- */ 
+ */
 
-#include <linux/mm.h>
-#include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/mm.h>
+
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
+#include <asm/uaccess.h>
 #include <asm/io.h>
 
 pte_t *lookup_address(unsigned long address)
-{ 
+{
 	pgd_t *pgd = pgd_offset_k(address);
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+
 	if (pgd_none(*pgd))
 		return NULL;
 	pud = pud_offset(pgd, address);
 	if (!pud_present(*pud))
-		return NULL; 
+		return NULL;
 	pmd = pmd_offset(pud, address);
 	if (!pmd_present(*pmd))
-		return NULL; 
+		return NULL;
 	if (pmd_large(*pmd))
 		return (pte_t *)pmd;
+
 	pte = pte_offset_kernel(pmd, address);
 	if (pte && !pte_present(*pte))
-		pte = NULL; 
+		pte = NULL;
+
 	return pte;
-} 
+}
 
-static struct page *split_large_page(unsigned long address, pgprot_t prot,
-				     pgprot_t ref_prot)
-{ 
-	int i; 
+static struct page *
+split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
+{
 	unsigned long addr;
-	struct page *base = alloc_pages(GFP_KERNEL, 0);
+	struct page *base;
 	pte_t *pbase;
-	if (!base) 
+	int i;
+
+	base = alloc_pages(GFP_KERNEL, 0);
+	if (!base)
 		return NULL;
 	/*
 	 * page_private is used to track the number of entries in
@@ -52,20 +58,21 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	page_private(base) = 0;
 
 	address = __pa(address);
-	addr = address & LARGE_PAGE_MASK; 
+	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
-		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
+		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
 				   addr == address ? prot : ref_prot);
 	}
 	return base;
-} 
+}
 
-void clflush_cache_range(void *adr, int size)
+void clflush_cache_range(void *addr, int size)
 {
 	int i;
+
 	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-		clflush(adr+i);
+		clflush(addr+i);
 }
 
 static void flush_kernel_map(void *arg)
@@ -76,17 +83,20 @@ static void flush_kernel_map(void *arg)
 	/* When clflush is available always use it because it is
 	   much cheaper than WBINVD. */
 	/* clflush is still broken. Disable for now. */
-	if (1 || !cpu_has_clflush)
+	if (1 || !cpu_has_clflush) {
 		asm volatile("wbinvd" ::: "memory");
-	else list_for_each_entry(pg, l, lru) {
-		void *adr = page_address(pg);
-		clflush_cache_range(adr, PAGE_SIZE);
+	} else {
+		list_for_each_entry(pg, l, lru) {
+			void *addr = page_address(pg);
+
+			clflush_cache_range(addr, PAGE_SIZE);
+		}
 	}
 	__flush_tlb_all();
 }
 
 static inline void flush_map(struct list_head *l)
-{	
+{
 	on_each_cpu(flush_kernel_map, l, 1, 1);
 }
 
@@ -98,52 +108,56 @@ static inline void save_page(struct page *fpage)
 		list_add(&fpage->lru, &deferred_pages);
 }
 
-/* 
+/*
  * No more special protections in this 2/4MB area - revert to a
- * large page again. 
+ * large page again.
  */
 static void revert_page(unsigned long address, pgprot_t ref_prot)
 {
+	unsigned long pfn;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t large_pte;
-	unsigned long pfn;
 
 	pgd = pgd_offset_k(address);
 	BUG_ON(pgd_none(*pgd));
-	pud = pud_offset(pgd,address);
+	pud = pud_offset(pgd, address);
 	BUG_ON(pud_none(*pud));
 	pmd = pmd_offset(pud, address);
 	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
 	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
 	large_pte = pfn_pte(pfn, ref_prot);
 	large_pte = pte_mkhuge(large_pte);
+
 	set_pte((pte_t *)pmd, large_pte);
-}      
+}
 
 static int
 __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
-				   pgprot_t ref_prot)
-{ 
-	pte_t *kpte; 
+		   pgprot_t ref_prot)
+{
 	struct page *kpte_page;
 	pgprot_t ref_prot2;
+	pte_t *kpte;
 
 	kpte = lookup_address(address);
-	if (!kpte) return 0;
+	if (!kpte)
+		return 0;
+
 	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
 	BUG_ON(PageLRU(kpte_page));
 	BUG_ON(PageCompound(kpte_page));
-	if (pgprot_val(prot) != pgprot_val(ref_prot)) { 
+	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
 		if (!pte_huge(*kpte)) {
 			set_pte(kpte, pfn_pte(pfn, prot));
 		} else {
- 			/*
+			/*
 			 * split_large_page will take the reference for this
 			 * change_page_attr on the split page.
- 			 */
+			 */
 			struct page *split;
+
 			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
 			split = split_large_page(address, prot, ref_prot2);
 			if (!split)
@@ -153,21 +167,23 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 			kpte_page = split;
 		}
 		page_private(kpte_page)++;
-	} else if (!pte_huge(*kpte)) {
-		set_pte(kpte, pfn_pte(pfn, ref_prot));
-		BUG_ON(page_private(kpte_page) == 0);
-		page_private(kpte_page)--;
-	} else
-		BUG();
+	} else {
+		if (!pte_huge(*kpte)) {
+			set_pte(kpte, pfn_pte(pfn, ref_prot));
+			BUG_ON(page_private(kpte_page) == 0);
+			page_private(kpte_page)--;
+		} else
+			BUG();
+	}
 
 	/* on x86-64 the direct mapping set at boot is not using 4k pages */
- 	BUG_ON(PageReserved(kpte_page));
+	BUG_ON(PageReserved(kpte_page));
 
 	save_page(kpte_page);
 	if (page_private(kpte_page) == 0)
 		revert_page(address, ref_prot);
 	return 0;
-} 
+}
 
 /*
  * Change the page attributes of an page in the linear mapping.
@@ -176,19 +192,19 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
  * than write-back somewhere - some CPUs do not like it when mappings with
  * different caching policies exist. This changes the page attributes of the
  * in kernel linear mapping too.
- * 
+ *
  * The caller needs to ensure that there are no conflicting mappings elsewhere.
  * This function only deals with the kernel linear map.
- * 
+ *
  * Caller must call global_flush_tlb() after this.
  */
 int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 {
-	int err = 0, kernel_map = 0;
-	int i; 
+	int err = 0, kernel_map = 0, i;
+
+	if (address >= __START_KERNEL_map &&
+			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
 
-	if (address >= __START_KERNEL_map
-	    && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
 		address = (unsigned long)__va(__pa(address));
 		kernel_map = 1;
 	}
@@ -198,7 +214,8 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
 
 		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-			err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
+			err = __change_page_attr(address, pfn, prot,
+						PAGE_KERNEL);
 			if (err)
 				break;
 		}
@@ -207,14 +224,16 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 		if (__pa(address) < KERNEL_TEXT_SIZE) {
 			unsigned long addr2;
 			pgprot_t prot2;
+
 			addr2 = __START_KERNEL_map + __pa(address);
 			/* Make sure the kernel mappings stay executable */
 			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
 			err = __change_page_attr(addr2, pfn, prot2,
 						 PAGE_KERNEL_EXEC);
-		} 
-	} 	
-	up_write(&init_mm.mmap_sem); 
+		}
+	}
+	up_write(&init_mm.mmap_sem);
+
 	return err;
 }
 
@@ -222,11 +241,13 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 {
 	unsigned long addr = (unsigned long)page_address(page);
+
 	return change_page_attr_addr(addr, numpages, prot);
 }
+EXPORT_SYMBOL(change_page_attr);
 
 void global_flush_tlb(void)
-{ 
+{
 	struct page *pg, *next;
 	struct list_head l;
 
@@ -248,8 +269,6 @@ void global_flush_tlb(void)
 			continue;
 		ClearPagePrivate(pg);
 		__free_page(pg);
-	} 
-} 
-
-EXPORT_SYMBOL(change_page_attr);
+	}
+}
 EXPORT_SYMBOL(global_flush_tlb);