Patches contributed by Eötvös Loránd University


commit ab6e14b712e2d8e0c4dd3cf1a72083b52104853a
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 23:27:58 2008 +0100

    ia64: on UP percpu variables are not small memory model
    
    Tony says:
    
    | The CONFIG_SMP=n path in ia64 makes quite radical changes ... rather
    | than putting all the per-cpu stuff into the top 64K of address space
    | and providing a per-cpu TLB mapping for that range to a different
    | physical address ... it just makes all the per-cpu stuff link as ordinary
    | variables in .data.
    
    The new generic percpu code got confused about this: PER_CPU_ATTRIBUTES
    was defined even on UP, so it picked up the small memory model - which
    could not be linked. The right fix is to define it only on SMP. This
    resolved the build failures in my cross-compiling environment.
    
    Also link these variables into the .percpu section even on UP - some
    assembly code has offset dependencies, such as GET_IA64_MCA_DATA() in
    arch/ia64/kernel/mca_asm.S.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Acked-by: Tony Luck <tony.luck@intel.com>

diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
index 26404b77f007..77f30b664b4e 100644
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -15,18 +15,20 @@
 
 #include <linux/threads.h>
 
+#ifdef CONFIG_SMP
+
 #ifdef HAVE_MODEL_SMALL_ATTRIBUTE
 # define PER_CPU_ATTRIBUTES	__attribute__((__model__ (__small__)))
 #endif
 
-#ifdef CONFIG_SMP
-
 #define __my_cpu_offset	__ia64_per_cpu_var(local_per_cpu_offset)
 
 extern void *per_cpu_init(void);
 
 #else /* ! SMP */
 
+#define PER_CPU_ATTRIBUTES	__attribute__((__section__(".data.percpu")))
+
 #define per_cpu_init()				(__phys_per_cpu_start)
 
 #endif	/* SMP */
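
To illustrate the fix, a minimal sketch of what a DEFINE_PER_CPU-style
declaration ends up expanding to under each configuration (simplified from
the generic percpu headers of the time; the variable name is hypothetical):

	#ifdef CONFIG_SMP
	/* small memory model: addressed through the per-cpu TLB window */
	# define PER_CPU_ATTRIBUTES __attribute__((__model__(__small__)))
	#else
	/* ordinary variable, but kept in .data.percpu so that assembly
	 * offset users such as GET_IA64_MCA_DATA() still work: */
	# define PER_CPU_ATTRIBUTES __attribute__((__section__(".data.percpu")))
	#endif

	#define DEFINE_PER_CPU(type, name) \
		__typeof__(type) per_cpu__##name PER_CPU_ATTRIBUTES

	DEFINE_PER_CPU(unsigned long, example_counter);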

commit 18fbef9e5242e146fe0264cf719a498785102841
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 23:27:58 2008 +0100

    x86: fix arch/x86/kernel/test_nx.c modular build bug
    
    fix this modular build bug:
    
    >   CC [M]  arch/x86/kernel/test_nx.o
    > {standard input}: Assembler messages:
    > {standard input}:58: Error: cannot represent relocation type BFD_RELOC_64
    > {standard input}:59: Error: cannot represent relocation type BFD_RELOC_64
    > make[2]: *** [arch/x86/kernel/test_nx.o] Error 1
    > make[1]: *** [arch/x86/kernel] Error 2
    
    Reported-by: Adrian Bunk <bunk@kernel.org>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/arch/x86/kernel/test_nx.c b/arch/x86/kernel/test_nx.c
index 6d7ef11e7975..ae0ef2e304c7 100644
--- a/arch/x86/kernel/test_nx.c
+++ b/arch/x86/kernel/test_nx.c
@@ -91,8 +91,13 @@ static noinline int test_address(void *address)
 		".previous\n"
 		".section __ex_table,\"a\"\n"
 		"       .align 8\n"
+#ifdef CONFIG_X86_32
+		"	.long 0b\n"
+		"	.long 2b\n"
+#else
 		"	.quad 0b\n"
 		"	.quad 2b\n"
+#endif
 		".previous\n"
 		: [rslt] "=r" (result)
 		: [fake_code] "r" (address), [zero] "r" (0UL), "0" (result)
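
The root cause: __ex_table entries are native-word-sized addresses, so an
unconditional .quad emits 64-bit relocations that a 32-bit build cannot
represent. A standalone sketch of the word-size-dependent pattern, mirroring
the patch (hypothetical labels; the nops never fault, this only shows the
emission):

	static void ex_table_entry_sketch(void)
	{
		asm volatile(
			"1:	nop\n"
			"2:	nop\n"
			".section __ex_table,\"a\"\n"
			"	.align 8\n"
	#ifdef CONFIG_X86_32
			"	.long 1b\n"	/* 32-bit fault address */
			"	.long 2b\n"	/* 32-bit fixup address */
	#else
			"	.quad 1b\n"	/* 64-bit fault address */
			"	.quad 2b\n"	/* 64-bit fixup address */
	#endif
			".previous\n");
	}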

commit d7d119d777d7ed22c4095915e9c985a8b75bfe5f
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:10 2008 +0100

    x86: arch/x86/mm/init_32.c printk fixes
    
    printk fixes. A NOP in terms of functionality, but the strings got
    a bit larger due to the KERN_ markers that were added.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c6975fc6944a..02d269c07b96 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -530,7 +530,7 @@ void __init paging_init(void)
 #ifdef CONFIG_X86_PAE
 	set_nx();
 	if (nx_enabled)
-		printk("NX (Execute Disable) protection: active\n");
+		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
 #endif
 	pagetable_init();
 
@@ -557,7 +557,8 @@ void __init paging_init(void)
  */
 static void __init test_wp_bit(void)
 {
-	printk("Checking if this processor honours the WP bit even in supervisor mode... ");
+	printk(KERN_INFO
+  "Checking if this processor honours the WP bit even in supervisor mode...");
 
 	/* Any page-aligned address will do, the test is non-destructive */
 	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
@@ -565,12 +566,13 @@ static void __init test_wp_bit(void)
 	clear_fixmap(FIX_WP_TEST);
 
 	if (!boot_cpu_data.wp_works_ok) {
-		printk("No.\n");
+		printk(KERN_CONT "No.\n");
 #ifdef CONFIG_X86_WP_WORKS_OK
-		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
+		panic(
+  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
 #endif
 	} else {
-		printk("Ok.\n");
+		printk(KERN_CONT "Ok.\n");
 	}
 }
 
@@ -588,10 +590,12 @@ void __init mem_init(void)
 
 #ifdef CONFIG_HIGHMEM
 	/* check that fixmap and pkmap do not overlap */
-	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
-		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
+	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
+		printk(KERN_ERR
+			"fixmap and kmap areas overlap - this will crash\n");
 		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
-				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
+				PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
+				FIXADDR_START);
 		BUG();
 	}
 #endif
@@ -628,7 +632,7 @@ void __init mem_init(void)
 	       );
 
 #if 1 /* double-sanity-check paranoia */
-	printk("virtual kernel memory layout:\n"
+	printk(KERN_INFO "virtual kernel memory layout:\n"
 		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_HIGHMEM
 		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
@@ -756,13 +760,15 @@ void mark_rodata_ro(void)
 #endif
 	{
 		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-		printk("Write protecting the kernel text: %luk\n", size >> 10);
+		printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+			size >> 10);
 
 #ifdef CONFIG_CPA_DEBUG
-		printk("Testing CPA: Reverting %lx-%lx\n", start, start+size);
+		printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
+			start, start+size);
 		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
 
-		printk("Testing CPA: write protecting again\n");
+		printk(KERN_INFO "Testing CPA: write protecting again\n");
 		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
 #endif
 	}
@@ -770,15 +776,15 @@ void mark_rodata_ro(void)
 	start += size;
 	size = (unsigned long)__end_rodata - start;
 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-	printk("Write protecting the kernel read-only data: %luk\n",
-	       size >> 10);
+	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+		size >> 10);
 	rodata_test();
 
 #ifdef CONFIG_CPA_DEBUG
-	printk("Testing CPA: undo %lx-%lx\n", start, start + size);
+	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
 	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
 
-	printk("Testing CPA: write protecting again\n");
+	printk(KERN_INFO "Testing CPA: write protecting again\n");
 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 #endif
 }
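
The pattern applied throughout: give every printk() an explicit severity,
and mark continuations of an unterminated line with KERN_CONT so they are
not treated as new messages. A minimal sketch (hypothetical helper, message
shortened):

	static void wp_report_sketch(int works_ok)
	{
		printk(KERN_INFO "Checking WP bit... ");
		printk(KERN_CONT "%s.\n", works_ok ? "Ok" : "No");
	}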

commit 8550eb99821b3f78cddfd19964f30e8bc4e429e0
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:10 2008 +0100

    x86: arch/x86/mm/init_32.c cleanup
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8ed5c189d7aa..c6975fc6944a 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -39,6 +39,7 @@
 #include <asm/fixmap.h>
 #include <asm/e820.h>
 #include <asm/apic.h>
+#include <asm/bugs.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
@@ -50,7 +51,7 @@ unsigned int __VMALLOC_RESERVE = 128 << 20;
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long highstart_pfn, highend_pfn;
 
-static int noinline do_test_wp_bit(void);
+static noinline int do_test_wp_bit(void);
 
 /*
  * Creates a middle page table and puts a pointer to it in the
@@ -61,7 +62,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 {
 	pud_t *pud;
 	pmd_t *pmd_table;
-		
+
 #ifdef CONFIG_X86_PAE
 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
 		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
@@ -69,18 +70,18 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
 		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 		pud = pud_offset(pgd, 0);
-		if (pmd_table != pmd_offset(pud, 0))
-			BUG();
+		BUG_ON(pmd_table != pmd_offset(pud, 0));
 	}
 #endif
 	pud = pud_offset(pgd, 0);
 	pmd_table = pmd_offset(pud, 0);
+
 	return pmd_table;
 }
 
 /*
  * Create a page table and place a pointer to it in a middle page
- * directory entry.
+ * directory entry:
  */
 static pte_t * __init one_page_table_init(pmd_t *pmd)
 {
@@ -90,9 +91,10 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 #endif
-		if (!page_table)
+		if (!page_table) {
 			page_table =
 				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+		}
 
 		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
@@ -103,22 +105,21 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 }
 
 /*
- * This function initializes a certain range of kernel virtual memory 
+ * This function initializes a certain range of kernel virtual memory
  * with new bootmem page tables, everywhere page tables are missing in
  * the given range.
- */
-
-/*
- * NOTE: The pagetables are allocated contiguous on the physical space 
- * so we can cache the place of the first one and move around without 
+ *
+ * NOTE: The pagetables are allocated contiguous on the physical space
+ * so we can cache the place of the first one and move around without
  * checking the pgd every time.
  */
-static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
+static void __init
+page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 {
-	pgd_t *pgd;
-	pmd_t *pmd;
 	int pgd_idx, pmd_idx;
 	unsigned long vaddr;
+	pgd_t *pgd;
+	pmd_t *pmd;
 
 	vaddr = start;
 	pgd_idx = pgd_index(vaddr);
@@ -128,7 +129,8 @@ static void __init page_table_range_init (unsigned long start, unsigned long end
 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
 		pmd = one_md_table_init(pgd);
 		pmd = pmd + pmd_index(vaddr);
-		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
+		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+							pmd++, pmd_idx++) {
 			one_page_table_init(pmd);
 
 			vaddr += PMD_SIZE;
@@ -145,17 +147,17 @@ static inline int is_kernel_text(unsigned long addr)
 }
 
 /*
- * This maps the physical memory to kernel virtual address space, a total 
- * of max_low_pfn pages, by creating page tables starting from address 
- * PAGE_OFFSET.
+ * This maps the physical memory to kernel virtual address space, a total
+ * of max_low_pfn pages, by creating page tables starting from address
+ * PAGE_OFFSET:
  */
 static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 {
+	int pgd_idx, pmd_idx, pte_ofs;
 	unsigned long pfn;
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
-	int pgd_idx, pmd_idx, pte_ofs;
 
 	pgd_idx = pgd_index(PAGE_OFFSET);
 	pgd = pgd_base + pgd_idx;
@@ -165,40 +167,43 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 		pmd = one_md_table_init(pgd);
 		if (pfn >= max_low_pfn)
 			continue;
+
 		for (pmd_idx = 0;
 		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
 		     pmd++, pmd_idx++) {
-			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
+			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
 
-			/* Map with big pages if possible, otherwise
-			   create normal page tables. */
+			/*
+			 * Map with big pages if possible, otherwise
+			 * create normal page tables:
+			 */
 			if (cpu_has_pse) {
-				unsigned int address2;
+				unsigned int addr2;
 				pgprot_t prot = PAGE_KERNEL_LARGE;
 
-				address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE +
+				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
 					PAGE_OFFSET + PAGE_SIZE-1;
 
-				if (is_kernel_text(address) ||
-				    is_kernel_text(address2))
+				if (is_kernel_text(addr) ||
+				    is_kernel_text(addr2))
 					prot = PAGE_KERNEL_LARGE_EXEC;
 
 				set_pmd(pmd, pfn_pmd(pfn, prot));
 
 				pfn += PTRS_PER_PTE;
-			} else {
-				pte = one_page_table_init(pmd);
+				continue;
+			}
+			pte = one_page_table_init(pmd);
 
-				for (pte_ofs = 0;
-				     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
-				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
-					pgprot_t prot = PAGE_KERNEL;
+			for (pte_ofs = 0;
+			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
+			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
+				pgprot_t prot = PAGE_KERNEL;
 
-					if (is_kernel_text(address))
-						prot = PAGE_KERNEL_EXEC;
+				if (is_kernel_text(addr))
+					prot = PAGE_KERNEL_EXEC;
 
-					set_pte(pte, pfn_pte(pfn, prot));
-				}
+				set_pte(pte, pfn_pte(pfn, prot));
 			}
 		}
 	}
@@ -215,14 +220,19 @@ static inline int page_kills_ppro(unsigned long pagenr)
 pte_t *kmap_pte;
 pgprot_t kmap_prot;
 
-#define kmap_get_fixmap_pte(vaddr)					\
-	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
+static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
+{
+	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
+			vaddr), vaddr), vaddr);
+}
 
 static void __init kmap_init(void)
 {
 	unsigned long kmap_vstart;
 
-	/* cache the first kmap pte */
+	/*
+	 * Cache the first kmap pte:
+	 */
 	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
 	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
 
@@ -231,11 +241,11 @@ static void __init kmap_init(void)
 
 static void __init permanent_kmaps_init(pgd_t *pgd_base)
 {
+	unsigned long vaddr;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
-	unsigned long vaddr;
 
 	vaddr = PKMAP_BASE;
 	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
@@ -244,7 +254,7 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
 	pud = pud_offset(pgd, vaddr);
 	pmd = pmd_offset(pud, vaddr);
 	pte = pte_offset_kernel(pmd, vaddr);
-	pkmap_page_table = pte;	
+	pkmap_page_table = pte;
 }
 
 static void __meminit free_new_highpage(struct page *page)
@@ -263,7 +273,8 @@ void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
 		SetPageReserved(page);
 }
 
-static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
+static int __meminit
+add_one_highpage_hotplug(struct page *page, unsigned long pfn)
 {
 	free_new_highpage(page);
 	totalram_pages++;
@@ -271,6 +282,7 @@ static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long p
 	max_mapnr = max(pfn, max_mapnr);
 #endif
 	num_physpages++;
+
 	return 0;
 }
 
@@ -278,7 +290,7 @@ static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long p
  * Not currently handling the NUMA case.
  * Assuming single node and all memory that
  * has been added dynamically that would be
- * onlined here is in HIGHMEM
+ * onlined here is in HIGHMEM.
  */
 void __meminit online_page(struct page *page)
 {
@@ -286,13 +298,11 @@ void __meminit online_page(struct page *page)
 	add_one_highpage_hotplug(page, page_to_pfn(page));
 }
 
-
-#ifdef CONFIG_NUMA
-extern void set_highmem_pages_init(int);
-#else
+#ifndef CONFIG_NUMA
 static void __init set_highmem_pages_init(int bad_ppro)
 {
 	int pfn;
+
 	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
 		/*
 		 * Holes under sparsemem might not have no mem_map[]:
@@ -302,23 +312,18 @@ static void __init set_highmem_pages_init(int bad_ppro)
 	}
 	totalram_pages += totalhigh_pages;
 }
-#endif /* CONFIG_FLATMEM */
+#endif /* !CONFIG_NUMA */
 
 #else
-#define kmap_init() do { } while (0)
-#define permanent_kmaps_init(pgd_base) do { } while (0)
-#define set_highmem_pages_init(bad_ppro) do { } while (0)
+# define kmap_init()				do { } while (0)
+# define permanent_kmaps_init(pgd_base)		do { } while (0)
+# define set_highmem_pages_init(bad_ppro)	do { } while (0)
 #endif /* CONFIG_HIGHMEM */
 
 pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
 EXPORT_SYMBOL(__PAGE_KERNEL);
-pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
 
-#ifdef CONFIG_NUMA
-extern void __init remap_numa_kva(void);
-#else
-#define remap_numa_kva() do {} while (0)
-#endif
+pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
 
 void __init native_pagetable_setup_start(pgd_t *base)
 {
@@ -382,10 +387,10 @@ void __init native_pagetable_setup_done(pgd_t *base)
  * be partially populated, and so it avoids stomping on any existing
  * mappings.
  */
-static void __init pagetable_init (void)
+static void __init pagetable_init(void)
 {
-	unsigned long vaddr, end;
 	pgd_t *pgd_base = swapper_pg_dir;
+	unsigned long vaddr, end;
 
 	paravirt_pagetable_setup_start(pgd_base);
 
@@ -424,7 +429,7 @@ static void __init pagetable_init (void)
  * driver might have split up a kernel 4MB mapping.
  */
 char __nosavedata swsusp_pg_dir[PAGE_SIZE]
-	__attribute__ ((aligned (PAGE_SIZE)));
+	__attribute__ ((aligned(PAGE_SIZE)));
 
 static inline void save_pg_dir(void)
 {
@@ -436,7 +441,7 @@ static inline void save_pg_dir(void)
 }
 #endif
 
-void zap_low_mappings (void)
+void zap_low_mappings(void)
 {
 	int i;
 
@@ -448,23 +453,24 @@ void zap_low_mappings (void)
 	 * Note that "pgd_clear()" doesn't do it for
 	 * us, because pgd_clear() is a no-op on i386.
 	 */
-	for (i = 0; i < USER_PTRS_PER_PGD; i++)
+	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
 #ifdef CONFIG_X86_PAE
 		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
 #else
 		set_pgd(swapper_pg_dir+i, __pgd(0));
 #endif
+	}
 	flush_tlb_all();
 }
 
-int nx_enabled = 0;
+int nx_enabled;
 
 pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 #ifdef CONFIG_X86_PAE
 
-static int disable_nx __initdata = 0;
+static int disable_nx __initdata;
 
 /*
  * noexec = on|off
@@ -481,11 +487,14 @@ static int __init noexec_setup(char *str)
 			__supported_pte_mask |= _PAGE_NX;
 			disable_nx = 0;
 		}
-	} else if (!strcmp(str,"off")) {
-		disable_nx = 1;
-		__supported_pte_mask &= ~_PAGE_NX;
-	} else
-		return -EINVAL;
+	} else {
+		if (!strcmp(str, "off")) {
+			disable_nx = 1;
+			__supported_pte_mask &= ~_PAGE_NX;
+		} else {
+			return -EINVAL;
+		}
+	}
 
 	return 0;
 }
@@ -497,6 +506,7 @@ static void __init set_nx(void)
 
 	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
 		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
+
 		if ((v[3] & (1 << 20)) && !disable_nx) {
 			rdmsr(MSR_EFER, l, h);
 			l |= EFER_NX;
@@ -506,7 +516,6 @@ static void __init set_nx(void)
 		}
 	}
 }
-
 #endif
 
 /*
@@ -523,7 +532,6 @@ void __init paging_init(void)
 	if (nx_enabled)
 		printk("NX (Execute Disable) protection: active\n");
 #endif
-
 	pagetable_init();
 
 	load_cr3(swapper_pg_dir);
@@ -547,7 +555,6 @@ void __init paging_init(void)
  * used to involve black magic jumps to work around some nasty CPU bugs,
  * but fortunately the switch to using exceptions got rid of all that.
  */
-
 static void __init test_wp_bit(void)
 {
 	printk("Checking if this processor honours the WP bit even in supervisor mode... ");
@@ -567,19 +574,16 @@ static void __init test_wp_bit(void)
 	}
 }
 
-static struct kcore_list kcore_mem, kcore_vmalloc; 
+static struct kcore_list kcore_mem, kcore_vmalloc;
 
 void __init mem_init(void)
 {
-	extern int ppro_with_ram_bug(void);
 	int codesize, reservedpages, datasize, initsize;
-	int tmp;
-	int bad_ppro;
+	int tmp, bad_ppro;
 
 #ifdef CONFIG_FLATMEM
 	BUG_ON(!mem_map);
 #endif
-	
 	bad_ppro = ppro_with_ram_bug();
 
 #ifdef CONFIG_HIGHMEM
@@ -591,14 +595,13 @@ void __init mem_init(void)
 		BUG();
 	}
 #endif
- 
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem();
 
 	reservedpages = 0;
 	for (tmp = 0; tmp < max_low_pfn; tmp++)
 		/*
-		 * Only count reserved RAM pages
+		 * Only count reserved RAM pages:
 		 */
 		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
 			reservedpages++;
@@ -609,11 +612,12 @@ void __init mem_init(void)
 	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
 
-	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
-	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
+	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
 		   VMALLOC_END-VMALLOC_START);
 
-	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
+			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
 		num_physpages << (PAGE_SHIFT-10),
 		codesize >> 10,
@@ -625,44 +629,45 @@ void __init mem_init(void)
 
 #if 1 /* double-sanity-check paranoia */
 	printk("virtual kernel memory layout:\n"
-	       "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_HIGHMEM
-	       "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #endif
-	       "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-	       "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-	       "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-	       "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-	       "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
-	       FIXADDR_START, FIXADDR_TOP,
-	       (FIXADDR_TOP - FIXADDR_START) >> 10,
+		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
+		FIXADDR_START, FIXADDR_TOP,
+		(FIXADDR_TOP - FIXADDR_START) >> 10,
 
 #ifdef CONFIG_HIGHMEM
-	       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
-	       (LAST_PKMAP*PAGE_SIZE) >> 10,
+		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+		(LAST_PKMAP*PAGE_SIZE) >> 10,
 #endif
 
-	       VMALLOC_START, VMALLOC_END,
-	       (VMALLOC_END - VMALLOC_START) >> 20,
+		VMALLOC_START, VMALLOC_END,
+		(VMALLOC_END - VMALLOC_START) >> 20,
 
-	       (unsigned long)__va(0), (unsigned long)high_memory,
-	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
+		(unsigned long)__va(0), (unsigned long)high_memory,
+		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
 
-	       (unsigned long)&__init_begin, (unsigned long)&__init_end,
-	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
+		(unsigned long)&__init_begin, (unsigned long)&__init_end,
+		((unsigned long)&__init_end -
+		 (unsigned long)&__init_begin) >> 10,
 
-	       (unsigned long)&_etext, (unsigned long)&_edata,
-	       ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+		(unsigned long)&_etext, (unsigned long)&_edata,
+		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
 
-	       (unsigned long)&_text, (unsigned long)&_etext,
-	       ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+		(unsigned long)&_text, (unsigned long)&_etext,
+		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
 
 #ifdef CONFIG_HIGHMEM
-	BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
-	BUG_ON(VMALLOC_END                     > PKMAP_BASE);
+	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
+	BUG_ON(VMALLOC_END				> PKMAP_BASE);
 #endif
-	BUG_ON(VMALLOC_START                   > VMALLOC_END);
-	BUG_ON((unsigned long)high_memory      > VMALLOC_START);
+	BUG_ON(VMALLOC_START				> VMALLOC_END);
+	BUG_ON((unsigned long)high_memory		> VMALLOC_START);
 #endif /* double-sanity-check paranoia */
 
 #ifdef CONFIG_X86_PAE
@@ -693,45 +698,45 @@ int arch_add_memory(int nid, u64 start, u64 size)
 
 	return __add_pages(zone, start_pfn, nr_pages);
 }
-
 #endif
 
 struct kmem_cache *pmd_cache;
 
 void __init pgtable_cache_init(void)
 {
-	if (PTRS_PER_PMD > 1)
+	if (PTRS_PER_PMD > 1) {
 		pmd_cache = kmem_cache_create("pmd",
 					      PTRS_PER_PMD*sizeof(pmd_t),
 					      PTRS_PER_PMD*sizeof(pmd_t),
 					      SLAB_PANIC,
 					      pmd_ctor);
+	}
 }
 
 /*
  * This function cannot be __init, since exceptions don't work in that
  * section.  Put this after the callers, so that it cannot be inlined.
  */
-static int noinline do_test_wp_bit(void)
+static noinline int do_test_wp_bit(void)
 {
 	char tmp_reg;
 	int flag;
 
 	__asm__ __volatile__(
-		"	movb %0,%1	\n"
-		"1:	movb %1,%0	\n"
-		"	xorl %2,%2	\n"
+		"	movb %0, %1	\n"
+		"1:	movb %1, %0	\n"
+		"	xorl %2, %2	\n"
 		"2:			\n"
-		".section __ex_table,\"a\"\n"
+		".section __ex_table, \"a\"\n"
 		"	.align 4	\n"
-		"	.long 1b,2b	\n"
+		"	.long 1b, 2b	\n"
 		".previous		\n"
 		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
 		 "=q" (tmp_reg),
 		 "=r" (flag)
 		:"2" (1)
 		:"memory");
-	
+
 	return flag;
 }
 
@@ -824,4 +829,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	free_init_pages("initrd memory", start, end);
 }
 #endif
-
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h
index aac8317420af..3fcc30dc0731 100644
--- a/include/asm-x86/bugs.h
+++ b/include/asm-x86/bugs.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_BUGS_H
 #define _ASM_X86_BUGS_H
 
-void check_bugs(void);
+extern void check_bugs(void);
+extern int ppro_with_ram_bug(void);
 
 #endif /* _ASM_X86_BUGS_H */
diff --git a/include/asm-x86/numa_32.h b/include/asm-x86/numa_32.h
index 96fcb157db1d..03d0f7a9bf02 100644
--- a/include/asm-x86/numa_32.h
+++ b/include/asm-x86/numa_32.h
@@ -1,3 +1,15 @@
+#ifndef _ASM_X86_32_NUMA_H
+#define _ASM_X86_32_NUMA_H 1
 
-int pxm_to_nid(int pxm);
+extern int pxm_to_nid(int pxm);
 
+#ifdef CONFIG_NUMA
+extern void __init remap_numa_kva(void);
+extern void set_highmem_pages_init(int);
+#else
+static inline void remap_numa_kva(void)
+{
+}
+#endif
+
+#endif /* _ASM_X86_32_NUMA_H */
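
One recurring pattern in this cleanup: open-coded checks collapse into
BUG_ON(), which states the invariant in a single line. The change in
one_md_table_init() above, in isolation:

	/* before */
	if (pmd_table != pmd_offset(pud, 0))
		BUG();

	/* after */
	BUG_ON(pmd_table != pmd_offset(pud, 0));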

commit 10f22dde556d1ed41d55355d1fb8ad495f9810c8
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:10 2008 +0100

    x86: arch/x86/mm/init_64.c printk fixes
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index bb732bb79b4a..a95272644591 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -46,10 +46,6 @@
 #include <asm/kdebug.h>
 #include <asm/numa.h>
 
-#ifndef Dprintk
-# define Dprintk(x...)
-#endif
-
 const struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
@@ -119,7 +115,7 @@ static __init void *spp_getpage(void)
 			after_bootmem ? "after bootmem" : "");
 	}
 
-	Dprintk("spp_getpage %p\n", ptr);
+	pr_debug("spp_getpage %p\n", ptr);
 
 	return ptr;
 }
@@ -132,11 +128,12 @@ set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
 	pmd_t *pmd;
 	pte_t *pte, new_pte;
 
-	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
+	pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);
 
 	pgd = pgd_offset_k(vaddr);
 	if (pgd_none(*pgd)) {
-		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
+		printk(KERN_ERR
+			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
 		return;
 	}
 	pud = pud_offset(pgd, vaddr);
@@ -144,7 +141,7 @@ set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
 		pmd = (pmd_t *) spp_getpage();
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
 		if (pmd != pmd_offset(pud, 0)) {
-			printk("PAGETABLE BUG #01! %p <-> %p\n",
+			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
 				pmd, pmd_offset(pud, 0));
 			return;
 		}
@@ -154,7 +151,7 @@ set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
 		pte = (pte_t *) spp_getpage();
 		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
 		if (pte != pte_offset_kernel(pmd, 0)) {
-			printk("PAGETABLE BUG #02!\n");
+			printk(KERN_ERR "PAGETABLE BUG #02!\n");
 			return;
 		}
 	}
@@ -180,7 +177,7 @@ __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 	unsigned long address = __fix_to_virt(idx);
 
 	if (idx >= __end_of_fixed_addresses) {
-		printk("Invalid __set_fixmap\n");
+		printk(KERN_ERR "Invalid __set_fixmap\n");
 		return;
 	}
 	set_pte_phys(address, phys, prot);
@@ -246,7 +243,7 @@ __meminit void *early_ioremap(unsigned long addr, unsigned long size)
 continue_outer_loop:
 		;
 	}
-	printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
+	printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);
 
 	return NULL;
 }
@@ -378,7 +375,7 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 {
 	unsigned long next;
 
-	Dprintk("init_memory_mapping\n");
+	pr_debug("init_memory_mapping\n");
 
 	/*
 	 * Find space for the kernel direct mapping tables.
@@ -506,8 +503,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	init_memory_mapping(start, start + size-1);
 
 	ret = __add_pages(zone, start_pfn, nr_pages);
-	if (ret)
-		printk("%s: Problem encountered in __add_pages!\n", __func__);
+	WARN_ON(1);
 
 	return ret;
 }
@@ -567,7 +563,7 @@ void __init mem_init(void)
 	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
 				 VSYSCALL_END - VSYSCALL_START);
 
-	printk("Memory: %luk/%luk available (%ldk kernel code, "
+	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
 				"%ldk reserved, %ldk data, %ldk init)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
 		end_pfn << (PAGE_SHIFT-10),
@@ -646,10 +642,10 @@ void mark_rodata_ro(void)
 	rodata_test();
 
 #ifdef CONFIG_CPA_DEBUG
-	printk("Testing CPA: undo %lx-%lx\n", start, end);
+	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
 	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
 
-	printk("Testing CPA: again\n");
+	printk(KERN_INFO "Testing CPA: again\n");
 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
 }
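
pr_debug() replaces the file-local Dprintk() wrapper: it expands to
printk(KERN_DEBUG ...) when DEBUG is defined and compiles away otherwise,
so call sites keep their messages without carrying a private macro. The
equivalence, sketched (hypothetical function name):

	/* old pattern, removed by the patch: */
	#ifndef Dprintk
	# define Dprintk(x...)
	#endif

	static void spp_trace_sketch(void *ptr)
	{
		Dprintk("spp_getpage %p\n", ptr);	/* old */
		pr_debug("spp_getpage %p\n", ptr);	/* new */
	}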

commit 9af993a92623e022c176459fa6607a564b9a7eaf
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:09 2008 +0100

    x86: make ioremap() UC by default
    
    Yes! A mere 120 c_p_a() fixing and rewriting patches later,
    we are now confident that we can enable UC by default for
    ioremap(), on x86 too.
    
    Every other architecture was doing this already. Doing so
    makes Linux more robust against MTRR mixups (which might go
    unnoticed if BIOS writers test other OSes only - where PAT
    might override bad MTRR defaults).
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
index ee7a5c955962..f64a59cc396d 100644
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -166,7 +166,7 @@ extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
  */
 static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
-	return ioremap_cache(offset, size);
+	return ioremap_nocache(offset, size);
 }
 
 extern void iounmap(volatile void __iomem *addr);
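
The visible change for drivers: a plain ioremap() is now uncacheable by
default, and a cached mapping has to be requested explicitly. A sketch of
the two call sites (hypothetical function, placeholder arguments):

	static void ioremap_choice_sketch(unsigned long phys_addr,
					  unsigned long size)
	{
		/* default is now uncached - safe for device registers
		 * even when the BIOS left bad MTRR defaults behind: */
		void __iomem *regs = ioremap(phys_addr, size);

		/* a cached mapping must be asked for explicitly: */
		void __iomem *cbuf = ioremap_cache(phys_addr, size);

		iounmap(cbuf);
		iounmap(regs);
	}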

commit 86f03989d99cfa2e1216cdd7aa996852236909cf
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:09 2008 +0100

    x86: cpa: fix the self-test
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 2d0bd33b73aa..2e1e3af28c3a 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -40,7 +40,7 @@ comment "Page alloc debug is incompatible with Software Suspend on i386"
 
 config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && X86_32
 	help
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8d7f723cfc28..8ed5c189d7aa 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -781,8 +781,6 @@ void mark_rodata_ro(void)
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-	unsigned long addr;
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	/*
 	 * If debugging page accesses then do not free this memory but
@@ -793,6 +791,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		begin, PAGE_ALIGN(end));
 	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
+	unsigned long addr;
+
 	/*
 	 * We just marked the kernel text read only above, now that
 	 * we are going to free part of that, we need to make that
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e0c1e98ad1bf..8a7b725ce3c7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -569,22 +569,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		free_page(addr);
 		totalram_pages++;
 	}
-#ifdef CONFIG_DEBUG_RODATA
-	/*
-	 * This will make the __init pages not present and
-	 * not executable, so that any attempt to use a
-	 * __init function from now on will fault immediately
-	 * rather than supriously later when memory gets reused.
-	 *
-	 * We only do this for DEBUG_RODATA to not break up the
-	 * 2Mb kernel mapping just for this debug feature.
-	 */
-	if (begin >= __START_KERNEL_map) {
-		set_memory_rw(begin, (end - begin)/PAGE_SIZE);
-		set_memory_np(begin, (end - begin)/PAGE_SIZE);
-		set_memory_nx(begin, (end - begin)/PAGE_SIZE);
-	}
-#endif
 #endif
 }
 
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 554820265b95..06353d43f72e 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -15,8 +15,7 @@
 #include <asm/kdebug.h>
 
 enum {
-	NTEST			= 400,
-	LOWEST_LEVEL		= PG_LEVEL_4K,
+	NTEST			= 4000,
 #ifdef CONFIG_X86_64
 	LPS			= (1 << PMD_SHIFT),
 #elif defined(CONFIG_X86_PAE)
@@ -59,10 +58,10 @@ static __init int print_split(struct split_state *s)
 			continue;
 		}
 
-		if (level == 2 && sizeof(long) == 8) {
+		if (level == PG_LEVEL_1G && sizeof(long) == 8) {
 			s->gpg++;
 			i += GPS/PAGE_SIZE;
-		} else if (level != LOWEST_LEVEL) {
+		} else if (level == PG_LEVEL_2M) {
 			if (!(pte_val(*pte) & _PAGE_PSE)) {
 				printk(KERN_ERR
 					"%lx level %d but not PSE %Lx\n",
@@ -162,7 +161,7 @@ static __init int exercise_pageattr(void)
 			continue;
 		}
 
-		err = __change_page_attr_clear(addr[i], len[i],
+		err = change_page_attr_clear(addr[i], len[i],
 					       __pgprot(_PAGE_GLOBAL));
 		if (err < 0) {
 			printk(KERN_ERR "CPA %d failed %d\n", i, err);
@@ -175,7 +174,7 @@ static __init int exercise_pageattr(void)
 				pte ? (u64)pte_val(*pte) : 0ULL);
 			failed++;
 		}
-		if (level != LOWEST_LEVEL) {
+		if (level != PG_LEVEL_4K) {
 			printk(KERN_ERR "CPA %lx: unexpected level %d\n",
 				addr[i], level);
 			failed++;
@@ -183,7 +182,6 @@ static __init int exercise_pageattr(void)
 
 	}
 	vfree(bm);
-	cpa_flush_all();
 
 	failed += print_split(&sb);
 
@@ -197,7 +195,7 @@ static __init int exercise_pageattr(void)
 			failed++;
 			continue;
 		}
-		err = __change_page_attr_set(addr[i], len[i],
+		err = change_page_attr_set(addr[i], len[i],
 					     __pgprot(_PAGE_GLOBAL));
 		if (err < 0) {
 			printk(KERN_ERR "CPA reverting failed: %d\n", err);
@@ -211,7 +209,6 @@ static __init int exercise_pageattr(void)
 		}
 
 	}
-	cpa_flush_all();
 
 	failed += print_split(&sc);
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 97ec9e7d29d9..532a40bc0e7e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -197,10 +197,11 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	unsigned long addr;
 	pte_t *pbase, *tmp;
 	struct page *base;
-	int i, level;
+	unsigned int i, level;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-	gfp_flags = GFP_ATOMIC;
+	gfp_flags = __GFP_HIGH | __GFP_NOFAIL | __GFP_NOWARN;
+	gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
 #endif
 	base = alloc_pages(gfp_flags, 0);
 	if (!base)
@@ -224,6 +225,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
 #endif
 
+	pgprot_val(ref_prot) &= ~_PAGE_NX;
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
 		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
 
@@ -248,7 +250,8 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 }
 
 static int
-__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
+__change_page_attr(unsigned long address, unsigned long pfn,
+		   pgprot_t mask_set, pgprot_t mask_clr)
 {
 	struct page *kpte_page;
 	int level, err = 0;
@@ -267,15 +270,20 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
 	BUG_ON(PageLRU(kpte_page));
 	BUG_ON(PageCompound(kpte_page));
 
-	prot = static_protections(prot, address);
-
 	if (level == PG_LEVEL_4K) {
-		WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
-		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
-	} else {
-		/* Clear the PSE bit for the 4k level pages ! */
-		pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;
+		pgprot_t new_prot = pte_pgprot(*kpte);
+		pte_t new_pte, old_pte = *kpte;
+
+		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
+		pgprot_val(new_prot) |= pgprot_val(mask_set);
+
+		new_prot = static_protections(new_prot, address);
+
+		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
+		BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte));
 
+		set_pte_atomic(kpte, new_pte);
+	} else {
 		err = split_large_page(kpte, address);
 		if (!err)
 			goto repeat;
@@ -297,22 +305,26 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
  * Modules and drivers should use the set_memory_* APIs instead.
  */
 
-static int change_page_attr_addr(unsigned long address, pgprot_t prot)
+static int
+change_page_attr_addr(unsigned long address, pgprot_t mask_set,
+							pgprot_t mask_clr)
 {
 	int err = 0, kernel_map = 0;
-	unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+	unsigned long pfn;
 
 #ifdef CONFIG_X86_64
 	if (address >= __START_KERNEL_map &&
 			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
 
-		address = (unsigned long)__va(__pa(address));
+		address = (unsigned long)__va(__pa((void *)address));
 		kernel_map = 1;
 	}
 #endif
 
-	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-		err = __change_page_attr(address, pfn, prot);
+	pfn = __pa(address) >> PAGE_SHIFT;
+
+	if (!kernel_map || 1) {
+		err = __change_page_attr(address, pfn, mask_set, mask_clr);
 		if (err)
 			return err;
 	}
@@ -324,12 +336,15 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot)
 	 */
 	if (__pa(address) < KERNEL_TEXT_SIZE) {
 		unsigned long addr2;
-		pgprot_t prot2;
 
-		addr2 = __START_KERNEL_map + __pa(address);
+		addr2 = __pa(address) + __START_KERNEL_map - phys_base;
 		/* Make sure the kernel mappings stay executable */
-		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
-		err = __change_page_attr(addr2, pfn, prot2);
+		pgprot_val(mask_clr) |= _PAGE_NX;
+		/*
+		 * Our high aliases are imprecise, so do not propagate
+		 * failures back to users:
+		 */
+		__change_page_attr(addr2, pfn, mask_set, mask_clr);
 	}
 #endif
 
@@ -339,26 +354,13 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot)
 static int __change_page_attr_set_clr(unsigned long addr, int numpages,
 				      pgprot_t mask_set, pgprot_t mask_clr)
 {
-	pgprot_t new_prot;
-	int level;
-	pte_t *pte;
-	int i, ret;
-
-	for (i = 0; i < numpages ; i++) {
-
-		pte = lookup_address(addr, &level);
-		if (!pte)
-			return -EINVAL;
-
-		new_prot = pte_pgprot(*pte);
-
-		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
-		pgprot_val(new_prot) |= pgprot_val(mask_set);
+	unsigned int i;
+	int ret;
 
-		ret = change_page_attr_addr(addr, new_prot);
+	for (i = 0; i < numpages ; i++, addr += PAGE_SIZE) {
+		ret = change_page_attr_addr(addr, mask_set, mask_clr);
 		if (ret)
 			return ret;
-		addr += PAGE_SIZE;
 	}
 
 	return 0;
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index ee40a88882f6..269e7e29ea8e 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -240,6 +240,7 @@ enum {
 	PG_LEVEL_NONE,
 	PG_LEVEL_4K,
 	PG_LEVEL_2M,
+	PG_LEVEL_1G,
 };
 
 /*
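
The interface change above replaces "write this pgprot" with "apply a set
mask and a clear mask to whatever is currently there", which is what lets
the self-test revert attributes reliably. The core update, reduced to plain
integers (the kernel wraps these in pgprot_t):

	static unsigned long apply_cpa_masks(unsigned long old_prot,
					     unsigned long mask_set,
					     unsigned long mask_clr)
	{
		unsigned long new_prot = old_prot;

		new_prot &= ~mask_clr;	/* drop the bits to be cleared */
		new_prot |= mask_set;	/* add the bits to be set */

		return new_prot;
	}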

commit aba8391f7323294e88e3a665513434aba4042a7d
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:09 2008 +0100

    x86: rodata config hookup
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 347e33e5f395..2d0bd33b73aa 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -68,6 +68,22 @@ config DEBUG_RODATA
 	  data. This is recommended so that we can catch kernel bugs sooner.
 	  If in doubt, say "Y".
 
+config DEBUG_RODATA_TEST
+	bool "Testcase for the DEBUG_RODATA feature"
+	depends on DEBUG_RODATA
+	help
+	  This option enables a testcase for the DEBUG_RODATA
+	  feature as well as for the change_page_attr() infrastructure.
+	  If in doubt, say "N"
+
+config DEBUG_NX_TEST
+	tristate "Testcase for the NX non-executable stack feature"
+	depends on DEBUG_KERNEL && m
+	help
+	  This option enables a testcase for the CPU NX capability
+	  and the software setup of this feature.
+	  If in doubt, say "N"
+
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
 	depends on DEBUG_KERNEL
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 5bdb0f0431e9..6f813009d44b 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -64,6 +64,8 @@ obj-$(CONFIG_HPET_TIMER) 	+= hpet.o
 
 obj-$(CONFIG_K8_NB)		+= k8.o
 obj-$(CONFIG_MGEODE_LX)		+= geode_32.o mfgpt_32.o
+obj-$(CONFIG_DEBUG_RODATA_TEST)	+= test_rodata.o
+obj-$(CONFIG_DEBUG_NX_TEST)	+= test_nx.o
 
 obj-$(CONFIG_VMI)		+= vmi_32.o vmiclock_32.o
 obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o

commit 8f46924600e30b140445f5b84abe9b80d2fff5fb
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:09 2008 +0100

    x86: enable CONFIG_DEBUG_PAGEALLOC more widely
    
    make CONFIG_DEBUG_PAGEALLOC universally available.
    
    CONFIG_HIBERNATION and CONFIG_HUGETLBFS were disabling it, for no
    particular reason.
    
    If there are any unfixed bugs here we'll fix them, but do not disable
    vital debugging facilities like that.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 2a859a7e2d74..347e33e5f395 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -40,7 +40,7 @@ comment "Page alloc debug is incompatible with Software Suspend on i386"
 
 config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
-	depends on DEBUG_KERNEL && !HIBERNATION && !HUGETLBFS
+	depends on DEBUG_KERNEL
 	help
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types

commit ee01f1122ceb02a2c9b7142c5dd17b49e59ba774
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:09 2008 +0100

    x86: init memory debugging
    
    debug incorrect/late access to init memory, by permanently unmapping
    the init memory ranges. Depends on CONFIG_DEBUG_PAGEALLOC=y.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index a72737c05747..8d7f723cfc28 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -783,6 +783,16 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
 	unsigned long addr;
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/*
+	 * If debugging page accesses then do not free this memory but
+	 * mark them not present - any buggy init-section access will
+	 * create a kernel page fault:
+	 */
+	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
+		begin, PAGE_ALIGN(end));
+	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
+#else
 	/*
 	 * We just marked the kernel text read only above, now that
 	 * we are going to free part of that, we need to make that
@@ -798,6 +808,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		totalram_pages++;
 	}
 	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
+#endif
 }
 
 void free_initmem(void)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index f51180c02b8f..e0c1e98ad1bf 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -550,6 +550,16 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	if (begin >= end)
 		return;
 
+	/*
+	 * If debugging page accesses then do not free this memory but
+	 * mark them not present - any buggy init-section access will
+	 * create a kernel page fault:
+	 */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
+		begin, PAGE_ALIGN(end));
+	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
+#else
 	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
@@ -575,6 +585,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		set_memory_nx(begin, (end - begin)/PAGE_SIZE);
 	}
 #endif
+#endif
 }
 
 void free_initmem(void)
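
Condensed, the control flow this adds to free_init_pages() on both 32-bit
and 64-bit (a sketch; messages and the 64-bit poisoning details trimmed):

	static void free_init_pages_sketch(unsigned long begin,
					   unsigned long end)
	{
	#ifdef CONFIG_DEBUG_PAGEALLOC
		/* keep the pages but unmap them, so any stale
		 * init-section access faults immediately: */
		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
	#else
		unsigned long addr;

		/* normal case: return the init pages to the allocator */
		for (addr = begin; addr < end; addr += PAGE_SIZE) {
			ClearPageReserved(virt_to_page(addr));
			free_page(addr);
			totalram_pages++;
		}
	#endif
	}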