Patches contributed by Eötvös Loránd University


commit 4c61afcdb2cd4be299c1442b33adf312b695e2d7
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:09 2008 +0100

    x86: fix clflush_page_range logic
    
    only present ptes must be flushed.
    
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bbfc8e2466ab..97ec9e7d29d9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -26,7 +26,6 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * Flushing functions
  */
 
-
 /**
  * clflush_cache_range - flush a cache range with clflush
  * @addr:	virtual start address
@@ -35,13 +34,19 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * clflush is an unordered instruction which needs fencing with mfence
  * to avoid ordering issues.
  */
-void clflush_cache_range(void *addr, int size)
+void clflush_cache_range(void *vaddr, unsigned int size)
 {
-	int i;
+	void *vend = vaddr + size - 1;
 
 	mb();
-	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-		clflush(addr+i);
+
+	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
+		clflush(vaddr);
+	/*
+	 * Flush any possible final partial cacheline:
+	 */
+	clflush(vend);
+
 	mb();
 }
 
@@ -74,9 +79,13 @@ static void __cpa_flush_range(void *arg)
 	__flush_tlb_all();
 }
 
-static void cpa_flush_range(unsigned long addr, int numpages)
+static void cpa_flush_range(unsigned long start, int numpages)
 {
+	unsigned int i, level;
+	unsigned long addr;
+
 	BUG_ON(irqs_disabled());
+	WARN_ON(PAGE_ALIGN(start) != start);
 
 	on_each_cpu(__cpa_flush_range, NULL, 1, 1);
 
@@ -86,7 +95,15 @@ static void cpa_flush_range(unsigned long addr, int numpages)
 	 * will cause all other CPUs to flush the same
 	 * cachelines:
 	 */
-	clflush_cache_range((void *) addr, numpages * PAGE_SIZE);
+	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
+		pte_t *pte = lookup_address(addr, &level);
+
+		/*
+		 * Only flush present addresses:
+		 */
+		if (pte && pte_present(*pte))
+			clflush_cache_range((void *) addr, PAGE_SIZE);
+	}
 }
 
 /*
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 3e74aff90809..8dd8c5e3cc7f 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -42,7 +42,7 @@ int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 
-void clflush_cache_range(void *addr, int size);
+void clflush_cache_range(void *addr, unsigned int size);
 
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);

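The loop rewrite above strides through the range in clflush-sized steps and then explicitly touches the last byte, so a size that is not a multiple of the cacheline size still gets its final partial line flushed. A minimal standalone model of that arithmetic is sketched below; the 64-byte line size and the flush() stand-in (which only prints the line it would hit rather than executing clflush) are assumptions for illustration, not kernel code:

    #include <stdio.h>
    #include <stdint.h>

    #define CLFLUSH_SIZE 64   /* assumed cacheline / clflush stride */

    /* stand-in for clflush: just report which cacheline would be flushed */
    static void flush(uintptr_t addr)
    {
        printf("flush line at %#lx\n",
               (unsigned long)(addr & ~(uintptr_t)(CLFLUSH_SIZE - 1)));
    }

    /* mirrors the structure of the patched clflush_cache_range() loop */
    static void model_clflush_cache_range(uintptr_t vaddr, unsigned int size)
    {
        uintptr_t vend = vaddr + size - 1;

        for (; vaddr < vend; vaddr += CLFLUSH_SIZE)
            flush(vaddr);
        /* flush any possible final partial cacheline */
        flush(vend);
    }

    int main(void)
    {
        /* 65 bytes from an aligned start: the loop alone only hits the
         * line at 0x1000; the explicit flush of the last byte catches
         * the spill into the line at 0x1040 */
        model_clflush_cache_range(0x1000, 65);
        return 0;
    }
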
commit adafdf6a4e45f2d1051e10aebe13025e89dbdf6d
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:08 2008 +0100

    x86: ioremap KERN_INFO
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 6a9a1418bc98..ac9ab20d8092 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -293,7 +293,7 @@ void __init early_ioremap_init(void)
 	unsigned long *pgd;
 
 	if (early_ioremap_debug)
-		printk(KERN_DEBUG "early_ioremap_init()\n");
+		printk(KERN_INFO "early_ioremap_init()\n");
 
 	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
 	*pgd = __pa(bm_pte) | _PAGE_TABLE;
@@ -322,7 +322,7 @@ void __init early_ioremap_clear(void)
 	unsigned long *pgd;
 
 	if (early_ioremap_debug)
-		printk(KERN_DEBUG "early_ioremap_clear()\n");
+		printk(KERN_INFO "early_ioremap_clear()\n");
 
 	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
 	*pgd = 0;
@@ -408,7 +408,7 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 
 	nesting = early_ioremap_nested;
 	if (early_ioremap_debug) {
-		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
+		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
 		       phys_addr, size, nesting);
 		dump_stack();
 	}
@@ -470,7 +470,7 @@ void __init early_iounmap(void *addr, unsigned long size)
 	WARN_ON(nesting < 0);
 
 	if (early_ioremap_debug) {
-		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
+		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
 		       size, nesting);
 		dump_stack();
 	}

commit 4692a1450b4d1000a942022b088c8791749dd65e
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:07 2008 +0100

    x86: cpa: fix loop
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a2ee317548f2..5cfc0d4ade56 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -304,7 +304,7 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot)
 static int change_page_attr_set(unsigned long addr, int numpages,
 								pgprot_t prot)
 {
-	pgprot_t current_prot;
+	pgprot_t current_prot, new_prot;
 	int level;
 	pte_t *pte;
 	int i, ret;
@@ -317,9 +317,10 @@ static int change_page_attr_set(unsigned long addr, int numpages,
 		else
 			pgprot_val(current_prot) = 0;
 
-		pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);
+		pgprot_val(new_prot) =
+			pgprot_val(current_prot) | pgprot_val(prot);
 
-		ret = change_page_attr_addr(addr, prot);
+		ret = change_page_attr_addr(addr, new_prot);
 		if (ret)
 			return ret;
 		addr += PAGE_SIZE;
@@ -349,7 +350,7 @@ static int change_page_attr_set(unsigned long addr, int numpages,
 static int change_page_attr_clear(unsigned long addr, int numpages,
 								pgprot_t prot)
 {
-	pgprot_t current_prot;
+	pgprot_t current_prot, new_prot;
 	int level;
 	pte_t *pte;
 	int i, ret;
@@ -361,10 +362,10 @@ static int change_page_attr_clear(unsigned long addr, int numpages,
 		else
 			pgprot_val(current_prot) = 0;
 
-		pgprot_val(prot) =
+		pgprot_val(new_prot) =
 				pgprot_val(current_prot) & ~pgprot_val(prot);
 
-		ret = change_page_attr_addr(addr, prot);
+		ret = change_page_attr_addr(addr, new_prot);
 		if (ret)
 			return ret;
 		addr += PAGE_SIZE;

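The bug fixed here is that the loop OR-ed (or AND-NOT-ed) the caller's prot argument in place, so protection bits picked up from one page's current PTE leaked into every later iteration; the fix computes a fresh new_prot per page. A standalone sketch of the before/after behaviour, using plain unsigned long masks instead of pgprot_t and made-up flag values, is below:

    #include <stdio.h>

    #define NPAGES 3

    int main(void)
    {
        /* pretend current protections of three consecutive pages */
        unsigned long current_prot[NPAGES] = { 0x001, 0x003, 0x001 };
        unsigned long set_mask = 0x100;   /* bit the caller asked to set */
        unsigned long prot, new_prot;
        int i;

        /* buggy pattern: OR into the argument itself, so bits accumulate */
        prot = set_mask;
        for (i = 0; i < NPAGES; i++) {
            prot = current_prot[i] | prot;
            printf("buggy: page %d gets %#lx\n", i, prot);
        }

        /* fixed pattern: compute a fresh new_prot for every page */
        for (i = 0; i < NPAGES; i++) {
            new_prot = current_prot[i] | set_mask;
            printf("fixed: page %d gets %#lx\n", i, new_prot);
        }
        return 0;
    }

In the buggy variant the third page ends up with 0x103 instead of 0x101, inheriting a bit that only the second page actually had.
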
commit 5398f9854f60d670e8ef1ea08c0e0310f253eeb1
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:07 2008 +0100

    x86: remove flush_agp_mappings()
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 832ded20fe70..2720882e66fe 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -147,7 +147,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 			printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
 			return -ENOMEM;
 		}
-		flush_agp_mappings();
 
 		bridge->scratch_page_real = virt_to_gart(addr);
 		bridge->scratch_page =
@@ -191,7 +190,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 	if (bridge->driver->needs_scratch_page) {
 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
 						 AGP_PAGE_DESTROY_UNMAP);
-		flush_agp_mappings();
 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
 						 AGP_PAGE_DESTROY_FREE);
 	}
@@ -219,7 +217,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
 	    bridge->driver->needs_scratch_page) {
 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
 						 AGP_PAGE_DESTROY_UNMAP);
-		flush_agp_mappings();
 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
 						 AGP_PAGE_DESTROY_FREE);
 	}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 64b2f6d7059d..1a4674ce0c71 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -197,7 +197,6 @@ void agp_free_memory(struct agp_memory *curr)
 		for (i = 0; i < curr->page_count; i++) {
 			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_UNMAP);
 		}
-		flush_agp_mappings();
 		for (i = 0; i < curr->page_count; i++) {
 			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_FREE);
 		}
@@ -267,8 +266,6 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
 	}
 	new->bridge = bridge;
 
-	flush_agp_mappings();
-
 	return new;
 }
 EXPORT_SYMBOL(agp_allocate_memory);
diff --git a/include/asm-alpha/agp.h b/include/asm-alpha/agp.h
index ef855a3bc0f5..26c179135293 100644
--- a/include/asm-alpha/agp.h
+++ b/include/asm-alpha/agp.h
@@ -7,7 +7,6 @@
 
 #define map_page_into_agp(page) 
 #define unmap_page_from_agp(page) 
-#define flush_agp_mappings() 
 #define flush_agp_cache() mb()
 
 /* Convert a physical address to an address suitable for the GART. */
diff --git a/include/asm-ia64/agp.h b/include/asm-ia64/agp.h
index 4e517f0e6afa..c11fdd8ab4d7 100644
--- a/include/asm-ia64/agp.h
+++ b/include/asm-ia64/agp.h
@@ -15,7 +15,6 @@
  */
 #define map_page_into_agp(page)		/* nothing */
 #define unmap_page_from_agp(page)	/* nothing */
-#define flush_agp_mappings()		/* nothing */
 #define flush_agp_cache()		mb()
 
 /* Convert a physical address to an address suitable for the GART. */
diff --git a/include/asm-parisc/agp.h b/include/asm-parisc/agp.h
index 9f61d4eb6c01..9651660da639 100644
--- a/include/asm-parisc/agp.h
+++ b/include/asm-parisc/agp.h
@@ -9,7 +9,6 @@
 
 #define map_page_into_agp(page)		/* nothing */
 #define unmap_page_from_agp(page)	/* nothing */
-#define flush_agp_mappings()		/* nothing */
 #define flush_agp_cache()		mb()
 
 /* Convert a physical address to an address suitable for the GART. */
diff --git a/include/asm-powerpc/agp.h b/include/asm-powerpc/agp.h
index e5ccaca2f5a4..86455c4c31ee 100644
--- a/include/asm-powerpc/agp.h
+++ b/include/asm-powerpc/agp.h
@@ -6,7 +6,6 @@
 
 #define map_page_into_agp(page)
 #define unmap_page_from_agp(page)
-#define flush_agp_mappings()
 #define flush_agp_cache() mb()
 
 /* Convert a physical address to an address suitable for the GART. */
diff --git a/include/asm-sparc64/agp.h b/include/asm-sparc64/agp.h
index 58f8cb6ae767..e9fcf0e781ea 100644
--- a/include/asm-sparc64/agp.h
+++ b/include/asm-sparc64/agp.h
@@ -5,7 +5,6 @@
 
 #define map_page_into_agp(page) 
 #define unmap_page_from_agp(page) 
-#define flush_agp_mappings() 
 #define flush_agp_cache() mb()
 
 /* Convert a physical address to an address suitable for the GART. */
diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h
index 0c309b9a5217..e4004a9f6a9a 100644
--- a/include/asm-x86/agp.h
+++ b/include/asm-x86/agp.h
@@ -14,7 +14,6 @@
 
 #define map_page_into_agp(page) set_pages_uc(page, 1)
 #define unmap_page_from_agp(page) set_pages_wb(page, 1)
-#define flush_agp_mappings() do { } while (0)
 
 /*
  * Could use CLFLUSH here if the cpu supports it. But then it would

commit f62d0f008e889915c93631c04d4c7d871f05bea7
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:07 2008 +0100

    x86: cpa: set_memory_notpresent()
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 05bb12db0b09..4757be7b5e55 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -559,8 +559,21 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		free_page(addr);
 		totalram_pages++;
 	}
-	if (addr > __START_KERNEL_map)
-		global_flush_tlb();
+#ifdef CONFIG_DEBUG_RODATA
+	/*
+	 * This will make the __init pages not present and
+	 * not executable, so that any attempt to use a
+	 * __init function from now on will fault immediately
+	 * rather than spuriously later when memory gets reused.
+	 *
+	 * We only do this for DEBUG_RODATA to not break up the
+	 * 2MB kernel mapping just for this debug feature.
+	 */
+	if (begin >= __START_KERNEL_map) {
+		set_memory_np(begin, (end - begin)/PAGE_SIZE);
+		set_memory_nx(begin, (end - begin)/PAGE_SIZE);
+	}
+#endif
 }
 
 void free_initmem(void)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index fcd96125c5ae..e5910ac37e59 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -357,8 +357,6 @@ int change_page_attr_clear(unsigned long addr, int numpages, pgprot_t prot)
 	return change_page_attr_addr(addr, numpages, prot);
 }
 
-
-
 int set_memory_uc(unsigned long addr, int numpages)
 {
 	pgprot_t uncached;
@@ -402,7 +400,6 @@ int set_memory_ro(unsigned long addr, int numpages)
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_clear(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_memory_ro);
 
 int set_memory_rw(unsigned long addr, int numpages)
 {
@@ -411,7 +408,14 @@ int set_memory_rw(unsigned long addr, int numpages)
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_set(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_memory_rw);
+
+int set_memory_np(unsigned long addr, int numpages)
+{
+	pgprot_t present;
+
+	pgprot_val(present) = _PAGE_PRESENT;
+	return change_page_attr_clear(addr, numpages, present);
+}
 
 int set_pages_uc(struct page *page, int numpages)
 {
@@ -461,7 +465,6 @@ int set_pages_ro(struct page *page, int numpages)
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_clear(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_pages_ro);
 
 int set_pages_rw(struct page *page, int numpages)
 {
@@ -471,8 +474,6 @@ int set_pages_rw(struct page *page, int numpages)
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_set(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_pages_rw);
-
 
 void clflush_cache_range(void *addr, int size)
 {
@@ -503,6 +504,20 @@ void global_flush_tlb(void)
 EXPORT_SYMBOL(global_flush_tlb);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
+
+static int __set_pages_p(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+	return change_page_attr_set(addr, numpages,
+				__pgprot(_PAGE_PRESENT | _PAGE_RW));
+}
+
+static int __set_pages_np(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
+}
+
 void kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (PageHighMem(page))
@@ -522,7 +537,10 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	 * The return value is ignored - the calls cannot fail,
 	 * large pages are disabled at boot time:
 	 */
-	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+	if (enable)
+		__set_pages_p(page, numpages);
+	else
+		__set_pages_np(page, numpages);
 
 	/*
 	 * We should perform an IPI and flush all tlbs,
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index e79159bc0987..a95afaf1240d 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -42,6 +42,7 @@ int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_np(unsigned long addr, int numpages);
 
 void clflush_cache_range(void *addr, int size);
 

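set_memory_np() and the new __set_pages_p()/__set_pages_np() helpers all reduce to setting or clearing _PAGE_PRESENT (plus _PAGE_RW when mapping pages back in) across a run of pages, which is what makes freed __init pages and DEBUG_PAGEALLOC pages fault on any later access. A standalone toy-page-table model of that flag toggling is sketched below; the set_pages_flags() helper and the one-word-per-page table are illustrative assumptions, only the two flag values match the real low x86 PTE bits:

    #include <stdio.h>

    #define _PAGE_PRESENT  0x001UL
    #define _PAGE_RW       0x002UL
    #define NPTES          4

    static unsigned long pte[NPTES];   /* toy page table: one word per page */

    /* models change_page_attr_set()/_clear() restricted to a flag mask */
    static void set_pages_flags(int first, int num, unsigned long mask, int set)
    {
        int i;

        for (i = first; i < first + num && i < NPTES; i++) {
            if (set)
                pte[i] |= mask;
            else
                pte[i] &= ~mask;
        }
    }

    int main(void)
    {
        int i;

        for (i = 0; i < NPTES; i++)
            pte[i] = _PAGE_PRESENT | _PAGE_RW;

        /* like __set_pages_np(page, 2): unmap two pages */
        set_pages_flags(1, 2, _PAGE_PRESENT, 0);

        for (i = 0; i < NPTES; i++)
            printf("pte[%d] = %#lx (%s)\n", i, pte[i],
                   (pte[i] & _PAGE_PRESENT) ? "mapped" : "faults on access");
        return 0;
    }
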
commit e81d5dc41b67349c06e80658227c9156738f0df1
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:06 2008 +0100

    x86: cpa: move clflush_cache_range()
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 00f6f341e291..4208571334db 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -9,14 +9,6 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 
-void clflush_cache_range(void *addr, int size)
-{
-	int i;
-
-	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-		clflush(addr+i);
-}
-
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
@@ -290,6 +282,14 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 }
 EXPORT_SYMBOL(change_page_attr);
 
+void clflush_cache_range(void *addr, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
+		clflush(addr+i);
+}
+
 static void flush_kernel_map(void *arg)
 {
 	/*

commit f316fe687521fad5ad2fd8389397c38aa97439d2
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:04 2008 +0100

    x86: cpa: make self-test depend on DEBUG_KERNEL
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 46d34b2db421..2a859a7e2d74 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -205,6 +205,7 @@ config DEBUG_BOOT_PARAMS
 
 config CPA_DEBUG
 	bool "CPA self test code"
+	depends on DEBUG_KERNEL
 	help
 	  Do change_page_attr self tests at boot.
 

commit d2e626f45cc450c00f5f98a89b8b4c4ac3c9bf5f
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:04 2008 +0100

    x86: add PAGE_KERNEL_EXEC_NOCACHE
    
    add PAGE_KERNEL_EXEC_NOCACHE.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 7aa34c8eb220..ee40a88882f6 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -79,6 +79,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 
 #define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
 #define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
+#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
 #define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
@@ -96,6 +97,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
 #define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
 #define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_EXEC_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE)
 #define PAGE_KERNEL_LARGE		MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
 #define PAGE_KERNEL_LARGE_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
 #define PAGE_KERNEL_VSYSCALL		MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)

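The new protection value is simply __PAGE_KERNEL_EXEC with _PAGE_PCD and _PAGE_PWT added, i.e. an executable kernel mapping that the CPU treats as uncached. A tiny standalone check of that bit composition is below; the makeup of __PAGE_KERNEL_EXEC is written out as an assumption (the usual present/RW/accessed/dirty/global bits), only the PCD/PWT values are taken from the x86 PTE layout:

    #include <stdio.h>

    /* x86 PTE attribute bits (low word) */
    #define _PAGE_PRESENT  0x001UL
    #define _PAGE_RW       0x002UL
    #define _PAGE_PWT      0x008UL   /* write-through */
    #define _PAGE_PCD      0x010UL   /* cache disable */
    #define _PAGE_ACCESSED 0x020UL
    #define _PAGE_DIRTY    0x040UL
    #define _PAGE_GLOBAL   0x100UL

    int main(void)
    {
        /* assumed stand-in for __PAGE_KERNEL_EXEC: kernel RW+X global mapping */
        unsigned long page_kernel_exec =
            _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |
            _PAGE_DIRTY | _PAGE_GLOBAL;

        /* the new definition: same mapping, but PCD|PWT force it uncached */
        unsigned long page_kernel_exec_nocache =
            page_kernel_exec | _PAGE_PCD | _PAGE_PWT;

        printf("__PAGE_KERNEL_EXEC         = %#lx\n", page_kernel_exec);
        printf("__PAGE_KERNEL_EXEC_NOCACHE = %#lx\n", page_kernel_exec_nocache);
        return 0;
    }
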
commit 8192206df093e8fc607b5072ce71a930d44f8638
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:04 2008 +0100

    x86: change cpa to pfn based
    
    change CPA to pfn based.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index effcd78d5f40..d18c41d752f3 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -3,6 +3,7 @@
  * Thanks to Ben LaHaise for precious feedback.
  */
 #include <linux/highmem.h>
+#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -144,13 +145,15 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 }
 
 static int
-__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
+__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
 {
 	struct page *kpte_page;
 	int level, err = 0;
 	pte_t *kpte;
 
-	BUG_ON(PageHighMem(page));
+#ifdef CONFIG_X86_32
+	BUG_ON(pfn > max_low_pfn);
+#endif
 
 repeat:
 	kpte = lookup_address(address, &level);
@@ -164,7 +167,7 @@ __change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
 	prot = check_exec(prot, address);
 
 	if (level == PG_LEVEL_4K) {
-		set_pte_atomic(kpte, mk_pte(page, canon_pgprot(prot)));
+		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
 	} else {
 		err = split_large_page(kpte, address);
 		if (!err)
@@ -203,7 +206,7 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
 
 		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-			err = __change_page_attr(address, pfn_to_page(pfn), prot);
+			err = __change_page_attr(address, pfn, prot);
 			if (err)
 				break;
 		}
@@ -219,7 +222,7 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 			addr2 = __START_KERNEL_map + __pa(address);
 			/* Make sure the kernel mappings stay executable */
 			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
-			err = __change_page_attr(addr2, pfn_to_page(pfn), prot2);
+			err = __change_page_attr(addr2, pfn, prot2);
 		}
 #endif
 	}

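The interesting part of this change is that __change_page_attr() no longer needs a struct page (and hence a mem_map entry) for the address being modified: the new PTE is built directly from the page frame number with pfn_pte() instead of from a page pointer with mk_pte(). A standalone sketch of what "pfn plus protection bits makes a PTE" means, using a toy 4 KB-page PTE layout rather than the real x86 encoding, is below:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12   /* 4 KB pages */

    /* toy pfn_pte(): frame number in the high bits, attribute flags below */
    static uint64_t toy_pfn_pte(uint64_t pfn, uint64_t prot)
    {
        return (pfn << PAGE_SHIFT) | prot;
    }

    int main(void)
    {
        /* old flow: address -> struct page -> mk_pte(page, prot)
         * new flow: address -> pfn -> pfn_pte(pfn, prot), no struct page */
        uint64_t phys = 0x000b8000;          /* e.g. legacy VGA text memory */
        uint64_t pfn  = phys >> PAGE_SHIFT;
        uint64_t prot = 0x001 | 0x002;       /* present + writable */

        printf("pfn %#llx -> pte %#llx\n",
               (unsigned long long)pfn,
               (unsigned long long)toy_pfn_pte(pfn, prot));
        return 0;
    }
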
commit 687c4825b6ccab69b85f266ae925500b27aab6c2
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:04 2008 +0100

    x86: keep the BIOS area executable
    
    keep the BIOS area executable.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4589a1382fa1..effcd78d5f40 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -22,6 +22,27 @@ void clflush_cache_range(void *addr, int size)
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
+/*
+ * We allow the BIOS range to be executable:
+ */
+#define BIOS_BEGIN		0x000a0000
+#define BIOS_END		0x00100000
+
+static inline pgprot_t check_exec(pgprot_t prot, unsigned long address)
+{
+	if (__pa(address) >= BIOS_BEGIN && __pa(address) < BIOS_END)
+		pgprot_val(prot) &= ~_PAGE_NX;
+	/*
+	 * Better fail early if someone sets the kernel text to NX.
+	 * Does not cover __inittext
+	 */
+	BUG_ON(address >= (unsigned long)&_text &&
+		address < (unsigned long)&_etext &&
+	       (pgprot_val(prot) & _PAGE_NX));
+
+	return prot;
+}
+
 pte_t *lookup_address(unsigned long address, int *level)
 {
 	pgd_t *pgd = pgd_offset_k(address);
@@ -140,13 +161,7 @@ __change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
 	BUG_ON(PageLRU(kpte_page));
 	BUG_ON(PageCompound(kpte_page));
 
-	/*
-	 * Better fail early if someone sets the kernel text to NX.
-	 * Does not cover __inittext
-	 */
-	BUG_ON(address >= (unsigned long)&_text &&
-		address < (unsigned long)&_etext &&
-	       (pgprot_val(prot) & _PAGE_NX));
+	prot = check_exec(prot, address);
 
 	if (level == PG_LEVEL_4K) {
 		set_pte_atomic(kpte, mk_pte(page, canon_pgprot(prot)));
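
check_exec() centralises two rules: protections applied to physical addresses in the legacy BIOS window (0xa0000-0xfffff) must never carry _PAGE_NX, and the kernel text between _text and _etext must never be made non-executable. A standalone model of the BIOS-range rule, operating on plain physical addresses instead of __pa() of kernel virtual addresses, is sketched below:

    #include <stdio.h>

    #define BIOS_BEGIN  0x000a0000ULL
    #define BIOS_END    0x00100000ULL
    #define _PAGE_NX    (1ULL << 63)   /* x86-64 no-execute bit */

    /* models the BIOS-range half of check_exec() */
    static unsigned long long check_exec_model(unsigned long long prot,
                                               unsigned long long phys)
    {
        if (phys >= BIOS_BEGIN && phys < BIOS_END)
            prot &= ~_PAGE_NX;   /* keep the BIOS area executable */
        return prot;
    }

    int main(void)
    {
        unsigned long long nx_prot = 0x003ULL | _PAGE_NX;   /* present+RW, NX */

        printf("BIOS page  0x000f0000 -> prot %#llx\n",
               check_exec_model(nx_prot, 0x000f0000ULL));
        printf("other page 0x00200000 -> prot %#llx\n",
               check_exec_model(nx_prot, 0x00200000ULL));
        return 0;
    }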