Patches contributed by Eötvös Loránd University

commit e4b71dcf54fa90fc30fb901bbce7e38a46467af7
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:04 2008 +0100

    x86: clean up arch/x86/mm/pageattr.c
    
    do some leftover cleanups in the now unified arch/x86/mm/pageattr.c
    file.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 510ff4091667..a270f9ccebfb 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -2,7 +2,6 @@
  * Copyright 2002 Andi Kleen, SuSE Labs.
  * Thanks to Ben LaHaise for precious feedback.
  */
-
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -50,9 +49,7 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 	/* change init_mm */
 	set_pte_atomic(kpte, pte);
 #ifdef CONFIG_X86_32
-	if (SHARED_KERNEL_PMD)
-		return;
-	{
+	if (!SHARED_KERNEL_PMD) {
 		struct page *page;
 
 		for (page = pgd_list; page; page = (struct page *)page->index) {
@@ -277,14 +274,14 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		return;
 
 	/*
-	 * the return value is ignored - the calls cannot fail,
-	 * large pages are disabled at boot time.
+	 * The return value is ignored - the calls cannot fail,
+	 * large pages are disabled at boot time:
 	 */
 	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
 
 	/*
-	 * we should perform an IPI and flush all tlbs,
-	 * but that can deadlock->flush only current cpu.
+	 * We should perform an IPI and flush all tlbs,
+	 * but that can deadlock->flush only current cpu:
 	 */
 	__flush_tlb_all();
 }
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 223b0032bab5..faa6a96c2a5c 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -25,6 +25,8 @@ extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
 
 #endif /* !__ASSEMBLY__ */
 
+#define SHARED_KERNEL_PMD	1
+
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
  */
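
The SHARED_KERNEL_PMD define added to the 64-bit header pairs with the
if (!SHARED_KERNEL_PMD) cleanup above: wherever the macro expands to a
constant 1, the compiler folds the condition and discards the pgd_list
walk at build time. A minimal standalone sketch of that pattern
(illustrative only, not the kernel code itself):

    #define SHARED_KERNEL_PMD 1     /* constant on this configuration */

    static void set_pmd_pte_sketch(void)
    {
            if (!SHARED_KERNEL_PMD) {
                    /* constant-false: the compiler drops this whole
                     * block, so the pgd_list fixup walk costs nothing
                     * where kernel PMDs are shared */
            }
    }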

commit 4554ab95c2b9d6b0ee9cf2a7ed3df665422acebb
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:03 2008 +0100

    x86: re-add clflush_cache_range()
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 251613449dd6..510ff4091667 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -9,6 +9,14 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 
+void clflush_cache_range(void *addr, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
+		clflush(addr+i);
+}
+
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
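
The re-added helper steps through a byte range at the CPU's cache-line
granularity (boot_cpu_data.x86_clflush_size) and issues clflush for each
line. A hedged usage sketch - the descriptor type, helper and flag below
are hypothetical, not part of this series:

    /* hypothetical: push a CPU-written descriptor out of the cache
     * before a non-coherent device reads it */
    struct my_desc *desc = my_get_descriptor();     /* hypothetical */

    desc->status = MY_DESC_READY;                   /* hypothetical flag */
    clflush_cache_range(desc, sizeof(*desc));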

commit b195bc00ef8c2ccf8cc744e5ff9470cb08b45d76
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:03 2008 +0100

    x86: unify pageattr_32.c and pageattr_64.c
    
    unify the now perfectly identical pageattr_32/64.c files - no code changed.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/Makefile_32 b/arch/x86/mm/Makefile_32
index 1aeba3bf34bd..424e5a862271 100644
--- a/arch/x86/mm/Makefile_32
+++ b/arch/x86/mm/Makefile_32
@@ -2,7 +2,7 @@
 # Makefile for the linux i386-specific parts of the memory manager.
 #
 
-obj-y	:= init_32.o pgtable_32.o fault_32.o ioremap_32.o extable.o pageattr_32.o mmap.o
+obj-y	:= init_32.o pgtable_32.o fault_32.o ioremap_32.o extable.o pageattr.o mmap.o
 
 obj-$(CONFIG_CPA_DEBUG) += pageattr-test.o
 obj-$(CONFIG_NUMA) += discontig_32.o
diff --git a/arch/x86/mm/Makefile_64 b/arch/x86/mm/Makefile_64
index cb723167631b..043584478457 100644
--- a/arch/x86/mm/Makefile_64
+++ b/arch/x86/mm/Makefile_64
@@ -2,7 +2,7 @@
 # Makefile for the linux x86_64-specific parts of the memory manager.
 #
 
-obj-y	 := init_64.o fault_64.o ioremap_64.o extable.o pageattr_64.o mmap.o
+obj-y	 := init_64.o fault_64.o ioremap_64.o extable.o pageattr.o mmap.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_NUMA) += numa_64.o
 obj-$(CONFIG_K8_NUMA) += k8topology_64.o
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr.c
similarity index 100%
rename from arch/x86/mm/pageattr_32.c
rename to arch/x86/mm/pageattr.c
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
deleted file mode 100644
index 0246511be99d..000000000000
--- a/arch/x86/mm/pageattr_64.c
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Copyright 2002 Andi Kleen, SuSE Labs.
- * Thanks to Ben LaHaise for precious feedback.
- */
-
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-
-#include <asm/processor.h>
-#include <asm/tlbflush.h>
-#include <asm/sections.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-
-pte_t *lookup_address(unsigned long address, int *level)
-{
-	pgd_t *pgd = pgd_offset_k(address);
-	pud_t *pud;
-	pmd_t *pmd;
-
-	if (pgd_none(*pgd))
-		return NULL;
-	pud = pud_offset(pgd, address);
-	if (pud_none(*pud))
-		return NULL;
-	pmd = pmd_offset(pud, address);
-	if (pmd_none(*pmd))
-		return NULL;
-	*level = 3;
-	if (pmd_large(*pmd))
-		return (pte_t *)pmd;
-	*level = 4;
-
-	return pte_offset_kernel(pmd, address);
-}
-
-static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
-{
-	/* change init_mm */
-	set_pte_atomic(kpte, pte);
-#ifdef CONFIG_X86_32
-	if (SHARED_KERNEL_PMD)
-		return;
-	{
-		struct page *page;
-
-		for (page = pgd_list; page; page = (struct page *)page->index) {
-			pgd_t *pgd;
-			pud_t *pud;
-			pmd_t *pmd;
-
-			pgd = (pgd_t *)page_address(page) + pgd_index(address);
-			pud = pud_offset(pgd, address);
-			pmd = pmd_offset(pud, address);
-			set_pte_atomic((pte_t *)pmd, pte);
-		}
-	}
-#endif
-}
-
-static int split_large_page(pte_t *kpte, unsigned long address)
-{
-	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
-	gfp_t gfp_flags = GFP_KERNEL;
-	unsigned long flags;
-	unsigned long addr;
-	pte_t *pbase, *tmp;
-	struct page *base;
-	int i, level;
-
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	gfp_flags = GFP_ATOMIC;
-#endif
-	base = alloc_pages(gfp_flags, 0);
-	if (!base)
-		return -ENOMEM;
-
-	spin_lock_irqsave(&pgd_lock, flags);
-	/*
-	 * Check for races, another CPU might have split this page
-	 * up for us already:
-	 */
-	tmp = lookup_address(address, &level);
-	if (tmp != kpte) {
-		WARN_ON_ONCE(1);
-		goto out_unlock;
-	}
-
-	address = __pa(address);
-	addr = address & LARGE_PAGE_MASK;
-	pbase = (pte_t *)page_address(base);
-#ifdef CONFIG_X86_32
-	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
-#endif
-
-	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
-		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
-
-	/*
-	 * Install the new, split up pagetable:
-	 */
-	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
-	base = NULL;
-
-out_unlock:
-	spin_unlock_irqrestore(&pgd_lock, flags);
-
-	if (base)
-		__free_pages(base, 0);
-
-	return 0;
-}
-
-static int
-__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
-{
-	struct page *kpte_page;
-	int level, err = 0;
-	pte_t *kpte;
-
-	BUG_ON(PageHighMem(page));
-
-repeat:
-	kpte = lookup_address(address, &level);
-	if (!kpte)
-		return -EINVAL;
-
-	kpte_page = virt_to_page(kpte);
-	BUG_ON(PageLRU(kpte_page));
-	BUG_ON(PageCompound(kpte_page));
-
-	/*
-	 * Better fail early if someone sets the kernel text to NX.
-	 * Does not cover __inittext
-	 */
-	BUG_ON(address >= (unsigned long)&_text &&
-		address < (unsigned long)&_etext &&
-	       (pgprot_val(prot) & _PAGE_NX));
-
-	if (level == 4) {
-		set_pte_atomic(kpte, mk_pte(page, canon_pgprot(prot)));
-	} else {
-		err = split_large_page(kpte, address);
-		if (!err)
-			goto repeat;
-	}
-	return err;
-}
-
-/**
- * change_page_attr_addr - Change page table attributes in linear mapping
- * @address: Virtual address in linear mapping.
- * @numpages: Number of pages to change
- * @prot:    New page table attribute (PAGE_*)
- *
- * Change page attributes of a page in the direct mapping. This is a variant
- * of change_page_attr() that also works on memory holes that do not have
- * mem_map entry (pfn_valid() is false).
- *
- * See change_page_attr() documentation for more details.
- */
-
-int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
-{
-	int err = 0, kernel_map = 0, i;
-
-#ifdef CONFIG_X86_64
-	if (address >= __START_KERNEL_map &&
-			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
-
-		address = (unsigned long)__va(__pa(address));
-		kernel_map = 1;
-	}
-#endif
-
-	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
-		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
-
-		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-			err = __change_page_attr(address, pfn_to_page(pfn), prot);
-			if (err)
-				break;
-		}
-#ifdef CONFIG_X86_64
-		/*
-		 * Handle kernel mapping too which aliases part of
-		 * lowmem:
-		 */
-		if (__pa(address) < KERNEL_TEXT_SIZE) {
-			unsigned long addr2;
-			pgprot_t prot2;
-
-			addr2 = __START_KERNEL_map + __pa(address);
-			/* Make sure the kernel mappings stay executable */
-			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
-			err = __change_page_attr(addr2, pfn_to_page(pfn), prot2);
-		}
-#endif
-	}
-
-	return err;
-}
-
-/**
- * change_page_attr - Change page table attributes in the linear mapping.
- * @page: First page to change
- * @numpages: Number of pages to change
- * @prot: New protection/caching type (PAGE_*)
- *
- * Returns 0 on success, otherwise a negated errno.
- *
- * This should be used when a page is mapped with a different caching policy
- * than write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes of the
- * in kernel linear mapping too.
- *
- * Caller must call global_flush_tlb() later to make the changes active.
- *
- * The caller needs to ensure that there are no conflicting mappings elsewhere
- * (e.g. in user space) * This function only deals with the kernel linear map.
- *
- * For MMIO areas without mem_map use change_page_attr_addr() instead.
- */
-int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
-	unsigned long addr = (unsigned long)page_address(page);
-
-	return change_page_attr_addr(addr, numpages, prot);
-}
-EXPORT_SYMBOL(change_page_attr);
-
-static void flush_kernel_map(void *arg)
-{
-	/*
-	 * Flush all to work around Errata in early athlons regarding
-	 * large page flushing.
-	 */
-	__flush_tlb_all();
-
-	if (boot_cpu_data.x86_model >= 4)
-		wbinvd();
-}
-
-void global_flush_tlb(void)
-{
-	BUG_ON(irqs_disabled());
-
-	on_each_cpu(flush_kernel_map, NULL, 1, 1);
-}
-EXPORT_SYMBOL(global_flush_tlb);
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
-{
-	if (PageHighMem(page))
-		return;
-	if (!enable) {
-		debug_check_no_locks_freed(page_address(page),
-					   numpages * PAGE_SIZE);
-	}
-
-	/*
-	 * If page allocator is not up yet then do not call c_p_a():
-	 */
-	if (!debug_pagealloc_enabled)
-		return;
-
-	/*
-	 * the return value is ignored - the calls cannot fail,
-	 * large pages are disabled at boot time.
-	 */
-	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
-
-	/*
-	 * we should perform an IPI and flush all tlbs,
-	 * but that can deadlock->flush only current cpu.
-	 */
-	__flush_tlb_all();
-}
-#endif
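
Per the change_page_attr() kernel-doc carried over above, the attribute
change only becomes active once the caller invokes global_flush_tlb().
A hedged caller sketch, assuming PAGE_KERNEL_NOCACHE as the target
protection:

    /* sketch: switch a range of pages to uncached in the kernel
     * linear mapping, then make the change visible on all CPUs */
    static int set_pages_uncached(struct page *page, int numpages)
    {
            int err = change_page_attr(page, numpages, PAGE_KERNEL_NOCACHE);

            /* flush unconditionally: earlier pages in the range may
             * already have been changed even if a later one failed */
            global_flush_tlb();
            return err;
    }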

commit 6050be70d8f7e3952fcc31fcf1fa8a7cbaa18312
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:03 2008 +0100

    x86: prepare for pageattr.c unification
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index e1c860800ff1..0246511be99d 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -9,14 +9,6 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 
-void clflush_cache_range(void *addr, int size)
-{
-	int i;
-
-	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-		clflush(addr+i);
-}
-
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>

commit 44af6c41e6a055a0b9bd0d2067cfbc8e9f6a24df
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:03 2008 +0100

    x86: backmerge 64-bit details into 32-bit pageattr.c
    
    backmerge 64-bit details into 32-bit pageattr.c.
    
    the pageattr_32.c and pageattr_64.c files are now identical.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 1c7bd81a4194..251613449dd6 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -39,23 +39,26 @@ pte_t *lookup_address(unsigned long address, int *level)
 
 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 {
-	struct page *page;
-
 	/* change init_mm */
 	set_pte_atomic(kpte, pte);
+#ifdef CONFIG_X86_32
 	if (SHARED_KERNEL_PMD)
 		return;
-
-	for (page = pgd_list; page; page = (struct page *)page->index) {
-		pgd_t *pgd;
-		pud_t *pud;
-		pmd_t *pmd;
-
-		pgd = (pgd_t *)page_address(page) + pgd_index(address);
-		pud = pud_offset(pgd, address);
-		pmd = pmd_offset(pud, address);
-		set_pte_atomic((pte_t *)pmd, pte);
+	{
+		struct page *page;
+
+		for (page = pgd_list; page; page = (struct page *)page->index) {
+			pgd_t *pgd;
+			pud_t *pud;
+			pmd_t *pmd;
+
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			pud = pud_offset(pgd, address);
+			pmd = pmd_offset(pud, address);
+			set_pte_atomic((pte_t *)pmd, pte);
+		}
 	}
+#endif
 }
 
 static int split_large_page(pte_t *kpte, unsigned long address)
@@ -89,7 +92,9 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	address = __pa(address);
 	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
+#ifdef CONFIG_X86_32
 	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
+#endif
 
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
 		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
@@ -109,15 +114,14 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	return 0;
 }
 
-static int __change_page_attr(struct page *page, pgprot_t prot)
+static int
+__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
 {
 	struct page *kpte_page;
-	unsigned long address;
 	int level, err = 0;
 	pte_t *kpte;
 
 	BUG_ON(PageHighMem(page));
-	address = (unsigned long)page_address(page);
 
 repeat:
 	kpte = lookup_address(address, &level);
@@ -146,51 +150,87 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
 	return err;
 }
 
-/*
- * Change the page attributes of an page in the linear mapping.
+/**
+ * change_page_attr_addr - Change page table attributes in linear mapping
+ * @address: Virtual address in linear mapping.
+ * @numpages: Number of pages to change
+ * @prot:    New page table attribute (PAGE_*)
  *
- * This should be used when a page is mapped with a different caching policy
- * than write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes of the
- * in kernel linear mapping too.
- *
- * The caller needs to ensure that there are no conflicting mappings elsewhere.
- * This function only deals with the kernel linear map.
+ * Change page attributes of a page in the direct mapping. This is a variant
+ * of change_page_attr() that also works on memory holes that do not have
+ * mem_map entry (pfn_valid() is false).
  *
- * Caller must call global_flush_tlb() after this.
+ * See change_page_attr() documentation for more details.
  */
-int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+
+int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 {
-	int err = 0, i;
+	int err = 0, kernel_map = 0, i;
+
+#ifdef CONFIG_X86_64
+	if (address >= __START_KERNEL_map &&
+			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
 
-	for (i = 0; i < numpages; i++, page++) {
-		err = __change_page_attr(page, prot);
-		if (err)
-			break;
+		address = (unsigned long)__va(__pa(address));
+		kernel_map = 1;
+	}
+#endif
+
+	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
+		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+
+		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
+			err = __change_page_attr(address, pfn_to_page(pfn), prot);
+			if (err)
+				break;
+		}
+#ifdef CONFIG_X86_64
+		/*
+		 * Handle kernel mapping too which aliases part of
+		 * lowmem:
+		 */
+		if (__pa(address) < KERNEL_TEXT_SIZE) {
+			unsigned long addr2;
+			pgprot_t prot2;
+
+			addr2 = __START_KERNEL_map + __pa(address);
+			/* Make sure the kernel mappings stay executable */
+			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
+			err = __change_page_attr(addr2, pfn_to_page(pfn), prot2);
+		}
+#endif
 	}
 
 	return err;
 }
-EXPORT_SYMBOL(change_page_attr);
 
-int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot)
+/**
+ * change_page_attr - Change page table attributes in the linear mapping.
+ * @page: First page to change
+ * @numpages: Number of pages to change
+ * @prot: New protection/caching type (PAGE_*)
+ *
+ * Returns 0 on success, otherwise a negated errno.
+ *
+ * This should be used when a page is mapped with a different caching policy
+ * than write-back somewhere - some CPUs do not like it when mappings with
+ * different caching policies exist. This changes the page attributes of the
+ * in kernel linear mapping too.
+ *
+ * Caller must call global_flush_tlb() later to make the changes active.
+ *
+ * The caller needs to ensure that there are no conflicting mappings elsewhere
+ * (e.g. in user space) * This function only deals with the kernel linear map.
+ *
+ * For MMIO areas without mem_map use change_page_attr_addr() instead.
+ */
+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 {
-	int i;
-	unsigned long pfn = (__pa(addr) >> PAGE_SHIFT);
-
-	for (i = 0; i < numpages; i++) {
-		if (!pfn_valid(pfn + i)) {
-			WARN_ON_ONCE(1);
-			break;
-		} else {
-			int level;
-			pte_t *pte = lookup_address(addr + i*PAGE_SIZE, &level);
-			BUG_ON(pte && pte_none(*pte));
-		}
-	}
+	unsigned long addr = (unsigned long)page_address(page);
 
-	return change_page_attr(virt_to_page(addr), i, prot);
+	return change_page_attr_addr(addr, numpages, prot);
 }
+EXPORT_SYMBOL(change_page_attr);
 
 static void flush_kernel_map(void *arg)
 {
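
The reworked change_page_attr_addr() takes a linear-mapping virtual
address instead of a struct page, so it also reaches memory holes with
no mem_map entry, and on 64-bit it fixes up the high kernel-text alias
as well. A hedged caller sketch (vaddr and numpages are assumed inputs,
and the protection value is only an example):

    /* sketch: change attributes by virtual address, e.g. for a
     * range where pfn_valid() is false */
    static int set_range_uncached(unsigned long vaddr, int numpages)
    {
            int err = change_page_attr_addr(vaddr, numpages,
                                            PAGE_KERNEL_NOCACHE);

            global_flush_tlb();     /* required to activate the change */
            return err;
    }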

commit 31a0717cbc6191fc56326fdf95548d87055686e3
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:03 2008 +0100

    x86: enable DEBUG_PAGEALLOC on 64-bit
    
    enable CONFIG_DEBUG_PAGEALLOC=y on 64-bit kernels too.
    
    preliminary testing shows that it's working fine.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 38211ff0447f..46d34b2db421 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -41,7 +41,6 @@ comment "Page alloc debug is incompatible with Software Suspend on i386"
 config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
 	depends on DEBUG_KERNEL && !HIBERNATION && !HUGETLBFS
-	depends on X86_32
 	help
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
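
With the X86_32 dependency dropped, the option is gated only by the
depends line still shown above. A hedged .config fragment for a 64-bit
build (assuming hibernation and hugetlbfs are left disabled, as those
dependencies require):

    CONFIG_DEBUG_KERNEL=y
    # CONFIG_HIBERNATION is not set
    # CONFIG_HUGETLBFS is not set
    CONFIG_DEBUG_PAGEALLOC=y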

commit ace63e3743ae59fc0cce48450bd2e410776b4148
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:03 2008 +0100

    x86: add kernel_map_pages() to 64-bit
    
    needed for DEBUG_PAGEALLOC support and for unification.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 4053832d4108..e1c860800ff1 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -260,3 +260,33 @@ void global_flush_tlb(void)
 	on_each_cpu(flush_kernel_map, NULL, 1, 1);
 }
 EXPORT_SYMBOL(global_flush_tlb);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	if (PageHighMem(page))
+		return;
+	if (!enable) {
+		debug_check_no_locks_freed(page_address(page),
+					   numpages * PAGE_SIZE);
+	}
+
+	/*
+	 * If page allocator is not up yet then do not call c_p_a():
+	 */
+	if (!debug_pagealloc_enabled)
+		return;
+
+	/*
+	 * the return value is ignored - the calls cannot fail,
+	 * large pages are disabled at boot time.
+	 */
+	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+
+	/*
+	 * we should perform an IPI and flush all tlbs,
+	 * but that can deadlock->flush only current cpu.
+	 */
+	__flush_tlb_all();
+}
+#endif
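
kernel_map_pages() is the hook the page allocator drives when
CONFIG_DEBUG_PAGEALLOC is enabled: pages are unmapped from the linear
mapping on free, so stale accesses fault immediately, and mapped back
on allocation. A simplified sketch of the call pattern (the real call
sites live in mm/page_alloc.c):

    /* free path: unmap 2^order pages so use-after-free oopses */
    kernel_map_pages(page, 1 << order, 0);

    /* alloc path: map them back before handing the pages out */
    kernel_map_pages(page, 1 << order, 1);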

commit f5a50ce1bf53a07cb7d0bab1a87e62cc4f34f0ab
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:03 2008 +0100

    x86: return -EINVAL in __change_page_attr(), instead of 0
    
    careful: might change driver behavior - but this is the right
    return value.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 60cfb687f97c..4053832d4108 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -135,7 +135,7 @@ __change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
-		return 0;
+		return -EINVAL;
 
 	kpte_page = virt_to_page(kpte);
 	BUG_ON(PageLRU(kpte_page));
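
The changelog's caution is worth spelling out: callers that check the
return value used to sail past a missing PTE silently, and now see an
error. A hypothetical caller pattern affected by this change:

    /* previously unreachable when lookup_address() found no PTE;
     * after this patch the error path can trigger (hypothetical
     * caller and label) */
    if (change_page_attr(page, 1, prot) < 0)
            goto out_unmap;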

commit 674d67269e79f6697c3480363b28c9b9934fa60c
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:03 2008 +0100

    x86: clean up differences between 64-bit and 32-bit
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 139795029fb6..60cfb687f97c 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -126,30 +126,36 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 static int
 __change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
 {
-	pgprot_t ref_prot2, oldprot;
 	struct page *kpte_page;
 	int level, err = 0;
 	pte_t *kpte;
 
+	BUG_ON(PageHighMem(page));
+
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
 		return 0;
 
 	kpte_page = virt_to_page(kpte);
-	oldprot = pte_pgprot(*kpte);
 	BUG_ON(PageLRU(kpte_page));
 	BUG_ON(PageCompound(kpte_page));
-	prot = canon_pgprot(prot);
+
+	/*
+	 * Better fail early if someone sets the kernel text to NX.
+	 * Does not cover __inittext
+	 */
+	BUG_ON(address >= (unsigned long)&_text &&
+		address < (unsigned long)&_etext &&
+	       (pgprot_val(prot) & _PAGE_NX));
 
 	if (level == 4) {
-		set_pte_atomic(kpte, mk_pte(page, prot));
+		set_pte_atomic(kpte, mk_pte(page, canon_pgprot(prot)));
 	} else {
 		err = split_large_page(kpte, address);
 		if (!err)
 			goto repeat;
 	}
-
 	return err;
 }
 

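The new BUG_ON above guards against clearing the executable bit on
kernel text. Its range test, pulled out as a hedged standalone helper
(like the original check, it deliberately does not cover __inittext):

    static int addr_in_kernel_text(unsigned long address)
    {
            /* [_text, _etext) is the core kernel text range */
            return address >= (unsigned long)&_text &&
                   address <  (unsigned long)&_etext;
    }
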
commit 6faa4c53b2f06fd271060761ce27f4f53289175c
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Jan 30 13:34:03 2008 +0100

    x86: 64-bit, add the new split_large_page() function
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 190d4d37bc2f..139795029fb6 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -45,34 +45,91 @@ pte_t *lookup_address(unsigned long address, int *level)
 	return pte_offset_kernel(pmd, address);
 }
 
-static struct page *
-split_large_page(unsigned long address, pgprot_t ref_prot)
+static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 {
+	/* change init_mm */
+	set_pte_atomic(kpte, pte);
+#ifdef CONFIG_X86_32
+	if (SHARED_KERNEL_PMD)
+		return;
+	{
+		struct page *page;
+
+		for (page = pgd_list; page; page = (struct page *)page->index) {
+			pgd_t *pgd;
+			pud_t *pud;
+			pmd_t *pmd;
+
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			pud = pud_offset(pgd, address);
+			pmd = pmd_offset(pud, address);
+			set_pte_atomic((pte_t *)pmd, pte);
+		}
+	}
+#endif
+}
+
+static int split_large_page(pte_t *kpte, unsigned long address)
+{
+	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
+	gfp_t gfp_flags = GFP_KERNEL;
+	unsigned long flags;
 	unsigned long addr;
+	pte_t *pbase, *tmp;
 	struct page *base;
-	pte_t *pbase;
-	int i;
+	int i, level;
+
 
-	base = alloc_pages(GFP_KERNEL, 0);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	gfp_flags = GFP_ATOMIC;
+#endif
+	base = alloc_pages(gfp_flags, 0);
 	if (!base)
-		return NULL;
+		return -ENOMEM;
+
+	spin_lock_irqsave(&pgd_lock, flags);
+	/*
+	 * Check for races, another CPU might have split this page
+	 * up for us already:
+	 */
+	tmp = lookup_address(address, &level);
+	if (tmp != kpte) {
+		WARN_ON_ONCE(1);
+		goto out_unlock;
+	}
 
 	address = __pa(address);
 	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
+#ifdef CONFIG_X86_32
+	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
+#endif
+
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
-		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, ref_prot);
+		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
+
+	/*
+	 * Install the new, split up pagetable:
+	 */
+	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
+	base = NULL;
+
+out_unlock:
+	spin_unlock_irqrestore(&pgd_lock, flags);
+
+	if (base)
+		__free_pages(base, 0);
 
-	return base;
+	return 0;
 }
 
 static int
 __change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
 {
+	pgprot_t ref_prot2, oldprot;
 	struct page *kpte_page;
+	int level, err = 0;
 	pte_t *kpte;
-	pgprot_t ref_prot2, oldprot;
-	int level;
 
 repeat:
 	kpte = lookup_address(address, &level);
@@ -88,22 +145,12 @@ __change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
 	if (level == 4) {
 		set_pte_atomic(kpte, mk_pte(page, prot));
 	} else {
-		/*
-		 * split_large_page will take the reference for this
-		 * change_page_attr on the split page.
-		 */
-		struct page *split;
-
-		ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
-		split = split_large_page(address, ref_prot2);
-		if (!split)
-			return -ENOMEM;
-		pgprot_val(ref_prot2) &= ~_PAGE_NX;
-		set_pte_atomic(kpte, mk_pte(split, ref_prot2));
-		goto repeat;
+		err = split_large_page(kpte, address);
+		if (!err)
+			goto repeat;
 	}
 
-	return 0;
+	return err;
 }
 
 /**