Patches contributed by Eötvös Loránd University
commit cacf890694a36124ceddce44ff4c7b02d372ce7c
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 21 13:46:33 2008 +0200
Revert "introduce two APIs for page attribute"
This reverts commit 1ac2f7d55b7ee1613c90631e87fea22ec06781e5.
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4adb33628dec..5c06469a0653 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -752,12 +752,12 @@ static inline int cache_attr(pgprot_t attr)
(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}
-static int do_change_page_attr_set_clr(unsigned long addr, int numpages,
+static int change_page_attr_set_clr(unsigned long addr, int numpages,
pgprot_t mask_set, pgprot_t mask_clr,
- int force_split, int *tlb_flush)
+ int force_split)
{
struct cpa_data cpa;
- int ret, checkalias;
+ int ret, cache, checkalias;
/*
* Check, if we are requested to change a not supported
@@ -795,22 +795,9 @@ static int do_change_page_attr_set_clr(unsigned long addr, int numpages,
/*
* Check whether we really changed something:
*/
- *tlb_flush = cpa.flushtlb;
- cpa_fill_pool(NULL);
-
- return ret;
-}
-
-static int change_page_attr_set_clr(unsigned long addr, int numpages,
- pgprot_t mask_set, pgprot_t mask_clr,
- int force_split)
-{
- int cache, flush_cache = 0, ret;
-
- ret = do_change_page_attr_set_clr(addr, numpages, mask_set, mask_clr,
- force_split, &flush_cache);
- if (!flush_cache)
+ if (!cpa.flushtlb)
goto out;
+
/*
* No need to flush, when we did not set any of the caching
* attributes:
@@ -827,7 +814,10 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
cpa_flush_range(addr, numpages, cache);
else
cpa_flush_all(cache);
+
out:
+ cpa_fill_pool(NULL);
+
return ret;
}
@@ -865,30 +855,6 @@ int set_memory_uc(unsigned long addr, int numpages)
}
EXPORT_SYMBOL(set_memory_uc);
-int set_memory_uc_noflush(unsigned long addr, int numpages)
-{
- int flush;
- /*
- * for now UC MINUS. see comments in ioremap_nocache()
- */
- if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
- _PAGE_CACHE_UC_MINUS, NULL))
- return -EINVAL;
- /*
- * for now UC MINUS. see comments in ioremap_nocache()
- */
- return do_change_page_attr_set_clr(addr, numpages,
- __pgprot(_PAGE_CACHE_UC_MINUS),
- __pgprot(0), 0, &flush);
-}
-EXPORT_SYMBOL(set_memory_uc_noflush);
-
-void set_memory_flush_all(void)
-{
- cpa_flush_all(1);
-}
-EXPORT_SYMBOL(set_memory_flush_all);
-
int _set_memory_wc(unsigned long addr, int numpages)
{
return change_page_attr_set(addr, numpages,
@@ -963,14 +929,6 @@ int set_pages_uc(struct page *page, int numpages)
}
EXPORT_SYMBOL(set_pages_uc);
-int set_pages_uc_noflush(struct page *page, int numpages)
-{
- unsigned long addr = (unsigned long)page_address(page);
-
- return set_memory_uc_noflush(addr, numpages);
-}
-EXPORT_SYMBOL(set_pages_uc_noflush);
-
int set_pages_wb(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 57bac7b68c46..f4c0ab50d2c2 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -57,8 +57,6 @@ int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
-int set_memory_uc_noflush(unsigned long addr, int numpages);
-void set_memory_flush_all(void);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
@@ -89,7 +87,6 @@ int set_memory_4k(unsigned long addr, int numpages);
*/
int set_pages_uc(struct page *page, int numpages);
-int set_pages_uc_noflush(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
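The effect of this revert is that the deferred-flush variants (set_memory_uc_noflush, set_pages_uc_noflush, set_memory_flush_all) disappear again, and callers are left with the flushing interfaces that remain declared in cacheflush.h. A minimal, hedged sketch of how a caller would use the surviving API; the function name, buffer size and error handling below are illustrative, not part of the patch:

/*
 * Hypothetical driver snippet: mark a kernel buffer uncached for
 * device access, then restore write-back caching when done.
 * set_memory_uc()/set_memory_wb() are the interfaces kept by this
 * revert; each call performs its own TLB/cache flush internally.
 */
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/cacheflush.h>

static int example_map_buffer_uncached(void)
{
	unsigned long addr = __get_free_pages(GFP_KERNEL, 2);	/* 4 pages */
	int ret;

	if (!addr)
		return -ENOMEM;

	ret = set_memory_uc(addr, 4);		/* flushes immediately */
	if (ret) {
		free_pages(addr, 2);
		return ret;
	}

	/* ... hand the uncached buffer to hardware ... */

	set_memory_wb(addr, 4);			/* restore caching, flush again */
	free_pages(addr, 2);
	return 0;
}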
commit 9326d61bf64c4293f834e86c11f52db5be9798d6
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 21 13:46:25 2008 +0200
Revert "reduce tlb/cache flush times of agpgart memory allocation"
This reverts commit 466ae837424dcc538b1af2a0eaf53be32edcdbe7.
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 395168fb17e3..81e14bea54bd 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -30,10 +30,6 @@
#define _AGP_BACKEND_PRIV_H 1
#include <asm/agp.h> /* for flush_agp_cache() */
-#ifndef map_page_into_agp_noflush
-#define map_page_into_agp_noflush(page) map_page_into_agp(page)
-#define map_page_into_agp_global_flush()
-#endif
#define PFX "agpgart: "
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index bf239b8ecac5..eaa1a355bb32 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -274,7 +274,6 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
new->memory[i] = virt_to_gart(addr);
new->page_count++;
}
- map_page_into_agp_global_flush();
new->bridge = bridge;
return new;
@@ -1187,8 +1186,7 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
if (page == NULL)
return NULL;
- /* agp_allocate_memory will do flush */
- map_page_into_agp_noflush(page);
+ map_page_into_agp(page);
get_page(page);
atomic_inc(&agp_bridge->current_memory_agp);
diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h
index 181b9e984b3a..e4004a9f6a9a 100644
--- a/include/asm-x86/agp.h
+++ b/include/asm-x86/agp.h
@@ -15,9 +15,6 @@
#define map_page_into_agp(page) set_pages_uc(page, 1)
#define unmap_page_from_agp(page) set_pages_wb(page, 1)
-#define map_page_into_agp_noflush(page) set_pages_uc_noflush(page, 1)
-#define map_page_into_agp_global_flush() set_memory_flush_all()
-
/*
* Could use CLFLUSH here if the cpu supports it. But then it would
* need to be called for each cacheline of the whole page so it may
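With this revert in place, every page entering the GART goes back through the plain map_page_into_agp()/unmap_page_from_agp() macros, i.e. set_pages_uc()/set_pages_wb() with one attribute change and one flush per page and no deferred global flush. A hedged illustration of the per-page pattern the generic AGP code returns to; the function names and allocation flags mirror agp_generic_alloc_page() but are written here only as a sketch:

/*
 * Illustrative sketch only: allocate a page for the aperture and switch
 * it to uncached, the way agp_generic_alloc_page() does via
 * map_page_into_agp() after this revert.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/agp.h>

static struct page *example_alloc_agp_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);

	if (!page)
		return NULL;

	map_page_into_agp(page);	/* expands to set_pages_uc(page, 1) */
	get_page(page);
	return page;
}

static void example_free_agp_page(struct page *page)
{
	unmap_page_from_agp(page);	/* expands to set_pages_wb(page, 1) */
	put_page(page);
	__free_page(page);
}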
commit 470fba7ebe60ad9185056b080b331abad24b4df9
Merge: 7225e75144b9 6a55617ed5d1
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 21 13:28:24 2008 +0200
Merge branch 'linus' into x86/doc
commit 170465ee7f5a9a2d0ac71285507e52642e040353
Merge: 169ad16bb87c 1fca25427482
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Aug 20 12:39:18 2008 +0200
Merge branch 'linus' into x86/xen
commit 7393423dd9b5790a3115873be355e9fc862bce8f
Merge: 8df9676d6402 1fca25427482
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Aug 20 11:52:15 2008 +0200
Merge branch 'linus' into x86/cleanups
commit 2879a927bb7a3cf91ae3906a5e59215f9c17dd75
Merge: 7e7b43892b87 20211e4d3447
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Aug 19 03:34:07 2008 +0200
Merge branch 'x86/oprofile' into oprofile
commit cd98a04a59e2f94fa64d5bf1e26498d27427d5e7
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Aug 13 18:02:18 2008 +0200
x86: add MAP_STACK mmap flag
as per this discussion:
http://lkml.org/lkml/2008/8/12/423
Pardo reported that 64-bit threaded apps, if their stacks exceed the
combined size of ~4GB, slow down drastically in pthread_create() - because
glibc uses MAP_32BIT to allocate the stacks. The use of MAP_32BIT is
a legacy hack - to speed up context switching on certain early model
64-bit P4 CPUs.
So introduce a new flag to be used by glibc instead, to not constrain
64-bit apps like this.
glibc can switch to this new flag straight away - it will be ignored
by the kernel. If those old CPUs ever matter to anyone, support for
it can be implemented.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Ulrich Drepper <drepper@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/include/asm-x86/mman.h b/include/asm-x86/mman.h
index c1682b542daf..90bc4108a4fd 100644
--- a/include/asm-x86/mman.h
+++ b/include/asm-x86/mman.h
@@ -12,6 +12,7 @@
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
+#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
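On this kernel the new flag is purely a hint (nothing consumes it yet), so userspace can start passing it straight away. A hedged sketch of the glibc-style allocation the changelog has in mind, with MAP_STACK replacing MAP_32BIT for thread stacks; the helper name, stack size handling and fallback define are illustrative:

/*
 * Illustrative userspace sketch: allocate a thread stack with MAP_STACK
 * instead of the legacy MAP_32BIT hint.  On kernels that ignore the
 * flag (like this one) the mapping behaves as a plain anonymous map.
 */
#include <sys/mman.h>
#include <stddef.h>

#ifndef MAP_STACK
#define MAP_STACK 0x20000	/* matches the new asm-x86/mman.h value */
#endif

static void *alloc_thread_stack(size_t size)
{
	void *stack = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

	return stack == MAP_FAILED ? NULL : stack;
}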
commit 2fdc86901d2ab30a12402b46238951d2a7891590
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Aug 13 18:02:18 2008 +0200
x86: add MAP_STACK mmap flag
as per this discussion:
http://lkml.org/lkml/2008/8/12/423
Pardo reported that 64-bit threaded apps, if their stacks exceed the
combined size of ~4GB, slow down drastically in pthread_create() - because
glibc uses MAP_32BIT to allocate the stacks. The use of MAP_32BIT is
a legacy hack - to speed up context switching on certain early model
64-bit P4 CPUs.
So introduce a new flag to be used by glibc instead, to not constrain
64-bit apps like this.
glibc can switch to this new flag straight away - it will be ignored
by the kernel. If those old CPUs ever matter to anyone, support for
it can be implemented.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Ulrich Drepper <drepper@gmail.com>
diff --git a/include/asm-x86/mman.h b/include/asm-x86/mman.h
index c1682b542daf..90bc4108a4fd 100644
--- a/include/asm-x86/mman.h
+++ b/include/asm-x86/mman.h
@@ -12,6 +12,7 @@
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
+#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
commit f3efbe582b5396d134024c03a5fa253f2a85d9a6
Merge: 05d3ed0a1fe3 b635acec48bc
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Aug 15 18:15:17 2008 +0200
Merge branch 'linus' into x86/gart
commit 529d0e402eee62b7bfc89ec5b763a9a875bb49fb
Merge: 66d4bdf22b86 0d5cdc97e242
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Aug 15 17:53:07 2008 +0200
Merge branch 'x86/geode' into x86/urgent