Patches contributed by Eötvös Loránd University


commit 365d46dc9be9b3c833990a06f3994b1987eda578
Merge: 5dc64a3442b9 fd0480883066
Author: Ingo Molnar <mingo@elte.hu>
Date:   Sun Oct 12 12:35:23 2008 +0200

    Merge branch 'linus' into x86/xen
    
    Conflicts:
            arch/x86/kernel/cpu/common.c
            arch/x86/kernel/process_64.c
            arch/x86/xen/enlighten.c

diff --cc arch/x86/kernel/cpu/common.c
index 9983bc3f5d18,7581b62df184..fb789dd9e691
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@@ -723,8 -1105,32 +1105,21 @@@ void __cpuinit cpu_init(void
  	/*
  	 * Force FPU initialization:
  	 */
- 	current_thread_info()->status = 0;
+ 	if (cpu_has_xsave)
+ 		current_thread_info()->status = TS_XSAVE;
+ 	else
+ 		current_thread_info()->status = 0;
  	clear_used_math();
  	mxcsr_feature_mask_init();
+ 
+ 	/*
+ 	 * Boot processor to setup the FP and extended state context info.
+ 	 */
+ 	if (!smp_processor_id())
+ 		init_thread_xstate();
+ 
+ 	xsave_init();
  }
  
 -#ifdef CONFIG_HOTPLUG_CPU
 -void __cpuinit cpu_uninit(void)
 -{
 -	int cpu = raw_smp_processor_id();
 -	cpu_clear(cpu, cpu_initialized);
 -
 -	/* lazy TLB state */
 -	per_cpu(cpu_tlbstate, cpu).state = 0;
 -	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
 -}
 -#endif
+ 
+ #endif
diff --cc arch/x86/xen/enlighten.c
index 977a54255fb4,a27d562a9744..0013a729b41d
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@@ -1228,10 -1273,38 +1260,8 @@@ static const struct pv_cpu_ops xen_cpu_
  	},
  };
  
 -static void __init __xen_init_IRQ(void)
 -{
 -#ifdef CONFIG_X86_64
 -	int i;
 -
 -	/* Create identity vector->irq map */
 -	for(i = 0; i < NR_VECTORS; i++) {
 -		int cpu;
 -
 -		for_each_possible_cpu(cpu)
 -			per_cpu(vector_irq, cpu)[i] = i;
 -	}
 -#endif	/* CONFIG_X86_64 */
 -
 -	xen_init_IRQ();
 -}
 -
 -static const struct pv_irq_ops xen_irq_ops __initdata = {
 -	.init_IRQ = __xen_init_IRQ,
 -	.save_fl = xen_save_fl,
 -	.restore_fl = xen_restore_fl,
 -	.irq_disable = xen_irq_disable,
 -	.irq_enable = xen_irq_enable,
 -	.safe_halt = xen_safe_halt,
 -	.halt = xen_halt,
 -#ifdef CONFIG_X86_64
 -	.adjust_exception_frame = xen_adjust_exception_frame,
 -#endif
 -};
 -
  static const struct pv_apic_ops xen_apic_ops __initdata = {
  #ifdef CONFIG_X86_LOCAL_APIC
- 	.apic_write = xen_apic_write,
- 	.apic_read = xen_apic_read,
  	.setup_boot_clock = paravirt_nop,
  	.setup_secondary_clock = paravirt_nop,
  	.startup_ipi_hook = paravirt_nop,
@@@ -1598,8 -1706,14 +1628,15 @@@ asmlinkage void __init xen_start_kernel
  	pv_apic_ops = xen_apic_ops;
  	pv_mmu_ops = xen_mmu_ops;
  
 +	xen_init_irq_ops();
 +
+ #ifdef CONFIG_X86_LOCAL_APIC
+ 	/*
+ 	 * set up the basic apic ops.
+ 	 */
+ 	apic_ops = &xen_basic_apic_ops;
+ #endif
+ 
  	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
  		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
  		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
diff --cc include/asm-x86/smp.h
index 30b5146cc436,29324c103341..6df2615f9138
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@@ -222,5 -205,9 +227,5 @@@ static inline int hard_smp_processor_id
  
  #endif /* CONFIG_X86_LOCAL_APIC */
  
 -#ifdef CONFIG_HOTPLUG_CPU
 -extern void cpu_uninit(void);
 -#endif
 -
  #endif /* __ASSEMBLY__ */
- #endif
+ #endif /* ASM_X86__SMP_H */
diff --cc include/asm-x86/xen/hypervisor.h
index fca066febc35,0ef3a88b869d..445a24759560
--- a/include/asm-x86/xen/hypervisor.h
+++ b/include/asm-x86/xen/hypervisor.h
@@@ -66,17 -67,6 +66,17 @@@ u64 jiffies_to_st(unsigned long jiffies
  #define MULTI_UVMFLAGS_INDEX 3
  #define MULTI_UVMDOMID_INDEX 4
  
 -#define is_running_on_xen()	(xen_start_info ? 1 : 0)
 +enum xen_domain_type {
 +	XEN_NATIVE,
 +	XEN_PV_DOMAIN,
 +	XEN_HVM_DOMAIN,
 +};
 +
 +extern enum xen_domain_type xen_domain_type;
 +
 +#define xen_domain()		(xen_domain_type != XEN_NATIVE)
 +#define xen_pv_domain()		(xen_domain_type == XEN_PV_DOMAIN)
 +#define xen_initial_domain()	(xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN)
 +#define xen_hvm_domain()	(xen_domain_type == XEN_HVM_DOMAIN)
  
- #endif /* __HYPERVISOR_H__ */
+ #endif /* ASM_X86__XEN__HYPERVISOR_H */

commit 206855c321adee56db3946ca09a5887cddb9d598
Merge: e8d3f455de4f cb58ffc3889f
Author: Ingo Molnar <mingo@elte.hu>
Date:   Sun Oct 12 11:32:17 2008 +0200

    Merge branch 'x86/urgent' into core/signal
    
    Conflicts:
            arch/x86/kernel/signal_64.c

commit 0afe2db21394820d32646a695eccf3fbfe6ab5c7
Merge: d84705969f89 43603c8df97f
Author: Ingo Molnar <mingo@elte.hu>
Date:   Sat Oct 11 20:23:20 2008 +0200

    Merge branch 'x86/unify-cpu-detect' into x86-v28-for-linus-phase4-D
    
    Conflicts:
            arch/x86/kernel/cpu/common.c
            arch/x86/kernel/signal_64.c
            include/asm-x86/cpufeature.h

diff --cc arch/x86/kernel/sigframe.h
index 8b4956e800ac,6dd7e2b70a4b..cc673aa55ce4
--- a/arch/x86/kernel/sigframe.h
+++ b/arch/x86/kernel/sigframe.h
@@@ -23,10 -32,6 +32,11 @@@ struct rt_sigframe 
  	char __user *pretcode;
  	struct ucontext uc;
  	struct siginfo info;
+ 	/* fp state follows here */
  };
 +
 +int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 +		sigset_t *set, struct pt_regs *regs);
 +int ia32_setup_frame(int sig, struct k_sigaction *ka,
 +		sigset_t *set, struct pt_regs *regs);
  #endif
diff --cc arch/x86/kernel/signal_64.c
index 694aa888bb19,4665b598a376..823a55bf8c39
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@@ -157,20 -96,9 +94,9 @@@ restore_sigcontext(struct pt_regs *regs
  	}
  
  	{
 -		struct _fpstate __user * buf;
 +		struct _fpstate __user *buf;
  		err |= __get_user(buf, &sc->fpstate);
- 
- 		if (buf) {
- 			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
- 				goto badframe;
- 			err |= restore_i387(buf);
- 		} else {
- 			struct task_struct *me = current;
- 			if (used_math()) {
- 				clear_fpu(me);
- 				clear_used_math();
- 			}
- 		}
+ 		err |= restore_i387_xstate(buf);
  	}
  
  	err |= __get_user(*pax, &sc->ax);
@@@ -273,10 -197,10 +196,10 @@@ get_stack(struct k_sigaction *ka, struc
  }
  
  static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 -			   sigset_t *set, struct pt_regs * regs)
 +			   sigset_t *set, struct pt_regs *regs)
  {
  	struct rt_sigframe __user *frame;
- 	struct _fpstate __user *fp = NULL;
+ 	void __user *fp = NULL;
  	int err = 0;
  	struct task_struct *me = current;
  
@@@ -285,11 -209,8 +208,8 @@@
  		frame = (void __user *)round_down(
  			(unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
  
- 		if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate)))
- 			goto give_sigsegv;
- 
- 		if (save_i387(fp) < 0)
+ 		if (save_i387_xstate(fp) < 0)
 -			err |= -1; 
 +			err |= -1;
  	} else
  		frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
  
@@@ -301,9 -222,12 +221,12 @@@
  		if (err)
  			goto give_sigsegv;
  	}
 -		
 +
  	/* Create the ucontext.  */
- 	err |= __put_user(0, &frame->uc.uc_flags);
+ 	if (cpu_has_xsave)
+ 		err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
+ 	else
+ 		err |= __put_user(0, &frame->uc.uc_flags);
  	err |= __put_user(0, &frame->uc.uc_link);
  	err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
  	err |= __put_user(sas_ss_flags(regs->sp),
diff --cc include/asm-x86/cpufeature.h
index 065c6a86ed80,8d45690bef5f..adfeae6586e1
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@@ -64,49 -72,61 +72,63 @@@
  #define X86_FEATURE_CYRIX_ARR	(3*32+ 2) /* Cyrix ARRs (= MTRRs) */
  #define X86_FEATURE_CENTAUR_MCR	(3*32+ 3) /* Centaur MCRs (= MTRRs) */
  /* cpu types for specific tunings: */
- #define X86_FEATURE_K8		(3*32+ 4) /* Opteron, Athlon64 */
- #define X86_FEATURE_K7		(3*32+ 5) /* Athlon */
- #define X86_FEATURE_P3		(3*32+ 6) /* P3 */
- #define X86_FEATURE_P4		(3*32+ 7) /* P4 */
+ #define X86_FEATURE_K8		(3*32+ 4) /* "" Opteron, Athlon64 */
+ #define X86_FEATURE_K7		(3*32+ 5) /* "" Athlon */
+ #define X86_FEATURE_P3		(3*32+ 6) /* "" P3 */
+ #define X86_FEATURE_P4		(3*32+ 7) /* "" P4 */
  #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
  #define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
- #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
+ #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
  #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
++#define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
  #define X86_FEATURE_PEBS	(3*32+12) /* Precise-Event Based Sampling */
  #define X86_FEATURE_BTS		(3*32+13) /* Branch Trace Store */
- #define X86_FEATURE_SYSCALL32	(3*32+14) /* syscall in ia32 userspace */
- #define X86_FEATURE_SYSENTER32	(3*32+15) /* sysenter in ia32 userspace */
- #define X86_FEATURE_REP_GOOD	(3*32+16) /* rep microcode works well on this CPU */
- #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
- #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
- #define X86_FEATURE_11AP	(3*32+19) /* Bad local APIC aka 11AP */
+ #define X86_FEATURE_SYSCALL32	(3*32+14) /* "" syscall in ia32 userspace */
+ #define X86_FEATURE_SYSENTER32	(3*32+15) /* "" sysenter in ia32 userspace */
+ #define X86_FEATURE_REP_GOOD	(3*32+16) /* rep microcode works well */
+ #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */
+ #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
+ #define X86_FEATURE_11AP	(3*32+19) /* "" Bad local APIC aka 11AP */
  #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
 +#define X86_FEATURE_AMDC1E	(3*32+21) /* AMD C1E detected */
+ #define X86_FEATURE_XTOPOLOGY	(3*32+21) /* cpu topology enum extensions */
  
  /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
- #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
- #define X86_FEATURE_MWAIT	(4*32+ 3) /* Monitor/Mwait support */
- #define X86_FEATURE_DSCPL	(4*32+ 4) /* CPL Qualified Debug Store */
+ #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
+ #define X86_FEATURE_PCLMULQDQ	(4*32+ 1) /* PCLMULQDQ instruction */
+ #define X86_FEATURE_DTES64	(4*32+ 2) /* 64-bit Debug Store */
+ #define X86_FEATURE_MWAIT	(4*32+ 3) /* "monitor" Monitor/Mwait support */
+ #define X86_FEATURE_DSCPL	(4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
+ #define X86_FEATURE_VMX		(4*32+ 5) /* Hardware virtualization */
+ #define X86_FEATURE_SMX		(4*32+ 6) /* Safer mode */
  #define X86_FEATURE_EST		(4*32+ 7) /* Enhanced SpeedStep */
  #define X86_FEATURE_TM2		(4*32+ 8) /* Thermal Monitor 2 */
+ #define X86_FEATURE_SSSE3	(4*32+ 9) /* Supplemental SSE-3 */
  #define X86_FEATURE_CID		(4*32+10) /* Context ID */
+ #define X86_FEATURE_FMA		(4*32+12) /* Fused multiply-add */
  #define X86_FEATURE_CX16	(4*32+13) /* CMPXCHG16B */
  #define X86_FEATURE_XTPR	(4*32+14) /* Send Task Priority Messages */
+ #define X86_FEATURE_PDCM	(4*32+15) /* Performance Capabilities */
  #define X86_FEATURE_DCA		(4*32+18) /* Direct Cache Access */
+ #define X86_FEATURE_XMM4_1	(4*32+19) /* "sse4_1" SSE-4.1 */
+ #define X86_FEATURE_XMM4_2	(4*32+20) /* "sse4_2" SSE-4.2 */
  #define X86_FEATURE_X2APIC	(4*32+21) /* x2APIC */
- #define X86_FEATURE_XMM4_2	(4*32+20) /* Streaming SIMD Extensions-4.2 */
+ #define X86_FEATURE_AES		(4*32+25) /* AES instructions */
+ #define X86_FEATURE_XSAVE	(4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
+ #define X86_FEATURE_OSXSAVE	(4*32+27) /* "" XSAVE enabled in the OS */
+ #define X86_FEATURE_AVX		(4*32+28) /* Advanced Vector Extensions */
  
  /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
- #define X86_FEATURE_XSTORE	(5*32+ 2) /* on-CPU RNG present (xstore insn) */
- #define X86_FEATURE_XSTORE_EN	(5*32+ 3) /* on-CPU RNG enabled */
- #define X86_FEATURE_XCRYPT	(5*32+ 6) /* on-CPU crypto (xcrypt insn) */
- #define X86_FEATURE_XCRYPT_EN	(5*32+ 7) /* on-CPU crypto enabled */
+ #define X86_FEATURE_XSTORE	(5*32+ 2) /* "rng" RNG present (xstore) */
+ #define X86_FEATURE_XSTORE_EN	(5*32+ 3) /* "rng_en" RNG enabled */
+ #define X86_FEATURE_XCRYPT	(5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+ #define X86_FEATURE_XCRYPT_EN	(5*32+ 7) /* "ace_en" on-CPU crypto enabled */
  #define X86_FEATURE_ACE2	(5*32+ 8) /* Advanced Cryptography Engine v2 */
  #define X86_FEATURE_ACE2_EN	(5*32+ 9) /* ACE v2 enabled */
- #define X86_FEATURE_PHE		(5*32+ 10) /* PadLock Hash Engine */
- #define X86_FEATURE_PHE_EN	(5*32+ 11) /* PHE enabled */
- #define X86_FEATURE_PMM		(5*32+ 12) /* PadLock Montgomery Multiplier */
- #define X86_FEATURE_PMM_EN	(5*32+ 13) /* PMM enabled */
+ #define X86_FEATURE_PHE		(5*32+10) /* PadLock Hash Engine */
+ #define X86_FEATURE_PHE_EN	(5*32+11) /* PHE enabled */
+ #define X86_FEATURE_PMM		(5*32+12) /* PadLock Montgomery Multiplier */
+ #define X86_FEATURE_PMM_EN	(5*32+13) /* PMM enabled */
  
  /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
  #define X86_FEATURE_LAHF_LM	(6*32+ 0) /* LAHF/SAHF in long mode */

commit d84705969f898f294bc3fc32eca33580f14105bd
Merge: 725c25819e4a 11494547b175
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Oct 10 19:50:00 2008 +0200

    Merge branch 'x86/apic' into x86-v28-for-linus-phase4-B
    
    Conflicts:
            arch/x86/kernel/apic_32.c
            arch/x86/kernel/apic_64.c
            arch/x86/kernel/setup.c
            drivers/pci/intel-iommu.c
            include/asm-x86/cpufeature.h
            include/asm-x86/dma-mapping.h

diff --cc arch/x86/kernel/setup.c
index 141efab52400,59f07e14d083..46c98efbbf8d
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@@ -758,6 -738,9 +758,8 @@@ void __init setup_arch(char **cmdline_p
  #else
  	num_physpages = max_pfn;
  
 -	check_efer();
+  	if (cpu_has_x2apic)
+  		check_x2apic();
  
  	/* How many end-of-memory variables you have, grandma! */
  	/* need this before calling reserve_initrd */
diff --cc include/asm-x86/cpufeature.h
index 250fa0cb144b,42afe9ca3a37..065c6a86ed80
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@@ -93,7 -91,7 +93,8 @@@
  #define X86_FEATURE_CX16	(4*32+13) /* CMPXCHG16B */
  #define X86_FEATURE_XTPR	(4*32+14) /* Send Task Priority Messages */
  #define X86_FEATURE_DCA		(4*32+18) /* Direct Cache Access */
+ #define X86_FEATURE_X2APIC	(4*32+21) /* x2APIC */
 +#define X86_FEATURE_XMM4_2	(4*32+20) /* Streaming SIMD Extensions-4.2 */
  
  /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
  #define X86_FEATURE_XSTORE	(5*32+ 2) /* on-CPU RNG present (xstore insn) */
@@@ -192,7 -190,7 +193,8 @@@ extern const char * const x86_power_fla
  #define cpu_has_gbpages		boot_cpu_has(X86_FEATURE_GBPAGES)
  #define cpu_has_arch_perfmon	boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
  #define cpu_has_pat		boot_cpu_has(X86_FEATURE_PAT)
+ #define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 +#define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
  
  #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
  # define cpu_has_invlpg		1

commit 5b16a2212f1840b63d6cc476b26ff9349dab185e
Merge: b922df738374 5b7dba4ff834
Author: Ingo Molnar <mingo@elte.hu>
Date:   Sat Oct 11 18:50:45 2008 +0200

    Merge branch 'sched/clock' into sched/urgent

commit 725c25819e4a0dafdcf42a5f31bc569341919c7c
Merge: 3dd392a407d1 72d31053f62c 129d6aba444d 1e19b16a30c3
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Oct 10 19:47:12 2008 +0200

    Merge branches 'core/iommu', 'x86/amd-iommu' and 'x86/iommu' into x86-v28-for-linus-phase3-B
    
    Conflicts:
            arch/x86/kernel/pci-gart_64.c
            include/asm-x86/dma-mapping.h

diff --cc MAINTAINERS
index 8dae4555f10e,cad81a24e832,4c5e9fe0f7db,9ac82eab82ee..3c124d7989e0
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@@@ -2703,9 -2705,8 -2767,8 -2706,8 +2704,9 @@@@@ S:	Supporte
    MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
    P:	Michael Kerrisk
    M:	mtk.manpages@gmail.com
  --W:     http://www.kernel.org/doc/man-pages
  --S:     Supported
  ++W:	http://www.kernel.org/doc/man-pages
 +++L:	linux-man@vger.kernel.org
  ++S:	Supported
    
    MARVELL LIBERTAS WIRELESS DRIVER
    P:	Dan Williams
diff --cc arch/x86/kernel/amd_iommu.c
index 042fdc27bc92,69b4d060b21c,de39e1f2ede5,3b346c6f5514..34e4d112b1ef
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@@@@ -1008,7 -998,7 -998,7 -1175,15 +1185,15 @@@@@ static void *alloc_coherent(struct devi
    	struct protection_domain *domain;
    	u16 devid;
    	phys_addr_t paddr;
+++ 	u64 dma_mask = dev->coherent_dma_mask;
+++ 
+++ 	if (!check_device(dev))
+++ 		return NULL;
+   
+++ 	if (!get_device_resources(dev, &iommu, &domain, &devid))
+++ 		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
 ++ 
+++ 	flag |= __GFP_ZERO;
    	virt_addr = (void *)__get_free_pages(flag, get_order(size));
    	if (!virt_addr)
    		return 0;
diff --cc arch/x86/kernel/pci-gart_64.c
index 1a895a582534,49285f8fd4d5,49285f8fd4d5,d077116fec1b..145f1c83369f
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@@@@ -80,10 -80,9 -80,9 -80,10 +80,10 @@@@@ AGPEXTERN int agp_memory_reserved
    AGPEXTERN __u32 *agp_gatt_table;
    
    static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
--- static int need_flush;		/* global flush state. set for each gart wrap */
+++ static bool need_flush;		/* global flush state. set for each gart wrap */
    
 -- static unsigned long alloc_iommu(struct device *dev, int size)
 ++ static unsigned long alloc_iommu(struct device *dev, int size,
 ++ 				 unsigned long align_mask)
    {
    	unsigned long offset, flags;
    	unsigned long boundary_size;
@@@@@ -96,12 -95,11 -95,11 -96,12 +96,12 @@@@@
    
    	spin_lock_irqsave(&iommu_bitmap_lock, flags);
    	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
 -- 				  size, base_index, boundary_size, 0);
 ++ 				  size, base_index, boundary_size, align_mask);
    	if (offset == -1) {
--- 		need_flush = 1;
+++ 		need_flush = true;
    		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
 -- 					  size, base_index, boundary_size, 0);
 ++ 					  size, base_index, boundary_size,
 ++ 					  align_mask);
    	}
    	if (offset != -1) {
    		next_bit = offset+size;
@@@@@ -261,20 -259,16 -259,16 -252,6 +252,6 @@@@@ static dma_addr_t dma_map_area(struct d
    	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
    }
    
--- static dma_addr_t
--- gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
--- {
-   	dma_addr_t map;
-   	unsigned long align_mask;
-   
-   	align_mask = (1UL << get_order(size)) - 1;
-   	map = dma_map_area(dev, paddr, size, dir, align_mask);
 -- 	dma_addr_t map = dma_map_area(dev, paddr, size, dir);
--- 
--- 	flush_gart();
--- 
--- 	return map;
--- }
--- 
    /* Map a single area into the IOMMU */
    static dma_addr_t
    gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
diff --cc include/asm-x86/amd_iommu.h
index 783f43e58052,30a12049353b,30a12049353b,2fd97cb250c7..041d0db7da27
--- a/include/asm-x86/amd_iommu.h
+++ b/include/asm-x86/amd_iommu.h
@@@@@ -17,9 -17,9 -17,9 -17,11 +17,11 @@@@@
     * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
     */
    
 ---#ifndef _ASM_X86_AMD_IOMMU_H
 ---#define _ASM_X86_AMD_IOMMU_H
 +++#ifndef ASM_X86__AMD_IOMMU_H
 +++#define ASM_X86__AMD_IOMMU_H
 ++ 
+++ #include <linux/irqreturn.h>
+   
    #ifdef CONFIG_AMD_IOMMU
    extern int amd_iommu_init(void);
    extern int amd_iommu_init_dma_ops(void);
diff --cc include/asm-x86/dma-mapping.h
index 5d200e78bd81,ad9cd6d49bfc,ad9cd6d49bfc,3b808e9bb72c..219c33d6361c
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@@@@ -68,7 -68,7 -68,7 -65,7 +65,7 @@@@@ static inline struct dma_mapping_ops *g
    		return dma_ops;
    	else
    		return dev->archdata.dma_ops;
----#endif
++++#endif /* ASM_X86__DMA_MAPPING_H */
    }
    
    /* Make sure we keep the same behaviour */
@@@@@ -247,7 -247,7 -247,7 -241,68 +241,68 @@@@@ static inline int dma_get_cache_alignme
    	return boot_cpu_data.x86_clflush_size;
    }
    
--- #define dma_is_consistent(d, h)	(1)
+++ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
+++ 						    gfp_t gfp)
+++ {
+++ 	unsigned long dma_mask = 0;
 ++ 
-   #include <asm-generic/dma-coherent.h>
-   #endif /* ASM_X86__DMA_MAPPING_H */
+++ 	dma_mask = dev->coherent_dma_mask;
+++ 	if (!dma_mask)
+++ 		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+++ 
+++ 	return dma_mask;
+++ }
+++ 
+++ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
+++ {
+++ #ifdef CONFIG_X86_64
+++ 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
+++ 
+++ 	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+++ 		gfp |= GFP_DMA32;
+++ #endif
+++        return gfp;
+++ }
+++ 
+++ static inline void *
+++ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+++ 		gfp_t gfp)
+++ {
+++ 	struct dma_mapping_ops *ops = get_dma_ops(dev);
+++ 	void *memory;
+++ 
+++ 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+++ 
+++ 	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+++ 		return memory;
+++ 
+++ 	if (!dev) {
+++ 		dev = &x86_dma_fallback_dev;
+++ 		gfp |= GFP_DMA;
+++ 	}
+++ 
+++ 	if (!is_device_dma_capable(dev))
+++ 		return NULL;
+++ 
+++ 	if (!ops->alloc_coherent)
+++ 		return NULL;
+++ 
+++ 	return ops->alloc_coherent(dev, size, dma_handle,
+++ 				   dma_alloc_coherent_gfp_flags(dev, gfp));
+++ }
+++ 
+++ static inline void dma_free_coherent(struct device *dev, size_t size,
+++ 				     void *vaddr, dma_addr_t bus)
+++ {
+++ 	struct dma_mapping_ops *ops = get_dma_ops(dev);
+++ 
+++ 	WARN_ON(irqs_disabled());       /* for portability */
+++ 
+++ 	if (dma_release_from_coherent(dev, get_order(size), vaddr))
+++ 		return;
+++ 
+++ 	if (ops->free_coherent)
+++ 		ops->free_coherent(dev, size, vaddr, bus);
+++ }
+   
 -- #include <asm-generic/dma-coherent.h>
+   #endif

commit 3dd392a407d15250a501fa109cc1f93fee95ef85
Merge: b27a43c1e905 d403a6484f03
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Oct 10 19:30:08 2008 +0200

    Merge branch 'linus' into x86/pat2
    
    Conflicts:
            arch/x86/mm/init_64.c

diff --cc arch/x86/mm/init_64.c
index 8c7eae490a2c,770536ebf7e9..fb30486c82f7
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@@ -493,16 -451,16 +493,16 @@@ static void __init find_early_table_spa
  	unsigned long puds, pmds, ptes, tables, start;
  
  	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
- 	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
+ 	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
 -	if (direct_gbpages) {
 +	if (use_gbpages) {
  		unsigned long extra;
  		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
  		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
  	} else
  		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
- 	tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+ 	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
  
 -	if (cpu_has_pse) {
 +	if (use_pse) {
  		unsigned long extra;
  		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
  		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
diff --cc arch/x86/mm/pageattr.c
index b6374d653d06,898fad617abe..a9ec89c3fbca
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@@ -979,13 -904,15 +979,15 @@@ EXPORT_SYMBOL(set_memory_nx)
  
  int set_memory_ro(unsigned long addr, int numpages)
  {
 -	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
 +	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
  }
+ EXPORT_SYMBOL_GPL(set_memory_ro);
  
  int set_memory_rw(unsigned long addr, int numpages)
  {
 -	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
 +	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
  }
+ EXPORT_SYMBOL_GPL(set_memory_rw);
  
  int set_memory_np(unsigned long addr, int numpages)
  {

commit ad2cde16a21985cdc4302e4a4b0fc373d666fdf7
Author: Ingo Molnar <mingo@elte.hu>
Date:   Tue Sep 30 13:20:45 2008 +0200

    x86, pat: cleanups
    
    clean up recently added code to be more consistent with other x86 code.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index aceb6c7c6dba..738fd0f24958 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -7,24 +7,24 @@
  * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
  */
 
-#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/bootmem.h>
+#include <linux/debugfs.h>
 #include <linux/kernel.h>
 #include <linux/gfp.h>
+#include <linux/mm.h>
 #include <linux/fs.h>
-#include <linux/bootmem.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
 
-#include <asm/msr.h>
-#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 #include <asm/processor.h>
-#include <asm/page.h>
+#include <asm/tlbflush.h>
 #include <asm/pgtable.h>
-#include <asm/pat.h>
-#include <asm/e820.h>
-#include <asm/cacheflush.h>
 #include <asm/fcntl.h>
+#include <asm/e820.h>
 #include <asm/mtrr.h>
+#include <asm/page.h>
+#include <asm/msr.h>
+#include <asm/pat.h>
 #include <asm/io.h>
 
 #ifdef CONFIG_X86_PAT
@@ -46,6 +46,7 @@ early_param("nopat", nopat);
 
 
 static int debug_enable;
+
 static int __init pat_debug_setup(char *str)
 {
 	debug_enable = 1;
@@ -145,14 +146,14 @@ static char *cattr_name(unsigned long flags)
  */
 
 struct memtype {
-	u64 start;
-	u64 end;
-	unsigned long type;
-	struct list_head nd;
+	u64			start;
+	u64			end;
+	unsigned long		type;
+	struct list_head	nd;
 };
 
 static LIST_HEAD(memtype_list);
-static DEFINE_SPINLOCK(memtype_lock); 	/* protects memtype list */
+static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
 
 /*
  * Does intersection of PAT memory type and MTRR memory type and returns
@@ -180,8 +181,8 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 	return req_type;
 }
 
-static int chk_conflict(struct memtype *new, struct memtype *entry,
-			unsigned long *type)
+static int
+chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
 {
 	if (new->type != entry->type) {
 		if (type) {
@@ -210,15 +211,6 @@ static int chk_conflict(struct memtype *new, struct memtype *entry,
 static struct memtype *cached_entry;
 static u64 cached_start;
 
-/*
- * RED-PEN:  TODO: Add PageReserved() check as well here,
- * once we add SetPageReserved() to all the drivers using
- * set_memory_* or set_pages_*.
- *
- * This will help prevent accidentally freeing pages
- * before setting the attribute back to WB.
- */
-
 /*
  * For RAM pages, mark the pages as non WB memory type using
  * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
@@ -232,7 +224,7 @@ static u64 cached_start;
  * UC and WC mapping.
  */
 static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
-		       unsigned long *new_type)
+				  unsigned long *new_type)
 {
 	struct page *page;
 	u64 pfn, end_pfn;
@@ -295,15 +287,15 @@ static int free_ram_pages_type(u64 start, u64 end)
  * it will return a negative return value.
  */
 int reserve_memtype(u64 start, u64 end, unsigned long req_type,
-			unsigned long *new_type)
+		    unsigned long *new_type)
 {
 	struct memtype *new, *entry;
 	unsigned long actual_type;
 	struct list_head *where;
-	int err = 0;
 	int is_range_ram;
+	int err = 0;
 
- 	BUG_ON(start >= end); /* end is exclusive */
+	BUG_ON(start >= end); /* end is exclusive */
 
 	if (!pat_enabled) {
 		/* This is identical to page table setting without PAT */
@@ -336,9 +328,10 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			actual_type = _PAGE_CACHE_WB;
 		else
 			actual_type = _PAGE_CACHE_UC_MINUS;
-	} else
+	} else {
 		actual_type = pat_x_mtrr_type(start, end,
 					      req_type & _PAGE_CACHE_MASK);
+	}
 
 	is_range_ram = pagerange_is_ram(start, end);
 	if (is_range_ram == 1)
@@ -350,9 +343,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (!new)
 		return -ENOMEM;
 
-	new->start = start;
-	new->end = end;
-	new->type = actual_type;
+	new->start	= start;
+	new->end	= end;
+	new->type	= actual_type;
 
 	if (new_type)
 		*new_type = actual_type;
@@ -411,6 +404,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		       start, end, cattr_name(new->type), cattr_name(req_type));
 		kfree(new);
 		spin_unlock(&memtype_lock);
+
 		return err;
 	}
 
@@ -469,6 +463,7 @@ int free_memtype(u64 start, u64 end)
 	}
 
 	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
+
 	return err;
 }
 
@@ -575,9 +570,9 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 
 void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
 {
+	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
 	u64 addr = (u64)pfn << PAGE_SHIFT;
 	unsigned long flags;
-	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
 
 	reserve_memtype(addr, addr + size, want_flags, &flags);
 	if (flags != want_flags) {
@@ -620,6 +615,7 @@ static struct memtype *memtype_get_idx(loff_t pos)
 	}
 	spin_unlock(&memtype_lock);
 	kfree(print_entry);
+
 	return NULL;
 }
 
@@ -650,6 +646,7 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
 	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
 			print_entry->start, print_entry->end);
 	kfree(print_entry);
+
 	return 0;
 }
 

commit 8eb95f28f66b1a5461fdbcc9a1ee9068fb2cf2b6
Merge: d7451fca18e2 3fa8749e584b
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Oct 10 09:25:29 2008 +0200

    Merge commit 'v2.6.27' into timers/hpet

commit a5d8c3483a6e19aca95ef6a2c5890e33bfa5b293
Author: Ingo Molnar <mingo@elte.hu>
Date:   Thu Oct 9 11:35:51 2008 +0200

    sched debug: add name to sched_domain sysctl entries
    
    add /proc/sys/kernel/sched_domain/cpu0/domain0/name, to make
    it easier to see which specific scheduler domain remained at
    that entry.
    
    Since we process the scheduler domain tree and
    simplify it, it's not always immediately clear during debugging
    which domain came from where.
    
    depends on CONFIG_SCHED_DEBUG=y.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d8e699b55858..5d0819ee442a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -824,6 +824,9 @@ struct sched_domain {
 	unsigned int ttwu_move_affine;
 	unsigned int ttwu_move_balance;
 #endif
+#ifdef CONFIG_SCHED_DEBUG
+	char *name;
+#endif
 };
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
diff --git a/kernel/sched.c b/kernel/sched.c
index 9715f4ce6cfe..6f230596bd0c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6351,7 +6351,7 @@ set_table_entry(struct ctl_table *entry,
 static struct ctl_table *
 sd_alloc_ctl_domain_table(struct sched_domain *sd)
 {
-	struct ctl_table *table = sd_alloc_ctl_entry(12);
+	struct ctl_table *table = sd_alloc_ctl_entry(13);
 
 	if (table == NULL)
 		return NULL;
@@ -6379,7 +6379,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
 		sizeof(int), 0644, proc_dointvec_minmax);
 	set_table_entry(&table[10], "flags", &sd->flags,
 		sizeof(int), 0644, proc_dointvec_minmax);
-	/* &table[11] is terminator */
+	set_table_entry(&table[11], "name", sd->name,
+		CORENAME_MAX_SIZE, 0444, proc_dostring);
+	/* &table[12] is terminator */
 
 	return table;
 }
@@ -7263,13 +7265,21 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
  */
 
+#ifdef CONFIG_SCHED_DEBUG
+# define SD_INIT_NAME(sd, type)		sd->name = #type
+#else
+# define SD_INIT_NAME(sd, type)		do { } while (0)
+#endif
+
 #define	SD_INIT(sd, type)	sd_init_##type(sd)
+
 #define SD_INIT_FUNC(type)	\
 static noinline void sd_init_##type(struct sched_domain *sd)	\
 {								\
 	memset(sd, 0, sizeof(*sd));				\
 	*sd = SD_##type##_INIT;					\
 	sd->level = SD_LV_##type;				\
+	SD_INIT_NAME(sd, type);					\
 }
 
 SD_INIT_FUNC(CPU)