Patches contributed by Eötvös Loránd University
commit 7243f2145a9b06e5cf9a49fc9b8b9a4fff6fb42e
Merge: b478b782e110 62395efdb0ef 5bee17f18b59
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Mar 16 09:12:42 2009 +0100
Merge branches 'tracing/ftrace', 'tracing/syscalls' and 'linus' into tracing/core
Conflicts:
arch/parisc/kernel/irq.c
diff --cc arch/parisc/kernel/irq.c
index 49482806863f,49482806863f,29e70e16ede8..2b5f5915dd1d
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@@@ -133,10 -133,10 -132,13 +132,13 @@@@ int cpu_check_affinity(unsigned int irq
static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
{
-- if (cpu_check_affinity(irq, dest))
++ int cpu_dest;
++
++ cpu_dest = cpu_check_affinity(irq, dest);
++ if (cpu_dest < 0)
return;
-- cpumask_copy(irq_desc[irq].affinity, dest);
- cpumask_copy(&irq_desc[irq].affinity, &cpumask_of_cpu(cpu_dest));
+++ cpumask_copy(&irq_desc[irq].affinity, dest);
}
#endif
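The conflict resolution above is more than formatting: cpu_check_affinity() returns the selected CPU number (>= 0) on success and a negative errno on failure, so the old boolean test treated any valid CPU id as an error and returned early. A minimal standalone sketch of the corrected calling pattern — plain C stand-ins here, not the kernel's cpumask types:

    #include <stdio.h>

    /* Stand-in: returns a target CPU id >= 0, or a negative errno.
     * (The kernel version inspects a struct cpumask; an unsigned long
     * bitmask keeps this sketch self-contained.) */
    static int cpu_check_affinity(unsigned int irq, unsigned long dest)
    {
            return dest ? 1 : -22;          /* -EINVAL for an empty mask */
    }

    static void cpu_set_affinity_irq(unsigned int irq, unsigned long dest)
    {
            int cpu_dest;

            /* Buggy pre-merge form: `if (cpu_check_affinity(irq, dest))
             * return;` bails out on any valid CPU id > 0. */
            cpu_dest = cpu_check_affinity(irq, dest);
            if (cpu_dest < 0)               /* only negatives are errors */
                    return;

            printf("IRQ %u affinity set via CPU %d\n", irq, cpu_dest);
    }

    int main(void)
    {
            cpu_set_affinity_irq(5, 0x2);   /* mask with CPU 1 set */
            return 0;
    }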
diff --cc include/linux/ftrace.h
index 9d598bbf28a6,6dc1c652447e,677432b9cb7e..db3fed630db3
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@@@ -485,42 -486,50 -490,6 +485,50 @@@@ static inline int test_tsk_trace_graph(
return tsk->trace & TSK_TRACE_FL_GRAPH;
}
+extern int ftrace_dump_on_oops;
+
#endif /* CONFIG_TRACING */
+
+#ifdef CONFIG_HW_BRANCH_TRACER
+
+void trace_hw_branch(u64 from, u64 to);
+void trace_hw_branch_oops(void);
+
+#else /* CONFIG_HW_BRANCH_TRACER */
+
+static inline void trace_hw_branch(u64 from, u64 to) {}
+static inline void trace_hw_branch_oops(void) {}
+
+#endif /* CONFIG_HW_BRANCH_TRACER */
+
+/*
+ * A syscall entry in the ftrace syscalls array.
+ *
- * @syscall_nr: syscall number
+ + * @name: name of the syscall
+ + * @nb_args: number of parameters it takes
+ + * @types: list of types as strings
+ + * @args: list of args as strings (args[i] matches types[i])
+ */
- struct syscall_trace_entry {
- int syscall_nr;
+ +struct syscall_metadata {
+ + const char *name;
+ + int nb_args;
+ + const char **types;
+ + const char **args;
+};
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+ +extern void arch_init_ftrace_syscalls(void);
+ +extern struct syscall_metadata *syscall_nr_to_meta(int nr);
+extern void start_ftrace_syscalls(void);
+extern void stop_ftrace_syscalls(void);
+extern void ftrace_syscall_enter(struct pt_regs *regs);
+extern void ftrace_syscall_exit(struct pt_regs *regs);
+#else
+static inline void start_ftrace_syscalls(void) { }
+static inline void stop_ftrace_syscalls(void) { }
+static inline void ftrace_syscall_enter(struct pt_regs *regs) { }
+static inline void ftrace_syscall_exit(struct pt_regs *regs) { }
+#endif
+
#endif /* _LINUX_FTRACE_H */
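For context on the ftrace.h additions: syscall_nr_to_meta() maps a syscall number to a syscall_metadata record in which types[i] pairs with args[i]. A hedged user-space sketch of how a tracer could format such a record — the sys_open entry below is a hand-written stand-in, since the real tables are generated per-architecture via arch_init_ftrace_syscalls():

    #include <stdio.h>

    struct syscall_metadata {
            const char *name;
            int nb_args;
            const char **types;
            const char **args;
    };

    /* Hypothetical table entry, for illustration only. */
    static const char *open_types[] = { "const char *", "int", "int" };
    static const char *open_args[]  = { "filename", "flags", "mode" };
    static struct syscall_metadata open_meta = {
            .name    = "sys_open",
            .nb_args = 3,
            .types   = open_types,
            .args    = open_args,
    };

    int main(void)
    {
            struct syscall_metadata *m = &open_meta;
            int i;

            /* Print a prototype-style line, pairing types[i] with args[i]. */
            printf("%s(", m->name);
            for (i = 0; i < m->nb_args; i++)
                    printf("%s%s %s", i ? ", " : "", m->types[i], m->args[i]);
            printf(")\n");
            return 0;
    }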
diff --cc kernel/trace/trace.h
index 56ce34d90b03,d80ca0d464d9,4d3d381bfd95..f56162806f50
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@@@ -18,9 -18,8 -16,9 +18,9 @@@@ enum trace_type
TRACE_FN,
TRACE_CTX,
TRACE_WAKE,
- TRACE_CONT,
TRACE_STACK,
TRACE_PRINT,
++ TRACE_BPRINT,
TRACE_SPECIAL,
TRACE_MMIO_RW,
TRACE_MMIO_MAP,
@@@@ -118,23 -117,16 -112,15 +118,23 @@@@ struct userstack_entry
};
/*
- * ftrace_printk entry:
+ * trace_printk entry:
*/
- struct print_entry {
++struct bprint_entry {
+ struct trace_entry ent;
+ unsigned long ip;
+ int depth;
+ const char *fmt;
+ u32 buf[];
+};
+
+ struct print_entry {
+ struct trace_entry ent;
+ unsigned long ip;
+ int depth;
+ char buf[];
+ };
+
#define TRACE_OLD_SIZE 88
struct trace_field_cont {
@@@@ -184,24 -176,37 -170,6 +184,37 @@@@ struct trace_power
struct power_trace state_data;
};
+struct kmemtrace_alloc_entry {
+ struct trace_entry ent;
+ enum kmemtrace_type_id type_id;
+ unsigned long call_site;
+ const void *ptr;
+ size_t bytes_req;
+ size_t bytes_alloc;
+ gfp_t gfp_flags;
+ int node;
+};
+
+struct kmemtrace_free_entry {
+ struct trace_entry ent;
+ enum kmemtrace_type_id type_id;
+ unsigned long call_site;
+ const void *ptr;
+};
+
+ +struct syscall_trace_enter {
+ + struct trace_entry ent;
+ + int nr;
+ + unsigned long args[];
+ +};
+ +
+ +struct syscall_trace_exit {
+ + struct trace_entry ent;
+ + int nr;
+ + unsigned long ret;
+ +};
+ +
+ +
/*
* trace_flag_type is an enumeration that holds different
* states when a trace occurs. These are:
@@@@ -310,11 -314,15 -279,7 +323,15 @@@@ extern void __ftrace_bad_type(void)
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
TRACE_GRAPH_RET); \
IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
- IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
+ IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
+ IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \
+ TRACE_KMEM_ALLOC); \
+ IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
+ TRACE_KMEM_FREE); \
+ + IF_ASSIGN(var, ent, struct syscall_trace_enter, \
+ + TRACE_SYSCALL_ENTER); \
+ + IF_ASSIGN(var, ent, struct syscall_trace_exit, \
+ + TRACE_SYSCALL_EXIT); \
__ftrace_bad_type(); \
} while (0)
@@@@ -579,10 -587,8 -488,17 +596,10 @@@@ extern int trace_selftest_startup_branc
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
-extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
-extern void trace_seq_print_cont(struct trace_seq *s,
- struct trace_iterator *iter);
-
-extern int
-seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
- unsigned long sym_flags);
-extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
- size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int
++trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args);
++extern int
trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
extern unsigned long trace_flags;
@@@@ -750,51 -756,26 -664,4 +767,51 @@@@ static inline void trace_branch_disable
}
#endif /* CONFIG_BRANCH_TRACER */
++/* set ring buffers to default size if not already done so */
++int tracing_update_buffers(void);
++
+/* trace event type bit fields, not numeric */
+enum {
+ TRACE_EVENT_TYPE_PRINTF = 1,
+ TRACE_EVENT_TYPE_RAW = 2,
+};
+
+struct ftrace_event_call {
+ char *name;
+ char *system;
+ struct dentry *dir;
+ int enabled;
+ int (*regfunc)(void);
+ void (*unregfunc)(void);
+ int id;
+ int (*raw_init)(void);
+ int (*show_format)(struct trace_seq *s);
+};
+
+void event_trace_printk(unsigned long ip, const char *fmt, ...);
+extern struct ftrace_event_call __start_ftrace_events[];
+extern struct ftrace_event_call __stop_ftrace_events[];
+
++extern const char *__start___trace_bprintk_fmt[];
++extern const char *__stop___trace_bprintk_fmt[];
++
++/*
++ * The double __builtin_constant_p is because gcc will give us an error
++ * if we try to allocate the static variable to fmt if it is not a
++ * constant. Even with the outer if statement optimizing out.
++ */
++#define event_trace_printk(ip, fmt, args...) \
++do { \
++ __trace_printk_check_format(fmt, ##args); \
++ tracing_record_cmdline(current); \
++ if (__builtin_constant_p(fmt)) { \
++ static const char *trace_printk_fmt \
++ __attribute__((section("__trace_printk_fmt"))) = \
++ __builtin_constant_p(fmt) ? fmt : NULL; \
++ \
++ __trace_bprintk(ip, trace_printk_fmt, ##args); \
++ } else \
++ __trace_printk(ip, fmt, ##args); \
++} while (0)
++
#endif /* _LINUX_KERNEL_TRACE_H */
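The double __builtin_constant_p in event_trace_printk() is worth illustrating. The outer test picks the fast binary-printk path for compile-time-constant formats; the inner one lets gcc accept the static initializer even when fmt is a runtime value (that branch is then dead, but gcc still type-checks it, and the conditional folds to a constant either way). A reduced user-space sketch of the same dispatch, with printf stand-ins for __trace_bprintk/__trace_printk:

    #include <stdio.h>

    /* printf stand-ins for the kernel's two paths (illustration only). */
    #define fast_path(fmt, args...) \
            do { fputs("[bprintk] ", stdout); printf(fmt, ##args); } while (0)
    #define slow_path(fmt, args...) \
            do { fputs("[printk]  ", stdout); printf(fmt, ##args); } while (0)

    /*
     * Same shape as event_trace_printk(): the outer __builtin_constant_p
     * selects the path; the inner one folds the static initializer to a
     * constant (fmt or NULL) so gcc accepts it even for runtime formats.
     */
    #define demo_printk(fmt, args...)                               \
    do {                                                            \
            if (__builtin_constant_p(fmt)) {                        \
                    static const char *stored_fmt =                 \
                            __builtin_constant_p(fmt) ? fmt : NULL; \
                    fast_path(stored_fmt, ##args);                  \
            } else                                                  \
                    slow_path(fmt, ##args);                         \
    } while (0)

    int main(void)
    {
            const char *runtime_fmt = "value=%d\n";

            demo_printk("value=%d\n", 1);   /* literal fmt: fast path */
            demo_printk(runtime_fmt, 2);    /* runtime fmt: slow path */
            return 0;
    }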
commit b0fe551000179c868d46266278a890eab878baca
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Mar 12 15:15:31 2009 +0100
kconfig: improve seed in randconfig
'make randconfig' uses glibc's rand function, and the seed of
that PRNG is set via:
srand(time(NULL));
But 'time()' only increases once every second - freezing the
randconfig result within a single second.
My Nehalem testbox completes a randconfig run in well under 1 second,
and I have a few scripts that run 'randconfig until condition X'
loops.
Those scripts currently waste a lot of CPU time because randconfig
changes its seed only once per second.
Change the seed to be microseconds based. (I checked the statistical
spread of the seed - the now.tv_sec*now.tv_usec multiplication
there further improves it.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Roman Zippel <zippel@linux-m68k.org>
[sam: fix for systems where usec is zero - noticed by Geert Uytterhoeven]
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 3e1057f885c6..d190092c3b6e 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -11,6 +11,7 @@
#include <time.h>
#include <unistd.h>
#include <sys/stat.h>
+#include <sys/time.h>
#define LKC_DIRECT_LINK
#include "lkc.h"
@@ -464,9 +465,22 @@ int main(int ac, char **av)
input_mode = set_yes;
break;
case 'r':
+ {
+ struct timeval now;
+ unsigned int seed;
+
+ /*
+ * Use microseconds derived seed,
+ * compensate for systems where it may be zero
+ */
+ gettimeofday(&now, NULL);
+
+ seed = (unsigned int)((now.tv_sec + 1) * (now.tv_usec + 1));
+ srand(seed);
+
input_mode = set_random;
- srand(time(NULL));
break;
+ }
case 'h':
printf(_("See README for usage info\n"));
exit(0);
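A standalone demo of the new seeding, for anyone wanting to check the spread themselves — the +1 offsets keep a zero tv_usec (seen on some systems, per the sign-off note) from zeroing out the whole product:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/time.h>

    int main(void)
    {
            struct timeval now;
            unsigned int seed;

            /* Microseconds-derived seed, as in the patched conf.c. */
            gettimeofday(&now, NULL);
            seed = (unsigned int)((now.tv_sec + 1) * (now.tv_usec + 1));
            srand(seed);

            printf("seed=%u first=%d\n", seed, rand());
            return 0;
    }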
commit 0ca0f16fd17c5d880dd0abbe03595b0c7c5b3c95
Merge: c550033ced48 7a81d9a7da03 88200bc28da3 0f3fa48a7eaf 91219bcbdccc 063402356280 773e673de272 5a8ac9d28dae 16a6791934a1 895791dac694
Author: Ingo Molnar <mingo@elte.hu>
Date: Sat Mar 14 16:25:40 2009 +0100
Merge branches 'x86/apic', 'x86/asm', 'x86/cleanups', 'x86/debug', 'x86/kconfig', 'x86/mm', 'x86/ptrace', 'x86/setup' and 'x86/urgent'; commit 'v2.6.29-rc8' into x86/core
diff --cc arch/x86/Kconfig
index 7fcf85182681,31758378bcd2,7fcf85182681,469f3450bf81,7fcf85182681,d571cc7e3d35,076f4f85f6ea,7fcf85182681,87717f3687d2,bc2fbadff9f9..34bc3a89228b
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@@@@@@@@@@ -136,11 -136,11 -136,11 -136,8 -136,11 -136,11 -136,11 -136,11 -136,11 -133,8 +136,11 @@@@@@@@@@@ config ARCH_HAS_CACHE_LINE_SIZ
def_bool y
config HAVE_SETUP_PER_CPU_AREA
- def_bool X86_64_SMP || (X86_SMP && !X86_VOYAGER)
+ def_bool y
+
+ +config HAVE_DYNAMIC_PER_CPU_AREA
+ + def_bool y
+
config HAVE_CPUMASK_OF_CPU_MAP
def_bool X86_64_SMP
@@@@@@@@@@@ -1437,8 -1431,8 -1437,8 -1423,8 -1437,8 -1440,8 -1431,8 -1437,8 -1431,8 -1400,8 +1440,8 @@@@@@@@@@@ config CRASH_DUM
config KEXEC_JUMP
bool "kexec jump (EXPERIMENTAL)"
depends on EXPERIMENTAL
- - - depends on KEXEC && HIBERNATION && X86_32
- help
+ + + depends on KEXEC && HIBERNATION
+ ---help---
Jump between original kernel and kexeced kernel and invoke
code in physical address mode via KEXEC
diff --cc arch/x86/include/asm/processor.h
index 76139506c3e4,76139506c3e4,76139506c3e4,dccef5be0fc1,76139506c3e4,76139506c3e4,bd3406db1d68,76139506c3e4,76139506c3e4,3bfd5235a9eb..ae85a8d66a30
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@@@@@@@@@@ -74,10 -74,10 -74,10 -74,10 -74,10 -74,10 -74,10 -74,10 -74,10 -73,10 +74,10 @@@@@@@@@@@ struct cpuinfo_x86
char pad0;
#else
/* Number of 4K pages in DTLB/ITLB combined(in pages): */
- int x86_tlbsize;
+ int x86_tlbsize;
++++++ +++#endif
__u8 x86_virt_bits;
__u8 x86_phys_bits;
------ ---#endif
/* CPUID returned core id bits: */
__u8 x86_coreid_bits;
/* Max extended CPUID function supported: */
@@@@@@@@@@@ -373,30 -373,30 -373,30 -373,33 -373,30 -373,30 -373,30 -373,30 -373,30 -378,9 +373,33 @@@@@@@@@@@ union thread_xstate
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);
+
+union irq_stack_union {
+ char irq_stack[IRQ_STACK_SIZE];
+ /*
+ * GCC hardcodes the stack canary as %gs:40. Since the
+ * irq_stack is the object at %gs:0, we reserve the bottom
+ * 48 bytes of the irq stack for the canary.
+ */
+ struct {
+ char gs_base[40];
+ unsigned long stack_canary;
+ };
+};
+
+DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_INIT_PER_CPU(irq_stack_union);
+
+DECLARE_PER_CPU(char *, irq_stack_ptr);
+++ ++++++DECLARE_PER_CPU(unsigned int, irq_count);
+++ ++++++extern unsigned long kernel_eflags;
+++ ++++++extern asmlinkage void ignore_sysret(void);
+#else /* X86_64 */
+#ifdef CONFIG_CC_STACKPROTECTOR
+DECLARE_PER_CPU(unsigned long, stack_canary);
#endif
+#endif /* X86_64 */
-extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;
diff --cc arch/x86/kernel/cpu/common.c
index f8869978bbb7,826d5c876278,f8869978bbb7,a9e3791ca098,54cbe7690f93,f8869978bbb7,a95e9480bb9c,f8869978bbb7,826d5c876278,83492b1f93b1..e2962cc1e27b
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@@@@@@@@@@ -1,44 -1,44 -1,44 -1,44 -1,44 -1,44 -1,44 -1,44 -1,44 -1,43 +1,44 @@@@@@@@@@@
--- ------#include <linux/init.h>
--- ------#include <linux/kernel.h>
--- ------#include <linux/sched.h>
--- ------#include <linux/string.h>
#include <linux/bootmem.h>
+++ ++++++#include <linux/linkage.h>
#include <linux/bitops.h>
+++ ++++++#include <linux/kernel.h>
#include <linux/module.h>
--- ------#include <linux/kgdb.h>
--- ------#include <linux/topology.h>
+++ ++++++#include <linux/percpu.h>
+++ ++++++#include <linux/string.h>
#include <linux/delay.h>
+++ ++++++#include <linux/sched.h>
+++ ++++++#include <linux/init.h>
+++ ++++++#include <linux/kgdb.h>
#include <linux/smp.h>
--- ------#include <linux/percpu.h>
--- ------#include <asm/i387.h>
--- ------#include <asm/msr.h>
--- ------#include <asm/io.h>
--- ------#include <asm/linkage.h>
+++ ++++++#include <linux/io.h>
+++ ++++++
+++ ++++++#include <asm/stackprotector.h>
#include <asm/mmu_context.h>
+++ ++++++#include <asm/hypervisor.h>
+++ ++++++#include <asm/processor.h>
+++ ++++++#include <asm/sections.h>
+++ ++++++#include <asm/topology.h>
+++ ++++++#include <asm/cpumask.h>
+++ ++++++#include <asm/pgtable.h>
+++ ++++++#include <asm/atomic.h>
+++ ++++++#include <asm/proto.h>
+++ ++++++#include <asm/setup.h>
+++ ++++++#include <asm/apic.h>
+++ ++++++#include <asm/desc.h>
+++ ++++++#include <asm/i387.h>
#include <asm/mtrr.h>
+++ ++++++#include <asm/numa.h>
+++ ++++++#include <asm/asm.h>
+++ ++++++#include <asm/cpu.h>
#include <asm/mce.h>
+++ ++++++#include <asm/msr.h>
#include <asm/pat.h>
--- ------#include <asm/asm.h>
--- ------#include <asm/numa.h>
#include <asm/smp.h>
--- ----- #include <asm/cpu.h>
--- ----- #include <asm/cpumask.h>
--- ----- #include <asm/apic.h>
+
#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
-#include <asm/apic.h>
-#include <mach_apic.h>
-#include <asm/genapic.h>
+#include <asm/uv/uv.h>
#endif
-#include <asm/pda.h>
--- ------#include <asm/pgtable.h>
--- ------#include <asm/processor.h>
--- ------#include <asm/desc.h>
--- ------#include <asm/atomic.h>
--- ------#include <asm/proto.h>
--- ------#include <asm/sections.h>
--- ------#include <asm/setup.h>
--- ------#include <asm/hypervisor.h>
--- ----- #include <asm/stackprotector.h>
--- ------
#include "cpu.h"
#ifdef CONFIG_X86_64
@@@@@@@@@@@ -51,48 -51,48 -51,48 -51,48 -51,48 -51,48 -51,48 -51,48 -51,48 -50,39 +51,48 @@@@@@@@@@@ cpumask_var_t cpu_callin_mask
/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;
+/* correctly size the local cpu masks */
+void __init setup_cpu_local_masks(void)
+{
+ alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+ alloc_bootmem_cpumask_var(&cpu_callin_mask);
+ alloc_bootmem_cpumask_var(&cpu_callout_mask);
+ alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
#else /* CONFIG_X86_32 */
--- ------cpumask_t cpu_callin_map;
+++ ++++++cpumask_t cpu_sibling_setup_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_initialized;
--- ------cpumask_t cpu_sibling_setup_map;
+++ ++++++cpumask_t cpu_callin_map;
#endif /* CONFIG_X86_32 */
---- -----static struct cpu_dev *this_cpu __cpuinitdata;
++++ +++++static const struct cpu_dev *this_cpu __cpuinitdata;
+DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
-/* We need valid kernel segments for data and code in long mode too
- * IRET will check the segment types kkeil 2000/10/28
- * Also sysret mandates a special GDT layout
- */
-/* The TLS descriptors are currently at a different place compared to i386.
- Hopefully nobody expects them at a fixed place (Wine?) */
-DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
- [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
- [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
- [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
- [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
- [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
- [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
-} };
+ /*
+ * We need valid kernel segments for data and code in long mode too
+ * IRET will check the segment types kkeil 2000/10/28
+ * Also sysret mandates a special GDT layout
+ *
--- ----- * The TLS descriptors are currently at a different place compared to i386.
+++ ++++++ * TLS descriptors are currently at a different place compared to i386.
+ * Hopefully nobody expects them at a fixed place (Wine?)
+ */
--- ----- [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
--- ----- [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
--- ----- [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
--- ----- [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
--- ----- [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
--- ----- [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
+++ ++++++ [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
+++ ++++++ [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
+++ ++++++ [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
+++ ++++++ [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
+++ ++++++ [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
+++ ++++++ [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
#else
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
--- ------ [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
--- ------ [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
--- ------ [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
--- ------ [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
+++ ++++++ [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
+++ ++++++ [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
+++ ++++++ [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
+++ ++++++ [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
/*
* Segments used for calling PnP BIOS have byte granularity.
* They code segments and data segments have fixed 64k limits,
@@@@@@@@@@@ -113,17 -113,17 -113,17 -113,17 -113,17 -113,17 -113,17 -113,17 -113,17 -103,16 +113,17 @@@@@@@@@@@
* are set at run time. All have 64k limits.
*/
/* 32-bit code */
--- ------ [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
+++ ++++++ [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
/* 16-bit code */
--- ------ [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
+++ ++++++ [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
/* data */
--- ------ [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
+++ ++++++ [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
--- ------ [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
--- ----- [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
- [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
-} };
+++ ++++++ [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
+++ ++++++ [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+ GDT_STACK_CANARY_INIT
#endif
+} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
#ifdef CONFIG_X86_32
@@@@@@@@@@@ -223,49 -223,49 -223,49 -228,54 -223,49 -223,49 -223,49 -223,49 -223,49 -212,6 +228,54 @@@@@@@@@@@ static inline void squash_the_stupid_se
}
#endif
+/*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software. Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+ u32 feature;
+ u32 level;
+};
+++ ++++++
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+ { X86_FEATURE_MWAIT, 0x00000005 },
+ { X86_FEATURE_DCA, 0x00000009 },
+ { X86_FEATURE_XSAVE, 0x0000000d },
+ { 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+ const struct cpuid_dependent_feature *df;
+++ ++++++
+ for (df = cpuid_dependent_features; df->feature; df++) {
+++ ++++++
+++ ++++++ if (!cpu_has(c, df->feature))
+++ ++++++ continue;
+ /*
+ * Note: cpuid_level is set to -1 if unavailable, but
+ * extended_extended_level is set to 0 if unavailable
+ * and the legitimate extended levels are all negative
+ * when signed; hence the weird messing around with
+ * signs here...
+ */
--- ----- if (cpu_has(c, df->feature) &&
--- ----- ((s32)df->level < 0 ?
+++ ++++++ if (!((s32)df->level < 0 ?
+ (u32)df->level > (u32)c->extended_cpuid_level :
--- ----- (s32)df->level > (s32)c->cpuid_level)) {
--- ----- clear_cpu_cap(c, df->feature);
--- ----- if (warn)
--- ----- printk(KERN_WARNING
--- ----- "CPU: CPU feature %s disabled "
--- ----- "due to lack of CPUID level 0x%x\n",
--- ----- x86_cap_flags[df->feature],
--- ----- df->level);
--- ----- }
+++ ++++++ (s32)df->level > (s32)c->cpuid_level))
+++ ++++++ continue;
+++ ++++++
+++ ++++++ clear_cpu_cap(c, df->feature);
+++ ++++++ if (!warn)
+++ ++++++ continue;
+++ ++++++
+++ ++++++ printk(KERN_WARNING
+++ ++++++ "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+++ ++++++ x86_cap_flags[df->feature], df->level);
+ }
+}
+
/*
* Naming convention should be: <Name> [(<Codename>)]
* This table only is used unless init_<vendor>() below doesn't set it;
@@@@@@@@@@@ -296,32 -296,32 -296,32 -306,34 -296,32 -296,32 -296,32 -296,32 -296,32 -242,21 +306,34 @@@@@@@@@@@ static const char *__cpuinit table_look
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
+void load_percpu_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+ loadsegment(fs, __KERNEL_PERCPU);
+#else
+ loadsegment(gs, 0);
+ wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+#endif
+ load_stack_canary_segment();
+}
+
--- ----- /* Current gdt points %fs at the "master" per-cpu area: after this,
--- ----- * it's on the real one. */
+++ ++++++/*
+++ ++++++ * Current gdt points %fs at the "master" per-cpu area: after this,
+++ ++++++ * it's on the real one.
+++ ++++++ */
+void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
- gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+ gdt_descr.address = (long)get_cpu_gdt_table(cpu);
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
-#ifdef CONFIG_X86_32
- asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-#endif
+ /* Reload the per-cpu base */
+
+ load_percpu_segment(cpu);
}
---- -----static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
++++ +++++static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
@@@@@@@@@@@ -438,27 -438,27 -438,27 -452,30 -438,27 -438,27 -438,27 -438,27 -438,27 -373,36 +452,30 @@@@@@@@@@@ void __cpuinit detect_ht(struct cpuinfo
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
--- ------ } else if (smp_num_siblings > 1) {
+++ ++++++ goto out;
+++ ++++++ }
--- ------ if (smp_num_siblings > nr_cpu_ids) {
--- ------ printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
--- ------ smp_num_siblings);
--- ------ smp_num_siblings = 1;
--- ------ return;
--- ------ }
+++ ++++++ if (smp_num_siblings <= 1)
+++ ++++++ goto out;
+++ +++++
- index_msb = get_count_order(smp_num_siblings);
-#ifdef CONFIG_X86_64
- c->phys_proc_id = phys_pkg_id(index_msb);
-#else
- c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-#endif
+++ ++++++ if (smp_num_siblings > nr_cpu_ids) {
+++ ++++++ pr_warning("CPU: Unsupported number of siblings %d",
+++ ++++++ smp_num_siblings);
+++ ++++++ smp_num_siblings = 1;
+++ ++++++ return;
+++ ++++++ }
--- ----- index_msb = get_count_order(smp_num_siblings);
--- ----- c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
- smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+++ ++++++ index_msb = get_count_order(smp_num_siblings);
+++ ++++++ c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
--- ----- smp_num_siblings = smp_num_siblings / c->x86_max_cores;
- index_msb = get_count_order(smp_num_siblings);
+++ ++++++ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
--- ----- index_msb = get_count_order(smp_num_siblings);
- core_bits = get_count_order(c->x86_max_cores);
+++ ++++++ index_msb = get_count_order(smp_num_siblings);
--- ----- core_bits = get_count_order(c->x86_max_cores);
-#ifdef CONFIG_X86_64
- c->cpu_core_id = phys_pkg_id(index_msb) &
- ((1 << core_bits) - 1);
-#else
- c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
- ((1 << core_bits) - 1);
-#endif
- }
+++ ++++++ core_bits = get_count_order(c->x86_max_cores);
+
--- ----- c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
--- ----- ((1 << core_bits) - 1);
--- ----- }
+++ ++++++ c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
+++ ++++++ ((1 << core_bits) - 1);
out:
if ((c->x86_max_cores * smp_num_siblings) > 1) {
@@@@@@@@@@@ -634,12 -634,12 -634,12 -659,12 -634,12 -634,12 -640,12 -634,12 -634,12 -579,12 +665,12 @@@@@@@@@@@ static void __init early_identify_cpu(s
void __init early_cpu_init(void)
{
---- ----- struct cpu_dev **cdev;
++++ +++++ const struct cpu_dev *const *cdev;
int count = 0;
--- ------ printk("KERNEL supported cpus:\n");
+++ ++++++ printk(KERN_INFO "KERNEL supported cpus:\n");
for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
---- ----- struct cpu_dev *cpudev = *cdev;
++++ +++++ const struct cpu_dev *cpudev = *cdev;
unsigned int j;
if (count >= X86_VENDOR_NUM)
@@@@@@@@@@@ -759,16 -759,16 -759,16 -784,16 -759,16 -759,16 -769,16 -759,16 -759,16 -704,13 +794,16 @@@@@@@@@@@ static void __cpuinit identify_cpu(stru
squash_the_stupid_serial_number(c);
/*
--- ------ * The vendor-specific functions might have changed features. Now
--- ------ * we do "generic changes."
+++ ++++++ * The vendor-specific functions might have changed features.
+++ ++++++ * Now we do "generic changes."
*/
+ /* Filter out anything that depends on CPUID levels we don't have */
+ filter_cpuid_features(c, true);
+
/* If the model name is still unset, do table lookup. */
if (!c->x86_model_id[0]) {
---- ----- char *p;
++++ +++++ const char *p;
p = table_lookup_model(c);
if (p)
strcpy(c->x86_model_id, p);
@@@@@@@@@@@ -843,11 -843,11 -843,11 -868,11 -843,11 -843,11 -853,11 -843,11 -843,11 -785,11 +878,11 @@@@@@@@@@@ void __cpuinit identify_secondary_cpu(s
}
struct msr_range {
--- ------ unsigned min;
--- ------ unsigned max;
+++ ++++++ unsigned min;
+++ ++++++ unsigned max;
};
---- -----static struct msr_range msr_range_array[] __cpuinitdata = {
++++ +++++static const struct msr_range msr_range_array[] __cpuinitconst = {
{ 0x00000000, 0x00000418},
{ 0xc0000000, 0xc000040b},
{ 0xc0010000, 0xc0010142},
@@@@@@@@@@@ -894,12 -894,12 -894,12 -921,14 -894,12 -894,12 -904,12 -894,12 -894,12 -836,12 +931,14 @@@@@@@@@@@ __setup("noclflush", setup_noclflush)
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
---- ----- char *vendor = NULL;
++++ +++++ const char *vendor = NULL;
--- ------ if (c->x86_vendor < X86_VENDOR_NUM)
+++ ++++++ if (c->x86_vendor < X86_VENDOR_NUM) {
vendor = this_cpu->c_vendor;
--- ------ else if (c->cpuid_level >= 0)
--- ------ vendor = c->x86_vendor_id;
+++ ++++++ } else {
+++ ++++++ if (c->cpuid_level >= 0)
+++ ++++++ vendor = c->x86_vendor_id;
+++ ++++++ }
if (vendor && !strstr(c->x86_model_id, vendor))
printk(KERN_CONT "%s ", vendor);
@@@@@@@@@@@ -935,25 -935,25 -935,25 -966,35 -935,25 -935,25 -945,25 -935,25 -935,25 -877,57 +976,35 @@@@@@@@@@@ static __init int setup_disablecpuid(ch
__setup("clearcpuid=", setup_disablecpuid);
#ifdef CONFIG_X86_64
-struct x8664_pda **_cpu_pda __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
-
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
-static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
+DEFINE_PER_CPU_FIRST(union irq_stack_union,
+ irq_stack_union) __aligned(PAGE_SIZE);
+++ +++++
-void __cpuinit pda_init(int cpu)
-{
- struct x8664_pda *pda = cpu_pda(cpu);
+DEFINE_PER_CPU(char *, irq_stack_ptr) =
+ init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
- /* Setup up data that may be needed in __get_free_pages early */
- loadsegment(fs, 0);
- loadsegment(gs, 0);
- /* Memory clobbers used to order PDA accessed */
- mb();
- wrmsrl(MSR_GS_BASE, pda);
- mb();
-
- pda->cpunumber = cpu;
- pda->irqcount = -1;
- pda->kernelstack = (unsigned long)stack_thread_info() -
- PDA_STACKOFFSET + THREAD_SIZE;
- pda->active_mm = &init_mm;
- pda->mmu_state = 0;
-
- if (cpu == 0) {
- /* others are initialized in smpboot.c */
- pda->pcurrent = &init_task;
- pda->irqstackptr = boot_cpu_stack;
- pda->irqstackptr += IRQSTACKSIZE - 64;
- } else {
- if (!pda->irqstackptr) {
- pda->irqstackptr = (char *)
- __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
- if (!pda->irqstackptr)
- panic("cannot allocate irqstack for cpu %d",
- cpu);
- pda->irqstackptr += IRQSTACKSIZE - 64;
- }
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+ (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
- if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
- pda->nodenumber = cpu_to_node(cpu);
- }
-}
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
-static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
- DEBUG_STKSZ] __page_aligned_bss;
+++ ++++++/*
+++ ++++++ * Special IST stacks which the CPU switches to when it calls
+++ ++++++ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+++ ++++++ * limit), all of them are 4K, except the debug stack which
+++ ++++++ * is 8K.
+++ ++++++ */
+++ ++++++static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+++ ++++++ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+++ ++++++ [DEBUG_STACK - 1] = DEBUG_STKSZ
+++ ++++++};
+++ +++++
-extern asmlinkage void ignore_sysret(void);
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+ [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+ __aligned(PAGE_SIZE);
--- ----- extern asmlinkage void ignore_sysret(void);
--- -----
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
@@@@@@@@@@@ -983,21 -983,21 -983,21 -1024,38 -983,21 -983,21 -993,21 -983,21 -983,21 -957,16 +1034,38 @@@@@@@@@@@ unsigned long kernel_eflags
*/
DEFINE_PER_CPU(struct orig_ist, orig_ist);
--- ----- #else /* x86_64 */
-#else
+++ ++++++#else /* CONFIG_X86_64 */
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+DEFINE_PER_CPU(unsigned long, stack_canary);
+#endif
-/* Make sure %fs is initialized properly in idle threads */
+/* Make sure %fs and %gs are initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
regs->fs = __KERNEL_PERCPU;
+ regs->gs = __KERNEL_STACK_CANARY;
+++ ++++++
return regs;
}
--- ----- #endif /* x86_64 */
-#endif
+++ ++++++#endif /* CONFIG_X86_64 */
+++ ++++++
+++ ++++++/*
+++ ++++++ * Clear all 6 debug registers:
+++ ++++++ */
+++ ++++++static void clear_all_debug_regs(void)
+++ ++++++{
+++ ++++++ int i;
+++ ++++++
+++ ++++++ for (i = 0; i < 8; i++) {
+++ ++++++ /* Ignore db4, db5 */
+++ ++++++ if ((i == 4) || (i == 5))
+++ ++++++ continue;
+++ ++++++
+++ ++++++ set_debugreg(0, i);
+++ ++++++ }
+++ ++++++}
/*
* cpu_init() initializes state that is per-CPU. Some data is already
@@@@@@@@@@@ -1007,20 -1007,20 -1007,20 -1065,25 -1007,20 -1007,20 -1017,20 -1007,20 -1007,20 -976,21 +1075,25 @@@@@@@@@@@
* A lot of state is already set up in PDA init for 64 bit
*/
#ifdef CONFIG_X86_64
+++ ++++++
void __cpuinit cpu_init(void)
{
--- ------ int cpu = stack_smp_processor_id();
--- ------ struct tss_struct *t = &per_cpu(init_tss, cpu);
--- ------ struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
--- ------ unsigned long v;
- char *estacks = NULL;
+++ ++++++ struct orig_ist *orig_ist;
struct task_struct *me;
+++ ++++++ struct tss_struct *t;
+++ ++++++ unsigned long v;
+++ ++++++ int cpu;
int i;
- /* CPU 0 is initialised in head64.c */
- if (cpu != 0)
- pda_init(cpu);
- else
- estacks = boot_exception_stacks;
+++ ++++++ cpu = stack_smp_processor_id();
+++ ++++++ t = &per_cpu(init_tss, cpu);
+++ ++++++ orig_ist = &per_cpu(orig_ist, cpu);
+++ ++++++
+#ifdef CONFIG_NUMA
+ if (cpu != 0 && percpu_read(node_number) == 0 &&
+ cpu_to_node(cpu) != NUMA_NO_NODE)
+ percpu_write(node_number, cpu_to_node(cpu));
+#endif
me = current;
@@@@@@@@@@@ -1056,13 -1056,13 -1056,13 -1119,10 -1056,13 -1056,13 -1066,13 -1056,13 -1056,13 -1024,18 +1129,10 @@@@@@@@@@@
* set up and load the per-CPU TSS
*/
if (!orig_ist->ist[0]) {
--- ----- static const unsigned int sizes[N_EXCEPTION_STACKS] = {
--- ----- [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
--- ----- [DEBUG_STACK - 1] = DEBUG_STKSZ
- static const unsigned int order[N_EXCEPTION_STACKS] = {
- [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
- [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
--- ------ };
+ char *estacks = per_cpu(exception_stacks, cpu);
+++ ++++++
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
--- ----- estacks += sizes[v];
- if (cpu) {
- estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
- if (!estacks)
- panic("Cannot allocate exception "
- "stack %ld %d\n", v, cpu);
- }
- estacks += PAGE_SIZE << order[v];
+++ ++++++ estacks += exception_stack_sizes[v];
orig_ist->ist[v] = t->x86_tss.ist[v] =
(unsigned long)estacks;
}
@@@@@@@@@@@ -1095,19 -1096,19 -1095,19 -1157,9 -1095,19 -1095,19 -1106,19 -1095,19 -1096,19 -1069,22 +1166,9 @@@@@@@@@@@
*/
if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
arch_kgdb_ops.correct_hw_break();
- else {
-#endif
- /*
- * Clear all 6 debug registers:
- */
-
- set_debugreg(0UL, 0);
- set_debugreg(0UL, 1);
- set_debugreg(0UL, 2);
- set_debugreg(0UL, 3);
- set_debugreg(0UL, 6);
- set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
- /* If the kgdb is connected no debug regs should be altered. */
- }
+ else
#endif
--- ----- {
--- ----- /*
--- ----- * Clear all 6 debug registers:
--- ----- */
--- ----- set_debugreg(0UL, 0);
--- ----- set_debugreg(0UL, 1);
--- ----- set_debugreg(0UL, 2);
--- ----- set_debugreg(0UL, 3);
--- ----- set_debugreg(0UL, 6);
--- ----- set_debugreg(0UL, 7);
--- ----- }
+++ ++++++ clear_all_debug_regs();
fpu_init();
@@@@@@@@@@@ -1157,13 -1159,13 -1157,13 -1211,7 -1157,13 -1157,13 -1169,13 -1157,13 -1159,13 -1135,16 +1219,7 @@@@@@@@@@@ void __cpuinit cpu_init(void
__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif
- /* Clear %gs. */
- asm volatile ("mov %0, %%gs" : : "r" (0));
-
--- ------ /* Clear all 6 debug registers: */
--- ------ set_debugreg(0, 0);
--- ------ set_debugreg(0, 1);
--- ------ set_debugreg(0, 2);
--- ------ set_debugreg(0, 3);
--- ------ set_debugreg(0, 6);
--- ------ set_debugreg(0, 7);
+++ ++++++ clear_all_debug_regs();
/*
* Force FPU initialization:
commit 0f3fa48a7eaf5d1118cfda1650e8c759b2a116e4
Author: Ingo Molnar <mingo@elte.hu>
Date: Sat Mar 14 08:46:17 2009 +0100
x86: cpu/common.c more cleanups
Complete/fix the cleanups of cpu/common.c:
- fix ugly warning due to asm/topology.h -> linux/topology.h change
- standardize the style across the file
- simplify/refactor the code flow where possible
Cc: Jaswinder Singh Rajput <jaswinder@kernel.org>
LKML-Reference: <1237009789.4387.2.camel@localhost.localdomain>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cad6878c88db..a9e3791ca098 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,4 +1,3 @@
-#include <linux/topology.h>
#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
@@ -18,6 +17,7 @@
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/sections.h>
+#include <asm/topology.h>
#include <asm/cpumask.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
@@ -82,45 +82,45 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
* TLS descriptors are currently at a different place compared to i386.
* Hopefully nobody expects them at a fixed place (Wine?)
*/
- [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
- [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
- [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
- [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
- [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
- [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
+ [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
+ [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
+ [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
+ [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
+ [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
+ [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
#else
- [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
- [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
- [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
- [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
+ [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
+ [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
+ [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
+ [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
/*
* Segments used for calling PnP BIOS have byte granularity.
* They code segments and data segments have fixed 64k limits,
* the transfer segment sizes are set at run time.
*/
/* 32-bit code */
- [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
+ [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
/* 16-bit code */
- [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
+ [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
/* 16-bit data */
- [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
+ [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
/* 16-bit data */
- [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
+ [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
/* 16-bit data */
- [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
+ [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
/*
* The APM segments have byte granularity and their bases
* are set at run time. All have 64k limits.
*/
/* 32-bit code */
- [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
+ [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
/* 16-bit code */
- [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
+ [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
/* data */
- [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
+ [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
- [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
- [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+ [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
+ [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
GDT_STACK_CANARY_INIT
#endif
} };
@@ -164,16 +164,17 @@ static inline int flag_is_changeable_p(u32 flag)
* the CPUID. Add "volatile" to not allow gcc to
* optimize the subsequent calls to this function.
*/
- asm volatile ("pushfl\n\t"
- "pushfl\n\t"
- "popl %0\n\t"
- "movl %0,%1\n\t"
- "xorl %2,%0\n\t"
- "pushl %0\n\t"
- "popfl\n\t"
- "pushfl\n\t"
- "popl %0\n\t"
- "popfl\n\t"
+ asm volatile ("pushfl \n\t"
+ "pushfl \n\t"
+ "popl %0 \n\t"
+ "movl %0, %1 \n\t"
+ "xorl %2, %0 \n\t"
+ "pushl %0 \n\t"
+ "popfl \n\t"
+ "pushfl \n\t"
+ "popl %0 \n\t"
+ "popfl \n\t"
+
: "=&r" (f1), "=&r" (f2)
: "ir" (flag));
@@ -188,18 +189,22 @@ static int __cpuinit have_cpuid_p(void)
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
- if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
- /* Disable processor serial number */
- unsigned long lo, hi;
- rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
- lo |= 0x200000;
- wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
- printk(KERN_NOTICE "CPU serial number disabled.\n");
- clear_cpu_cap(c, X86_FEATURE_PN);
-
- /* Disabling the serial number may affect the cpuid level */
- c->cpuid_level = cpuid_eax(0);
- }
+ unsigned long lo, hi;
+
+ if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
+ return;
+
+ /* Disable processor serial number: */
+
+ rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+ lo |= 0x200000;
+ wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+
+ printk(KERN_NOTICE "CPU serial number disabled.\n");
+ clear_cpu_cap(c, X86_FEATURE_PN);
+
+ /* Disabling the serial number may affect the cpuid level */
+ c->cpuid_level = cpuid_eax(0);
}
static int __init x86_serial_nr_setup(char *s)
@@ -232,6 +237,7 @@ struct cpuid_dependent_feature {
u32 feature;
u32 level;
};
+
static const struct cpuid_dependent_feature __cpuinitconst
cpuid_dependent_features[] = {
{ X86_FEATURE_MWAIT, 0x00000005 },
@@ -245,6 +251,9 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
const struct cpuid_dependent_feature *df;
for (df = cpuid_dependent_features; df->feature; df++) {
+
+ if (!cpu_has(c, df->feature))
+ continue;
/*
* Note: cpuid_level is set to -1 if unavailable, but
* extended_extended_level is set to 0 if unavailable
@@ -252,26 +261,26 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
* when signed; hence the weird messing around with
* signs here...
*/
- if (cpu_has(c, df->feature) &&
- ((s32)df->level < 0 ?
+ if (!((s32)df->level < 0 ?
(u32)df->level > (u32)c->extended_cpuid_level :
- (s32)df->level > (s32)c->cpuid_level)) {
- clear_cpu_cap(c, df->feature);
- if (warn)
- printk(KERN_WARNING
- "CPU: CPU feature %s disabled "
- "due to lack of CPUID level 0x%x\n",
- x86_cap_flags[df->feature],
- df->level);
- }
+ (s32)df->level > (s32)c->cpuid_level))
+ continue;
+
+ clear_cpu_cap(c, df->feature);
+ if (!warn)
+ continue;
+
+ printk(KERN_WARNING
+ "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+ x86_cap_flags[df->feature], df->level);
}
}
/*
* Naming convention should be: <Name> [(<Codename>)]
* This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
+ * in particular, if CPUID levels 0x80000002..4 are supported, this
+ * isn't used
*/
/* Look up CPU names by table lookup. */
@@ -308,8 +317,10 @@ void load_percpu_segment(int cpu)
load_stack_canary_segment();
}
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
+/*
+ * Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one.
+ */
void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
@@ -355,14 +366,16 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
if (c->extended_cpuid_level < 0x80000004)
return;
- v = (unsigned int *) c->x86_model_id;
+ v = (unsigned int *)c->x86_model_id;
cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
c->x86_model_id[48] = 0;
- /* Intel chips right-justify this string for some dumb reason;
- undo that brain damage */
+ /*
+ * Intel chips right-justify this string for some dumb reason;
+ * undo that brain damage:
+ */
p = q = &c->x86_model_id[0];
while (*p == ' ')
p++;
@@ -439,28 +452,30 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
- } else if (smp_num_siblings > 1) {
+ goto out;
+ }
- if (smp_num_siblings > nr_cpu_ids) {
- pr_warning("CPU: Unsupported number of siblings %d",
- smp_num_siblings);
- smp_num_siblings = 1;
- return;
- }
+ if (smp_num_siblings <= 1)
+ goto out;
- index_msb = get_count_order(smp_num_siblings);
- c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
- index_msb);
+ if (smp_num_siblings > nr_cpu_ids) {
+ pr_warning("CPU: Unsupported number of siblings %d",
+ smp_num_siblings);
+ smp_num_siblings = 1;
+ return;
+ }
- smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+ index_msb = get_count_order(smp_num_siblings);
+ c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
- index_msb = get_count_order(smp_num_siblings);
+ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
- core_bits = get_count_order(c->x86_max_cores);
+ index_msb = get_count_order(smp_num_siblings);
- c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
- ((1 << core_bits) - 1);
- }
+ core_bits = get_count_order(c->x86_max_cores);
+
+ c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
+ ((1 << core_bits) - 1);
out:
if ((c->x86_max_cores * smp_num_siblings) > 1) {
@@ -475,8 +490,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
char *v = c->x86_vendor_id;
- int i;
static int printed;
+ int i;
for (i = 0; i < X86_VENDOR_NUM; i++) {
if (!cpu_devs[i])
@@ -485,6 +500,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
(cpu_devs[i]->c_ident[1] &&
!strcmp(v, cpu_devs[i]->c_ident[1]))) {
+
this_cpu = cpu_devs[i];
c->x86_vendor = this_cpu->c_x86_vendor;
return;
@@ -493,8 +509,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
if (!printed) {
printed++;
- printk(KERN_ERR "CPU: vendor_id '%s'"
- "unknown, using generic init.\n", v);
+ printk(KERN_ERR
+ "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+
printk(KERN_ERR "CPU: Your system may be unstable.\n");
}
@@ -514,14 +531,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
/* Intel-defined flags: level 0x00000001 */
if (c->cpuid_level >= 0x00000001) {
u32 junk, tfms, cap0, misc;
+
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
c->x86 = (tfms >> 8) & 0xf;
c->x86_model = (tfms >> 4) & 0xf;
c->x86_mask = tfms & 0xf;
+
if (c->x86 == 0xf)
c->x86 += (tfms >> 20) & 0xff;
if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xf) << 4;
+
if (cap0 & (1<<19)) {
c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
c->x86_cache_alignment = c->x86_clflush_size;
@@ -537,6 +557,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
/* Intel-defined flags: level 0x00000001 */
if (c->cpuid_level >= 0x00000001) {
u32 capability, excap;
+
cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
c->x86_capability[0] = capability;
c->x86_capability[4] = excap;
@@ -545,6 +566,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
/* AMD-defined flags: level 0x80000001 */
xlvl = cpuid_eax(0x80000000);
c->extended_cpuid_level = xlvl;
+
if ((xlvl & 0xffff0000) == 0x80000000) {
if (xlvl >= 0x80000001) {
c->x86_capability[1] = cpuid_edx(0x80000001);
@@ -762,8 +784,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
squash_the_stupid_serial_number(c);
/*
- * The vendor-specific functions might have changed features. Now
- * we do "generic changes."
+ * The vendor-specific functions might have changed features.
+ * Now we do "generic changes."
*/
/* Filter out anything that depends on CPUID levels we don't have */
@@ -846,8 +868,8 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
}
struct msr_range {
- unsigned min;
- unsigned max;
+ unsigned min;
+ unsigned max;
};
static struct msr_range msr_range_array[] __cpuinitdata = {
@@ -859,14 +881,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
static void __cpuinit print_cpu_msr(void)
{
+ unsigned index_min, index_max;
unsigned index;
u64 val;
int i;
- unsigned index_min, index_max;
for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
index_min = msr_range_array[i].min;
index_max = msr_range_array[i].max;
+
for (index = index_min; index < index_max; index++) {
if (rdmsrl_amd_safe(index, &val))
continue;
@@ -876,6 +899,7 @@ static void __cpuinit print_cpu_msr(void)
}
static int show_msr __cpuinitdata;
+
static __init int setup_show_msr(char *arg)
{
int num;
@@ -899,10 +923,12 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
char *vendor = NULL;
- if (c->x86_vendor < X86_VENDOR_NUM)
+ if (c->x86_vendor < X86_VENDOR_NUM) {
vendor = this_cpu->c_vendor;
- else if (c->cpuid_level >= 0)
- vendor = c->x86_vendor_id;
+ } else {
+ if (c->cpuid_level >= 0)
+ vendor = c->x86_vendor_id;
+ }
if (vendor && !strstr(c->x86_model_id, vendor))
printk(KERN_CONT "%s ", vendor);
@@ -929,10 +955,12 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
static __init int setup_disablecpuid(char *arg)
{
int bit;
+
if (get_option(&arg, &bit) && bit < NCAPINTS*32)
setup_clear_cpu_cap(bit);
else
return 0;
+
return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
@@ -942,6 +970,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE);
+
DEFINE_PER_CPU(char *, irq_stack_ptr) =
init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
@@ -951,6 +980,17 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
DEFINE_PER_CPU(unsigned int, irq_count) = -1;
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+ [DEBUG_STACK - 1] = DEBUG_STKSZ
+};
+
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
__aligned(PAGE_SIZE);
@@ -984,7 +1024,7 @@ unsigned long kernel_eflags;
*/
DEFINE_PER_CPU(struct orig_ist, orig_ist);
-#else /* x86_64 */
+#else /* CONFIG_X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU(unsigned long, stack_canary);
@@ -996,9 +1036,10 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
memset(regs, 0, sizeof(struct pt_regs));
regs->fs = __KERNEL_PERCPU;
regs->gs = __KERNEL_STACK_CANARY;
+
return regs;
}
-#endif /* x86_64 */
+#endif /* CONFIG_X86_64 */
/*
* Clear all 6 debug registers:
@@ -1024,15 +1065,20 @@ static void clear_all_debug_regs(void)
* A lot of state is already set up in PDA init for 64 bit
*/
#ifdef CONFIG_X86_64
+
void __cpuinit cpu_init(void)
{
- int cpu = stack_smp_processor_id();
- struct tss_struct *t = &per_cpu(init_tss, cpu);
- struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
- unsigned long v;
+ struct orig_ist *orig_ist;
struct task_struct *me;
+ struct tss_struct *t;
+ unsigned long v;
+ int cpu;
int i;
+ cpu = stack_smp_processor_id();
+ t = &per_cpu(init_tss, cpu);
+ orig_ist = &per_cpu(orig_ist, cpu);
+
#ifdef CONFIG_NUMA
if (cpu != 0 && percpu_read(node_number) == 0 &&
cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -1073,19 +1119,17 @@ void __cpuinit cpu_init(void)
* set up and load the per-CPU TSS
*/
if (!orig_ist->ist[0]) {
- static const unsigned int sizes[N_EXCEPTION_STACKS] = {
- [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
- [DEBUG_STACK - 1] = DEBUG_STKSZ
- };
char *estacks = per_cpu(exception_stacks, cpu);
+
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
- estacks += sizes[v];
+ estacks += exception_stack_sizes[v];
orig_ist->ist[v] = t->x86_tss.ist[v] =
(unsigned long)estacks;
}
}
t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
/*
* <= is required because the CPU will access up to
* 8 bits beyond the end of the IO permission bitmap.
@@ -1187,5 +1231,4 @@ void __cpuinit cpu_init(void)
xsave_init();
}
-
#endif
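The trickiest part of this cleanup is the mixed signed/unsigned test in filter_cpuid_features(): cpuid_level is set to -1 when CPUID is unavailable, while extended leaves (0x80000000 and up) come out negative as s32, so the sign of df->level selects which comparison domain applies. A standalone restatement of that predicate — hypothetical helper name, same logic as the kernel test:

    #include <stdio.h>
    #include <stdint.h>

    /* Returns nonzero when a feature gated on `level` is supported.
     * Extended leaves (0x80000000+) are negative as s32 and compared
     * unsigned against extended_cpuid_level; basic leaves are compared
     * signed, so cpuid_level == -1 (no CPUID) fails them all. */
    static int level_ok(int32_t level, int32_t cpuid_level,
                        uint32_t extended_cpuid_level)
    {
            if (level < 0)  /* extended leaf */
                    return (uint32_t)level <= extended_cpuid_level;
            return level <= cpuid_level;    /* basic leaf */
    }

    int main(void)
    {
            /* MWAIT needs basic leaf 0x5; XSAVE needs 0xd. */
            printf("mwait on cpuid_level=4:  %d\n", level_ok(0x5, 4, 0));
            printf("xsave on cpuid_level=13: %d\n", level_ok(0xd, 13, 0));
            printf("ext leaf on no-ext cpu:  %d\n",
                   level_ok((int32_t)0x80000008, 13, 0x80000004));
            return 0;
    }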
commit c550033ced484d8d333bc1edc0a482728680e689
Merge: a98fe7f3425c 7a46c594bf7f
Author: Ingo Molnar <mingo@elte.hu>
Date: Sat Mar 14 09:50:10 2009 +0100
Merge branch 'core/percpu' into x86/core
commit 62395efdb0ef42e664ca81677901268c403a6286
Merge: ccd50dfd92ea 88200bc28da3
Author: Ingo Molnar <mingo@elte.hu>
Date: Sat Mar 14 08:16:21 2009 +0100
Merge branch 'x86/asm' into tracing/syscalls
We need the wider TIF work-mask checks in entry_32.S.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 063402356280a7b262952d6351d21315336f657b
Merge: f9a36fa5413f a98fe7f3425c
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Mar 13 17:08:30 2009 +0100
Merge branch 'x86/core' into x86/kconfig
commit ccd50dfd92ea2c4ba9e39531ac55db53393e783e
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Mar 13 17:02:17 2009 +0100
tracing/syscalls: support for syscalls tracing on x86, fix
Impact: build fix
kernel/built-in.o: In function `ftrace_syscall_exit':
(.text+0x76667): undefined reference to `syscall_nr_to_meta'
ftrace.o is built:
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
But now a CONFIG_FTRACE_SYSCALLS dependency is needed too.
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
LKML-Reference: <1236401580-5758-3-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 339ce35648e6..84000eb931ff 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -66,7 +66,8 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
commit 79258a354e0c69be94ae2871809a195bf4a647b1
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Mar 13 12:02:08 2009 +0100
x86, bts: detect size of DS fields, fix
Impact: build fix
One usage site was missed in the sizeof_field -> sizeof_ptr_field
rename.
Cc: Markus Metzger <markus.t.metzger@intel.com>
LKML-Reference: <20090313104218.A30096@sedona.ch.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 7363e01ba082..5fd53333c1df 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -983,7 +983,7 @@ ds_configure(const struct ds_configuration *cfg,
printk("bts/pebs record: %u/%u bytes\n",
ds_cfg.sizeof_rec[ds_bts], ds_cfg.sizeof_rec[ds_pebs]);
- WARN_ON_ONCE(MAX_SIZEOF_DS < (12 * ds_cfg.sizeof_field));
+ WARN_ON_ONCE(MAX_SIZEOF_DS < (12 * ds_cfg.sizeof_ptr_field));
}
void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
commit e9a22d1fb94050b7d600019c32e6b672d539054b
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Mar 13 11:54:40 2009 +0100
x86, bts: cleanups
Impact: cleanup, no code changed
Cc: Markus Metzger <markus.t.metzger@intel.com>
LKML-Reference: <20090313104218.A30096@sedona.ch.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index d9cab7168058..7363e01ba082 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -19,43 +19,52 @@
* Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
*/
-
-#include <asm/ds.h>
-
-#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/slab.h>
+#include <linux/errno.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/kernel.h>
+
+#include <asm/ds.h>
#include "ds_selftest.h"
/*
- * The configuration for a particular DS hardware implementation.
+ * The configuration for a particular DS hardware implementation:
*/
struct ds_configuration {
- /* The name of the configuration. */
- const char *name;
- /* The size of pointer-typed fields in DS, BTS, and PEBS. */
- unsigned char sizeof_ptr_field;
- /* The size of a BTS/PEBS record in bytes. */
- unsigned char sizeof_rec[2];
- /* Control bit-masks indexed by enum ds_feature. */
- unsigned long ctl[dsf_ctl_max];
+ /* The name of the configuration: */
+ const char *name;
+
+ /* The size of pointer-typed fields in DS, BTS, and PEBS: */
+ unsigned char sizeof_ptr_field;
+
+ /* The size of a BTS/PEBS record in bytes: */
+ unsigned char sizeof_rec[2];
+
+ /* Control bit-masks indexed by enum ds_feature: */
+ unsigned long ctl[dsf_ctl_max];
};
static DEFINE_PER_CPU(struct ds_configuration, ds_cfg_array);
#define ds_cfg per_cpu(ds_cfg_array, smp_processor_id())
-#define MAX_SIZEOF_DS (12 * 8) /* Maximal size of a DS configuration. */
-#define MAX_SIZEOF_BTS (3 * 8) /* Maximal size of a BTS record. */
-#define DS_ALIGNMENT (1 << 3) /* BTS and PEBS buffer alignment. */
+/* Maximal size of a DS configuration: */
+#define MAX_SIZEOF_DS (12 * 8)
-#define BTS_CONTROL \
- (ds_cfg.ctl[dsf_bts] | ds_cfg.ctl[dsf_bts_kernel] | ds_cfg.ctl[dsf_bts_user] |\
- ds_cfg.ctl[dsf_bts_overflow])
+/* Maximal size of a BTS record: */
+#define MAX_SIZEOF_BTS (3 * 8)
+/* BTS and PEBS buffer alignment: */
+#define DS_ALIGNMENT (1 << 3)
+
+/* Mask of control bits in the DS MSR register: */
+#define BTS_CONTROL \
+ ( ds_cfg.ctl[dsf_bts] | \
+ ds_cfg.ctl[dsf_bts_kernel] | \
+ ds_cfg.ctl[dsf_bts_user] | \
+ ds_cfg.ctl[dsf_bts_overflow] )
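BTS_CONTROL gathers every BTS enable bit into a single mask so the whole feature can be flipped in one step in the DEBUGCTL MSR image. A hedged sketch of that usage, modelled on what ds_suspend_bts()/ds_resume_bts() do elsewhere in this file; get_debugctlmsr() and update_debugctlmsr() are existing x86 helpers, while the wrapper name is hypothetical:

static void sketch_bts_toggle(int enable)
{
	unsigned long debugctl = get_debugctlmsr();

	if (enable)
		debugctl |= BTS_CONTROL;	/* set all BTS enable bits */
	else
		debugctl &= ~BTS_CONTROL;	/* clear them all at once */

	update_debugctlmsr(debugctl);
}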
/*
* A BTS or PEBS tracer.
@@ -65,28 +74,32 @@ static DEFINE_PER_CPU(struct ds_configuration, ds_cfg_array);
*/
struct ds_tracer {
/* The DS context (partially) owned by this tracer. */
- struct ds_context *context;
+ struct ds_context *context;
/* The buffer provided on ds_request() and its size in bytes. */
- void *buffer;
- size_t size;
+ void *buffer;
+ size_t size;
};
struct bts_tracer {
- /* The common DS part. */
- struct ds_tracer ds;
- /* The trace including the DS configuration. */
- struct bts_trace trace;
- /* Buffer overflow notification function. */
- bts_ovfl_callback_t ovfl;
+ /* The common DS part: */
+ struct ds_tracer ds;
+
+ /* The trace including the DS configuration: */
+ struct bts_trace trace;
+
+ /* Buffer overflow notification function: */
+ bts_ovfl_callback_t ovfl;
};
struct pebs_tracer {
- /* The common DS part. */
- struct ds_tracer ds;
- /* The trace including the DS configuration. */
- struct pebs_trace trace;
- /* Buffer overflow notification function. */
- pebs_ovfl_callback_t ovfl;
+ /* The common DS part: */
+ struct ds_tracer ds;
+
+ /* The trace including the DS configuration: */
+ struct pebs_trace trace;
+
+ /* Buffer overflow notification function: */
+ pebs_ovfl_callback_t ovfl;
};
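Both tracer types embed the common struct ds_tracer as their first member, so code that holds a pointer to the shared part can recover the enclosing tracer. A minimal sketch of that idiom using the kernel's container_of(); the helper name is hypothetical:

static struct bts_tracer *sketch_bts_tracer_of(struct ds_tracer *ds)
{
	/* Map the embedded common part back to its enclosing bts_tracer: */
	return container_of(ds, struct bts_tracer, ds);
}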
/*
@@ -95,6 +108,7 @@ struct pebs_tracer {
*
* The DS configuration consists of the following fields; different
* architectures vary in the size of those fields.
+ *
* - double-word aligned base linear address of the BTS buffer
* - write pointer into the BTS buffer
* - end linear address of the BTS buffer (one byte beyond the end of
@@ -133,19 +147,20 @@ enum ds_field {
};
enum ds_qualifier {
- ds_bts = 0,
+ ds_bts = 0,
ds_pebs
};
-static inline unsigned long ds_get(const unsigned char *base,
- enum ds_qualifier qual, enum ds_field field)
+static inline unsigned long
+ds_get(const unsigned char *base, enum ds_qualifier qual, enum ds_field field)
{
base += (ds_cfg.sizeof_ptr_field * (field + (4 * qual)));
return *(unsigned long *)base;
}
-static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
- enum ds_field field, unsigned long value)
+static inline void
+ds_set(unsigned char *base, enum ds_qualifier qual, enum ds_field field,
+ unsigned long value)
{
base += (ds_cfg.sizeof_ptr_field * (field + (4 * qual)));
(*(unsigned long *)base) = value;
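The accessors locate a field purely by arithmetic: four pointer-sized slots per qualifier, BTS first, then PEBS. A self-contained userspace sketch of the same offset computation, assuming 8-byte fields; the enum names mirror the kernel's ds_field/ds_qualifier layout, but the program is illustrative only:

#include <stdio.h>
#include <string.h>

enum { ds_buffer_base, ds_index, ds_absolute_maximum, ds_interrupt_threshold };
enum { ds_bts, ds_pebs };

#define SIZEOF_PTR_FIELD 8	/* assumed 64-bit configuration */

static unsigned long sketch_ds_get(const unsigned char *base, int qual, int field)
{
	unsigned long val;

	base += SIZEOF_PTR_FIELD * (field + (4 * qual));
	memcpy(&val, base, sizeof(val));
	return val;
}

int main(void)
{
	unsigned char ds[12 * 8] = { 0 };
	unsigned long index = 0x1000;

	/* The PEBS write index lives at offset 8 * (1 + 4 * 1) == 40: */
	memcpy(ds + 40, &index, sizeof(index));
	printf("pebs index: %#lx\n", sketch_ds_get(ds, ds_pebs, ds_index));
	return 0;
}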
@@ -157,7 +172,6 @@ static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
*/
static DEFINE_SPINLOCK(ds_lock);
-
/*
* We either support (system-wide) per-cpu or per-thread allocation.
* We distinguish the two based on the task_struct pointer, where a
@@ -211,17 +225,21 @@ static inline int check_tracer(struct task_struct *task)
* deallocated when the last user puts the context.
*/
struct ds_context {
- /* The DS configuration; goes into MSR_IA32_DS_AREA. */
- unsigned char ds[MAX_SIZEOF_DS];
- /* The owner of the BTS and PEBS configuration, respectively. */
- struct bts_tracer *bts_master;
- struct pebs_tracer *pebs_master;
- /* Use count. */
+ /* The DS configuration; goes into MSR_IA32_DS_AREA: */
+ unsigned char ds[MAX_SIZEOF_DS];
+
+ /* The owner of the BTS and PEBS configuration, respectively: */
+ struct bts_tracer *bts_master;
+ struct pebs_tracer *pebs_master;
+
+ /* Use count: */
unsigned long count;
- /* Pointer to the context pointer field. */
- struct ds_context **this;
- /* The traced task; NULL for current cpu. */
- struct task_struct *task;
+
+ /* Pointer to the context pointer field: */
+ struct ds_context **this;
+
+ /* The traced task; NULL for current cpu: */
+ struct task_struct *task;
};
static DEFINE_PER_CPU(struct ds_context *, system_context_array);
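A single context structure serves both allocation modes: the task pointer is the discriminator, with NULL meaning the context traces the current cpu system-wide. A one-line sketch of that check; the helper name is hypothetical:

static int sketch_context_is_cpu_wide(const struct ds_context *context)
{
	/* NULL task <=> system-wide tracing on the current cpu: */
	return context->task == NULL;
}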
@@ -328,9 +346,9 @@ static void ds_overflow(struct ds_context *context, enum ds_qualifier qual)
* The remainder of any partially written record is zeroed out.
*
* context: the DS context
- * qual: the buffer type
- * record: the data to write
- * size: the size of the data
+ * qual: the buffer type
+ * record: the data to write
+ * size: the size of the data
*/
static int ds_write(struct ds_context *context, enum ds_qualifier qual,
const void *record, size_t size)
@@ -429,12 +447,12 @@ enum bts_field {
bts_to,
bts_flags,
- bts_qual = bts_from,
- bts_jiffies = bts_to,
- bts_pid = bts_flags,
+ bts_qual = bts_from,
+ bts_jiffies = bts_to,
+ bts_pid = bts_flags,
- bts_qual_mask = (bts_qual_max - 1),
- bts_escape = ((unsigned long)-1 & ~bts_qual_mask)
+ bts_qual_mask = (bts_qual_max - 1),
+ bts_escape = ((unsigned long)-1 & ~bts_qual_mask)
};
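The enum overlays two record layouts: an ordinary branch record (from, to, flags) and an escape record whose first slot carries bts_escape in its upper bits with an event qualifier in the low bts_qual_mask bits. A standalone sketch of that encoding; the value of bts_qual_max (4) is assumed here purely for illustration:

#include <stdio.h>

#define BTS_QUAL_MAX	4UL	/* assumption; the real value comes from asm/ds.h */
#define BTS_QUAL_MASK	(BTS_QUAL_MAX - 1)
#define BTS_ESCAPE	((unsigned long)-1 & ~BTS_QUAL_MASK)

static int sketch_is_escape(unsigned long from)
{
	/* All upper bits set marks an out-of-band (non-branch) record: */
	return (from & ~BTS_QUAL_MASK) == BTS_ESCAPE;
}

int main(void)
{
	unsigned long rec = BTS_ESCAPE | 2;	/* qualifier 2, hypothetical */

	printf("escape=%d qual=%lu\n",
	       sketch_is_escape(rec), rec & BTS_QUAL_MASK);
	return 0;
}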
static inline unsigned long bts_get(const char *base, enum bts_field field)
@@ -461,8 +479,8 @@ static inline void bts_set(char *base, enum bts_field field, unsigned long val)
*
* return: bytes read/written on success; -Eerrno, otherwise
*/
-static int bts_read(struct bts_tracer *tracer, const void *at,
- struct bts_struct *out)
+static int
+bts_read(struct bts_tracer *tracer, const void *at, struct bts_struct *out)
{
if (!tracer)
return -EINVAL;
diff --git a/arch/x86/kernel/ds_selftest.h b/arch/x86/kernel/ds_selftest.h
index 0e6e19d4c7d2..2ba8745c6663 100644
--- a/arch/x86/kernel/ds_selftest.h
+++ b/arch/x86/kernel/ds_selftest.h
@@ -12,4 +12,4 @@ extern int ds_selftest_pebs(void);
#else
static inline int ds_selftest_bts(void) { return 0; }
static inline int ds_selftest_pebs(void) { return 0; }
-#endif /* CONFIG_X86_DS_SELFTEST */
+#endif
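The header keeps the usual config-stub pattern: real prototypes when CONFIG_X86_DS_SELFTEST is set, static inline no-ops returning success otherwise, so call sites never need their own #ifdefs. A generic sketch of the idiom, with a hypothetical CONFIG_FOO_SELFTEST:

#ifdef CONFIG_FOO_SELFTEST
extern int foo_selftest(void);
#else
/* The stub compiles away entirely when the selftest is configured out: */
static inline int foo_selftest(void) { return 0; }
#endif

static int foo_init(void)
{
	return foo_selftest();	/* the call site stays free of #ifdefs */
}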
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 4ca82700c04e..8b2109a6c61c 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -1,5 +1,5 @@
/*
- * h/w branch tracer for x86 based on bts
+ * h/w branch tracer for x86 based on BTS
*
* Copyright (C) 2008-2009 Intel Corporation.
* Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
@@ -15,8 +15,8 @@
#include <asm/ds.h>
-#include "trace.h"
#include "trace_output.h"
+#include "trace.h"
#define BTS_BUFFER_SIZE (1 << 13)
@@ -197,10 +197,10 @@ static void bts_trace_print_header(struct seq_file *m)
static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
{
+ unsigned long symflags = TRACE_ITER_SYM_OFFSET;
struct trace_entry *entry = iter->ent;
struct trace_seq *seq = &iter->seq;
struct hw_branch_entry *it;
- unsigned long symflags = TRACE_ITER_SYM_OFFSET;
trace_assign_type(it, entry);
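The reshuffle above (and the matching one in the trace_selftest.c hunk below) orders local declarations by descending line length, the tip-tree "reverse christmas tree" convention. A sketch of the resulting shape, for illustration only:

static void sketch_declaration_order(void)
{
	unsigned long symflags = TRACE_ITER_SYM_OFFSET;	/* longest first */
	struct trace_entry *entry;
	struct trace_seq *seq;
	int ret;					/* shortest last */
}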
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 3c7b797d0d28..b91091267067 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -189,6 +189,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
+
/*
* Simple verification test of ftrace function tracer.
* Enable ftrace, sleep 1/10 second, and then read the trace
@@ -698,10 +699,10 @@ int
trace_selftest_startup_hw_branches(struct tracer *trace,
struct trace_array *tr)
{
- unsigned long count;
- int ret;
struct trace_iterator iter;
struct tracer tracer;
+ unsigned long count;
+ int ret;
if (!trace->open) {
printk(KERN_CONT "missing open function...");