Patches contributed by Eötvös Loránd University
commit d6e8cc6cc7ac77b0f9118f78c453a2e834e62709
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 25 18:23:03 2008 +0100
netfilter: fix warning in net/netfilter/nf_conntrack_ftp.c
this warning:
net/netfilter/nf_conntrack_ftp.c: In function 'help':
net/netfilter/nf_conntrack_ftp.c:360: warning: 'matchoff' may be used uninitialized in this function
net/netfilter/nf_conntrack_ftp.c:360: warning: 'matchlen' may be used uninitialized in this function
triggers because GCC does not recognize the (correct) error flow
between find_pattern(), 'found', 'matchoff' and 'matchlen'.
Annotate it.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Patrick McHardy <kaber@trash.net>
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 867cad6b3c8d..00fecc385f9b 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -358,7 +358,7 @@ static int help(struct sk_buff *skb,
int ret;
u32 seq;
int dir = CTINFO2DIR(ctinfo);
- unsigned int matchlen, matchoff;
+ unsigned int uninitialized_var(matchlen), uninitialized_var(matchoff);
struct nf_ct_ftp_master *ct_ftp_info = &nfct_help(ct)->help.ct_ftp_info;
struct nf_conntrack_expect *exp;
union nf_inet_addr *daddr;
commit 65f233fb1669e6c990cd1d7fd308ac7dc66dc207
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 25 18:20:13 2008 +0100
netfilter: fix warning in net/netfilter/nf_conntrack_proto_tcp.c
fix this warning:
net/netfilter/nf_conntrack_proto_tcp.c: In function ‘tcp_in_window’:
net/netfilter/nf_conntrack_proto_tcp.c:491: warning: unused variable ‘net’
net/netfilter/nf_conntrack_proto_tcp.c: In function ‘tcp_packet’:
net/netfilter/nf_conntrack_proto_tcp.c:812: warning: unused variable ‘net’
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Patrick McHardy <kaber@trash.net>
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 7f2f43c77284..debdaf75cecf 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -129,7 +129,7 @@ extern const struct nla_policy nf_ct_port_nla_policy[];
&& net_ratelimit())
#endif
#else
-#define LOG_INVALID(net, proto) 0
+static inline int LOG_INVALID(struct net *net, int proto) { return 0; }
#endif /* CONFIG_SYSCTL */
#endif /*_NF_CONNTRACK_PROTOCOL_H*/
commit 7d55718b0c19ba611241c330f688ee824e9bab79
Merge: 6f893fb2e892 de90add30e79 f4166c54bfe0
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 25 17:30:25 2008 +0100
Merge branches 'tracing/core', 'x86/urgent' and 'x86/ptrace' into tracing/hw-branch-tracing
This pulls together all the topic branches that are needed
for the DS/BTS/PEBS tracing work.
diff --cc arch/x86/kernel/ds.c
index d1a121443bde,a2d1176c38ee,c570252905a1..d6938d9351cf
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@@@ -234,16 -231,12 -234,12 +231,12 @@@@ static inline struct ds_context *ds_all
struct ds_context **p_context =
(task ? &task->thread.ds_ctx : &this_system_context);
struct ds_context *context = *p_context;
+ + unsigned long irq;
if (!context) {
- spin_unlock(&ds_lock);
-
context = kzalloc(sizeof(*context), GFP_KERNEL);
- -
- if (!context) {
- spin_lock(&ds_lock);
+ if (!context)
return NULL;
- }
context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
if (!context->ds) {
@@@@ -251,30 -244,27 -247,18 +244,27 @@@@
return NULL;
}
- spin_lock(&ds_lock);
- /*
- * Check for race - another CPU could have allocated
- * it meanwhile:
- */
- *p_context = context;
+ + spin_lock_irqsave(&ds_lock, irq);
+
- context->this = p_context;
- context->task = task;
+ if (*p_context) {
+ kfree(context->ds);
+ kfree(context);
- return *p_context;
- }
+
- *p_context = context;
+ + context = *p_context;
+ + } else {
+ + *p_context = context;
- context->this = p_context;
- context->task = task;
- if (task)
- set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
+ + context->this = p_context;
+ + context->task = task;
- if (task)
- set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
- if (!task || (task == current))
- wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
+ + if (task)
+ + set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
- if (!task || (task == current))
- wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
-
- - get_tracer(task);
+ + if (!task || (task == current))
+ + wrmsrl(MSR_IA32_DS_AREA,
+ + (unsigned long)context->ds);
+ + }
+ + spin_unlock_irqrestore(&ds_lock, irq);
}
context->count++;
@@@@ -398,26 -391,27 -382,25 +391,27 @@@@ static int ds_request(struct task_struc
return -EOPNOTSUPP;
- - spin_lock(&ds_lock);
-
- if (!check_tracer(task))
- return -EPERM;
- -
- - error = -ENOMEM;
context = ds_alloc_context(task);
if (!context)
- goto out_unlock;
+ + return -ENOMEM;
+ +
+ + spin_lock_irqsave(&ds_lock, irq);
+
+ error = -EPERM;
+ if (!check_tracer(task))
goto out_unlock;
+ + get_tracer(task);
+ +
error = -EALREADY;
if (context->owner[qual] == current)
- - goto out_unlock;
+ + goto out_put_tracer;
error = -EPERM;
if (context->owner[qual] != NULL)
- - goto out_unlock;
+ + goto out_put_tracer;
context->owner[qual] = current;
- - spin_unlock(&ds_lock);
+ + spin_unlock_irqrestore(&ds_lock, irq);
error = -ENOMEM;
commit 7807fafa52b990abb321f1212416c71e64523ecb
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 25 08:44:24 2008 +0100
lockdep: fix unused function warning in kernel/lockdep.c
Impact: fix build warning
this warning:
kernel/lockdep.c:584: warning: ‘print_lock_dependencies’ defined but not used
triggers because print_lock_dependencies() is only used if both
CONFIG_TRACE_IRQFLAGS and CONFIG_PROVE_LOCKING are enabled.
But adding #ifdefs is not an option here - it would spread out to 4-5
other helper functions and uglify the file. So mark this function
as __used - it's static and the compiler can eliminate it just fine.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index a42858303233..c137953420e0 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -580,7 +580,8 @@ static void print_lock_class_header(struct lock_class *class, int depth)
/*
* printk all lock dependencies starting at <entry>:
*/
-static void print_lock_dependencies(struct lock_class *class, int depth)
+static void __used
+print_lock_dependencies(struct lock_class *class, int depth)
{
struct lock_list *entry;
commit e951e4af2e399c46891004d4931333d2d8d520ab
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 25 08:42:01 2008 +0100
x86: fix unused variable warning in arch/x86/kernel/hpet.c
Impact: fix build warning
this warning:
arch/x86/kernel/hpet.c:36: warning: ‘hpet_num_timers’ defined but not used
Triggers because hpet_num_timers is unused in the !CONFIG_PCI_MSI case.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 15fcaacc1f84..3f0a3edf0a57 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -33,7 +33,9 @@
* HPET address is set in acpi/boot.c, when an ACPI entry exists
*/
unsigned long hpet_address;
+#ifdef CONFIG_PCI_MSI
static unsigned long hpet_num_timers;
+#endif
static void __iomem *hpet_virt_address;
struct hpet_dev {
commit 14bfc987e395797dfe03e915e8b4c7fc9e5078e4
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 25 08:58:11 2008 +0100
tracing, tty: fix warnings caused by branch tracing and tty_kref_get()
Stephen Rothwell reported tht this warning started triggering in
linux-next:
In file included from init/main.c:27:
include/linux/tty.h: In function ‘tty_kref_get’:
include/linux/tty.h:330: warning: ‘______f’ is static but declared in inline function ‘tty_kref_get’ which is not static
Which gcc emits for 'extern inline' functions that nevertheless define
static variables. Change it to 'static inline', which is the norm
in the kernel anyway.
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 3b8121d4e36f..eaec37c9d83d 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -325,7 +325,7 @@ extern struct class *tty_class;
* go away
*/
-extern inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
+static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
{
if (tty)
kref_get(&tty->kref);
commit 943f3d030003e1fa5f77647328e805441213bf49
Merge: 64b7482de253 b19b3c74c7bb 6f893fb2e892
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Nov 24 17:46:57 2008 +0100
Merge branches 'sched/core', 'core/core' and 'tracing/core' into cpus4096
diff --cc kernel/Makefile
index 6a212b842d86,19fad003b19d,03a45e7e87b7..010ccb311166
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@@@ -19,7 -19,8 -19,12 +19,11 @@@@ CFLAGS_REMOVE_mutex-debug.o = -p
CFLAGS_REMOVE_rtmutex-debug.o = -pg
CFLAGS_REMOVE_cgroup-debug.o = -pg
CFLAGS_REMOVE_sched_clock.o = -pg
--CFLAGS_REMOVE_sched.o = -pg
+ endif
++ ifdef CONFIG_FUNCTION_RET_TRACER
++ CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
++ CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
+ endif
obj-$(CONFIG_FREEZER) += freezer.o
obj-$(CONFIG_PROFILING) += profile.o
diff --cc kernel/sched.c
index 338340a3fb89,558e5f284269,388d9db044ab..bb827651558e
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@@@ -703,18 -703,45 -709,45 +709,18 @@@@ static __read_mostly char *sched_feat_n
#undef SCHED_FEAT
--static int sched_feat_open(struct inode *inode, struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
-static ssize_t
-sched_feat_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
++static int sched_feat_show(struct seq_file *m, void *v)
{
- filp->private_data = inode->i_private;
- return 0;
- }
-
- static ssize_t
- sched_feat_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
- {
-- char *buf;
-- int r = 0;
-- int len = 0;
int i;
for (i = 0; sched_feat_names[i]; i++) {
-- len += strlen(sched_feat_names[i]);
-- len += 4;
- }
-
- buf = kmalloc(len + 2, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (i = 0; sched_feat_names[i]; i++) {
- if (sysctl_sched_features & (1UL << i))
- r += sprintf(buf + r, "%s ", sched_feat_names[i]);
- else
- r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
++ if (!(sysctl_sched_features & (1UL << i)))
++ seq_puts(m, "NO_");
++ seq_printf(m, "%s ", sched_feat_names[i]);
}
++ seq_puts(m, "\n");
- buf = kmalloc(len + 2, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (i = 0; sched_feat_names[i]; i++) {
- if (sysctl_sched_features & (1UL << i))
- r += sprintf(buf + r, "%s ", sched_feat_names[i]);
- else
- r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
- }
-
-- r += sprintf(buf + r, "\n");
-- WARN_ON(r >= len + 2);
--
-- r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
--
-- kfree(buf);
--
-- return r;
++ return 0;
}
static ssize_t
commit 6f893fb2e89287a4d755f928c3cda9d18440355c
Merge: 0429149fb5e0 1d926f275639 69bb54ec05f5 65afa5e603d5 cbe2f5a6e84e 813b8520f5c2 033601a32b20 958086d17844 fb91ee6cf5b8
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Nov 24 17:46:24 2008 +0100
Merge branches 'tracing/branch-tracer', 'tracing/fastboot', 'tracing/ftrace', 'tracing/function-return-tracer', 'tracing/power-tracer', 'tracing/powerpc', 'tracing/ring-buffer', 'tracing/stack-tracer' and 'tracing/urgent' into tracing/core
diff --cc include/linux/ftrace.h
index f7ba4ea5e128,f7ba4ea5e128,13e9cfc09928,938ca1942641,f7ba4ea5e128,703eb53cfa2b,f7ba4ea5e128,f7ba4ea5e128,703eb53cfa2b..7854d87b97b2
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@@@@@@@@@ -253,11 -253,11 -253,12 -253,11 -253,11 -181,6 -253,11 -253,11 -181,6 +253,12 @@@@@@@@@@ static inline void __ftrace_enabled_res
#endif
#ifdef CONFIG_TRACING
+ +extern int ftrace_dump_on_oops;
+ +
+ +extern void tracing_start(void);
+ +extern void tracing_stop(void);
++ ++++++extern void ftrace_off_permanent(void);
+ +
extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
@@@@@@@@@@ -288,8 -288,8 -289,9 -288,8 -288,8 -211,6 -288,8 -288,8 -211,6 +289,9 @@@@@@@@@@ ftrace_special(unsigned long arg1, unsi
static inline int
ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));
+ +static inline void tracing_start(void) { }
+ +static inline void tracing_stop(void) { }
++ ++++++static inline void ftrace_off_permanent(void) { }
static inline int
ftrace_printk(const char *fmt, ...)
{
@@@@@@@@@@ -310,26 -310,26 -312,26 -310,34 -310,26 -229,25 -310,26 -310,26 -229,25 +312,34 @@@@@@@@@@ ftrace_init_module(struct module *mod
#endif
- -struct boot_trace {
- - pid_t caller;
- - char func[KSYM_NAME_LEN];
- - int result;
- - unsigned long long duration; /* usecs */
- - ktime_t calltime;
- - ktime_t rettime;
+ +/*
+ + * Structure that defines a return function trace.
+ + */
+ +struct ftrace_retfunc {
+ + unsigned long ret; /* Return address */
+ + unsigned long func; /* Current function */
+ + unsigned long long calltime;
+ + unsigned long long rettime;
+ + /* Number of functions that overran the depth limit for current task */
+ + unsigned long overrun;
};
- -#ifdef CONFIG_BOOT_TRACER
- -extern void trace_boot(struct boot_trace *it, initcall_t fn);
- -extern void start_boot_trace(void);
- -extern void stop_boot_trace(void);
- -#else
- -static inline void trace_boot(struct boot_trace *it, initcall_t fn) { }
- -static inline void start_boot_trace(void) { }
- -static inline void stop_boot_trace(void) { }
- -#endif
+ +#ifdef CONFIG_FUNCTION_RET_TRACER
+++ +++++#define FTRACE_RETFUNC_DEPTH 50
+++ +++++#define FTRACE_RETSTACK_ALLOC_SIZE 32
+ +/* Type of a callback handler of tracing return function */
+ +typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
+ +extern int register_ftrace_return(trace_function_return_t func);
+ +/* The current handler in use */
+ +extern trace_function_return_t ftrace_function_return;
+ +extern void unregister_ftrace_return(void);
+++ + ++
+++ +++++extern void ftrace_retfunc_init_task(struct task_struct *t);
+++ +++++extern void ftrace_retfunc_exit_task(struct task_struct *t);
+++ +++++#else
+++ +++++static inline void ftrace_retfunc_init_task(struct task_struct *t) { }
+++ +++++static inline void ftrace_retfunc_exit_task(struct task_struct *t) { }
+ +#endif
#endif /* _LINUX_FTRACE_H */
diff --cc kernel/trace/Kconfig
index 61e8cca6ff45,b8378fad29a3,b8378fad29a3,b8378fad29a3,b8378fad29a3,33dbefd471e8,b8378fad29a3,87fc34a1bb91,33dbefd471e8..9cbf7761f498
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@@@@@@@@@ -158,59 -158,44 -158,44 -158,44 -158,44 -138,6 -158,44 -161,44 -138,6 +161,59 @@@@@@@@@@ config BOOT_TRACE
selected, because the self-tests are an initcall as well and that
would invalidate the boot trace. )
+ +config TRACE_BRANCH_PROFILING
+ + bool "Trace likely/unlikely profiler"
+ + depends on DEBUG_KERNEL
+ + select TRACING
+ + help
+ + This tracer profiles all the the likely and unlikely macros
+ + in the kernel. It will display the results in:
+ +
---- -- /debugfs/tracing/profile_likely
---- -- /debugfs/tracing/profile_unlikely
++++++++ /debugfs/tracing/profile_annotated_branch
+ +
+ + Note: this will add a significant overhead, only turn this
+ + on if you need to profile the system's use of these macros.
+ +
+ + Say N if unsure.
+ +
++++++++config PROFILE_ALL_BRANCHES
++++++++ bool "Profile all if conditionals"
++++++++ depends on TRACE_BRANCH_PROFILING
++++++++ help
++++++++ This tracer profiles all branch conditions. Every if ()
++++++++ taken in the kernel is recorded whether it hit or miss.
++++++++ The results will be displayed in:
++++++++
++++++++ /debugfs/tracing/profile_branch
++++++++
++++++++ This configuration, when enabled, will impose a great overhead
++++++++ on the system. This should only be enabled when the system
++++++++ is to be analyzed
++++++++
++++++++ Say N if unsure.
++++++++
+ +config TRACING_BRANCHES
+ + bool
+ + help
+ + Selected by tracers that will trace the likely and unlikely
+ + conditions. This prevents the tracers themselves from being
+ + profiled. Profiling the tracing infrastructure can only happen
+ + when the likelys and unlikelys are not being traced.
+ +
+ +config BRANCH_TRACER
+ + bool "Trace likely/unlikely instances"
+ + depends on TRACE_BRANCH_PROFILING
+ + select TRACING_BRANCHES
+ + help
+ + This traces the events of likely and unlikely condition
+ + calls in the kernel. The difference between this and the
+ + "Trace likely/unlikely profiler" is that this is not a
+ + histogram of the callers, but actually places the calling
+ + events into a running trace buffer to see when and where the
+ + events happened, as well as their results.
+ +
+ + Say N if unsure.
+ +
config STACK_TRACER
bool "Trace max stack"
depends on HAVE_FUNCTION_TRACER
diff --cc kernel/trace/trace.c
index 4ee6f0375222,4ee6f0375222,0dbfb23ced97,4ee6f0375222,4ee6f0375222,d86e3252f300,4ee6f0375222,48d1536f1ca4,d86e3252f300..a45b59e53fbc
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@@@@@@@@@ -272,9 -272,9 -272,9 -272,9 -272,9 -213,6 -272,9 -273,11 -213,6 +273,11 @@@@@@@@@@ static const char *trace_options[] =
"stacktrace",
"sched-tree",
"ftrace_printk",
+ + "ftrace_preempt",
+ + "branch",
+ + "annotate",
+++++++ + "userstacktrace",
+++++++ + "sym-userobj",
NULL
};
@@@@@@@@@@ -657,76 -657,76 -657,91 -657,76 -657,76 -581,6 -657,76 -682,76 -581,6 +682,91 @@@@@@@@@@ static void trace_init_cmdlines(void
cmdline_idx = 0;
}
+ +static int trace_stop_count;
+ +static DEFINE_SPINLOCK(tracing_start_lock);
+ +
++ ++++++/**
++ ++++++ * ftrace_off_permanent - disable all ftrace code permanently
++ ++++++ *
++ ++++++ * This should only be called when a serious anomally has
++ ++++++ * been detected. This will turn off the function tracing,
++ ++++++ * ring buffers, and other tracing utilites. It takes no
++ ++++++ * locks and can be called from any context.
++ ++++++ */
++ ++++++void ftrace_off_permanent(void)
++ ++++++{
++ ++++++ tracing_disabled = 1;
++ ++++++ ftrace_stop();
++ ++++++ tracing_off_permanent();
++ ++++++}
++ ++++++
+ +/**
+ + * tracing_start - quick start of the tracer
+ + *
+ + * If tracing is enabled but was stopped by tracing_stop,
+ + * this will start the tracer back up.
+ + */
+ +void tracing_start(void)
+ +{
+ + struct ring_buffer *buffer;
+ + unsigned long flags;
+ +
+ + if (tracing_disabled)
+ + return;
+ +
+ + spin_lock_irqsave(&tracing_start_lock, flags);
+ + if (--trace_stop_count)
+ + goto out;
+ +
+ + if (trace_stop_count < 0) {
+ + /* Someone screwed up their debugging */
+ + WARN_ON_ONCE(1);
+ + trace_stop_count = 0;
+ + goto out;
+ + }
+ +
+ +
+ + buffer = global_trace.buffer;
+ + if (buffer)
+ + ring_buffer_record_enable(buffer);
+ +
+ + buffer = max_tr.buffer;
+ + if (buffer)
+ + ring_buffer_record_enable(buffer);
+ +
+ + ftrace_start();
+ + out:
+ + spin_unlock_irqrestore(&tracing_start_lock, flags);
+ +}
+ +
+ +/**
+ + * tracing_stop - quick stop of the tracer
+ + *
+ + * Light weight way to stop tracing. Use in conjunction with
+ + * tracing_start.
+ + */
+ +void tracing_stop(void)
+ +{
+ + struct ring_buffer *buffer;
+ + unsigned long flags;
+ +
+ + ftrace_stop();
+ + spin_lock_irqsave(&tracing_start_lock, flags);
+ + if (trace_stop_count++)
+ + goto out;
+ +
+ + buffer = global_trace.buffer;
+ + if (buffer)
+ + ring_buffer_record_disable(buffer);
+ +
+ + buffer = max_tr.buffer;
+ + if (buffer)
+ + ring_buffer_record_disable(buffer);
+ +
+ + out:
+ + spin_unlock_irqrestore(&tracing_start_lock, flags);
+ +}
+ +
void trace_stop_cmdline_recording(void);
static void trace_save_cmdline(struct task_struct *tsk)
@@@@@@@@@@ -1690,18 -1690,18 -1705,18 -1690,18 -1690,18 -1448,6 -1690,18 -1829,27 -1448,6 +1844,27 @@@@@@@@@@ print_lat_fmt(struct trace_iterator *it
trace_seq_print_cont(s, iter);
break;
}
+ + case TRACE_BRANCH: {
+ + struct trace_branch *field;
+ +
+ + trace_assign_type(field, entry);
+ +
+ + trace_seq_printf(s, "[%s] %s:%s:%d\n",
+ + field->correct ? " ok " : " MISS ",
+ + field->func,
+ + field->file,
+ + field->line);
+ + break;
+ + }
+++++++ + case TRACE_USER_STACK: {
+++++++ + struct userstack_entry *field;
+++++++ +
+++++++ + trace_assign_type(field, entry);
+++++++ +
+++++++ + seq_print_userip_objs(field, s, sym_flags);
+++++++ + trace_seq_putc(s, '\n');
+++++++ + break;
+++++++ + }
default:
trace_seq_printf(s, "Unknown type %d\n", entry->type);
}
@@@@@@@@@@ -1837,22 -1837,22 -1852,22 -1837,22 -1837,22 -1581,6 -1837,22 -1985,35 -1581,6 +2000,35 @@@@@@@@@@ static enum print_line_t print_trace_fm
trace_seq_print_cont(s, iter);
break;
}
+ + case TRACE_FN_RET: {
+ + return print_return_function(iter);
+ + break;
+ + }
+ + case TRACE_BRANCH: {
+ + struct trace_branch *field;
+ +
+ + trace_assign_type(field, entry);
+ +
+ + trace_seq_printf(s, "[%s] %s:%s:%d\n",
+ + field->correct ? " ok " : " MISS ",
+ + field->func,
+ + field->file,
+ + field->line);
+ + break;
+ + }
+++++++ + case TRACE_USER_STACK: {
+++++++ + struct userstack_entry *field;
+++++++ +
+++++++ + trace_assign_type(field, entry);
+++++++ +
+++++++ + ret = seq_print_userip_objs(field, s, sym_flags);
+++++++ + if (!ret)
+++++++ + return TRACE_TYPE_PARTIAL_LINE;
+++++++ + ret = trace_seq_putc(s, '\n');
+++++++ + if (!ret)
+++++++ + return TRACE_TYPE_PARTIAL_LINE;
+++++++ + break;
+++++++ + }
}
return TRACE_TYPE_HANDLED;
}
commit b19b3c74c7bbec45a848631b8f970ac110665a01
Merge: ed313489bade 6003ab0bad4c 42569c39917a 7918baa55514 29cbda77a67c 2b5fe6de5827 b0788caf7af7 8dd2337470d2
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Nov 24 17:44:55 2008 +0100
Merge branches 'core/debug', 'core/futexes', 'core/locking', 'core/rcu', 'core/signal', 'core/urgent' and 'core/xen' into core/core
diff --cc arch/x86/include/asm/uaccess_64.h
index f8cfd00db450,515d4dce96b5,515d4dce96b5,543ba883cc66,664f15280f14,664f15280f14,f8cfd00db450,c96c1f5d07a2..84210c479fca
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@@@@@@@@ -1,5 -1,5 -1,5 -1,5 -1,5 -1,5 -1,5 -1,5 +1,5 @@@@@@@@@
-- #ifndef __X86_64_UACCESS_H
-- #define __X86_64_UACCESS_H
-#ifndef ASM_X86__UACCESS_64_H
-#define ASM_X86__UACCESS_64_H
++ +#ifndef _ASM_X86_UACCESS_64_H
++ +#define _ASM_X86_UACCESS_64_H
/*
* User space memory access functions
@@@@@@@@@ -199,4 -198,4 -198,4 -205,4 -199,4 -199,4 -199,4 -199,4 +205,4 @@@@@@@@@ static inline int __copy_from_user_inat
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
-- #endif /* __X86_64_UACCESS_H */
-#endif /* ASM_X86__UACCESS_64_H */
++ +#endif /* _ASM_X86_UACCESS_64_H */
diff --cc include/linux/kernel.h
index dc7e0d0a6474,3f30557be2a3,2651f805ba6d,69a9bfdf9c86,fba141d3ca07,fba141d3ca07,dc7e0d0a6474,94d17ff64c5a..269df5a17b30
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@@@@@@@@ -318,36 -290,28 -288,28 -327,32 -318,32 -318,32 -318,36 -294,32 +329,36 @@@@@@@@@ static inline char *pack_hex_byte(char
return buf;
}
----- -#define pr_emerg(fmt, arg...) \
----- - printk(KERN_EMERG fmt, ##arg)
----- -#define pr_alert(fmt, arg...) \
----- - printk(KERN_ALERT fmt, ##arg)
----- -#define pr_crit(fmt, arg...) \
----- - printk(KERN_CRIT fmt, ##arg)
----- -#define pr_err(fmt, arg...) \
----- - printk(KERN_ERR fmt, ##arg)
----- -#define pr_warning(fmt, arg...) \
----- - printk(KERN_WARNING fmt, ##arg)
----- -#define pr_notice(fmt, arg...) \
----- - printk(KERN_NOTICE fmt, ##arg)
----- -#define pr_info(fmt, arg...) \
----- - printk(KERN_INFO fmt, ##arg)
--
-- #ifdef DEBUG
+++++ +#ifndef pr_fmt
+++++ +#define pr_fmt(fmt) fmt
+++++ +#endif
+++++ +
+++++ +#define pr_emerg(fmt, ...) \
+++++ + printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+++++ +#define pr_alert(fmt, ...) \
+++++ + printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+++++ +#define pr_crit(fmt, ...) \
+++++ + printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+++++ +#define pr_err(fmt, ...) \
+++++ + printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+++++ +#define pr_warning(fmt, ...) \
+++++ + printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+++++ +#define pr_notice(fmt, ...) \
+++++ + printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+++++ +#define pr_info(fmt, ...) \
+++++ + printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
++
/* If you are writing a driver, please use dev_dbg instead */
-- #define pr_debug(fmt, arg...) \
-- printk(KERN_DEBUG fmt, ##arg)
++ #if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
++ #define pr_debug(fmt, ...) do { \
--- - dynamic_pr_debug(fmt, ##__VA_ARGS__); \
+++++ + dynamic_pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
++ } while (0)
++ #elif defined(DEBUG)
--- -#define pr_debug(fmt, arg...) \
--- - printk(KERN_DEBUG fmt, ##arg)
+++++ +#define pr_debug(fmt, ...) \
+++++ + printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
----- -#define pr_debug(fmt, arg...) \
----- - ({ if (0) printk(KERN_DEBUG fmt, ##arg); 0; })
+++++ +#define pr_debug(fmt, ...) \
+++++ + ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
#endif
/*
diff --cc kernel/exit.c
index 2d8be7ebb0f7,16395644a98f,85a83c831856,ae2b92be5fae,80137a5d9467,b9c4d8bb72e5,2d8be7ebb0f7,80137a5d9467..30fcdf16737a
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@@@@@@@@ -1316,23 -1309,20 -1317,20 -1325,23 -1320,23 -1325,23 -1316,23 -1320,23 +1316,23 @@@@@@@@@ static int wait_task_zombie(struct task
* need to protect the access to p->parent->signal fields,
* as other threads in the parent group can be right
* here reaping other children at the same time.
++ *
++ * We use thread_group_cputime() to get times for the thread
++ * group, which consolidates times for all threads in the
++ * group including the group leader.
*/
+++++ ++ thread_group_cputime(p, &cputime);
spin_lock_irq(&p->parent->sighand->siglock);
psig = p->parent->signal;
sig = p->signal;
- -- -- thread_group_cputime(p, &cputime);
psig->cutime =
cputime_add(psig->cutime,
-- cputime_add(p->utime,
-- cputime_add(sig->utime,
-- sig->cutime)));
++ cputime_add(cputime.utime,
++ sig->cutime));
psig->cstime =
cputime_add(psig->cstime,
-- cputime_add(p->stime,
-- cputime_add(sig->stime,
-- sig->cstime)));
++ cputime_add(cputime.stime,
++ sig->cstime));
psig->cgtime =
cputime_add(psig->cgtime,
cputime_add(p->gtime,
diff --cc kernel/futex.c
index 8af10027514b,7d1136e97c14,62cbd648e28a,8af10027514b,8af10027514b,8af10027514b,8af10027514b,7d1136e97c14..e10c5c8786a6
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@@@@@@@@ -229,79 -229,79 -248,29 -229,79 -229,79 -229,79 -229,79 -229,79 +248,29 @@@@@@@@@ again
*
* NOTE: When userspace waits on a MAP_SHARED mapping, even if
* it's a read-only handle, it's expected that futexes attach to
-- ----- * the object not the particular process. Therefore we use
-- ----- * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
-- ----- * mappings of _writable_ handles.
++ +++++ * the object not the particular process.
*/
-- ----- if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
-- ----- key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
++ +++++ if (PageAnon(page)) {
++ +++++ key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
key->private.mm = mm;
key->private.address = address;
- - return 0;
- - }
- -
- - /*
- - * Linear file mappings are also simple.
- - */
- - key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
- - key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
- - if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
- - key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
- - + vma->vm_pgoff);
-- ----- return 0;
++ +++++ } else {
++ +++++ key->both.offset |= FUT_OFF_INODE; /* inode-based key */
++ +++++ key->shared.inode = page->mapping->host;
++ +++++ key->shared.pgoff = page->index;
}
-- ----- /*
- ---- * Linear file mappings are also simple.
- - * We could walk the page table to read the non-linear
- - * pte, and get the page index without fetching the page
- - * from swap. But that's a lot of code to duplicate here
- - * for a rare case, so we simply fetch the page.
-- ----- */
- ---- key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
- ---- key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
- ---- if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
- ---- key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
- ---- + vma->vm_pgoff);
- - err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
- - if (err >= 0) {
- - key->shared.pgoff =
- - page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- - put_page(page);
-- ----- return 0;
-- ----- }
- - return err;
- -}
++ +++++ get_futex_key_refs(key);
- ---- /*
- ---- * We could walk the page table to read the non-linear
- ---- * pte, and get the page index without fetching the page
- ---- * from swap. But that's a lot of code to duplicate here
- ---- * for a rare case, so we simply fetch the page.
- ---- */
- ---- err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
- ---- if (err >= 0) {
- ---- key->shared.pgoff =
- ---- page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- ---- put_page(page);
- ---- return 0;
- ---- }
- ---- return err;
- ---- }
- ----
-- -----/*
-- ----- * Take a reference to the resource addressed by a key.
-- ----- * Can be called while holding spinlocks.
-- ----- *
-- ----- */
-- -----static void get_futex_key_refs(union futex_key *key)
-- -----{
-- ----- if (key->both.ptr == NULL)
-- ----- return;
-- ----- switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-- ----- case FUT_OFF_INODE:
-- ----- atomic_inc(&key->shared.inode->i_count);
-- ----- break;
-- ----- case FUT_OFF_MMSHARED:
-- ----- atomic_inc(&key->private.mm->mm_count);
-- ----- break;
-- ----- }
++ +++++ unlock_page(page);
++ +++++ put_page(page);
++ +++++ return 0;
}
-- -----/*
-- ----- * Drop a reference to the resource addressed by a key.
-- ----- * The hash bucket spinlock must not be held.
-- ----- */
-- -----static void drop_futex_key_refs(union futex_key *key)
++ +++++static inline
++ +++++void put_futex_key(int fshared, union futex_key *key)
{
-- ----- if (!key->both.ptr)
-- ----- return;
-- ----- switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-- ----- case FUT_OFF_INODE:
-- ----- iput(key->shared.inode);
-- ----- break;
-- ----- case FUT_OFF_MMSHARED:
-- ----- mmdrop(key->private.mm);
-- ----- break;
-- ----- }
++ +++++ drop_futex_key_refs(key);
}
static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
diff --cc kernel/sched.c
index 9b1e79371c20,cc1f81b50b82,13dd2db9fb2d,2a106b6b78b0,e8819bc6f462,b388c9b243e9,9b1e79371c20,d906f72b42d2..558e5f284269
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@@@@@@@@ -1433,37 -1425,9 -1425,9 -1433,35 -1425,35 -1433,37 -1433,37 -1419,35 +1433,37 @@@@@@@@@ up
parent = parent->parent;
if (parent)
goto up;
++ out_unlock:
rcu_read_unlock();
++
++ return ret;
++ }
++
++ static int tg_nop(struct task_group *tg, void *data)
++ {
++ return 0;
++ }
++ #endif
++
++ #ifdef CONFIG_SMP
++ static unsigned long source_load(int cpu, int type);
++ static unsigned long target_load(int cpu, int type);
++ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
++
++ static unsigned long cpu_avg_load_per_task(int cpu)
++ {
++ struct rq *rq = cpu_rq(cpu);
++
++ if (rq->nr_running)
++ rq->avg_load_per_task = rq->load.weight / rq->nr_running;
++++ + else
++++ + rq->avg_load_per_task = 0;
++
++ return rq->avg_load_per_task;
}
++ #ifdef CONFIG_FAIR_GROUP_SCHED
++
static void __set_se_shares(struct sched_entity *se, unsigned long shares);
/*
@@@@@@@@@ -1547,10 -1507,14 -1507,14 -1545,10 -1537,10 -1547,10 -1547,10 -1527,16 +1547,10 @@@@@@@@@ static int tg_shares_up(struct task_gro
if (!rq_weight)
rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
-- - for_each_cpu_mask(i, sd->span) {
-- - struct rq *rq = cpu_rq(i);
-- - unsigned long flags;
-
- spin_lock_irqsave(&rq->lock, flags);
- __update_group_shares_cpu(tg, i, shares, rq_weight);
- spin_unlock_irqrestore(&rq->lock, flags);
- }
++ + for_each_cpu_mask(i, sd->span)
++ + update_group_shares_cpu(tg, i, shares, rq_weight);
-- spin_lock_irqsave(&rq->lock, flags);
-- __update_group_shares_cpu(tg, i, shares, rq_weight);
-- spin_unlock_irqrestore(&rq->lock, flags);
-- }
++ return 0;
}
/*
@@@@@@@@@ -9025,25 -8905,16 -8905,19 -9021,25 -9008,25 -9023,25 -9025,25 -9008,25 +9024,25 @@@@@@@@@ long sched_group_rt_period(struct task_
static int sched_rt_global_constraints(void)
{
-- struct task_group *tg = &root_task_group;
-- u64 rt_runtime, rt_period;
++ u64 runtime, period;
int ret = 0;
- rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
- rt_runtime = tg->rt_bandwidth.rt_runtime;
+ if (sysctl_sched_rt_period <= 0)
+ return -EINVAL;
+
- rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
- rt_runtime = tg->rt_bandwidth.rt_runtime;
++ runtime = global_rt_runtime();
++ period = global_rt_period();
++
++ /*
++ * Sanity check on the sysctl variables.
++ */
++ if (runtime > period && runtime != RUNTIME_INF)
++ return -EINVAL;
mutex_lock(&rt_constraints_mutex);
-- if (!__rt_schedulable(tg, rt_period, rt_runtime))
-- ret = -EINVAL;
++ read_lock(&tasklist_lock);
++ ret = __rt_schedulable(NULL, 0, 0);
++ read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
return ret;
commit 64b7482de253c10efa2589a6212e3d2093a3efc7
Merge: 957ad0166e9f 50ee91765e25
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Nov 24 17:37:12 2008 +0100
Merge branch 'sched/rt' into sched/core