Patches contributed by Eötvös Loránd University
commit 5177dc3f2b3220b2dcfcf35eb9e6ec53ee818231
Merge: 0bb943c7a213 7f0f598a0069
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 18 21:37:07 2008 +0100
Merge branch 'linus' into tracing/urgent
commit f632ddcc0786149c0e4bef9b6b44c96a75c0d074
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 18 17:32:26 2008 +0100
x86: fix wakeup_cpu with numaq/es7000, v2, fix #2
Impact: fix boot crash
fix default_update_genapic().
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c366e891e10b..31328909456e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -585,8 +585,10 @@ early_param("elfcorehdr", setup_elfcorehdr);
static int __init default_update_genapic(void)
{
-#if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64)
- genapic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
+#ifdef CONFIG_X86_SMP
+# if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64)
+ genapic->wakeup_cpu = wakeup_secondary_cpu_via_init;
+# endif
#endif
return 0;
commit 73f56c0d35e6427081a4eabd620d8b8d8a35bd09
Merge: 0af40a4b1050 8501c45cc32c
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 18 16:48:49 2008 +0100
Merge branch 'iommu-fixes-2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into x86/urgent
commit cbe9ee00cea58d1f77b172fe22a51080e90877f2
Merge: 0bd7b79851d0 10db4ef7b9a6
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 18 15:41:36 2008 +0100
Merge branch 'x86/urgent' into x86/cleanups
commit 10db4ef7b9a65b86e4d047671a1886f4c101a859
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 18 15:23:08 2008 +0100
x86, PEBS/DS: fix code flow in ds_request()
This compiler warning:
arch/x86/kernel/ds.c: In function 'ds_request':
arch/x86/kernel/ds.c:368: warning: 'context' may be used uninitialized in this function
shows that the code flow in ds_request() is buggy: it takes the
unlock+release-context path even when the context has not been
allocated yet.
First allocate the context, then do the other checks.
Also, take care with GFP allocations under the ds_lock spinlock.
Cc: <stable@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
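The core of the fix is a well-known locking pattern: a GFP_KERNEL
allocation may sleep, so the spinlock is dropped around kzalloc(),
retaken afterwards, and the shared pointer is re-checked in case
another CPU allocated the object in the meantime. A minimal
stand-alone sketch of that pattern (the obj/obj_lock names are
hypothetical, not the actual ds.c code):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj { int data; };

static DEFINE_SPINLOCK(obj_lock);
static struct obj *global_obj;          /* protected by obj_lock */

/* Called with obj_lock held; returns with it held. */
static struct obj *obj_get_or_alloc(void)
{
        struct obj *o;

        if (global_obj)
                return global_obj;

        /* GFP_KERNEL may sleep: never hold a spinlock across it. */
        spin_unlock(&obj_lock);
        o = kzalloc(sizeof(*o), GFP_KERNEL);
        spin_lock(&obj_lock);

        if (!o)
                return NULL;

        /* Re-check: another CPU may have allocated it meanwhile. */
        if (global_obj) {
                kfree(o);
                return global_obj;
        }

        global_obj = o;
        return o;
}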
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index ac1d5b0586ba..d1a121443bde 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -236,17 +236,33 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
struct ds_context *context = *p_context;
if (!context) {
+ spin_unlock(&ds_lock);
+
context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
+ if (!context) {
+ spin_lock(&ds_lock);
return NULL;
+ }
context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
if (!context->ds) {
kfree(context);
+ spin_lock(&ds_lock);
return NULL;
}
+ spin_lock(&ds_lock);
+ /*
+ * Check for race - another CPU could have allocated
+ * it meanwhile:
+ */
+ if (*p_context) {
+ kfree(context->ds);
+ kfree(context);
+ return *p_context;
+ }
+
*p_context = context;
context->this = p_context;
@@ -384,15 +400,15 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
spin_lock(&ds_lock);
- error = -EPERM;
- if (!check_tracer(task))
- goto out_unlock;
-
error = -ENOMEM;
context = ds_alloc_context(task);
if (!context)
goto out_unlock;
+ error = -EPERM;
+ if (!check_tracer(task))
+ goto out_unlock;
+
error = -EALREADY;
if (context->owner[qual] == current)
goto out_unlock;
commit 5a209c2d58e70f9bc415b9cdf0e3b9aaefb70371
Merge: 3f8e402f34ec 0c726da983de e270219f4372
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 18 08:52:13 2008 +0100
Merge branches 'tracing/branch-tracer' and 'tracing/urgent' into tracing/core
diff --cc kernel/trace/trace.c
index 396fda034e3f,b04923b72ce0,697eda36b86a..2596b5a968c4
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@@@ -258,11 -258,9 -213,6 +258,9 @@@@ static const char *trace_options[] =
"stacktrace",
"sched-tree",
"ftrace_printk",
+ "ftrace_preempt",
- #ifdef CONFIG_BRANCH_TRACER
+ "branch",
- #endif
+ "annotate",
NULL
};
@@@@ -1031,62 -1029,62 -884,11 +1029,62 @@@@ function_trace_call_preempt_only(unsign
trace_function(tr, data, ip, parent_ip, flags, pc);
atomic_dec(&data->disabled);
- if (resched)
- preempt_enable_no_resched_notrace();
- else
- preempt_enable_notrace();
+ ftrace_preempt_enable(resched);
+}
+
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+ struct trace_array *tr = &global_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ int pc;
+
+ if (unlikely(!ftrace_function_enabled))
+ return;
+
+ /*
+ * Need to use raw, since this must be called before the
+ * recursive protection is performed.
+ */
- raw_local_irq_save(flags);
++ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ trace_function(tr, data, ip, parent_ip, flags, pc);
+ }
+
+ atomic_dec(&data->disabled);
- raw_local_irq_restore(flags);
++ local_irq_restore(flags);
+}
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+void trace_function_return(struct ftrace_retfunc *trace)
+{
+ struct trace_array *tr = &global_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ int pc;
+
+ raw_local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ __trace_function_return(tr, data, trace, flags, pc);
+ }
+ atomic_dec(&data->disabled);
+ raw_local_irq_restore(flags);
}
+#endif /* CONFIG_FUNCTION_RET_TRACER */
static struct ftrace_ops trace_ops __read_mostly =
{
@@@@ -2638,47 -2636,42 -2417,14 +2636,47 @@@@ static int tracing_set_tracer(char *buf
current_trace->reset(tr);
current_trace = t;
-- if (t->init)
-- t->init(tr);
++ if (t->init) {
++ ret = t->init(tr);
++ if (ret)
++ goto out;
++ }
+ trace_branch_enable(tr);
out:
mutex_unlock(&trace_types_lock);
- if (ret > 0)
- filp->f_pos += ret;
+ return ret;
+}
+
+static ssize_t
+tracing_set_trace_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[max_tracer_type_len+1];
+ int i;
+ size_t ret;
++ int err;
++
++ ret = cnt;
+
+ if (cnt > max_tracer_type_len)
+ cnt = max_tracer_type_len;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ /* strip ending whitespace. */
+ for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
+ buf[i] = 0;
+
- ret = tracing_set_tracer(buf);
- if (!ret)
- ret = cnt;
++ err = tracing_set_tracer(buf);
++ if (err)
++ return err;
+
- if (ret > 0)
- filp->f_pos += ret;
++ filp->f_pos += ret;
return ret;
}
diff --cc kernel/trace/trace.h
index cdbd5cc22be8,b41d7b4c2cae,8465ad052707..37947f6b92bf
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@@@ -264,11 -264,10 -234,8 +264,11 @@@@ enum print_line_t
*/
struct tracer {
const char *name;
-- void (*init)(struct trace_array *tr);
++ /* Your tracer should raise a warning if init fails */
++ int (*init)(struct trace_array *tr);
void (*reset)(struct trace_array *tr);
+ void (*start)(struct trace_array *tr);
+ void (*stop)(struct trace_array *tr);
void (*open)(struct trace_iterator *iter);
void (*pipe_open)(struct trace_iterator *iter);
void (*close)(struct trace_iterator *iter);
@@@@ -470,92 -469,90 -415,8 +470,90 @@@@ enum trace_iterator_flags
TRACE_ITER_STACKTRACE = 0x100,
TRACE_ITER_SCHED_TREE = 0x200,
TRACE_ITER_PRINTK = 0x400,
+ TRACE_ITER_PREEMPTONLY = 0x800,
- #ifdef CONFIG_BRANCH_TRACER
+ TRACE_ITER_BRANCH = 0x1000,
- #endif
+ TRACE_ITER_ANNOTATE = 0x2000,
};
+/*
+ * TRACE_ITER_SYM_MASK masks the options in trace_flags that
+ * control the output of kernel symbols.
+ */
+#define TRACE_ITER_SYM_MASK \
+ (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
+
extern struct tracer nop_trace;
+/**
+ * ftrace_preempt_disable - disable preemption scheduler safe
+ *
+ * When tracing can happen inside the scheduler, there exist
+ * cases where the tracing might happen before the need_resched
+ * flag is checked. If this happens and the tracer calls
+ * preempt_enable (after a disable), a schedule might take place
+ * causing an infinite recursion.
+ *
+ * To prevent this, we read the need_resched flag before
+ * disabling preemption. When we want to enable preemption we
+ * check the flag, if it is set, then we call preempt_enable_no_resched.
+ * Otherwise, we call preempt_enable.
+ *
+ * The rationale for doing the above is that if need_resched is set
+ * and we have yet to reschedule, we are either in an atomic location
+ * (where we do not need to check for scheduling) or we are inside
+ * the scheduler and do not want to resched.
+ */
+static inline int ftrace_preempt_disable(void)
+{
+ int resched;
+
+ resched = need_resched();
+ preempt_disable_notrace();
+
+ return resched;
+}
+
+/**
+ * ftrace_preempt_enable - enable preemption scheduler safe
+ * @resched: the return value from ftrace_preempt_disable
+ *
+ * This is a scheduler safe way to enable preemption and not miss
+ * any preemption checks. The matching disable saved the preemption
+ * state. If resched is set, then we were either inside an atomic
+ * section or inside the scheduler (we would have already scheduled
+ * otherwise). In this case, we do not want to call normal
+ * preempt_enable, but preempt_enable_no_resched instead.
+ */
+static inline void ftrace_preempt_enable(int resched)
+{
+ if (resched)
+ preempt_enable_no_resched_notrace();
+ else
+ preempt_enable_notrace();
+}
+
+#ifdef CONFIG_BRANCH_TRACER
+extern int enable_branch_tracing(struct trace_array *tr);
+extern void disable_branch_tracing(void);
+static inline int trace_branch_enable(struct trace_array *tr)
+{
+ if (trace_flags & TRACE_ITER_BRANCH)
+ return enable_branch_tracing(tr);
+ return 0;
+}
+static inline void trace_branch_disable(void)
+{
+ /* due to races, always disable */
+ disable_branch_tracing();
+}
+#else
+static inline int trace_branch_enable(struct trace_array *tr)
+{
+ return 0;
+}
+static inline void trace_branch_disable(void)
+{
+}
+#endif /* CONFIG_BRANCH_TRACER */
+
#endif /* _LINUX_KERNEL_TRACE_H */
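Since init can now fail, tracing_set_tracer() propagates the error
instead of silently activating a half-initialized tracer. A minimal
sketch of a tracer written against the new signature (mytracer and its
mytracer_buffers_ready() helper are hypothetical):

static int mytracer_init(struct trace_array *tr)
{
        int cpu;

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);

        /* Hypothetical precondition; a failing init aborts the switch. */
        if (!mytracer_buffers_ready())
                return -ENOMEM;

        return 0;
}

static void mytracer_reset(struct trace_array *tr)
{
}

static struct tracer mytracer __read_mostly =
{
        .name   = "mytracer",
        .init   = mytracer_init,
        .reset  = mytracer_reset,
};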
commit 9dacc71ff31a008d1e689fc824d31f6696454f68
Merge: 19f47c634ea8 9bf1a2445f3c
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Nov 17 10:46:18 2008 +0100
Merge commit 'v2.6.28-rc5' into x86/cleanups
commit 3f8e402f34ecc7d1d00b54703d3baa401b8bdd78
Merge: 072b40a15616 e7d3737ea1b1 227a837567e3 5821e1b74f0d
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Nov 17 09:36:22 2008 +0100
Merge branches 'tracing/branch-tracer', 'tracing/ftrace', 'tracing/function-return-tracer', 'tracing/tracepoints' and 'tracing/urgent' into tracing/core
diff --cc kernel/trace/ftrace.c
index 54cb9a7d15e5,2f78a45aac14,54cb9a7d15e5,e60205722d0c..f212da486689
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@@@@ -47,9 -47,12 -47,9 -47,6 +47,12 @@@@@
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
+/* Quick disabling of function tracer. */
+int function_trace_stop;
+
+ ++/* By default, current tracing type is normal tracing. */
+ ++enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
+ ++
/*
* ftrace_disabled is set when an anomaly is discovered.
* ftrace_disabled is much stronger than ftrace_enabled.
@@@@@ -559,9 -555,8 -559,9 -533,9 +555,8 @@@@@ static void ftrace_startup(void
return;
mutex_lock(&ftrace_start_lock);
- ftrace_start++;
- if (ftrace_start == 1)
- command |= FTRACE_ENABLE_CALLS;
+ ftrace_start_up++;
- - if (ftrace_start_up == 1)
- - command |= FTRACE_ENABLE_CALLS;
+ ++ command |= FTRACE_ENABLE_CALLS;
if (saved_ftrace_func != ftrace_trace_function) {
saved_ftrace_func = ftrace_trace_function;
@@@@@ -1211,7 -1206,7 -1211,7 -1186,7 +1210,7 @@@@@ ftrace_regex_release(struct inode *inod
mutex_lock(&ftrace_sysctl_lock);
mutex_lock(&ftrace_start_lock);
- - if (iter->filtered && ftrace_start_up && ftrace_enabled)
- if (iter->filtered && ftrace_start && ftrace_enabled)
+ ++ if (ftrace_start_up && ftrace_enabled)
ftrace_run_update_code(FTRACE_ENABLE_CALLS);
mutex_unlock(&ftrace_start_lock);
mutex_unlock(&ftrace_sysctl_lock);
@@@@@ -1479,19 -1492,48 -1479,19 -1454,3 +1496,48 @@@@@ ftrace_enable_sysctl(struct ctl_table *
return ret;
}
+#ifdef CONFIG_FUNCTION_RET_TRACER
+ ++
+ ++/* The callback that hooks the return of a function */
+trace_function_return_t ftrace_function_return =
+ (trace_function_return_t)ftrace_stub;
- - void register_ftrace_return(trace_function_return_t func)
+ ++
+ ++int register_ftrace_return(trace_function_return_t func)
+{
+ ++ int ret = 0;
+ ++
+ ++ mutex_lock(&ftrace_sysctl_lock);
+ ++
+ ++ /*
+ ++ * Don't launch return tracing if normal function
+ ++ * tracing is already running.
+ ++ */
+ ++ if (ftrace_trace_function != ftrace_stub) {
+ ++ ret = -EBUSY;
+ ++ goto out;
+ ++ }
+ ++
+ ++ ftrace_tracing_type = FTRACE_TYPE_RETURN;
+ ftrace_function_return = func;
+ ++ ftrace_startup();
+ ++
+ ++out:
+ ++ mutex_unlock(&ftrace_sysctl_lock);
+ ++ return ret;
+}
+
+void unregister_ftrace_return(void)
+{
+ ++ mutex_lock(&ftrace_sysctl_lock);
+ ++
+ ftrace_function_return = (trace_function_return_t)ftrace_stub;
+ ++ ftrace_shutdown();
+ ++ /* Restore normal tracing type */
+ ++ ftrace_tracing_type = FTRACE_TYPE_ENTER;
+ ++
+ ++ mutex_unlock(&ftrace_sysctl_lock);
+}
+#endif
+
+
+
diff --cc kernel/trace/trace_branch.c
index 2511e32572ca,44bd39539d61,85265553918f,000000000000..23f9b02ce967
mode 100644,100644,100644,000000..100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@@@@ -1,320 -1,321 -1,320 -1,0 +1,321 @@@@@
+/*
+ * unlikely profiler
+ *
+ * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
+ */
+#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ftrace.h>
+#include <linux/hash.h>
+#include <linux/fs.h>
+#include <asm/local.h>
+#include "trace.h"
+
+#ifdef CONFIG_BRANCH_TRACER
+
+static int branch_tracing_enabled __read_mostly;
+static DEFINE_MUTEX(branch_tracing_mutex);
+static struct trace_array *branch_tracer;
+
+static void
+probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+{
+ struct trace_array *tr = branch_tracer;
+ struct ring_buffer_event *event;
+ struct trace_branch *entry;
+ unsigned long flags, irq_flags;
+ int cpu, pc;
+ const char *p;
+
+ /*
+ * I would love to save just the ftrace_likely_data pointer, but
+ * this code can also be used by modules. Ugly things can happen
+ * if the module is unloaded, and then we go and read the
+ * pointer. This is slower, but much safer.
+ */
+
+ if (unlikely(!tr))
+ return;
+
-- local_irq_save(flags);
+++ raw_local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+ goto out;
+
+ event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+ &irq_flags);
+ if (!event)
+ goto out;
+
+ pc = preempt_count();
+ entry = ring_buffer_event_data(event);
+ tracing_generic_entry_update(&entry->ent, flags, pc);
+ entry->ent.type = TRACE_BRANCH;
+
+ /* Strip off the path, only save the file */
+ p = f->file + strlen(f->file);
+ while (p >= f->file && *p != '/')
+ p--;
+ p++;
+
+ strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
+ strncpy(entry->file, p, TRACE_FILE_SIZE);
+ entry->func[TRACE_FUNC_SIZE] = 0;
+ entry->file[TRACE_FILE_SIZE] = 0;
+ entry->line = f->line;
+ entry->correct = val == expect;
+
+ ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+ out:
+ atomic_dec(&tr->data[cpu]->disabled);
-- local_irq_restore(flags);
+++ raw_local_irq_restore(flags);
+}
+
+static inline
+void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+{
+ if (!branch_tracing_enabled)
+ return;
+
+ probe_likely_condition(f, val, expect);
+}
+
+int enable_branch_tracing(struct trace_array *tr)
+{
+ int ret = 0;
+
+ mutex_lock(&branch_tracing_mutex);
+ branch_tracer = tr;
+ /*
+ * Must be seen before enabling. The reader is a condition
+ * where we do not need a matching rmb()
+ */
+ smp_wmb();
+ branch_tracing_enabled++;
+ mutex_unlock(&branch_tracing_mutex);
+
+ return ret;
+}
+
+void disable_branch_tracing(void)
+{
+ mutex_lock(&branch_tracing_mutex);
+
+ if (!branch_tracing_enabled)
+ goto out_unlock;
+
+ branch_tracing_enabled--;
+
+ out_unlock:
+ mutex_unlock(&branch_tracing_mutex);
+}
+
+static void start_branch_trace(struct trace_array *tr)
+{
+ enable_branch_tracing(tr);
+}
+
+static void stop_branch_trace(struct trace_array *tr)
+{
+ disable_branch_tracing();
+}
+
- - static void branch_trace_init(struct trace_array *tr)
+ ++static int branch_trace_init(struct trace_array *tr)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ tracing_reset(tr, cpu);
+
+ start_branch_trace(tr);
+ ++ return 0;
+}
+
+static void branch_trace_reset(struct trace_array *tr)
+{
+ stop_branch_trace(tr);
+}
+
+struct tracer branch_trace __read_mostly =
+{
+ .name = "branch",
+ .init = branch_trace_init,
+ .reset = branch_trace_reset,
+#ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_branch,
+#endif
+};
+
+__init static int init_branch_trace(void)
+{
+ return register_tracer(&branch_trace);
+}
+
+device_initcall(init_branch_trace);
+#else
+static inline
+void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+{
+}
+#endif /* CONFIG_BRANCH_TRACER */
+
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
+{
+ /*
+ * I would love to have a trace point here instead, but the
+ * trace point code is so inundated with unlikely and likely
+ * conditions that the recursive nightmare that exists is too
+ * much to try to get working. At least for now.
+ */
+ trace_likely_condition(f, val, expect);
+
+ /* FIXME: Make this atomic! */
+ if (val == expect)
+ f->correct++;
+ else
+ f->incorrect++;
+}
+EXPORT_SYMBOL(ftrace_likely_update);
+
+struct ftrace_pointer {
+ void *start;
+ void *stop;
+};
+
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ftrace_pointer *f = m->private;
+ struct ftrace_branch_data *p = v;
+
+ (*pos)++;
+
+ if (v == (void *)1)
+ return f->start;
+
+ ++p;
+
+ if ((void *)p >= (void *)f->stop)
+ return NULL;
+
+ return p;
+}
+
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
+ void *t = (void *)1;
+ loff_t l = 0;
+
+ for (; t && l < *pos; t = t_next(m, t, &l))
+ ;
+
+ return t;
+}
+
+static void t_stop(struct seq_file *m, void *p)
+{
+}
+
+static int t_show(struct seq_file *m, void *v)
+{
+ struct ftrace_branch_data *p = v;
+ const char *f;
+ unsigned long percent;
+
+ if (v == (void *)1) {
+ seq_printf(m, " correct incorrect %% "
+ " Function "
+ " File Line\n"
+ " ------- --------- - "
+ " -------- "
+ " ---- ----\n");
+ return 0;
+ }
+
+ /* Only print the file, not the path */
+ f = p->file + strlen(p->file);
+ while (f >= p->file && *f != '/')
+ f--;
+ f++;
+
+ if (p->correct) {
+ percent = p->incorrect * 100;
+ percent /= p->correct + p->incorrect;
+ } else
+ percent = p->incorrect ? 100 : 0;
+
+ seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent);
+ seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
+ return 0;
+}
+
+static struct seq_operations tracing_likely_seq_ops = {
+ .start = t_start,
+ .next = t_next,
+ .stop = t_stop,
+ .show = t_show,
+};
+
+static int tracing_likely_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ ret = seq_open(file, &tracing_likely_seq_ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = (void *)inode->i_private;
+ }
+
+ return ret;
+}
+
+static struct file_operations tracing_likely_fops = {
+ .open = tracing_likely_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+extern unsigned long __start_likely_profile[];
+extern unsigned long __stop_likely_profile[];
+extern unsigned long __start_unlikely_profile[];
+extern unsigned long __stop_unlikely_profile[];
+
+static struct ftrace_pointer ftrace_likely_pos = {
+ .start = __start_likely_profile,
+ .stop = __stop_likely_profile,
+};
+
+static struct ftrace_pointer ftrace_unlikely_pos = {
+ .start = __start_unlikely_profile,
+ .stop = __stop_unlikely_profile,
+};
+
+static __init int ftrace_branch_init(void)
+{
+ struct dentry *d_tracer;
+ struct dentry *entry;
+
+ d_tracer = tracing_init_dentry();
+
+ entry = debugfs_create_file("profile_likely", 0444, d_tracer,
+ &ftrace_likely_pos,
+ &tracing_likely_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs 'profile_likely' entry\n");
+
+ entry = debugfs_create_file("profile_unlikely", 0444, d_tracer,
+ &ftrace_unlikely_pos,
+ &tracing_likely_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs"
+ " 'profile_unlikely' entry\n");
+
+ return 0;
+}
+
+device_initcall(ftrace_branch_init);
commit 227a837567e339c74d9d4243d03a29bd943a018c
Author: Ingo Molnar <mingo@elte.hu>
Date: Sun Nov 16 09:50:34 2008 +0100
markers/tracepoints: fix non-modular build
fix:
kernel/marker.c: In function 'marker_module_notify':
kernel/marker.c:905: error: 'MODULE_STATE_COMING' undeclared (first use in this function)
[...]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
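The fix pattern is simply to compile the module-notifier code only
when CONFIG_MODULES is set, since the MODULE_STATE_* constants and the
notifier registration only exist in modular builds. A minimal sketch
of the same guard for a hypothetical subsystem:

#include <linux/module.h>
#include <linux/notifier.h>

#ifdef CONFIG_MODULES

static int mysubsys_module_notify(struct notifier_block *self,
                                  unsigned long val, void *data)
{
        switch (val) {
        case MODULE_STATE_COMING:   /* undeclared if !CONFIG_MODULES */
                /* react to a module being loaded */
                break;
        case MODULE_STATE_GOING:
                /* react to a module being unloaded */
                break;
        }
        return 0;
}

static struct notifier_block mysubsys_module_nb = {
        .notifier_call = mysubsys_module_notify,
};

static int __init mysubsys_init_notifier(void)
{
        return register_module_notifier(&mysubsys_module_nb);
}
__initcall(mysubsys_init_notifier);

#endif /* CONFIG_MODULES */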
diff --git a/kernel/marker.c b/kernel/marker.c
index c14ec26a9b9f..ea54f2647868 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -896,6 +896,8 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe,
}
EXPORT_SYMBOL_GPL(marker_get_private_data);
+#ifdef CONFIG_MODULES
+
int marker_module_notify(struct notifier_block *self,
unsigned long val, void *data)
{
@@ -924,3 +926,5 @@ static int init_markers(void)
return register_module_notifier(&marker_module_nb);
}
__initcall(init_markers);
+
+#endif /* CONFIG_MODULES */
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 94ac4e35530d..79602740bbb5 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -542,6 +542,8 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
}
EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
+#ifdef CONFIG_MODULES
+
int tracepoint_module_notify(struct notifier_block *self,
unsigned long val, void *data)
{
@@ -570,3 +572,5 @@ static int init_tracepoints(void)
return register_module_notifier(&tracepoint_module_nb);
}
__initcall(init_tracepoints);
+
+#endif /* CONFIG_MODULES */
commit 0a7ad64531713e33e39af95bdbfb172f4f328b1e
Author: Ingo Molnar <mingo@elte.hu>
Date: Sun Nov 16 08:54:36 2008 +0100
tracepoints: format documentation
Impact: documentation update
Properly format Documentation/tracepoints.txt - it was full of
overlong lines and other typographical problems.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/Documentation/tracepoints.txt b/Documentation/tracepoints.txt
index 3a1c74384ffb..2d42241a25c3 100644
--- a/Documentation/tracepoints.txt
+++ b/Documentation/tracepoints.txt
@@ -3,28 +3,30 @@
Mathieu Desnoyers
-This document introduces Linux Kernel Tracepoints and their use. It provides
-examples of how to insert tracepoints in the kernel and connect probe functions
-to them and provides some examples of probe functions.
+This document introduces Linux Kernel Tracepoints and their use. It
+provides examples of how to insert tracepoints in the kernel and
+connect probe functions to them and provides some examples of probe
+functions.
* Purpose of tracepoints
-A tracepoint placed in code provides a hook to call a function (probe) that you
-can provide at runtime. A tracepoint can be "on" (a probe is connected to it) or
-"off" (no probe is attached). When a tracepoint is "off" it has no effect,
-except for adding a tiny time penalty (checking a condition for a branch) and
-space penalty (adding a few bytes for the function call at the end of the
-instrumented function and adds a data structure in a separate section). When a
-tracepoint is "on", the function you provide is called each time the tracepoint
-is executed, in the execution context of the caller. When the function provided
-ends its execution, it returns to the caller (continuing from the tracepoint
-site).
+A tracepoint placed in code provides a hook to call a function (probe)
+that you can provide at runtime. A tracepoint can be "on" (a probe is
+connected to it) or "off" (no probe is attached). When a tracepoint is
+"off" it has no effect, except for adding a tiny time penalty
+(checking a condition for a branch) and space penalty (adding a few
+bytes for the function call at the end of the instrumented function
+and adding a data structure in a separate section). When a tracepoint
+is "on", the function you provide is called each time the tracepoint
+is executed, in the execution context of the caller. When the function
+provided ends its execution, it returns to the caller (continuing from
+the tracepoint site).
You can put tracepoints at important locations in the code. They are
lightweight hooks that can pass an arbitrary number of parameters,
-which prototypes are described in a tracepoint declaration placed in a header
-file.
+whose prototypes are described in a tracepoint declaration placed in a
+header file.
They can be used for tracing and performance accounting.
@@ -63,36 +65,41 @@ Where :
- subsys_eventname is an identifier unique to your event
- subsys is the name of your subsystem.
- eventname is the name of the event to trace.
-- TPPTOTO(int firstarg, struct task_struct *p) is the prototype of the function
- called by this tracepoint.
-- TPARGS(firstarg, p) are the parameters names, same as found in the prototype.
-Connecting a function (probe) to a tracepoint is done by providing a probe
-(function to call) for the specific tracepoint through
+- TPPROTO(int firstarg, struct task_struct *p) is the prototype of the
+ function called by this tracepoint.
+
+- TPARGS(firstarg, p) are the parameter names, same as found in the
+ prototype.
+
+Connecting a function (probe) to a tracepoint is done by providing a
+probe (function to call) for the specific tracepoint through
register_trace_subsys_eventname(). Removing a probe is done through
unregister_trace_subsys_eventname(); it will remove the probe.
-tracepoint_synchronize_unregister() must be called before the end of the module exit
-function to make sure there is no caller left using the probe. This, and the
-fact that preemption is disabled around the probe call, make sure that probe
-removal and module unload are safe. See the "Probe example" section below for a
-sample probe module.
-
-The tracepoint mechanism supports inserting multiple instances of the same
-tracepoint, but a single definition must be made of a given tracepoint name over
-all the kernel to make sure no type conflict will occur. Name mangling of the
-tracepoints is done using the prototypes to make sure typing is correct.
-Verification of probe type correctness is done at the registration site by the
-compiler. Tracepoints can be put in inline functions, inlined static functions,
-and unrolled loops as well as regular functions.
-
-The naming scheme "subsys_event" is suggested here as a convention intended
-to limit collisions. Tracepoint names are global to the kernel: they are
-considered as being the same whether they are in the core kernel image or in
-modules.
+
+tracepoint_synchronize_unregister() must be called before the end of
+the module exit function to make sure there is no caller left using
+the probe. This, and the fact that preemption is disabled around the
+probe call, make sure that probe removal and module unload are safe.
+See the "Probe example" section below for a sample probe module.
+
+The tracepoint mechanism supports inserting multiple instances of the
+same tracepoint, but a single definition must be made of a given
+tracepoint name across the whole kernel to make sure no type conflict will
+occur. Name mangling of the tracepoints is done using the prototypes
+to make sure typing is correct. Verification of probe type correctness
+is done at the registration site by the compiler. Tracepoints can be
+put in inline functions, inlined static functions, and unrolled loops
+as well as regular functions.
+
+The naming scheme "subsys_event" is suggested here as a convention
+intended to limit collisions. Tracepoint names are global to the
+kernel: they are considered as being the same whether they are in the
+core kernel image or in modules.
If the tracepoint has to be used in kernel modules, an
-EXPORT_TRACEPOINT_SYMBOL_GPL() or EXPORT_TRACEPOINT_SYMBOL() can be used to
-export the defined tracepoints.
+EXPORT_TRACEPOINT_SYMBOL_GPL() or EXPORT_TRACEPOINT_SYMBOL() can be
+used to export the defined tracepoints.
* Probe / tracepoint example
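A hedged sketch of a probe module for the subsys_eventname tracepoint
described above, using the register/unregister helpers named in the
text; the probe's signature must match the tracepoint's prototype
exactly (the trace/subsys.h header and the printk body are
illustrative assumptions):

#include <linux/module.h>
#include <linux/sched.h>
#include <trace/subsys.h>   /* hypothetical header declaring the tracepoint */

/* Must match the tracepoint prototype exactly. */
static void probe_subsys_eventname(int firstarg, struct task_struct *p)
{
        printk(KERN_INFO "event: %d, comm: %s\n", firstarg, p->comm);
}

static int __init tp_sample_init(void)
{
        return register_trace_subsys_eventname(probe_subsys_eventname);
}

static void __exit tp_sample_exit(void)
{
        unregister_trace_subsys_eventname(probe_subsys_eventname);
        /* Wait for in-flight probe calls before the module text goes away. */
        tracepoint_synchronize_unregister();
}

module_init(tp_sample_init);
module_exit(tp_sample_exit);
MODULE_LICENSE("GPL");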