Patches contributed by Eötvös Loránd University

commit c1d2327b36f2261ffa8ff7227321ba900c7eee7f
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon May 12 21:20:55 2008 +0200

    ftrace: restrict tracing to HAVE_FTRACE architectures
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index ebc158e6d59a..f3005717bcd0 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -32,6 +32,7 @@ config IRQSOFF_TRACER
 	default n
 	depends on TRACE_IRQFLAGS_SUPPORT
 	depends on GENERIC_TIME
+	depends on HAVE_FTRACE
 	select TRACE_IRQFLAGS
 	select TRACING
 	select TRACER_MAX_TRACE
@@ -54,6 +55,7 @@ config PREEMPT_TRACER
 	default n
 	depends on GENERIC_TIME
 	depends on PREEMPT
+	depends on HAVE_FTRACE
 	select TRACING
 	select TRACER_MAX_TRACE
 	help
@@ -72,6 +74,7 @@ config PREEMPT_TRACER
 
 config SCHED_TRACER
 	bool "Scheduling Latency Tracer"
+	depends on HAVE_FTRACE
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	select TRACER_MAX_TRACE
@@ -81,6 +84,7 @@ config SCHED_TRACER
 
 config CONTEXT_SWITCH_TRACER
 	bool "Trace process context switches"
+	depends on HAVE_FTRACE
 	select TRACING
 	select MARKERS
 	help

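The HAVE_FTRACE dependency gates these tracers on architecture support instead of offering them everywhere. An architecture opts in from its own Kconfig; a minimal sketch of the convention used at the time (x86 advertised support this way, entry abbreviated):

config X86
	def_bool y
	select HAVE_FTRACE
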
commit 4d9493c90f8e6e1b164aede3814010a290161abb
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon May 12 21:20:54 2008 +0200

    ftrace: remove ad-hoc code
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/sched.c b/kernel/sched.c
index 1ec3fb2efee6..ad95cca4e42e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2412,53 +2412,6 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_CONTEXT_SWITCH_TRACER
-
-void ftrace_task(struct task_struct *p, void *__tr, void *__data)
-{
-#if 0
-	/*  
-	 * trace timeline tree
-	 */
-	__trace_special(__tr, __data,
-			p->pid, p->se.vruntime, p->se.sum_exec_runtime);
-#else
-	/*
-	 * trace balance metrics
-	 */
-	__trace_special(__tr, __data,
-			p->pid, p->se.avg_overlap, 0);
-#endif
-}
-
-void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
-{
-	struct task_struct *p;
-	struct sched_entity *se;
-	struct rb_node *curr;
-	struct rq *rq = __rq;
-
-	if (rq->cfs.curr) {
-		p = task_of(rq->cfs.curr);
-		ftrace_task(p, __tr, __data);
-	}
-	if (rq->cfs.next) {
-		p = task_of(rq->cfs.next);
-		ftrace_task(p, __tr, __data);
-	}
-
-	for (curr = first_fair(&rq->cfs); curr; curr = rb_next(curr)) {
-		se = rb_entry(curr, struct sched_entity, run_node);
-		if (!entity_is_task(se))
-			continue;
-
-		p = task_of(se);
-		ftrace_task(p, __tr, __data);
-	}
-}
-
-#endif
-
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index dc1856f10795..e24ecd39c4b8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1061,8 +1061,6 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	if (!(this_sd->flags & SD_WAKE_AFFINE))
 		return 0;
 
-	ftrace_special(__LINE__, curr->se.avg_overlap, sync);
-	ftrace_special(__LINE__, p->se.avg_overlap, -1);
 	/*
 	 * If the currently running task will sleep within
 	 * a reasonable amount of time then attract this newly
@@ -1240,7 +1238,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	if (unlikely(se == pse))
 		return;
 
-	ftrace_special(__LINE__, p->pid, se->last_wakeup);
 	cfs_rq_of(pse)->next = pse;
 
 	/*
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index bddf676914ed..5671db0e1827 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -36,11 +36,8 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1)) {
+	if (likely(disabled == 1))
 		tracing_sched_switch_trace(tr, data, prev, next, flags);
-		if (trace_flags & TRACE_ITER_SCHED_TREE)
-			ftrace_all_fair_tasks(__rq, tr, data);
-	}
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -65,11 +62,8 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1)) {
+	if (likely(disabled == 1))
 		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
-		if (trace_flags & TRACE_ITER_SCHED_TREE)
-			ftrace_all_fair_tasks(__rq, tr, data);
-	}
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);

commit 694379e9ed4f2f6babe111bf001c69e2e263338b
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon May 12 21:20:54 2008 +0200

    ftrace: make it more available in the Kconfig
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index eb1988ed84b7..ebc158e6d59a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -14,7 +14,7 @@ config TRACING
 
 config FTRACE
 	bool "Kernel Function Tracer"
-	depends on DEBUG_KERNEL && HAVE_FTRACE
+	depends on HAVE_FTRACE
 	select FRAME_POINTER
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
@@ -72,7 +72,6 @@ config PREEMPT_TRACER
 
 config SCHED_TRACER
 	bool "Scheduling Latency Tracer"
-	depends on DEBUG_KERNEL
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	select TRACER_MAX_TRACE
@@ -82,7 +81,6 @@ config SCHED_TRACER
 
 config CONTEXT_SWITCH_TRACER
 	bool "Trace process context switches"
-	depends on DEBUG_KERNEL
 	select TRACING
 	select MARKERS
 	help

commit 88a4216c3ec4281fc7e6725cc3a3ccd01fb1aa14
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon May 12 21:20:53 2008 +0200

    ftrace: sched special
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5b186bed54bc..360ca99033d2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2138,6 +2138,8 @@ extern void
 ftrace_wake_up_task(void *rq, struct task_struct *wakee,
 		    struct task_struct *curr);
 extern void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data);
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
 #else
 static inline void
 ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next)
@@ -2155,6 +2157,10 @@ ftrace_wake_up_task(void *rq, struct task_struct *wakee,
 static inline void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
 {
 }
+static inline void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+}
 #endif
 
 extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e24ecd39c4b8..dc1856f10795 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1061,6 +1061,8 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	if (!(this_sd->flags & SD_WAKE_AFFINE))
 		return 0;
 
+	ftrace_special(__LINE__, curr->se.avg_overlap, sync);
+	ftrace_special(__LINE__, p->se.avg_overlap, -1);
 	/*
 	 * If the currently running task will sleep within
 	 * a reasonable amount of time then attract this newly
@@ -1238,6 +1240,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	if (unlikely(se == pse))
 		return;
 
+	ftrace_special(__LINE__, p->pid, se->last_wakeup);
 	cfs_rq_of(pse)->next = pse;
 
 	/*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3a4032492fcb..b87a26414892 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1251,7 +1251,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 				 comm);
 		break;
 	case TRACE_SPECIAL:
-		trace_seq_printf(s, " %ld %ld %ld\n",
+		trace_seq_printf(s, "# %ld %ld %ld\n",
 				 entry->special.arg1,
 				 entry->special.arg2,
 				 entry->special.arg3);
@@ -1335,7 +1335,7 @@ static int print_trace_fmt(struct trace_iterator *iter)
 			return 0;
 		break;
 	case TRACE_SPECIAL:
-		ret = trace_seq_printf(s, " %ld %ld %ld\n",
+		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
 				 entry->special.arg1,
 				 entry->special.arg2,
 				 entry->special.arg3);
@@ -1400,7 +1400,7 @@ static int print_raw_fmt(struct trace_iterator *iter)
 		break;
 	case TRACE_SPECIAL:
 	case TRACE_STACK:
-		ret = trace_seq_printf(s, " %ld %ld %ld\n",
+		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
 				 entry->special.arg1,
 				 entry->special.arg2,
 				 entry->special.arg3);
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 5a217e863586..bddf676914ed 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -103,6 +103,30 @@ ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
 	wakeup_sched_wakeup(wakee, curr);
 }
 
+void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+	struct trace_array *tr = ctx_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+
+	if (!tracer_enabled)
+		return;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1))
+		__trace_special(tr, data, arg1, arg2, arg3);
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
 static void sched_switch_reset(struct trace_array *tr)
 {
 	int cpu;

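This introduces ftrace_special(), a hook for logging three arbitrary values from anywhere in the kernel as a TRACE_SPECIAL entry; the '#' prefix added to the three printf formats in trace.c marks such entries in the trace output. A minimal sketch of a call site, modeled on the wake_affine() calls above (debug_note_wakeup is a hypothetical helper, not part of the patch):

/* hypothetical debugging aid: log line number, pid and the sync flag */
static void debug_note_wakeup(struct task_struct *p, int sync)
{
	ftrace_special(__LINE__, p->pid, sync);
}
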
commit 36fc25a9f48deacd8aa08cd2d1c186a4e412604f
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon May 12 21:20:53 2008 +0200

    ftrace: sched tree fix
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/sched.c b/kernel/sched.c
index 9ca4a2e6a236..3bc7c5362998 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2414,6 +2414,23 @@ static int sched_balance_self(int cpu, int flag)
 
 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
 
+void ftrace_task(struct task_struct *p, void *__tr, void *__data)
+{
+#if 0
+	/*  
+	 * trace timeline tree
+	 */
+	__trace_special(__tr, __data,
+			p->pid, p->se.vruntime, p->se.sum_exec_runtime);
+#else
+	/*
+	 * trace balance metrics
+	 */
+	__trace_special(__tr, __data,
+			p->pid, p->se.avg_overlap, 0);
+#endif
+}
+
 void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
 {
 	struct task_struct *p;
@@ -2421,32 +2438,22 @@ void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
 	struct rb_node *curr;
 	struct rq *rq = __rq;
 
-	curr = first_fair(&rq->cfs);
-	if (!curr)
-		return;
-
 	if (rq->cfs.curr) {
 		p = task_of(rq->cfs.curr);
-		__trace_special(__tr, __data,
-		      p->pid, p->se.vruntime, p->se.sum_exec_runtime);
+		ftrace_task(p, __tr, __data);
 	}
 	if (rq->cfs.next) {
 		p = task_of(rq->cfs.next);
-		__trace_special(__tr, __data,
-		      p->pid, p->se.vruntime, p->se.sum_exec_runtime);
+		ftrace_task(p, __tr, __data);
 	}
 
-	while (curr) {
+	for (curr = first_fair(&rq->cfs); curr; curr = rb_next(curr)) {
 		se = rb_entry(curr, struct sched_entity, run_node);
 		if (!entity_is_task(se))
 			continue;
 
 		p = task_of(se);
-
-		__trace_special(__tr, __data,
-			      p->pid, p->se.vruntime, p->se.sum_exec_runtime);
-
-		curr = rb_next(curr);
+		ftrace_task(p, __tr, __data);
 	}
 }
 

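Two behavioral changes ride along with the new ftrace_task() helper: the early return on an empty CFS rbtree is gone, so rq->cfs.curr and rq->cfs.next are traced even when nothing else is queued, and the #if 0 block makes the traced payload switchable between a timeline tree (pid, vruntime, sum_exec_runtime) and the balance metrics (pid, avg_overlap) compiled in by default.
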
commit f29c73fe3404f8799ed57aaf48859e0b55fc071f
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon May 12 21:20:53 2008 +0200

    ftrace: include cpu in stacktrace
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 688b4cf72d99..3a4032492fcb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1227,10 +1227,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 				 abs_usecs % 1000, rel_usecs/1000,
 				 rel_usecs % 1000);
 	} else {
-		if (entry->type != TRACE_STACK) {
-			lat_print_generic(s, entry, cpu);
-			lat_print_timestamp(s, abs_usecs, rel_usecs);
-		}
+		lat_print_generic(s, entry, cpu);
+		lat_print_timestamp(s, abs_usecs, rel_usecs);
 	}
 	switch (entry->type) {
 	case TRACE_FN:
@@ -1293,17 +1291,15 @@ static int print_trace_fmt(struct trace_iterator *iter)
 	usec_rem = do_div(t, 1000000ULL);
 	secs = (unsigned long)t;
 
-	if (entry->type != TRACE_STACK) {
-		ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
-		if (!ret)
-			return 0;
-		ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
-		if (!ret)
-			return 0;
-		ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
-		if (!ret)
-			return 0;
-	}
+	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+	if (!ret)
+		return 0;
+	ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
+	if (!ret)
+		return 0;
+	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
+	if (!ret)
+		return 0;
 
 	switch (entry->type) {
 	case TRACE_FN:

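With the TRACE_STACK special cases removed, stack entries carry the same header as every other entry. Per the format strings above, each line now starts with comm-pid, the CPU in brackets, and a seconds.microseconds timestamp, e.g. (values illustrative):

	          <idle>-0     [01]  1034.000123: ...
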
commit 442e544ce52d4415a024920b84fb95c5f9aa0855
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon May 12 21:20:53 2008 +0200

    ftrace: iter ctrl fix
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3ae17e254fc3..688b4cf72d99 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1920,6 +1920,11 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
 			break;
 		}
 	}
+	/*
+	 * If no option could be set, return an error:
+	 */
+	if (!trace_options[i])
+		return -EINVAL;
 
 	filp->f_pos += cnt;
 

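With the new check, a write that matches no known option name is rejected instead of silently consuming the input: writing a bogus name such as "bogus" to the iter_ctrl file now fails with -EINVAL, while a valid option name such as "print-parent" is still accepted. (The file lives in the tracing debugfs directory, i.e. /sys/kernel/debug/tracing/iter_ctrl when debugfs is mounted at the usual place.)
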
commit d9af56fbd8b37181bffaad060f74aa2e17382874
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon May 12 21:20:53 2008 +0200

    ftrace: fix cmdline tracing
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 5555b906a666..5a217e863586 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -29,6 +29,8 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
 	if (!tracer_enabled)
 		return;
 
+	tracing_record_cmdline(prev);
+
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -56,6 +58,8 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
 	if (!tracer_enabled)
 		return;
 
+	tracing_record_cmdline(curr);
+
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -75,8 +79,6 @@ void
 ftrace_ctx_switch(void *__rq, struct task_struct *prev,
 		  struct task_struct *next)
 {
-	tracing_record_cmdline(prev);
-
 	/*
 	 * If tracer_switch_func only points to the local
 	 * switch func, it still needs the ptr passed to it.
@@ -93,8 +95,6 @@ void
 ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
 		    struct task_struct *curr)
 {
-	tracing_record_cmdline(curr);
-
 	wakeup_func(__rq, wakee, curr);
 
 	/*

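The fix moves tracing_record_cmdline() out of the unconditional ftrace_ctx_switch()/ftrace_wake_up_task() wrappers and into ctx_switch_func()/wakeup_func(), after the tracer_enabled check, so pid-to-comm mappings are only recorded while a tracer is actually running rather than on every context switch and wakeup in the system.
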
commit 36dfe9252bd4c9b55e8454363fb7e444c92c5030
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon May 12 21:20:52 2008 +0200

    ftrace: make use of tracing_cpumask
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 18f0ab88ca85..3ae17e254fc3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -70,23 +70,6 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
 
-/*
- * Only trace on a CPU if the bitmask is set:
- */
-static cpumask_t tracing_cpumask __read_mostly = CPU_MASK_ALL;
-
-/*
- * The tracer itself will not take this lock, but still we want
- * to provide a consistent cpumask to user-space:
- */
-static DEFINE_MUTEX(tracing_cpumask_update_lock);
-
-/*
- * Temporary storage for the character representation of the
- * CPU bitmask:
- */
-static char mask_str[NR_CPUS];
-
 void trace_wake_up(void)
 {
 	/*
@@ -1776,19 +1759,46 @@ static struct file_operations show_traces_fops = {
 	.release	= seq_release,
 };
 
+/*
+ * Only trace on a CPU if the bitmask is set:
+ */
+static cpumask_t tracing_cpumask = CPU_MASK_ALL;
+
+/*
+ * When tracing/tracing_cpu_mask is modified then this holds
+ * the new bitmask we are about to install:
+ */
+static cpumask_t tracing_cpumask_new;
+
+/*
+ * The tracer itself will not take this lock, but still we want
+ * to provide a consistent cpumask to user-space:
+ */
+static DEFINE_MUTEX(tracing_cpumask_update_lock);
+
+/*
+ * Temporary storage for the character representation of the
+ * CPU bitmask (and one more byte for the newline):
+ */
+static char mask_str[NR_CPUS + 1];
+
 static ssize_t
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
 		     size_t count, loff_t *ppos)
 {
-	int err;
-
-	count = min(count, (size_t)NR_CPUS);
+	int len;
 
 	mutex_lock(&tracing_cpumask_update_lock);
-	cpumask_scnprintf(mask_str, NR_CPUS, tracing_cpumask);
-	err = copy_to_user(ubuf, mask_str, count);
-	if (err)
-		count = -EFAULT;
+
+	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+	if (count - len < 2) {
+		count = -EINVAL;
+		goto out_err;
+	}
+	len += sprintf(mask_str + len, "\n");
+	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+
+out_err:
 	mutex_unlock(&tracing_cpumask_update_lock);
 
 	return count;
@@ -1798,16 +1808,40 @@ static ssize_t
 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
-	int err;
+	int err, cpu;
 
 	mutex_lock(&tracing_cpumask_update_lock);
-	err = cpumask_parse_user(ubuf, count, tracing_cpumask);
-	mutex_unlock(&tracing_cpumask_update_lock);
-
+	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
 	if (err)
-		return err;
+		goto err_unlock;
+
+	spin_lock_irq(&ftrace_max_lock);
+	for_each_possible_cpu(cpu) {
+		/*
+		 * Increase/decrease the disabled counter if we are
+		 * about to flip a bit in the cpumask:
+		 */
+		if (cpu_isset(cpu, tracing_cpumask) &&
+				!cpu_isset(cpu, tracing_cpumask_new)) {
+			atomic_inc(&global_trace.data[cpu]->disabled);
+		}
+		if (!cpu_isset(cpu, tracing_cpumask) &&
+				cpu_isset(cpu, tracing_cpumask_new)) {
+			atomic_dec(&global_trace.data[cpu]->disabled);
+		}
+	}
+	spin_unlock_irq(&ftrace_max_lock);
+
+	tracing_cpumask = tracing_cpumask_new;
+
+	mutex_unlock(&tracing_cpumask_update_lock);
 
 	return count;
+
+err_unlock:
+	mutex_unlock(&tracing_cpumask_update_lock);
+
+	return err;
 }
 
 static struct file_operations tracing_cpumask_fops = {
@@ -1846,8 +1880,7 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
 	r += sprintf(buf + r, "\n");
 	WARN_ON(r >= len + 2);
 
-	r = simple_read_from_buffer(ubuf, cnt, ppos,
-				    buf, r);
+	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 
 	kfree(buf);
 

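Two things change here. The write path parses into tracing_cpumask_new first and, under ftrace_max_lock with interrupts disabled, increments or decrements each CPU's disabled counter as its bit flips, so tracing on a CPU is switched atomically with respect to the tracers before the new mask is installed. The read path replaces the copy_to_user() version introduced in commit c7078de1 below, which ignored *ppos and therefore returned the same bytes on every read, with simple_read_from_buffer(); mask_str grows one byte for the appended newline. Typical usage, assuming debugfs is mounted at /sys/kernel/debug: writing a hex mask such as 3 to tracing/tracing_cpumask restricts tracing to CPUs 0 and 1.
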
commit c7078de1aaf562a31b20984409c38cc1b40fa8a3
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon May 12 21:20:52 2008 +0200

    ftrace: add tracing_cpumask
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 06380dc1ebe0..18f0ab88ca85 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -70,6 +70,23 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
 
+/*
+ * Only trace on a CPU if the bitmask is set:
+ */
+static cpumask_t tracing_cpumask __read_mostly = CPU_MASK_ALL;
+
+/*
+ * The tracer itself will not take this lock, but still we want
+ * to provide a consistent cpumask to user-space:
+ */
+static DEFINE_MUTEX(tracing_cpumask_update_lock);
+
+/*
+ * Temporary storage for the character representation of the
+ * CPU bitmask:
+ */
+static char mask_str[NR_CPUS];
+
 void trace_wake_up(void)
 {
 	/*
@@ -1754,9 +1771,49 @@ static struct file_operations tracing_lt_fops = {
 };
 
 static struct file_operations show_traces_fops = {
-	.open = show_traces_open,
-	.read = seq_read,
-	.release = seq_release,
+	.open		= show_traces_open,
+	.read		= seq_read,
+	.release	= seq_release,
+};
+
+static ssize_t
+tracing_cpumask_read(struct file *filp, char __user *ubuf,
+		     size_t count, loff_t *ppos)
+{
+	int err;
+
+	count = min(count, (size_t)NR_CPUS);
+
+	mutex_lock(&tracing_cpumask_update_lock);
+	cpumask_scnprintf(mask_str, NR_CPUS, tracing_cpumask);
+	err = copy_to_user(ubuf, mask_str, count);
+	if (err)
+		count = -EFAULT;
+	mutex_unlock(&tracing_cpumask_update_lock);
+
+	return count;
+}
+
+static ssize_t
+tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+		      size_t count, loff_t *ppos)
+{
+	int err;
+
+	mutex_lock(&tracing_cpumask_update_lock);
+	err = cpumask_parse_user(ubuf, count, tracing_cpumask);
+	mutex_unlock(&tracing_cpumask_update_lock);
+
+	if (err)
+		return err;
+
+	return count;
+}
+
+static struct file_operations tracing_cpumask_fops = {
+	.open		= tracing_open_generic,
+	.read		= tracing_cpumask_read,
+	.write		= tracing_cpumask_write,
 };
 
 static ssize_t
@@ -1837,9 +1894,9 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
 }
 
 static struct file_operations tracing_iter_fops = {
-	.open = tracing_open_generic,
-	.read = tracing_iter_ctrl_read,
-	.write = tracing_iter_ctrl_write,
+	.open		= tracing_open_generic,
+	.read		= tracing_iter_ctrl_read,
+	.write		= tracing_iter_ctrl_write,
 };
 
 static const char readme_msg[] =
@@ -1870,8 +1927,8 @@ tracing_readme_read(struct file *filp, char __user *ubuf,
 }
 
 static struct file_operations tracing_readme_fops = {
-	.open = tracing_open_generic,
-	.read = tracing_readme_read,
+	.open		= tracing_open_generic,
+	.read		= tracing_readme_read,
 };
 
 static ssize_t
@@ -2334,6 +2391,11 @@ static __init void tracer_init_debugfs(void)
 	if (!entry)
 		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
 
+	entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
+				    NULL, &tracing_cpumask_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
+
 	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
 				    &global_trace, &tracing_lt_fops);
 	if (!entry)