Patches contributed by Eötvös Loránd University


commit 0fec171cdbd7763ef86cbaccb91f3708de6a9003
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:52:01 2007 +0200

    sched: clean up sleep_on() APIs
    
    clean up the sleep_on() APIs:
    
     - do not use fastcall
     - replace fragile macro magic with proper inline functions
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/wait.h b/include/linux/wait.h
index e820d00e1383..0e686280450b 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -366,15 +366,15 @@ static inline void remove_wait_queue_locked(wait_queue_head_t *q,
 
 /*
  * These are the old interfaces to sleep waiting for an event.
- * They are racy.  DO NOT use them, use the wait_event* interfaces above.  
- * We plan to remove these interfaces during 2.7.
+ * They are racy.  DO NOT use them, use the wait_event* interfaces above.
+ * We plan to remove these interfaces.
  */
-extern void FASTCALL(sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
-				      signed long timeout));
-extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
-						    signed long timeout));
+extern void sleep_on(wait_queue_head_t *q);
+extern long sleep_on_timeout(wait_queue_head_t *q,
+				      signed long timeout);
+extern void interruptible_sleep_on(wait_queue_head_t *q);
+extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
+					   signed long timeout);
 
 /*
  * Waitqueues which are removed from the waitqueue_head at wakeup time
diff --git a/kernel/sched.c b/kernel/sched.c
index ef6b6bb3e0b2..0e3caf742ae3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3699,74 +3699,85 @@ wait_for_completion_interruptible_timeout(struct completion *x,
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
-
-#define	SLEEP_ON_VAR					\
-	unsigned long flags;				\
-	wait_queue_t wait;				\
-	init_waitqueue_entry(&wait, current);
-
-#define SLEEP_ON_HEAD					\
-	spin_lock_irqsave(&q->lock,flags);		\
-	__add_wait_queue(q, &wait);			\
+static inline void
+sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+{
+	spin_lock_irqsave(&q->lock, *flags);
+	__add_wait_queue(q, wait);
 	spin_unlock(&q->lock);
+}
 
-#define	SLEEP_ON_TAIL					\
-	spin_lock_irq(&q->lock);			\
-	__remove_wait_queue(q, &wait);			\
-	spin_unlock_irqrestore(&q->lock, flags);
+static inline void
+sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+{
+	spin_lock_irq(&q->lock);
+	__remove_wait_queue(q, wait);
+	spin_unlock_irqrestore(&q->lock, *flags);
+}
 
-void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
+void __sched interruptible_sleep_on(wait_queue_head_t *q)
 {
-	SLEEP_ON_VAR
+	unsigned long flags;
+	wait_queue_t wait;
+
+	init_waitqueue_entry(&wait, current);
 
 	current->state = TASK_INTERRUPTIBLE;
 
-	SLEEP_ON_HEAD
+	sleep_on_head(q, &wait, &flags);
 	schedule();
-	SLEEP_ON_TAIL
+	sleep_on_tail(q, &wait, &flags);
 }
 EXPORT_SYMBOL(interruptible_sleep_on);
 
-long fastcall __sched
+long __sched
 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-	SLEEP_ON_VAR
+	unsigned long flags;
+	wait_queue_t wait;
+
+	init_waitqueue_entry(&wait, current);
 
 	current->state = TASK_INTERRUPTIBLE;
 
-	SLEEP_ON_HEAD
+	sleep_on_head(q, &wait, &flags);
 	timeout = schedule_timeout(timeout);
-	SLEEP_ON_TAIL
+	sleep_on_tail(q, &wait, &flags);
 
 	return timeout;
 }
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
-void fastcall __sched sleep_on(wait_queue_head_t *q)
+void __sched sleep_on(wait_queue_head_t *q)
 {
-	SLEEP_ON_VAR
+	unsigned long flags;
+	wait_queue_t wait;
+
+	init_waitqueue_entry(&wait, current);
 
 	current->state = TASK_UNINTERRUPTIBLE;
 
-	SLEEP_ON_HEAD
+	sleep_on_head(q, &wait, &flags);
 	schedule();
-	SLEEP_ON_TAIL
+	sleep_on_tail(q, &wait, &flags);
 }
 EXPORT_SYMBOL(sleep_on);
 
-long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-	SLEEP_ON_VAR
+	unsigned long flags;
+	wait_queue_t wait;
+
+	init_waitqueue_entry(&wait, current);
 
 	current->state = TASK_UNINTERRUPTIBLE;
 
-	SLEEP_ON_HEAD
+	sleep_on_head(q, &wait, &flags);
 	timeout = schedule_timeout(timeout);
-	SLEEP_ON_TAIL
+	sleep_on_tail(q, &wait, &flags);
 
 	return timeout;
 }
-
 EXPORT_SYMBOL(sleep_on_timeout);
 
 #ifdef CONFIG_RT_MUTEXES
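
The wait.h comment above calls these interfaces racy: the usual calling pattern tests a condition and only then calls sleep_on(), so a wake_up() that arrives between the test and the enqueue is lost and the task can sleep forever. A minimal sketch of the problem and of the wait_event() replacement (the 'wq' and 'done' names are made up purely for illustration):

	/* racy: 'done' can become true and wake_up(&wq) can run after the
	 * test below but before sleep_on() has queued us on 'wq'; that
	 * wakeup is lost and the task may sleep indefinitely */
	if (!done)
		sleep_on(&wq);

	/* safe: wait_event() queues the task first and re-tests the
	 * condition, so a concurrent wake_up(&wq) cannot be lost */
	wait_event(wq, done);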

commit 9761eea8516d1ff2c7b185e283c5d81cfc307acb
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:52:00 2007 +0200

    sched: style cleanups
    
    4 small style cleanups to sched.c: checkpatch.pl is now happy about
    the totality of sched.c [ignoring false positives] - yay! ;-)
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index f8cf78c6af2e..ef6b6bb3e0b2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1265,9 +1265,9 @@ static int sched_balance_self(int cpu, int flag)
 	struct sched_domain *tmp, *sd = NULL;
 
 	for_each_domain(cpu, tmp) {
- 		/*
- 	 	 * If power savings logic is enabled for a domain, stop there.
- 	 	 */
+		/*
+		 * If power savings logic is enabled for a domain, stop there.
+		 */
 		if (tmp->flags & SD_POWERSAVINGS_BALANCE)
 			break;
 		if (tmp->flags & flag)
@@ -1350,9 +1350,9 @@ static int wake_idle(int cpu, struct task_struct *p)
 				if (idle_cpu(i))
 					return i;
 			}
-		}
-		else
+		} else {
 			break;
+		}
 	}
 	return cpu;
 }
@@ -1702,7 +1702,7 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
 		/*
 		 * Remove function-return probe instances associated with this
 		 * task and put them back on the free list.
-	 	 */
+		 */
 		kprobe_flush_task(prev);
 		put_task_struct(prev);
 	}
@@ -5920,6 +5920,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		sched_group_nodes[i] = sg;
 		for_each_cpu_mask(j, nodemask) {
 			struct sched_domain *sd;
+
 			sd = &per_cpu(node_domains, j);
 			sd->groups = sg;
 		}

commit 23bdd703a585a869f2eb32fb9f66749d0476d71e
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:52:00 2007 +0200

    sched: do not set softirqs to nice +19
    
    do not set softirqs to nice +19. _If_ for whatever reason
    we missed to process some high-prio softirq and woke up
    ksoftirqd, we should give it a fair chance to actually
    get some work done, even if the system is under load.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0b9886a00e74..73217a9e2875 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -488,7 +488,6 @@ void __init softirq_init(void)
 
 static int ksoftirqd(void * __bind_cpu)
 {
-	set_user_nice(current, 19);
 	current->flags |= PF_NOFREEZE;
 
 	set_current_state(TASK_INTERRUPTIBLE);

commit 5e7eaade55d53da856f0e07dc9c188f78f780192
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:52:00 2007 +0200

    sched: add CFS documentation
    
    add Documentation/sched-design-CFS.txt
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/Documentation/sched-design-CFS.txt b/Documentation/sched-design-CFS.txt
new file mode 100644
index 000000000000..16feebb7bdc0
--- /dev/null
+++ b/Documentation/sched-design-CFS.txt
@@ -0,0 +1,119 @@
+
+This is the CFS scheduler.
+
+80% of CFS's design can be summed up in a single sentence: CFS basically
+models an "ideal, precise multi-tasking CPU" on real hardware.
+
+"Ideal multi-tasking CPU" is a (non-existent  :-))  CPU that has 100%
+physical power and which can run each task at precise equal speed, in
+parallel, each at 1/nr_running speed. For example: if there are 2 tasks
+running then it runs each at 50% physical power - totally in parallel.
+
+On real hardware, we can run only a single task at once, so while that
+one task runs, the other tasks that are waiting for the CPU are at a
+disadvantage - the current task gets an unfair amount of CPU time. In
+CFS this fairness imbalance is expressed and tracked via the per-task
+p->wait_runtime (nanosec-unit) value. "wait_runtime" is the amount of
+time the task should now run on the CPU for it to become completely fair
+and balanced.
+
+( small detail: on 'ideal' hardware, the p->wait_runtime value would
+  always be zero - no task would ever get 'out of balance' from the
+  'ideal' share of CPU time. )
+
+CFS's task picking logic is based on this p->wait_runtime value and it
+is thus very simple: it always tries to run the task with the largest
+p->wait_runtime value. In other words, CFS tries to run the task with
+the 'gravest need' for more CPU time. So CFS always tries to split up
+CPU time between runnable tasks as close to 'ideal multitasking
+hardware' as possible.
+
+Most of the rest of CFS's design just falls out of this really simple
+concept, with a few add-on embellishments like nice levels,
+multiprocessing and various algorithm variants to recognize sleepers.
+
+In practice it works like this: the system runs a task a bit, and when
+the task schedules (or a scheduler tick happens) the task's CPU usage is
+'accounted for': the (small) time it just spent using the physical CPU
+is deducted from p->wait_runtime. [minus the 'fair share' it would have
+gotten anyway]. Once p->wait_runtime gets low enough so that another
+task becomes the 'leftmost task' of the time-ordered rbtree it maintains
+(plus a small amount of 'granularity' distance relative to the leftmost
+task so that we do not over-schedule tasks and trash the cache) then the
+new leftmost task is picked and the current task is preempted.
+
+The rq->fair_clock value tracks the 'CPU time a runnable task would have
+fairly gotten, had it been runnable during that time'. So by using
+rq->fair_clock values we can accurately timestamp and measure the
+'expected CPU time' a task should have gotten. All runnable tasks are
+sorted in the rbtree by the "rq->fair_clock - p->wait_runtime" key, and
+CFS picks the 'leftmost' task and sticks to it. As the system progresses
+forwards, newly woken tasks are put into the tree more and more to the
+right - slowly but surely giving a chance for every task to become the
+'leftmost task' and thus get on the CPU within a deterministic amount of
+time.
+
+Some implementation details:
+
+ - the introduction of Scheduling Classes: an extensible hierarchy of
+   scheduler modules. These modules encapsulate scheduling policy
+   details and are handled by the scheduler core without the core
+   code assuming about them too much.
+
+ - sched_fair.c implements the 'CFS desktop scheduler': it is a
+   replacement for the vanilla scheduler's SCHED_OTHER interactivity
+   code.
+
+   I'd like to give credit to Con Kolivas for the general approach here:
+   he has proven via RSDL/SD that 'fair scheduling' is possible and that
+   it results in better desktop scheduling. Kudos Con!
+
+   The CFS patch uses a completely different approach and implementation
+   from RSDL/SD. My goal was to make CFS's interactivity quality exceed
+   that of RSDL/SD, which is a high standard to meet :-) Testing
+   feedback is welcome to decide this one way or another. [ and, in any
+   case, all of SD's logic could be added via a kernel/sched_sd.c module
+   as well, if Con is interested in such an approach. ]
+
+   CFS's design is quite radical: it does not use runqueues, it uses a
+   time-ordered rbtree to build a 'timeline' of future task execution,
+   and thus has no 'array switch' artifacts (by which both the vanilla
+   scheduler and RSDL/SD are affected).
+
+   CFS uses nanosecond granularity accounting and does not rely on any
+   jiffies or other HZ detail. Thus the CFS scheduler has no notion of
+   'timeslices' and has no heuristics whatsoever. There is only one
+   central tunable:
+
+         /proc/sys/kernel/sched_granularity_ns
+
+   which can be used to tune the scheduler from 'desktop' (low
+   latencies) to 'server' (good batching) workloads. It defaults to a
+   setting suitable for desktop workloads. SCHED_BATCH is handled by the
+   CFS scheduler module too.
+
+   Due to its design, the CFS scheduler is not prone to any of the
+   'attacks' that exist today against the heuristics of the stock
+   scheduler: fiftyp.c, thud.c, chew.c, ring-test.c, massive_intr.c all
+   work fine and do not impact interactivity and produce the expected
+   behavior.
+
+   the CFS scheduler has a much stronger handling of nice levels and
+   SCHED_BATCH: both types of workloads should be isolated much more
+   agressively than under the vanilla scheduler.
+
+   ( another detail: due to nanosec accounting and timeline sorting,
+     sched_yield() support is very simple under CFS, and in fact under
+     CFS sched_yield() behaves much better than under any other
+     scheduler i have tested so far. )
+
+ - sched_rt.c implements SCHED_FIFO and SCHED_RR semantics, in a simpler
+   way than the vanilla scheduler does. It uses 100 runqueues (for all
+   100 RT priority levels, instead of 140 in the vanilla scheduler)
+   and it needs no expired array.
+
+ - reworked/sanitized SMP load-balancing: the runqueue-walking
+   assumptions are gone from the load-balancing code now, and
+   iterators of the scheduling modules are used. The balancing code got
+   quite a bit simpler as a result.
+
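
As a rough user-space toy model of the accounting described above (not the kernel implementation: a plain array stands in for the time-ordered rbtree, and the numbers are arbitrary examples), the picking rule and the wait_runtime bookkeeping look roughly like this:

	#include <stdio.h>

	struct toy_task {
		const char *comm;
		long long wait_runtime;	/* ns of CPU time the task is still owed */
	};

	int main(void)
	{
		struct toy_task t[] = {
			{ "editor",   2000000 },	/* owed 2 ms */
			{ "compiler", -500000 },	/* ran ahead of its fair share */
			{ "daemon",    100000 },
		};
		int nr_running = 3, i, leftmost = 0;
		long long fair_clock = 10000000;	/* stand-in for rq->fair_clock */
		long long delta_exec, fair_share;

		/* key = fair_clock - wait_runtime: the smallest key (the
		 * "leftmost" node of the real rbtree) belongs to the task
		 * with the gravest need for CPU time */
		for (i = 1; i < nr_running; i++)
			if (fair_clock - t[i].wait_runtime <
			    fair_clock - t[leftmost].wait_runtime)
				leftmost = i;

		printf("picked: %s\n", t[leftmost].comm);

		/* account for 1 ms of execution: the picked task is charged
		 * for the CPU it used minus the 1/nr_running share it would
		 * have gotten anyway, and fair_clock advances by that share */
		delta_exec = 1000000;
		fair_share = delta_exec / nr_running;
		t[leftmost].wait_runtime -= delta_exec - fair_share;
		fair_clock += fair_share;

		printf("%s now owed %lld ns\n",
		       t[leftmost].comm, t[leftmost].wait_runtime);
		return 0;
	}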

commit b642b6d3fad45f659270a9e35df876b38c489082
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:52:00 2007 +0200

    sched: scheduler debugging, enable in Kconfig
    
    enable CONFIG_SCHED_DEBUG in lib/Kconfig.debug.
    
    the runtime overhead of this option is very small.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index da95e10cfd70..fab32a286371 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -105,6 +105,15 @@ config DETECT_SOFTLOCKUP
 	   can be detected via the NMI-watchdog, on platforms that
 	   support it.)
 
+config SCHED_DEBUG
+	bool "Collect scheduler debugging info"
+	depends on DEBUG_KERNEL && PROC_FS
+	default y
+	help
+	  If you say Y here, the /proc/sched_debug file will be provided
+	  that can help debug the scheduler. The runtime overhead of this
+	  option is minimal.
+
 config SCHEDSTATS
 	bool "Collect scheduler statistics"
 	depends on DEBUG_KERNEL && PROC_FS

commit 43ae34cb4cd650d1eb4460a8253a8e747ba052ac
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:52:00 2007 +0200

    sched: scheduler debugging, core
    
    scheduler debugging core: implement /proc/sched_debug and
    /proc/<PID>/sched files for scheduler debugging.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/fs/proc/base.c b/fs/proc/base.c
index 0f40e820c7fd..46ea5d56e1bb 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -929,6 +929,69 @@ static const struct file_operations proc_fault_inject_operations = {
 };
 #endif
 
+#ifdef CONFIG_SCHED_DEBUG
+/*
+ * Print out various scheduling related per-task fields:
+ */
+static int sched_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	WARN_ON(!inode);
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+	proc_sched_show_task(p, m);
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct task_struct *p;
+
+	WARN_ON(!inode);
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+	proc_sched_set_task(p);
+
+	put_task_struct(p);
+
+	return count;
+}
+
+static int sched_open(struct inode *inode, struct file *filp)
+{
+	int ret;
+
+	ret = single_open(filp, sched_show, NULL);
+	if (!ret) {
+		struct seq_file *m = filp->private_data;
+
+		m->private = inode;
+	}
+	return ret;
+}
+
+static const struct file_operations proc_pid_sched_operations = {
+	.open		= sched_open,
+	.read		= seq_read,
+	.write		= sched_write,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+#endif
+
 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct inode *inode = dentry->d_inode;
@@ -1963,6 +2026,9 @@ static const struct pid_entry tgid_base_stuff[] = {
 	INF("environ",    S_IRUSR, pid_environ),
 	INF("auxv",       S_IRUSR, pid_auxv),
 	INF("status",     S_IRUGO, pid_status),
+#ifdef CONFIG_SCHED_DEBUG
+	REG("sched",      S_IRUGO|S_IWUSR, pid_sched),
+#endif
 	INF("cmdline",    S_IRUGO, pid_cmdline),
 	INF("stat",       S_IRUGO, tgid_stat),
 	INF("statm",      S_IRUGO, pid_statm),
@@ -2247,6 +2313,9 @@ static const struct pid_entry tid_base_stuff[] = {
 	INF("environ",   S_IRUSR, pid_environ),
 	INF("auxv",      S_IRUSR, pid_auxv),
 	INF("status",    S_IRUGO, pid_status),
+#ifdef CONFIG_SCHED_DEBUG
+	REG("sched",     S_IRUGO|S_IWUSR, pid_sched),
+#endif
 	INF("cmdline",   S_IRUGO, pid_cmdline),
 	INF("stat",      S_IRUGO, tid_stat),
 	INF("statm",     S_IRUGO, pid_statm),
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c9d65738bb7a..785ec8465bd3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -132,6 +132,26 @@ extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 extern unsigned long weighted_cpuload(const int cpu);
 
+struct seq_file;
+struct cfs_rq;
+#ifdef CONFIG_SCHED_DEBUG
+extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_set_task(struct task_struct *p);
+extern void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now);
+#else
+static inline void
+proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+{
+}
+static inline void proc_sched_set_task(struct task_struct *p)
+{
+}
+static inline void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now)
+{
+}
+#endif
 
 /*
  * Task state bitmask. NOTE! These bits are also
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
new file mode 100644
index 000000000000..1baf87cceb7c
--- /dev/null
+++ b/kernel/sched_debug.c
@@ -0,0 +1,275 @@
+/*
+ * kernel/time/sched_debug.c
+ *
+ * Print the CFS rbtree
+ *
+ * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/utsname.h>
+
+/*
+ * This allows printing both to /proc/sched_debug and
+ * to the console
+ */
+#define SEQ_printf(m, x...)			\
+ do {						\
+	if (m)					\
+		seq_printf(m, x);		\
+	else					\
+		printk(x);			\
+ } while (0)
+
+static void
+print_task(struct seq_file *m, struct rq *rq, struct task_struct *p, u64 now)
+{
+	if (rq->curr == p)
+		SEQ_printf(m, "R");
+	else
+		SEQ_printf(m, " ");
+
+	SEQ_printf(m, "%15s %5d %15Ld %13Ld %13Ld %9Ld %5d "
+		      "%15Ld %15Ld %15Ld %15Ld %15Ld\n",
+		p->comm, p->pid,
+		(long long)p->se.fair_key,
+		(long long)(p->se.fair_key - rq->cfs.fair_clock),
+		(long long)p->se.wait_runtime,
+		(long long)(p->nvcsw + p->nivcsw),
+		p->prio,
+		(long long)p->se.sum_exec_runtime,
+		(long long)p->se.sum_wait_runtime,
+		(long long)p->se.sum_sleep_runtime,
+		(long long)p->se.wait_runtime_overruns,
+		(long long)p->se.wait_runtime_underruns);
+}
+
+static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu, u64 now)
+{
+	struct task_struct *g, *p;
+
+	SEQ_printf(m,
+	"\nrunnable tasks:\n"
+	"            task   PID        tree-key         delta       waiting"
+	"  switches  prio"
+	"        sum-exec        sum-wait       sum-sleep"
+	"    wait-overrun   wait-underrun\n"
+	"------------------------------------------------------------------"
+	"----------------"
+	"------------------------------------------------"
+	"--------------------------------\n");
+
+	read_lock_irq(&tasklist_lock);
+
+	do_each_thread(g, p) {
+		if (!p->se.on_rq || task_cpu(p) != rq_cpu)
+			continue;
+
+		print_task(m, rq, p, now);
+	} while_each_thread(g, p);
+
+	read_unlock_irq(&tasklist_lock);
+}
+
+static void
+print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+{
+	s64 wait_runtime_rq_sum = 0;
+	struct task_struct *p;
+	struct rb_node *curr;
+	unsigned long flags;
+	struct rq *rq = &per_cpu(runqueues, cpu);
+
+	spin_lock_irqsave(&rq->lock, flags);
+	curr = first_fair(cfs_rq);
+	while (curr) {
+		p = rb_entry(curr, struct task_struct, se.run_node);
+		wait_runtime_rq_sum += p->se.wait_runtime;
+
+		curr = rb_next(curr);
+	}
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	SEQ_printf(m, "  .%-30s: %Ld\n", "wait_runtime_rq_sum",
+		(long long)wait_runtime_rq_sum);
+}
+
+void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now)
+{
+	SEQ_printf(m, "\ncfs_rq %p\n", cfs_rq);
+
+#define P(x) \
+	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(cfs_rq->x))
+
+	P(fair_clock);
+	P(exec_clock);
+	P(wait_runtime);
+	P(wait_runtime_overruns);
+	P(wait_runtime_underruns);
+	P(sleeper_bonus);
+#undef P
+
+	print_cfs_rq_runtime_sum(m, cpu, cfs_rq);
+}
+
+static void print_cpu(struct seq_file *m, int cpu, u64 now)
+{
+	struct rq *rq = &per_cpu(runqueues, cpu);
+
+#ifdef CONFIG_X86
+	{
+		unsigned int freq = cpu_khz ? : 1;
+
+		SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
+			   cpu, freq / 1000, (freq % 1000));
+	}
+#else
+	SEQ_printf(m, "\ncpu#%d\n", cpu);
+#endif
+
+#define P(x) \
+	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x))
+
+	P(nr_running);
+	SEQ_printf(m, "  .%-30s: %lu\n", "load",
+		   rq->ls.load.weight);
+	P(ls.delta_fair);
+	P(ls.delta_exec);
+	P(nr_switches);
+	P(nr_load_updates);
+	P(nr_uninterruptible);
+	SEQ_printf(m, "  .%-30s: %lu\n", "jiffies", jiffies);
+	P(next_balance);
+	P(curr->pid);
+	P(clock);
+	P(prev_clock_raw);
+	P(clock_warps);
+	P(clock_overflows);
+	P(clock_unstable_events);
+	P(clock_max_delta);
+	P(cpu_load[0]);
+	P(cpu_load[1]);
+	P(cpu_load[2]);
+	P(cpu_load[3]);
+	P(cpu_load[4]);
+#undef P
+
+	print_cfs_stats(m, cpu, now);
+
+	print_rq(m, rq, cpu, now);
+}
+
+static int sched_debug_show(struct seq_file *m, void *v)
+{
+	u64 now = ktime_to_ns(ktime_get());
+	int cpu;
+
+	SEQ_printf(m, "Sched Debug Version: v0.04, cfs-v20, %s %.*s\n",
+		init_utsname()->release,
+		(int)strcspn(init_utsname()->version, " "),
+		init_utsname()->version);
+
+	SEQ_printf(m, "now at %Lu nsecs\n", (unsigned long long)now);
+
+	for_each_online_cpu(cpu)
+		print_cpu(m, cpu, now);
+
+	SEQ_printf(m, "\n");
+
+	return 0;
+}
+
+void sysrq_sched_debug_show(void)
+{
+	sched_debug_show(NULL, NULL);
+}
+
+static int sched_debug_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_debug_show, NULL);
+}
+
+static struct file_operations sched_debug_fops = {
+	.open		= sched_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int __init init_sched_debug_procfs(void)
+{
+	struct proc_dir_entry *pe;
+
+	pe = create_proc_entry("sched_debug", 0644, NULL);
+	if (!pe)
+		return -ENOMEM;
+
+	pe->proc_fops = &sched_debug_fops;
+
+	return 0;
+}
+
+__initcall(init_sched_debug_procfs);
+
+void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+{
+	unsigned long flags;
+	int num_threads = 1;
+
+	rcu_read_lock();
+	if (lock_task_sighand(p, &flags)) {
+		num_threads = atomic_read(&p->signal->count);
+		unlock_task_sighand(p, &flags);
+	}
+	rcu_read_unlock();
+
+	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
+	SEQ_printf(m, "----------------------------------------------\n");
+#define P(F) \
+	SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F)
+
+	P(se.wait_start);
+	P(se.wait_start_fair);
+	P(se.exec_start);
+	P(se.sleep_start);
+	P(se.sleep_start_fair);
+	P(se.block_start);
+	P(se.sleep_max);
+	P(se.block_max);
+	P(se.exec_max);
+	P(se.wait_max);
+	P(se.wait_runtime);
+	P(se.wait_runtime_overruns);
+	P(se.wait_runtime_underruns);
+	P(se.sum_wait_runtime);
+	P(se.sum_exec_runtime);
+	SEQ_printf(m, "%-25s:%20Ld\n",
+		   "nr_switches", (long long)(p->nvcsw + p->nivcsw));
+	P(se.load.weight);
+	P(policy);
+	P(prio);
+#undef P
+
+	{
+		u64 t0, t1;
+
+		t0 = sched_clock();
+		t1 = sched_clock();
+		SEQ_printf(m, "%-25s:%20Ld\n",
+			   "clock-delta", (long long)(t1-t0));
+	}
+}
+
+void proc_sched_set_task(struct task_struct *p)
+{
+	p->se.sleep_max = p->se.block_max = p->se.exec_max = p->se.wait_max = 0;
+	p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
+	p->se.sum_exec_runtime = 0;
+}
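
With CONFIG_SCHED_DEBUG enabled, each task gains a /proc/<PID>/sched file: reading it returns the per-task fields dumped by proc_sched_show_task(), and writing anything to it invokes proc_sched_set_task(), which zeroes the max and overrun/underrun statistics. A minimal user-space sketch of that usage (error handling kept terse):

	#include <stdio.h>

	int main(void)
	{
		char buf[4096];
		size_t n;
		FILE *f;

		/* dump the scheduler statistics of the current task */
		f = fopen("/proc/self/sched", "r");
		if (!f)
			return 1;
		while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
			fwrite(buf, 1, n, stdout);
		fclose(f);

		/* any write resets sleep_max/block_max/exec_max/wait_max and
		 * the overrun/underrun counters (see proc_sched_set_task) */
		f = fopen("/proc/self/sched", "w");
		if (!f)
			return 1;
		fputs("0\n", f);
		fclose(f);

		return 0;
	}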

commit 77e54a1f88a1cb0746c7694fa40052bd02df1123
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:52:00 2007 +0200

    sched: add CFS debug sysctls
    
    add CFS debug sysctls: only tweakable if SCHED_DEBUG is enabled.
    This allows for faster debugging of scheduler problems.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 30ee462ee79f..51f5dac42a00 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -206,7 +206,87 @@ static ctl_table root_table[] = {
 	{ .ctl_name = 0 }
 };
 
+#ifdef CONFIG_SCHED_DEBUG
+static unsigned long min_sched_granularity_ns = 100000;		/* 100 usecs */
+static unsigned long max_sched_granularity_ns = 1000000000;	/* 1 second */
+static unsigned long min_wakeup_granularity_ns;			/* 0 usecs */
+static unsigned long max_wakeup_granularity_ns = 1000000000;	/* 1 second */
+#endif
+
 static ctl_table kern_table[] = {
+#ifdef CONFIG_SCHED_DEBUG
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_granularity_ns",
+		.data		= &sysctl_sched_granularity,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_sched_granularity_ns,
+		.extra2		= &max_sched_granularity_ns,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_wakeup_granularity_ns",
+		.data		= &sysctl_sched_wakeup_granularity,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_wakeup_granularity_ns,
+		.extra2		= &max_wakeup_granularity_ns,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_batch_wakeup_granularity_ns",
+		.data		= &sysctl_sched_batch_wakeup_granularity,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_wakeup_granularity_ns,
+		.extra2		= &max_wakeup_granularity_ns,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_stat_granularity_ns",
+		.data		= &sysctl_sched_stat_granularity,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_wakeup_granularity_ns,
+		.extra2		= &max_wakeup_granularity_ns,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_runtime_limit_ns",
+		.data		= &sysctl_sched_runtime_limit,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_sched_granularity_ns,
+		.extra2		= &max_sched_granularity_ns,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_child_runs_first",
+		.data		= &sysctl_sched_child_runs_first,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_features",
+		.data		= &sysctl_sched_features,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
 	{
 		.ctl_name	= KERN_PANIC,
 		.procname	= "panic",
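
Each CTL_UNNUMBERED entry above appears as a file under /proc/sys/kernel/ named after its .procname, with writes bounded by proc_dointvec_minmax() via the extra1/extra2 limits. A small sketch of inspecting and retuning the central granularity knob from user space (the write requires root, and the 2 ms value is only an arbitrary example):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ns;
		FILE *f = fopen("/proc/sys/kernel/sched_granularity_ns", "r");

		if (!f || fscanf(f, "%u", &ns) != 1)
			return 1;
		fclose(f);
		printf("sched_granularity_ns = %u\n", ns);

		/* lower the granularity for a latency-sensitive desktop;
		 * the value must stay within min_sched_granularity_ns and
		 * max_sched_granularity_ns as enforced by the sysctl */
		f = fopen("/proc/sys/kernel/sched_granularity_ns", "w");
		if (!f)
			return 1;
		fprintf(f, "%u\n", 2000000u);	/* 2 ms, arbitrary example */
		fclose(f);

		return 0;
	}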

commit 7dd593608df3f9d4e4531cfe29f28c3a3766a0ee
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:52:00 2007 +0200

    sched: remove old cpu accounting field
    
    remove the old cpu-accounting field from signal_struct, now
    that the code is using CFS's stats.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index aa582be8cafa..c9d65738bb7a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -482,7 +482,6 @@ struct signal_struct {
 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
 	 * other than jiffies.)
 	 */
-	unsigned long sched_time;
 	unsigned long long sum_sched_runtime;
 
 	/*

commit b2cfba19f67228e78e48177187f6b07f4107c784
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:52:00 2007 +0200

    sched: remove unused rq types from sched.c
    
    remove unused rq types from sched.c, now that we switched
    over to CFS.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index 886531c681c1..f8cf78c6af2e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -208,15 +208,6 @@ struct rt_rq {
 	struct list_head *rt_load_balance_head, *rt_load_balance_curr;
 };
 
-/*
- * The prio-array type of the old scheduler:
- */
-struct prio_array {
-	unsigned int nr_active;
-	DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
-	struct list_head queue[MAX_PRIO];
-};
-
 /*
  * This is the main, per-CPU runqueue data structure.
  *
@@ -232,7 +223,6 @@ struct rq {
 	 * remote CPUs use both these fields when doing load calculation.
 	 */
 	unsigned long nr_running;
-	unsigned long raw_weighted_load;
 	#define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned char idle_at_tick;
@@ -257,16 +247,10 @@ struct rq {
 	 */
 	unsigned long nr_uninterruptible;
 
-	unsigned long expired_timestamp;
-	unsigned long long most_recent_timestamp;
-
 	struct task_struct *curr, *idle;
 	unsigned long next_balance;
 	struct mm_struct *prev_mm;
 
-	struct prio_array *active, *expired, arrays[2];
-	int best_expired_prio;
-
 	u64 clock, prev_clock_raw;
 	s64 clock_max_delta;
 

commit 0c57d5893e4a9857ff22ec9e379f6bdbdad50850
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:52:00 2007 +0200

    sched: remove batch_task()
    
    batch_task() in sched.h is now unused - remove it.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index efa3beb007ff..aa582be8cafa 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1157,11 +1157,6 @@ static inline int rt_task(struct task_struct *p)
 	return rt_prio(p->prio);
 }
 
-static inline int batch_task(struct task_struct *p)
-{
-	return p->policy == SCHED_BATCH;
-}
-
 static inline pid_t process_group(struct task_struct *tsk)
 {
 	return tsk->signal->pgrp;