Patches contributed by Eötvös Loránd University


commit 6aa645ea5f7a246702e07f29edc7075d487ae4a3
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:51:58 2007 +0200

    sched: cfs rq data types
    
    add the CFS rq data types to sched.c.
    
    (the old scheduler fields are still intact, they are removed
     by a later patch)
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index f8b8eda4494d..085418bedccd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -91,6 +91,9 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 #define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
 #define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
 
+#define NICE_0_LOAD		SCHED_LOAD_SCALE
+#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
+
 /*
  * These are the 'tuning knobs' of the scheduler:
  *
@@ -218,9 +221,61 @@ static inline unsigned int task_timeslice(struct task_struct *p)
 }
 
 /*
- * These are the runqueue data structures:
+ * This is the priority-queue data structure of the RT scheduling class:
  */
+struct rt_prio_array {
+	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
+	struct list_head queue[MAX_RT_PRIO];
+};
+
+struct load_stat {
+	struct load_weight load;
+	u64 load_update_start, load_update_last;
+	unsigned long delta_fair, delta_exec, delta_stat;
+};
+
+/* CFS-related fields in a runqueue */
+struct cfs_rq {
+	struct load_weight load;
+	unsigned long nr_running;
+
+	s64 fair_clock;
+	u64 exec_clock;
+	s64 wait_runtime;
+	u64 sleeper_bonus;
+	unsigned long wait_runtime_overruns, wait_runtime_underruns;
+
+	struct rb_root tasks_timeline;
+	struct rb_node *rb_leftmost;
+	struct rb_node *rb_load_balance_curr;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	/* 'curr' points to currently running entity on this cfs_rq.
+	 * It is set to NULL otherwise (i.e when none are currently running).
+	 */
+	struct sched_entity *curr;
+	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
+
+	/* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
+	 * (like users, containers etc.)
+	 *
+	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
+	 * list is used during load balance.
+	 */
+	struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
+#endif
+};
 
+/* Real-Time classes' related field in a runqueue: */
+struct rt_rq {
+	struct rt_prio_array active;
+	int rt_load_balance_idx;
+	struct list_head *rt_load_balance_head, *rt_load_balance_curr;
+};
+
+/*
+ * The prio-array type of the old scheduler:
+ */
 struct prio_array {
 	unsigned int nr_active;
 	DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
@@ -235,7 +290,7 @@ struct prio_array {
  * acquire operations must be ordered by ascending &runqueue.
  */
 struct rq {
-	spinlock_t lock;
+	spinlock_t lock;	/* runqueue lock */
 
 	/*
 	 * nr_running and cpu_load should be in the same cacheline because
@@ -243,14 +298,21 @@ struct rq {
 	 */
 	unsigned long nr_running;
 	unsigned long raw_weighted_load;
-#ifdef CONFIG_SMP
-	unsigned long cpu_load[3];
+	#define CPU_LOAD_IDX_MAX 5
+	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned char idle_at_tick;
 #ifdef CONFIG_NO_HZ
 	unsigned char in_nohz_recently;
 #endif
+	struct load_stat ls;	/* capture load from *all* tasks on this cpu */
+	unsigned long nr_load_updates;
+	u64 nr_switches;
+
+	struct cfs_rq cfs;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */
 #endif
-	unsigned long long nr_switches;
+	struct rt_rq  rt;
 
 	/*
 	 * This is part of a global counter where only the total sum
@@ -261,13 +323,23 @@ struct rq {
 	unsigned long nr_uninterruptible;
 
 	unsigned long expired_timestamp;
-	/* Cached timestamp set by update_cpu_clock() */
 	unsigned long long most_recent_timestamp;
+
 	struct task_struct *curr, *idle;
 	unsigned long next_balance;
 	struct mm_struct *prev_mm;
+
 	struct prio_array *active, *expired, arrays[2];
 	int best_expired_prio;
+
+	u64 clock, prev_clock_raw;
+	s64 clock_max_delta;
+
+	unsigned int clock_warps, clock_overflows;
+	unsigned int clock_unstable_events;
+
+	struct sched_class *load_balance_class;
+
 	atomic_t nr_iowait;
 
 #ifdef CONFIG_SMP
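
The centerpiece of the hunk above is struct cfs_rq: runnable entities live in the time-ordered tasks_timeline tree, and the entity with the smallest key is cached in rb_leftmost so the next task to run can be found without walking the tree. The user-space sketch below models just that leftmost-caching idea; it uses a plain, unbalanced binary search tree instead of the kernel's red-black tree, and every toy_* name is invented for the example (compare __enqueue_entity() and first_fair() in the sched_fair.c patch further down).

#include <stdio.h>
#include <stdint.h>

struct toy_entity {
        int64_t fair_key;               /* smaller key => runs earlier */
        const char *comm;               /* task name, for the demo only */
        struct toy_entity *left, *right;
};

struct toy_cfs_rq {
        struct toy_entity *root;
        struct toy_entity *leftmost;    /* cached minimum, like rb_leftmost */
        unsigned long nr_running;
};

static void toy_enqueue(struct toy_cfs_rq *cfs_rq, struct toy_entity *se)
{
        struct toy_entity **link = &cfs_rq->root;
        int leftmost = 1;

        /* walk down to the insertion point, as __enqueue_entity() does */
        while (*link) {
                if (se->fair_key < (*link)->fair_key) {
                        link = &(*link)->left;
                } else {
                        link = &(*link)->right;
                        leftmost = 0;   /* we went right at least once */
                }
        }
        se->left = se->right = NULL;
        *link = se;

        /* only a node inserted on the far left can become the new minimum */
        if (leftmost)
                cfs_rq->leftmost = se;
        cfs_rq->nr_running++;
}

/* O(1) counterpart of first_fair()/__pick_next_entity() */
static struct toy_entity *toy_pick_next(struct toy_cfs_rq *cfs_rq)
{
        return cfs_rq->leftmost;
}

int main(void)
{
        struct toy_cfs_rq rq = { 0 };
        struct toy_entity tasks[] = {
                { .fair_key = 300, .comm = "editor" },
                { .fair_key = 100, .comm = "compiler" },
                { .fair_key = 200, .comm = "mp3-player" },
        };
        unsigned int i;

        for (i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
                toy_enqueue(&rq, &tasks[i]);

        printf("next to run: %s (fair_key %lld)\n",
               toy_pick_next(&rq)->comm,
               (long long)toy_pick_next(&rq)->fair_key);
        return 0;
}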

commit 20b8a59f2461e1be911dce2cfafefab9d22e4eee
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:51:58 2007 +0200

    sched: cfs, core data types
    
    add the CFS data types to sched.h.
    
    (the old scheduler is still fully intact.)
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 90420321994f..995eb407c234 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -820,6 +820,86 @@ enum sleep_type {
 };
 
 struct prio_array;
+struct rq;
+struct sched_domain;
+
+struct sched_class {
+	struct sched_class *next;
+
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p,
+			      int wakeup, u64 now);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p,
+			      int sleep, u64 now);
+	void (*yield_task) (struct rq *rq, struct task_struct *p);
+
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+
+	struct task_struct * (*pick_next_task) (struct rq *rq, u64 now);
+	void (*put_prev_task) (struct rq *rq, struct task_struct *p, u64 now);
+
+	int (*load_balance) (struct rq *this_rq, int this_cpu,
+			struct rq *busiest,
+			unsigned long max_nr_move, unsigned long max_load_move,
+			struct sched_domain *sd, enum cpu_idle_type idle,
+			int *all_pinned, unsigned long *total_load_moved);
+
+	void (*set_curr_task) (struct rq *rq);
+	void (*task_tick) (struct rq *rq, struct task_struct *p);
+	void (*task_new) (struct rq *rq, struct task_struct *p);
+};
+
+struct load_weight {
+	unsigned long weight, inv_weight;
+};
+
+/*
+ * CFS stats for a schedulable entity (task, task-group etc)
+ *
+ * Current field usage histogram:
+ *
+ *     4 se->block_start
+ *     4 se->run_node
+ *     4 se->sleep_start
+ *     4 se->sleep_start_fair
+ *     6 se->load.weight
+ *     7 se->delta_fair
+ *    15 se->wait_runtime
+ */
+struct sched_entity {
+	long			wait_runtime;
+	unsigned long		delta_fair_run;
+	unsigned long		delta_fair_sleep;
+	unsigned long		delta_exec;
+	s64			fair_key;
+	struct load_weight	load;		/* for load-balancing */
+	struct rb_node		run_node;
+	unsigned int		on_rq;
+
+	u64			wait_start_fair;
+	u64			wait_start;
+	u64			exec_start;
+	u64			sleep_start;
+	u64			sleep_start_fair;
+	u64			block_start;
+	u64			sleep_max;
+	u64			block_max;
+	u64			exec_max;
+	u64			wait_max;
+	u64			last_ran;
+
+	u64			sum_exec_runtime;
+	s64			sum_wait_runtime;
+	s64			sum_sleep_runtime;
+	unsigned long		wait_runtime_overruns;
+	unsigned long		wait_runtime_underruns;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct sched_entity	*parent;
+	/* rq on which this entity is (to be) queued: */
+	struct cfs_rq		*cfs_rq;
+	/* rq "owned" by this entity/group: */
+	struct cfs_rq		*my_q;
+#endif
+};
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
@@ -839,6 +919,8 @@ struct task_struct {
 	int prio, static_prio, normal_prio;
 	struct list_head run_list;
 	struct prio_array *array;
+	struct sched_class *sched_class;
+	struct sched_entity se;
 
 	unsigned short ioprio;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
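
struct load_weight above pairs each weight with a precomputed reciprocal, inv_weight, so that "how much of this delta was entity X entitled to" becomes a multiply plus a shift rather than a 64-bit division on every update (the sched_fair.c patch below uses inv_weight this way in update_stats_enqueue()). Here is a rough user-space sketch of that trick; the 32-bit reciprocal shift is an assumption for the example, since the patches shown here never spell out WMULT_SHIFT's value, and the overflow guard the kernel needs is omitted.

#include <stdio.h>
#include <stdint.h>

struct toy_load_weight {
        unsigned long weight;
        unsigned long inv_weight;       /* assumed to be ~2^32 / weight */
};

static void toy_set_weight(struct toy_load_weight *lw, unsigned long w)
{
        lw->weight = w;
        lw->inv_weight = (unsigned long)((1ULL << 32) / w);
}

/*
 * delta * weight / lw->weight, with the division replaced by a multiply
 * against the precomputed reciprocal and a shift.  Real code must also
 * guard against the 64-bit multiplication overflowing.
 */
static uint64_t toy_calc_delta(uint64_t delta, unsigned long weight,
                               const struct toy_load_weight *lw)
{
        return (delta * weight * lw->inv_weight) >> 32;
}

int main(void)
{
        struct toy_load_weight queue_load;
        uint64_t delta_exec = 4000000;  /* the current task ran for 4 ms */

        /* two tasks of nice-0 weight (1 << 10 after the smpnice patch below) */
        toy_set_weight(&queue_load, 2 * 1024);

        /* each of them was entitled to roughly half of those 4 ms */
        printf("entitled: %llu ns\n", (unsigned long long)
               toy_calc_delta(delta_exec, 1024, &queue_load));
        return 0;
}

Compiled and run, this prints "entitled: 2000000 ns" - half of the 4 ms, as expected for one of two equal-weight tasks.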

commit fa72e9e484c16f0c9aee23981917d8c8c03f0482
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:51:58 2007 +0200

    sched: cfs core, kernel/sched_idletask.c
    
    add kernel/sched_idletask.c - which implements the idle thread
    scheduling class. This further simplifies sched.c (under CFS),
    for example a number of 'if (p == rq->idle)' type of special-cases
    can be removed from sched.c, and schedule() gets simpler too.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
new file mode 100644
index 000000000000..41841e741c4a
--- /dev/null
+++ b/kernel/sched_idletask.c
@@ -0,0 +1,71 @@
+/*
+ * idle-task scheduling class.
+ *
+ * (NOTE: these are not related to SCHED_IDLE tasks which are
+ *  handled in sched_fair.c)
+ */
+
+/*
+ * Idle tasks are unconditionally rescheduled:
+ */
+static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p)
+{
+	resched_task(rq->idle);
+}
+
+static struct task_struct *pick_next_task_idle(struct rq *rq, u64 now)
+{
+	schedstat_inc(rq, sched_goidle);
+
+	return rq->idle;
+}
+
+/*
+ * It is not legal to sleep in the idle task - print a warning
+ * message if some code attempts to do it:
+ */
+static void
+dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep, u64 now)
+{
+	spin_unlock_irq(&rq->lock);
+	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
+	dump_stack();
+	spin_lock_irq(&rq->lock);
+}
+
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, u64 now)
+{
+}
+
+static int
+load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
+			unsigned long max_nr_move, unsigned long max_load_move,
+			struct sched_domain *sd, enum cpu_idle_type idle,
+			int *all_pinned, unsigned long *total_load_moved)
+{
+	return 0;
+}
+
+static void task_tick_idle(struct rq *rq, struct task_struct *curr)
+{
+}
+
+/*
+ * Simple, special scheduling class for the per-CPU idle tasks:
+ */
+static struct sched_class idle_sched_class __read_mostly = {
+	/* no enqueue/yield_task for idle tasks */
+
+	/* dequeue is not valid, we print a debug message there: */
+	.dequeue_task		= dequeue_task_idle,
+
+	.check_preempt_curr	= check_preempt_curr_idle,
+
+	.pick_next_task		= pick_next_task_idle,
+	.put_prev_task		= put_prev_task_idle,
+
+	.load_balance		= load_balance_idle,
+
+	.task_tick		= task_tick_idle,
+	/* no .task_new for idle tasks */
+};
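
The simplification the commit message describes - dropping "if (p == rq->idle)" special cases - falls out of the sched_class chain introduced in the previous patch: the core scheduler asks each class in turn for a runnable task, and the idle class sits at the end of the chain and always has an answer. The toy program below sketches that dispatch pattern; the toy_* types are invented for the example, so treat it as a model of the mechanism rather than the kernel's actual pick_next_task() loop.

#include <stdio.h>
#include <stddef.h>

struct toy_task {
        const char *comm;
};

struct toy_rq;

struct toy_sched_class {
        const struct toy_sched_class *next;     /* next, lower-priority class */
        struct toy_task *(*pick_next_task)(struct toy_rq *rq);
};

struct toy_rq {
        struct toy_task *rt_task;       /* pretend RT runqueue: at most one task */
        struct toy_task *fair_task;     /* pretend CFS runqueue: at most one task */
        struct toy_task idle;           /* the per-CPU idle task */
};

static struct toy_task *toy_pick_rt(struct toy_rq *rq)   { return rq->rt_task; }
static struct toy_task *toy_pick_fair(struct toy_rq *rq) { return rq->fair_task; }
static struct toy_task *toy_pick_idle(struct toy_rq *rq) { return &rq->idle; }

static const struct toy_sched_class toy_idle_class = {
        .next           = NULL,
        .pick_next_task = toy_pick_idle,        /* always succeeds */
};
static const struct toy_sched_class toy_fair_class = {
        .next           = &toy_idle_class,
        .pick_next_task = toy_pick_fair,
};
static const struct toy_sched_class toy_rt_class = {
        .next           = &toy_fair_class,
        .pick_next_task = toy_pick_rt,
};

/* core loop: the first class that has something runnable wins */
static struct toy_task *toy_pick_next_task(struct toy_rq *rq)
{
        const struct toy_sched_class *class;
        struct toy_task *p;

        for (class = &toy_rt_class; class; class = class->next) {
                p = class->pick_next_task(rq);
                if (p)
                        return p;
        }
        return NULL;    /* never reached: the idle class is the fallback */
}

int main(void)
{
        struct toy_task shell = { .comm = "bash" };
        struct toy_rq rq = {
                .rt_task   = NULL,
                .fair_task = &shell,
                .idle      = { .comm = "swapper" },
        };

        printf("picked: %s\n", toy_pick_next_task(&rq)->comm);  /* bash */

        rq.fair_task = NULL;            /* nothing runnable is left */
        printf("picked: %s\n", toy_pick_next_task(&rq)->comm);  /* swapper */
        return 0;
}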

commit bb44e5d1c6b3b748e0facf8f516b3162009feb27
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:51:58 2007 +0200

    sched: cfs core, kernel/sched_rt.c
    
    add kernel/sched_rt.c: SCHED_FIFO/SCHED_RR support. The behavior
    and semantics of SCHED_FIFO/SCHED_RR tasks is unchanged.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
new file mode 100644
index 000000000000..1192a2741b99
--- /dev/null
+++ b/kernel/sched_rt.c
@@ -0,0 +1,255 @@
+/*
+ * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
+ * policies)
+ */
+
+/*
+ * Update the current task's runtime statistics. Skip current tasks that
+ * are not in our scheduling class.
+ */
+static inline void update_curr_rt(struct rq *rq, u64 now)
+{
+	struct task_struct *curr = rq->curr;
+	u64 delta_exec;
+
+	if (!task_has_rt_policy(curr))
+		return;
+
+	delta_exec = now - curr->se.exec_start;
+	if (unlikely((s64)delta_exec < 0))
+		delta_exec = 0;
+	if (unlikely(delta_exec > curr->se.exec_max))
+		curr->se.exec_max = delta_exec;
+
+	curr->se.sum_exec_runtime += delta_exec;
+	curr->se.exec_start = now;
+}
+
+static void
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
+{
+	struct rt_prio_array *array = &rq->rt.active;
+
+	list_add_tail(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+}
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void
+dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep, u64 now)
+{
+	struct rt_prio_array *array = &rq->rt.active;
+
+	update_curr_rt(rq, now);
+
+	list_del(&p->run_list);
+	if (list_empty(array->queue + p->prio))
+		__clear_bit(p->prio, array->bitmap);
+}
+
+/*
+ * Put task to the end of the run list without the overhead of dequeue
+ * followed by enqueue.
+ */
+static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+{
+	struct rt_prio_array *array = &rq->rt.active;
+
+	list_move_tail(&p->run_list, array->queue + p->prio);
+}
+
+static void
+yield_task_rt(struct rq *rq, struct task_struct *p)
+{
+	requeue_task_rt(rq, p);
+}
+
+/*
+ * Preempt the current task with a newly woken task if needed:
+ */
+static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
+{
+	if (p->prio < rq->curr->prio)
+		resched_task(rq->curr);
+}
+
+static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
+{
+	struct rt_prio_array *array = &rq->rt.active;
+	struct task_struct *next;
+	struct list_head *queue;
+	int idx;
+
+	idx = sched_find_first_bit(array->bitmap);
+	if (idx >= MAX_RT_PRIO)
+		return NULL;
+
+	queue = array->queue + idx;
+	next = list_entry(queue->next, struct task_struct, run_list);
+
+	next->se.exec_start = now;
+
+	return next;
+}
+
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p, u64 now)
+{
+	update_curr_rt(rq, now);
+	p->se.exec_start = 0;
+}
+
+/*
+ * Load-balancing iterator. Note: while the runqueue stays locked
+ * during the whole iteration, the current task might be
+ * dequeued so the iterator has to be dequeue-safe. Here we
+ * achieve that by always pre-iterating before returning
+ * the current task:
+ */
+static struct task_struct *load_balance_start_rt(void *arg)
+{
+	struct rq *rq = arg;
+	struct rt_prio_array *array = &rq->rt.active;
+	struct list_head *head, *curr;
+	struct task_struct *p;
+	int idx;
+
+	idx = sched_find_first_bit(array->bitmap);
+	if (idx >= MAX_RT_PRIO)
+		return NULL;
+
+	head = array->queue + idx;
+	curr = head->prev;
+
+	p = list_entry(curr, struct task_struct, run_list);
+
+	curr = curr->prev;
+
+	rq->rt.rt_load_balance_idx = idx;
+	rq->rt.rt_load_balance_head = head;
+	rq->rt.rt_load_balance_curr = curr;
+
+	return p;
+}
+
+static struct task_struct *load_balance_next_rt(void *arg)
+{
+	struct rq *rq = arg;
+	struct rt_prio_array *array = &rq->rt.active;
+	struct list_head *head, *curr;
+	struct task_struct *p;
+	int idx;
+
+	idx = rq->rt.rt_load_balance_idx;
+	head = rq->rt.rt_load_balance_head;
+	curr = rq->rt.rt_load_balance_curr;
+
+	/*
+	 * If we arrived back to the head again then
+	 * iterate to the next queue (if any):
+	 */
+	if (unlikely(head == curr)) {
+		int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
+
+		if (next_idx >= MAX_RT_PRIO)
+			return NULL;
+
+		idx = next_idx;
+		head = array->queue + idx;
+		curr = head->prev;
+
+		rq->rt.rt_load_balance_idx = idx;
+		rq->rt.rt_load_balance_head = head;
+	}
+
+	p = list_entry(curr, struct task_struct, run_list);
+
+	curr = curr->prev;
+
+	rq->rt.rt_load_balance_curr = curr;
+
+	return p;
+}
+
+static int
+load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
+			unsigned long max_nr_move, unsigned long max_load_move,
+			struct sched_domain *sd, enum cpu_idle_type idle,
+			int *all_pinned, unsigned long *load_moved)
+{
+	int this_best_prio, best_prio, best_prio_seen = 0;
+	int nr_moved;
+	struct rq_iterator rt_rq_iterator;
+
+	best_prio = sched_find_first_bit(busiest->rt.active.bitmap);
+	this_best_prio = sched_find_first_bit(this_rq->rt.active.bitmap);
+
+	/*
+	 * Enable handling of the case where there is more than one task
+	 * with the best priority.   If the current running task is one
+	 * of those with prio==best_prio we know it won't be moved
+	 * and therefore it's safe to override the skip (based on load)
+	 * of any task we find with that prio.
+	 */
+	if (busiest->curr->prio == best_prio)
+		best_prio_seen = 1;
+
+	rt_rq_iterator.start = load_balance_start_rt;
+	rt_rq_iterator.next = load_balance_next_rt;
+	/* pass 'busiest' rq argument into
+	 * load_balance_[start|next]_rt iterators
+	 */
+	rt_rq_iterator.arg = busiest;
+
+	nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
+			max_load_move, sd, idle, all_pinned, load_moved,
+			this_best_prio, best_prio, best_prio_seen,
+			&rt_rq_iterator);
+
+	return nr_moved;
+}
+
+static void task_tick_rt(struct rq *rq, struct task_struct *p)
+{
+	/*
+	 * RR tasks need a special form of timeslice management.
+	 * FIFO tasks have no timeslices.
+	 */
+	if (p->policy != SCHED_RR)
+		return;
+
+	if (--p->time_slice)
+		return;
+
+	p->time_slice = static_prio_timeslice(p->static_prio);
+	set_tsk_need_resched(p);
+
+	/* put it at the end of the queue: */
+	requeue_task_rt(rq, p);
+}
+
+/*
+ * No parent/child timeslice management necessary for RT tasks,
+ * just activate them:
+ */
+static void task_new_rt(struct rq *rq, struct task_struct *p)
+{
+	activate_task(rq, p, 1);
+}
+
+static struct sched_class rt_sched_class __read_mostly = {
+	.enqueue_task		= enqueue_task_rt,
+	.dequeue_task		= dequeue_task_rt,
+	.yield_task		= yield_task_rt,
+
+	.check_preempt_curr	= check_preempt_curr_rt,
+
+	.pick_next_task		= pick_next_task_rt,
+	.put_prev_task		= put_prev_task_rt,
+
+	.load_balance		= load_balance_rt,
+
+	.task_tick		= task_tick_rt,
+	.task_new		= task_new_rt,
+};
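
pick_next_task_rt() above is O(1) because rt_prio_array keeps one FIFO list per RT priority plus a bitmap of non-empty lists: the best runnable priority is simply the first set bit. The sketch below models this with invented toy_* types, a single 64-bit bitmap word and __builtin_ctzll() standing in for sched_find_first_bit(); the kernel's MAX_RT_PRIO levels and list_head plumbing are simplified away.

#include <stdio.h>
#include <string.h>

#define TOY_MAX_RT_PRIO 64      /* small enough for a one-word bitmap */

struct toy_task {
        const char *comm;
        int prio;                       /* lower value == higher priority */
        struct toy_task *next;          /* FIFO link within one priority */
};

struct toy_rt_prio_array {
        unsigned long long bitmap;               /* bit p set => queue[p] non-empty */
        struct toy_task *queue[TOY_MAX_RT_PRIO]; /* heads of the per-priority FIFOs */
};

static void toy_enqueue_rt(struct toy_rt_prio_array *array, struct toy_task *p)
{
        struct toy_task **link = &array->queue[p->prio];

        /* append at the tail of its priority FIFO (kept simple: walk the list) */
        while (*link)
                link = &(*link)->next;
        p->next = NULL;
        *link = p;

        array->bitmap |= 1ULL << p->prio;       /* like __set_bit(p->prio, bitmap) */
}

static struct toy_task *toy_pick_next_rt(struct toy_rt_prio_array *array)
{
        if (!array->bitmap)
                return NULL;                    /* no RT task is runnable */

        /* lowest set bit == numerically lowest == best priority */
        return array->queue[__builtin_ctzll(array->bitmap)];
}

int main(void)
{
        struct toy_rt_prio_array array;
        struct toy_task irq_thread = { .comm = "irq-thread", .prio = 10 };
        struct toy_task watchdog   = { .comm = "watchdog",   .prio = 2  };

        memset(&array, 0, sizeof(array));
        toy_enqueue_rt(&array, &irq_thread);
        toy_enqueue_rt(&array, &watchdog);

        printf("next RT task: %s\n", toy_pick_next_rt(&array)->comm);   /* watchdog */
        return 0;
}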

commit bf0f6f24a1ece8988b243aefe84ee613099a9245
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:51:58 2007 +0200

    sched: cfs core, kernel/sched_fair.c
    
    add kernel/sched_fair.c - which implements the bulk of CFS's
    behavioral changes for SCHED_OTHER tasks.
    
    see Documentation/sched-design-CFS.txt about details.
    
    Authors:
    
     Ingo Molnar <mingo@elte.hu>
     Dmitry Adamushko <dmitry.adamushko@gmail.com>
     Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
     Mike Galbraith <efault@gmx.de>
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Mike Galbraith <efault@gmx.de>
    Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
    Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d5084e7c48cf..90420321994f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1243,6 +1243,14 @@ static inline void idle_task_exit(void) {}
 
 extern void sched_idle_next(void);
 
+extern unsigned int sysctl_sched_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_batch_wakeup_granularity;
+extern unsigned int sysctl_sched_stat_granularity;
+extern unsigned int sysctl_sched_runtime_limit;
+extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_features;
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
new file mode 100644
index 000000000000..6971db0a7160
--- /dev/null
+++ b/kernel/sched_fair.c
@@ -0,0 +1,1131 @@
+/*
+ * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
+ *
+ *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ *  Interactivity improvements by Mike Galbraith
+ *  (C) 2007 Mike Galbraith <efault@gmx.de>
+ *
+ *  Various enhancements by Dmitry Adamushko.
+ *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
+ *
+ *  Group scheduling enhancements by Srivatsa Vaddagiri
+ *  Copyright IBM Corporation, 2007
+ *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
+ *
+ *  Scaled math optimizations by Thomas Gleixner
+ *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
+ */
+
+/*
+ * Preemption granularity:
+ * (default: 2 msec, units: nanoseconds)
+ *
+ * NOTE: this granularity value is not the same as the concept of
+ * 'timeslice length' - timeslices in CFS will typically be somewhat
+ * larger than this value. (to see the precise effective timeslice
+ * length of your workload, run vmstat and monitor the context-switches
+ * field)
+ *
+ * On SMP systems the value of this is multiplied by the log2 of the
+ * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
+ * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
+ */
+unsigned int sysctl_sched_granularity __read_mostly = 2000000000ULL/HZ;
+
+/*
+ * SCHED_BATCH wake-up granularity.
+ * (default: 10 msec, units: nanoseconds)
+ *
+ * This option delays the preemption effects of decoupled workloads
+ * and reduces their over-scheduling. Synchronous workloads will still
+ * have immediate wakeup/sleep latencies.
+ */
+unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly =
+							10000000000ULL/HZ;
+
+/*
+ * SCHED_OTHER wake-up granularity.
+ * (default: 1 msec, units: nanoseconds)
+ *
+ * This option delays the preemption effects of decoupled workloads
+ * and reduces their over-scheduling. Synchronous workloads will still
+ * have immediate wakeup/sleep latencies.
+ */
+unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000000ULL/HZ;
+
+unsigned int sysctl_sched_stat_granularity __read_mostly;
+
+/*
+ * Initialized in sched_init_granularity():
+ */
+unsigned int sysctl_sched_runtime_limit __read_mostly;
+
+/*
+ * Debugging: various feature bits
+ */
+enum {
+	SCHED_FEAT_FAIR_SLEEPERS	= 1,
+	SCHED_FEAT_SLEEPER_AVG		= 2,
+	SCHED_FEAT_SLEEPER_LOAD_AVG	= 4,
+	SCHED_FEAT_PRECISE_CPU_LOAD	= 8,
+	SCHED_FEAT_START_DEBIT		= 16,
+	SCHED_FEAT_SKIP_INITIAL		= 32,
+};
+
+unsigned int sysctl_sched_features __read_mostly =
+		SCHED_FEAT_FAIR_SLEEPERS	*1 |
+		SCHED_FEAT_SLEEPER_AVG		*1 |
+		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
+		SCHED_FEAT_PRECISE_CPU_LOAD	*1 |
+		SCHED_FEAT_START_DEBIT		*1 |
+		SCHED_FEAT_SKIP_INITIAL		*0;
+
+extern struct sched_class fair_sched_class;
+
+/**************************************************************
+ * CFS operations on generic schedulable entities:
+ */
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+/* cpu runqueue to which this cfs_rq is attached */
+static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->rq;
+}
+
+/* currently running entity (if any) on this cfs_rq */
+static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->curr;
+}
+
+/* An entity is a task if it doesn't "own" a runqueue */
+#define entity_is_task(se)	(!se->my_q)
+
+static inline void
+set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	cfs_rq->curr = se;
+}
+
+#else	/* CONFIG_FAIR_GROUP_SCHED */
+
+static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+{
+	return container_of(cfs_rq, struct rq, cfs);
+}
+
+static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
+{
+	struct rq *rq = rq_of(cfs_rq);
+
+	if (unlikely(rq->curr->sched_class != &fair_sched_class))
+		return NULL;
+
+	return &rq->curr->se;
+}
+
+#define entity_is_task(se)	1
+
+static inline void
+set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+
+#endif	/* CONFIG_FAIR_GROUP_SCHED */
+
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+	return container_of(se, struct task_struct, se);
+}
+
+
+/**************************************************************
+ * Scheduling class tree data structure manipulation methods:
+ */
+
+/*
+ * Enqueue an entity into the rb-tree:
+ */
+static inline void
+__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+	struct rb_node *parent = NULL;
+	struct sched_entity *entry;
+	s64 key = se->fair_key;
+	int leftmost = 1;
+
+	/*
+	 * Find the right place in the rbtree:
+	 */
+	while (*link) {
+		parent = *link;
+		entry = rb_entry(parent, struct sched_entity, run_node);
+		/*
+		 * We dont care about collisions. Nodes with
+		 * the same key stay together.
+		 */
+		if (key - entry->fair_key < 0) {
+			link = &parent->rb_left;
+		} else {
+			link = &parent->rb_right;
+			leftmost = 0;
+		}
+	}
+
+	/*
+	 * Maintain a cache of leftmost tree entries (it is frequently
+	 * used):
+	 */
+	if (leftmost)
+		cfs_rq->rb_leftmost = &se->run_node;
+
+	rb_link_node(&se->run_node, parent, link);
+	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
+	update_load_add(&cfs_rq->load, se->load.weight);
+	cfs_rq->nr_running++;
+	se->on_rq = 1;
+}
+
+static inline void
+__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	if (cfs_rq->rb_leftmost == &se->run_node)
+		cfs_rq->rb_leftmost = rb_next(&se->run_node);
+	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+	update_load_sub(&cfs_rq->load, se->load.weight);
+	cfs_rq->nr_running--;
+	se->on_rq = 0;
+}
+
+static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->rb_leftmost;
+}
+
+static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
+{
+	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
+}
+
+/**************************************************************
+ * Scheduling class statistics methods:
+ */
+
+/*
+ * We rescale the rescheduling granularity of tasks according to their
+ * nice level, but only linearly, not exponentially:
+ */
+static long
+niced_granularity(struct sched_entity *curr, unsigned long granularity)
+{
+	u64 tmp;
+
+	/*
+	 * Negative nice levels get the same granularity as nice-0:
+	 */
+	if (likely(curr->load.weight >= NICE_0_LOAD))
+		return granularity;
+	/*
+	 * Positive nice level tasks get linearly finer
+	 * granularity:
+	 */
+	tmp = curr->load.weight * (u64)granularity;
+
+	/*
+	 * It will always fit into 'long':
+	 */
+	return (long) (tmp >> NICE_0_SHIFT);
+}
+
+static inline void
+limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	long limit = sysctl_sched_runtime_limit;
+
+	/*
+	 * Niced tasks have the same history dynamic range as
+	 * non-niced tasks:
+	 */
+	if (unlikely(se->wait_runtime > limit)) {
+		se->wait_runtime = limit;
+		schedstat_inc(se, wait_runtime_overruns);
+		schedstat_inc(cfs_rq, wait_runtime_overruns);
+	}
+	if (unlikely(se->wait_runtime < -limit)) {
+		se->wait_runtime = -limit;
+		schedstat_inc(se, wait_runtime_underruns);
+		schedstat_inc(cfs_rq, wait_runtime_underruns);
+	}
+}
+
+static inline void
+__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
+{
+	se->wait_runtime += delta;
+	schedstat_add(se, sum_wait_runtime, delta);
+	limit_wait_runtime(cfs_rq, se);
+}
+
+static void
+add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
+{
+	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
+	__add_wait_runtime(cfs_rq, se, delta);
+	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+}
+
+/*
+ * Update the current task's runtime statistics. Skip current tasks that
+ * are not in our scheduling class.
+ */
+static inline void
+__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, u64 now)
+{
+	unsigned long delta, delta_exec, delta_fair;
+	long delta_mine;
+	struct load_weight *lw = &cfs_rq->load;
+	unsigned long load = lw->weight;
+
+	if (unlikely(!load))
+		return;
+
+	delta_exec = curr->delta_exec;
+#ifdef CONFIG_SCHEDSTATS
+	if (unlikely(delta_exec > curr->exec_max))
+		curr->exec_max = delta_exec;
+#endif
+
+	curr->sum_exec_runtime += delta_exec;
+	cfs_rq->exec_clock += delta_exec;
+
+	delta_fair = calc_delta_fair(delta_exec, lw);
+	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
+
+	if (cfs_rq->sleeper_bonus > sysctl_sched_stat_granularity) {
+		delta = calc_delta_mine(cfs_rq->sleeper_bonus,
+					curr->load.weight, lw);
+		if (unlikely(delta > cfs_rq->sleeper_bonus))
+			delta = cfs_rq->sleeper_bonus;
+
+		cfs_rq->sleeper_bonus -= delta;
+		delta_mine -= delta;
+	}
+
+	cfs_rq->fair_clock += delta_fair;
+	/*
+	 * We executed delta_exec amount of time on the CPU,
+	 * but we were only entitled to delta_mine amount of
+	 * time during that period (if nr_running == 1 then
+	 * the two values are equal)
+	 * [Note: delta_mine - delta_exec is negative]:
+	 */
+	add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
+}
+
+static void update_curr(struct cfs_rq *cfs_rq, u64 now)
+{
+	struct sched_entity *curr = cfs_rq_curr(cfs_rq);
+	unsigned long delta_exec;
+
+	if (unlikely(!curr))
+		return;
+
+	/*
+	 * Get the amount of time the current task was running
+	 * since the last time we changed load (this cannot
+	 * overflow on 32 bits):
+	 */
+	delta_exec = (unsigned long)(now - curr->exec_start);
+
+	curr->delta_exec += delta_exec;
+
+	if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
+		__update_curr(cfs_rq, curr, now);
+		curr->delta_exec = 0;
+	}
+	curr->exec_start = now;
+}
+
+static inline void
+update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	se->wait_start_fair = cfs_rq->fair_clock;
+	se->wait_start = now;
+}
+
+/*
+ * We calculate fair deltas here, so protect against the random effects
+ * of a multiplication overflow by capping it to the runtime limit:
+ */
+#if BITS_PER_LONG == 32
+static inline unsigned long
+calc_weighted(unsigned long delta, unsigned long weight, int shift)
+{
+	u64 tmp = (u64)delta * weight >> shift;
+
+	if (unlikely(tmp > sysctl_sched_runtime_limit*2))
+		return sysctl_sched_runtime_limit*2;
+	return tmp;
+}
+#else
+static inline unsigned long
+calc_weighted(unsigned long delta, unsigned long weight, int shift)
+{
+	return delta * weight >> shift;
+}
+#endif
+
+/*
+ * Task is being enqueued - update stats:
+ */
+static void
+update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	s64 key;
+
+	/*
+	 * Are we enqueueing a waiting task? (for current tasks
+	 * a dequeue/enqueue event is a NOP)
+	 */
+	if (se != cfs_rq_curr(cfs_rq))
+		update_stats_wait_start(cfs_rq, se, now);
+	/*
+	 * Update the key:
+	 */
+	key = cfs_rq->fair_clock;
+
+	/*
+	 * Optimize the common nice 0 case:
+	 */
+	if (likely(se->load.weight == NICE_0_LOAD)) {
+		key -= se->wait_runtime;
+	} else {
+		u64 tmp;
+
+		if (se->wait_runtime < 0) {
+			tmp = -se->wait_runtime;
+			key += (tmp * se->load.inv_weight) >>
+					(WMULT_SHIFT - NICE_0_SHIFT);
+		} else {
+			tmp = se->wait_runtime;
+			key -= (tmp * se->load.weight) >> NICE_0_SHIFT;
+		}
+	}
+
+	se->fair_key = key;
+}
+
+/*
+ * Note: must be called with a freshly updated rq->fair_clock.
+ */
+static inline void
+__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	unsigned long delta_fair = se->delta_fair_run;
+
+#ifdef CONFIG_SCHEDSTATS
+	{
+		s64 delta_wait = now - se->wait_start;
+		if (unlikely(delta_wait > se->wait_max))
+			se->wait_max = delta_wait;
+	}
+#endif
+
+	if (unlikely(se->load.weight != NICE_0_LOAD))
+		delta_fair = calc_weighted(delta_fair, se->load.weight,
+							NICE_0_SHIFT);
+
+	add_wait_runtime(cfs_rq, se, delta_fair);
+}
+
+static void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	unsigned long delta_fair;
+
+	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
+			(u64)(cfs_rq->fair_clock - se->wait_start_fair));
+
+	se->delta_fair_run += delta_fair;
+	if (unlikely(abs(se->delta_fair_run) >=
+				sysctl_sched_stat_granularity)) {
+		__update_stats_wait_end(cfs_rq, se, now);
+		se->delta_fair_run = 0;
+	}
+
+	se->wait_start_fair = 0;
+	se->wait_start = 0;
+}
+
+static inline void
+update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	update_curr(cfs_rq, now);
+	/*
+	 * Mark the end of the wait period if dequeueing a
+	 * waiting task:
+	 */
+	if (se != cfs_rq_curr(cfs_rq))
+		update_stats_wait_end(cfs_rq, se, now);
+}
+
+/*
+ * We are picking a new current task - update its stats:
+ */
+static inline void
+update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	/*
+	 * We are starting a new run period:
+	 */
+	se->exec_start = now;
+}
+
+/*
+ * We are descheduling a task - update its stats:
+ */
+static inline void
+update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	se->exec_start = 0;
+}
+
+/**************************************************
+ * Scheduling class queueing methods:
+ */
+
+static void
+__enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	unsigned long load = cfs_rq->load.weight, delta_fair;
+	long prev_runtime;
+
+	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
+		load = rq_of(cfs_rq)->cpu_load[2];
+
+	delta_fair = se->delta_fair_sleep;
+
+	/*
+	 * Fix up delta_fair with the effect of us running
+	 * during the whole sleep period:
+	 */
+	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG)
+		delta_fair = div64_likely32((u64)delta_fair * load,
+						load + se->load.weight);
+
+	if (unlikely(se->load.weight != NICE_0_LOAD))
+		delta_fair = calc_weighted(delta_fair, se->load.weight,
+							NICE_0_SHIFT);
+
+	prev_runtime = se->wait_runtime;
+	__add_wait_runtime(cfs_rq, se, delta_fair);
+	delta_fair = se->wait_runtime - prev_runtime;
+
+	/*
+	 * Track the amount of bonus we've given to sleepers:
+	 */
+	cfs_rq->sleeper_bonus += delta_fair;
+
+	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+}
+
+static void
+enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	struct task_struct *tsk = task_of(se);
+	unsigned long delta_fair;
+
+	if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
+			 !(sysctl_sched_features & SCHED_FEAT_FAIR_SLEEPERS))
+		return;
+
+	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
+		(u64)(cfs_rq->fair_clock - se->sleep_start_fair));
+
+	se->delta_fair_sleep += delta_fair;
+	if (unlikely(abs(se->delta_fair_sleep) >=
+				sysctl_sched_stat_granularity)) {
+		__enqueue_sleeper(cfs_rq, se, now);
+		se->delta_fair_sleep = 0;
+	}
+
+	se->sleep_start_fair = 0;
+
+#ifdef CONFIG_SCHEDSTATS
+	if (se->sleep_start) {
+		u64 delta = now - se->sleep_start;
+
+		if ((s64)delta < 0)
+			delta = 0;
+
+		if (unlikely(delta > se->sleep_max))
+			se->sleep_max = delta;
+
+		se->sleep_start = 0;
+		se->sum_sleep_runtime += delta;
+	}
+	if (se->block_start) {
+		u64 delta = now - se->block_start;
+
+		if ((s64)delta < 0)
+			delta = 0;
+
+		if (unlikely(delta > se->block_max))
+			se->block_max = delta;
+
+		se->block_start = 0;
+		se->sum_sleep_runtime += delta;
+	}
+#endif
+}
+
+static void
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+	       int wakeup, u64 now)
+{
+	/*
+	 * Update the fair clock.
+	 */
+	update_curr(cfs_rq, now);
+
+	if (wakeup)
+		enqueue_sleeper(cfs_rq, se, now);
+
+	update_stats_enqueue(cfs_rq, se, now);
+	__enqueue_entity(cfs_rq, se);
+}
+
+static void
+dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+	       int sleep, u64 now)
+{
+	update_stats_dequeue(cfs_rq, se, now);
+	if (sleep) {
+		se->sleep_start_fair = cfs_rq->fair_clock;
+#ifdef CONFIG_SCHEDSTATS
+		if (entity_is_task(se)) {
+			struct task_struct *tsk = task_of(se);
+
+			if (tsk->state & TASK_INTERRUPTIBLE)
+				se->sleep_start = now;
+			if (tsk->state & TASK_UNINTERRUPTIBLE)
+				se->block_start = now;
+		}
+		cfs_rq->wait_runtime -= se->wait_runtime;
+#endif
+	}
+	__dequeue_entity(cfs_rq, se);
+}
+
+/*
+ * Preempt the current task with a newly woken task if needed:
+ */
+static void
+__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
+			  struct sched_entity *curr, unsigned long granularity)
+{
+	s64 __delta = curr->fair_key - se->fair_key;
+
+	/*
+	 * Take scheduling granularity into account - do not
+	 * preempt the current task unless the best task has
+	 * a larger than sched_granularity fairness advantage:
+	 */
+	if (__delta > niced_granularity(curr, granularity))
+		resched_task(rq_of(cfs_rq)->curr);
+}
+
+static inline void
+set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	/*
+	 * Any task has to be enqueued before it get to execute on
+	 * a CPU. So account for the time it spent waiting on the
+	 * runqueue. (note, here we rely on pick_next_task() having
+	 * done a put_prev_task_fair() shortly before this, which
+	 * updated rq->fair_clock - used by update_stats_wait_end())
+	 */
+	update_stats_wait_end(cfs_rq, se, now);
+	update_stats_curr_start(cfs_rq, se, now);
+	set_cfs_rq_curr(cfs_rq, se);
+}
+
+static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq, u64 now)
+{
+	struct sched_entity *se = __pick_next_entity(cfs_rq);
+
+	set_next_entity(cfs_rq, se, now);
+
+	return se;
+}
+
+static void
+put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev, u64 now)
+{
+	/*
+	 * If still on the runqueue then deactivate_task()
+	 * was not called and update_curr() has to be done:
+	 */
+	if (prev->on_rq)
+		update_curr(cfs_rq, now);
+
+	update_stats_curr_end(cfs_rq, prev, now);
+
+	if (prev->on_rq)
+		update_stats_wait_start(cfs_rq, prev, now);
+	set_cfs_rq_curr(cfs_rq, NULL);
+}
+
+static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+{
+	struct rq *rq = rq_of(cfs_rq);
+	struct sched_entity *next;
+	u64 now = __rq_clock(rq);
+
+	/*
+	 * Dequeue and enqueue the task to update its
+	 * position within the tree:
+	 */
+	dequeue_entity(cfs_rq, curr, 0, now);
+	enqueue_entity(cfs_rq, curr, 0, now);
+
+	/*
+	 * Reschedule if another task tops the current one.
+	 */
+	next = __pick_next_entity(cfs_rq);
+	if (next == curr)
+		return;
+
+	__check_preempt_curr_fair(cfs_rq, next, curr, sysctl_sched_granularity);
+}
+
+/**************************************************
+ * CFS operations on tasks:
+ */
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+/* Walk up scheduling entities hierarchy */
+#define for_each_sched_entity(se) \
+		for (; se; se = se->parent)
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+	return p->se.cfs_rq;
+}
+
+/* runqueue on which this entity is (to be) queued */
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+	return se->cfs_rq;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+	return grp->my_q;
+}
+
+/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
+ * another cpu ('this_cpu')
+ */
+static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+{
+	/* A later patch will take group into account */
+	return &cpu_rq(this_cpu)->cfs;
+}
+
+/* Iterate thr' all leaf cfs_rq's on a runqueue */
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+
+/* Do the two (enqueued) tasks belong to the same group ? */
+static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+{
+	if (curr->se.cfs_rq == p->se.cfs_rq)
+		return 1;
+
+	return 0;
+}
+
+#else	/* CONFIG_FAIR_GROUP_SCHED */
+
+#define for_each_sched_entity(se) \
+		for (; se; se = NULL)
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+	return &task_rq(p)->cfs;
+}
+
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+	struct task_struct *p = task_of(se);
+	struct rq *rq = task_rq(p);
+
+	return &rq->cfs;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+	return NULL;
+}
+
+static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+{
+	return &cpu_rq(this_cpu)->cfs;
+}
+
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+
+static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+{
+	return 1;
+}
+
+#endif	/* CONFIG_FAIR_GROUP_SCHED */
+
+/*
+ * The enqueue_task method is called before nr_running is
+ * increased. Here we update the fair scheduling stats and
+ * then put the task into the rbtree:
+ */
+static void
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+
+	for_each_sched_entity(se) {
+		if (se->on_rq)
+			break;
+		cfs_rq = cfs_rq_of(se);
+		enqueue_entity(cfs_rq, se, wakeup, now);
+	}
+}
+
+/*
+ * The dequeue_task method is called before nr_running is
+ * decreased. We remove the task from the rbtree and
+ * update the fair scheduling stats:
+ */
+static void
+dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep, u64 now)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		dequeue_entity(cfs_rq, se, sleep, now);
+		/* Don't dequeue parent if it has other entities besides us */
+		if (cfs_rq->load.weight)
+			break;
+	}
+}
+
+/*
+ * sched_yield() support is very simple - we dequeue and enqueue
+ */
+static void yield_task_fair(struct rq *rq, struct task_struct *p)
+{
+	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+	u64 now = __rq_clock(rq);
+
+	/*
+	 * Dequeue and enqueue the task to update its
+	 * position within the tree:
+	 */
+	dequeue_entity(cfs_rq, &p->se, 0, now);
+	enqueue_entity(cfs_rq, &p->se, 0, now);
+}
+
+/*
+ * Preempt the current task with a newly woken task if needed:
+ */
+static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
+{
+	struct task_struct *curr = rq->curr;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	unsigned long gran;
+
+	if (unlikely(rt_prio(p->prio))) {
+		update_curr(cfs_rq, rq_clock(rq));
+		resched_task(curr);
+		return;
+	}
+
+	gran = sysctl_sched_wakeup_granularity;
+	/*
+	 * Batch tasks prefer throughput over latency:
+	 */
+	if (unlikely(p->policy == SCHED_BATCH))
+		gran = sysctl_sched_batch_wakeup_granularity;
+
+	if (is_same_group(curr, p))
+		__check_preempt_curr_fair(cfs_rq, &p->se, &curr->se, gran);
+}
+
+static struct task_struct *pick_next_task_fair(struct rq *rq, u64 now)
+{
+	struct cfs_rq *cfs_rq = &rq->cfs;
+	struct sched_entity *se;
+
+	if (unlikely(!cfs_rq->nr_running))
+		return NULL;
+
+	do {
+		se = pick_next_entity(cfs_rq, now);
+		cfs_rq = group_cfs_rq(se);
+	} while (cfs_rq);
+
+	return task_of(se);
+}
+
+/*
+ * Account for a descheduled task:
+ */
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, u64 now)
+{
+	struct sched_entity *se = &prev->se;
+	struct cfs_rq *cfs_rq;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		put_prev_entity(cfs_rq, se, now);
+	}
+}
+
+/**************************************************
+ * Fair scheduling class load-balancing methods:
+ */
+
+/*
+ * Load-balancing iterator. Note: while the runqueue stays locked
+ * during the whole iteration, the current task might be
+ * dequeued so the iterator has to be dequeue-safe. Here we
+ * achieve that by always pre-iterating before returning
+ * the current task:
+ */
+static inline struct task_struct *
+__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
+{
+	struct task_struct *p;
+
+	if (!curr)
+		return NULL;
+
+	p = rb_entry(curr, struct task_struct, se.run_node);
+	cfs_rq->rb_load_balance_curr = rb_next(curr);
+
+	return p;
+}
+
+static struct task_struct *load_balance_start_fair(void *arg)
+{
+	struct cfs_rq *cfs_rq = arg;
+
+	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
+}
+
+static struct task_struct *load_balance_next_fair(void *arg)
+{
+	struct cfs_rq *cfs_rq = arg;
+
+	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
+}
+
+static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
+{
+	struct sched_entity *curr;
+	struct task_struct *p;
+
+	if (!cfs_rq->nr_running)
+		return MAX_PRIO;
+
+	curr = __pick_next_entity(cfs_rq);
+	p = task_of(curr);
+
+	return p->prio;
+}
+
+static int
+load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+			unsigned long max_nr_move, unsigned long max_load_move,
+			struct sched_domain *sd, enum cpu_idle_type idle,
+			int *all_pinned, unsigned long *total_load_moved)
+{
+	struct cfs_rq *busy_cfs_rq;
+	unsigned long load_moved, total_nr_moved = 0, nr_moved;
+	long rem_load_move = max_load_move;
+	struct rq_iterator cfs_rq_iterator;
+
+	cfs_rq_iterator.start = load_balance_start_fair;
+	cfs_rq_iterator.next = load_balance_next_fair;
+
+	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
+		struct cfs_rq *this_cfs_rq;
+		long imbalance;
+		unsigned long maxload;
+		int this_best_prio, best_prio, best_prio_seen = 0;
+
+		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
+
+		imbalance = busy_cfs_rq->load.weight -
+						 this_cfs_rq->load.weight;
+		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
+		if (imbalance <= 0)
+			continue;
+
+		/* Don't pull more than imbalance/2 */
+		imbalance /= 2;
+		maxload = min(rem_load_move, imbalance);
+
+		this_best_prio = cfs_rq_best_prio(this_cfs_rq);
+		best_prio = cfs_rq_best_prio(busy_cfs_rq);
+
+		/*
+		 * Enable handling of the case where there is more than one task
+		 * with the best priority. If the current running task is one
+		 * of those with prio==best_prio we know it won't be moved
+		 * and therefore it's safe to override the skip (based on load)
+		 * of any task we find with that prio.
+		 */
+		if (cfs_rq_curr(busy_cfs_rq) == &busiest->curr->se)
+			best_prio_seen = 1;
+
+		/* pass busy_cfs_rq argument into
+		 * load_balance_[start|next]_fair iterators
+		 */
+		cfs_rq_iterator.arg = busy_cfs_rq;
+		nr_moved = balance_tasks(this_rq, this_cpu, busiest,
+				max_nr_move, maxload, sd, idle, all_pinned,
+				&load_moved, this_best_prio, best_prio,
+				best_prio_seen, &cfs_rq_iterator);
+
+		total_nr_moved += nr_moved;
+		max_nr_move -= nr_moved;
+		rem_load_move -= load_moved;
+
+		if (max_nr_move <= 0 || rem_load_move <= 0)
+			break;
+	}
+
+	*total_load_moved = max_load_move - rem_load_move;
+
+	return total_nr_moved;
+}
+
+/*
+ * scheduler tick hitting a task of our scheduling class:
+ */
+static void task_tick_fair(struct rq *rq, struct task_struct *curr)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &curr->se;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		entity_tick(cfs_rq, se);
+	}
+}
+
+/*
+ * Share the fairness runtime between parent and child, thus the
+ * total amount of pressure for CPU stays equal - new tasks
+ * get a chance to run but frequent forkers are not allowed to
+ * monopolize the CPU. Note: the parent runqueue is locked,
+ * the child is not running yet.
+ */
+static void task_new_fair(struct rq *rq, struct task_struct *p)
+{
+	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+	struct sched_entity *se = &p->se;
+	u64 now = rq_clock(rq);
+
+	sched_info_queued(p);
+
+	update_stats_enqueue(cfs_rq, se, now);
+	/*
+	 * Child runs first: we let it run before the parent
+	 * until it reschedules once. We set up the key so that
+	 * it will preempt the parent:
+	 */
+	p->se.fair_key = current->se.fair_key -
+		niced_granularity(&rq->curr->se, sysctl_sched_granularity) - 1;
+	/*
+	 * The first wait is dominated by the child-runs-first logic,
+	 * so do not credit it with that waiting time yet:
+	 */
+	if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
+		p->se.wait_start_fair = 0;
+
+	/*
+	 * The statistical average of wait_runtime is about
+	 * -granularity/2, so initialize the task with that:
+	 */
+	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
+		p->se.wait_runtime = -(sysctl_sched_granularity / 2);
+
+	__enqueue_entity(cfs_rq, se);
+	inc_nr_running(p, rq, now);
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/* Account for a task changing its policy or group.
+ *
+ * This routine is mostly called to set cfs_rq->curr field when a task
+ * migrates between groups/classes.
+ */
+static void set_curr_task_fair(struct rq *rq)
+{
+	struct task_struct *curr = rq->curr;
+	struct sched_entity *se = &curr->se;
+	u64 now = rq_clock(rq);
+	struct cfs_rq *cfs_rq;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		set_next_entity(cfs_rq, se, now);
+	}
+}
+#else
+static void set_curr_task_fair(struct rq *rq)
+{
+}
+#endif
+
+/*
+ * All the scheduling class methods:
+ */
+struct sched_class fair_sched_class __read_mostly = {
+	.enqueue_task		= enqueue_task_fair,
+	.dequeue_task		= dequeue_task_fair,
+	.yield_task		= yield_task_fair,
+
+	.check_preempt_curr	= check_preempt_curr_fair,
+
+	.pick_next_task		= pick_next_task_fair,
+	.put_prev_task		= put_prev_task_fair,
+
+	.load_balance		= load_balance_fair,
+
+	.set_curr_task          = set_curr_task_fair,
+	.task_tick		= task_tick_fair,
+	.task_new		= task_new_fair,
+};
+
+#ifdef CONFIG_SCHED_DEBUG
+void print_cfs_stats(struct seq_file *m, int cpu, u64 now)
+{
+	struct rq *rq = cpu_rq(cpu);
+	struct cfs_rq *cfs_rq;
+
+	for_each_leaf_cfs_rq(rq, cfs_rq)
+		print_cfs_rq(m, cpu, cfs_rq, now);
+}
+#endif
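
The heart of this patch is the bookkeeping in __update_curr(): the running task is charged for running beyond its fair share (the "delta_mine - delta_exec is negative" note above), waiting tasks are credited theirs, and the entity that is owed the most - the one with the smallest fair_key - runs next. The short simulation below models that feedback loop for two equal-weight CPU hogs; it is a deliberately stripped-down model (no fair_clock, no granularity, no sleeper bonus), not the kernel algorithm itself, and all names are invented.

#include <stdio.h>

#define NTASKS  2
#define TICK_NS 1000000LL       /* 1 ms scheduler tick */

struct toy_se {
        const char *comm;
        long long wait_runtime; /* positive: owed CPU time, negative: ran ahead */
        long long sum_exec;     /* total CPU time received so far */
};

int main(void)
{
        struct toy_se tasks[NTASKS] = {
                { .comm = "cpu-hog-A" },
                { .comm = "cpu-hog-B" },
        };
        int tick, i, curr = 0;

        for (tick = 0; tick < 10; tick++) {
                long long fair_share = TICK_NS / NTASKS;

                /* account one tick of execution for the current task */
                tasks[curr].sum_exec += TICK_NS;
                tasks[curr].wait_runtime += fair_share - TICK_NS;

                /* everyone else only waited, so they are owed their share */
                for (i = 0; i < NTASKS; i++)
                        if (i != curr)
                                tasks[i].wait_runtime += fair_share;

                /* run the most-owed task next (smallest fair_key, in CFS terms) */
                for (i = 0; i < NTASKS; i++)
                        if (tasks[i].wait_runtime > tasks[curr].wait_runtime)
                                curr = i;
        }

        for (i = 0; i < NTASKS; i++)
                printf("%s: ran %lld ns, wait_runtime %lld ns\n",
                       tasks[i].comm, tasks[i].sum_exec, tasks[i].wait_runtime);
        return 0;
}

After ten 1 ms ticks both hogs have received 5,000,000 ns of CPU time and their wait_runtime is back at zero, which is the steady state the __update_curr() comments describe.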

commit 9aa7b369819940cb1f3c74ba210516739a32ad95
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:51:58 2007 +0200

    sched: increase the resolution of smpnice
    
    increase SMP-nice's resolution. This is needed by CFS to
    implement SCHED_IDLE and cleaned up nice level support.
    
    no behavioral changes.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 61a111fe2b7a..d5084e7c48cf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -652,7 +652,14 @@ enum cpu_idle_type {
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
-#define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
+
+/*
+ * Increase resolution of nice-level calculations:
+ */
+#define SCHED_LOAD_SHIFT	10
+#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
+
+#define SCHED_LOAD_SCALE_FUZZ	(SCHED_LOAD_SCALE >> 5)
 
 #ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
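
Bumping SCHED_LOAD_SCALE from 128 to 1 << 10 changes no behaviour by itself; it just gives the fixed-point weight arithmetic three extra bits, so shares derived from weights lose less to integer truncation - which matters once CFS introduces very small weights for SCHED_IDLE and heavily niced tasks. The snippet below only illustrates that truncation effect with made-up 1:2:4 weights; it is not the kernel's nice-to-weight mapping.

#include <stdio.h>

/* print each weight's fixed-point share of 'scale', as integer math computes it */
static void toy_show_shares(const char *label, unsigned long scale)
{
        unsigned long w[3] = { 1 * scale, 2 * scale, 4 * scale };
        unsigned long total = w[0] + w[1] + w[2];
        int i;

        printf("%s, scale %4lu:", label, scale);
        for (i = 0; i < 3; i++) {
                unsigned long share = w[i] * scale / total;     /* truncating division */
                unsigned long pct = share * 100000UL / scale;   /* share as a percentage */

                printf("  %3lu/%lu = %2lu.%03lu%%",
                       share, scale, pct / 1000, pct % 1000);
        }
        printf("\n");
}

int main(void)
{
        /* the exact shares of 1:2:4 are 14.286%, 28.571% and 57.143% */
        toy_show_shares("old SCHED_LOAD_SCALE", 128);
        toy_show_shares("new SCHED_LOAD_SCALE", 1024);
        return 0;
}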

commit 425e0968a25fa3f111f9919964cac079738140b5
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:51:58 2007 +0200

    sched: move code into kernel/sched_stats.h
    
    create sched_stats.h and move sched.c schedstats code into it.
    This cleans up sched.c a bit.
    
    no code changes are caused by this patch.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index ac4d26241d1e..f8b8eda4494d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -460,134 +460,6 @@ static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
 	spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
-#ifdef CONFIG_SCHEDSTATS
-/*
- * bump this up when changing the output format or the meaning of an existing
- * format, so that tools can adapt (or abort)
- */
-#define SCHEDSTAT_VERSION 14
-
-static int show_schedstat(struct seq_file *seq, void *v)
-{
-	int cpu;
-
-	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
-	seq_printf(seq, "timestamp %lu\n", jiffies);
-	for_each_online_cpu(cpu) {
-		struct rq *rq = cpu_rq(cpu);
-#ifdef CONFIG_SMP
-		struct sched_domain *sd;
-		int dcnt = 0;
-#endif
-
-		/* runqueue-specific stats */
-		seq_printf(seq,
-		    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
-		    cpu, rq->yld_both_empty,
-		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
-		    rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
-		    rq->ttwu_cnt, rq->ttwu_local,
-		    rq->rq_sched_info.cpu_time,
-		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
-
-		seq_printf(seq, "\n");
-
-#ifdef CONFIG_SMP
-		/* domain-specific stats */
-		preempt_disable();
-		for_each_domain(cpu, sd) {
-			enum cpu_idle_type itype;
-			char mask_str[NR_CPUS];
-
-			cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
-			seq_printf(seq, "domain%d %s", dcnt++, mask_str);
-			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
-					itype++) {
-				seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
-						"%lu",
-				    sd->lb_cnt[itype],
-				    sd->lb_balanced[itype],
-				    sd->lb_failed[itype],
-				    sd->lb_imbalance[itype],
-				    sd->lb_gained[itype],
-				    sd->lb_hot_gained[itype],
-				    sd->lb_nobusyq[itype],
-				    sd->lb_nobusyg[itype]);
-			}
-			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
-			    " %lu %lu %lu\n",
-			    sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
-			    sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
-			    sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
-			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
-			    sd->ttwu_move_balance);
-		}
-		preempt_enable();
-#endif
-	}
-	return 0;
-}
-
-static int schedstat_open(struct inode *inode, struct file *file)
-{
-	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
-	char *buf = kmalloc(size, GFP_KERNEL);
-	struct seq_file *m;
-	int res;
-
-	if (!buf)
-		return -ENOMEM;
-	res = single_open(file, show_schedstat, NULL);
-	if (!res) {
-		m = file->private_data;
-		m->buf = buf;
-		m->size = size;
-	} else
-		kfree(buf);
-	return res;
-}
-
-const struct file_operations proc_schedstat_operations = {
-	.open    = schedstat_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = single_release,
-};
-
-/*
- * Expects runqueue lock to be held for atomicity of update
- */
-static inline void
-rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
-{
-	if (rq) {
-		rq->rq_sched_info.run_delay += delta_jiffies;
-		rq->rq_sched_info.pcnt++;
-	}
-}
-
-/*
- * Expects runqueue lock to be held for atomicity of update
- */
-static inline void
-rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
-{
-	if (rq)
-		rq->rq_sched_info.cpu_time += delta_jiffies;
-}
-# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
-# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
-#else /* !CONFIG_SCHEDSTATS */
-static inline void
-rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
-{}
-static inline void
-rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
-{}
-# define schedstat_inc(rq, field)	do { } while (0)
-# define schedstat_add(rq, field, amt)	do { } while (0)
-#endif
-
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
@@ -603,111 +475,7 @@ static inline struct rq *this_rq_lock(void)
 	return rq;
 }
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
-/*
- * Called when a process is dequeued from the active array and given
- * the cpu.  We should note that with the exception of interactive
- * tasks, the expired queue will become the active queue after the active
- * queue is empty, without explicitly dequeuing and requeuing tasks in the
- * expired queue.  (Interactive tasks may be requeued directly to the
- * active queue, thus delaying tasks in the expired queue from running;
- * see scheduler_tick()).
- *
- * This function is only called from sched_info_arrive(), rather than
- * dequeue_task(). Even though a task may be queued and dequeued multiple
- * times as it is shuffled about, we're really interested in knowing how
- * long it was from the *first* time it was queued to the time that it
- * finally hit a cpu.
- */
-static inline void sched_info_dequeued(struct task_struct *t)
-{
-	t->sched_info.last_queued = 0;
-}
-
-/*
- * Called when a task finally hits the cpu.  We can now calculate how
- * long it was waiting to run.  We also note when it began so that we
- * can keep stats on how long its timeslice is.
- */
-static void sched_info_arrive(struct task_struct *t)
-{
-	unsigned long now = jiffies, delta_jiffies = 0;
-
-	if (t->sched_info.last_queued)
-		delta_jiffies = now - t->sched_info.last_queued;
-	sched_info_dequeued(t);
-	t->sched_info.run_delay += delta_jiffies;
-	t->sched_info.last_arrival = now;
-	t->sched_info.pcnt++;
-
-	rq_sched_info_arrive(task_rq(t), delta_jiffies);
-}
-
-/*
- * Called when a process is queued into either the active or expired
- * array.  The time is noted and later used to determine how long we
- * had to wait for us to reach the cpu.  Since the expired queue will
- * become the active queue after active queue is empty, without dequeuing
- * and requeuing any tasks, we are interested in queuing to either. It
- * is unusual but not impossible for tasks to be dequeued and immediately
- * requeued in the same or another array: this can happen in sched_yield(),
- * set_user_nice(), and even load_balance() as it moves tasks from runqueue
- * to runqueue.
- *
- * This function is only called from enqueue_task(), but also only updates
- * the timestamp if it is already not set.  It's assumed that
- * sched_info_dequeued() will clear that stamp when appropriate.
- */
-static inline void sched_info_queued(struct task_struct *t)
-{
-	if (unlikely(sched_info_on()))
-		if (!t->sched_info.last_queued)
-			t->sched_info.last_queued = jiffies;
-}
-
-/*
- * Called when a process ceases being the active-running process, either
- * voluntarily or involuntarily.  Now we can calculate how long we ran.
- */
-static inline void sched_info_depart(struct task_struct *t)
-{
-	unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;
-
-	t->sched_info.cpu_time += delta_jiffies;
-	rq_sched_info_depart(task_rq(t), delta_jiffies);
-}
-
-/*
- * Called when tasks are switched involuntarily due, typically, to expiring
- * their time slice.  (This may also be called when switching to or from
- * the idle task.)  We are only called when prev != next.
- */
-static inline void
-__sched_info_switch(struct task_struct *prev, struct task_struct *next)
-{
-	struct rq *rq = task_rq(prev);
-
-	/*
-	 * prev now departs the cpu.  It's not interesting to record
-	 * stats about how efficient we were at scheduling the idle
-	 * process, however.
-	 */
-	if (prev != rq->idle)
-		sched_info_depart(prev);
-
-	if (next != rq->idle)
-		sched_info_arrive(next);
-}
-static inline void
-sched_info_switch(struct task_struct *prev, struct task_struct *next)
-{
-	if (unlikely(sched_info_on()))
-		__sched_info_switch(prev, next);
-}
-#else
-#define sched_info_queued(t)		do { } while (0)
-#define sched_info_switch(t, next)	do { } while (0)
-#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
+#include "sched_stats.h"
 
 /*
  * Adding/removing a task to/from a priority array:
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
new file mode 100644
index 000000000000..cd82c6078904
--- /dev/null
+++ b/kernel/sched_stats.h
@@ -0,0 +1,235 @@
+
+#ifdef CONFIG_SCHEDSTATS
+/*
+ * bump this up when changing the output format or the meaning of an existing
+ * format, so that tools can adapt (or abort)
+ */
+#define SCHEDSTAT_VERSION 14
+
+static int show_schedstat(struct seq_file *seq, void *v)
+{
+	int cpu;
+
+	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
+	seq_printf(seq, "timestamp %lu\n", jiffies);
+	for_each_online_cpu(cpu) {
+		struct rq *rq = cpu_rq(cpu);
+#ifdef CONFIG_SMP
+		struct sched_domain *sd;
+		int dcnt = 0;
+#endif
+
+		/* runqueue-specific stats */
+		seq_printf(seq,
+		    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
+		    cpu, rq->yld_both_empty,
+		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
+		    rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
+		    rq->ttwu_cnt, rq->ttwu_local,
+		    rq->rq_sched_info.cpu_time,
+		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
+
+		seq_printf(seq, "\n");
+
+#ifdef CONFIG_SMP
+		/* domain-specific stats */
+		preempt_disable();
+		for_each_domain(cpu, sd) {
+			enum cpu_idle_type itype;
+			char mask_str[NR_CPUS];
+
+			cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
+			seq_printf(seq, "domain%d %s", dcnt++, mask_str);
+			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
+					itype++) {
+				seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
+						"%lu",
+				    sd->lb_cnt[itype],
+				    sd->lb_balanced[itype],
+				    sd->lb_failed[itype],
+				    sd->lb_imbalance[itype],
+				    sd->lb_gained[itype],
+				    sd->lb_hot_gained[itype],
+				    sd->lb_nobusyq[itype],
+				    sd->lb_nobusyg[itype]);
+			}
+			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
+			    " %lu %lu %lu\n",
+			    sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
+			    sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
+			    sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
+			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
+			    sd->ttwu_move_balance);
+		}
+		preempt_enable();
+#endif
+	}
+	return 0;
+}
+
+static int schedstat_open(struct inode *inode, struct file *file)
+{
+	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
+	char *buf = kmalloc(size, GFP_KERNEL);
+	struct seq_file *m;
+	int res;
+
+	if (!buf)
+		return -ENOMEM;
+	res = single_open(file, show_schedstat, NULL);
+	if (!res) {
+		m = file->private_data;
+		m->buf = buf;
+		m->size = size;
+	} else
+		kfree(buf);
+	return res;
+}
+
+const struct file_operations proc_schedstat_operations = {
+	.open    = schedstat_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * Expects runqueue lock to be held for atomicity of update
+ */
+static inline void
+rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
+{
+	if (rq) {
+		rq->rq_sched_info.run_delay += delta;
+		rq->rq_sched_info.pcnt++;
+	}
+}
+
+/*
+ * Expects runqueue lock to be held for atomicity of update
+ */
+static inline void
+rq_sched_info_depart(struct rq *rq, unsigned long long delta)
+{
+	if (rq)
+		rq->rq_sched_info.cpu_time += delta;
+}
+# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
+# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
+#else /* !CONFIG_SCHEDSTATS */
+static inline void
+rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
+{}
+static inline void
+rq_sched_info_depart(struct rq *rq, unsigned long long delta)
+{}
+# define schedstat_inc(rq, field)	do { } while (0)
+# define schedstat_add(rq, field, amt)	do { } while (0)
+#endif
+
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+/*
+ * Called when a process is dequeued from the active array and given
+ * the cpu.  We should note that with the exception of interactive
+ * tasks, the expired queue will become the active queue after the active
+ * queue is empty, without explicitly dequeuing and requeuing tasks in the
+ * expired queue.  (Interactive tasks may be requeued directly to the
+ * active queue, thus delaying tasks in the expired queue from running;
+ * see scheduler_tick()).
+ *
+ * This function is only called from sched_info_arrive(), rather than
+ * dequeue_task(). Even though a task may be queued and dequeued multiple
+ * times as it is shuffled about, we're really interested in knowing how
+ * long it was from the *first* time it was queued to the time that it
+ * finally hit a cpu.
+ */
+static inline void sched_info_dequeued(struct task_struct *t)
+{
+	t->sched_info.last_queued = 0;
+}
+
+/*
+ * Called when a task finally hits the cpu.  We can now calculate how
+ * long it was waiting to run.  We also note when it began so that we
+ * can keep stats on how long its timeslice is.
+ */
+static void sched_info_arrive(struct task_struct *t)
+{
+	unsigned long long now = sched_clock(), delta = 0;
+
+	if (t->sched_info.last_queued)
+		delta = now - t->sched_info.last_queued;
+	sched_info_dequeued(t);
+	t->sched_info.run_delay += delta;
+	t->sched_info.last_arrival = now;
+	t->sched_info.pcnt++;
+
+	rq_sched_info_arrive(task_rq(t), delta);
+}
+
+/*
+ * Called when a process is queued into either the active or expired
+ * array.  The time is noted and later used to determine how long we
+ * had to wait to reach the cpu.  Since the expired queue will
+ * become the active queue after active queue is empty, without dequeuing
+ * and requeuing any tasks, we are interested in queuing to either. It
+ * is unusual but not impossible for tasks to be dequeued and immediately
+ * requeued in the same or another array: this can happen in sched_yield(),
+ * set_user_nice(), and even load_balance() as it moves tasks from runqueue
+ * to runqueue.
+ *
+ * This function is only called from enqueue_task(), but also only updates
+ * the timestamp if it is not already set.  It's assumed that
+ * sched_info_dequeued() will clear that stamp when appropriate.
+ */
+static inline void sched_info_queued(struct task_struct *t)
+{
+	if (unlikely(sched_info_on()))
+		if (!t->sched_info.last_queued)
+			t->sched_info.last_queued = sched_clock();
+}
+
+/*
+ * Called when a process ceases being the active-running process, either
+ * voluntarily or involuntarily.  Now we can calculate how long we ran.
+ */
+static inline void sched_info_depart(struct task_struct *t)
+{
+	unsigned long long delta = sched_clock() - t->sched_info.last_arrival;
+
+	t->sched_info.cpu_time += delta;
+	rq_sched_info_depart(task_rq(t), delta);
+}
+
+/*
+ * Called when tasks are switched involuntarily due, typically, to expiring
+ * their time slice.  (This may also be called when switching to or from
+ * the idle task.)  We are only called when prev != next.
+ */
+static inline void
+__sched_info_switch(struct task_struct *prev, struct task_struct *next)
+{
+	struct rq *rq = task_rq(prev);
+
+	/*
+	 * prev now departs the cpu.  It's not interesting to record
+	 * stats about how efficient we were at scheduling the idle
+	 * process, however.
+	 */
+	if (prev != rq->idle)
+		sched_info_depart(prev);
+
+	if (next != rq->idle)
+		sched_info_arrive(next);
+}
+static inline void
+sched_info_switch(struct task_struct *prev, struct task_struct *next)
+{
+	if (unlikely(sched_info_on()))
+		__sched_info_switch(prev, next);
+}
+#else
+#define sched_info_queued(t)		do { } while (0)
+#define sched_info_switch(t, next)	do { } while (0)
+#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
+
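
Note that, in the move, the sched_info_*() helpers switch their timestamps from jiffies to sched_clock(), so last_queued/last_arrival and the accumulated run_delay/cpu_time values become nanosecond-scale rather than jiffy-scale. A minimal user-space sketch (illustrative only, not part of the patch) that parses the per-cpu lines emitted by show_schedstat() above and reports the average run delay per scheduling period:

#include <stdio.h>

/*
 * Reads the /proc/schedstat format produced by show_schedstat() above.
 * The last three per-cpu fields are rq_sched_info.cpu_time, .run_delay
 * and .pcnt; after this patch the two time fields are fed from
 * sched_clock() deltas rather than jiffies.
 */
int main(void)
{
	char line[4096];
	unsigned long v[12];
	int cpu;
	FILE *f = fopen("/proc/schedstat", "r");

	if (!f) {
		perror("/proc/schedstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* version, timestamp and domain lines fail the match and are skipped */
		if (sscanf(line, "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
			   &cpu, &v[0], &v[1], &v[2], &v[3], &v[4], &v[5],
			   &v[6], &v[7], &v[8], &v[9], &v[10], &v[11]) != 13)
			continue;
		printf("cpu%d: %lu periods, run_delay %lu, avg %lu per period\n",
		       cpu, v[11], v[10], v[11] ? v[10] / v[11] : 0);
	}
	fclose(f);
	return 0;
}

Tools consuming this file should check the "version" line first, which is exactly what the SCHEDSTAT_VERSION define at the top of the header is for.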

commit 1df21055e34b6a68d62cf0c524b9e52deebd7ead
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:51:58 2007 +0200

    sched: add init_idle_bootup_task()
    
    add the init_idle_bootup_task() callback to the bootup thread,
    unused at the moment. (CFS will use it to switch the scheduling
    class of the boot thread to the idle class)
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4b912e753ca0..61a111fe2b7a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -195,6 +195,7 @@ struct task_struct;
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
diff --git a/init/main.c b/init/main.c
index eb8bdbae4fc7..0eb1c7463fe4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -436,15 +436,16 @@ static void noinline __init_refok rest_init(void)
 
 	/*
 	 * The boot idle thread must execute schedule()
-	 * at least one to get things moving:
+	 * at least once to get things moving:
 	 */
+	init_idle_bootup_task(current);
 	preempt_enable_no_resched();
 	schedule();
 	preempt_disable();
 
 	/* Call into cpu_idle with preempt disabled */
 	cpu_idle();
-} 
+}
 
 /* Check for early params. */
 static int __init do_early_param(char *param, char *val)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7090982350d3..ac4d26241d1e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5018,6 +5018,11 @@ void show_state_filter(unsigned long state_filter)
 		debug_show_all_locks();
 }
 
+void __cpuinit init_idle_bootup_task(struct task_struct *idle)
+{
+	/* nothing yet */
+}
+
 /**
  * init_idle - set up an idle thread for a given CPU
  * @idle: task in question
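
As the changelog says, the hook is a no-op for now; once the CFS scheduling classes are introduced later in the series, this is where the boot thread gets demoted to the idle class. A sketch of the eventual implementation (assuming the idle class object is named idle_sched_class, as in the later CFS patches):

void __cpuinit init_idle_bootup_task(struct task_struct *idle)
{
	/* the boot idle thread must not compete with normal tasks */
	idle->sched_class = &idle_sched_class;
}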

commit 4da1ce6d9c7e2a6d9236bf4dcfd33cf506082794
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:51:58 2007 +0200

    sched: add in_atomic_preempt_off()
    
    add in_atomic_preempt_off() - debugging helper that will
    simplify schedule().
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 7803014f3a11..8d302298a161 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -78,6 +78,19 @@
 # define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
 #endif
 
+#ifdef CONFIG_PREEMPT
+# define PREEMPT_CHECK_OFFSET 1
+#else
+# define PREEMPT_CHECK_OFFSET 0
+#endif
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler)
+ */
+#define in_atomic_preempt_off() \
+		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
+
 #ifdef CONFIG_PREEMPT
 # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
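
The point of the helper: on CONFIG_PREEMPT kernels schedule() has already done its own preempt_disable() by the time it can check for bugs, so a legitimate caller shows a preempt count of exactly PREEMPT_CHECK_OFFSET, not zero. A sketch of the kind of check this enables in schedule() (hypothetical helper name; the actual check is wired up by a later patch):

static inline void schedule_debug(struct task_struct *prev)
{
	/*
	 * Anything beyond PREEMPT_CHECK_OFFSET means the caller was
	 * already atomic (e.g. still holding a spinlock) when it
	 * called schedule().
	 */
	if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state)) {
		printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
		       prev->comm, preempt_count(), prev->pid);
		dump_stack();
	}
}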

commit f64f61145a38f7039e4f1c0b50dcc3fbe70ec28e
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Jul 9 18:51:58 2007 +0200

    sched: remove sched_exit()
    
    remove sched_exit(): the elaborate dance of us trying to recover
    timeslices given to child tasks never really worked.
    
    CFS does not need it either.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/exit.c b/kernel/exit.c
index 5c8ecbaa19a5..6c7699240327 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -182,7 +182,6 @@ void release_task(struct task_struct * p)
 		zap_leader = (leader->exit_signal == -1);
 	}
 
-	sched_exit(p);
 	write_unlock_irq(&tasklist_lock);
 	proc_flush_task(p);
 	release_thread(p);
diff --git a/kernel/sched.c b/kernel/sched.c
index d6624978feb2..7090982350d3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1831,37 +1831,6 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	task_rq_unlock(this_rq, &flags);
 }
 
-/*
- * Potentially available exiting-child timeslices are
- * retrieved here - this way the parent does not get
- * penalized for creating too many threads.
- *
- * (this cannot be used to 'generate' timeslices
- * artificially, because any timeslice recovered here
- * was given away by the parent in the first place.)
- */
-void fastcall sched_exit(struct task_struct *p)
-{
-	unsigned long flags;
-	struct rq *rq;
-
-	/*
-	 * If the child was a (relative-) CPU hog then decrease
-	 * the sleep_avg of the parent as well.
-	 */
-	rq = task_rq_lock(p->parent, &flags);
-	if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
-		p->parent->time_slice += p->time_slice;
-		if (unlikely(p->parent->time_slice > task_timeslice(p)))
-			p->parent->time_slice = task_timeslice(p);
-	}
-	if (p->sleep_avg < p->parent->sleep_avg)
-		p->parent->sleep_avg = p->parent->sleep_avg /
-		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
-		(EXIT_WEIGHT + 1);
-	task_rq_unlock(rq, &flags);
-}
-
 /**
  * prepare_task_switch - prepare to switch tasks
  * @rq: the runqueue preparing to switch