Patches contributed by Eötvös Loránd University


commit 1e819950660e6a811b549422ffb652273257e45e
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Oct 15 17:00:13 2007 +0200

    sched: optimize schedule() a bit on SMP
    
    optimize schedule() a bit on SMP by moving the rq-clock update
    outside the rq lock.
    
    code size is the same:
    
          text    data     bss     dec     hex filename
         25725    2666      96   28487    6f47 sched.o.before
         25725    2666      96   28487    6f47 sched.o.after
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Reviewed-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/sched.c b/kernel/sched.c
index e717047be5cf..4f13d379bea5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3481,9 +3481,13 @@ asmlinkage void __sched schedule(void)
 
 	schedule_debug(prev);
 
-	spin_lock_irq(&rq->lock);
-	clear_tsk_need_resched(prev);
+	/*
+	 * Do the rq-clock update outside the rq lock:
+	 */
+	local_irq_disable();
 	__update_rq_clock(rq);
+	spin_lock(&rq->lock);
+	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
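
The win here is critical-section narrowing: __update_rq_clock() only
requires interrupts to be disabled, not the rq lock, so spin_lock_irq()
is split into local_irq_disable() + spin_lock() and the clock update runs
before the lock is taken. A rough userspace analogue of the pattern
(illustrative names; a pthread mutex stands in for the rq spinlock, and
the expensive sampling is what moves out of the critical section):

    #include <pthread.h>
    #include <time.h>

    struct runqueue {
        pthread_mutex_t lock;
        struct timespec clock;
        int need_resched;
    };

    static void schedule_step(struct runqueue *rq)
    {
        /* Sample the clock before taking the lock: the sampling
         * itself touches no state the lock protects. */
        struct timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now);

        pthread_mutex_lock(&rq->lock);
        rq->clock = now;        /* publish under the lock */
        rq->need_resched = 0;   /* rest of the critical section */
        pthread_mutex_unlock(&rq->lock);
    }

    int main(void)
    {
        struct runqueue rq = { .lock = PTHREAD_MUTEX_INITIALIZER };
        schedule_step(&rq);
        return rq.need_resched;
    }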

commit 647e7cac2d215fb8890f79252d7eaee3d6743d66
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Oct 15 17:00:13 2007 +0200

    sched: vslice fixups for non-0 nice levels
    
    Make vslice accurate wrt nice levels, and add some comments
    while we're at it.
    
    Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Reviewed-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 32fd976f8566..1f14b56d0d00 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -217,6 +217,15 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  * Scheduling class statistics methods:
  */
 
+
+/*
+ * The idea is to set a period in which each task runs once.
+ *
+ * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
+ * this period because otherwise the slices get too small.
+ *
+ * p = (nr <= nl) ? l : l*nr/nl
+ */
 static u64 __sched_period(unsigned long nr_running)
 {
 	u64 period = sysctl_sched_latency;
@@ -230,27 +239,45 @@ static u64 __sched_period(unsigned long nr_running)
 	return period;
 }
 
+/*
+ * We calculate the wall-time slice from the period by taking a part
+ * proportional to the weight.
+ *
+ * s = p*w/rw
+ */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	u64 period = __sched_period(cfs_rq->nr_running);
+	u64 slice = __sched_period(cfs_rq->nr_running);
 
-	period *= se->load.weight;
-	do_div(period, cfs_rq->load.weight);
+	slice *= se->load.weight;
+	do_div(slice, cfs_rq->load.weight);
 
-	return period;
+	return slice;
 }
 
-static u64 __sched_vslice(unsigned long nr_running)
+/*
+ * We calculate the vruntime slice.
+ *
+ * vs = s/w = p/rw
+ */
+static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
 {
-	unsigned long period = sysctl_sched_latency;
-	unsigned long nr_latency = sysctl_sched_nr_latency;
+	u64 vslice = __sched_period(nr_running);
 
-	if (unlikely(nr_running > nr_latency))
-		nr_running = nr_latency;
+	do_div(vslice, rq_weight);
 
-	period /= nr_running;
+	return vslice;
+}
 
-	return (u64)period;
+static u64 sched_vslice(struct cfs_rq *cfs_rq)
+{
+	return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
+}
+
+static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
+			cfs_rq->nr_running + 1);
 }
 
 /*
@@ -469,10 +496,10 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 			vruntime >>= 1;
 		}
 	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-		vruntime += __sched_vslice(cfs_rq->nr_running)/2;
+		vruntime += sched_vslice(cfs_rq)/2;
 
 	if (initial && sched_feat(START_DEBIT))
-		vruntime += __sched_vslice(cfs_rq->nr_running + 1);
+		vruntime += sched_vslice_add(cfs_rq, se);
 
 	if (!initial) {
 		if (sched_feat(NEW_FAIR_SLEEPERS))
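
In the new comments' notation, l is sysctl_sched_latency, nl is
sysctl_sched_nr_latency, w is the entity's weight and rw the runqueue's
total weight. A toy calculation with illustrative constants (not the
kernel defaults) shows the three formulas at work:

    #include <stdio.h>
    #include <stdint.h>

    #define LATENCY     20000000ULL  /* l: a 20ms period, in ns */
    #define NR_LATENCY  5            /* nl */

    static uint64_t sched_period(unsigned long nr_running)
    {
        /* p = (nr <= nl) ? l : l*nr/nl */
        if (nr_running <= NR_LATENCY)
            return LATENCY;
        return LATENCY * nr_running / NR_LATENCY;
    }

    int main(void)
    {
        unsigned long nr = 8;           /* eight runnable tasks      */
        uint64_t w = 1024, rw = 8192;   /* one entity's weight, total */

        uint64_t p  = sched_period(nr); /* 20ms * 8/5 = 32ms         */
        uint64_t s  = p * w / rw;       /* s = p*w/rw: 4ms wall time */
        uint64_t vs = p / rw;           /* vs = s/w = p/rw           */

        printf("p=%llu s=%llu vs=%llu (ns)\n",
               (unsigned long long)p, (unsigned long long)s,
               (unsigned long long)vs);
        return 0;
    }

Since vs = p/rw does not mention w, every entity on the queue advances
its vruntime by the same amount per period regardless of nice level. The
old __sched_vslice() divided the period by nr_running instead, which is
only equivalent when all weights are equal, i.e. at nice 0.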

commit 3a2520157234d58abce89526756a32c272824f3f
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Oct 15 17:00:12 2007 +0200

    sched: whitespace cleanups
    
    more whitespace cleanups. No code changed:
    
          text    data     bss     dec     hex filename
         26553    2790     288   29631    73bf sched.o.before
         26553    2790     288   29631    73bf sched.o.after
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Reviewed-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/sched.c b/kernel/sched.c
index f582e2cedb09..e717047be5cf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -193,17 +193,17 @@ static struct sched_entity *init_sched_entity_p[NR_CPUS];
 static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
 
 /* Default task group.
- * 	Every task in system belong to this group at bootup.
+ *	Every task in system belong to this group at bootup.
  */
-struct task_grp init_task_grp =  {
-				.se     = init_sched_entity_p,
-				.cfs_rq = init_cfs_rq_p,
-				 };
+struct task_grp init_task_grp = {
+	.se     = init_sched_entity_p,
+	.cfs_rq = init_cfs_rq_p,
+};
 
 #ifdef CONFIG_FAIR_USER_SCHED
-#define INIT_TASK_GRP_LOAD	2*NICE_0_LOAD
+# define INIT_TASK_GRP_LOAD	2*NICE_0_LOAD
 #else
-#define INIT_TASK_GRP_LOAD	NICE_0_LOAD
+# define INIT_TASK_GRP_LOAD	NICE_0_LOAD
 #endif
 
 static int init_task_grp_load = INIT_TASK_GRP_LOAD;
@@ -6516,25 +6516,25 @@ void __init sched_init(void)
 		init_cfs_rq(&rq->cfs, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
-	 	{
- 			struct cfs_rq *cfs_rq = &per_cpu(init_cfs_rq, i);
- 			struct sched_entity *se =
- 					 &per_cpu(init_sched_entity, i);
-
- 			init_cfs_rq_p[i] = cfs_rq;
- 			init_cfs_rq(cfs_rq, rq);
- 			cfs_rq->tg = &init_task_grp;
- 			list_add(&cfs_rq->leaf_cfs_rq_list,
+		{
+			struct cfs_rq *cfs_rq = &per_cpu(init_cfs_rq, i);
+			struct sched_entity *se =
+					 &per_cpu(init_sched_entity, i);
+
+			init_cfs_rq_p[i] = cfs_rq;
+			init_cfs_rq(cfs_rq, rq);
+			cfs_rq->tg = &init_task_grp;
+			list_add(&cfs_rq->leaf_cfs_rq_list,
 							 &rq->leaf_cfs_rq_list);
 
- 			init_sched_entity_p[i] = se;
- 			se->cfs_rq = &rq->cfs;
- 			se->my_q = cfs_rq;
- 			se->load.weight = init_task_grp_load;
+			init_sched_entity_p[i] = se;
+			se->cfs_rq = &rq->cfs;
+			se->my_q = cfs_rq;
+			se->load.weight = init_task_grp_load;
 			se->load.inv_weight =
 				 div64_64(1ULL<<32, init_task_grp_load);
- 			se->parent = NULL;
- 		}
+			se->parent = NULL;
+		}
 		init_task_grp.shares = init_task_grp_load;
 #endif
 
@@ -6840,9 +6840,9 @@ void sched_destroy_group(struct task_grp *tg)
 }
 
 /* change task's runqueue when it moves between groups.
- * 	The caller of this function should have put the task in its new group
- * 	by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
- * 	reflect its new group.
+ *	The caller of this function should have put the task in its new group
+ *	by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
+ *	reflect its new group.
  */
 void sched_move_task(struct task_struct *tsk)
 {
@@ -6915,4 +6915,4 @@ int sched_group_set_shares(struct task_grp *tg, unsigned long shares)
 	return 0;
 }
 
-#endif 	/* CONFIG_FAIR_GROUP_SCHED */
+#endif	/* CONFIG_FAIR_GROUP_SCHED */

commit 5522d5d5f70005faeffff3ffc0cfa8eec0155de4
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Oct 15 17:00:12 2007 +0200

    sched: mark scheduling classes as const
    
    mark scheduling classes as const. This speeds up the code
    a bit and shrinks it:
    
       text    data     bss     dec     hex filename
      40027    4018     292   44337    ad31 sched.o.before
      40190    3842     292   44324    ad24 sched.o.after
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Reviewed-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 97f736b749c2..47e3717a0356 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -863,7 +863,7 @@ struct rq;
 struct sched_domain;
 
 struct sched_class {
-	struct sched_class *next;
+	const struct sched_class *next;
 
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
@@ -949,7 +949,7 @@ struct task_struct {
 
 	int prio, static_prio, normal_prio;
 	struct list_head run_list;
-	struct sched_class *sched_class;
+	const struct sched_class *sched_class;
 	struct sched_entity se;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
diff --git a/kernel/sched.c b/kernel/sched.c
index e1657e0c86d0..f582e2cedb09 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -847,9 +847,9 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      int *this_best_prio, struct rq_iterator *iterator);
 
 #include "sched_stats.h"
-#include "sched_rt.c"
-#include "sched_fair.c"
 #include "sched_idletask.c"
+#include "sched_fair.c"
+#include "sched_rt.c"
 #ifdef CONFIG_SCHED_DEBUG
 # include "sched_debug.c"
 #endif
@@ -2251,7 +2251,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      struct sched_domain *sd, enum cpu_idle_type idle,
 		      int *all_pinned)
 {
-	struct sched_class *class = sched_class_highest;
+	const struct sched_class *class = sched_class_highest;
 	unsigned long total_load_moved = 0;
 	int this_best_prio = this_rq->curr->prio;
 
@@ -2276,7 +2276,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 			 struct sched_domain *sd, enum cpu_idle_type idle)
 {
-	struct sched_class *class;
+	const struct sched_class *class;
 	int this_best_prio = MAX_PRIO;
 
 	for (class = sched_class_highest; class; class = class->next)
@@ -3432,7 +3432,7 @@ static inline void schedule_debug(struct task_struct *prev)
 static inline struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev)
 {
-	struct sched_class *class;
+	const struct sched_class *class;
 	struct task_struct *p;
 
 	/*
@@ -6504,13 +6504,6 @@ void __init sched_init(void)
 	int highest_cpu = 0;
 	int i, j;
 
-	/*
-	 * Link up the scheduling class hierarchy:
-	 */
-	rt_sched_class.next = &fair_sched_class;
-	fair_sched_class.next = &idle_sched_class;
-	idle_sched_class.next = NULL;
-
 	for_each_possible_cpu(i) {
 		struct rt_prio_array *array;
 		struct rq *rq;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index de13a6f5b977..32fd976f8566 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -76,8 +76,6 @@ const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
  */
 const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;
 
-extern struct sched_class fair_sched_class;
-
 /**************************************************************
  * CFS operations on generic schedulable entities:
  */
@@ -1031,7 +1029,8 @@ static void set_curr_task_fair(struct rq *rq)
 /*
  * All the scheduling class methods:
  */
-struct sched_class fair_sched_class __read_mostly = {
+static const struct sched_class fair_sched_class = {
+	.next			= &idle_sched_class,
 	.enqueue_task		= enqueue_task_fair,
 	.dequeue_task		= dequeue_task_fair,
 	.yield_task		= yield_task_fair,
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 5ebf829cdd73..6e2ead41516e 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -57,7 +57,8 @@ static void set_curr_task_idle(struct rq *rq)
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
-static struct sched_class idle_sched_class __read_mostly = {
+const struct sched_class idle_sched_class = {
+	/* .next is NULL */
 	/* no enqueue/yield_task for idle tasks */
 
 	/* dequeue is not valid, we print a debug message there: */
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index e1d5f1c8b532..dbe4d8cf80d6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -225,7 +225,8 @@ static void set_curr_task_rt(struct rq *rq)
 	p->se.exec_start = rq->clock;
 }
 
-static struct sched_class rt_sched_class __read_mostly = {
+const struct sched_class rt_sched_class = {
+	.next			= &fair_sched_class,
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
 	.yield_task		= yield_task_rt,
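
Two things make the const-ification work: the class objects move out of
writable .data into .rodata (which size(1) counts under "text", hence
text grows while data shrinks above), and the .next chain that
sched_init() used to wire up at boot is now linked at compile time. That
is also why the #includes are reordered: each class must be defined
before the class whose .next points to it. A standalone sketch of the
pattern (reduced method table, illustrative names):

    #include <stdio.h>

    struct sched_class {
        const struct sched_class *next;
        const char *name;
        /* ...the real struct carries function pointers... */
    };

    /* Lowest priority first, so every .next refers to an object
     * that is already defined -- same reason the includes moved. */
    static const struct sched_class idle_sched_class = {
        .next = NULL, .name = "idle",
    };
    static const struct sched_class fair_sched_class = {
        .next = &idle_sched_class, .name = "fair",
    };
    static const struct sched_class rt_sched_class = {
        .next = &fair_sched_class, .name = "rt",
    };

    int main(void)
    {
        const struct sched_class *class;

        for (class = &rt_sched_class; class; class = class->next)
            printf("%s\n", class->name);
        return 0;
    }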

commit b39c5dd7f938775fd0a1df5b4b1c26f854d15231
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Oct 15 17:00:12 2007 +0200

    sched: cleanup, remove stale comment
    
    cleanup, remove stale comment.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Reviewed-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ae2d4b08e782..c44a295eee0e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -496,7 +496,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	update_curr(cfs_rq);
 
 	if (wakeup) {
-		/* se->vruntime += cfs_rq->min_vruntime; */
 		place_entity(cfs_rq, se, 0);
 		enqueue_sleeper(cfs_rq, se);
 	}

commit 2d72376b3af1e7d4d4515ebfd0f4383f2e92c343
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Oct 15 17:00:12 2007 +0200

    sched: clean up schedstats, cnt -> count
    
    rename all 'cnt' fields and variables to the less yucky 'count' name.
    
    yuckage noticed by Andrew Morton.
    
    no change in code, other than the /proc/sched_debug bkl_count string
    getting a bit larger:
    
       text    data     bss     dec     hex filename
      38236    3506      24   41766    a326 sched.o.before
      38240    3506      24   41770    a32a sched.o.after
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Reviewed-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/fs/proc/base.c b/fs/proc/base.c
index 19489b0d5554..e5d0953d4db1 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -304,7 +304,7 @@ static int proc_pid_schedstat(struct task_struct *task, char *buffer)
 	return sprintf(buffer, "%llu %llu %lu\n",
 			task->sched_info.cpu_time,
 			task->sched_info.run_delay,
-			task->sched_info.pcnt);
+			task->sched_info.pcount);
 }
 #endif
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c33227b0f82..d5daca4bcc6b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -614,7 +614,7 @@ struct reclaim_state;
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
 	/* cumulative counters */
-	unsigned long pcnt;	      /* # of times run on this cpu */
+	unsigned long pcount;	      /* # of times run on this cpu */
 	unsigned long long cpu_time,  /* time spent on the cpu */
 			   run_delay; /* time spent waiting on a runqueue */
 
@@ -623,7 +623,7 @@ struct sched_info {
 			   last_queued;	/* when we were last queued to run */
 #ifdef CONFIG_SCHEDSTATS
 	/* BKL stats */
-	unsigned long bkl_cnt;
+	unsigned long bkl_count;
 #endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
@@ -759,7 +759,7 @@ struct sched_domain {
 
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
-	unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_count[CPU_MAX_IDLE_TYPES];
 	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
 	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
 	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
@@ -769,17 +769,17 @@ struct sched_domain {
 	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
 
 	/* Active load balancing */
-	unsigned long alb_cnt;
+	unsigned long alb_count;
 	unsigned long alb_failed;
 	unsigned long alb_pushed;
 
 	/* SD_BALANCE_EXEC stats */
-	unsigned long sbe_cnt;
+	unsigned long sbe_count;
 	unsigned long sbe_balanced;
 	unsigned long sbe_pushed;
 
 	/* SD_BALANCE_FORK stats */
-	unsigned long sbf_cnt;
+	unsigned long sbf_count;
 	unsigned long sbf_balanced;
 	unsigned long sbf_pushed;
 
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 81e697829633..09e9574eeb26 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -119,7 +119,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 	 * No locking available for sched_info (and too expensive to add one)
 	 * Mitigate by taking snapshot of values
 	 */
-	t1 = tsk->sched_info.pcnt;
+	t1 = tsk->sched_info.pcount;
 	t2 = tsk->sched_info.run_delay;
 	t3 = tsk->sched_info.cpu_time;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index cd2b4942fe35..ba9fa6c0ab65 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -349,19 +349,19 @@ struct rq {
 	unsigned long yld_exp_empty;
 	unsigned long yld_act_empty;
 	unsigned long yld_both_empty;
-	unsigned long yld_cnt;
+	unsigned long yld_count;
 
 	/* schedule() stats */
 	unsigned long sched_switch;
-	unsigned long sched_cnt;
+	unsigned long sched_count;
 	unsigned long sched_goidle;
 
 	/* try_to_wake_up() stats */
-	unsigned long ttwu_cnt;
+	unsigned long ttwu_count;
 	unsigned long ttwu_local;
 
 	/* BKL stats */
-	unsigned long bkl_cnt;
+	unsigned long bkl_count;
 #endif
 	struct lock_class_key rq_lock_key;
 };
@@ -1481,7 +1481,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
 	new_cpu = cpu;
 
-	schedstat_inc(rq, ttwu_cnt);
+	schedstat_inc(rq, ttwu_count);
 	if (cpu == this_cpu) {
 		schedstat_inc(rq, ttwu_local);
 		goto out_set_cpu;
@@ -2637,7 +2637,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
-	schedstat_inc(sd, lb_cnt[idle]);
+	schedstat_inc(sd, lb_count[idle]);
 
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
@@ -2790,7 +2790,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
-	schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
+	schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
 				   &sd_idle, &cpus, NULL);
@@ -2924,7 +2924,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	}
 
 	if (likely(sd)) {
-		schedstat_inc(sd, alb_cnt);
+		schedstat_inc(sd, alb_count);
 
 		if (move_one_task(target_rq, target_cpu, busiest_rq,
 				  sd, CPU_IDLE))
@@ -3414,11 +3414,11 @@ static inline void schedule_debug(struct task_struct *prev)
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
-	schedstat_inc(this_rq(), sched_cnt);
+	schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
 	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), bkl_cnt);
-		schedstat_inc(prev, sched_info.bkl_cnt);
+		schedstat_inc(this_rq(), bkl_count);
+		schedstat_inc(prev, sched_info.bkl_count);
 	}
 #endif
 }
@@ -4558,7 +4558,7 @@ asmlinkage long sys_sched_yield(void)
 {
 	struct rq *rq = this_rq_lock();
 
-	schedstat_inc(rq, yld_cnt);
+	schedstat_inc(rq, yld_count);
 	current->sched_class->yield_task(rq);
 
 	/*
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 4659c90c3418..be79cd6d9e80 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -137,8 +137,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SCHEDSTATS
-	SEQ_printf(m, "  .%-30s: %ld\n", "bkl_cnt",
-			rq->bkl_cnt);
+	SEQ_printf(m, "  .%-30s: %ld\n", "bkl_count",
+			rq->bkl_count);
 #endif
 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
 			cfs_rq->nr_spread_over);
@@ -342,7 +342,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.exec_max);
 	PN(se.slice_max);
 	PN(se.wait_max);
-	P(sched_info.bkl_cnt);
+	P(sched_info.bkl_count);
 #endif
 	SEQ_printf(m, "%-25s:%20Ld\n",
 		   "nr_switches", (long long)(p->nvcsw + p->nivcsw));
@@ -370,7 +370,7 @@ void proc_sched_set_task(struct task_struct *p)
 	p->se.exec_max			= 0;
 	p->se.slice_max			= 0;
 	p->se.wait_max			= 0;
-	p->sched_info.bkl_cnt		= 0;
+	p->sched_info.bkl_count		= 0;
 #endif
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 1d9ec98c38de..1c084842c3e7 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -16,18 +16,18 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		struct rq *rq = cpu_rq(cpu);
 #ifdef CONFIG_SMP
 		struct sched_domain *sd;
-		int dcnt = 0;
+		int dcount = 0;
 #endif
 
 		/* runqueue-specific stats */
 		seq_printf(seq,
 		    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
 		    cpu, rq->yld_both_empty,
-		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
-		    rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
-		    rq->ttwu_cnt, rq->ttwu_local,
+		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
+		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
+		    rq->ttwu_count, rq->ttwu_local,
 		    rq->rq_sched_info.cpu_time,
-		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
+		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
 
 		seq_printf(seq, "\n");
 
@@ -39,12 +39,12 @@ static int show_schedstat(struct seq_file *seq, void *v)
 			char mask_str[NR_CPUS];
 
 			cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
-			seq_printf(seq, "domain%d %s", dcnt++, mask_str);
+			seq_printf(seq, "domain%d %s", dcount++, mask_str);
 			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
 					itype++) {
 				seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
 						"%lu",
-				    sd->lb_cnt[itype],
+				    sd->lb_count[itype],
 				    sd->lb_balanced[itype],
 				    sd->lb_failed[itype],
 				    sd->lb_imbalance[itype],
@@ -55,9 +55,9 @@ static int show_schedstat(struct seq_file *seq, void *v)
 			}
 			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
 			    " %lu %lu %lu\n",
-			    sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
-			    sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
-			    sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
+			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
+			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
+			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
 			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
 			    sd->ttwu_move_balance);
 		}
@@ -101,7 +101,7 @@ rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
 {
 	if (rq) {
 		rq->rq_sched_info.run_delay += delta;
-		rq->rq_sched_info.pcnt++;
+		rq->rq_sched_info.pcount++;
 	}
 }
 
@@ -164,7 +164,7 @@ static void sched_info_arrive(struct task_struct *t)
 	sched_info_dequeued(t);
 	t->sched_info.run_delay += delta;
 	t->sched_info.last_arrival = now;
-	t->sched_info.pcnt++;
+	t->sched_info.pcount++;
 
 	rq_sched_info_arrive(task_rq(t), delta);
 }

commit 57cb499df26d80ec11cd49e56d20835334ac4ab9
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Oct 15 17:00:11 2007 +0200

    sched: remove set_leftmost()
    
    Lee Schermerhorn noticed that set_leftmost() contains dead code: the
    sched_entity it looks up via rb_entry() is never used. Remove it.
    
    Reported-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Reviewed-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 91664d665c0f..48c69211888f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -124,16 +124,6 @@ max_vruntime(u64 min_vruntime, u64 vruntime)
 	return min_vruntime;
 }
 
-static inline void
-set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
-{
-	struct sched_entity *se;
-
-	cfs_rq->rb_leftmost = leftmost;
-	if (leftmost)
-		se = rb_entry(leftmost, struct sched_entity, run_node);
-}
-
 static inline s64
 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -175,7 +165,7 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * used):
 	 */
 	if (leftmost)
-		set_leftmost(cfs_rq, &se->run_node);
+		cfs_rq->rb_leftmost = &se->run_node;
 
 	rb_link_node(&se->run_node, parent, link);
 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
@@ -185,7 +175,7 @@ static void
 __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	if (cfs_rq->rb_leftmost == &se->run_node)
-		set_leftmost(cfs_rq, rb_next(&se->run_node));
+		cfs_rq->rb_leftmost = rb_next(&se->run_node);
 
 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 }

commit 02e4bac2a5b097e23d757bf2953740b3d51b7976
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Oct 15 17:00:11 2007 +0200

    sched: fix sched_fork()
    
    fix sched_fork(): large latencies at new task creation time, because
    the child's ->vruntime was not fixed up cross-CPU if the parent got
    migrated after the child's CPU was set up.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Reviewed-by: Thomas Gleixner <tglx@linutronix.de>

diff --git a/kernel/sched.c b/kernel/sched.c
index 744bd5050f10..36484da963f9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1657,7 +1657,7 @@ void sched_fork(struct task_struct *p, int clone_flags)
 #ifdef CONFIG_SMP
 	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
 #endif
-	__set_task_cpu(p, cpu);
+	set_task_cpu(p, cpu);
 
 	/*
 	 * Make sure we do not leak PI boosting priority to the child:

commit b8487b924177385e3932f846f430b73ce8e69bba
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Oct 15 17:00:11 2007 +0200

    sched: fix sign check error in place_entity()
    
    fix sign check error in place_entity(): we'd get excessive
    latencies due to negatives being converted to large u64s.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 62a9ee8db13e..2bd9625fa62d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -483,7 +483,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		if (sched_feat(NEW_FAIR_SLEEPERS))
 			vruntime -= sysctl_sched_latency;
 
-		vruntime = max(vruntime, se->vruntime);
+		vruntime = max_t(s64, vruntime, se->vruntime);
 	}
 
 	se->vruntime = vruntime;
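
To see the failure mode: vruntime is a u64, so the
`vruntime -= sysctl_sched_latency` a few lines up can wrap to an enormous
unsigned value, and an unsigned max() then prefers the wrapped garbage
over se->vruntime. max_t(s64, ...) forces a signed comparison instead. A
minimal demonstration (assumes two's-complement wrap-around, as the
kernel does):

    #include <stdio.h>
    #include <stdint.h>

    #define max_of(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        uint64_t se_vruntime = 1000;
        uint64_t vruntime = 500;

        vruntime -= 2000;  /* "negative": wraps to 2^64 - 1500 */

        /* Unsigned compare picks the huge wrapped value... */
        uint64_t bad = max_of(vruntime, se_vruntime);
        /* ...signed compare treats it as -1500 and picks 1000. */
        int64_t good = max_of((int64_t)vruntime, (int64_t)se_vruntime);

        printf("unsigned max: %llu\n", (unsigned long long)bad);
        printf("signed max:   %lld\n", (long long)good);
        return 0;
    }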

commit 94359f05cb7e1fed0deccc83ebc30a1175a9ae16
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Oct 15 17:00:11 2007 +0200

    sched: undo some of the recent changes
    
    undo some of the recent changes that are not needed after all,
    such as last_min_vruntime.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d74830cc51eb..2c33227b0f82 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -908,7 +908,6 @@ struct sched_entity {
 	u64			sum_exec_runtime;
 	u64			vruntime;
 	u64			prev_sum_exec_runtime;
-	u64			last_min_vruntime;
 
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
diff --git a/kernel/sched.c b/kernel/sched.c
index c779bf9d3552..744bd5050f10 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1616,7 +1616,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
-	p->se.last_min_vruntime		= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start		= 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0228de186503..62a9ee8db13e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -480,14 +480,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		vruntime += __sched_vslice(cfs_rq->nr_running + 1);
 
 	if (!initial) {
-		if (sched_feat(NEW_FAIR_SLEEPERS)) {
-			s64 latency = cfs_rq->min_vruntime - se->vruntime;
-			if (latency < 0 || !cfs_rq->nr_running)
-				latency = 0;
-			else
-				latency = min_t(s64, latency, sysctl_sched_latency);
-			vruntime -= latency;
-		}
+		if (sched_feat(NEW_FAIR_SLEEPERS))
+			vruntime -= sysctl_sched_latency;
+
 		vruntime = max(vruntime, se->vruntime);
 	}
 
@@ -531,8 +526,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 				se->block_start = rq_of(cfs_rq)->clock;
 		}
 #endif
-		/* se->vruntime = entity_key(cfs_rq, se); */
-		se->last_min_vruntime = cfs_rq->min_vruntime;
 	}
 
 	if (se != cfs_rq->curr)