Patches contributed by Eötvös Loránd University


commit 326e96b92306b7af24a3608ec01156cba17a3fc1
Author: Ingo Molnar <mingo@elte.hu>
Date:   Sun Jan 27 08:03:54 2008 +0100

    printk: revert ktime_get() timestamps
    
    revert 19ef9309273d26cb005cb23e6a370353dca91099.
    
    Kevin Winchester reported a lockup during X startup and bisected
    it to this commit.
    
    Reported-by: Kevin Winchester <kjwinchester@gmail.com>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/printk.c b/kernel/printk.c
index 423a8c765a57..3b7c968d0ef9 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -702,9 +702,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 					loglev_char = default_message_loglevel
 						+ '0';
 				}
-				t = 0;
-				if (system_state != SYSTEM_BOOTING)
-					t = ktime_to_ns(ktime_get());
+				t = cpu_clock(printk_cpu);
 				nanosec_rem = do_div(t, 1000000000);
 				tlen = sprintf(tbuf,
 						"<%c>[%5lu.%06lu] ",

commit 19ef9309273d26cb005cb23e6a370353dca91099
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Jan 25 21:08:34 2008 +0100

    printk: use ktime_get()
    
    printk timestamps: use ktime_get().
    
    Some platforms have a functioning clocksource only after they are
    done with early bootup, so delay this until we are out of the
    SYSTEM_BOOTING state.
    
    it's also inherently safe now, as any bugs in this area will be
    caught by the printk recursion checks.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/printk.c b/kernel/printk.c
index 3b7c968d0ef9..423a8c765a57 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -702,7 +702,9 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 					loglev_char = default_message_loglevel
 						+ '0';
 				}
-				t = cpu_clock(printk_cpu);
+				t = 0;
+				if (system_state != SYSTEM_BOOTING)
+					t = ktime_to_ns(ktime_get());
 				nanosec_rem = do_div(t, 1000000000);
 				tlen = sprintf(tbuf,
 						"<%c>[%5lu.%06lu] ",

commit 90739081ef8d5495d50abba9c5d333be9acd872a
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Jan 25 21:08:34 2008 +0100

    softlockup: fix signedness
    
    fix softlockup tunables signedness.
    
    mark tunables read-mostly.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index dfc76e172f3f..53534f90a96a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -269,10 +269,10 @@ extern void softlockup_tick(void);
 extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern int softlockup_thresh;
+extern unsigned long  softlockup_thresh;
 extern unsigned long sysctl_hung_task_check_count;
 extern unsigned long sysctl_hung_task_timeout_secs;
-extern long sysctl_hung_task_warnings;
+extern unsigned long sysctl_hung_task_warnings;
 #else
 static inline void softlockup_tick(void)
 {
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 02f0ad534441..c1d76552446e 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -24,8 +24,8 @@ static DEFINE_PER_CPU(unsigned long, touch_timestamp);
 static DEFINE_PER_CPU(unsigned long, print_timestamp);
 static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
 
-static int did_panic;
-int softlockup_thresh = 60;
+static int __read_mostly did_panic;
+unsigned long __read_mostly softlockup_thresh = 60;
 
 static int
 softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@ -121,14 +121,14 @@ void softlockup_tick(void)
 /*
  * Have a reasonable limit on the number of tasks checked:
  */
-unsigned long sysctl_hung_task_check_count = 1024;
+unsigned long __read_mostly sysctl_hung_task_check_count = 1024;
 
 /*
  * Zero means infinite timeout - no checking done:
  */
-unsigned long sysctl_hung_task_timeout_secs = 120;
+unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;
 
-long sysctl_hung_task_warnings = 10;
+unsigned long __read_mostly sysctl_hung_task_warnings = 10;
 
 /*
  * Only do the hung-tasks check on one CPU:
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 5418ef61e16e..8e96558cb8f3 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -772,9 +772,9 @@ static struct ctl_table kern_table[] = {
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "softlockup_thresh",
 		.data		= &softlockup_thresh,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
+		.proc_handler	= &proc_doulongvec_minmax,
 		.strategy	= &sysctl_intvec,
 		.extra1		= &one,
 		.extra2		= &sixty,
@@ -783,27 +783,27 @@ static struct ctl_table kern_table[] = {
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "hung_task_check_count",
 		.data		= &sysctl_hung_task_check_count,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
+		.proc_handler	= &proc_doulongvec_minmax,
 		.strategy	= &sysctl_intvec,
 	},
 	{
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "hung_task_timeout_secs",
 		.data		= &sysctl_hung_task_timeout_secs,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
+		.proc_handler	= &proc_doulongvec_minmax,
 		.strategy	= &sysctl_intvec,
 	},
 	{
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "hung_task_warnings",
 		.data		= &sysctl_hung_task_warnings,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
+		.proc_handler	= &proc_doulongvec_minmax,
 		.strategy	= &sysctl_intvec,
 	},
 #endif
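
The underlying bug class is a width mismatch as much as a signedness one:
with .maxlen = sizeof(int), the proc handler moves only 4 of the 8 bytes
of an unsigned long on 64-bit targets, so stale upper bits survive. A
userspace sketch of the effect (the bit patterns are made up, and a
64-bit little-endian long is assumed):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned long thresh = 0xdeadbeefcafef00dUL; /* pretend stale contents */
        int val = 60;

        /* what proc_dointvec did with .maxlen = sizeof(int): a 4-byte store */
        memcpy(&thresh, &val, sizeof(int));
        printf("int-sized store:  %#lx\n", thresh);  /* upper half keeps its old bits */

        /* what proc_doulongvec_minmax does with sizeof(unsigned long): full width */
        thresh = 60;
        printf("long-sized store: %#lx\n", thresh);  /* 0x3c */
        return 0;
    }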

commit 6478d8800b75253b2a934ddcb734e13ade023ad0
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Jan 25 21:08:33 2008 +0100

    sched: remove the !PREEMPT_BKL code
    
    remove the !PREEMPT_BKL code.
    
    this removes 160 lines of legacy code.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8d302298a161..2961ec788046 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -72,11 +72,7 @@
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
 
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-#else
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-#endif
+#define in_atomic()		((preempt_count() & ~PREEMPT_ACTIVE) != 0)
 
 #ifdef CONFIG_PREEMPT
 # define PREEMPT_CHECK_OFFSET 1
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 58962c51dee1..aab3a4cff4e1 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -17,22 +17,10 @@ extern void __lockfunc __release_kernel_lock(void);
 		__release_kernel_lock();	\
 } while (0)
 
-/*
- * Non-SMP kernels will never block on the kernel lock,
- * so we are better off returning a constant zero from
- * reacquire_kernel_lock() so that the compiler can see
- * it at compile-time.
- */
-#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
-# define return_value_on_smp return
-#else
-# define return_value_on_smp
-#endif
-
 static inline int reacquire_kernel_lock(struct task_struct *task)
 {
 	if (unlikely(task->lock_depth >= 0))
-		return_value_on_smp __reacquire_kernel_lock();
+		return __reacquire_kernel_lock();
 	return 0;
 }
 
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 4420ef427f83..0669b70fa6a3 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -52,10 +52,6 @@ config PREEMPT
 
 endchoice
 
-config PREEMPT_BKL
-	def_bool y
-	depends on SMP || PREEMPT
-
 config RCU_TRACE
 	bool "Enable tracing for RCU - currently stats in debugfs"
 	select DEBUG_FS
diff --git a/kernel/sched.c b/kernel/sched.c
index 22712b2e058a..629614ad0358 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3955,10 +3955,9 @@ EXPORT_SYMBOL(schedule);
 asmlinkage void __sched preempt_schedule(void)
 {
 	struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
 	struct task_struct *task = current;
 	int saved_lock_depth;
-#endif
+
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
 	 * we do not want to preempt the current task. Just return..
@@ -3974,14 +3973,10 @@ asmlinkage void __sched preempt_schedule(void)
 		 * clear ->lock_depth so that schedule() doesnt
 		 * auto-release the semaphore:
 		 */
-#ifdef CONFIG_PREEMPT_BKL
 		saved_lock_depth = task->lock_depth;
 		task->lock_depth = -1;
-#endif
 		schedule();
-#ifdef CONFIG_PREEMPT_BKL
 		task->lock_depth = saved_lock_depth;
-#endif
 		sub_preempt_count(PREEMPT_ACTIVE);
 
 		/*
@@ -4002,10 +3997,9 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
 	struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
 	struct task_struct *task = current;
 	int saved_lock_depth;
-#endif
+
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
@@ -4017,16 +4011,12 @@ asmlinkage void __sched preempt_schedule_irq(void)
 		 * clear ->lock_depth so that schedule() doesnt
 		 * auto-release the semaphore:
 		 */
-#ifdef CONFIG_PREEMPT_BKL
 		saved_lock_depth = task->lock_depth;
 		task->lock_depth = -1;
-#endif
 		local_irq_enable();
 		schedule();
 		local_irq_disable();
-#ifdef CONFIG_PREEMPT_BKL
 		task->lock_depth = saved_lock_depth;
-#endif
 		sub_preempt_count(PREEMPT_ACTIVE);
 
 		/*
@@ -5241,11 +5231,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
 	task_thread_info(idle)->preempt_count = 0;
-#endif
+
 	/*
 	 * The idle tasks have their own, simple scheduling class:
 	 */
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index f73e2f8c308f..812dbf00844b 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 
-#ifdef CONFIG_PREEMPT_BKL
 /*
  * The 'big kernel semaphore'
  *
@@ -86,128 +85,6 @@ void __lockfunc unlock_kernel(void)
 		up(&kernel_sem);
 }
 
-#else
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel().  It is transparently dropped and reacquired
- * over schedule().  It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - _raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
-	while (!_raw_spin_trylock(&kernel_flag)) {
-		if (test_thread_flag(TIF_NEED_RESCHED))
-			return -EAGAIN;
-		cpu_relax();
-	}
-	preempt_disable();
-	return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
-	_raw_spin_unlock(&kernel_flag);
-	preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption. 
- * If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
-	preempt_disable();
-	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
-		/*
-		 * If preemption was disabled even before this
-		 * was called, there's nothing we can be polite
-		 * about - just spin.
-		 */
-		if (preempt_count() > 1) {
-			_raw_spin_lock(&kernel_flag);
-			return;
-		}
-
-		/*
-		 * Otherwise, let's wait for the kernel lock
-		 * with preemption enabled..
-		 */
-		do {
-			preempt_enable();
-			while (spin_is_locked(&kernel_flag))
-				cpu_relax();
-			preempt_disable();
-		} while (!_raw_spin_trylock(&kernel_flag));
-	}
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
-	_raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
-	/*
-	 * the BKL is not covered by lockdep, so we open-code the
-	 * unlocking sequence (and thus avoid the dep-chain ops):
-	 */
-	_raw_spin_unlock(&kernel_flag);
-	preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc lock_kernel(void)
-{
-	int depth = current->lock_depth+1;
-	if (likely(!depth))
-		__lock_kernel();
-	current->lock_depth = depth;
-}
-
-void __lockfunc unlock_kernel(void)
-{
-	BUG_ON(current->lock_depth < 0);
-	if (likely(--current->lock_depth < 0))
-		__unlock_kernel();
-}
-
-#endif
-
 EXPORT_SYMBOL(lock_kernel);
 EXPORT_SYMBOL(unlock_kernel);
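
What remains after this commit is the semaphore-based BKL from the top of
kernel_lock.c, where current->lock_depth makes the lock recursive per
task: only the outermost lock_kernel() takes the semaphore, and only the
outermost unlock_kernel() releases it. A minimal userspace analogy, with
a pthread mutex in place of kernel_sem and a thread-local counter in
place of task->lock_depth:

    #include <pthread.h>
    #include <assert.h>

    static pthread_mutex_t kernel_sem = PTHREAD_MUTEX_INITIALIZER;
    static __thread int lock_depth = -1;        /* -1 means "BKL not held" */

    static void lock_kernel(void)
    {
        if (++lock_depth == 0)                  /* outermost acquisition only */
            pthread_mutex_lock(&kernel_sem);
    }

    static void unlock_kernel(void)
    {
        assert(lock_depth >= 0);                /* mirrors the BUG_ON() above */
        if (--lock_depth < 0)                   /* outermost release only */
            pthread_mutex_unlock(&kernel_sem);
    }

    int main(void)
    {
        lock_kernel();
        lock_kernel();                          /* nested: semaphore untouched */
        unlock_kernel();
        unlock_kernel();                        /* outermost: semaphore released */
        return 0;
    }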
 

commit 58b8a73ab8becfcaea84abc2a06038281efa4c8a
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Jan 25 21:08:33 2008 +0100

    sched: make PREEMPT_BKL the default
    
    make PREEMPT_BKL the default.
    
    precursor to removal of the !PREEMPT_BKL code.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 61fa116efcde..4420ef427f83 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -53,15 +53,8 @@ config PREEMPT
 endchoice
 
 config PREEMPT_BKL
-	bool "Preempt The Big Kernel Lock"
+	def_bool y
 	depends on SMP || PREEMPT
-	default y
-	help
-	  This option reduces the latency of the kernel by making the
-	  big kernel lock preemptible.
-
-	  Say Y here if you are building a kernel for a desktop system.
-	  Say N if you are unsure.
 
 config RCU_TRACE
 	bool "Enable tracing for RCU - currently stats in debugfs"

commit 03319ec8b06849051747a17aa2a0f9aba9277980
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Jan 25 21:08:28 2008 +0100

    sched: documentation, whitespace fixes
    
    whitespace fixes.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index c2cedd09d895..b9ee0f4db66a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -182,7 +182,7 @@ struct task_group {
 	 *
 	 *	Bw(A) = 1000/(1000+2000+3000) * 100 = 16.66%
 	 *	Bw(B) = 2000/(1000+2000+3000) * 100 = 33.33%
-	 * 	Bw(C) = 3000/(1000+2000+3000) * 100 = 50%
+	 *	Bw(C) = 3000/(1000+2000+3000) * 100 = 50%
 	 *
 	 * The weight assigned to a task group's schedulable entities on every
 	 * cpu (task_group.se[a_cpu]->load.weight) is derived from the task
@@ -192,9 +192,9 @@ struct task_group {
 	 *  tg_A->se[0]->load.weight = tg_A->se[1]->load.weight = 1000;
 	 *
 	 * Note: It's not necessary that each of a task's group schedulable
-	 * 	 entity have the same weight on all CPUs. If the group
-	 * 	 has 2 of its tasks on CPU0 and 1 task on CPU1, then a
-	 * 	 better distribution of weight could be:
+	 *	 entity have the same weight on all CPUs. If the group
+	 *	 has 2 of its tasks on CPU0 and 1 task on CPU1, then a
+	 *	 better distribution of weight could be:
 	 *
 	 *	tg_A->se[0]->load.weight = 2/3 * 2000 = 1333
 	 *	tg_A->se[1]->load.weight = 1/2 * 2000 =  667

commit 32525d022ad52a5c14e80e130260431e16e294b6
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Jan 25 21:08:20 2008 +0100

    sched: whitespace cleanups in topology.h
    
    whitespace cleanups in topology.h.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/topology.h b/include/linux/topology.h
index b6c073d8b709..2352f46160d3 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2002, IBM Corp.
  *
- * All rights reserved.          
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by

commit 52d853431e8d9dc17ba94792123a3fe2bc039831
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Jan 25 21:08:20 2008 +0100

    sched: reactivate fork balancing
    
    reactivate fork balancing.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/topology.h b/include/linux/topology.h
index 47729f18bfdf..b6c073d8b709 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -103,6 +103,7 @@
 	.forkexec_idx		= 0,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_NEWIDLE	\
+				| SD_BALANCE_FORK	\
 				| SD_BALANCE_EXEC	\
 				| SD_WAKE_AFFINE	\
 				| SD_WAKE_IDLE		\
@@ -134,6 +135,7 @@
 	.forkexec_idx		= 1,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_NEWIDLE	\
+				| SD_BALANCE_FORK	\
 				| SD_BALANCE_EXEC	\
 				| SD_WAKE_AFFINE	\
 				| SD_WAKE_IDLE		\
@@ -165,6 +167,7 @@
 	.forkexec_idx		= 1,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_NEWIDLE	\
+				| SD_BALANCE_FORK	\
 				| SD_BALANCE_EXEC	\
 				| SD_WAKE_AFFINE	\
 				| BALANCE_FOR_PKG_POWER,\
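
SD_BALANCE_FORK gates, per domain level, whether the scheduler may pick a
CPU for a newly forked task; in this era the fork path asks
sched_balance_self() to climb to the highest domain with the flag set and
search its span for the least loaded CPU. A self-contained sketch of that
flag walk, with hypothetical flag values and a two-level hierarchy:

    #include <stdio.h>

    #define SD_LOAD_BALANCE   0x01               /* hypothetical values; the */
    #define SD_BALANCE_FORK   0x02               /* real constants differ    */

    struct sched_domain {
        const char *name;
        unsigned int flags;
        struct sched_domain *parent;
    };

    int main(void)
    {
        struct sched_domain node = { "NODE", SD_LOAD_BALANCE | SD_BALANCE_FORK, NULL };
        struct sched_domain cpu  = { "CPU",  SD_LOAD_BALANCE | SD_BALANCE_FORK, &node };
        struct sched_domain *sd = NULL, *tmp;

        /* climb to the highest level that still allows fork balancing */
        for (tmp = &cpu; tmp; tmp = tmp->parent)
            if (tmp->flags & SD_BALANCE_FORK)
                sd = tmp;

        printf("fork balancing spans the %s domain\n", sd ? sd->name : "(none)");
        return 0;
    }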

commit b913176917399e92e6f741672038c73d7ce93be5
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Jan 25 21:08:19 2008 +0100

    sched: add credits for RT balancing improvements
    
    add credits for RT balancing improvements.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index 23b9925a1dfb..55c521780f93 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -22,6 +22,8 @@
  *              by Peter Williams
  *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
  *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
+ *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
+ *              Thomas Gleixner, Mike Kravetz
  */
 
 #include <linux/mm.h>

commit 0eab9146571dfa9b50ea952ec2ab27d591f26b63
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Jan 25 21:08:19 2008 +0100

    sched: style cleanup, #2
    
    style cleanup of various changes that were done recently.
    
    no code changed:
    
          text    data     bss     dec     hex filename
         26399    2578      48   29025    7161 sched.o.before
         26399    2578      48   29025    7161 sched.o.after
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index 461ee900d1ac..23b9925a1dfb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -235,17 +235,17 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares);
  *	Every task in system belong to this group at bootup.
  */
 struct task_group init_task_group = {
-	.se     = init_sched_entity_p,
+	.se	= init_sched_entity_p,
 	.cfs_rq = init_cfs_rq_p,
 };
 
 #ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GROUP_LOAD	2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
 #else
 # define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
 #endif
 
-#define MIN_GROUP_SHARES       2
+#define MIN_GROUP_SHARES	2
 
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 
@@ -352,8 +352,8 @@ struct rt_rq {
 
 /*
  * We add the notion of a root-domain which will be used to define per-domain
- * variables.  Each exclusive cpuset essentially defines an island domain by
- * fully partitioning the member cpus from any other cpuset.  Whenever a new
+ * variables. Each exclusive cpuset essentially defines an island domain by
+ * fully partitioning the member cpus from any other cpuset. Whenever a new
  * exclusive cpuset is created, we also create and attach a new root-domain
  * object.
  *
@@ -365,12 +365,12 @@ struct root_domain {
 	cpumask_t span;
 	cpumask_t online;
 
-        /*
+	/*
 	 * The "RT overload" flag: it gets set if a CPU has more than
 	 * one runnable RT task.
 	 */
 	cpumask_t rto_mask;
-	atomic_t  rto_count;
+	atomic_t rto_count;
 };
 
 static struct root_domain def_root_domain;
@@ -434,7 +434,7 @@ struct rq {
 	atomic_t nr_iowait;
 
 #ifdef CONFIG_SMP
-	struct root_domain  *rd;
+	struct root_domain *rd;
 	struct sched_domain *sd;
 
 	/* For active balancing */
@@ -5066,7 +5066,7 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	if (p->sched_class->set_cpus_allowed)
 		p->sched_class->set_cpus_allowed(p, &new_mask);
 	else {
-		p->cpus_allowed    = new_mask;
+		p->cpus_allowed = new_mask;
 		p->nr_cpus_allowed = cpus_weight(new_mask);
 	}
 
@@ -5847,9 +5847,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	if (rq->rd) {
 		struct root_domain *old_rd = rq->rd;
 
-		for (class = sched_class_highest; class; class = class->next)
+		for (class = sched_class_highest; class; class = class->next) {
 			if (class->leave_domain)
 				class->leave_domain(rq);
+		}
 
 		if (atomic_dec_and_test(&old_rd->refcount))
 			kfree(old_rd);
@@ -5858,9 +5859,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	atomic_inc(&rd->refcount);
 	rq->rd = rd;
 
-	for (class = sched_class_highest; class; class = class->next)
+	for (class = sched_class_highest; class; class = class->next) {
 		if (class->join_domain)
 			class->join_domain(rq);
+	}
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -5895,11 +5897,11 @@ static struct root_domain *alloc_rootdomain(const cpumask_t *map)
 }
 
 /*
- * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
+ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  * hold the hotplug lock.
  */
-static void cpu_attach_domain(struct sched_domain *sd,
-			      struct root_domain *rd, int cpu)
+static void
+cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *tmp;
@@ -7095,7 +7097,7 @@ static int rebalance_shares(struct sched_domain *sd, int this_cpu)
 		for_each_cpu_mask(i, sdspan)
 			total_load += tg->cfs_rq[i]->load.weight;
 
-		/* Nothing to do if this group has no load  */
+		/* Nothing to do if this group has no load */
 		if (!total_load)
 			continue;