Patches contributed by Eötvös Loránd University


commit 41a2d6cfa3f77ec469e7e5f06b4d7ffd031f9c0e
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Dec 5 15:46:09 2007 +0100

    sched: style cleanups
    
    style cleanup of various changes that were done recently.
    
    no code changed:
    
          text    data     bss     dec     hex filename
         23680    2542      28   26250    668a sched.o.before
         23680    2542      28   26250    668a sched.o.after
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index b062856b946c..67d9d1799d86 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -209,9 +209,8 @@ static inline struct task_group *task_group(struct task_struct *p)
 	tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
 				struct task_group, css);
 #else
-	tg  = &init_task_group;
+	tg = &init_task_group;
 #endif
-
 	return tg;
 }
 
@@ -249,15 +248,16 @@ struct cfs_rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
 
-	/* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+	/*
+	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
 	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
 	 * (like users, containers etc.)
 	 *
 	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
 	 * list is used during load balance.
 	 */
-	struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
-	struct task_group *tg;    /* group that "owns" this runqueue */
+	struct list_head leaf_cfs_rq_list;
+	struct task_group *tg;	/* group that "owns" this runqueue */
 #endif
 };
 
@@ -300,7 +300,7 @@ struct rq {
 	/* list of leaf cfs_rq on this cpu: */
 	struct list_head leaf_cfs_rq_list;
 #endif
-	struct rt_rq  rt;
+	struct rt_rq rt;
 
 	/*
 	 * This is part of a global counter where only the total sum
@@ -457,8 +457,8 @@ enum {
 	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
 	SCHED_FEAT_WAKEUP_PREEMPT	= 2,
 	SCHED_FEAT_START_DEBIT		= 4,
-	SCHED_FEAT_TREE_AVG             = 8,
-	SCHED_FEAT_APPROX_AVG           = 16,
+	SCHED_FEAT_TREE_AVG		= 8,
+	SCHED_FEAT_APPROX_AVG		= 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
@@ -591,7 +591,7 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 
 /*
  * task_rq_lock - lock the runqueue a given task resides on and disable
- * interrupts.  Note the ordering: we can safely lookup the task_rq without
+ * interrupts. Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
  */
 static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
@@ -779,7 +779,7 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
  * each task makes to its run queue's load is weighted according to its
- * scheduling class and "nice" value.  For SCHED_NORMAL tasks this is just a
+ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
  * scaled version of the new time slice allocation that they receive on time
  * slice expiry etc.
  */
@@ -1854,7 +1854,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * and do any other architecture-specific cleanup actions.
  *
  * Note that we may have delayed dropping an mm in context_switch(). If
- * so, we finish that here outside of the runqueue lock.  (Doing it
+ * so, we finish that here outside of the runqueue lock. (Doing it
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
@@ -2136,7 +2136,7 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu.  Then
+ * allow dest_cpu, which will force the cpu onto dest_cpu. Then
  * the cpu_allowed mask is restored.
  */
 static void sched_migrate_task(struct task_struct *p, int dest_cpu)
@@ -2581,7 +2581,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * tasks around. Thus we look for the minimum possible imbalance.
 	 * Negative imbalances (*we* are more loaded than anyone else) will
 	 * be counted as no imbalance for these purposes -- we can't fix that
-	 * by pulling tasks to us.  Be careful of negative numbers as they'll
+	 * by pulling tasks to us. Be careful of negative numbers as they'll
 	 * appear as very large values with unsigned longs.
 	 */
 	if (max_load <= busiest_load_per_task)
@@ -3016,7 +3016,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 
 	/*
 	 * This condition is "impossible", if it occurs
-	 * we need to fix it.  Originally reported by
+	 * we need to fix it. Originally reported by
 	 * Bjorn Helgaas on a 128-cpu setup.
 	 */
 	BUG_ON(busiest_rq == target_rq);
@@ -3048,7 +3048,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 #ifdef CONFIG_NO_HZ
 static struct {
 	atomic_t load_balancer;
-	cpumask_t  cpu_mask;
+	cpumask_t cpu_mask;
 } nohz ____cacheline_aligned = {
 	.load_balancer = ATOMIC_INIT(-1),
 	.cpu_mask = CPU_MASK_NONE,
@@ -3552,7 +3552,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
 static inline void schedule_debug(struct task_struct *prev)
 {
 	/*
-	 * Test if we are atomic.  Since do_exit() needs to call into
+	 * Test if we are atomic. Since do_exit() needs to call into
 	 * schedule() atomically, we ignore that path for now.
 	 * Otherwise, whine if we are scheduling when we should not be.
 	 */
@@ -3674,7 +3674,7 @@ EXPORT_SYMBOL(schedule);
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
- * off of preempt_enable.  Kernel preemptions off return from interrupt
+ * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
 asmlinkage void __sched preempt_schedule(void)
@@ -3686,7 +3686,7 @@ asmlinkage void __sched preempt_schedule(void)
 #endif
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
-	 * we do not want to preempt the current task.  Just return..
+	 * we do not want to preempt the current task. Just return..
 	 */
 	if (likely(ti->preempt_count || irqs_disabled()))
 		return;
@@ -3772,12 +3772,12 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
 EXPORT_SYMBOL(default_wake_function);
 
 /*
- * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
- * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
+ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
+ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
  * number) then we wake all the non-exclusive tasks and one exclusive task.
  *
  * There are circumstances in which we can try to wake a task which has already
- * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
+ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
@@ -4390,8 +4390,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
-				       struct sched_param __user *param)
+asmlinkage long
+sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
 	/* negative values for policy are not valid */
 	if (policy < 0)
@@ -4491,7 +4491,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 
 	/*
 	 * It is not safe to call set_cpus_allowed with the
-	 * tasklist_lock held.  We will bump the task_struct's
+	 * tasklist_lock held. We will bump the task_struct's
 	 * usage count and then drop tasklist_lock.
 	 */
 	get_task_struct(p);
@@ -4687,7 +4687,7 @@ EXPORT_SYMBOL(cond_resched);
  * cond_resched_lock() - if a reschedule is pending, drop the given lock,
  * call schedule, and on return reacquire the lock.
  *
- * This works OK both with and without CONFIG_PREEMPT.  We do strange low-level
+ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
  * operations here to prevent schedule() from being called twice (once via
  * spin_unlock(), once by hand).
  */
@@ -4741,7 +4741,7 @@ void __sched yield(void)
 EXPORT_SYMBOL(yield);
 
 /*
- * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
+ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  *
  * But don't do that if it is a deliberate, throttling IO wait (this task
@@ -5050,7 +5050,7 @@ static inline void sched_init_granularity(void)
  * is removed from the allowed bitmask.
  *
  * NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely.  The
+ * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
 int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
@@ -5087,7 +5087,7 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 EXPORT_SYMBOL_GPL(set_cpus_allowed);
 
 /*
- * Move (not current) task off this cpu, onto dest cpu.  We're doing
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
  * this because either it can't run here any more (set_cpus_allowed()
  * away from this CPU, or CPU going down), or because we're
  * attempting to rebalance this task on exec (sched_exec).
@@ -5232,7 +5232,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 			 * Try to stay on the same cpuset, where the
 			 * current cpuset may be a subset of all cpus.
 			 * The cpuset_cpus_allowed_locked() variant of
-			 * cpuset_cpus_allowed() will not block.  It must be
+			 * cpuset_cpus_allowed() will not block. It must be
 			 * called within calls to cpuset_lock/cpuset_unlock.
 			 */
 			rq = task_rq_lock(p, &flags);
@@ -5245,10 +5245,11 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 			 * kernel threads (both mm NULL), since they never
 			 * leave kernel.
 			 */
-			if (p->mm && printk_ratelimit())
+			if (p->mm && printk_ratelimit()) {
 				printk(KERN_INFO "process %d (%s) no "
 				       "longer affine to cpu%d\n",
-			       task_pid_nr(p), p->comm, dead_cpu);
+					task_pid_nr(p), p->comm, dead_cpu);
+			}
 		}
 	} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
 }
@@ -5350,7 +5351,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 
 	/*
 	 * Drop lock around migration; if someone else moves it,
-	 * that's OK.  No task can be added to this CPU, so iteration is
+	 * that's OK. No task can be added to this CPU, so iteration is
 	 * fine.
 	 */
 	spin_unlock_irq(&rq->lock);
@@ -5414,7 +5415,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
 	/*
 	 * In the intermediate directories, both the child directory and
 	 * procname are dynamically allocated and could fail but the mode
-	 * will always be set.  In the lowest directory the names are
+	 * will always be set. In the lowest directory the names are
 	 * static strings and all have proc handlers.
 	 */
 	for (entry = *tablep; entry->mode; entry++) {
@@ -5585,7 +5586,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_UP_CANCELED_FROZEN:
 		if (!cpu_rq(cpu)->migration_thread)
 			break;
-		/* Unbind it from offline cpu so it can run.  Fall thru. */
+		/* Unbind it from offline cpu so it can run. Fall thru. */
 		kthread_bind(cpu_rq(cpu)->migration_thread,
 			     any_online_cpu(cpu_online_map));
 		kthread_stop(cpu_rq(cpu)->migration_thread);
@@ -5612,9 +5613,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
 
-		/* No need to migrate the tasks: it was best-effort if
-		 * they didn't take sched_hotcpu_mutex.  Just wake up
-		 * the requestors. */
+		/*
+		 * No need to migrate the tasks: it was best-effort if
+		 * they didn't take sched_hotcpu_mutex. Just wake up
+		 * the requestors.
+		 */
 		spin_lock_irq(&rq->lock);
 		while (!list_empty(&rq->migration_queue)) {
 			struct migration_req *req;
@@ -5922,7 +5925,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
  * @node: node whose sched_domain we're building
  * @used_nodes: nodes already in the sched_domain
  *
- * Find the next node to include in a given scheduling domain.  Simply
+ * Find the next node to include in a given scheduling domain. Simply
  * finds the closest node not already in the @used_nodes map.
  *
  * Should use nodemask_t.
@@ -5962,7 +5965,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
  * @node: node whose cpumask we're constructing
  * @size: number of nodes to include in this span
  *
- * Given a node, construct a good cpumask for its sched_domain to span.  It
+ * Given a node, construct a good cpumask for its sched_domain to span. It
  * should be one that prevents unnecessary balancing, but also spreads tasks
  * out optimally.
  */
@@ -5999,8 +6002,8 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
 
-static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
-			    struct sched_group **sg)
+static int
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	if (sg)
 		*sg = &per_cpu(sched_group_cpus, cpu);
@@ -6017,8 +6020,8 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
 #endif
 
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	int group;
 	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
@@ -6029,8 +6032,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
 	return group;
 }
 #elif defined(CONFIG_SCHED_MC)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	if (sg)
 		*sg = &per_cpu(sched_group_core, cpu);
@@ -6041,8 +6044,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
 
-static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
@@ -6222,7 +6225,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 	 * Allocate the per-node list of sched groups
 	 */
 	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
-					   GFP_KERNEL);
+				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
 		return -ENOMEM;
@@ -6469,7 +6472,7 @@ static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
 static cpumask_t fallback_doms;
 
 /*
- * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
+ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
  */
@@ -6511,19 +6514,19 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
 /*
  * Partition sched domains as specified by the 'ndoms_new'
- * cpumasks in the array doms_new[] of cpumasks.  This compares
+ * cpumasks in the array doms_new[] of cpumasks. This compares
  * doms_new[] to the current sched domain partitioning, doms_cur[].
  * It destroys each deleted domain and builds each new domain.
  *
  * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'.
- * The masks don't intersect (don't overlap.)  We should setup one
- * sched domain for each mask.  CPUs not in any of the cpumasks will
- * not be load balanced.  If the same cpumask appears both in the
+ * The masks don't intersect (don't overlap.) We should setup one
+ * sched domain for each mask. CPUs not in any of the cpumasks will
+ * not be load balanced. If the same cpumask appears both in the
  * current 'doms_cur' domains and in the new 'doms_new', we can leave
  * it as it is.
  *
- * The passed in 'doms_new' should be kmalloc'd.  This routine takes
- * ownership of it and will kfree it when done with it.  If the caller
+ * The passed in 'doms_new' should be kmalloc'd. This routine takes
+ * ownership of it and will kfree it when done with it. If the caller
  * failed the kmalloc call, then it can pass in doms_new == NULL,
  * and partition_sched_domains() will fallback to the single partition
  * 'fallback_doms'.
@@ -6653,7 +6656,7 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 #endif
 
 /*
- * Force a reinitialization of the sched domains hierarchy.  The domains
+ * Force a reinitialization of the sched domains hierarchy. The domains
  * and groups cannot be updated in place without racing with the balancing
  * code, so we temporarily attach all running cpus to the NULL domain
  * which will prevent rebalancing while the sched domains are recalculated.
@@ -6943,8 +6946,8 @@ struct task_struct *curr_task(int cpu)
  * @p: the task pointer to set.
  *
  * Description: This function must only be used when non-maskable interrupts
- * are serviced on a separate stack.  It allows the architecture to switch the
- * notion of the current task on a cpu in a non-blocking manner.  This function
+ * are serviced on a separate stack. It allows the architecture to switch the
+ * notion of the current task on a cpu in a non-blocking manner. This function
  * must be called with all CPU's synchronized, and interrupts disabled, the
  * and caller must save the original value of the current task (see
  * curr_task() above) and restore that value before reenabling interrupts and
@@ -7193,16 +7196,17 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 	return &tg->css;
 }
 
-static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
-			       struct cgroup *cgrp)
+static void
+cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
 
 	sched_destroy_group(tg);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
-			     struct cgroup *cgrp, struct task_struct *tsk)
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		      struct task_struct *tsk)
 {
 	/* We don't support RT-tasks being in separate groups */
 	if (tsk->sched_class != &fair_sched_class)
@@ -7308,8 +7312,8 @@ static struct cgroup_subsys_state *cpuacct_create(
 }
 
 /* destroy an existing cpu accounting group */
-static void cpuacct_destroy(struct cgroup_subsys *ss,
-			    struct cgroup *cont)
+static void
+cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
 {
 	struct cpuacct *ca = cgroup_ca(cont);
 

commit 799b37b5ee6b4c197f38611eb7f02552e4f14e70
Author: Ingo Molnar <mingo@elte.hu>
Date:   Tue Dec 4 11:32:38 2007 +0100

    drivers/s390/net/ctcmain.c: fix build bug
    
    SET_MODULE_OWNER() is obsolete.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index b3b6f654365c..97adc701a819 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -2802,7 +2802,6 @@ void ctc_init_netdevice(struct net_device * dev)
 	dev->type = ARPHRD_SLIP;
 	dev->tx_queue_len = 100;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
-	SET_MODULE_OWNER(dev);
 }
 
 

commit db292ca302e83534f5f0f7139e13d7e6976e51f9
Author: Ingo Molnar <mingo@elte.hu>
Date:   Tue Dec 4 17:04:39 2007 +0100

    sched: default to more aggressive yield for SCHED_BATCH tasks
    
    do a more aggressive yield for SCHED_BATCH-tuned tasks: they are all
    about throughput anyway. This allows a gentler migration path for
    any apps that relied on the stronger yield behaviour.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 37bb265598db..c33f0ceb3de9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -799,8 +799,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
  */
 static void yield_task_fair(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
-	struct sched_entity *rightmost, *se = &rq->curr->se;
+	struct task_struct *curr = rq->curr;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	struct sched_entity *rightmost, *se = &curr->se;
 
 	/*
 	 * Are we the only task in the tree?
@@ -808,7 +809,7 @@ static void yield_task_fair(struct rq *rq)
 	if (unlikely(cfs_rq->nr_running == 1))
 		return;
 
-	if (likely(!sysctl_sched_compat_yield)) {
+	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 		__update_rq_clock(rq);
 		/*
 		 * Update run-time statistics of the 'current'.
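
A task opts into SCHED_BATCH - and, with this patch, into the more
aggressive yield - via sched_setscheduler() from userspace. A minimal
sketch, not part of the patch and purely illustrative of the API:

    #define _GNU_SOURCE             /* glibc exposes SCHED_BATCH with this */
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            struct sched_param sp = { .sched_priority = 0 };

            /* SCHED_BATCH carries no RT priority, so 0 is required;
             * pid 0 means the calling task. */
            if (sched_setscheduler(0, SCHED_BATCH, &sp) == -1) {
                    perror("sched_setscheduler");
                    return 1;
            }
            /* from here on, yield_task_fair() takes the aggressive
             * (requeue-rightmost) yield path for this task */
            return 0;
    }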

commit 77034937dc4575ca0a76bf209838ecd39e804089
Author: Ingo Molnar <mingo@elte.hu>
Date:   Tue Dec 4 17:04:39 2007 +0100

    sched: fix crash in sys_sched_rr_get_interval()
    
    Luiz Fernando N. Capitulino reported that sched_rr_get_interval()
    crashes for SCHED_OTHER tasks that are on an idle runqueue.
    
    The fix is to return a 0 timeslice for tasks that are on an idle
    runqueue (and which are not running, obviously).
    
    this also shrinks the code a bit:
    
       text    data     bss     dec     hex filename
      47903    3934     336   52173    cbcd sched.o.before
      47885    3934     336   52155    cbbb sched.o.after
    
    Reported-by: Luiz Fernando N. Capitulino <lcapitulino@mandriva.com.br>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index 59ff6b140edb..b062856b946c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4850,17 +4850,21 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 	if (retval)
 		goto out_unlock;
 
-	if (p->policy == SCHED_FIFO)
-		time_slice = 0;
-	else if (p->policy == SCHED_RR)
+	/*
+	 * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
+	 * tasks that are on an otherwise idle runqueue:
+	 */
+	time_slice = 0;
+	if (p->policy == SCHED_RR) {
 		time_slice = DEF_TIMESLICE;
-	else {
+	} else {
 		struct sched_entity *se = &p->se;
 		unsigned long flags;
 		struct rq *rq;
 
 		rq = task_rq_lock(p, &flags);
-		time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+		if (rq->cfs.load.weight)
+			time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
 		task_rq_unlock(rq, &flags);
 	}
 	read_unlock(&tasklist_lock);
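
The fixed path is reachable from userspace through the POSIX wrapper of
the same name. A minimal sketch, not part of the patch, that exercises
it:

    #include <sched.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            /* pid 0 queries the calling task */
            if (sched_rr_get_interval(0, &ts) == -1) {
                    perror("sched_rr_get_interval");
                    return 1;
            }
            /* a SCHED_OTHER task on an otherwise idle runqueue now
             * gets a 0 timeslice reported here instead of crashing
             * the kernel */
            printf("timeslice: %ld.%09ld s\n",
                   (long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }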

commit 9fc89c2dea7ca7915e6606e49167cdca2f3c4e30
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Nov 28 16:21:50 2007 -0800

    isdn: bootup crash fix
    
    got this HiSax bootup crash on a "make randconfig" bzImage bootup:
    
     Calling initcall 0xc0bb1320: HiSax_init+0x0/0x380()
     HiSax: Linux Driver for passive ISDN cards
     HiSax: Version 3.5 (kernel)
     HiSax: Layer1 Revision 2.46.2.5
     HiSax: Layer2 Revision 2.30.2.4
     HiSax: TeiMgr Revision 2.20.2.3
     HiSax: Layer3 Revision 2.22.2.3
     HiSax: LinkLayer Revision 2.59.2.4
     HiSax: Total 1 card defined
     HiSax: Card 1 Protocol EDSS1 Id=HiSax (0)
     HiSax: HFC-S driver Rev. 1.10.2.4
     HFCS: defined at 0x500 IRQ 5 HZ 250
     Teles 16.3c: IRQ 5 count 0
     HFCS: resetting card
     Teles 16.3c: IRQ 5 count 0
     Teles 16.3c: IRQ(5) getting no interrupts during init 1
     HFCS: resetting card
     ------------[ cut here ]------------
     kernel BUG at include/linux/timer.h:145!
     invalid opcode: 0000 [#1] PREEMPT DEBUG_PAGEALLOC
     Modules linked in:
    
     Pid: 1, comm: swapper Not tainted (2.6.24-rc3 #2045)
     EIP: 0060:[<c063afbf>] EFLAGS: 00010286 CPU: 0
     EIP is at hfcs_card_msg+0x15f/0x180
     EAX: c0cf2e5c EBX: 000000f2 ECX: 00000000 EDX: ffff1193
     ESI: f76e8000 EDI: f76e8000 EBP: f7c23ec4 ESP: f7c23eac
      DS: 007b ES: 007b FS: 0000 GS: 0000 SS: 0068
     Process swapper (pid: 1, ti=f7c22000 task=f7c0e000 task.ti=f7c22000)
     Stack: 00000000 f7c23ec4 c011703b 00000002 f76e8000 00000000 f7c23ef8 c060c3e5
            c0a7c9c0 c0a315dc 00000005 00000001 00000000 f7c23f34 00000000 c0b5c9c0
            f7c23f34 00000000 c0f5a8e0 f7c23f80 c0bb154f 00000000 00000001 c0a9b5b9
     Call Trace:
      [<c010339a>] show_trace_log_lvl+0x1a/0x40
      [<c0103469>] show_stack_log_lvl+0xa9/0xe0
      [<c010355f>] show_registers+0xbf/0x200
      [<c01037a4>] die+0x104/0x220
      [<c0103943>] do_trap+0x83/0xc0
      [<c0103ca8>] do_invalid_op+0x88/0xa0
      [<c083621a>] error_code+0x6a/0x70
      [<c060c3e5>] checkcard+0x4a5/0x620
      [<c0bb154f>] HiSax_init+0x22f/0x380
      [<c0b867b7>] kernel_init+0x97/0x2a0
      [<c0102f87>] kernel_thread_helper+0x7/0x20
      =======================
     Code: e8 43 ae ff 8b 57 3c 85 d2 0f 84 ef fe ff ff b8 a0 99 ad c0 b9 02 00 00 00 e8 ce 11 ae ff 83 c4 0c b8 00 00 00 00 5b 5e 5f c9 c3 <0f> 0b eb fe 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90
     EIP: [<c063afbf>] hfcs_card_msg+0x15f/0x180 SS:ESP 0068:f7c23eac
     Kernel panic - not syncing: Attempted to kill init!
    
    The box has no HiSax card installed.
    
    the reason for the crash is add_timer() done on an already running
    timer. This happens because for some reason CARD_INIT is called twice.
    
    this patch works around this problem by using mod_timer() - this gets
    the system booting again - but it would be nice to figure out why
    CARD_INIT is done twice.
    
    the ISDN config section (generated via make randconfig) is this:
    
    #
    # ISDN feature submodules
    #
    # CONFIG_ISDN_DRV_LOOP is not set
    CONFIG_ISDN_DIVERSION=y
    
    #
    # ISDN4Linux hardware drivers
    #
    
    #
    # Passive cards
    #
    CONFIG_ISDN_DRV_HISAX=y
    
    #
    # D-channel protocol features
    #
    CONFIG_HISAX_EURO=y
    CONFIG_DE_AOC=y
    # CONFIG_HISAX_NO_SENDCOMPLETE is not set
    # CONFIG_HISAX_NO_LLC is not set
    # CONFIG_HISAX_NO_KEYPAD is not set
    CONFIG_HISAX_1TR6=y
    CONFIG_HISAX_NI1=y
    CONFIG_HISAX_MAX_CARDS=8
    
    #
    # HiSax supported cards
    #
    CONFIG_HISAX_16_0=y
    # CONFIG_HISAX_16_3 is not set
    # CONFIG_HISAX_TELESPCI is not set
    CONFIG_HISAX_S0BOX=y
    # CONFIG_HISAX_AVM_A1 is not set
    CONFIG_HISAX_FRITZPCI=y
    CONFIG_HISAX_AVM_A1_PCMCIA=y
    CONFIG_HISAX_ELSA=y
    CONFIG_HISAX_IX1MICROR2=y
    CONFIG_HISAX_DIEHLDIVA=y
    # CONFIG_HISAX_ASUSCOM is not set
    # CONFIG_HISAX_TELEINT is not set
    CONFIG_HISAX_HFCS=y
    # CONFIG_HISAX_SEDLBAUER is not set
    CONFIG_HISAX_SPORTSTER=y
    # CONFIG_HISAX_MIC is not set
    # CONFIG_HISAX_NETJET is not set
    # CONFIG_HISAX_NETJET_U is not set
    # CONFIG_HISAX_NICCY is not set
    # CONFIG_HISAX_ISURF is not set
    # CONFIG_HISAX_HSTSAPHIR is not set
    # CONFIG_HISAX_BKM_A4T is not set
    # CONFIG_HISAX_SCT_QUADRO is not set
    # CONFIG_HISAX_GAZEL is not set
    # CONFIG_HISAX_HFC_PCI is not set
    # CONFIG_HISAX_W6692 is not set
    # CONFIG_HISAX_HFC_SX is not set
    # CONFIG_HISAX_DEBUG is not set
    
    #
    # HiSax PCMCIA card service modules
    #
    
    #
    # HiSax sub driver modules
    #
    CONFIG_HISAX_ST5481=y
    CONFIG_HISAX_HFCUSB=y
    # CONFIG_HISAX_HFC4S8S is not set
    CONFIG_HISAX_FRITZ_PCIPNP=y
    CONFIG_HISAX_HDLC=y
    
    #
    # Active cards
    #
    CONFIG_ISDN_DRV_ICN=m
    CONFIG_ISDN_DRV_PCBIT=m
    CONFIG_ISDN_DRV_SC=y
    # CONFIG_ISDN_DRV_ACT2000 is not set
    CONFIG_HYSDN=m
    # CONFIG_ISDN_DRV_GIGASET is not set
    # CONFIG_ISDN_CAPI is not set
    CONFIG_PHONE=y
    CONFIG_PHONE_IXJ=m
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Cc: Karsten Keil <kkeil@suse.de>
    Cc: Kai Germaschewski <kai@germaschewski.name>
    Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

diff --git a/drivers/isdn/hisax/hfcscard.c b/drivers/isdn/hisax/hfcscard.c
index 57670dc5034d..909d6709ec16 100644
--- a/drivers/isdn/hisax/hfcscard.c
+++ b/drivers/isdn/hisax/hfcscard.c
@@ -118,8 +118,7 @@ hfcs_card_msg(struct IsdnCardState *cs, int mt, void *arg)
 			return(0);
 		case CARD_INIT:
 			delay = (75*HZ)/100 +1;
-			cs->hw.hfcD.timer.expires = jiffies + delay;
-			add_timer(&cs->hw.hfcD.timer);
+			mod_timer(&cs->hw.hfcD.timer, jiffies + delay);
 			spin_lock_irqsave(&cs->lock, flags);
 			reset_hfcs(cs);
 			init2bds0(cs);
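
The backtrace's "kernel BUG at include/linux/timer.h:145" is the
BUG_ON(timer_pending(timer)) check in add_timer(): add_timer() may only
arm a timer that is not already pending, while mod_timer() copes with
both states. A sketch of the pattern against the 2.6.24-era timer API
(the demo_* names are hypothetical):

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    static struct timer_list demo_timer;

    static void demo_fn(unsigned long data)
    {
            /* timer handler body */
    }

    static void demo_init(void)
    {
            setup_timer(&demo_timer, demo_fn, 0UL);
    }

    static void demo_arm(unsigned long delay)
    {
            /*
             * Unsafe if the timer may already be pending (the HiSax
             * case, with CARD_INIT running twice):
             *
             *      demo_timer.expires = jiffies + delay;
             *      add_timer(&demo_timer);
             *
             * Safe either way - mod_timer() behaves like del_timer()
             * followed by add_timer(), but atomically:
             */
            mod_timer(&demo_timer, jiffies + delay);
    }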

commit c46f739dd39db3b07ab5deb4e3ec81e1c04a91af
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Nov 28 13:59:18 2007 +0100

    vfs: coredumping fix
    
    fix: http://bugzilla.kernel.org/show_bug.cgi?id=3043
    
    only allow coredumping into a file owned by the same uid that the
    coredumping task runs under.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Acked-by: Alan Cox <alan@redhat.com>
    Acked-by: Christoph Hellwig <hch@lst.de>
    Acked-by: Al Viro <viro@ftp.linux.org.uk>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

diff --git a/fs/exec.c b/fs/exec.c
index 4ccaaa4b13b2..282240afe99e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1780,6 +1780,12 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
 	   but keep the previous behaviour for now. */
 	if (!ispipe && !S_ISREG(inode->i_mode))
 		goto close_fail;
+	/*
+	 * Dont allow local users get cute and trick others to coredump
+	 * into their pre-created files:
+	 */
+	if (inode->i_uid != current->fsuid)
+		goto close_fail;
 	if (!file->f_op)
 		goto close_fail;
 	if (!file->f_op->write)

commit f95e0d1c2ad668c77aa4b272c076faf3aa0d631c
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Nov 28 15:52:56 2007 +0100

    sched: clean up kernel/sched_stats.h
    
    clean up kernel/sched_stats.h.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 630178e53bb6..5b32433e7ee5 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -52,7 +52,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
 				    sd->lb_nobusyq[itype],
 				    sd->lb_nobusyg[itype]);
 			}
-			seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n",
+			seq_printf(seq,
+				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
 			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
 			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
 			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,

commit c1a89740da168d3431f2f4e7c3b03daacbb55be1
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Nov 28 15:52:56 2007 +0100

    sched: clean up overlong line in kernel/sched_debug.c
    
    clean up overlong line in kernel/sched_debug.c.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 5d0d623a5465..d30467b47ddd 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -327,10 +327,12 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 			avg_atom = -1LL;
 
 		avg_per_cpu = p->se.sum_exec_runtime;
-		if (p->se.nr_migrations)
-			avg_per_cpu = div64_64(avg_per_cpu, p->se.nr_migrations);
-		else
+		if (p->se.nr_migrations) {
+			avg_per_cpu = div64_64(avg_per_cpu,
+					       p->se.nr_migrations);
+		} else {
 			avg_per_cpu = -1LL;
+		}
 
 		__PN(avg_atom);
 		__PN(avg_per_cpu);

commit deaf2227ddf657a260e923db44b6f0974d9bb782
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Nov 28 15:52:56 2007 +0100

    sched: clean up, move __sched_text_start/end to sched.h
    
    move __sched_text_start/end to sched.h. No code changed:
    
       text    data     bss     dec     hex filename
      26582    2310      28   28920    70f8 sched.o.before
      26582    2310      28   28920    70f8 sched.o.after
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index ee800e7a70de..ac3d496fbd20 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -282,6 +282,10 @@ static inline void touch_all_softlockup_watchdogs(void)
 
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched		__attribute__((__section__(".sched.text")))
+
+/* Linker adds these: start and end of __sched functions */
+extern char __sched_text_start[], __sched_text_end[];
+
 /* Is this address in the __sched functions? */
 extern int in_sched_functions(unsigned long addr);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 89cb2dac8bfd..98dcdf272db3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6708,9 +6708,6 @@ void __init sched_init_smp(void)
 
 int in_sched_functions(unsigned long addr)
 {
-	/* Linker adds these: start and end of __sched functions */
-	extern char __sched_text_start[], __sched_text_end[];
-
 	return in_lock_functions(addr) ||
 		(addr >= (unsigned long)__sched_text_start
 		&& addr < (unsigned long)__sched_text_end);
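
The pattern here - the kernel's linker script defines start/end symbols
around a text section, and C code compares raw addresses against them -
works in userspace too. A minimal sketch with a hypothetical section
name (GNU ld auto-generates __start_/__stop_ symbols for orphan
sections whose names are valid C identifiers; ".sched.text" is not one,
hence the kernel's explicitly defined symbols):

    #include <stdio.h>

    __attribute__((section("demo_text")))
    static int demo_fn(int x)
    {
            return x + 1;
    }

    /* provided automatically by GNU ld for the "demo_text" section */
    extern char __start_demo_text[], __stop_demo_text[];

    static int in_demo_functions(unsigned long addr)
    {
            return addr >= (unsigned long)__start_demo_text &&
                   addr <  (unsigned long)__stop_demo_text;
    }

    int main(void)
    {
            printf("demo_fn in section: %d\n",
                   in_demo_functions((unsigned long)demo_fn));
            return 0;
    }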

commit 9a4e715914f07e56cbfa6f2b544a68365d51c3ef
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Nov 28 15:52:56 2007 +0100

    sched: clean up sd_alloc_ctl_cpu_table() definition
    
    clean up sd_alloc_ctl_cpu_table() definition.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index 38933cafea8a..89cb2dac8bfd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5466,7 +5466,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
 	return table;
 }
 
-static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 {
 	struct ctl_table *entry, *table;
 	struct sched_domain *sd;