Patches contributed by Eötvös Loránd University
commit d7876a08db50895ed9808ede4a259cccf65eba47
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 25 21:08:19 2008 +0100
sched: remove unused JIFFIES_TO_NS() macro
remove unused JIFFIES_TO_NS() macro.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index 35ef06c99214..461ee900d1ac 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -96,10 +96,9 @@ unsigned long long __attribute__((weak)) sched_clock(void)
#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
/*
- * Some helpers for converting nanosecond timing to jiffy resolution
+ * Helpers for converting nanosecond timing to jiffy resolution
*/
#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
-#define JIFFIES_TO_NS(TIME) ((TIME) * (NSEC_PER_SEC / HZ))
#define NICE_0_LOAD SCHED_LOAD_SCALE
#define NICE_0_SHIFT SCHED_LOAD_SHIFT
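The surviving NS_TO_JIFFIES() helper simply divides a nanosecond value by the length of one jiffy in nanoseconds (NSEC_PER_SEC / HZ). A minimal userspace sketch of the same arithmetic, with HZ fixed at 250 purely for illustration (in the kernel it is a build-time configuration value):

#include <stdio.h>

/* Illustrative values only; in the kernel these come from the build config. */
#define HZ           250UL
#define NSEC_PER_SEC 1000000000UL

/* Same shape as the helper kept by this patch. */
#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

int main(void)
{
    /* One jiffy is NSEC_PER_SEC / HZ = 4,000,000 ns at HZ=250. */
    unsigned long long ten_ms = 10ULL * 1000 * 1000;   /* 10 ms in ns */

    /* Integer division truncates: 10,000,000 / 4,000,000 = 2 jiffies. */
    printf("10 ms = %lu jiffies at HZ=%lu\n", NS_TO_JIFFIES(ten_ms), HZ);
    return 0;
}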
commit bdd7c81b4973e72b670eff6b5725bab189b723d6
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 25 21:08:18 2008 +0100
sched: fix sched_rt.c:join/leave_domain
fix build bug in sched_rt.c:join/leave_domain and make them only
be included on SMP builds.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b049e5110eea..3ea0cae513d2 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -767,6 +767,20 @@ static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
p->nr_cpus_allowed = weight;
}
+/* Assumes rq->lock is held */
+static void join_domain_rt(struct rq *rq)
+{
+ if (rq->rt.overloaded)
+ rt_set_overload(rq);
+}
+
+/* Assumes rq->lock is held */
+static void leave_domain_rt(struct rq *rq)
+{
+ if (rq->rt.overloaded)
+ rt_clear_overload(rq);
+}
+
#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq) do { } while (0)
# define schedule_balance_rt(rq, prev) do { } while (0)
@@ -799,20 +813,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
}
}
-/* Assumes rq->lock is held */
-static void join_domain_rt(struct rq *rq)
-{
- if (rq->rt.overloaded)
- rt_set_overload(rq);
-}
-
-/* Assumes rq->lock is held */
-static void leave_domain_rt(struct rq *rq)
-{
- if (rq->rt.overloaded)
- rt_clear_overload(rq);
-}
-
static void set_curr_task_rt(struct rq *rq)
{
struct task_struct *p = rq->curr;
@@ -838,11 +838,10 @@ const struct sched_class rt_sched_class = {
.load_balance = load_balance_rt,
.move_one_task = move_one_task_rt,
.set_cpus_allowed = set_cpus_allowed_rt,
+ .join_domain = join_domain_rt,
+ .leave_domain = leave_domain_rt,
#endif
.set_curr_task = set_curr_task_rt,
.task_tick = task_tick_rt,
-
- .join_domain = join_domain_rt,
- .leave_domain = leave_domain_rt,
};
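The build bug came from join_domain_rt()/leave_domain_rt() calling rt_set_overload()/rt_clear_overload(), which exist only on SMP builds, while both the functions and their .join_domain/.leave_domain initializers sat outside the CONFIG_SMP section; the patch moves both inside the same #ifdef. A self-contained sketch of that pattern, with invented names (struct ops, my_join) rather than the scheduler's:

#include <stdio.h>

#define CONFIG_SMP     /* comment out to build the "UP" variant */

struct ops {
    void (*work)(void);
#ifdef CONFIG_SMP
    /* Hooks that only make sense (and only exist) on SMP builds. */
    void (*join_domain)(void);
    void (*leave_domain)(void);
#endif
};

#ifdef CONFIG_SMP
static void my_join(void)  { puts("join domain");  }
static void my_leave(void) { puts("leave domain"); }
#endif

static void my_work(void) { puts("work"); }

/* Both the functions and the initializers live under the same #ifdef,
 * so a UP build never references symbols that were compiled out. */
static const struct ops my_ops = {
    .work = my_work,
#ifdef CONFIG_SMP
    .join_domain  = my_join,
    .leave_domain = my_leave,
#endif
};

int main(void)
{
    my_ops.work();
#ifdef CONFIG_SMP
    my_ops.join_domain();
    my_ops.leave_domain();
#endif
    return 0;
}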
commit 7f51f298204ec0528422cd9b23feac12612c5665
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 25 21:08:17 2008 +0100
sched: clean up schedule_balance_rt()
clean up schedule_balance_rt().
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 05ada7d44800..4d0a60e47dfa 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -701,12 +701,10 @@ static int pull_rt_task(struct rq *this_rq)
return ret;
}
-static void schedule_balance_rt(struct rq *rq,
- struct task_struct *prev)
+static void schedule_balance_rt(struct rq *rq, struct task_struct *prev)
{
/* Try to pull RT tasks here if we lower this rq's prio */
- if (unlikely(rt_task(prev)) &&
- rq->rt.highest_prio > prev->prio)
+ if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
pull_rt_task(rq);
}
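This is purely a line-joining cleanup, but the condition is easy to misread: RT priorities order so that a smaller number means a higher priority, so rq->rt.highest_prio > prev->prio says the best RT task still queued here is lower priority than the task being switched out, i.e. this runqueue's priority just dropped and pulling from overloaded CPUs is worth trying. A tiny sketch of that comparison with made-up priority values:

#include <stdio.h>
#include <stdbool.h>

/* For RT scheduling, smaller numbers are *higher* priority. */
static bool should_try_pull(int highest_prio_left, int prev_prio)
{
    /* Pull only if the best task still queued here is numerically
     * larger (= lower priority) than the task that is leaving. */
    return highest_prio_left > prev_prio;
}

int main(void)
{
    /* prev ran at prio 10; the best RT task left behind is prio 40,
     * so this CPU just dropped in priority and should try to pull. */
    printf("%d\n", should_try_pull(40, 10));   /* 1 */

    /* A prio-5 task is still queued, so nothing was lost: no pull. */
    printf("%d\n", should_try_pull(5, 10));    /* 0 */
    return 0;
}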
commit 80bf3171dcdf0f5d236e2e48afe2a95c7ce23879
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 25 21:08:17 2008 +0100
sched: clean up pull_rt_task()
clean up pull_rt_task().
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index cc38521c5723..05ada7d44800 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -576,12 +576,9 @@ static void push_rt_tasks(struct rq *rq)
static int pull_rt_task(struct rq *this_rq)
{
- struct task_struct *next;
- struct task_struct *p;
+ int this_cpu = this_rq->cpu, ret = 0, cpu;
+ struct task_struct *p, *next;
struct rq *src_rq;
- int this_cpu = this_rq->cpu;
- int cpu;
- int ret = 0;
/*
* If cpusets are used, and we have overlapping
@@ -608,23 +605,25 @@ static int pull_rt_task(struct rq *this_rq)
if (double_lock_balance(this_rq, src_rq)) {
/* unlocked our runqueue lock */
struct task_struct *old_next = next;
+
next = pick_next_task_rt(this_rq);
if (next != old_next)
ret = 1;
}
- if (likely(src_rq->rt.rt_nr_running <= 1))
+ if (likely(src_rq->rt.rt_nr_running <= 1)) {
/*
* Small chance that this_rq->curr changed
* but it's really harmless here.
*/
rt_clear_overload(this_rq);
- else
+ } else {
/*
* Heh, the src_rq is now overloaded, since
* we already have the src_rq lock, go straight
* to pulling tasks from it.
*/
goto try_pulling;
+ }
spin_unlock(&src_rq->lock);
continue;
}
@@ -638,6 +637,7 @@ static int pull_rt_task(struct rq *this_rq)
*/
if (double_lock_balance(this_rq, src_rq)) {
struct task_struct *old_next = next;
+
next = pick_next_task_rt(this_rq);
if (next != old_next)
ret = 1;
@@ -674,7 +674,7 @@ static int pull_rt_task(struct rq *this_rq)
*/
if (p->prio < src_rq->curr->prio ||
(next && next->prio < src_rq->curr->prio))
- goto bail;
+ goto out;
ret = 1;
@@ -686,9 +686,7 @@ static int pull_rt_task(struct rq *this_rq)
* case there's an even higher prio task
* in another runqueue. (low likelyhood
* but possible)
- */
-
- /*
+ *
* Update next so that we won't pick a task
* on another cpu with a priority lower (or equal)
* than the one we just picked.
@@ -696,7 +694,7 @@ static int pull_rt_task(struct rq *this_rq)
next = p;
}
- bail:
+ out:
spin_unlock(&src_rq->lock);
}
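The braces and the bail/out rename are cosmetic; the pattern around them is the interesting part: double_lock_balance() may have to drop this_rq's lock in order to take both runqueue locks in a consistent order, and whenever that happens the cached next task has to be re-picked. A small pthread sketch of the same contract, taking two mutexes in address order and reporting whether the caller's lock was dropped; lock_both() and 'decision' are invented names, not kernel interfaces (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

/* Take both locks, always locking the lower address first to avoid
 * deadlock.  Returns 1 if 'mine' had to be released and re-acquired,
 * which means any state read under it must be revalidated -- the same
 * contract the scheduler's double_lock_balance() has. */
static int lock_both(pthread_mutex_t *mine, pthread_mutex_t *other)
{
    int dropped = 0;

    if (pthread_mutex_trylock(other) != 0) {
        if (other < mine) {
            /* Wrong order: back off, then retake both in address order. */
            pthread_mutex_unlock(mine);
            pthread_mutex_lock(other);
            pthread_mutex_lock(mine);
            dropped = 1;
        } else {
            /* We already hold the lower-addressed lock; just wait. */
            pthread_mutex_lock(other);
        }
    }
    return dropped;
}

static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;
static int decision = 42;          /* stands in for the cached 'next' task */

int main(void)
{
    pthread_mutex_lock(&a);
    int cached = decision;

    if (lock_both(&a, &b)) {
        /* The lock was dropped: someone may have changed 'decision',
         * so recompute it, just as pull_rt_task() re-picks 'next'. */
        cached = decision;
    }

    printf("decision = %d\n", cached);
    pthread_mutex_unlock(&b);
    pthread_mutex_unlock(&a);
    return 0;
}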
commit 00597c3ed78e424bdafff123565c078d8b6088cf
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 25 21:08:16 2008 +0100
sched: remove leftover debugging
remove leftover debugging.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index deff0c77d705..cc38521c5723 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -253,8 +253,6 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
struct list_head *queue;
int idx;
- assert_spin_locked(&rq->lock);
-
if (likely(rq->rt.rt_nr_running < 2))
return NULL;
@@ -500,8 +498,6 @@ static int push_rt_task(struct rq *rq)
int ret = 0;
int paranoid = RT_MAX_TRIES;
- assert_spin_locked(&rq->lock);
-
if (!rq->rt.overloaded)
return 0;
@@ -546,8 +542,6 @@ static int push_rt_task(struct rq *rq)
goto out;
}
- assert_spin_locked(&lowest_rq->lock);
-
deactivate_task(rq, next_task, 0);
set_task_cpu(next_task, lowest_rq->cpu);
activate_task(lowest_rq, next_task, 0);
@@ -589,8 +583,6 @@ static int pull_rt_task(struct rq *this_rq)
int cpu;
int ret = 0;
- assert_spin_locked(&this_rq->lock);
-
/*
* If cpusets are used, and we have overlapping
* run queue cpusets, then this algorithm may not catch all.
commit 6e1938d3ad58c940ec4119d387dd92a787cb238c
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 25 21:08:16 2008 +0100
sched: remove rt_overload()
remove rt_overload() - it's an unnecessary indirection.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1a2d8f0aa659..deff0c77d705 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -17,11 +17,6 @@ static inline int rt_overloaded(void)
return atomic_read(&rto_count);
}
-static inline cpumask_t *rt_overload(void)
-{
- return &rt_overload_mask;
-}
-
static inline void rt_set_overload(struct rq *rq)
{
rq->rt.overloaded = 1;
@@ -590,7 +585,6 @@ static int pull_rt_task(struct rq *this_rq)
struct task_struct *next;
struct task_struct *p;
struct rq *src_rq;
- cpumask_t *rto_cpumask;
int this_cpu = this_rq->cpu;
int cpu;
int ret = 0;
@@ -608,9 +602,7 @@ static int pull_rt_task(struct rq *this_rq)
next = pick_next_task_rt(this_rq);
- rto_cpumask = rt_overload();
-
- for_each_cpu_mask(cpu, *rto_cpumask) {
+ for_each_cpu_mask(cpu, rt_overload_mask) {
if (this_cpu == cpu)
continue;
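With the rt_overload() wrapper gone, pull_rt_task() iterates rt_overload_mask directly with for_each_cpu_mask(), skipping its own CPU. A standalone sketch of the same kind of iteration over a small CPU bitmask, using a plain unsigned long in place of cpumask_t:

#include <stdio.h>

#define NR_CPUS 8

/* Toy stand-in for cpumask_t / for_each_cpu_mask(): walk the set bits. */
int main(void)
{
    unsigned long rt_overload_mask = 0;
    int this_cpu = 1, cpu;

    /* CPUs 1 and 5 currently have more than one runnable RT task. */
    rt_overload_mask |= 1UL << 1;
    rt_overload_mask |= 1UL << 5;

    for (cpu = 0; cpu < NR_CPUS; cpu++) {
        if (!(rt_overload_mask & (1UL << cpu)))
            continue;
        if (cpu == this_cpu)      /* never try to pull from ourselves */
            continue;
        printf("would try to pull RT tasks from CPU %d\n", cpu);
    }
    return 0;
}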
commit 84de4274893691aa8c471a1f7336d51e555d23a0
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 25 21:08:15 2008 +0100
sched: clean up kernel/sched_rt.c
clean up whitespace damage and missing comments in kernel/sched_rt.c.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b591b89710a4..1a2d8f0aa659 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -4,16 +4,24 @@
*/
#ifdef CONFIG_SMP
+
+/*
+ * The "RT overload" flag: it gets set if a CPU has more than
+ * one runnable RT task.
+ */
static cpumask_t rt_overload_mask;
static atomic_t rto_count;
+
static inline int rt_overloaded(void)
{
return atomic_read(&rto_count);
}
+
static inline cpumask_t *rt_overload(void)
{
return &rt_overload_mask;
}
+
static inline void rt_set_overload(struct rq *rq)
{
rq->rt.overloaded = 1;
@@ -28,6 +36,7 @@ static inline void rt_set_overload(struct rq *rq)
wmb();
atomic_inc(&rto_count);
}
+
static inline void rt_clear_overload(struct rq *rq)
{
/* the order here really doesn't matter */
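The added comment documents what the flag means; the detail worth spelling out is the ordering already visible in rt_set_overload(): the CPU's bit is set in rt_overload_mask first and only then, after a write barrier, is rto_count incremented, so a reader that sees a non-zero count is guaranteed to find at least one bit set when it walks the mask. A userspace model of that publication order using C11 atomics (a release increment paired with an acquire load standing in for the kernel's barriers); none of this is kernel code:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong overload_mask;   /* which CPUs are overloaded */
static atomic_int  rto_count;        /* how many CPUs are overloaded */

static void set_overload(int cpu)
{
    /* Publish the mask bit first ... */
    atomic_fetch_or_explicit(&overload_mask, 1UL << cpu,
                             memory_order_relaxed);
    /* ... then bump the count with release semantics, so any reader
     * that observes the new count also observes the mask bit. */
    atomic_fetch_add_explicit(&rto_count, 1, memory_order_release);
}

static void check_overload(void)
{
    /* Acquire pairs with the release above. */
    if (atomic_load_explicit(&rto_count, memory_order_acquire) > 0) {
        unsigned long mask =
            atomic_load_explicit(&overload_mask, memory_order_relaxed);
        printf("overloaded CPUs: mask = %#lx\n", mask);
    } else {
        printf("no RT overload\n");
    }
}

int main(void)
{
    check_overload();     /* no RT overload */
    set_overload(3);
    check_overload();     /* mask = 0x8 */
    return 0;
}

Flipping the order (count first, mask bit second) would let a reader see overload signalled but an empty mask; that is exactly the window the wmb() in rt_set_overload() closes.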
commit deeeccd41bd94a9db133d7b923f9a7479a47305d
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 25 21:08:15 2008 +0100
sched: clean up overlong line in kernel/sched_debug.c
clean up overlong line in kernel/sched_debug.c.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 0749c1837b10..b591b89710a4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -762,6 +762,7 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
/* don't touch RT tasks */
return 0;
}
+
static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
int weight = cpus_weight(*new_mask);
@@ -775,9 +776,9 @@ static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
struct rq *rq = task_rq(p);
- if ((p->nr_cpus_allowed <= 1) && (weight > 1))
+ if ((p->nr_cpus_allowed <= 1) && (weight > 1)) {
rq->rt.rt_nr_migratory++;
- else if((p->nr_cpus_allowed > 1) && (weight <= 1)) {
+ } else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
BUG_ON(!rq->rt.rt_nr_migratory);
rq->rt.rt_nr_migratory--;
}
@@ -788,6 +789,7 @@ static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
p->cpus_allowed = *new_mask;
p->nr_cpus_allowed = weight;
}
+
#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq) do { } while (0)
# define schedule_balance_rt(rq, prev) do { } while (0)
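Besides the added blank lines, the brace fix lands in the rt_nr_migratory bookkeeping: a task is counted as migratory only while its affinity allows more than one CPU, so the counter moves only on transitions across that boundary. A standalone sketch of just that transition logic (the names are made up, not the kernel's):

#include <stdio.h>

/* Per-runqueue count of RT tasks that are allowed to run on more than
 * one CPU (and hence could be pushed or pulled elsewhere). */
static int rt_nr_migratory;

/* Update the counter when a task's allowed-CPU count ('weight')
 * changes.  Only transitions across the "more than one CPU" boundary
 * matter, mirroring the checks in set_cpus_allowed_rt(). */
static void update_migratory(int old_weight, int new_weight)
{
    if (old_weight <= 1 && new_weight > 1)
        rt_nr_migratory++;
    else if (old_weight > 1 && new_weight <= 1)
        rt_nr_migratory--;
}

int main(void)
{
    update_migratory(1, 4);   /* pinned -> free to migrate: count 1 */
    update_migratory(4, 2);   /* still migratory: no change        */
    update_migratory(2, 1);   /* pinned again: count back to 0     */
    printf("rt_nr_migratory = %d\n", rt_nr_migratory);
    return 0;
}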
commit 4df64c0bfb7e0e260d10ebc005f7d0ba1308eed7
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 25 21:08:15 2008 +0100
sched: clean up find_lock_lowest_rq()
clean up find_lock_lowest_rq().
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b8435fd47f78..0749c1837b10 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -438,12 +438,11 @@ static int find_lowest_rq(struct task_struct *task)
}
/* Will lock the rq it finds */
-static struct rq *find_lock_lowest_rq(struct task_struct *task,
- struct rq *rq)
+static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
struct rq *lowest_rq = NULL;
- int cpu;
int tries;
+ int cpu;
for (tries = 0; tries < RT_MAX_TRIES; tries++) {
cpu = find_lowest_rq(task);
@@ -462,9 +461,11 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task,
* Also make sure that it wasn't scheduled on its rq.
*/
if (unlikely(task_rq(task) != rq ||
- !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
+ !cpu_isset(lowest_rq->cpu,
+ task->cpus_allowed) ||
task_running(rq, task) ||
!task->se.on_rq)) {
+
spin_unlock(&lowest_rq->lock);
lowest_rq = NULL;
break;
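The declaration reshuffle leaves the shape of find_lock_lowest_rq() intact: pick a candidate runqueue, take its lock (which may mean briefly dropping our own), then revalidate every assumption and either give up or retry, with RT_MAX_TRIES bounding the retries. A compact sketch of that bounded lock-and-revalidate loop; find_candidate() and still_valid() are placeholders, not kernel functions:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TRIES 3   /* stands in for RT_MAX_TRIES */

/* Placeholder: pick a candidate (say, the lowest-priority CPU).
 * The canned sequence just keeps the demo deterministic. */
static int find_candidate(void)
{
    static const int picks[] = { 2, 2, 3 };
    static int i;
    return picks[i++ % 3];
}

/* Placeholder: after taking the candidate's lock, recheck that the
 * world did not change underneath us (task still on this rq, CPU
 * still allowed, task not running, still queued, ...). */
static bool still_valid(int candidate)
{
    return candidate != 2;
}

static int find_locked_candidate(void)
{
    int tries, candidate;

    for (tries = 0; tries < MAX_TRIES; tries++) {
        candidate = find_candidate();
        if (candidate < 0)
            break;             /* nothing suitable at all */
        /* ... lock the candidate here; that may drop our own lock ... */
        if (!still_valid(candidate)) {
            /* ... unlock the candidate ... */
            continue;          /* something moved: try the next pick */
        }
        return candidate;      /* locked and revalidated */
    }
    return -1;
}

int main(void)
{
    printf("candidate = %d\n", find_locked_candidate());   /* 3 */
    return 0;
}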
commit 79064fbf75796c4c6a53e40729dbe52f789a91fd
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 25 21:08:14 2008 +0100
sched: clean up pick_next_highest_task_rt()
clean up pick_next_highest_task_rt().
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 61d198845f00..b8435fd47f78 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -242,8 +242,7 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
}
/* Return the second highest RT task, NULL otherwise */
-static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
- int cpu)
+static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
struct rt_prio_array *array = &rq->rt.active;
struct task_struct *next;
@@ -270,7 +269,8 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
if (queue->next->next != queue) {
/* same prio task */
- next = list_entry(queue->next->next, struct task_struct, run_list);
+ next = list_entry(queue->next->next, struct task_struct,
+ run_list);
if (pick_rt_task(rq, next, cpu))
goto out;
}
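The wrapped list_entry() line sits inside the "second highest" scan: queue->next->next != queue is the circular-list way of asking whether a second task is queued at the same priority, and if so that entry becomes the next push candidate. A standalone sketch of that check with a toy list_head-style circular list; struct task and the helper names here are invented:

#include <stddef.h>
#include <stdio.h>

/* Minimal circular doubly-linked list in the style of the kernel's
 * struct list_head, just to show the "is there a second entry on
 * this priority list?" test. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct task {
    const char *name;
    struct list_head run_list;
};

int main(void)
{
    struct list_head queue;
    struct task a = { .name = "curr" }, b = { .name = "next" };

    list_init(&queue);
    list_add_tail(&a.run_list, &queue);
    list_add_tail(&b.run_list, &queue);

    /* queue.next is the first task at this priority; queue.next->next
     * only differs from the head if a second task is queued here. */
    if (queue.next->next != &queue) {
        struct task *second =
            container_of(queue.next->next, struct task, run_list);
        printf("second task at this prio: %s\n", second->name);
    } else {
        printf("only one task at this prio\n");
    }
    return 0;
}

For an empty or single-entry list, queue->next->next walks straight back to the head, which is why the test needs no explicit length field.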