Patches contributed by Eötvös Loránd University
commit d2417e5a3e6c79e79f982c7553301dc3539873b0
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 9 11:16:47 2007 +0200
sched: remove the 'u64 now' parameter from update_stats_enqueue()
remove the 'u64 now' parameter from update_stats_enqueue().
( identity transformation that causes no change in functionality. )
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e48f32e99a0d..66209d688456 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -376,8 +376,7 @@ calc_weighted(unsigned long delta, unsigned long weight, int shift)
/*
* Task is being enqueued - update stats:
*/
-static void
-update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
s64 key;
@@ -584,7 +583,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
if (wakeup)
enqueue_sleeper(cfs_rq, se, now);
- update_stats_enqueue(cfs_rq, se, now);
+ update_stats_enqueue(cfs_rq, se);
__enqueue_entity(cfs_rq, se);
}
@@ -1035,7 +1034,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p, u64 now)
sched_info_queued(p);
- update_stats_enqueue(cfs_rq, se, now);
+ update_stats_enqueue(cfs_rq, se);
/*
* Child runs first: we let it run before the parent
* until it reschedules once. We set up the key so that
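Each of the "remove the 'u64 now' parameter" patches in this group follows the same mechanical step: the timestamp argument is dropped and the helper (or a function it calls) reads the per-runqueue clock itself, relying on the caller having run update_rq_clock() earlier in the same operation. The user-space toy below is a minimal sketch of that calling convention only; the struct layout, field names and the fixed-increment clock are simplified stand-ins, not the real scheduler code.

/* Toy model of the refactor: callers no longer thread a 'u64 now'
 * timestamp through every helper; they refresh the per-runqueue clock
 * once and the helpers read rq->clock themselves. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Simplified stand-ins for the kernel types, illustration only. */
struct rq { u64 clock; };
struct cfs_rq { struct rq *rq; };
struct sched_entity { u64 wait_start; };

static struct rq *rq_of(struct cfs_rq *cfs_rq) { return cfs_rq->rq; }

/* Stand-in for update_rq_clock(): refresh the per-runqueue timestamp. */
static void update_rq_clock(struct rq *rq) { rq->clock += 1000; }

/* After the refactor: no 'u64 now' argument, the helper reads rq->clock. */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	se->wait_start = rq_of(cfs_rq)->clock;
}

static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_rq_clock(rq_of(cfs_rq));   /* clock updated once per operation */
	update_stats_enqueue(cfs_rq, se); /* helpers no longer receive 'now'  */
}

int main(void)
{
	struct rq rq = { .clock = 0 };
	struct cfs_rq cfs_rq = { .rq = &rq };
	struct sched_entity se = { .wait_start = 0 };

	enqueue_entity(&cfs_rq, &se);
	printf("wait_start = %llu\n", (unsigned long long)se.wait_start);
	return 0;
}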
commit 5870db5b83932bea0deac3c68e3c40f377d0b8f7
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 9 11:16:47 2007 +0200
sched: remove the 'u64 now' parameter from update_stats_wait_start()
remove the 'u64 now' parameter from update_stats_wait_start().
( identity transformation that causes no change in functionality. )
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 798759882822..e48f32e99a0d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -345,7 +345,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
}
static inline void
-update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
se->wait_start_fair = cfs_rq->fair_clock;
schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
@@ -386,7 +386,7 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
* a dequeue/enqueue event is a NOP)
*/
if (se != cfs_rq_curr(cfs_rq))
- update_stats_wait_start(cfs_rq, se, now);
+ update_stats_wait_start(cfs_rq, se);
/*
* Update the key:
*/
@@ -665,7 +665,7 @@ put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev, u64 now)
update_stats_curr_end(cfs_rq, prev, now);
if (prev->on_rq)
- update_stats_wait_start(cfs_rq, prev, now);
+ update_stats_wait_start(cfs_rq, prev);
set_cfs_rq_curr(cfs_rq, NULL);
}
commit b7cc089657c12340077fe937380f9e54bbd6b300
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 9 11:16:47 2007 +0200
sched: remove the 'u64 now' parameter from update_curr()
remove the 'u64 now' parameter from update_curr().
( identity transformation that causes no change in functionality. )
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 025ac532b27a..798759882822 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -281,7 +281,7 @@ add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
* are not in our scheduling class.
*/
static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, u64 now)
+__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
unsigned long delta, delta_exec, delta_fair, delta_mine;
struct load_weight *lw = &cfs_rq->load;
@@ -320,7 +320,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, u64 now)
add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
}
-static void update_curr(struct cfs_rq *cfs_rq, u64 now)
+static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq_curr(cfs_rq);
unsigned long delta_exec;
@@ -338,7 +338,7 @@ static void update_curr(struct cfs_rq *cfs_rq, u64 now)
curr->delta_exec += delta_exec;
if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
- __update_curr(cfs_rq, curr, now);
+ __update_curr(cfs_rq, curr);
curr->delta_exec = 0;
}
curr->exec_start = rq_of(cfs_rq)->clock;
@@ -453,7 +453,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
- update_curr(cfs_rq, now);
+ update_curr(cfs_rq);
/*
* Mark the end of the wait period if dequeueing a
* waiting task:
@@ -579,7 +579,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
/*
* Update the fair clock.
*/
- update_curr(cfs_rq, now);
+ update_curr(cfs_rq);
if (wakeup)
enqueue_sleeper(cfs_rq, se, now);
@@ -660,7 +660,7 @@ put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev, u64 now)
* was not called and update_curr() has to be done:
*/
if (prev->on_rq)
- update_curr(cfs_rq, now);
+ update_curr(cfs_rq);
update_stats_curr_end(cfs_rq, prev, now);
@@ -851,7 +851,7 @@ static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
if (unlikely(rt_prio(p->prio))) {
update_rq_clock(rq);
- update_curr(cfs_rq, rq->clock);
+ update_curr(cfs_rq);
resched_task(curr);
return;
}
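The update_curr() hunk above also shows the batching scheme the series keeps intact: runtime since the last sample is accumulated in curr->delta_exec and only folded into the heavier bookkeeping in __update_curr() once it exceeds sysctl_sched_stat_granularity. A stripped-down, self-contained sketch of that accumulate-then-flush logic, with a made-up threshold and without the weighting math, looks roughly like this:

/* Sketch of the delta_exec batching in update_curr(); the granularity
 * value and the flush action are placeholders, not the kernel's math. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define STAT_GRANULARITY 1000000ULL  /* placeholder for sysctl_sched_stat_granularity */

struct entity {
	u64 exec_start;   /* rq->clock when this entity last started running */
	u64 delta_exec;   /* runtime accumulated since the last flush        */
	u64 sum_exec;     /* flushed total                                   */
};

static void flush_exec(struct entity *curr)   /* stands in for __update_curr() */
{
	curr->sum_exec += curr->delta_exec;
}

static void update_curr(struct entity *curr, u64 rq_clock)
{
	u64 delta_exec = rq_clock - curr->exec_start;

	curr->delta_exec += delta_exec;
	if (curr->delta_exec > STAT_GRANULARITY) {  /* flush only past the threshold */
		flush_exec(curr);
		curr->delta_exec = 0;
	}
	curr->exec_start = rq_clock;                /* restart the measurement window */
}

int main(void)
{
	struct entity e = { .exec_start = 0, .delta_exec = 0, .sum_exec = 0 };

	update_curr(&e, 600000);    /* below the threshold: accumulates only */
	update_curr(&e, 1500000);   /* crosses it: flushes into sum_exec     */
	printf("sum_exec = %llu\n", (unsigned long long)e.sum_exec);
	return 0;
}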
commit 5cef9eca3837a8dcf605a360e213c4179a07c41a
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 9 11:16:47 2007 +0200
sched: remove the 'u64 now' parameter from print_cfs_rq()
remove the 'u64 now' parameter from print_cfs_rq().
( identity transformation that causes no change in functionality. )
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 513b81c60e87..62ddddb49db3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -139,7 +139,7 @@ struct cfs_rq;
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now);
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
@@ -149,7 +149,7 @@ static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now)
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 8421b9399e10..f977ee53f8ce 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -106,7 +106,7 @@ print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
(long long)wait_runtime_rq_sum);
}
-void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now)
+void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
SEQ_printf(m, "\ncfs_rq %p\n", cfs_rq);
@@ -166,7 +166,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
P(cpu_load[4]);
#undef P
- print_cfs_stats(m, cpu, now);
+ print_cfs_stats(m, cpu);
print_rq(m, rq, cpu, now);
}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bcf5fc59e8e9..025ac532b27a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1108,12 +1108,12 @@ struct sched_class fair_sched_class __read_mostly = {
};
#ifdef CONFIG_SCHED_DEBUG
-static void print_cfs_stats(struct seq_file *m, int cpu, u64 now)
+static void print_cfs_stats(struct seq_file *m, int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct cfs_rq *cfs_rq;
for_each_leaf_cfs_rq(rq, cfs_rq)
- print_cfs_rq(m, cpu, cfs_rq, now);
+ print_cfs_rq(m, cpu, cfs_rq);
}
#endif
commit d281918d7c135c555d9cebcf73d4320efa8177dc
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 9 11:16:47 2007 +0200
sched: remove 'now' use from assignments
change all 'now' timestamp uses in assignments to rq->clock.
( this is an identity transformation that causes no functionality change:
all such new rq->clock is necessarily preceded by an update_rq_clock()
call. )
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index 65eb484dc268..49a5fb0cdea0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -788,8 +788,8 @@ static void update_curr_load(struct rq *rq, u64 now)
u64 start;
start = ls->load_update_start;
- ls->load_update_start = now;
- ls->delta_stat += now - start;
+ ls->load_update_start = rq->clock;
+ ls->delta_stat += rq->clock - start;
/*
* Stagger updates to ls->delta_fair. Very frequent updates
* can be expensive.
@@ -1979,8 +1979,8 @@ static void update_cpu_load(struct rq *this_rq)
exec_delta64 = ls->delta_exec + 1;
ls->delta_exec = 0;
- sample_interval64 = now - ls->load_update_last;
- ls->load_update_last = now;
+ sample_interval64 = this_rq->clock - ls->load_update_last;
+ ls->load_update_last = this_rq->clock;
if ((s64)sample_interval64 < (s64)TICK_NSEC)
sample_interval64 = TICK_NSEC;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bd20fad3deff..bcf5fc59e8e9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -333,7 +333,7 @@ static void update_curr(struct cfs_rq *cfs_rq, u64 now)
* since the last time we changed load (this cannot
* overflow on 32 bits):
*/
- delta_exec = (unsigned long)(now - curr->exec_start);
+ delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
curr->delta_exec += delta_exec;
@@ -341,14 +341,14 @@ static void update_curr(struct cfs_rq *cfs_rq, u64 now)
__update_curr(cfs_rq, curr, now);
curr->delta_exec = 0;
}
- curr->exec_start = now;
+ curr->exec_start = rq_of(cfs_rq)->clock;
}
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
se->wait_start_fair = cfs_rq->fair_clock;
- schedstat_set(se->wait_start, now);
+ schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}
/*
@@ -421,7 +421,8 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
unsigned long delta_fair = se->delta_fair_run;
- schedstat_set(se->wait_max, max(se->wait_max, now - se->wait_start));
+ schedstat_set(se->wait_max, max(se->wait_max,
+ rq_of(cfs_rq)->clock - se->wait_start));
if (unlikely(se->load.weight != NICE_0_LOAD))
delta_fair = calc_weighted(delta_fair, se->load.weight,
@@ -470,7 +471,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
/*
* We are starting a new run period:
*/
- se->exec_start = now;
+ se->exec_start = rq_of(cfs_rq)->clock;
}
/*
@@ -545,7 +546,7 @@ enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
#ifdef CONFIG_SCHEDSTATS
if (se->sleep_start) {
- u64 delta = now - se->sleep_start;
+ u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
if ((s64)delta < 0)
delta = 0;
@@ -557,7 +558,7 @@ enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
se->sum_sleep_runtime += delta;
}
if (se->block_start) {
- u64 delta = now - se->block_start;
+ u64 delta = rq_of(cfs_rq)->clock - se->block_start;
if ((s64)delta < 0)
delta = 0;
@@ -599,9 +600,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
struct task_struct *tsk = task_of(se);
if (tsk->state & TASK_INTERRUPTIBLE)
- se->sleep_start = now;
+ se->sleep_start = rq_of(cfs_rq)->clock;
if (tsk->state & TASK_UNINTERRUPTIBLE)
- se->block_start = now;
+ se->block_start = rq_of(cfs_rq)->clock;
}
cfs_rq->wait_runtime -= se->wait_runtime;
#endif
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 5b559e8c8aa6..5fbd87ad0f56 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -15,14 +15,14 @@ static inline void update_curr_rt(struct rq *rq, u64 now)
if (!task_has_rt_policy(curr))
return;
- delta_exec = now - curr->se.exec_start;
+ delta_exec = rq->clock - curr->se.exec_start;
if (unlikely((s64)delta_exec < 0))
delta_exec = 0;
schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec;
- curr->se.exec_start = now;
+ curr->se.exec_start = rq->clock;
}
static void
@@ -89,7 +89,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
queue = array->queue + idx;
next = list_entry(queue->next, struct task_struct, run_list);
- next->se.exec_start = now;
+ next->se.exec_start = rq->clock;
return next;
}
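The enqueue_sleeper()/dequeue_entity() hunks above show what the sleep statistics look like once the 'now' argument is gone: the sleep (or block) start is stamped from rq->clock when the entity is dequeued, and the delta is taken against rq->clock again at wakeup, clamped to zero in case the clock moved backwards between the two samples. Below is a self-contained sketch of that stamp-and-diff pattern, with simplified types and a fake clock source standing in for update_rq_clock():

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

struct rq { u64 clock; };
struct sched_entity { u64 sleep_start; u64 sum_sleep_runtime; };

/* Placeholder for update_rq_clock(): every rq->clock read below is
 * preceded by one of these calls, mirroring the invariant stated in the
 * commit message. */
static void update_rq_clock(struct rq *rq, u64 raw) { rq->clock = raw; }

static void dequeue_sleeping(struct rq *rq, struct sched_entity *se, u64 raw)
{
	update_rq_clock(rq, raw);
	se->sleep_start = rq->clock;           /* stamp the sleep start */
}

static void enqueue_sleeper(struct rq *rq, struct sched_entity *se, u64 raw)
{
	u64 delta;

	update_rq_clock(rq, raw);
	delta = rq->clock - se->sleep_start;   /* diff against the same clock */
	if ((s64)delta < 0)                    /* clamp if the clock went backwards */
		delta = 0;
	se->sum_sleep_runtime += delta;
	se->sleep_start = 0;
}

int main(void)
{
	struct rq rq = { .clock = 0 };
	struct sched_entity se = { 0 };

	dequeue_sleeping(&rq, &se, 1000);
	enqueue_sleeper(&rq, &se, 5000);
	printf("slept for %llu ns\n", (unsigned long long)se.sum_sleep_runtime);
	return 0;
}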
commit eb59449400f1e5984509e502711141302a2867ab
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 9 11:16:47 2007 +0200
sched: remove __rq_clock()
remove the (now unused) __rq_clock() function.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index d67345175179..65eb484dc268 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -361,13 +361,6 @@ static void update_rq_clock(struct rq *rq)
__update_rq_clock(rq);
}
-static u64 __rq_clock(struct rq *rq)
-{
- __update_rq_clock(rq);
-
- return rq->clock;
-}
-
/*
* The domain tree (rq->sd) is protected by RCU's quiescent state transition.
* See detach_destroy_domains: synchronize_sched for details.
commit c1b3da3ecdbf9e9f377474c11ba988b8821f86c8
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 9 11:16:47 2007 +0200
sched: eliminate __rq_clock() use
eliminate __rq_clock() use by changing it to:
__update_rq_clock(rq)
now = rq->clock;
identity transformation - no change in behavior.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index 893211054790..d67345175179 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1967,9 +1967,12 @@ static void update_cpu_load(struct rq *this_rq)
unsigned long total_load = this_rq->ls.load.weight;
unsigned long this_load = total_load;
struct load_stat *ls = &this_rq->ls;
- u64 now = __rq_clock(this_rq);
+ u64 now;
int i, scale;
+ __update_rq_clock(this_rq);
+ now = this_rq->clock;
+
this_rq->nr_load_updates++;
if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
goto do_avg;
@@ -3458,7 +3461,8 @@ asmlinkage void __sched schedule(void)
spin_lock_irq(&rq->lock);
clear_tsk_need_resched(prev);
- now = __rq_clock(rq);
+ __update_rq_clock(rq);
+ now = rq->clock;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 969f08c8bd34..bd20fad3deff 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -672,7 +672,10 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
struct rq *rq = rq_of(cfs_rq);
struct sched_entity *next;
- u64 now = __rq_clock(rq);
+ u64 now;
+
+ __update_rq_clock(rq);
+ now = rq->clock;
/*
* Dequeue and enqueue the task to update its
@@ -824,8 +827,10 @@ dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep, u64 now)
static void yield_task_fair(struct rq *rq, struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
- u64 now = __rq_clock(rq);
+ u64 now;
+ __update_rq_clock(rq);
+ now = rq->clock;
/*
* Dequeue and enqueue the task to update its
* position within the tree:
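Both this patch and the later rq_clock() elimination apply the same local rewrite: a getter that updated the clock as a side effect is replaced by an explicit update followed by a plain field read. The toy below sketches only that calling convention; the clock arithmetic is simplified and has none of the kernel's monotonicity or overflow guards.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct rq { u64 clock; u64 prev_raw; };

static u64 sched_clock_stub(void) { static u64 t; return t += 123; }

static void __update_rq_clock(struct rq *rq)
{
	u64 now = sched_clock_stub();
	rq->clock += now - rq->prev_raw;   /* simplified: no backward-clock guards */
	rq->prev_raw = now;
}

/* Before: a getter with a hidden side effect.
 *
 *     static u64 __rq_clock(struct rq *rq)
 *     {
 *             __update_rq_clock(rq);
 *             return rq->clock;
 *     }
 *
 *     now = __rq_clock(rq);
 *
 * After: the update is explicit, the read is just a field access, and the
 * local 'now' can be dropped entirely in the later patches that make each
 * helper read rq->clock itself. */
int main(void)
{
	struct rq rq = { .clock = 0, .prev_raw = 0 };
	u64 now;

	__update_rq_clock(&rq);   /* explicit update ...    */
	now = rq.clock;           /* ... then a plain read  */

	printf("now = %llu\n", (unsigned long long)now);
	return 0;
}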
commit 2ab81159fa426bd09c21faf7c25fba13bc9d2902
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 9 11:16:47 2007 +0200
sched: remove rq_clock()
remove the now unused rq_clock() function.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index fe3c152d0c68..893211054790 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -368,12 +368,6 @@ static u64 __rq_clock(struct rq *rq)
return rq->clock;
}
-static u64 rq_clock(struct rq *rq)
-{
- update_rq_clock(rq);
- return rq->clock;
-}
-
/*
* The domain tree (rq->sd) is protected by RCU's quiescent state transition.
* See detach_destroy_domains: synchronize_sched for details.
commit a8e504d2a57ecd3f905b402072cdd1903f963bef
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 9 11:16:47 2007 +0200
sched: eliminate rq_clock() use
eliminate rq_clock() use by changing it to:
update_rq_clock(rq)
now = rq->clock;
identity transformation - no change in behavior.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index d613723f324f..fe3c152d0c68 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -927,7 +927,10 @@ static int effective_prio(struct task_struct *p)
*/
static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
{
- u64 now = rq_clock(rq);
+ u64 now;
+
+ update_rq_clock(rq);
+ now = rq->clock;
if (p->state == TASK_UNINTERRUPTIBLE)
rq->nr_uninterruptible--;
@@ -941,7 +944,10 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
*/
static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
{
- u64 now = rq_clock(rq);
+ u64 now;
+
+ update_rq_clock(rq);
+ now = rq->clock;
if (p->state == TASK_UNINTERRUPTIBLE)
rq->nr_uninterruptible--;
@@ -1664,7 +1670,8 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
rq = task_rq_lock(p, &flags);
BUG_ON(p->state != TASK_RUNNING);
this_cpu = smp_processor_id(); /* parent's CPU */
- now = rq_clock(rq);
+ update_rq_clock(rq);
+ now = rq->clock;
p->prio = effective_prio(p);
@@ -2134,7 +2141,8 @@ void sched_exec(void)
static void pull_task(struct rq *src_rq, struct task_struct *p,
struct rq *this_rq, int this_cpu)
{
- deactivate_task(src_rq, p, 0, rq_clock(src_rq));
+ update_rq_clock(src_rq);
+ deactivate_task(src_rq, p, 0, src_rq->clock);
set_task_cpu(p, this_cpu);
activate_task(this_rq, p, 0);
/*
@@ -3221,7 +3229,8 @@ unsigned long long task_sched_runtime(struct task_struct *p)
rq = task_rq_lock(p, &flags);
ns = p->se.sum_exec_runtime;
if (rq->curr == p) {
- delta_exec = rq_clock(rq) - p->se.exec_start;
+ update_rq_clock(rq);
+ delta_exec = rq->clock - p->se.exec_start;
if ((s64)delta_exec > 0)
ns += delta_exec;
}
@@ -3919,7 +3928,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
BUG_ON(prio < 0 || prio > MAX_PRIO);
rq = task_rq_lock(p, &flags);
- now = rq_clock(rq);
+ update_rq_clock(rq);
+ now = rq->clock;
oldprio = p->prio;
on_rq = p->se.on_rq;
@@ -3966,7 +3976,8 @@ void set_user_nice(struct task_struct *p, long nice)
* the task might be in the middle of scheduling on another CPU.
*/
rq = task_rq_lock(p, &flags);
- now = rq_clock(rq);
+ update_rq_clock(rq);
+ now = rq->clock;
/*
* The RT priorities are set via sched_setscheduler(), but we still
* allow the 'normal' nice value to be set - but as expected
@@ -4228,8 +4239,10 @@ int sched_setscheduler(struct task_struct *p, int policy,
goto recheck;
}
on_rq = p->se.on_rq;
- if (on_rq)
- deactivate_task(rq, p, 0, rq_clock(rq));
+ if (on_rq) {
+ update_rq_clock(rq);
+ deactivate_task(rq, p, 0, rq->clock);
+ }
oldprio = p->prio;
__setscheduler(rq, p, policy, param->sched_priority);
if (on_rq) {
@@ -4981,8 +4994,10 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
goto out;
on_rq = p->se.on_rq;
- if (on_rq)
- deactivate_task(rq_src, p, 0, rq_clock(rq_src));
+ if (on_rq) {
+ update_rq_clock(rq_src);
+ deactivate_task(rq_src, p, 0, rq_src->clock);
+ }
set_task_cpu(p, dest_cpu);
if (on_rq) {
activate_task(rq_dest, p, 0);
@@ -5215,7 +5230,8 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
for ( ; ; ) {
if (!rq->nr_running)
break;
- next = pick_next_task(rq, rq->curr, rq_clock(rq));
+ update_rq_clock(rq);
+ next = pick_next_task(rq, rq->curr, rq->clock);
if (!next)
break;
migrate_dead(dead_cpu, next);
@@ -5400,7 +5416,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
rq = task_rq_lock(rq->idle, &flags);
- deactivate_task(rq, rq->idle, 0, rq_clock(rq));
+ update_rq_clock(rq);
+ deactivate_task(rq, rq->idle, 0, rq->clock);
rq->idle->static_prio = MAX_PRIO;
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
@@ -6638,8 +6655,10 @@ void normalize_rt_tasks(void)
#endif
on_rq = p->se.on_rq;
- if (on_rq)
- deactivate_task(task_rq(p), p, 0, rq_clock(task_rq(p)));
+ if (on_rq) {
+ update_rq_clock(task_rq(p));
+ deactivate_task(task_rq(p), p, 0, task_rq(p)->clock);
+ }
__setscheduler(rq, p, SCHED_NORMAL, 0);
if (on_rq) {
activate_task(task_rq(p), p, 0);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 923bed0b0c42..969f08c8bd34 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -844,7 +844,8 @@ static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
unsigned long gran;
if (unlikely(rt_prio(p->prio))) {
- update_curr(cfs_rq, rq_clock(rq));
+ update_rq_clock(rq);
+ update_curr(cfs_rq, rq->clock);
resched_task(curr);
return;
}
@@ -1063,9 +1064,12 @@ static void set_curr_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se;
- u64 now = rq_clock(rq);
+ u64 now;
struct cfs_rq *cfs_rq;
+ update_rq_clock(rq);
+ now = rq->clock;
+
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
set_next_entity(cfs_rq, se, now);
commit b04a0f4c1651a553ee1a03dc70297d66ec74db5c
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Aug 9 11:16:46 2007 +0200
sched: add [__]update_rq_clock(rq)
add the [__]update_rq_clock(rq) functions. (No change in functionality,
just reorganization to prepare for elimination of the heavy 64-bit
timestamp-passing in the scheduler.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index 1fa07c14624e..d613723f324f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -318,15 +318,19 @@ static inline int cpu_of(struct rq *rq)
}
/*
- * Per-runqueue clock, as finegrained as the platform can give us:
+ * Update the per-runqueue clock, as finegrained as the platform can give
+ * us, but without assuming monotonicity, etc.:
*/
-static unsigned long long __rq_clock(struct rq *rq)
+static void __update_rq_clock(struct rq *rq)
{
u64 prev_raw = rq->prev_clock_raw;
u64 now = sched_clock();
s64 delta = now - prev_raw;
u64 clock = rq->clock;
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
+#endif
/*
* Protect against sched_clock() occasionally going backwards:
*/
@@ -349,17 +353,24 @@ static unsigned long long __rq_clock(struct rq *rq)
rq->prev_clock_raw = now;
rq->clock = clock;
+}
- return clock;
+static void update_rq_clock(struct rq *rq)
+{
+ if (likely(smp_processor_id() == cpu_of(rq)))
+ __update_rq_clock(rq);
}
-static unsigned long long rq_clock(struct rq *rq)
+static u64 __rq_clock(struct rq *rq)
{
- int this_cpu = smp_processor_id();
+ __update_rq_clock(rq);
- if (this_cpu == cpu_of(rq))
- return __rq_clock(rq);
+ return rq->clock;
+}
+static u64 rq_clock(struct rq *rq)
+{
+ update_rq_clock(rq);
return rq->clock;
}
@@ -386,9 +397,12 @@ unsigned long long cpu_clock(int cpu)
{
unsigned long long now;
unsigned long flags;
+ struct rq *rq;
local_irq_save(flags);
- now = rq_clock(cpu_rq(cpu));
+ rq = cpu_rq(cpu);
+ update_rq_clock(rq);
+ now = rq->clock;
local_irq_restore(flags);
return now;
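By its timestamp this is the earliest patch of the group: it introduces the pair of helpers the rest of the series leans on. As the hunks above show, __update_rq_clock() always advances the clock and, under CONFIG_SCHED_DEBUG, warns if it is used on a runqueue that does not belong to the current CPU, while update_rq_clock() silently skips the update for remote runqueues. The toy below mirrors that split; smp_processor_id(), the warning, and the clock source are all user-space stand-ins, not the kernel implementations.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct rq { int cpu; u64 clock; };

static int fake_this_cpu = 0;                     /* stand-in for smp_processor_id() */
static int smp_processor_id(void) { return fake_this_cpu; }
static int cpu_of(struct rq *rq) { return rq->cpu; }
static u64 sched_clock_stub(void) { static u64 t; return t += 500; }

/* Unconditional update; callers must know the rq is the local one. */
static void __update_rq_clock(struct rq *rq)
{
	assert(cpu_of(rq) == smp_processor_id()); /* models WARN_ON_ONCE under SCHED_DEBUG */
	rq->clock = sched_clock_stub();           /* simplified: no backward/overflow guards */
}

/* Safe on any rq: only updates when the rq belongs to this CPU. */
static void update_rq_clock(struct rq *rq)
{
	if (smp_processor_id() == cpu_of(rq))
		__update_rq_clock(rq);
}

int main(void)
{
	struct rq local = { .cpu = 0, .clock = 0 };
	struct rq remote = { .cpu = 1, .clock = 0 };

	update_rq_clock(&local);    /* updates the local runqueue's clock  */
	update_rq_clock(&remote);   /* skipped: remote CPU's runqueue      */
	printf("local=%llu remote=%llu\n",
	       (unsigned long long)local.clock,
	       (unsigned long long)remote.clock);
	return 0;
}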