Patches contributed by Eötvös Loránd University


commit 546fe3c909b0a4235c7237c210da483eaaac1edc
Author: Ingo Molnar <mingo@elte.hu>
Date:   Thu Aug 9 11:16:51 2007 +0200

    sched: move the __update_rq_clock() call to scheduler_tick()
    
    move the __update_rq_clock() call from update_cpu_load() to
    scheduler_tick().
    
    ( identity transformation that causes no change in functionality. )
    
    this allows the direct use of rq->clock in ->task_tick() functions.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index b78b9d9ffd1c..3f5d52949990 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1949,8 +1949,6 @@ static void update_cpu_load(struct rq *this_rq)
 	struct load_stat *ls = &this_rq->ls;
 	int i, scale;
 
-	__update_rq_clock(this_rq);
-
 	this_rq->nr_load_updates++;
 	if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
 		goto do_avg;
@@ -3301,6 +3299,7 @@ void scheduler_tick(void)
 	struct task_struct *curr = rq->curr;
 
 	spin_lock(&rq->lock);
+	__update_rq_clock(rq);
 	update_cpu_load(rq);
 	if (curr != rq->idle) /* FIXME: needed? */
 		curr->sched_class->task_tick(rq, curr);
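
Note: with the clock update hoisted into scheduler_tick(), a scheduling-class ->task_tick() handler can read rq->clock directly instead of being handed a 'u64 now' argument. A minimal stand-alone sketch of that call shape follows; the *_model names and the fake clock increment are illustrative assumptions, not the kernel's actual code.

    /*
     * Simplified model: scheduler_tick() refreshes rq->clock once under
     * the runqueue lock, and the per-class tick hook reads rq->clock
     * directly rather than receiving a timestamp parameter.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct rq {
            uint64_t clock;                 /* per-runqueue clock, nanoseconds */
    };

    static void update_rq_clock_model(struct rq *rq)
    {
            /* the kernel derives this from sched_clock(); here we just bump it */
            rq->clock += 1000000;           /* pretend one tick is 1 ms */
    }

    static void task_tick_model(struct rq *rq)
    {
            /* the hook can now use rq->clock directly */
            printf("task_tick at %llu ns\n", (unsigned long long)rq->clock);
    }

    static void scheduler_tick_model(struct rq *rq)
    {
            update_rq_clock_model(rq);      /* done once, before the class hook */
            task_tick_model(rq);
    }

    int main(void)
    {
            struct rq rq = { .clock = 0 };

            scheduler_tick_model(&rq);
            scheduler_tick_model(&rq);
            return 0;
    }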

commit a48da48b403319918a587be8b5d46fe1d186c2ac
Author: Ingo Molnar <mingo@elte.hu>
Date:   Thu Aug 9 11:16:51 2007 +0200

    sched debug: remove the 'u64 now' parameter from print_task()/_rq()
    
    remove the 'u64 now' parameter from sched_debug.c:print_task()/_rq().
    
    ( identity transformation that causes no change in functionality. )
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index f977ee53f8ce..3da32156394e 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -29,7 +29,7 @@
  } while (0)
 
 static void
-print_task(struct seq_file *m, struct rq *rq, struct task_struct *p, u64 now)
+print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
 	if (rq->curr == p)
 		SEQ_printf(m, "R");
@@ -56,7 +56,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p, u64 now)
 #endif
 }
 
-static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu, u64 now)
+static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 {
 	struct task_struct *g, *p;
 
@@ -77,7 +77,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu, u64 now)
 		if (!p->se.on_rq || task_cpu(p) != rq_cpu)
 			continue;
 
-		print_task(m, rq, p, now);
+		print_task(m, rq, p);
 	} while_each_thread(g, p);
 
 	read_unlock_irq(&tasklist_lock);
@@ -124,7 +124,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	print_cfs_rq_runtime_sum(m, cpu, cfs_rq);
 }
 
-static void print_cpu(struct seq_file *m, int cpu, u64 now)
+static void print_cpu(struct seq_file *m, int cpu)
 {
 	struct rq *rq = &per_cpu(runqueues, cpu);
 
@@ -168,7 +168,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
 
 	print_cfs_stats(m, cpu);
 
-	print_rq(m, rq, cpu, now);
+	print_rq(m, rq, cpu);
 }
 
 static int sched_debug_show(struct seq_file *m, void *v)
@@ -184,7 +184,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	SEQ_printf(m, "now at %Lu nsecs\n", (unsigned long long)now);
 
 	for_each_online_cpu(cpu)
-		print_cpu(m, cpu, now);
+		print_cpu(m, cpu);
 
 	SEQ_printf(m, "\n");
 

commit bdd4dfa89c1e3e1379729b9edec1526b3ecc25ec
Author: Ingo Molnar <mingo@elte.hu>
Date:   Thu Aug 9 11:16:51 2007 +0200

    sched: remove the 'u64 now' local variables
    
    final step: remove all (now superfluous) 'u64 now' variables.
    
    ( identity transformation that causes no change in functionality. )
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index 2dc5d2f7b392..b78b9d9ffd1c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -910,10 +910,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	u64 now;
-
 	update_rq_clock(rq);
-	now = rq->clock;
 
 	if (p->state == TASK_UNINTERRUPTIBLE)
 		rq->nr_uninterruptible--;
@@ -927,10 +924,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
 {
-	u64 now;
-
 	update_rq_clock(rq);
-	now = rq->clock;
 
 	if (p->state == TASK_UNINTERRUPTIBLE)
 		rq->nr_uninterruptible--;
@@ -1647,13 +1641,11 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	unsigned long flags;
 	struct rq *rq;
 	int this_cpu;
-	u64 now;
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
 	this_cpu = smp_processor_id(); /* parent's CPU */
 	update_rq_clock(rq);
-	now = rq->clock;
 
 	p->prio = effective_prio(p);
 
@@ -1955,11 +1947,9 @@ static void update_cpu_load(struct rq *this_rq)
 	unsigned long total_load = this_rq->ls.load.weight;
 	unsigned long this_load =  total_load;
 	struct load_stat *ls = &this_rq->ls;
-	u64 now;
 	int i, scale;
 
 	__update_rq_clock(this_rq);
-	now = this_rq->clock;
 
 	this_rq->nr_load_updates++;
 	if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
@@ -3431,7 +3421,6 @@ asmlinkage void __sched schedule(void)
 	struct task_struct *prev, *next;
 	long *switch_count;
 	struct rq *rq;
-	u64 now;
 	int cpu;
 
 need_resched:
@@ -3450,7 +3439,6 @@ asmlinkage void __sched schedule(void)
 	spin_lock_irq(&rq->lock);
 	clear_tsk_need_resched(prev);
 	__update_rq_clock(rq);
-	now = rq->clock;
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
@@ -3909,13 +3897,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	unsigned long flags;
 	int oldprio, on_rq;
 	struct rq *rq;
-	u64 now;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
 	rq = task_rq_lock(p, &flags);
 	update_rq_clock(rq);
-	now = rq->clock;
 
 	oldprio = p->prio;
 	on_rq = p->se.on_rq;
@@ -3953,7 +3939,6 @@ void set_user_nice(struct task_struct *p, long nice)
 	int old_prio, delta, on_rq;
 	unsigned long flags;
 	struct rq *rq;
-	u64 now;
 
 	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
 		return;
@@ -3963,7 +3948,6 @@ void set_user_nice(struct task_struct *p, long nice)
 	 */
 	rq = task_rq_lock(p, &flags);
 	update_rq_clock(rq);
-	now = rq->clock;
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - but as expected
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4a2cbde1057f..eb7ca49c3260 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -667,10 +667,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
 	struct rq *rq = rq_of(cfs_rq);
 	struct sched_entity *next;
-	u64 now;
 
 	__update_rq_clock(rq);
-	now = rq->clock;
 
 	/*
 	 * Dequeue and enqueue the task to update its
@@ -820,10 +818,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 static void yield_task_fair(struct rq *rq, struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
-	u64 now;
 
 	__update_rq_clock(rq);
-	now = rq->clock;
 	/*
 	 * Dequeue and enqueue the task to update its
 	 * position within the tree:
@@ -1062,11 +1058,9 @@ static void set_curr_task_fair(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_entity *se = &curr->se;
-	u64 now;
 	struct cfs_rq *cfs_rq;
 
 	update_rq_clock(rq);
-	now = rq->clock;
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
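
Note: taken together with the parameter-removal patches below, the series converges on one pattern: a function that needs a timestamp calls update_rq_clock()/__update_rq_clock() itself and reads rq->clock, instead of every caller computing a 'u64 now' and threading it down the call chain. A minimal stand-alone sketch of the before/after calling convention follows; the *_old/*_new/*_model names are illustrative assumptions, not kernel identifiers.

    #include <stdint.h>

    struct rq {
            uint64_t clock;
            unsigned long nr_running;
    };

    static void update_rq_clock_model(struct rq *rq)
    {
            rq->clock += 1;                 /* stand-in for the real clock update */
    }

    /* old shape: every caller had to compute and pass 'now' down the chain */
    static void enqueue_task_old(struct rq *rq, uint64_t now)
    {
            rq->nr_running++;
            (void)now;                      /* most callees never actually used it */
    }

    /* new shape: the runqueue clock is updated once and read where needed */
    static void enqueue_task_new(struct rq *rq)
    {
            rq->nr_running++;               /* a timestamp, if needed, is rq->clock */
    }

    static void activate_task_model(struct rq *rq)
    {
            update_rq_clock_model(rq);      /* replaces: now = rq->clock; passed around */
            enqueue_task_new(rq);
    }

    int main(void)
    {
            struct rq rq = { 0, 0 };

            /* old calling convention: the caller supplied the timestamp */
            enqueue_task_old(&rq, rq.clock);

            /* new calling convention: no timestamp threaded through */
            activate_task_model(&rq);

            return rq.nr_running == 2 ? 0 : 1;
    }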

commit 2e1cb74a501c4b1bca5e55dabff24f267349193c
Author: Ingo Molnar <mingo@elte.hu>
Date:   Thu Aug 9 11:16:49 2007 +0200

    sched: remove the 'u64 now' parameter from deactivate_task()
    
    remove the 'u64 now' parameter from deactivate_task().
    
    ( identity transformation that causes no change in functionality. )
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index 05ce3f54e815..2dc5d2f7b392 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -942,8 +942,7 @@ static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
 /*
  * deactivate_task - remove a task from the runqueue.
  */
-static void
-deactivate_task(struct rq *rq, struct task_struct *p, int sleep, u64 now)
+static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
 	if (p->state == TASK_UNINTERRUPTIBLE)
 		rq->nr_uninterruptible++;
@@ -2128,7 +2127,7 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 		      struct rq *this_rq, int this_cpu)
 {
 	update_rq_clock(src_rq);
-	deactivate_task(src_rq, p, 0, src_rq->clock);
+	deactivate_task(src_rq, p, 0);
 	set_task_cpu(p, this_cpu);
 	activate_task(this_rq, p, 0);
 	/*
@@ -3458,7 +3457,7 @@ asmlinkage void __sched schedule(void)
 				unlikely(signal_pending(prev)))) {
 			prev->state = TASK_RUNNING;
 		} else {
-			deactivate_task(rq, prev, 1, now);
+			deactivate_task(rq, prev, 1);
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4228,7 +4227,7 @@ int sched_setscheduler(struct task_struct *p, int policy,
 	on_rq = p->se.on_rq;
 	if (on_rq) {
 		update_rq_clock(rq);
-		deactivate_task(rq, p, 0, rq->clock);
+		deactivate_task(rq, p, 0);
 	}
 	oldprio = p->prio;
 	__setscheduler(rq, p, policy, param->sched_priority);
@@ -4983,7 +4982,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	on_rq = p->se.on_rq;
 	if (on_rq) {
 		update_rq_clock(rq_src);
-		deactivate_task(rq_src, p, 0, rq_src->clock);
+		deactivate_task(rq_src, p, 0);
 	}
 	set_task_cpu(p, dest_cpu);
 	if (on_rq) {
@@ -5404,7 +5403,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		/* Idle task back to normal (off runqueue, low prio) */
 		rq = task_rq_lock(rq->idle, &flags);
 		update_rq_clock(rq);
-		deactivate_task(rq, rq->idle, 0, rq->clock);
+		deactivate_task(rq, rq->idle, 0);
 		rq->idle->static_prio = MAX_PRIO;
 		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
 		rq->idle->sched_class = &idle_sched_class;
@@ -6644,7 +6643,7 @@ void normalize_rt_tasks(void)
 		on_rq = p->se.on_rq;
 		if (on_rq) {
 			update_rq_clock(task_rq(p));
-			deactivate_task(task_rq(p), p, 0, task_rq(p)->clock);
+			deactivate_task(task_rq(p), p, 0);
 		}
 		__setscheduler(rq, p, SCHED_NORMAL, 0);
 		if (on_rq) {

commit 69be72c13db0e9165796422b544f989033146171
Author: Ingo Molnar <mingo@elte.hu>
Date:   Thu Aug 9 11:16:49 2007 +0200

    sched: remove the 'u64 now' parameter from dequeue_task()
    
    remove the 'u64 now' parameter from dequeue_task().
    
    ( identity transformation that causes no change in functionality. )
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index 0ecfdd134f77..05ce3f54e815 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -853,8 +853,7 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
 	p->se.on_rq = 1;
 }
 
-static void
-dequeue_task(struct rq *rq, struct task_struct *p, int sleep, u64 now)
+static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
@@ -949,7 +948,7 @@ deactivate_task(struct rq *rq, struct task_struct *p, int sleep, u64 now)
 	if (p->state == TASK_UNINTERRUPTIBLE)
 		rq->nr_uninterruptible++;
 
-	dequeue_task(rq, p, sleep, now);
+	dequeue_task(rq, p, sleep);
 	dec_nr_running(p, rq);
 }
 
@@ -3922,7 +3921,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	oldprio = p->prio;
 	on_rq = p->se.on_rq;
 	if (on_rq)
-		dequeue_task(rq, p, 0, now);
+		dequeue_task(rq, p, 0);
 
 	if (rt_prio(prio))
 		p->sched_class = &rt_sched_class;
@@ -3978,7 +3977,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	}
 	on_rq = p->se.on_rq;
 	if (on_rq) {
-		dequeue_task(rq, p, 0, now);
+		dequeue_task(rq, p, 0);
 		dec_load(rq, p);
 	}
 

commit 8159f87e2bfeeba8887b8ef34f7b523958910132
Author: Ingo Molnar <mingo@elte.hu>
Date:   Thu Aug 9 11:16:49 2007 +0200

    sched: remove the 'u64 now' parameter from enqueue_task()
    
    remove the 'u64 now' parameter from enqueue_task().
    
    ( identity transformation that causes no change in functionality. )
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index 86e751a19d6b..0ecfdd134f77 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -846,8 +846,7 @@ static void set_load_weight(struct task_struct *p)
 	p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
 }
 
-static void
-enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
+static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, wakeup);
@@ -920,7 +919,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 	if (p->state == TASK_UNINTERRUPTIBLE)
 		rq->nr_uninterruptible--;
 
-	enqueue_task(rq, p, wakeup, now);
+	enqueue_task(rq, p, wakeup);
 	inc_nr_running(p, rq);
 }
 
@@ -937,7 +936,7 @@ static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
 	if (p->state == TASK_UNINTERRUPTIBLE)
 		rq->nr_uninterruptible--;
 
-	enqueue_task(rq, p, 0, now);
+	enqueue_task(rq, p, 0);
 	inc_nr_running(p, rq);
 }
 
@@ -3933,7 +3932,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	p->prio = prio;
 
 	if (on_rq) {
-		enqueue_task(rq, p, 0, now);
+		enqueue_task(rq, p, 0);
 		/*
 		 * Reschedule if we are currently running on this runqueue and
 		 * our priority decreased, or if we are not currently running on
@@ -3990,7 +3989,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	delta = p->prio - old_prio;
 
 	if (on_rq) {
-		enqueue_task(rq, p, 0, now);
+		enqueue_task(rq, p, 0);
 		inc_load(rq, p);
 		/*
 		 * If the task increased its priority or is running and

commit db53181e41728cfd58336925422dc17f1d2c655c
Author: Ingo Molnar <mingo@elte.hu>
Date:   Thu Aug 9 11:16:49 2007 +0200

    sched: remove the 'u64 now' parameter from dec_nr_running()
    
    remove the 'u64 now' parameter from dec_nr_running().
    
    ( identity transformation that causes no change in functionality. )
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index bdb683464c00..86e751a19d6b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -816,7 +816,7 @@ static void inc_nr_running(struct task_struct *p, struct rq *rq)
 	inc_load(rq, p);
 }
 
-static void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now)
+static void dec_nr_running(struct task_struct *p, struct rq *rq)
 {
 	rq->nr_running--;
 	dec_load(rq, p);
@@ -951,7 +951,7 @@ deactivate_task(struct rq *rq, struct task_struct *p, int sleep, u64 now)
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep, now);
-	dec_nr_running(p, rq, now);
+	dec_nr_running(p, rq);
 }
 
 /**

commit e5fa2237b53d751c59f773a68e1b12c411f0b19b
Author: Ingo Molnar <mingo@elte.hu>
Date:   Thu Aug 9 11:16:49 2007 +0200

    sched: remove the 'u64 now' parameter from inc_nr_running()
    
    remove the 'u64 now' parameter from inc_nr_running().
    
    ( identity transformation that causes no change in functionality. )
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index 23583bb93273..bdb683464c00 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -810,7 +810,7 @@ static inline void dec_load(struct rq *rq, const struct task_struct *p)
 	update_load_sub(&rq->ls.load, p->se.load.weight);
 }
 
-static void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now)
+static void inc_nr_running(struct task_struct *p, struct rq *rq)
 {
 	rq->nr_running++;
 	inc_load(rq, p);
@@ -921,7 +921,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup, now);
-	inc_nr_running(p, rq, now);
+	inc_nr_running(p, rq);
 }
 
 /*
@@ -938,7 +938,7 @@ static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, 0, now);
-	inc_nr_running(p, rq, now);
+	inc_nr_running(p, rq);
 }
 
 /*
@@ -1671,7 +1671,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		 * management (if any):
 		 */
 		p->sched_class->task_new(rq, p);
-		inc_nr_running(p, rq, now);
+		inc_nr_running(p, rq);
 	}
 	check_preempt_curr(rq, p);
 	task_rq_unlock(rq, &flags);

commit 79b5dddf831b4719b7ec8dfcfb9bf9c619805b9c
Author: Ingo Molnar <mingo@elte.hu>
Date:   Thu Aug 9 11:16:49 2007 +0200

    sched: remove the 'u64 now' parameter from dec_load()
    
    remove the 'u64 now' parameter from dec_load().
    
    ( identity transformation that causes no change in functionality. )
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index aa8cac4ae547..23583bb93273 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -804,8 +804,7 @@ static inline void inc_load(struct rq *rq, const struct task_struct *p)
 	update_load_add(&rq->ls.load, p->se.load.weight);
 }
 
-static inline void
-dec_load(struct rq *rq, const struct task_struct *p, u64 now)
+static inline void dec_load(struct rq *rq, const struct task_struct *p)
 {
 	update_curr_load(rq);
 	update_load_sub(&rq->ls.load, p->se.load.weight);
@@ -820,7 +819,7 @@ static void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now)
 static void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now)
 {
 	rq->nr_running--;
-	dec_load(rq, p, now);
+	dec_load(rq, p);
 }
 
 static void set_load_weight(struct task_struct *p)
@@ -3981,7 +3980,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	on_rq = p->se.on_rq;
 	if (on_rq) {
 		dequeue_task(rq, p, 0, now);
-		dec_load(rq, p, now);
+		dec_load(rq, p);
 	}
 
 	p->static_prio = NICE_TO_PRIO(nice);

commit 29b4b623fe8163ca3c1da125da81234d41c8a3db
Author: Ingo Molnar <mingo@elte.hu>
Date:   Thu Aug 9 11:16:49 2007 +0200

    sched: remove the 'u64 now' parameter from inc_load()
    
    remove the 'u64 now' parameter from inc_load().
    
    ( identity transformation that causes no change in functionality. )
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
index 5d5859c2e019..aa8cac4ae547 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -798,8 +798,7 @@ static void update_curr_load(struct rq *rq)
 		__update_curr_load(rq, ls);
 }
 
-static inline void
-inc_load(struct rq *rq, const struct task_struct *p, u64 now)
+static inline void inc_load(struct rq *rq, const struct task_struct *p)
 {
 	update_curr_load(rq);
 	update_load_add(&rq->ls.load, p->se.load.weight);
@@ -815,7 +814,7 @@ dec_load(struct rq *rq, const struct task_struct *p, u64 now)
 static void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now)
 {
 	rq->nr_running++;
-	inc_load(rq, p, now);
+	inc_load(rq, p);
 }
 
 static void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now)
@@ -3993,7 +3992,7 @@ void set_user_nice(struct task_struct *p, long nice)
 
 	if (on_rq) {
 		enqueue_task(rq, p, 0, now);
-		inc_load(rq, p, now);
+		inc_load(rq, p);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU: