Patches contributed by Eötvös Loránd University
commit b47e8608a08766ef8121cd747d3aaf6c3dc22649
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Jul 26 13:40:43 2007 +0200
[PATCH] sched: increase SCHED_LOAD_SCALE_FUZZ
Increase SCHED_LOAD_SCALE_FUZZ, which adds a small amount of
over-balancing, to help distribute CPU-bound tasks more fairly on SMP
systems.
The problem of unfair balancing was noticed and reported by Tong N Li.
10 CPU-bound tasks running on 8 CPUs, v2.6.23-rc1:
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
2572 mingo 20 0 1576 244 196 R 100 0.0 1:03.61 loop
2578 mingo 20 0 1576 248 196 R 100 0.0 1:03.59 loop
2576 mingo 20 0 1576 248 196 R 100 0.0 1:03.52 loop
2571 mingo 20 0 1576 244 196 R 100 0.0 1:03.46 loop
2569 mingo 20 0 1576 244 196 R 99 0.0 1:03.36 loop
2570 mingo 20 0 1576 244 196 R 95 0.0 1:00.55 loop
2577 mingo 20 0 1576 248 196 R 50 0.0 0:31.88 loop
2574 mingo 20 0 1576 248 196 R 50 0.0 0:31.87 loop
2573 mingo 20 0 1576 248 196 R 50 0.0 0:31.86 loop
2575 mingo 20 0 1576 248 196 R 50 0.0 0:31.86 loop
v2.6.23-rc1 + patch:
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
2681 mingo 20 0 1576 244 196 R 85 0.0 3:51.68 loop
2688 mingo 20 0 1576 244 196 R 81 0.0 3:46.35 loop
2682 mingo 20 0 1576 244 196 R 80 0.0 3:43.68 loop
2685 mingo 20 0 1576 248 196 R 80 0.0 3:45.97 loop
2683 mingo 20 0 1576 248 196 R 80 0.0 3:40.25 loop
2679 mingo 20 0 1576 244 196 R 80 0.0 3:33.53 loop
2680 mingo 20 0 1576 244 196 R 79 0.0 3:43.53 loop
2686 mingo 20 0 1576 244 196 R 79 0.0 3:39.31 loop
2687 mingo 20 0 1576 244 196 R 78 0.0 3:33.31 loop
2684 mingo 20 0 1576 244 196 R 77 0.0 3:27.52 loop
So the tasks now nicely converge to the expected 80% long-term CPU usage.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 33b9b4841ee7..7c61b50823fa 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -681,7 +681,7 @@ enum cpu_idle_type {
#define SCHED_LOAD_SHIFT 10
#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
-#define SCHED_LOAD_SCALE_FUZZ (SCHED_LOAD_SCALE >> 5)
+#define SCHED_LOAD_SCALE_FUZZ (SCHED_LOAD_SCALE >> 1)
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
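For scale: with SCHED_LOAD_SHIFT = 10 the load scale is 1024, so this change grows the fuzz from 1024 >> 5 = 32 (about 3% of a single nice-0 task's load) to 1024 >> 1 = 512 (half of it). A standalone arithmetic check of those constants (not kernel code; the values are copied from the hunk above):
#include <stdio.h>

#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

int main(void)
{
	long old_fuzz = SCHED_LOAD_SCALE >> 5;	/* before the patch */
	long new_fuzz = SCHED_LOAD_SCALE >> 1;	/* after the patch */

	printf("scale=%ld old_fuzz=%ld (%.1f%%) new_fuzz=%ld (%.1f%%)\n",
	       SCHED_LOAD_SCALE,
	       old_fuzz, 100.0 * old_fuzz / SCHED_LOAD_SCALE,
	       new_fuzz, 100.0 * new_fuzz / SCHED_LOAD_SCALE);
	return 0;
}
This prints "scale=1024 old_fuzz=32 (3.1%) new_fuzz=512 (50.0%)", i.e. the over-balancing allowance now amounts to half of one nice-0 task's load.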
commit 99bc2fcb283852931fb6bbef40f3df8316b59000
Author: Ingo Molnar <mingo@elte.hu>
Date: Sat Jul 21 04:37:36 2007 -0700
hrtimer: speedup hrtimer_enqueue
Speed up hrtimer enqueueing by evaluating the result of the rbtree insertion (whether the new timer became the leftmost node) instead of comparing against base->first afterwards.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 065a89786628..eb1ddebd2c04 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -686,6 +686,7 @@ static void enqueue_hrtimer(struct hrtimer *timer,
struct rb_node **link = &base->active.rb_node;
struct rb_node *parent = NULL;
struct hrtimer *entry;
+ int leftmost = 1;
/*
* Find the right place in the rbtree:
@@ -697,18 +698,19 @@ static void enqueue_hrtimer(struct hrtimer *timer,
* We dont care about collisions. Nodes with
* the same expiry time stay together.
*/
- if (timer->expires.tv64 < entry->expires.tv64)
+ if (timer->expires.tv64 < entry->expires.tv64) {
link = &(*link)->rb_left;
- else
+ } else {
link = &(*link)->rb_right;
+ leftmost = 0;
+ }
}
/*
* Insert the timer to the rbtree and check whether it
* replaces the first pending timer
*/
- if (!base->first || timer->expires.tv64 <
- rb_entry(base->first, struct hrtimer, node)->expires.tv64) {
+ if (leftmost) {
/*
* Reprogram the clock event device. When the timer is already
* expired hrtimer_enqueue_reprogram has either called the
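The key idea, deciding during the rbtree descent itself whether the new node became the leftmost one rather than comparing against base->first afterwards, can be shown with a plain binary search tree. A minimal standalone sketch (not the kernel rbtree API; the names are made up for illustration):
#include <stddef.h>

struct node {
	long key;
	struct node *left, *right;
};

/* Insert a node and report whether it became the leftmost (smallest)
 * entry. Going right even once means it cannot be the leftmost, which
 * is exactly the information enqueue_hrtimer() now tracks. */
static int insert(struct node **root, struct node *new)
{
	struct node **link = root;
	int leftmost = 1;

	while (*link) {
		if (new->key < (*link)->key) {
			link = &(*link)->left;
		} else {
			link = &(*link)->right;
			leftmost = 0;
		}
	}
	new->left = new->right = NULL;
	*link = new;
	return leftmost;	/* caller would reprogram the clock event device only if set */
}

int main(void)
{
	struct node *root = NULL;
	struct node a = { .key = 5 }, b = { .key = 3 };

	insert(&root, &a);		/* leftmost: the tree was empty */
	return !insert(&root, &b);	/* 3 < 5, so leftmost again: exit code 0 */
}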
commit 820de5c39ef7f6866d2c9e6c7d208bcd2a6e1942
Author: Ingo Molnar <mingo@elte.hu>
Date: Sat Jul 21 04:37:36 2007 -0700
highres: improve debug output
Add some more debug information to the hrtimer and clock events code.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 610f44b24367..83988c3c8e22 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -524,6 +524,9 @@ void __init setup_boot_APIC_clock(void)
*/
if (nmi_watchdog != NMI_IO_APIC)
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
+ else
+ printk(KERN_WARNING "APIC timer registered as dummy,"
+ " due to nmi_watchdog=1!\n");
}
/* Setup the lapic or request the broadcast */
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 72d034258ba1..065a89786628 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -558,7 +558,8 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
*/
static int hrtimer_switch_to_hres(void)
{
- struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+ int cpu = smp_processor_id();
+ struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
unsigned long flags;
if (base->hres_active)
@@ -568,6 +569,8 @@ static int hrtimer_switch_to_hres(void)
if (tick_init_highres()) {
local_irq_restore(flags);
+ printk(KERN_WARNING "Could not switch to high resolution "
+ "mode on CPU %d\n", cpu);
return 0;
}
base->hres_active = 1;
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index f6997ab0c3c9..0258d3115d54 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -73,8 +73,21 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
struct clock_event_device *dev = td->evtdev;
if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
- !tick_device_is_functional(dev))
+ !tick_device_is_functional(dev)) {
+
+ printk(KERN_INFO "Clockevents: "
+ "could not switch to one-shot mode:");
+ if (!dev) {
+ printk(" no tick device\n");
+ } else {
+ if (!tick_device_is_functional(dev))
+ printk(" %s is not functional.\n", dev->name);
+ else
+ printk(" %s does not support one-shot mode.\n",
+ dev->name);
+ }
return -EINVAL;
+ }
td->mode = TICKDEV_MODE_ONESHOT;
dev->event_handler = handler;
commit e436d80085133858bf2613a630365e8a0459fd58
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Jul 19 21:28:35 2007 +0200
[PATCH] sched: implement cpu_clock(cpu) high-speed time source
Implement the cpu_clock(cpu) interface for kernel-internal use:
high-speed (but slightly incorrect) per-cpu clock constructed from
sched_clock().
This API, unused at the moment, will be used in the future by blktrace,
by the softlockup-watchdog, by printk and by lockstat.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 94f624aef017..33b9b4841ee7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1348,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
#endif
extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
extern unsigned long long
task_sched_runtime(struct task_struct *task);
diff --git a/kernel/sched.c b/kernel/sched.c
index a35a92ff38fd..93cf241cfbe9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long long now;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rq->lock, flags);
+ now = rq_clock(rq);
+ spin_unlock_irqrestore(&rq->lock, flags);
+
+ return now;
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/* Change a task's ->cfs_rq if it moves across CPUs */
static inline void set_task_cfs_rq(struct task_struct *p)
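A sketch of a hypothetical in-kernel caller, only to show how the new cpu_clock() interface is meant to be used (the surrounding function and the message are made up; the value is a nanosecond count derived from sched_clock()):
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>

static void example_measure(void)
{
	int cpu = get_cpu();		/* stay on one CPU while sampling */
	unsigned long long t0, t1;

	t0 = cpu_clock(cpu);
	/* ... work to be timed ... */
	t1 = cpu_clock(cpu);
	put_cpu();

	printk(KERN_DEBUG "example: section took %llu ns\n", t1 - t0);
}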
commit aa781aeb49752e5654241b53368c80362ad7bea3
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Jul 19 01:48:32 2007 -0700
add POSIX clocks and timers maintainer
Update the MAINTAINERS file: Thomas Gleixner has been the de-facto
maintainer of POSIX timers and clocks for quite some time.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/MAINTAINERS b/MAINTAINERS
index 70e502e4c295..d4719717b190 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2857,8 +2857,8 @@ L: linux-kernel@vger.kernel.org
S: Maintained
POSIX CLOCKS and TIMERS
-P: George Anzinger
-M: george@mvista.com
+P: Thomas Gleixner
+M: tglx@linutronix.de
L: linux-kernel@vger.kernel.org
S: Supported
commit 4e44f3497d41db4c3b9051c61410dee8ae4fb49c
Author: Ingo Molnar <mingo@elte.hu>
Date: Sun Jul 15 23:41:18 2007 -0700
sys_time() speedup
Improve performance of sys_time(). sys_time() returns time in seconds, but
it does so by calling do_gettimeofday() and then returning the tv_sec
portion of the GTOD time. But the data structure "xtime", which is updated
on every timer/scheduler tick, already offers HZ-granularity time.
The patch improves the sysbench OLTP macrobenchmark significantly:
2.6.22-rc6:
#threads
1: transactions: 3733 (373.21 per sec.)
2: transactions: 6676 (667.46 per sec.)
3: transactions: 6957 (695.50 per sec.)
4: transactions: 7055 (705.48 per sec.)
5: transactions: 6596 (659.33 per sec.)
2.6.22-rc6 + sys_time.patch:
1: transactions: 4005 (400.47 per sec.)
2: transactions: 7379 (737.77 per sec.)
3: transactions: 7347 (734.49 per sec.)
4: transactions: 7468 (746.65 per sec.)
5: transactions: 7428 (742.47 per sec.)
Mixed API uses of gettimeofday() and time() are guaranteed to be coherent
via an at-most-once-per-second slow path that updates xtime.
[akpm@linux-foundation.org: build fixes]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: John Stultz <johnstul@us.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/kernel/time.c b/kernel/time.c
index f04791f69408..ffe19149d770 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -57,14 +57,17 @@ EXPORT_SYMBOL(sys_tz);
*/
asmlinkage long sys_time(time_t __user * tloc)
{
- time_t i;
- struct timeval tv;
+ /*
+ * We read xtime.tv_sec atomically - it's updated
+ * atomically by update_wall_time(), so no need to
+ * even read-lock the xtime seqlock:
+ */
+ time_t i = xtime.tv_sec;
- do_gettimeofday(&tv);
- i = tv.tv_sec;
+ smp_rmb(); /* sys_time() results are coherent */
if (tloc) {
- if (put_user(i,tloc))
+ if (put_user(i, tloc))
i = -EFAULT;
}
return i;
@@ -373,12 +376,25 @@ void do_gettimeofday (struct timeval *tv)
tv->tv_sec = sec;
tv->tv_usec = usec;
-}
+ /*
+ * Make sure xtime.tv_sec [returned by sys_time()] always
+ * follows the gettimeofday() result precisely. This
+ * condition is extremely unlikely, it can hit at most
+ * once per second:
+ */
+ if (unlikely(xtime.tv_sec != tv->tv_sec)) {
+ unsigned long flags;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+ update_wall_time();
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ }
+}
EXPORT_SYMBOL(do_gettimeofday);
+#else /* CONFIG_TIME_INTERPOLATION */
-#else
#ifndef CONFIG_GENERIC_TIME
/*
* Simulate gettimeofday using do_gettimeofday which only allows a timeval
@@ -394,7 +410,7 @@ void getnstimeofday(struct timespec *tv)
}
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif
-#endif
+#endif /* CONFIG_TIME_INTERPOLATION */
/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
* Assumes input in normal date format, i.e. 1980-12-31 23:59:59
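The coherence guarantee described in the changelog can be probed from userspace. A minimal check, not part of the patch, which asserts that time() never lags behind a gettimeofday() result read immediately before it:
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

int main(void)
{
	struct timeval tv;
	time_t t;
	long i;

	for (i = 0; i < 1000000; i++) {
		gettimeofday(&tv, NULL);
		t = time(NULL);		/* now served from xtime.tv_sec */
		if (t < tv.tv_sec) {
			printf("incoherent: time()=%ld < tv_sec=%ld\n",
			       (long)t, (long)tv.tv_sec);
			return 1;
		}
	}
	printf("time() never fell behind gettimeofday()\n");
	return 0;
}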
commit 45807a1df9f51d28d0ff0c6bcf900c210411d7c9
Author: Ingo Molnar <mingo@elte.hu>
Date: Sun Jul 15 23:40:10 2007 -0700
vdso: print fatal signals
Add the print-fatal-signals=1 boot option and the
/proc/sys/kernel/print-fatal-signals runtime switch.
This feature prints some minimal information about userspace segfaults to
the kernel console. This is useful to find early bootup bugs where
userspace debugging is very hard.
Defaults to off.
[akpm@linux-foundation.org: Don't add new sysctl numbers]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 182c6a39d5a2..ab38322111c4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1383,6 +1383,12 @@ and is between 256 and 4096 characters. It is defined in the file
autoconfiguration.
Ranges are in pairs (memory base and size).
+ print-fatal-signals=
+ [KNL] debug: print fatal signals
+ print-fatal-signals=1: print segfault info to
+ the kernel console.
+ default: off.
+
profile= [KNL] Enable kernel profiling via /proc/profile
Format: [schedule,]<number>
Param: "schedule" - profile schedule points.
diff --git a/kernel/signal.c b/kernel/signal.c
index f9405609774e..39d122753bac 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -718,6 +718,37 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
#define LEGACY_QUEUE(sigptr, sig) \
(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
+int print_fatal_signals;
+
+static void print_fatal_signal(struct pt_regs *regs, int signr)
+{
+ printk("%s/%d: potentially unexpected fatal signal %d.\n",
+ current->comm, current->pid, signr);
+
+#ifdef __i386__
+ printk("code at %08lx: ", regs->eip);
+ {
+ int i;
+ for (i = 0; i < 16; i++) {
+ unsigned char insn;
+
+ __get_user(insn, (unsigned char *)(regs->eip + i));
+ printk("%02x ", insn);
+ }
+ }
+#endif
+ printk("\n");
+ show_regs(regs);
+}
+
+static int __init setup_print_fatal_signals(char *str)
+{
+ get_option (&str, &print_fatal_signals);
+
+ return 1;
+}
+
+__setup("print-fatal-signals=", setup_print_fatal_signals);
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
@@ -1855,6 +1886,8 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
* Anything else is fatal, maybe with a core dump.
*/
current->flags |= PF_SIGNALED;
+ if ((signr != SIGKILL) && print_fatal_signals)
+ print_fatal_signal(regs, signr);
if (sig_kernel_coredump(signr)) {
/*
* If it was able to dump core, this kills all
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ccaebbbd75ae..2cce2286bdcd 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -61,6 +61,7 @@ extern int proc_nr_files(ctl_table *table, int write, struct file *filp,
/* External variables not in a header file. */
extern int C_A_D;
+extern int print_fatal_signals;
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern int sysctl_panic_on_oom;
@@ -340,6 +341,14 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "print-fatal-signals",
+ .data = &print_fatal_signals,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
#ifdef __sparc__
{
.ctl_name = KERN_SPARC_REBOOT,
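To observe the new output, enable the switch (boot with print-fatal-signals=1 or write 1 to /proc/sys/kernel/print-fatal-signals) and let a process die from an unhandled fault. A deliberately crashing test program (hypothetical, for illustration only):
/* segv-test.c: dereference NULL so that, with the switch enabled, the
 * kernel logs the fatal SIGSEGV report (code bytes at the faulting EIP
 * on i386, plus the register dump). */
int main(void)
{
	volatile int *p = (volatile int *)0;

	return *p;	/* faults here; see the kernel console / dmesg for the report */
}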
commit 8ea02606681beb41568c62ba060bdf51fc9ba14e
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 16 09:46:31 2007 +0200
[PATCH] sched: fix up fs/proc/array.c whitespace problems
While changing task_stime() I noticed a whitespace style problem in
array.c - fix it. While at it, fix all the other style problems too,
most of them in the scheduler-stats-related portions of array.c.
There is no change in functionality:
text data bss dec hex filename
4356 28 0 4384 1120 array.o-before
4356 28 0 4384 1120 array.o-after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/fs/proc/array.c b/fs/proc/array.c
index c6977796fafd..4cb81776a7ff 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -62,6 +62,8 @@
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
@@ -76,9 +78,7 @@
#include <linux/rcupdate.h>
#include <linux/delayacct.h>
-#include <asm/uaccess.h>
#include <asm/pgtable.h>
-#include <asm/io.h>
#include <asm/processor.h>
#include "internal.h"
@@ -87,10 +87,10 @@
do { memcpy(buffer, string, strlen(string)); \
buffer += strlen(string); } while (0)
-static inline char * task_name(struct task_struct *p, char * buf)
+static inline char *task_name(struct task_struct *p, char *buf)
{
int i;
- char * name;
+ char *name;
char tcomm[sizeof(p->comm)];
get_task_comm(tcomm, p);
@@ -138,7 +138,7 @@ static const char *task_state_array[] = {
"X (dead)" /* 32 */
};
-static inline const char * get_task_state(struct task_struct *tsk)
+static inline const char *get_task_state(struct task_struct *tsk)
{
unsigned int state = (tsk->state & (TASK_RUNNING |
TASK_INTERRUPTIBLE |
@@ -156,7 +156,7 @@ static inline const char * get_task_state(struct task_struct *tsk)
return *p;
}
-static inline char * task_state(struct task_struct *p, char *buffer)
+static inline char *task_state(struct task_struct *p, char *buffer)
{
struct group_info *group_info;
int g;
@@ -172,8 +172,8 @@ static inline char * task_state(struct task_struct *p, char *buffer)
"Uid:\t%d\t%d\t%d\t%d\n"
"Gid:\t%d\t%d\t%d\t%d\n",
get_task_state(p),
- p->tgid, p->pid,
- pid_alive(p) ? rcu_dereference(p->real_parent)->tgid : 0,
+ p->tgid, p->pid,
+ pid_alive(p) ? rcu_dereference(p->real_parent)->tgid : 0,
pid_alive(p) && p->ptrace ? rcu_dereference(p->parent)->pid : 0,
p->uid, p->euid, p->suid, p->fsuid,
p->gid, p->egid, p->sgid, p->fsgid);
@@ -191,15 +191,15 @@ static inline char * task_state(struct task_struct *p, char *buffer)
get_group_info(group_info);
task_unlock(p);
- for (g = 0; g < min(group_info->ngroups,NGROUPS_SMALL); g++)
- buffer += sprintf(buffer, "%d ", GROUP_AT(group_info,g));
+ for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
+ buffer += sprintf(buffer, "%d ", GROUP_AT(group_info, g));
put_group_info(group_info);
buffer += sprintf(buffer, "\n");
return buffer;
}
-static char * render_sigset_t(const char *header, sigset_t *set, char *buffer)
+static char *render_sigset_t(const char *header, sigset_t *set, char *buffer)
{
int i, len;
@@ -239,7 +239,7 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
}
}
-static inline char * task_sig(struct task_struct *p, char *buffer)
+static inline char *task_sig(struct task_struct *p, char *buffer)
{
unsigned long flags;
sigset_t pending, shpending, blocked, ignored, caught;
@@ -289,14 +289,14 @@ static inline char *task_cap(struct task_struct *p, char *buffer)
cap_t(p->cap_effective));
}
-int proc_pid_status(struct task_struct *task, char * buffer)
+int proc_pid_status(struct task_struct *task, char *buffer)
{
- char * orig = buffer;
+ char *orig = buffer;
struct mm_struct *mm = get_task_mm(task);
buffer = task_name(task, buffer);
buffer = task_state(task, buffer);
-
+
if (mm) {
buffer = task_mem(mm, buffer);
mmput(mm);
@@ -344,8 +344,7 @@ static clock_t task_stime(struct task_struct *p)
return stime;
}
-
-static int do_task_stat(struct task_struct *task, char * buffer, int whole)
+static int do_task_stat(struct task_struct *task, char *buffer, int whole)
{
unsigned long vsize, eip, esp, wchan = ~0UL;
long priority, nice;
@@ -353,7 +352,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
sigset_t sigign, sigcatch;
char state;
int res;
- pid_t ppid = 0, pgid = -1, sid = -1;
+ pid_t ppid = 0, pgid = -1, sid = -1;
int num_threads = 0;
struct mm_struct *mm;
unsigned long long start_time;
@@ -424,7 +423,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
}
rcu_read_unlock();
- if (!whole || num_threads<2)
+ if (!whole || num_threads < 2)
wchan = get_wchan(task);
if (!whole) {
min_flt = task->min_flt;
@@ -445,7 +444,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
/* convert nsec -> ticks */
start_time = nsec_to_clock_t(start_time);
- res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %u %lu \
+ res = sprintf(buffer, "%d (%s) %c %d %d %d %d %d %u %lu \
%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu\n",
task->pid,
@@ -471,7 +470,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
start_time,
vsize,
mm ? get_mm_rss(mm) : 0,
- rsslim,
+ rsslim,
mm ? mm->start_code : 0,
mm ? mm->end_code : 0,
mm ? mm->start_stack : 0,
@@ -493,17 +492,17 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
task->rt_priority,
task->policy,
(unsigned long long)delayacct_blkio_ticks(task));
- if(mm)
+ if (mm)
mmput(mm);
return res;
}
-int proc_tid_stat(struct task_struct *task, char * buffer)
+int proc_tid_stat(struct task_struct *task, char *buffer)
{
return do_task_stat(task, buffer, 0);
}
-int proc_tgid_stat(struct task_struct *task, char * buffer)
+int proc_tgid_stat(struct task_struct *task, char *buffer)
{
return do_task_stat(task, buffer, 1);
}
@@ -512,12 +511,12 @@ int proc_pid_statm(struct task_struct *task, char *buffer)
{
int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
struct mm_struct *mm = get_task_mm(task);
-
+
if (mm) {
size = task_statm(mm, &shared, &text, &data, &resident);
mmput(mm);
}
- return sprintf(buffer,"%d %d %d %d %d %d %d\n",
+ return sprintf(buffer, "%d %d %d %d %d %d %d\n",
size, resident, shared, text, lib, data, 0);
}
commit e4af30be8fd0bed0e8f96e4e1ebd546a3dfa8f2b
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 16 09:46:31 2007 +0200
[PATCH] sched: prettify prio_to_wmult[]
Prettify the prio_to_wmult[] array. (This could have saved us from the typos.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index 90d22b72cf81..1c8076676eb1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -756,14 +756,14 @@ static const int prio_to_weight[40] = {
* into multiplications:
*/
static const u32 prio_to_wmult[40] = {
- 48356, 60446, 75558, 94446, 118058, 147573,
- 184467, 230589, 288233, 360285, 450347,
- 562979, 703746, 879575, 1099582, 1374389,
- 1717986, 2147483, 2684354, 3355443, 4194304,
- 5244160, 6557201, 8196502, 10250518, 12782640,
- 16025997, 19976592, 24970740, 31350126, 39045157,
- 49367440, 61356675, 76695844, 95443717, 119304647,
- 148102320, 186737708, 238609294, 286331153,
+/* -20 */ 48356, 60446, 75558, 94446, 118058,
+/* -15 */ 147573, 184467, 230589, 288233, 360285,
+/* -10 */ 450347, 562979, 703746, 879575, 1099582,
+/* -5 */ 1374389, 1717986, 2147483, 2684354, 3355443,
+/* 0 */ 4194304, 5244160, 6557201, 8196502, 10250518,
+/* 5 */ 12782640, 16025997, 19976592, 24970740, 31350126,
+/* 10 */ 39045157, 49367440, 61356675, 76695844, 95443717,
+/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
static inline void
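The relation spelled out by the comment added in the patch below (each wmult entry is 2^32 divided by the corresponding prio_to_weight[] entry, so that a division by the weight can be replaced by a multiply-and-shift) can be spot-checked with values visible in the hunks: the nice-10 weight 110 maps to 39045157 and the nice-15 weight 36 maps to 119304647. A standalone check (the delta value is arbitrary, chosen only for illustration):
#include <stdio.h>

/* weight/wmult pairs for nice 10 and nice 15, copied from the hunks */
static const unsigned int weight[] = { 110, 36 };
static const unsigned long long wmult[] = { 39045157ULL, 119304647ULL };

int main(void)
{
	unsigned long long delta = 6000000;	/* e.g. 6 ms in nanoseconds */
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long long inv = (1ULL << 32) / weight[i];
		unsigned long long by_div = delta / weight[i];
		unsigned long long by_mul = (delta * wmult[i]) >> 32;

		printf("weight=%3u: 2^32/weight=%llu (table: %llu), "
		       "delta/weight=%llu, (delta*wmult)>>32=%llu\n",
		       weight[i], inv, wmult[i], by_div, by_mul);
	}
	return 0;
}
Both entries reproduce the table values exactly, and the multiply-and-shift result matches the plain division, which is the division-to-multiplication speedup the comment describes.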
commit 5714d2de93fbb156c5e45fb101a2b4f0cae8fbb7
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 16 09:46:31 2007 +0200
[PATCH] sched: document prio_to_wmult[]
Document prio_to_wmult[].
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index a7284bc79cdf..90d22b72cf81 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -748,6 +748,13 @@ static const int prio_to_weight[40] = {
/* 10 */ 110, 87, 70, 56, 45, 36, 29, 23, 18, 15,
};
+/*
+ * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
+ *
+ * In cases where the weight does not change often, we can use the
+ * precalculated inverse to speed up arithmetics by turning divisions
+ * into multiplications:
+ */
static const u32 prio_to_wmult[40] = {
48356, 60446, 75558, 94446, 118058, 147573,
184467, 230589, 288233, 360285, 450347,