Patches contributed by Eötvös Loránd University
commit a09785a2414afb261d9f719d544742af4300df22
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 3 00:25:12 2006 -0700
[PATCH] lockdep: annotate af_unix locking
Teach special (recursive) locking code to the lock validator. Also split
af_unix's sk_receive_queue.lock class from the other networking skb-queue
locks. Has no effect on non-lockdep kernels.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 5ba72d95280c..2fec827c8801 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -67,6 +67,9 @@ struct unix_skb_parms {
#define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock)
#define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock)
#define unix_state_wlock(s) spin_lock(&unix_sk(s)->lock)
+#define unix_state_wlock_nested(s) \
+ spin_lock_nested(&unix_sk(s)->lock, \
+ SINGLE_DEPTH_NESTING)
#define unix_state_wunlock(s) spin_unlock(&unix_sk(s)->lock)
#ifdef __KERNEL__
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aca650109425..e9a287bc3142 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -565,6 +565,14 @@ static struct proto unix_proto = {
.obj_size = sizeof(struct unix_sock),
};
+/*
+ * AF_UNIX sockets do not interact with hardware, hence they
+ * dont trigger interrupts - so it's safe for them to have
+ * bh-unsafe locking for their sk_receive_queue.lock. Split off
+ * this special lock-class by reinitializing the spinlock key:
+ */
+static struct lock_class_key af_unix_sk_receive_queue_lock_key;
+
static struct sock * unix_create1(struct socket *sock)
{
struct sock *sk = NULL;
@@ -580,6 +588,8 @@ static struct sock * unix_create1(struct socket *sock)
atomic_inc(&unix_nr_socks);
sock_init_data(sock,sk);
+ lockdep_set_class(&sk->sk_receive_queue.lock,
+ &af_unix_sk_receive_queue_lock_key);
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen;
@@ -1045,7 +1055,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
goto out_unlock;
}
- unix_state_wlock(sk);
+ unix_state_wlock_nested(sk);
if (sk->sk_state != st) {
unix_state_wunlock(sk);
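Note on the pattern: unix_stream_connect() takes the connecting socket's own
lock while already holding the peer's lock, and both locks belong to one
lock-class, so a plain spin_lock() would look like a self-deadlock to the
validator. spin_lock_nested() declares the second acquisition as a deliberate,
depth-one nesting. A minimal sketch of the idiom, with illustrative names
(my_node, connect_pair) that are not from the patch:

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    struct my_node {
            spinlock_t lock;
    };

    static void connect_pair(struct my_node *listener, struct my_node *child)
    {
            spin_lock(&listener->lock);
            /*
             * child->lock is in the same lock-class as listener->lock;
             * mark this as bounded (depth-1) nesting, not a deadlock.
             */
            spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

            /* ... link child into the listener's accept queue ... */

            spin_unlock(&child->lock);
            spin_unlock(&listener->lock);
    }

This is safe only because the code guarantees a fixed acquisition order;
lockdep trusts the annotation rather than proving it.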
commit da21f24dd73954c2ed0cd39a698e2c9916c05d71
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 3 00:25:12 2006 -0700
[PATCH] lockdep: annotate sock_lock_init()
Teach special (multi-initialized, per-address-family) locking code to the lock
validator. Has no effect on non-lockdep kernels.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/net/sock.h b/include/net/sock.h
index 7b3d6b856946..83805feea880 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -80,8 +80,12 @@ typedef struct {
wait_queue_head_t wq;
} socket_lock_t;
+extern struct lock_class_key af_family_keys[AF_MAX];
+
#define sock_lock_init(__sk) \
do { spin_lock_init(&((__sk)->sk_lock.slock)); \
+ lockdep_set_class(&(__sk)->sk_lock.slock, \
+ af_family_keys + (__sk)->sk_family); \
(__sk)->sk_lock.owner = NULL; \
init_waitqueue_head(&((__sk)->sk_lock.wq)); \
} while(0)
diff --git a/net/core/sock.c b/net/core/sock.c
index 533b9317144b..0b4d5d25b23c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -129,6 +129,18 @@
#include <net/tcp.h>
#endif
+/*
+ * Each address family might have different locking rules, so we have
+ * one slock key per address family:
+ */
+struct lock_class_key af_family_keys[AF_MAX];
+
+/*
+ * sk_callback_lock locking rules are per-address-family,
+ * so split the lock classes by using a per-AF key:
+ */
+static struct lock_class_key af_callback_keys[AF_MAX];
+
/* Take into consideration the size of the struct sk_buff overhead in the
* determination of these values, since that is non-constant across
* platforms. This makes socket queueing behavior and performance
@@ -848,6 +860,8 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
rwlock_init(&newsk->sk_dst_lock);
rwlock_init(&newsk->sk_callback_lock);
+ lockdep_set_class(&newsk->sk_callback_lock,
+ af_callback_keys + newsk->sk_family);
newsk->sk_dst_cache = NULL;
newsk->sk_wmem_queued = 0;
@@ -1422,6 +1436,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
rwlock_init(&sk->sk_dst_lock);
rwlock_init(&sk->sk_callback_lock);
+ lockdep_set_class(&sk->sk_callback_lock,
+ af_callback_keys + sk->sk_family);
sk->sk_state_change = sock_def_wakeup;
sk->sk_data_ready = sock_def_readable;
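Note on the pattern: both hunks re-key a lock right after its generic
initialization, using a static array of keys indexed by address family, so
that for instance AF_UNIX and AF_INET locks are validated as distinct classes.
The same "one static key per category" idiom, sketched with hypothetical
names:

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    #define MY_MAX_TYPES 8                  /* hypothetical category count */

    static struct lock_class_key my_type_keys[MY_MAX_TYPES];

    struct my_obj {
            int type;                       /* 0 .. MY_MAX_TYPES-1 */
            spinlock_t lock;
    };

    static void my_obj_init(struct my_obj *obj, int type)
    {
            obj->type = type;
            spin_lock_init(&obj->lock);
            /* one lock-class per object type, like af_family_keys[] above */
            lockdep_set_class(&obj->lock, my_type_keys + type);
    }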
commit 543655244866b8ec648fea1eb9c32a35ffba5721
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 3 00:25:11 2006 -0700
[PATCH] lockdep: annotate hrtimer base locks
Teach special (recursive) locking code to the lock validator. Has no effect
on non-lockdep kernels.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 07d7305f131e..e4bccbcc2750 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -91,6 +91,7 @@ struct hrtimer_base {
ktime_t (*get_softirq_time)(void);
struct hrtimer *curr_timer;
ktime_t softirq_time;
+ struct lock_class_key lock_key;
};
/*
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 8d3dc29ef41a..617304ce67db 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -782,8 +782,10 @@ static void __devinit init_hrtimers_cpu(int cpu)
struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu);
int i;
- for (i = 0; i < MAX_HRTIMER_BASES; i++, base++)
+ for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) {
spin_lock_init(&base->lock);
+ lockdep_set_class(&base->lock, &base->lock_key);
+ }
}
#ifdef CONFIG_HOTPLUG_CPU
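Note on the pattern: here the key is embedded in the base structure itself,
making every hrtimer base its own lock-class. This works because the per-CPU
bases are statically allocated, and lockdep expects class keys to live in
static storage; when lockdep is disabled, struct lock_class_key is empty, so
the extra field costs nothing. A sketch with illustrative names:

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    struct my_base {
            spinlock_t lock;
            struct lock_class_key lock_key; /* zero-size when !CONFIG_LOCKDEP */
    };

    /* valid only for statically allocated instances */
    static void my_base_init(struct my_base *base)
    {
            spin_lock_init(&base->lock);
            lockdep_set_class(&base->lock, &base->lock_key);
    }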
commit fcb993712f231a4faea8393513d1276170679107
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 3 00:25:10 2006 -0700
[PATCH] lockdep: annotate scheduler runqueue locks
Teach per-CPU runqueue locks and recursive locking code to the lock validator.
Has no effect on non-lockdep kernels.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/kernel/sched.c b/kernel/sched.c
index ae4db0185bb2..f4778d1aef69 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -266,6 +266,7 @@ struct runqueue {
unsigned long ttwu_cnt;
unsigned long ttwu_local;
#endif
+ struct lock_class_key rq_lock_key;
};
static DEFINE_PER_CPU(struct runqueue, runqueues);
@@ -6656,6 +6657,7 @@ void __init sched_init(void)
rq = cpu_rq(i);
spin_lock_init(&rq->lock);
+ lockdep_set_class(&rq->lock, &rq->rq_lock_key);
rq->nr_running = 0;
rq->active = rq->arrays;
rq->expired = rq->arrays + 1;
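Note on the pattern: the same embedding idiom as the hrtimer patch, applied to
the statically allocated per-CPU runqueues; each CPU's rq->lock becomes a
class of its own, which matters because the scheduler sometimes holds two
runqueue locks at once while moving tasks between CPUs. The per-CPU variant,
sketched with illustrative names (my_rq, my_rqs):

    #include <linux/init.h>
    #include <linux/percpu.h>
    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    struct my_rq {
            spinlock_t lock;
            struct lock_class_key lock_key;
    };

    static DEFINE_PER_CPU(struct my_rq, my_rqs);

    static void __init my_rqs_init(void)
    {
            int i;

            for_each_possible_cpu(i) {
                    struct my_rq *rq = &per_cpu(my_rqs, i);

                    spin_lock_init(&rq->lock);
                    lockdep_set_class(&rq->lock, &rq->lock_key);
            }
    }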
commit d730e882a15c38de02b63a063be636b2ff9e9ed1
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 3 00:25:10 2006 -0700
[PATCH] lockdep: annotate timer base locks
Split the per-CPU timer base locks into separate lock classes, because they
are used recursively.
Has no effect on non-lockdep kernels.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/kernel/timer.c b/kernel/timer.c
index 4dd9a10d67d0..b761898d04c8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1559,6 +1559,13 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
return 0;
}
+/*
+ * lockdep: we want to track each per-CPU base as a separate lock-class,
+ * but timer-bases are kmalloc()-ed, so we need to attach separate
+ * keys to them:
+ */
+static struct lock_class_key base_lock_keys[NR_CPUS];
+
static int __devinit init_timers_cpu(int cpu)
{
int j;
@@ -1594,6 +1601,8 @@ static int __devinit init_timers_cpu(int cpu)
}
spin_lock_init(&base->lock);
+ lockdep_set_class(&base->lock, base_lock_keys + cpu);
+
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
INIT_LIST_HEAD(base->tv4.vec + j);
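Note on the pattern: unlike the hrtimer and runqueue patches, the key cannot
be embedded in the structure here. As the comment in the hunk says, the timer
bases may be kmalloc()-ed, and lockdep registers class keys by address and
expects them in static storage, so a static per-CPU key array is used instead.
The constraint, sketched with hypothetical names:

    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/lockdep.h>
    #include <linux/threads.h>              /* NR_CPUS */

    struct my_dyn_base {
            spinlock_t lock;
            /* no lock_class_key here: instances may be kmalloc()-ed */
    };

    static struct lock_class_key my_base_keys[NR_CPUS];

    static struct my_dyn_base *my_base_alloc(int cpu)
    {
            struct my_dyn_base *base = kmalloc(sizeof(*base), GFP_KERNEL);

            if (!base)
                    return NULL;
            spin_lock_init(&base->lock);
            lockdep_set_class(&base->lock, my_base_keys + cpu);
            return base;
    }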
commit 06825ba3553151eea24206bc53d4fc3de49e0ab1
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 3 00:25:09 2006 -0700
[PATCH] lockdep: annotate skb_queue_head_init
Teach special (multi-initialized) locking code to the lock validator. Has no
effect on non-lockdep kernels.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 57d7d4965f9a..3597b4f14389 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -604,9 +604,12 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
return list_->qlen;
}
+extern struct lock_class_key skb_queue_lock_key;
+
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
spin_lock_init(&list->lock);
+ lockdep_set_class(&list->lock, &skb_queue_lock_key);
list->prev = list->next = (struct sk_buff *)list;
list->qlen = 0;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7cfbdb215ba2..44f6a181a754 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -70,6 +70,13 @@
static kmem_cache_t *skbuff_head_cache __read_mostly;
static kmem_cache_t *skbuff_fclone_cache __read_mostly;
+/*
+ * lockdep: lock class key used by skb_queue_head_init():
+ */
+struct lock_class_key skb_queue_lock_key;
+
+EXPORT_SYMBOL(skb_queue_lock_key);
+
/*
* Keep out-of-line to prevent kernel bloat.
* __builtin_return_address is not used because it is not always
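Note on the pattern: skb_queue_head_init() is a static inline in a header that
is compiled into every user, including modules, so the key it references must
be one shared object: declared extern in the header, defined exactly once in
skbuff.c, and exported so modules can link against it. The split, sketched
with hypothetical names:

    /* my_queue.h: inline initializer shared by all users */
    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    extern struct lock_class_key my_queue_lock_key;

    struct my_queue {
            spinlock_t lock;
    };

    static inline void my_queue_init(struct my_queue *q)
    {
            spin_lock_init(&q->lock);
            /* every such queue in the kernel joins one lock-class */
            lockdep_set_class(&q->lock, &my_queue_lock_key);
    }

    /* my_queue.c: single definition, visible to modules */
    #include <linux/module.h>

    struct lock_class_key my_queue_lock_key;
    EXPORT_SYMBOL(my_queue_lock_key);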
commit f20dc5f7c1adf1c4b68b7672d6f2002cb824e636
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 3 00:25:08 2006 -0700
[PATCH] lockdep: annotate mm
Teach special (recursive) locking code to the lock validator. Has no effect
on non-lockdep kernels.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/mm/memory.c b/mm/memory.c
index 7e2a4b1580e3..c1e14c9e67e4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -503,7 +503,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
return -ENOMEM;
src_pte = pte_offset_map_nested(src_pmd, addr);
src_ptl = pte_lockptr(src_mm, src_pmd);
- spin_lock(src_ptl);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
do {
/*
diff --git a/mm/mremap.c b/mm/mremap.c
index 1903bdf65e42..7c15cf3373ad 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -97,7 +97,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
new_pte = pte_offset_map_nested(new_pmd, new_addr);
new_ptl = pte_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
- spin_lock(new_ptl);
+ spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
new_pte++, new_addr += PAGE_SIZE) {
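Note on the pattern: both hunks annotate the second of two page-table locks of
the same class. The callers guarantee there is no real deadlock (move_ptes()
even checks that the two locks differ before taking the second), so the
validator only needs to be told that one level of same-class nesting is
intended. The shape of the move_ptes() hunk, with illustrative names:

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    static void lock_two_ptlocks(spinlock_t *old_ptl, spinlock_t *new_ptl)
    {
            spin_lock(old_ptl);
            if (new_ptl != old_ptl)
                    spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
    }

    static void unlock_two_ptlocks(spinlock_t *old_ptl, spinlock_t *new_ptl)
    {
            if (new_ptl != old_ptl)
                    spin_unlock(new_ptl);
            spin_unlock(old_ptl);
    }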
commit eb4542b98c81e22e08587b747b21986a45360999
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 3 00:25:07 2006 -0700
[PATCH] lockdep: annotate waitqueues
Create one lock class for all waitqueue locks in the kernel. Has no effect on
non-lockdep kernels.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/linux/wait.h b/include/linux/wait.h
index bc4f389c49bb..794be7af58ae 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -77,9 +77,15 @@ struct task_struct;
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
{ .flags = word, .bit_nr = bit, }
+/*
+ * lockdep: we want one lock-class for all waitqueue locks.
+ */
+extern struct lock_class_key waitqueue_lock_key;
+
static inline void init_waitqueue_head(wait_queue_head_t *q)
{
spin_lock_init(&q->lock);
+ lockdep_set_class(&q->lock, &waitqueue_lock_key);
INIT_LIST_HEAD(&q->task_list);
}
diff --git a/kernel/wait.c b/kernel/wait.c
index 5985d866531f..a1d57aeb7f75 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,6 +10,10 @@
#include <linux/wait.h>
#include <linux/hash.h>
+struct lock_class_key waitqueue_lock_key;
+
+EXPORT_SYMBOL(waitqueue_lock_key);
+
void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
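Note on the pattern: with lockdep enabled, spin_lock_init() mints a static key
at each call site, so an initializer inlined from a widely included header
would otherwise scatter waitqueue locks across roughly one class per call
site. Re-keying with a single exported key collapses them into one
kernel-wide class, and callers need no change. A minimal usage sketch with an
illustrative driver:

    #include <linux/init.h>
    #include <linux/wait.h>

    static wait_queue_head_t my_wq;         /* illustrative */

    static int __init my_driver_init(void)
    {
            /* joins the single waitqueue_lock_key class set up above */
            init_waitqueue_head(&my_wq);
            return 0;
    }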
commit 243c7621aac4ed1aa79524c9a1cecf7c05a28124
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 3 00:25:06 2006 -0700
[PATCH] lockdep: annotate genirq
Teach special (recursive) locking code to the lock validator. Has no effect
on non-lockdep kernels.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 80ec7a4dbc98..316e0fb8d7b1 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -270,6 +270,12 @@ static inline int lockdep_internal(void)
struct lock_class_key { };
#endif /* !LOCKDEP */
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
+extern void early_init_irq_lock_class(void);
+#else
+# define early_init_irq_lock_class() do { } while (0)
+#endif
+
#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
diff --git a/init/main.c b/init/main.c
index fc473d4b56fd..628b8e9e841a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -466,6 +466,7 @@ asmlinkage void __init start_kernel(void)
local_irq_disable();
early_boot_irqs_off();
+ early_init_irq_lock_class();
/*
* Interrupts are still disabled. Do necessary setups, then
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index aeb6e391276c..a7b497ee919e 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -249,3 +249,19 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
return 1;
}
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+static struct lock_class_key irq_desc_lock_class;
+
+void early_init_irq_lock_class(void)
+{
+ int i;
+
+ for (i = 0; i < NR_IRQS; i++)
+ lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
+}
+
+#endif
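Note on the pattern: all irq_desc locks are placed in one class because every
descriptor lock is taken the same way. The lockdep.h hunk also supplies a
do-nothing fallback so start_kernel() can call early_init_irq_lock_class()
unconditionally, with no #ifdef at the call site. That conditional-stub idiom
in isolation (CONFIG_MY_FEATURE and my_feature_init are illustrative):

    #if defined(CONFIG_MY_FEATURE)
    extern void my_feature_init(void);
    #else
    # define my_feature_init() do { } while (0)
    #endif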
commit 8b8f319fc7f4ab59f567d6a401a62659b3d37007
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jul 3 00:25:05 2006 -0700
[PATCH] lockdep: annotate futex
Teach special (recursive) locking code to the lock validator. Introduce
double_lock_hb() to unify double hash-bucket lock taking.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/kernel/futex.c b/kernel/futex.c
index 15caf93e4a43..1dc98e4dd287 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -606,6 +606,22 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
return 0;
}
+/*
+ * Express the locking dependencies for lockdep:
+ */
+static inline void
+double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
+{
+ if (hb1 <= hb2) {
+ spin_lock(&hb1->lock);
+ if (hb1 < hb2)
+ spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
+ } else { /* hb1 > hb2 */
+ spin_lock(&hb2->lock);
+ spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
+ }
+}
+
/*
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
@@ -674,11 +690,7 @@ futex_wake_op(u32 __user *uaddr1, u32 __user *uaddr2,
hb2 = hash_futex(&key2);
retry:
- if (hb1 < hb2)
- spin_lock(&hb1->lock);
- spin_lock(&hb2->lock);
- if (hb1 > hb2)
- spin_lock(&hb1->lock);
+ double_lock_hb(hb1, hb2);
op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
@@ -787,11 +799,7 @@ static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2,
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
- if (hb1 < hb2)
- spin_lock(&hb1->lock);
- spin_lock(&hb2->lock);
- if (hb1 > hb2)
- spin_lock(&hb1->lock);
+ double_lock_hb(hb1, hb2);
if (likely(cmpval != NULL)) {
u32 curval;
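Note on the pattern: double_lock_hb() imposes a total order: whichever bucket
has the lower address is locked first, so any two tasks locking the same pair
always take the locks in the same order and cannot deadlock; the nested
annotation covers the second, same-class acquisition. The hb1 <= hb2 branch
also handles hb1 == hb2 with a single acquisition, since the inner hb1 < hb2
test then fails. A hypothetical unlock counterpart (not part of this patch)
must mirror that single-lock case:

    static inline void
    double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
    {
            /* release order is irrelevant for deadlock avoidance */
            spin_unlock(&hb1->lock);
            if (hb1 != hb2)
                    spin_unlock(&hb2->lock);
    }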