Patches contributed by Eötvös Loránd University
commit 74c383f1400f559562aa517d6d62f77245bddf52
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Dec 13 00:34:43 2006 -0800
[PATCH] lockdep: fix possible races while disabling lock-debugging
Jarek Poplawski noticed that lockdep global state could be accessed in a
racy way if one CPU did a lockdep assert (shutting lockdep down) while
another CPU tried to change that global state.
This patch fixes those races and cleans up lockdep's internal locking by
adding graph_lock()/graph_unlock()/debug_locks_off_graph_unlock() helpers.
(Also note that as we all know the Linux kernel is, by definition, bug-free
and perfect, so this code never triggers, so these fixes are highly
theoretical. I wrote this patch for aesthetic reasons alone.)
[akpm@osdl.org: build fix]
[jarkao2@o2.pl: build fix's refix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Jarek Poplawski <jarkao2@o2.pl>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 07a3d74a84be..01e750559034 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -43,13 +43,49 @@
#include "lockdep_internals.h"
/*
- * hash_lock: protects the lockdep hashes and class/list/hash allocators.
+ * lockdep_lock: protects the lockdep graph, the hashes and the
+ * class/list/hash allocators.
*
* This is one of the rare exceptions where it's justified
* to use a raw spinlock - we really dont want the spinlock
- * code to recurse back into the lockdep code.
+ * code to recurse back into the lockdep code...
*/
-static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+static int graph_lock(void)
+{
+ __raw_spin_lock(&lockdep_lock);
+ /*
+ * Make sure that if another CPU detected a bug while
+ * walking the graph we dont change it (while the other
+ * CPU is busy printing out stuff with the graph lock
+ * dropped already)
+ */
+ if (!debug_locks) {
+ __raw_spin_unlock(&lockdep_lock);
+ return 0;
+ }
+ return 1;
+}
+
+static inline int graph_unlock(void)
+{
+ __raw_spin_unlock(&lockdep_lock);
+ return 0;
+}
+
+/*
+ * Turn lock debugging off and return with 0 if it was off already,
+ * and also release the graph lock:
+ */
+static inline int debug_locks_off_graph_unlock(void)
+{
+ int ret = debug_locks_off();
+
+ __raw_spin_unlock(&lockdep_lock);
+
+ return ret;
+}
static int lockdep_initialized;
@@ -57,14 +93,15 @@ unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
/*
- * Allocate a lockdep entry. (assumes hash_lock held, returns
+ * Allocate a lockdep entry. (assumes the graph_lock held, returns
* with NULL on failure)
*/
static struct lock_list *alloc_list_entry(void)
{
if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
+ if (!debug_locks_off_graph_unlock())
+ return NULL;
+
printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
printk("turning off the locking correctness validator.\n");
return NULL;
@@ -205,7 +242,7 @@ static int softirq_verbose(struct lock_class *class)
/*
* Stack-trace: tightly packed array of stack backtrace
- * addresses. Protected by the hash_lock.
+ * addresses. Protected by the graph_lock.
*/
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
@@ -224,18 +261,15 @@ static int save_trace(struct stack_trace *trace)
trace->max_entries = trace->nr_entries;
nr_stack_trace_entries += trace->nr_entries;
- if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) {
- __raw_spin_unlock(&hash_lock);
- return 0;
- }
if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
- __raw_spin_unlock(&hash_lock);
- if (debug_locks_off()) {
- printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
- printk("turning off the locking correctness validator.\n");
- dump_stack();
- }
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
+ printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
+ printk("turning off the locking correctness validator.\n");
+ dump_stack();
+
return 0;
}
@@ -524,9 +558,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
{
struct task_struct *curr = current;
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=======================================================\n");
@@ -554,12 +586,10 @@ static noinline int print_circular_bug_tail(void)
if (debug_locks_silent)
return 0;
- /* hash_lock unlocked by the header */
- __raw_spin_lock(&hash_lock);
this.class = check_source->class;
if (!save_trace(&this.trace))
return 0;
- __raw_spin_unlock(&hash_lock);
+
print_circular_bug_entry(&this, 0);
printk("\nother info that might help us debug this:\n\n");
@@ -575,8 +605,10 @@ static noinline int print_circular_bug_tail(void)
static int noinline print_infinite_recursion_bug(void)
{
- __raw_spin_unlock(&hash_lock);
- DEBUG_LOCKS_WARN_ON(1);
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
+ WARN_ON(1);
return 0;
}
@@ -711,9 +743,7 @@ print_bad_irq_dependency(struct task_struct *curr,
enum lock_usage_bit bit2,
const char *irqclass)
{
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n======================================================\n");
@@ -794,9 +824,7 @@ static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
- debug_locks_off();
- __raw_spin_unlock(&hash_lock);
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=============================================\n");
@@ -972,14 +1000,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
* Debugging printouts:
*/
if (verbose(prev->class) || verbose(next->class)) {
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
printk("\n new dependency: ");
print_lock_name(prev->class);
printk(" => ");
print_lock_name(next->class);
printk("\n");
dump_stack();
- __raw_spin_lock(&hash_lock);
+ return graph_lock();
}
return 1;
}
@@ -1044,8 +1072,10 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
}
return 1;
out_bug:
- __raw_spin_unlock(&hash_lock);
- DEBUG_LOCKS_WARN_ON(1);
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
+ WARN_ON(1);
return 0;
}
@@ -1199,7 +1229,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
hash_head = classhashentry(key);
raw_local_irq_save(flags);
- __raw_spin_lock(&hash_lock);
+ if (!graph_lock()) {
+ raw_local_irq_restore(flags);
+ return NULL;
+ }
/*
* We have to do the hash-walk again, to avoid races
* with another CPU:
@@ -1212,9 +1245,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
* the hash:
*/
if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
- __raw_spin_unlock(&hash_lock);
+ if (!debug_locks_off_graph_unlock()) {
+ raw_local_irq_restore(flags);
+ return NULL;
+ }
raw_local_irq_restore(flags);
- debug_locks_off();
+
printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
printk("turning off the locking correctness validator.\n");
return NULL;
@@ -1235,18 +1271,23 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
list_add_tail_rcu(&class->hash_entry, hash_head);
if (verbose(class)) {
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
raw_local_irq_restore(flags);
+
printk("\nnew class %p: %s", class->key, class->name);
if (class->name_version > 1)
printk("#%d", class->name_version);
printk("\n");
dump_stack();
+
raw_local_irq_save(flags);
- __raw_spin_lock(&hash_lock);
+ if (!graph_lock()) {
+ raw_local_irq_restore(flags);
+ return NULL;
+ }
}
out_unlock_set:
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
raw_local_irq_restore(flags);
if (!subclass || force)
@@ -1287,19 +1328,21 @@ static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
* Allocate a new chain entry from the static array, and add
* it to the hash:
*/
- __raw_spin_lock(&hash_lock);
+ if (!graph_lock())
+ return 0;
/*
* We have to walk the chain again locked - to avoid duplicates:
*/
list_for_each_entry(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
goto cache_hit;
}
}
if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
printk("turning off the locking correctness validator.\n");
return 0;
@@ -1375,9 +1418,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
struct held_lock *this, int forwards,
const char *irqclass)
{
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=========================================================\n");
@@ -1466,9 +1507,7 @@ static int
print_usage_bug(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=================================\n");
@@ -1529,12 +1568,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
if (likely(this->class->usage_mask & new_mask))
return 1;
- __raw_spin_lock(&hash_lock);
+ if (!graph_lock())
+ return 0;
/*
* Make sure we didnt race:
*/
if (unlikely(this->class->usage_mask & new_mask)) {
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
return 1;
}
@@ -1720,16 +1760,16 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
debug_atomic_dec(&nr_unused_locks);
break;
default:
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
+ if (!debug_locks_off_graph_unlock())
+ return 0;
WARN_ON(1);
return 0;
}
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
/*
- * We must printk outside of the hash_lock:
+ * We must printk outside of the graph_lock:
*/
if (ret == 2) {
printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
@@ -2127,7 +2167,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
* We look up the chain_key and do the O(N^2) check and update of
* the dependencies only if this is a new dependency chain.
* (If lookup_chain_cache() returns with 1 it acquires
- * hash_lock for us)
+ * graph_lock for us)
*/
if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
/*
@@ -2160,7 +2200,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (!chain_head && ret != 2)
if (!check_prevs_add(curr, hlock))
return 0;
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
}
curr->lockdep_depth++;
check_chain_key(curr);
@@ -2472,7 +2512,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
int i;
raw_local_irq_save(flags);
- __raw_spin_lock(&hash_lock);
+ graph_lock();
/*
* Unhash all classes that were created by this module:
@@ -2486,7 +2526,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
zap_class(class);
}
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
raw_local_irq_restore(flags);
}
@@ -2514,20 +2554,20 @@ void lockdep_reset_lock(struct lockdep_map *lock)
* Debug check: in the end all mapped classes should
* be gone.
*/
- __raw_spin_lock(&hash_lock);
+ graph_lock();
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
if (list_empty(head))
continue;
list_for_each_entry_safe(class, next, head, hash_entry) {
if (unlikely(class == lock->class_cache)) {
- __raw_spin_unlock(&hash_lock);
- DEBUG_LOCKS_WARN_ON(1);
+ if (debug_locks_off_graph_unlock())
+ WARN_ON(1);
goto out_restore;
}
}
}
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
out_restore:
raw_local_irq_restore(flags);
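The caller-side pattern these helpers establish, sketched in isolation (an
illustrative fragment, not part of the patch; the function name is made up):

    static int example_graph_update(void)
    {
            if (!graph_lock())
                    return 0;       /* another CPU already shut lockdep down */

            /* ... modify shared lockdep graph state here ... */

            graph_unlock();
            return 1;
    }

On error paths, debug_locks_off_graph_unlock() combines the two steps: it
turns lock debugging off and drops the graph lock, returning 0 if debugging
was already off - in which case the caller suppresses its own report, since
another CPU is already printing one.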
commit 3117df0453828bd045c16244e6f50e5714667a8a
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Dec 13 00:34:43 2006 -0800
[PATCH] lockdep: print irq-trace info on asserts
When we print an assert due to a scheduling-in-atomic bug, and lockdep
is enabled, the IRQ tracing information from lockdep can be printed
to pinpoint the code location that disabled interrupts. This has saved me
quite a bit of debugging time in cases where the backtrace did not
identify the irq-disabling site well enough.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 498bfbd3b4e1..ea097dddc44f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -281,15 +281,25 @@ struct lock_class_key { };
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
-# define early_init_irq_lock_class() do { } while (0)
+static inline void early_init_irq_lock_class(void)
+{
+}
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
+extern void print_irqtrace_events(struct task_struct *curr);
#else
-# define early_boot_irqs_off() do { } while (0)
-# define early_boot_irqs_on() do { } while (0)
+static inline void early_boot_irqs_off(void)
+{
+}
+static inline void early_boot_irqs_on(void)
+{
+}
+static inline void print_irqtrace_events(struct task_struct *curr)
+{
+}
#endif
/*
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 69e92c6b0472..07a3d74a84be 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1447,7 +1447,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
}
-static inline void print_irqtrace_events(struct task_struct *curr)
+void print_irqtrace_events(struct task_struct *curr)
{
printk("irq event stamp: %u\n", curr->irq_events);
printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
@@ -1460,10 +1460,6 @@ static inline void print_irqtrace_events(struct task_struct *curr)
print_ip_sym(curr->softirq_disable_ip);
}
-#else
-static inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
#endif
static int
diff --git a/kernel/sched.c b/kernel/sched.c
index 8a0afb97af71..5cd833bc2173 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3429,6 +3429,8 @@ asmlinkage void __sched schedule(void)
"%s/0x%08x/%d\n",
current->comm, preempt_count(), current->pid);
debug_show_held_locks(current);
+ if (irqs_disabled())
+ print_irqtrace_events(current);
dump_stack();
}
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -6977,6 +6979,8 @@ void __might_sleep(char *file, int line)
printk("in_atomic():%d, irqs_disabled():%d\n",
in_atomic(), irqs_disabled());
debug_show_held_locks(current);
+ if (irqs_disabled())
+ print_irqtrace_events(current);
dump_stack();
}
#endif
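A detail of the header change worth noting: the empty do { } while (0)
macros become static inline stubs. A minimal illustration of the difference
(hypothetical names, not from the patch):

    /* Macro stub: the argument is discarded before compilation, so even
     * a bogus call like stub_macro(no_such_symbol) compiles cleanly. */
    #define stub_macro(curr)        do { } while (0)

    /* Inline stub: the argument is type-checked in every configuration,
     * and the empty body still compiles away to nothing. */
    static inline void stub_inline(struct task_struct *curr)
    {
    }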
commit 27c3b23226fc649de47e4886ccbf994482f388ba
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Dec 13 00:34:42 2006 -0800
[PATCH] lockdep: use chain hash on CONFIG_DEBUG_LOCKDEP too
CONFIG_DEBUG_LOCKDEP is unacceptably slow because it does not utilize
the chain-hash. Turn the chain-hash back on in this case too.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index c4ffd3c1dec4..69e92c6b0472 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1276,14 +1276,6 @@ static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
if (chain->chain_key == chain_key) {
cache_hit:
debug_atomic_inc(&chain_lookup_hits);
- /*
- * In the debugging case, force redundant checking
- * by returning 1:
- */
-#ifdef CONFIG_DEBUG_LOCKDEP
- __raw_spin_lock(&hash_lock);
- return 1;
-#endif
if (very_verbose(class))
printk("\nhash chain already cached, key: %016Lx tail class: [%p] %s\n", chain_key, class->key, class->name);
return 0;
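For context, this is roughly how __lock_acquire() consumes the cache (a
condensed sketch based on the diffs in this series, not a verbatim excerpt;
hash_lock is still the lock's name at this point in the series):

    if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
            /*
             * New chain: the full O(N^2) dependency validation runs
             * here, with hash_lock held by lookup_chain_cache().
             */
            if (!check_prevs_add(curr, hlock))
                    return 0;
            __raw_spin_unlock(&hash_lock);
    }
    /* Cache hit: this exact lock sequence was validated before,
     * so the expensive graph walk is skipped entirely. */

Forcing a cache miss under CONFIG_DEBUG_LOCKDEP meant the O(N^2) walk ran
on every acquisition, hence the unacceptable slowdown.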
commit 33e94e960b57497fe7fd9493080210b6d87e88e6
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Dec 13 00:34:41 2006 -0800
[PATCH] lockdep: clean up VERY_VERBOSE define
Cleanup: the VERY_VERBOSE define was unnecessarily wrapped in #ifdef VERBOSE,
but VERBOSE is always defined (as 0 or 1), so that #ifdef was always true.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 77fa791f6f31..c4ffd3c1dec4 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -145,9 +145,7 @@ EXPORT_SYMBOL(lockdep_on);
*/
#define VERBOSE 0
-#ifdef VERBOSE
-# define VERY_VERBOSE 0
-#endif
+#define VERY_VERBOSE 0
#if VERBOSE
# define HARDIRQ_VERBOSE 1
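The underlying preprocessor gotcha, in isolation: defining a macro to 0
still defines it, so #ifdef tests true even where #if would not.

    #define VERBOSE 0

    #ifdef VERBOSE
    /* Taken: VERBOSE is defined, regardless of its value. */
    #endif

    #if VERBOSE
    /* Not taken: this tests the value, which is 0. */
    #endif

The old #ifdef VERBOSE guard around VERY_VERBOSE was therefore always
true, i.e. dead code.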
commit 23d95a03d63eff25118b50737006ce6e7c1b8def
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Dec 13 00:34:40 2006 -0800
[PATCH] lockdep: improve lockdep_reset()
Clear all the chains during lockdep_reset(). This fixes some locking-selftest
false positives I saw on -rt. (I never saw those on mainline, but it could
happen there too.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 5ba2825bd46a..77fa791f6f31 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2437,6 +2437,7 @@ EXPORT_SYMBOL_GPL(lock_release);
void lockdep_reset(void)
{
unsigned long flags;
+ int i;
raw_local_irq_save(flags);
current->curr_chain_key = 0;
@@ -2447,6 +2448,8 @@ void lockdep_reset(void)
nr_softirq_chains = 0;
nr_process_chains = 0;
debug_locks = 1;
+ for (i = 0; i < CHAINHASH_SIZE; i++)
+ INIT_LIST_HEAD(chainhash_table + i);
raw_local_irq_restore(flags);
}
commit 81fc685a898f84d0787eeebb1c118de0bd3484a0
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Dec 13 00:34:40 2006 -0800
[PATCH] lockdep: improve verbose messages
Make verbose lockdep messages (off by default) more informative by printing
out the hash chain key. (This patch is what helped me catch the earlier
lockdep hash-collision bug.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 5d69c7d7407e..5ba2825bd46a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1264,7 +1264,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
* add it and return 1 - in this case the new dependency chain is
* validated. If the key is already hashed, return 0.
*/
-static inline int lookup_chain_cache(u64 chain_key)
+static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
{
struct list_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
@@ -1286,9 +1286,13 @@ static inline int lookup_chain_cache(u64 chain_key)
__raw_spin_lock(&hash_lock);
return 1;
#endif
+ if (very_verbose(class))
+ printk("\nhash chain already cached, key: %016Lx tail class: [%p] %s\n", chain_key, class->key, class->name);
return 0;
}
}
+ if (very_verbose(class))
+ printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n", chain_key, class->key, class->name);
/*
* Allocate a new chain entry from the static array, and add
* it to the hash:
@@ -2139,7 +2143,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
* (If lookup_chain_cache() returns with 1 it acquires
* hash_lock for us)
*/
- if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) {
+ if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
/*
* Check whether last held lock:
*
commit a664089741aa9010333ecbdadbf5d9de62bafa2d
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Dec 13 00:34:39 2006 -0800
[PATCH] lockdep: filter off by default
Fix a typo in the class_filter() function. (Filtering is off by default, so
this only affects lockdep-internal debugging cases.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b02032476dc2..5d69c7d7407e 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -172,8 +172,8 @@ static int class_filter(struct lock_class *class)
!strcmp(class->name, "&struct->lockfield"))
return 1;
#endif
- /* Allow everything else. 0 would be filter everything else */
- return 1;
+ /* Filter everything else. 1 would be to allow everything else */
+ return 0;
}
#endif
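For context, class_filter() gates the verbose predicates; the consumer looks
roughly like this (a sketch of the surrounding lockdep.c code, abbreviated):

    static int verbose(struct lock_class *class)
    {
    #if VERBOSE
            return class_filter(class);
    #endif
            return 0;
    }

With the old fall-through of 1, enabling VERBOSE made every class verbose,
drowning out the explicit match list above the fall-through; returning 0
restores the intent that only explicitly listed classes get verbose output.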
commit 5d6f647fc6bb57377c9f417c4752e43189f56bb1
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Dec 13 00:34:36 2006 -0800
[PATCH] debug: add sysrq_always_enabled boot option
Most distributions enable sysrq support but set it to 0 by default. Add a
sysrq_always_enabled boot option to always enable sysrq keys. Useful for
debugging - without having to modify the distribution's config files (which
might not be possible if the kernel is on a live CD, etc.).
Also, while at it, clean up the sysrq interfaces.
[bunk@stusta.de: make sysrq_always_enabled_setup() static]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d8323b8893c3..ef69c75780bf 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1656,6 +1656,12 @@ and is between 256 and 4096 characters. It is defined in the file
sym53c416= [HW,SCSI]
See header of drivers/scsi/sym53c416.c.
+ sysrq_always_enabled
+ [KNL]
+ Ignore sysrq setting - this boot parameter will
+ neutralize any effect of /proc/sys/kernel/sysrq.
+ Useful for debugging.
+
t128= [HW,SCSI]
See header of drivers/scsi/t128.c.
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 05810c8d20bc..13935235e066 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -41,7 +41,34 @@
#include <asm/irq_regs.h>
/* Whether we react on sysrq keys or just ignore them */
-int sysrq_enabled = 1;
+int __read_mostly __sysrq_enabled = 1;
+
+static int __read_mostly sysrq_always_enabled;
+
+int sysrq_on(void)
+{
+ return __sysrq_enabled || sysrq_always_enabled;
+}
+
+/*
+ * A value of 1 means 'all', other nonzero values are an op mask:
+ */
+static inline int sysrq_on_mask(int mask)
+{
+ return sysrq_always_enabled || __sysrq_enabled == 1 ||
+ (__sysrq_enabled & mask);
+}
+
+static int __init sysrq_always_enabled_setup(char *str)
+{
+ sysrq_always_enabled = 1;
+ printk(KERN_INFO "debug: sysrq always enabled.\n");
+
+ return 1;
+}
+
+__setup("sysrq_always_enabled", sysrq_always_enabled_setup);
+
static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
{
@@ -379,8 +406,7 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
* Should we check for enabled operations (/proc/sysrq-trigger
* should not) and is the invoked operation enabled?
*/
- if (!check_mask || sysrq_enabled == 1 ||
- (sysrq_enabled & op_p->enable_mask)) {
+ if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
printk("%s\n", op_p->action_msg);
console_loglevel = orig_log_level;
op_p->handler(key, tty);
@@ -414,9 +440,8 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
*/
void handle_sysrq(int key, struct tty_struct *tty)
{
- if (!sysrq_enabled)
- return;
- __handle_sysrq(key, tty, 1);
+ if (sysrq_on())
+ __handle_sysrq(key, tty, 1);
}
EXPORT_SYMBOL(handle_sysrq);
diff --git a/drivers/char/viocons.c b/drivers/char/viocons.c
index 6d2e314860df..0e0da443cbd5 100644
--- a/drivers/char/viocons.c
+++ b/drivers/char/viocons.c
@@ -61,10 +61,7 @@
static DEFINE_SPINLOCK(consolelock);
static DEFINE_SPINLOCK(consoleloglock);
-#ifdef CONFIG_MAGIC_SYSRQ
static int vio_sysrq_pressed;
-extern int sysrq_enabled;
-#endif
#define VIOCHAR_NUM_BUF 16
@@ -936,8 +933,10 @@ static void vioHandleData(struct HvLpEvent *event)
*/
num_pushed = 0;
for (index = 0; index < cevent->len; index++) {
-#ifdef CONFIG_MAGIC_SYSRQ
- if (sysrq_enabled) {
+ /*
+ * Will be optimized away if !CONFIG_MAGIC_SYSRQ:
+ */
+ if (sysrq_on()) {
/* 0x0f is the ascii character for ^O */
if (cevent->data[index] == '\x0f') {
vio_sysrq_pressed = 1;
@@ -956,7 +955,6 @@ static void vioHandleData(struct HvLpEvent *event)
continue;
}
}
-#endif
/*
* The sysrq sequence isn't included in this check if
* sysrq is enabled and compiled into the kernel because
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 9df8833670cb..98a1d8cfb73d 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -37,23 +37,37 @@ struct sysrq_key_op {
#ifdef CONFIG_MAGIC_SYSRQ
+extern int sysrq_on(void);
+
+/*
+ * Do not use this one directly:
+ */
+extern int __sysrq_enabled;
+
/* Generic SysRq interface -- you may call it from any device driver, supplying
* ASCII code of the key, pointer to registers and kbd/tty structs (if they
* are available -- else NULL's).
*/
-void handle_sysrq(int, struct tty_struct *);
-void __handle_sysrq(int, struct tty_struct *, int check_mask);
-int register_sysrq_key(int, struct sysrq_key_op *);
-int unregister_sysrq_key(int, struct sysrq_key_op *);
+void handle_sysrq(int key, struct tty_struct *tty);
+void __handle_sysrq(int key, struct tty_struct *tty, int check_mask);
+int register_sysrq_key(int key, struct sysrq_key_op *op);
+int unregister_sysrq_key(int key, struct sysrq_key_op *op);
struct sysrq_key_op *__sysrq_get_key_op(int key);
#else
+static inline int sysrq_on(void)
+{
+ return 0;
+}
static inline int __reterr(void)
{
return -EINVAL;
}
+static inline void handle_sysrq(int key, struct tty_struct *tty)
+{
+}
#define register_sysrq_key(ig,nore) __reterr()
#define unregister_sysrq_key(ig,nore) __reterr()
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 130c5ec9ee0b..600b33358ded 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -65,7 +65,6 @@ extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern int sysctl_panic_on_oom;
extern int max_threads;
-extern int sysrq_enabled;
extern int core_uses_pid;
extern int suid_dumpable;
extern char core_pattern[];
@@ -543,7 +542,7 @@ static ctl_table kern_table[] = {
{
.ctl_name = KERN_SYSRQ,
.procname = "sysrq",
- .data = &sysrq_enabled,
+ .data = &__sysrq_enabled,
.maxlen = sizeof (int),
.mode = 0644,
.proc_handler = &proc_dointvec,
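Typical usage: append the new parameter to the kernel command line to
override whatever the distribution configured:

    sysrq_always_enabled

or re-enable SysRq at runtime through the existing sysctl:

    echo 1 > /proc/sys/kernel/sysrq

Note that the boot parameter cannot be undone from userspace: with it set,
sysrq_on() returns true regardless of what is later written to the sysctl.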
commit 3640543df26fd38f31f0c6decc35c07be2a6307c
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Dec 12 17:20:42 2006 +0100
[PATCH] netpoll: fix netpoll lockup
Current -git doesn't boot on my laptop because netpoll does not unlock the
tx lock in the else branch.
I booted this on my laptop with lockdep enabled; there are no
locking complaints and it works fine.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a01abdd2d3ea..823215d8e90f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -55,6 +55,7 @@ static void queue_process(struct work_struct *work)
struct netpoll_info *npinfo =
container_of(work, struct netpoll_info, tx_work.work);
struct sk_buff *skb;
+ unsigned long flags;
while ((skb = skb_dequeue(&npinfo->txq))) {
struct net_device *dev = skb->dev;
@@ -64,15 +65,19 @@ static void queue_process(struct work_struct *work)
continue;
}
- netif_tx_lock_bh(dev);
+ local_irq_save(flags);
+ netif_tx_lock(dev);
if (netif_queue_stopped(dev) ||
dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
skb_queue_head(&npinfo->txq, skb);
- netif_tx_unlock_bh(dev);
+ netif_tx_unlock(dev);
+ local_irq_restore(flags);
schedule_delayed_work(&npinfo->tx_work, HZ/10);
return;
}
+ netif_tx_unlock(dev);
+ local_irq_restore(flags);
}
}
commit b57bd06655a028aba7b92e1c19c2093e7fcfb341
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Dec 12 13:49:35 2006 +0100
[PATCH] net, 8139too.c: fix netpoll deadlock
Fix a deadlock in the 8139too driver: poll handlers should never forcibly
enable local interrupts, because they might be used by netpoll/printk
from IRQ context.
=================================
[ INFO: inconsistent lock state ]
2.6.19 #11
---------------------------------
inconsistent {softirq-on-W} -> {in-softirq-W} usage.
swapper/1 [HC0[0]:SC1[1]:HE1:SE0] takes:
(&npinfo->poll_lock){-+..}, at: [<c0350a41>] net_rx_action+0x64/0x1de
{softirq-on-W} state was registered at:
[<c0134c86>] mark_lock+0x5b/0x39c
[<c0135012>] mark_held_locks+0x4b/0x68
[<c01351e9>] trace_hardirqs_on+0x115/0x139
[<c02879e6>] rtl8139_poll+0x3d7/0x3f4
[<c035c85d>] netpoll_poll+0x82/0x32f
[<c035c775>] netpoll_send_skb+0xc9/0x12f
[<c035cdcc>] netpoll_send_udp+0x253/0x25b
[<c0288463>] write_msg+0x40/0x65
[<c011cead>] __call_console_drivers+0x45/0x51
[<c011cf16>] _call_console_drivers+0x5d/0x61
[<c011d4fb>] release_console_sem+0x11f/0x1d8
[<c011d7d7>] register_console+0x1ac/0x1b3
[<c02883f8>] init_netconsole+0x55/0x67
[<c010040c>] init+0x9a/0x24e
[<c01049cf>] kernel_thread_helper+0x7/0x10
[<ffffffff>] 0xffffffff
irq event stamp: 819992
hardirqs last enabled at (819992): [<c0350a16>] net_rx_action+0x39/0x1de
hardirqs last disabled at (819991): [<c0350b1e>] net_rx_action+0x141/0x1de
softirqs last enabled at (817552): [<c01214e4>] __do_softirq+0xa3/0xa8
softirqs last disabled at (819987): [<c0106051>] do_softirq+0x5b/0xc9
other info that might help us debug this:
no locks held by swapper/1.
stack backtrace:
[<c0104d88>] dump_trace+0x63/0x1e8
[<c0104f26>] show_trace_log_lvl+0x19/0x2e
[<c010532d>] show_trace+0x12/0x14
[<c0105343>] dump_stack+0x14/0x16
[<c0134980>] print_usage_bug+0x23c/0x246
[<c0134d33>] mark_lock+0x108/0x39c
[<c01356a7>] __lock_acquire+0x361/0x9ed
[<c0136018>] lock_acquire+0x56/0x72
[<c03aff1f>] _spin_lock+0x35/0x42
[<c0350a41>] net_rx_action+0x64/0x1de
[<c0121493>] __do_softirq+0x52/0xa8
[<c0106051>] do_softirq+0x5b/0xc9
[<c0121338>] irq_exit+0x3c/0x48
[<c0106163>] do_IRQ+0xa4/0xbd
[<c01047c6>] common_interrupt+0x2e/0x34
[<c011db92>] vprintk+0x2c0/0x309
[<c011dbf6>] printk+0x1b/0x1d
[<c01003f2>] init+0x80/0x24e
[<c01049cf>] kernel_thread_helper+0x7/0x10
=======================
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 931028f672de..35ad5cff18e6 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2131,14 +2131,15 @@ static int rtl8139_poll(struct net_device *dev, int *budget)
}
if (done) {
+ unsigned long flags;
/*
* Order is important since data can get interrupted
* again when we think we are done.
*/
- local_irq_disable();
+ local_irq_save(flags);
RTL_W16_F(IntrMask, rtl8139_intr_mask);
__netif_rx_complete(dev);
- local_irq_enable();
+ local_irq_restore(flags);
}
spin_unlock(&tp->rx_lock);
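The general rule that both of the last two patches enforce, sketched side by
side (illustrative fragments, not from either diff):

    /* Buggy: forcibly re-enables interrupts, even when the caller -
     * e.g. netpoll invoked from printk in IRQ context - had them off. */
    local_irq_disable();
    /* ... critical section ... */
    local_irq_enable();

    /* Correct: restore whatever interrupt state the caller was in. */
    unsigned long flags;

    local_irq_save(flags);
    /* ... critical section ... */
    local_irq_restore(flags);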