X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=kernel%2Flockdep.c;fp=kernel%2Flockdep.c;h=33c9eeb6127c33664d6a0822fb5ed4fd801994f6;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=3a0c44c60b3d7319955e1d376dd082925aa23e23;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3a0c44c60..33c9eeb61 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -43,13 +43,49 @@
 #include "lockdep_internals.h"
 
 /*
- * hash_lock: protects the lockdep hashes and class/list/hash allocators.
+ * lockdep_lock: protects the lockdep graph, the hashes and the
+ * class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really dont want the spinlock
- * code to recurse back into the lockdep code.
+ * code to recurse back into the lockdep code...
 */
-static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+static int graph_lock(void)
+{
+	__raw_spin_lock(&lockdep_lock);
+	/*
+	 * Make sure that if another CPU detected a bug while
+	 * walking the graph we dont change it (while the other
+	 * CPU is busy printing out stuff with the graph lock
+	 * dropped already)
+	 */
+	if (!debug_locks) {
+		__raw_spin_unlock(&lockdep_lock);
+		return 0;
+	}
+	return 1;
+}
+
+static inline int graph_unlock(void)
+{
+	__raw_spin_unlock(&lockdep_lock);
+	return 0;
+}
+
+/*
+ * Turn lock debugging off and return with 0 if it was off already,
+ * and also release the graph lock:
+ */
+static inline int debug_locks_off_graph_unlock(void)
+{
+	int ret = debug_locks_off();
+
+	__raw_spin_unlock(&lockdep_lock);
+
+	return ret;
+}
 
 static int lockdep_initialized;
 
@@ -57,14 +93,15 @@ unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
 /*
- * Allocate a lockdep entry. (assumes hash_lock held, returns
+ * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
 static struct lock_list *alloc_list_entry(void)
 {
 	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
-		__raw_spin_unlock(&hash_lock);
-		debug_locks_off();
+		if (!debug_locks_off_graph_unlock())
+			return NULL;
+
 		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
 		printk("turning off the locking correctness validator.\n");
 		return NULL;
@@ -122,8 +159,8 @@ static struct list_head chainhash_table[CHAINHASH_SIZE];
 * unique.
 */
 #define iterate_chain_key(key1, key2) \
-	(((key1) << MAX_LOCKDEP_KEYS_BITS/2) ^ \
-	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS/2)) ^ \
+	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
+	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
 	(key2))
 
 void lockdep_off(void)
@@ -140,21 +177,12 @@ void lockdep_on(void)
 
 EXPORT_SYMBOL(lockdep_on);
 
-int lockdep_internal(void)
-{
-	return current->lockdep_recursion != 0;
-}
-
-EXPORT_SYMBOL(lockdep_internal);
-
 /*
 * Debugging switches:
 */
 
 #define VERBOSE 0
-#ifdef VERBOSE
-# define VERY_VERBOSE 0
-#endif
+#define VERY_VERBOSE 0
 
 #if VERBOSE
 # define HARDIRQ_VERBOSE 1
@@ -179,8 +207,8 @@ static int class_filter(struct lock_class *class)
 			!strcmp(class->name, "&struct->lockfield"))
 		return 1;
 #endif
-	/* Allow everything else. 0 would be filter everything else */
-	return 1;
+	/* Filter everything else. 1 would be to allow everything else */
+	return 0;
 }
 #endif
 
@@ -214,7 +242,7 @@ static int softirq_verbose(struct lock_class *class)
 
 /*
 * Stack-trace: tightly packed array of stack backtrace
- * addresses. Protected by the hash_lock.
+ * addresses. Protected by the graph_lock.
 */
 unsigned long nr_stack_trace_entries;
 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
 
@@ -228,25 +256,20 @@ static int save_trace(struct stack_trace *trace)
 	trace->skip = 3;
 	trace->all_contexts = 0;
 
-	/* Make sure to not recurse in case the the unwinder needs to tak
-e locks. */
-	lockdep_off();
 	save_stack_trace(trace, NULL);
-	lockdep_on();
 
 	trace->max_entries = trace->nr_entries;
 
 	nr_stack_trace_entries += trace->nr_entries;
-	if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES))
-		return 0;
 
 	if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
-		__raw_spin_unlock(&hash_lock);
-		if (debug_locks_off()) {
-			printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
-			printk("turning off the locking correctness validator.\n");
-			dump_stack();
-		}
+		if (!debug_locks_off_graph_unlock())
+			return 0;
+
+		printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
+		printk("turning off the locking correctness validator.\n");
+		dump_stack();
+
 		return 0;
 	}
 
@@ -357,7 +380,7 @@ get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
 
 static void print_lock_name(struct lock_class *class)
 {
-	char str[128], c1, c2, c3, c4;
+	char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4;
 	const char *name;
 
 	get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -379,7 +402,7 @@ static void print_lock_name(struct lock_class *class)
 static void print_lockdep_cache(struct lockdep_map *lock)
 {
 	const char *name;
-	char str[128];
+	char str[KSYM_NAME_LEN + 1];
 
 	name = lock->name;
 	if (!name)
@@ -449,7 +472,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 	print_lock_class_header(class, depth);
 
 	list_for_each_entry(entry, &class->locks_after, entry) {
-		DEBUG_LOCKS_WARN_ON(!entry->class);
+		if (DEBUG_LOCKS_WARN_ON(!entry->class))
+			return;
+
 		print_lock_dependencies(entry->class, depth + 1);
 
 		printk("%*s ... acquired at:\n",depth,"");
@@ -474,7 +499,8 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 		return 0;
 
 	entry->class = this;
-	save_trace(&entry->trace);
+	if (!save_trace(&entry->trace))
+		return 0;
 
 	/*
 	 * Since we never remove from the dependency list, the list can
@@ -518,9 +544,9 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
 
 static void print_kernel_version(void)
 {
-	printk("%s %.*s\n", system_utsname.release,
-		(int)strcspn(system_utsname.version, " "),
-		system_utsname.version);
+	printk("%s %.*s\n", init_utsname()->release,
+		(int)strcspn(init_utsname()->version, " "),
+		init_utsname()->version);
 }
 
 /*
@@ -532,9 +558,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 {
 	struct task_struct *curr = current;
 
-	__raw_spin_unlock(&hash_lock);
-	debug_locks_off();
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	printk("\n=======================================================\n");
@@ -563,7 +587,9 @@ static noinline int print_circular_bug_tail(void)
 		return 0;
 
 	this.class = check_source->class;
-	save_trace(&this.trace);
+	if (!save_trace(&this.trace))
+		return 0;
+
 	print_circular_bug_entry(&this, 0);
 
 	printk("\nother info that might help us debug this:\n\n");
@@ -575,10 +601,14 @@ static noinline int print_circular_bug_tail(void)
 	return 0;
 }
 
+#define RECURSION_LIMIT 40
+
 static int noinline print_infinite_recursion_bug(void)
 {
-	__raw_spin_unlock(&hash_lock);
-	DEBUG_LOCKS_WARN_ON(1);
+	if (!debug_locks_off_graph_unlock())
+		return 0;
+
+	WARN_ON(1);
 
 	return 0;
 }
@@ -595,7 +625,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	debug_atomic_inc(&nr_cyclic_check_recursions);
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 	/*
 	 * Check this lock's dependency list:
@@ -645,7 +675,7 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
 
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 
 	debug_atomic_inc(&nr_find_usage_forwards_checks);
@@ -684,7 +714,7 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 
 	debug_atomic_inc(&nr_find_usage_backwards_checks);
@@ -713,9 +743,7 @@ print_bad_irq_dependency(struct task_struct *curr,
 			enum lock_usage_bit bit2,
 			const char *irqclass)
 {
-	__raw_spin_unlock(&hash_lock);
-	debug_locks_off();
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	printk("\n======================================================\n");
@@ -796,9 +824,7 @@ static int
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 		struct held_lock *next)
 {
-	debug_locks_off();
-	__raw_spin_unlock(&hash_lock);
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	printk("\n=============================================\n");
@@ -964,27 +990,24 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 			&prev->class->locks_after, next->acquire_ip);
 	if (!ret)
 		return 0;
-	/*
-	 * Return value of 2 signals 'dependency already added',
-	 * in that case we dont have to add the backlink either.
-	 */
-	if (ret == 2)
-		return 2;
+
 	ret = add_lock_to_list(next->class, prev->class,
 			&next->class->locks_before, next->acquire_ip);
 
+	if (!ret)
+		return 0;
 	/*
 	 * Debugging printouts:
 	 */
 	if (verbose(prev->class) || verbose(next->class)) {
-		__raw_spin_unlock(&hash_lock);
+		graph_unlock();
 		printk("\n new dependency: ");
 		print_lock_name(prev->class);
 		printk(" => ");
 		print_lock_name(next->class);
 		printk("\n");
 		dump_stack();
-		__raw_spin_lock(&hash_lock);
+		return graph_lock();
 	}
 	return 1;
 }
@@ -1023,7 +1046,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 * added:
 		 */
 		if (hlock->read != 2) {
-			check_prev_add(curr, hlock, next);
+			if (!check_prev_add(curr, hlock, next))
+				return 0;
 			/*
 			 * Stop after the first non-trylock entry,
 			 * as non-trylock entries have added their
@@ -1048,8 +1072,10 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 	}
 	return 1;
 out_bug:
-	__raw_spin_unlock(&hash_lock);
-	DEBUG_LOCKS_WARN_ON(1);
+	if (!debug_locks_off_graph_unlock())
+		return 0;
+
+	WARN_ON(1);
 
 	return 0;
 }
@@ -1079,7 +1105,8 @@ static int static_obj(void *obj)
 	 */
 	for_each_possible_cpu(i) {
 		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
-		end = (unsigned long) &__per_cpu_end + per_cpu_offset(i);
+		end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
+					+ per_cpu_offset(i);
 
 		if ((addr >= start) && (addr < end))
 			return 1;
@@ -1114,8 +1141,6 @@ static int count_matching_names(struct lock_class *new_class)
 	return count + 1;
 }
 
-extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
-
 /*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1153,8 +1178,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 * (or spin_lock_init()) call - which acts as the key. For static
 	 * locks we use the lock object itself as the key.
 	 */
-	if (sizeof(struct lock_class_key) > sizeof(struct lock_class))
-		__error_too_big_MAX_LOCKDEP_SUBCLASSES();
+	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
 
 	key = lock->key->subkeys + subclass;
 
@@ -1182,6 +1206,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
 	struct lock_class *class;
+	unsigned long flags;
 
 	class = look_up_lock_class(lock, subclass);
 	if (likely(class))
@@ -1203,7 +1228,11 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	key = lock->key->subkeys + subclass;
 	hash_head = classhashentry(key);
 
-	__raw_spin_lock(&hash_lock);
+	raw_local_irq_save(flags);
+	if (!graph_lock()) {
+		raw_local_irq_restore(flags);
+		return NULL;
+	}
 	/*
 	 * We have to do the hash-walk again, to avoid races
 	 * with another CPU:
@@ -1216,8 +1245,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	 * the hash:
 	 */
 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
-		__raw_spin_unlock(&hash_lock);
-		debug_locks_off();
+		if (!debug_locks_off_graph_unlock()) {
+			raw_local_irq_restore(flags);
+			return NULL;
+		}
+		raw_local_irq_restore(flags);
+
 		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
 		printk("turning off the locking correctness validator.\n");
 		return NULL;
@@ -1238,16 +1271,24 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	list_add_tail_rcu(&class->hash_entry, hash_head);
 
 	if (verbose(class)) {
-		__raw_spin_unlock(&hash_lock);
+		graph_unlock();
+		raw_local_irq_restore(flags);
+
 		printk("\nnew class %p: %s", class->key, class->name);
 		if (class->name_version > 1)
 			printk("#%d", class->name_version);
 		printk("\n");
 		dump_stack();
-		__raw_spin_lock(&hash_lock);
+
+		raw_local_irq_save(flags);
+		if (!graph_lock()) {
+			raw_local_irq_restore(flags);
+			return NULL;
+		}
 	}
 out_unlock_set:
-	__raw_spin_unlock(&hash_lock);
+	graph_unlock();
+	raw_local_irq_restore(flags);
 
 	if (!subclass || force)
 		lock->class_cache = class;
@@ -1262,7 +1303,7 @@ out_unlock_set:
 * add it and return 0 - in this case the new dependency chain is
 * validated. If the key is already hashed, return 1.
 */
-static inline int lookup_chain_cache(u64 chain_key)
+static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
 {
 	struct list_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 
@@ -1276,34 +1317,36 @@
 		if (chain->chain_key == chain_key) {
 cache_hit:
 			debug_atomic_inc(&chain_lookup_hits);
-			/*
-			 * In the debugging case, force redundant checking
-			 * by returning 1:
-			 */
-#ifdef CONFIG_DEBUG_LOCKDEP
-			__raw_spin_lock(&hash_lock);
-			return 1;
-#endif
+			if (very_verbose(class))
+				printk("\nhash chain already cached, key: "
+					"%016Lx tail class: [%p] %s\n",
+					(unsigned long long)chain_key,
+					class->key, class->name);
 			return 0;
 		}
 	}
+	if (very_verbose(class))
+		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
+			(unsigned long long)chain_key, class->key, class->name);
 	/*
 	 * Allocate a new chain entry from the static array, and add
 	 * it to the hash:
 	 */
-	__raw_spin_lock(&hash_lock);
+	if (!graph_lock())
+		return 0;
 	/*
 	 * We have to walk the chain again locked - to avoid duplicates:
 	 */
 	list_for_each_entry(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
-			__raw_spin_unlock(&hash_lock);
+			graph_unlock();
 			goto cache_hit;
 		}
 	}
 	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
-		__raw_spin_unlock(&hash_lock);
-		debug_locks_off();
+		if (!debug_locks_off_graph_unlock())
			return 0;
+
 		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
 		printk("turning off the locking correctness validator.\n");
 		return 0;
@@ -1379,9 +1422,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 			struct held_lock *this, int forwards,
 			const char *irqclass)
 {
-	__raw_spin_unlock(&hash_lock);
-	debug_locks_off();
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	printk("\n=========================================================\n");
@@ -1451,7 +1492,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 	return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
 }
 
-static inline void print_irqtrace_events(struct task_struct *curr)
+void print_irqtrace_events(struct task_struct *curr)
 {
 	printk("irq event stamp: %u\n", curr->irq_events);
 	printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
@@ -1464,19 +1505,13 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 	print_ip_sym(curr->softirq_disable_ip);
 }
 
-#else
-static inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
 #endif
 
 static int
 print_usage_bug(struct task_struct *curr, struct held_lock *this,
 		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
 {
-	__raw_spin_unlock(&hash_lock);
-	debug_locks_off();
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	printk("\n=================================\n");
@@ -1537,12 +1572,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	if (likely(this->class->usage_mask & new_mask))
 		return 1;
 
-	__raw_spin_lock(&hash_lock);
+	if (!graph_lock())
+		return 0;
 	/*
 	 * Make sure we didnt race:
 	 */
 	if (unlikely(this->class->usage_mask & new_mask)) {
-		__raw_spin_unlock(&hash_lock);
+		graph_unlock();
 		return 1;
 	}
 
@@ -1728,15 +1764,16 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		debug_atomic_dec(&nr_unused_locks);
 		break;
 	default:
-		debug_locks_off();
+		if (!debug_locks_off_graph_unlock())
+			return 0;
 		WARN_ON(1);
 		return 0;
 	}
 
-	__raw_spin_unlock(&hash_lock);
+	graph_unlock();
 
 	/*
-	 * We must printk outside of the hash_lock:
+	 * We must printk outside of the graph_lock:
 	 */
 	if (ret == 2) {
 		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
@@ -2134,9 +2171,9 @@ out_calc_hash:
 	 * We look up the chain_key and do the O(N^2) check and update of
 	 * the dependencies only if this is a new dependency chain.
 	 * (If lookup_chain_cache() returns with 1 it acquires
-	 * hash_lock for us)
+	 * graph_lock for us)
 	 */
-	if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) {
+	if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
 		/*
 		 * Check whether last held lock:
 		 *
@@ -2167,7 +2204,7 @@ out_calc_hash:
 		if (!chain_head && ret != 2)
 			if (!check_prevs_add(curr, hlock))
 				return 0;
-		__raw_spin_unlock(&hash_lock);
+		graph_unlock();
 	}
 	curr->lockdep_depth++;
 	check_chain_key(curr);
@@ -2430,6 +2467,7 @@ EXPORT_SYMBOL_GPL(lock_release);
 void lockdep_reset(void)
 {
 	unsigned long flags;
+	int i;
 
 	raw_local_irq_save(flags);
 	current->curr_chain_key = 0;
@@ -2440,6 +2478,8 @@ void lockdep_reset(void)
 	nr_softirq_chains = 0;
 	nr_process_chains = 0;
 	debug_locks = 1;
+	for (i = 0; i < CHAINHASH_SIZE; i++)
+		INIT_LIST_HEAD(chainhash_table + i);
 	raw_local_irq_restore(flags);
 }
 
@@ -2476,7 +2516,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 	int i;
 
 	raw_local_irq_save(flags);
-	__raw_spin_lock(&hash_lock);
+	graph_lock();
 
 	/*
 	 * Unhash all classes that were created by this module:
@@ -2490,7 +2530,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 			zap_class(class);
 	}
 
-	__raw_spin_unlock(&hash_lock);
+	graph_unlock();
 	raw_local_irq_restore(flags);
 }
 
@@ -2518,26 +2558,26 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 	 * Debug check: in the end all mapped classes should
 	 * be gone.
 	 */
-	__raw_spin_lock(&hash_lock);
+	graph_lock();
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
 		if (list_empty(head))
 			continue;
 		list_for_each_entry_safe(class, next, head, hash_entry) {
 			if (unlikely(class == lock->class_cache)) {
-				__raw_spin_unlock(&hash_lock);
-				DEBUG_LOCKS_WARN_ON(1);
+				if (debug_locks_off_graph_unlock())
+					WARN_ON(1);
 				goto out_restore;
 			}
 		}
 	}
-	__raw_spin_unlock(&hash_lock);
+	graph_unlock();
 
 out_restore:
 	raw_local_irq_restore(flags);
 }
 
-void __init lockdep_init(void)
+void lockdep_init(void)
 {
 	int i;