/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 * class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
		/*
		 * The lockdep graph lock isn't locked while we expect it to
		 * be, we're confused now, bye!
		 */
		return DEBUG_LOCKS_WARN_ON(1);
	}

	current->lockdep_recursion--;
	arch_spin_unlock(&lockdep_lock);
	return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	arch_spin_unlock(&lockdep_lock);

	return ret;
}

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated once, during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		/*
		 * Someone passed in garbage, we give up.
		 */
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}

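/*
 * Record @ip in the first free slot of @points[], or find the slot
 * that already holds it; returns the slot index, or LOCKSTAT_POINTS
 * when the array is full and @ip was not found.
 */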
static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}

static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

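/* Fold the accumulated samples in @src into the aggregate @dst. */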
static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}

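/*
 * Sum the per-cpu statistics of @class into a single
 * lock_class_stats snapshot.
 */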
struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)
{
	put_cpu_var(cpu_lock_stats);
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
	put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS - 1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
	u32 k0 = key, k1 = key >> 32;

	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */

	return k0 | (u64)k1 << 32;
}

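/*
 * Disable lockdep processing for the current task. These calls nest
 * (a per-task recursion counter), so each lockdep_off() must be
 * paired with a matching lockdep_on().
 */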
void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else; return 1 instead to allow everything. */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static void print_lockdep_off(const char *bug_msg)
{
	printk(KERN_DEBUG "%s\n", bug_msg);
	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->entries = stack_trace + nr_stack_trace_entries;

	trace->skip = 3;

	save_stack_trace(trace);

	/*
	 * Some daft arches put -1 at the end to indicate it's a full trace.
	 *
	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
	 * complete trace that maxes out the entries provided will be reported
	 * as incomplete, friggin useless </rant>
	 */
	if (trace->nr_entries != 0 &&
	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
		trace->nr_entries--;

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
		dump_stack();

		return 0;
	}

	return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

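/*
 * The usage character for a given irq state, derived from usage_mask:
 *  '.'  the class was never used in this state
 *  '+'  the state was enabled while this lock was held (irq-unsafe)
 *  '-'  the lock was taken in this state's context (irq-safe)
 *  '?'  both of the above were observed
 */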
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	char c = '.';

	if (class->usage_mask & lock_flag(bit + 2))
		c = '+';
	if (class->usage_mask & lock_flag(bit)) {
		c = '-';
		if (class->usage_mask & lock_flag(bit + 2))
			c = '?';
	}

	return c;
}

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE) 						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}

static void __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(KERN_CONT "%s", name);
	} else {
		printk(KERN_CONT "%s", name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		if (class->subclass)
			printk(KERN_CONT "/%d", class->subclass);
	}
}

static void print_lock_name(struct lock_class *class)
{
	char usage[LOCK_USAGE_CHARS];

	get_usage_chars(class, usage);

	printk(KERN_CONT " (");
	__print_lock_name(class);
	printk(KERN_CONT "){%s}", usage);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk(KERN_CONT "%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	/*
	 * We can be called locklessly through debug_show_all_locks() so be
	 * extra careful, the hlock might have been released and cleared.
	 */
	unsigned int class_idx = hlock->class_idx;

	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
	barrier();

	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
		printk(KERN_CONT "<RELEASED>\n");
		return;
	}

	printk(KERN_CONT "%p", hlock->instance);
	print_lock_name(lock_classes + class_idx - 1);
	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *curr)
{
	int i, depth = curr->lockdep_depth;

	if (!depth) {
		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
		return;
	}
	printk("%d lock%s held by %s/%d:\n",
		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));

	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(curr->held_locks + i);
	}
}

static void print_kernel_ident(void)
{
	printk("%s %.*s %s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version,
		print_tainted());
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
#ifdef __KERNEL__
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
#endif

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}

static inline struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	/*
	 * If it is not initialised then it has never been locked,
	 * so it won't be present in the hash table.
	 */
	if (unlikely(!lock->key))
		return NULL;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We do an RCU walk of the hash, see lockdep_free_key_range().
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return NULL;

	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
			 * on some memory? We're most confused.
			 */
			WARN_ON_ONCE(class->name != lock->name);
			return class;
		}
	}

	return NULL;
}

/*
 * Static locks do not have their class-keys yet - for them the key is
 * the lock object itself. If the lock is in the per cpu area, the
 * canonical address of the lock (per cpu offset removed) is used.
 */
static bool assign_lock_key(struct lockdep_map *lock)
{
	unsigned long can_addr, addr = (unsigned long)lock;

	if (__is_kernel_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (__is_module_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (static_obj(lock))
		lock->key = (void *)lock;
	else {
		/* Debug-check: all keys must be persistent! */
		debug_locks_off();
		pr_err("INFO: trying to register non-static key.\n");
		pr_err("the code is fine but needs lockdep annotation.\n");
		pr_err("turning off the locking correctness validator.\n");
		dump_stack();
		return false;
	}

	return true;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		goto out_set_class_cache;

	if (!lock->key) {
		if (!assign_lock_key(lock))
			return NULL;
	} else if (!static_obj(lock->key)) {
		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	if (!graph_lock()) {
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key)
			goto out_unlock_set;
	}

	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
			return NULL;
		}

		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
		dump_stack();
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	debug_atomic_inc(nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	hlist_add_head_rcu(&class->hash_entry, hash_head);
	/*
	 * Add it to the global list of classes:
	 */
	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		graph_unlock();

		printk("\nnew class %px: %s", class->key, class->name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		printk(KERN_CONT "\n");
		dump_stack();

		if (!graph_lock()) {
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();

out_set_class_cache:
	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	/*
	 * Hash collision, did we smoke some? We found a class with a matching
	 * hash but the subclass -- which is hashed in -- didn't match.
	 */
	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
		dump_stack();
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *this, struct list_head *head,
			    unsigned long ip, int distance,
			    struct stack_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->distance = distance;
	entry->trace = *trace;
	/*
	 * Both allocation and removal are done under the graph lock; but
	 * iteration is under RCU-sched; see look_up_lock_class() and
	 * lockdep_free_key_range().
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}

/*
 * For good efficiency of the modulo operation, we use a power of 2.
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to the
 * previously held lock, if there is a circular dependency
 * between them.
 */
struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}

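/*
 * Mark @lock visited in the current BFS pass (identified by
 * lockdep_dependency_gen_id) and record its BFS parent, so that the
 * path back to the search root can be reconstructed later.
 */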
static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
	lock->parent = parent;
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}

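/*
 * Breadth-first search from @source_entry over the dependency graph
 * (following locks_after when @forward, locks_before otherwise),
 * calling @match on every node. Returns 0 and sets *@target_entry on
 * a match, 1 if the subgraph was exhausted without a match, -1 if the
 * queue overflowed and -2 if a corrupted (NULL class) node was hit.
 */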
static int __bfs(struct lock_list *source_entry,
		 void *data,
		 int (*match)(struct lock_list *entry, void *data),
		 struct lock_list **target_entry,
		 int forward)
{
	struct lock_list *entry;
	struct list_head *head;
	struct circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	if (forward)
		head = &source_entry->class->locks_after;
	else
		head = &source_entry->class->locks_before;

	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)source_entry);

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, (unsigned long *)&lock);

		if (!lock->class) {
			ret = -2;
			goto exit;
		}

		if (forward)
			head = &lock->class->locks_after;
		else
			head = &lock->class->locks_before;

		DEBUG_LOCKS_WARN_ON(!irqs_disabled());

		list_for_each_entry_rcu(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;
				mark_lock_accessed(entry, lock);
				if (match(entry, data)) {
					*target_entry = entry;
					ret = 0;
					goto exit;
				}

				if (__cq_enqueue(cq, (unsigned long)entry)) {
					ret = -1;
					goto exit;
				}
				cq_depth = __cq_get_elem_count(cq);
				if (max_bfs_queue_depth < cq_depth)
					max_bfs_queue_depth = cq_depth;
			}
		}
	}
exit:
	return ret;
}

static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 1);
}

static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 0);
}

/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
	if (debug_locks_silent)
		return 0;
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	printk(KERN_CONT ":\n");
	print_stack_trace(&target->trace, 6);

	return 0;
}

static void
print_circular_lock_scenario(struct held_lock *src,
			     struct held_lock *tgt,
			     struct lock_list *prt)
{
	struct lock_class *source = hlock_class(src);
	struct lock_class *target = hlock_class(tgt);
	struct lock_class *parent = prt->class;

	/*
	 * A direct locking problem where unsafe_class lock is taken
	 * directly by safe_class lock, then all we need to show
	 * is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
	 */
	if (parent != source) {
		printk("Chain exists of:\n  ");
		__print_lock_name(source);
		printk(KERN_CONT " --> ");
		__print_lock_name(parent);
		printk(KERN_CONT " --> ");
		__print_lock_name(target);
		printk(KERN_CONT "\n\n");
	}

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(target);
	printk(KERN_CONT ");\n");
	printk("                               lock(");
	__print_lock_name(parent);
	printk(KERN_CONT ");\n");
	printk("                               lock(");
	__print_lock_name(target);
	printk(KERN_CONT ");\n");
	printk("  lock(");
	__print_lock_name(source);
	printk(KERN_CONT ");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
			struct held_lock *check_src,
			struct held_lock *check_tgt)
{
	struct task_struct *curr = current;

	if (debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("======================================================\n");
	pr_warn("WARNING: possible circular locking dependency detected\n");
	print_kernel_ident();
	pr_warn("------------------------------------------------------\n");
	pr_warn("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);

	pr_warn("\nbut task is already holding lock:\n");

	print_lock(check_tgt);
	pr_warn("\nwhich lock already depends on the new lock.\n\n");
	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);

	return 0;
}

static inline int class_equal(struct lock_list *entry, void *data)
{
	return entry->class == data;
}

static noinline int print_circular_bug(struct lock_list *this,
				struct lock_list *target,
				struct held_lock *check_src,
				struct held_lock *check_tgt,
				struct stack_trace *trace)
{
	struct task_struct *curr = current;
	struct lock_list *parent;
	struct lock_list *first_parent;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	if (!save_trace(&this->trace))
		return 0;

	depth = get_lock_depth(target);

	print_circular_bug_header(target, depth, check_src, check_tgt);

	parent = get_lock_parent(target);
	first_parent = parent;

	while (parent) {
		print_circular_bug_entry(parent, --depth);
		parent = get_lock_parent(parent);
	}

	printk("\nother info that might help us debug this:\n\n");
	print_circular_lock_scenario(check_src, check_tgt,
				     first_parent);

	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static noinline int print_bfs_bug(int ret)
{
	if (!debug_locks_off_graph_unlock())
		return 0;

	/*
	 * Breadth-first-search failed, graph got corrupted?
	 */
	WARN(1, "lockdep bfs error:%d\n", ret);

	return 0;
}

static int noop_count(struct lock_list *entry, void *data)
{
	(*(unsigned long *)data)++;
	return 0;
}

static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_forward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_backward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

/*
 * Prove that the dependency graph starting at <entry> cannot
 * lead to <target>. Print an error and return 0 if it does.
 */
static noinline int
check_noncircular(struct lock_list *root, struct lock_class *target,
		struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_cyclic_checks);

	result = __bfs_forwards(root, target, class_equal, target_entry);

	return result;
}

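/*
 * Check whether a path <root> -> ... -> <target> already exists in
 * the forwards graph, in which case adding a direct <root> -> <target>
 * dependency would be redundant. Returns 0 (and sets *@target_entry)
 * if such a path was found.
 */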
static noinline int
check_redundant(struct lock_list *root, struct lock_class *target,
		struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_redundant_checks);

	result = __bfs_forwards(root, target, class_equal, target_entry);

	return result;
}

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */

static inline int usage_match(struct lock_list *entry, void *bit)
{
	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
}

/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_forwards_checks);

	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_backwards_checks);

	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

static void print_lock_class_header(struct lock_class *class, int depth)
{
	int bit;

	printk("%*s->", depth, "");
	print_lock_name(class);
	printk(KERN_CONT " ops: %lu", class->ops);
	printk(KERN_CONT " {\n");

	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
		if (class->usage_mask & (1 << bit)) {
			int len = depth;

			len += printk("%*s   %s", depth, "", usage_str[bit]);
			len += printk(KERN_CONT " at:\n");
			print_stack_trace(class->usage_traces + bit, len);
		}
	}
	printk("%*s }\n", depth, "");

	printk("%*s ... key      at: [<%px>] %pS\n",
		depth, "", class->key, class->key);
}

/*
 * printk the shortest lock dependencies from @leaf to @root in reverse order:
 */
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
				struct lock_list *root)
{
	struct lock_list *entry = leaf;
	int depth;

	/* compute depth from generated tree by BFS */
	depth = get_lock_depth(leaf);

	do {
		print_lock_class_header(entry->class, depth);
		printk("%*s ... acquired at:\n", depth, "");
		print_stack_trace(&entry->trace, 2);
		printk("\n");

		if (depth == 0 && (entry != root)) {
			printk("lockdep:%s bad path found in chain graph\n", __func__);
			break;
		}

		entry = get_lock_parent(entry);
		depth--;
	} while (entry && (depth >= 0));
}

static void
print_irq_lock_scenario(struct lock_list *safe_entry,
			struct lock_list *unsafe_entry,
			struct lock_class *prev_class,
			struct lock_class *next_class)
{
	struct lock_class *safe_class = safe_entry->class;
	struct lock_class *unsafe_class = unsafe_entry->class;
	struct lock_class *middle_class = prev_class;

	if (middle_class == safe_class)
		middle_class = next_class;

	/*
	 * A direct locking problem where unsafe_class lock is taken
	 * directly by safe_class lock, then all we need to show
	 * is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
	 */
	if (middle_class != unsafe_class) {
		printk("Chain exists of:\n  ");
		__print_lock_name(safe_class);
		printk(KERN_CONT " --> ");
		__print_lock_name(middle_class);
		printk(KERN_CONT " --> ");
		__print_lock_name(unsafe_class);
		printk(KERN_CONT "\n\n");
	}

	printk(" Possible interrupt unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(unsafe_class);
	printk(KERN_CONT ");\n");
	printk("                               local_irq_disable();\n");
	printk("                               lock(");
	__print_lock_name(safe_class);
	printk(KERN_CONT ");\n");
	printk("                               lock(");
	__print_lock_name(middle_class);
	printk(KERN_CONT ");\n");
	printk("  <Interrupt>\n");
	printk("    lock(");
	__print_lock_name(safe_class);
	printk(KERN_CONT ");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

static int
print_bad_irq_dependency(struct task_struct *curr,
			 struct lock_list *prev_root,
			 struct lock_list *next_root,
			 struct lock_list *backwards_entry,
			 struct lock_list *forwards_entry,
			 struct held_lock *prev,
			 struct held_lock *next,
			 enum lock_usage_bit bit1,
			 enum lock_usage_bit bit2,
			 const char *irqclass)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("=====================================================\n");
	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
		irqclass, irqclass);
	print_kernel_ident();
	pr_warn("-----------------------------------------------------\n");
	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
		curr->comm, task_pid_nr(curr),
		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
		curr->hardirqs_enabled,
		curr->softirqs_enabled);
	print_lock(next);

	pr_warn("\nand this task is already holding:\n");
	print_lock(prev);
	pr_warn("which would create a new lock dependency:\n");
	print_lock_name(hlock_class(prev));
	pr_cont(" ->");
	print_lock_name(hlock_class(next));
	pr_cont("\n");

	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
		irqclass);
	print_lock_name(backwards_entry->class);
	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);

	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);

	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
	print_lock_name(forwards_entry->class);
	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
	pr_warn("...");

	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);

	pr_warn("\nother info that might help us debug this:\n\n");
	print_irq_lock_scenario(backwards_entry, forwards_entry,
				hlock_class(prev), hlock_class(next));

	lockdep_print_held_locks(curr);

	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
	if (!save_trace(&prev_root->trace))
		return 0;
	print_shortest_lock_dependencies(backwards_entry, prev_root);

	pr_warn("\nthe dependencies between the lock to be acquired");
	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
	if (!save_trace(&next_root->trace))
		return 0;
	print_shortest_lock_dependencies(forwards_entry, next_root);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

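/*
 * Prove that the new dependency <prev> -> <next> does not connect a
 * lock reachable backwards from <prev> with usage @bit_backwards to a
 * lock reachable forwards from <next> with usage @bit_forwards;
 * report the resulting bad irq dependency if it does.
 */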
static int
check_usage(struct task_struct *curr, struct held_lock *prev,
	    struct held_lock *next, enum lock_usage_bit bit_backwards,
	    enum lock_usage_bit bit_forwards, const char *irqclass)
{
	int ret;
	struct lock_list this, that;
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list *uninitialized_var(target_entry1);

	this.parent = NULL;

	this.class = hlock_class(prev);
	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	that.parent = NULL;
	that.class = hlock_class(next);
	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_bad_irq_dependency(curr, &this, &that,
			target_entry, target_entry1,
			prev, next,
			bit_backwards, bit_forwards, irqclass);
}

static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE),
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline const char *state_name(enum lock_usage_bit bit)
{
	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
}

static int exclusive_bit(int new_bit)
{
	/*
	 * USED_IN
	 * USED_IN_READ
	 * ENABLED
	 * ENABLED_READ
	 *
	 * bit 0 - write/read
	 * bit 1 - used_in/enabled
	 * bit 2+  state
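	 *
	 * E.g. the exclusive counterpart of LOCK_USED_IN_HARDIRQ (an
	 * irq-safe usage) is LOCK_ENABLED_HARDIRQ (its irq-unsafe
	 * opposite), with any _READ component stripped off.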
	 */

	int state = new_bit & ~3;
	int dir = new_bit & 2;

	/*
	 * keep state, bit flip the direction and strip read.
	 */
	return state | (dir ^ 2);
}

static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
			   struct held_lock *next, enum lock_usage_bit bit)
{
	/*
	 * Prove that the new dependency does not connect a hardirq-safe
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	bit++; /* _READ */

	/*
	 * Prove that the new dependency does not connect a hardirq-safe-read
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	return 1;
}

static int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
#define LOCKDEP_STATE(__STATE)						\
	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
		return 0;
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	return 1;
}

static void inc_chains(void)
{
	if (current->hardirq_context)
		nr_hardirq_chains++;
	else {
		if (current->softirq_context)
			nr_softirq_chains++;
		else
			nr_process_chains++;
	}
}

#else

static inline int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
	return 1;
}

static inline void inc_chains(void)
{
	nr_process_chains++;
}

#endif

static void
print_deadlock_scenario(struct held_lock *nxt,
			struct held_lock *prv)
{
	struct lock_class *next = hlock_class(nxt);
	struct lock_class *prev = hlock_class(prv);

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0\n");
	printk("       ----\n");
	printk("  lock(");
	__print_lock_name(prev);
	printk(KERN_CONT ");\n");
	printk("  lock(");
	__print_lock_name(next);
	printk(KERN_CONT ");\n");
	printk("\n *** DEADLOCK ***\n\n");
	printk(" May be due to missing lock nesting notation\n\n");
}

static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
		   struct held_lock *next)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("============================================\n");
	pr_warn("WARNING: possible recursive locking detected\n");
	print_kernel_ident();
	pr_warn("--------------------------------------------\n");
	pr_warn("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(next);
	pr_warn("\nbut task is already holding lock:\n");
	print_lock(prev);

	pr_warn("\nother info that might help us debug this:\n");
	print_deadlock_scenario(next, prev);
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
	       struct lockdep_map *next_instance, int read)
{
	struct held_lock *prev;
	struct held_lock *nest = NULL;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		prev = curr->held_locks + i;

		if (prev->instance == next->nest_lock)
			nest = prev;

		if (hlock_class(prev) != hlock_class(next))
			continue;

		/*
		 * Allow read-after-read recursion of the same
		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
		 */
		if ((read == 2) && prev->read)
			return 2;

		/*
		 * We're holding the nest_lock, which serializes this lock's
		 * nesting behaviour.
		 */
		if (nest)
			return 2;

		return print_deadlock_bug(curr, prev, next);
	}
	return 1;
}

/*
 * There was a chain-cache miss, and we are about to add a new dependency
 * to a previous lock. We recursively validate the following rules:
 *
 *  - would the adding of the <prev> -> <next> dependency create a
 *    circular dependency in the graph? [== circular deadlock]
 *
 *  - does the new prev->next dependency connect any hardirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with hardirq contexts]
 *
 *  - does the new prev->next dependency connect any softirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with softirq contexts]
 *
 * any of these scenarios could lead to a deadlock.
 *
 * Then if all the validations pass, we add the forwards and backwards
 * dependency.
 */
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
	       struct held_lock *next, int distance, struct stack_trace *trace,
	       int (*save)(struct stack_trace *trace))
{
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list *entry;
	struct lock_list this;
	int ret;

	/*
	 * Prove that the new <prev> -> <next> dependency would not
	 * create a circular dependency in the graph. (We do this by
	 * forward-recursing into the graph starting at <next>, and
	 * checking whether we can reach <prev>.)
	 *
	 * We are using global variables to control the recursion, to
	 * keep the stackframe size of the recursive functions low:
	 */
	this.class = hlock_class(next);
	this.parent = NULL;
	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
	if (unlikely(!ret)) {
		if (!trace->entries) {
			/*
			 * If @save fails here, the printing might trigger
			 * a WARN but because of the !nr_entries it should
			 * not do bad things.
			 */
			save(trace);
		}
		return print_circular_bug(&this, target_entry, next, prev, trace);
	}
	else if (unlikely(ret < 0))
		return print_bfs_bug(ret);

	if (!check_prev_add_irq(curr, prev, next))
		return 0;

	/*
	 * For recursive read-locks we do all the dependency checks,
	 * but we don't store read-triggered dependencies (only
	 * write-triggered dependencies). This ensures that only the
	 * write-side dependencies matter, and that if for example a
	 * write-lock never takes any other locks, then the reads are
	 * equivalent to a NOP.
	 */
	if (next->read == 2 || prev->read == 2)
		return 1;
	/*
	 * Is the <prev> -> <next> dependency already present?
	 *
	 * (this may occur even though this is a new chain: consider
	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
	 *  chains - the second one will be new, but L1 already has
	 *  L2 added to its dependency list, due to the first chain.)
	 */
	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
		if (entry->class == hlock_class(next)) {
			if (distance == 1)
				entry->distance = 1;
			return 1;
		}
	}

	/*
	 * Is the <prev> -> <next> link redundant?
	 */
	this.class = hlock_class(prev);
	this.parent = NULL;
	ret = check_redundant(&this, hlock_class(next), &target_entry);
	if (!ret) {
		debug_atomic_inc(nr_redundant);
		return 2;
	}
	if (ret < 0)
		return print_bfs_bug(ret);

	if (!trace->entries && !save(trace))
		return 0;

	/*
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
	ret = add_lock_to_list(hlock_class(next),
			       &hlock_class(prev)->locks_after,
			       next->acquire_ip, distance, trace);

	if (!ret)
		return 0;

	ret = add_lock_to_list(hlock_class(prev),
			       &hlock_class(next)->locks_before,
			       next->acquire_ip, distance, trace);
	if (!ret)
		return 0;

	return 2;
}

/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
	int depth = curr->lockdep_depth;
	struct held_lock *hlock;
	struct stack_trace trace = {
		.nr_entries = 0,
		.max_entries = 0,
		.entries = NULL,
		.skip = 0,
	};

	/*
	 * Debugging checks.
	 *
	 * Depth must not be zero for a non-head lock:
	 */
	if (!depth)
		goto out_bug;
	/*
	 * At least two relevant locks must exist for this
	 * to be a head:
	 */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;

	for (;;) {
		int distance = curr->lockdep_depth - depth + 1;
		hlock = curr->held_locks + depth - 1;

		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2 && hlock->check) {
			int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
			if (!ret)
				return 0;

			/*
			 * Stop after the first non-trylock entry,
			 * as non-trylock entries have added their
			 * own direct dependencies already, so this
			 * lock is connected to them indirectly:
			 */
			if (!hlock->trylock)
				break;
		}

		depth--;
		/*
		 * End of lock-stack?
		 */
		if (!depth)
			break;
		/*
		 * Stop the search if we cross into another context:
		 */
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
	}
	return 1;
out_bug:
	if (!debug_locks_off_graph_unlock())
		return 0;

	/*
	 * Clearly we all shouldn't be here, but since we made it we
	 * can reliably say we messed up our state. See the above two
	 * gotos for reasons why we could possibly end up here.
	 */
	WARN_ON(1);

	return 0;
}

unsigned long nr_lock_chains;
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
int nr_chain_hlocks;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
	return lock_classes + chain_hlocks[chain->base + i];
}

/*
 * Returns the index of the first held_lock of the current chain
 */
static inline int get_first_held_lock(struct task_struct *curr,
					struct held_lock *hlock)
{
	int i;
	struct held_lock *hlock_curr;

	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
		hlock_curr = curr->held_locks + i;
		if (hlock_curr->irq_context != hlock->irq_context)
			break;
	}

	return ++i;
}

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Returns the next chain_key iteration
 */
static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
{
	u64 new_chain_key = iterate_chain_key(chain_key, class_idx);

	printk(" class_idx:%d -> chain_key:%016Lx",
		class_idx,
		(unsigned long long)new_chain_key);
	return new_chain_key;
}

static void
print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
{
	struct held_lock *hlock;
	u64 chain_key = 0;
	int depth = curr->lockdep_depth;
	int i;

	printk("depth: %u\n", depth + 1);
	for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
		hlock = curr->held_locks + i;
		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);

		print_lock(hlock);
	}

	print_chain_key_iteration(hlock_next->class_idx, chain_key);
	print_lock(hlock_next);
}

static void print_chain_keys_chain(struct lock_chain *chain)
{
	int i;
	u64 chain_key = 0;
	int class_id;

	printk("depth: %u\n", chain->depth);
	for (i = 0; i < chain->depth; i++) {
		class_id = chain_hlocks[chain->base + i];
		chain_key = print_chain_key_iteration(class_id + 1, chain_key);

		print_lock_name(lock_classes + class_id);
		printk("\n");
	}
}

static void print_collision(struct task_struct *curr,
			struct held_lock *hlock_next,
			struct lock_chain *chain)
{
	pr_warn("\n");
	pr_warn("============================\n");
	pr_warn("WARNING: chain_key collision\n");
	print_kernel_ident();
	pr_warn("----------------------------\n");
	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
	pr_warn("Hash chain already cached but the contents don't match!\n");

	pr_warn("Held locks:");
	print_chain_keys_held_locks(curr, hlock_next);

	pr_warn("Locks in cached chain:");
	print_chain_keys_chain(chain);

	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
#endif

2120/*
2121 * Checks whether the chain and the current held locks are consistent
2122 * in depth and also in content. If they are not, it most likely means
2123 * that there was a collision during the calculation of the chain_key.
2124 * Returns: 0 if the check failed, 1 if it passed.
2125 */
2126static int check_no_collision(struct task_struct *curr,
2127 struct held_lock *hlock,
2128 struct lock_chain *chain)
2129{
2130#ifdef CONFIG_DEBUG_LOCKDEP
2131 int i, j, id;
2132
2133 i = get_first_held_lock(curr, hlock);
2134
2135 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2136 print_collision(curr, hlock, chain);
2137 return 0;
2138 }
2139
2140 for (j = 0; j < chain->depth - 1; j++, i++) {
2141 id = curr->held_locks[i].class_idx - 1;
2142
2143 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2144 print_collision(curr, hlock, chain);
2145 return 0;
2146 }
2147 }
2148#endif
2149 return 1;
2150}
2151
2152/*
2153 * This is for building a chain between just two different classes,
2154 * instead of adding a new hlock upon current, which is done by
2155 * add_chain_cache().
2156 *
2157 * This can be called in any context with two classes, while
2158 * add_chain_cache() must be done within the lock owner's context
2159 * since it uses hlock which might be racy in another context.
2160 */
2161static inline int add_chain_cache_classes(unsigned int prev,
2162 unsigned int next,
2163 unsigned int irq_context,
2164 u64 chain_key)
2165{
2166 struct hlist_head *hash_head = chainhashentry(chain_key);
2167 struct lock_chain *chain;
2168
2169 /*
2170 * Allocate a new chain entry from the static array, and add
2171 * it to the hash:
2172 */
2173
2174 /*
2175 * We might need to take the graph lock, ensure we've got IRQs
2176 * disabled to make this an IRQ-safe lock.. for recursion reasons
2177 * lockdep won't complain about its own locking errors.
2178 */
2179 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2180 return 0;
2181
2182 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2183 if (!debug_locks_off_graph_unlock())
2184 return 0;
2185
2186 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2187 dump_stack();
2188 return 0;
2189 }
2190
2191 chain = lock_chains + nr_lock_chains++;
2192 chain->chain_key = chain_key;
2193 chain->irq_context = irq_context;
2194 chain->depth = 2;
2195 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2196 chain->base = nr_chain_hlocks;
2197 nr_chain_hlocks += chain->depth;
2198 chain_hlocks[chain->base] = prev - 1;
2199		chain_hlocks[chain->base + 1] = next - 1;
2200 }
2201#ifdef CONFIG_DEBUG_LOCKDEP
2202 /*
2203 * Important for check_no_collision().
2204 */
2205 else {
2206 if (!debug_locks_off_graph_unlock())
2207 return 0;
2208
2209 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2210 dump_stack();
2211 return 0;
2212 }
2213#endif
2214
2215 hlist_add_head_rcu(&chain->entry, hash_head);
2216 debug_atomic_inc(chain_lookup_misses);
2217 inc_chains();
2218
2219 return 1;
2220}
2221
2222/*
2223 * Adds a dependency chain into the chain hashtable, and must be called
2224 * with graph_lock held.
2225 *
2226 * Returns 0 on failure, with graph_lock released.
2227 * Returns 1 on success, with graph_lock still held.
2228 */
2229static inline int add_chain_cache(struct task_struct *curr,
2230 struct held_lock *hlock,
2231 u64 chain_key)
2232{
2233 struct lock_class *class = hlock_class(hlock);
2234 struct hlist_head *hash_head = chainhashentry(chain_key);
2235 struct lock_chain *chain;
2236 int i, j;
2237
2238 /*
2239 * Allocate a new chain entry from the static array, and add
2240 * it to the hash:
2241 */
2242
2243 /*
2244 * We might need to take the graph lock, ensure we've got IRQs
2245 * disabled to make this an IRQ-safe lock.. for recursion reasons
2246 * lockdep won't complain about its own locking errors.
2247 */
2248 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2249 return 0;
2250
2251 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2252 if (!debug_locks_off_graph_unlock())
2253 return 0;
2254
2255 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2256 dump_stack();
2257 return 0;
2258 }
2259 chain = lock_chains + nr_lock_chains++;
2260 chain->chain_key = chain_key;
2261 chain->irq_context = hlock->irq_context;
2262 i = get_first_held_lock(curr, hlock);
2263 chain->depth = curr->lockdep_depth + 1 - i;
2264
2265 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2266 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
2267 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2268
2269 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2270 chain->base = nr_chain_hlocks;
2271 for (j = 0; j < chain->depth - 1; j++, i++) {
2272 int lock_id = curr->held_locks[i].class_idx - 1;
2273 chain_hlocks[chain->base + j] = lock_id;
2274 }
2275 chain_hlocks[chain->base + j] = class - lock_classes;
2276 }
2277
2278 if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
2279 nr_chain_hlocks += chain->depth;
2280
2281#ifdef CONFIG_DEBUG_LOCKDEP
2282 /*
2283 * Important for check_no_collision().
2284 */
2285 if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
2286 if (!debug_locks_off_graph_unlock())
2287 return 0;
2288
2289 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2290 dump_stack();
2291 return 0;
2292 }
2293#endif
2294
2295 hlist_add_head_rcu(&chain->entry, hash_head);
2296 debug_atomic_inc(chain_lookup_misses);
2297 inc_chains();
2298
2299 return 1;
2300}
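
/*
 * Resulting chain_hlocks[] layout (a sketch): one flat u16 array in which
 * each chain owns the slice [chain->base, chain->base + chain->depth),
 * holding class indices oldest-first, with the class being acquired last.
 */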
2301
2302/*
2303 * Look up a dependency chain.
2304 */
2305static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
2306{
2307 struct hlist_head *hash_head = chainhashentry(chain_key);
2308 struct lock_chain *chain;
2309
2310 /*
2311 * We can walk it lock-free, because entries only get added
2312 * to the hash:
2313 */
2314 hlist_for_each_entry_rcu(chain, hash_head, entry) {
2315 if (chain->chain_key == chain_key) {
2316 debug_atomic_inc(chain_lookup_hits);
2317 return chain;
2318 }
2319 }
2320 return NULL;
2321}
2322
2323/*
2324 * If the key is not present yet in the dependency chain cache, add it
2325 * and return 1 - in this case the new dependency chain will be
2326 * validated. If the key is already hashed, return 0.
2327 * (On return with 1, graph_lock is held.)
2328 */
2329static inline int lookup_chain_cache_add(struct task_struct *curr,
2330 struct held_lock *hlock,
2331 u64 chain_key)
2332{
2333 struct lock_class *class = hlock_class(hlock);
2334 struct lock_chain *chain = lookup_chain_cache(chain_key);
2335
2336 if (chain) {
2337cache_hit:
2338 if (!check_no_collision(curr, hlock, chain))
2339 return 0;
2340
2341 if (very_verbose(class)) {
2342 printk("\nhash chain already cached, key: "
2343 "%016Lx tail class: [%px] %s\n",
2344 (unsigned long long)chain_key,
2345 class->key, class->name);
2346 }
2347
2348 return 0;
2349 }
2350
2351 if (very_verbose(class)) {
2352 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
2353 (unsigned long long)chain_key, class->key, class->name);
2354 }
2355
2356 if (!graph_lock())
2357 return 0;
2358
2359 /*
2360 * We have to walk the chain again locked - to avoid duplicates:
2361 */
2362 chain = lookup_chain_cache(chain_key);
2363 if (chain) {
2364 graph_unlock();
2365 goto cache_hit;
2366 }
2367
2368 if (!add_chain_cache(curr, hlock, chain_key))
2369 return 0;
2370
2371 return 1;
2372}
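
/*
 * The shape above is the usual optimistic double-checked pattern
 * (pseudo-code sketch of the flow, not additional locking rules):
 *
 *	if (!lookup())                     <- lock-free RCU walk
 *		if (graph_lock())
 *			if (!lookup())     <- re-check under the lock
 *				add();     <- first one in, insert
 */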
2373
2374static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2375 struct held_lock *hlock, int chain_head, u64 chain_key)
2376{
2377 /*
2378 * Trylock needs to maintain the stack of held locks, but it
2379 * does not add new dependencies, because trylock can be done
2380 * in any order.
2381 *
2382 * We look up the chain_key and do the O(N^2) check and update of
2383 * the dependencies only if this is a new dependency chain.
2384	 * (If lookup_chain_cache_add() returns 1 it acquires
2385 * graph_lock for us)
2386 */
2387 if (!hlock->trylock && hlock->check &&
2388 lookup_chain_cache_add(curr, hlock, chain_key)) {
2389 /*
2390 * Check whether last held lock:
2391 *
2392 * - is irq-safe, if this lock is irq-unsafe
2393 * - is softirq-safe, if this lock is hardirq-unsafe
2394 *
2395 * And check whether the new lock's dependency graph
2396 * could lead back to the previous lock.
2397 *
2398		 * Any of these scenarios could lead to a deadlock.
2400 */
2401 int ret = check_deadlock(curr, hlock, lock, hlock->read);
2402
2403 if (!ret)
2404 return 0;
2405 /*
2406 * Mark recursive read, as we jump over it when
2407 * building dependencies (just like we jump over
2408 * trylock entries):
2409 */
2410 if (ret == 2)
2411 hlock->read = 2;
2412 /*
2413 * Add dependency only if this lock is not the head
2414 * of the chain, and if it's not a secondary read-lock:
2415 */
2416 if (!chain_head && ret != 2) {
2417 if (!check_prevs_add(curr, hlock))
2418 return 0;
2419 }
2420
2421 graph_unlock();
2422 } else {
2423 /* after lookup_chain_cache_add(): */
2424 if (unlikely(!debug_locks))
2425 return 0;
2426 }
2427
2428 return 1;
2429}
2430#else
2431static inline int validate_chain(struct task_struct *curr,
2432 struct lockdep_map *lock, struct held_lock *hlock,
2433 int chain_head, u64 chain_key)
2434{
2435 return 1;
2436}
2437#endif
2438
2439/*
2440 * We are building curr_chain_key incrementally, so double-check
2441 * it from scratch, to make sure that it's done correctly:
2442 */
2443static void check_chain_key(struct task_struct *curr)
2444{
2445#ifdef CONFIG_DEBUG_LOCKDEP
2446 struct held_lock *hlock, *prev_hlock = NULL;
2447 unsigned int i;
2448 u64 chain_key = 0;
2449
2450 for (i = 0; i < curr->lockdep_depth; i++) {
2451 hlock = curr->held_locks + i;
2452 if (chain_key != hlock->prev_chain_key) {
2453 debug_locks_off();
2454 /*
2455 * We got mighty confused, our chain keys don't match
2456			 * with what we expect; did someone trample on our task state?
2457 */
2458 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2459 curr->lockdep_depth, i,
2460 (unsigned long long)chain_key,
2461 (unsigned long long)hlock->prev_chain_key);
2462 return;
2463 }
2464 /*
2465		 * Whoops, ran out of static storage again?
2466 */
2467 if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2468 return;
2469
2470 if (prev_hlock && (prev_hlock->irq_context !=
2471 hlock->irq_context))
2472 chain_key = 0;
2473 chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2474 prev_hlock = hlock;
2475 }
2476 if (chain_key != curr->curr_chain_key) {
2477 debug_locks_off();
2478 /*
2479		 * More smoking hash instead of calculating it, damn, see these
2480 * numbers float.. I bet that a pink elephant stepped on my memory.
2481 */
2482 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2483 curr->lockdep_depth, i,
2484 (unsigned long long)chain_key,
2485 (unsigned long long)curr->curr_chain_key);
2486 }
2487#endif
2488}
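
/*
 * The invariant re-checked above, stated once (a sketch): for each i,
 * held_locks[i].prev_chain_key is the hash of the class indices of
 * entries 0..i-1, the hash restarts at 0 whenever irq_context changes,
 * and curr_chain_key is the hash over the whole stack.
 */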
2489
2490static void
2491print_usage_bug_scenario(struct held_lock *lock)
2492{
2493 struct lock_class *class = hlock_class(lock);
2494
2495 printk(" Possible unsafe locking scenario:\n\n");
2496 printk(" CPU0\n");
2497 printk(" ----\n");
2498 printk(" lock(");
2499 __print_lock_name(class);
2500 printk(KERN_CONT ");\n");
2501 printk(" <Interrupt>\n");
2502 printk(" lock(");
2503 __print_lock_name(class);
2504 printk(KERN_CONT ");\n");
2505 printk("\n *** DEADLOCK ***\n\n");
2506}
2507
2508static int
2509print_usage_bug(struct task_struct *curr, struct held_lock *this,
2510 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2511{
2512 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2513 return 0;
2514
2515 pr_warn("\n");
2516 pr_warn("================================\n");
2517 pr_warn("WARNING: inconsistent lock state\n");
2518 print_kernel_ident();
2519 pr_warn("--------------------------------\n");
2520
2521 pr_warn("inconsistent {%s} -> {%s} usage.\n",
2522 usage_str[prev_bit], usage_str[new_bit]);
2523
2524 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2525 curr->comm, task_pid_nr(curr),
2526 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2527 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2528 trace_hardirqs_enabled(curr),
2529 trace_softirqs_enabled(curr));
2530 print_lock(this);
2531
2532 pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
2533 print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2534
2535 print_irqtrace_events(curr);
2536 pr_warn("\nother info that might help us debug this:\n");
2537 print_usage_bug_scenario(this);
2538
2539 lockdep_print_held_locks(curr);
2540
2541 pr_warn("\nstack backtrace:\n");
2542 dump_stack();
2543
2544 return 0;
2545}
2546
2547/*
2548 * Print out an error if an invalid bit is set:
2549 */
2550static inline int
2551valid_state(struct task_struct *curr, struct held_lock *this,
2552 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2553{
2554 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2555 return print_usage_bug(curr, this, bad_bit, new_bit);
2556 return 1;
2557}
2558
2559static int mark_lock(struct task_struct *curr, struct held_lock *this,
2560 enum lock_usage_bit new_bit);
2561
2562#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2563
2564/*
2565 * print irq inversion bug:
2566 */
2567static int
2568print_irq_inversion_bug(struct task_struct *curr,
2569 struct lock_list *root, struct lock_list *other,
2570 struct held_lock *this, int forwards,
2571 const char *irqclass)
2572{
2573 struct lock_list *entry = other;
2574 struct lock_list *middle = NULL;
2575 int depth;
2576
2577 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2578 return 0;
2579
2580 pr_warn("\n");
2581 pr_warn("========================================================\n");
2582 pr_warn("WARNING: possible irq lock inversion dependency detected\n");
2583 print_kernel_ident();
2584 pr_warn("--------------------------------------------------------\n");
2585 pr_warn("%s/%d just changed the state of lock:\n",
2586 curr->comm, task_pid_nr(curr));
2587 print_lock(this);
2588 if (forwards)
2589 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2590 else
2591 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2592 print_lock_name(other->class);
2593 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2594
2595 pr_warn("\nother info that might help us debug this:\n");
2596
2597 /* Find a middle lock (if one exists) */
2598 depth = get_lock_depth(other);
2599 do {
2600 if (depth == 0 && (entry != root)) {
2601 pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
2602 break;
2603 }
2604 middle = entry;
2605 entry = get_lock_parent(entry);
2606 depth--;
2607 } while (entry && entry != root && (depth >= 0));
2608 if (forwards)
2609 print_irq_lock_scenario(root, other,
2610 middle ? middle->class : root->class, other->class);
2611 else
2612 print_irq_lock_scenario(other, root,
2613 middle ? middle->class : other->class, root->class);
2614
2615 lockdep_print_held_locks(curr);
2616
2617 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2618 if (!save_trace(&root->trace))
2619 return 0;
2620 print_shortest_lock_dependencies(other, root);
2621
2622 pr_warn("\nstack backtrace:\n");
2623 dump_stack();
2624
2625 return 0;
2626}
2627
2628/*
2629 * Prove that in the forwards-direction subgraph starting at <this>
2630 * there is no lock matching <mask>:
2631 */
2632static int
2633check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2634 enum lock_usage_bit bit, const char *irqclass)
2635{
2636 int ret;
2637 struct lock_list root;
2638 struct lock_list *uninitialized_var(target_entry);
2639
2640 root.parent = NULL;
2641 root.class = hlock_class(this);
2642 ret = find_usage_forwards(&root, bit, &target_entry);
2643 if (ret < 0)
2644 return print_bfs_bug(ret);
2645 if (ret == 1)
2646 return ret;
2647
2648 return print_irq_inversion_bug(curr, &root, target_entry,
2649 this, 1, irqclass);
2650}
2651
2652/*
2653 * Prove that in the backwards-direction subgraph starting at <this>
2654 * there is no lock matching <mask>:
2655 */
2656static int
2657check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2658 enum lock_usage_bit bit, const char *irqclass)
2659{
2660 int ret;
2661 struct lock_list root;
2662 struct lock_list *uninitialized_var(target_entry);
2663
2664 root.parent = NULL;
2665 root.class = hlock_class(this);
2666 ret = find_usage_backwards(&root, bit, &target_entry);
2667 if (ret < 0)
2668 return print_bfs_bug(ret);
2669 if (ret == 1)
2670 return ret;
2671
2672 return print_irq_inversion_bug(curr, &root, target_entry,
2673 this, 0, irqclass);
2674}
2675
2676void print_irqtrace_events(struct task_struct *curr)
2677{
2678 printk("irq event stamp: %u\n", curr->irq_events);
2679 printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
2680 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
2681 (void *)curr->hardirq_enable_ip);
2682 printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
2683 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
2684 (void *)curr->hardirq_disable_ip);
2685 printk("softirqs last enabled at (%u): [<%px>] %pS\n",
2686 curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
2687 (void *)curr->softirq_enable_ip);
2688 printk("softirqs last disabled at (%u): [<%px>] %pS\n",
2689 curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
2690 (void *)curr->softirq_disable_ip);
2691}
2692
2693static int HARDIRQ_verbose(struct lock_class *class)
2694{
2695#if HARDIRQ_VERBOSE
2696 return class_filter(class);
2697#endif
2698 return 0;
2699}
2700
2701static int SOFTIRQ_verbose(struct lock_class *class)
2702{
2703#if SOFTIRQ_VERBOSE
2704 return class_filter(class);
2705#endif
2706 return 0;
2707}
2708
2709#define STRICT_READ_CHECKS 1
2710
2711static int (*state_verbose_f[])(struct lock_class *class) = {
2712#define LOCKDEP_STATE(__STATE) \
2713 __STATE##_verbose,
2714#include "lockdep_states.h"
2715#undef LOCKDEP_STATE
2716};
2717
2718static inline int state_verbose(enum lock_usage_bit bit,
2719 struct lock_class *class)
2720{
2721 return state_verbose_f[bit >> 2](class);
2722}
2723
2724typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2725 enum lock_usage_bit bit, const char *name);
2726
2727static int
2728mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2729 enum lock_usage_bit new_bit)
2730{
2731 int excl_bit = exclusive_bit(new_bit);
2732 int read = new_bit & 1;
2733 int dir = new_bit & 2;
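
	/*
	 * Bit layout behind these masks (a sketch, per the LOCKDEP_STATE
	 * expansion of the usage bits): bit 0 = READ, bit 1 = ENABLED vs
	 * USED_IN, bits 2.. = state index. E.g. with HARDIRQ as state 0
	 * and SOFTIRQ as state 1, LOCK_ENABLED_SOFTIRQ_READ is
	 * (1 << 2) + 2 + 1 == 7, matching the "bit >> 2" in
	 * state_verbose().
	 */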
2734
2735 /*
2736 * mark USED_IN has to look forwards -- to ensure no dependency
2737 * has ENABLED state, which would allow recursion deadlocks.
2738 *
2739 * mark ENABLED has to look backwards -- to ensure no dependee
2740 * has USED_IN state, which, again, would allow recursion deadlocks.
2741 */
2742 check_usage_f usage = dir ?
2743 check_usage_backwards : check_usage_forwards;
2744
2745 /*
2746 * Validate that this particular lock does not have conflicting
2747 * usage states.
2748 */
2749 if (!valid_state(curr, this, new_bit, excl_bit))
2750 return 0;
2751
2752 /*
2753 * Validate that the lock dependencies don't have conflicting usage
2754 * states.
2755 */
2756 if ((!read || !dir || STRICT_READ_CHECKS) &&
2757 !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2758 return 0;
2759
2760 /*
2761 * Check for read in write conflicts
2762 */
2763 if (!read) {
2764 if (!valid_state(curr, this, new_bit, excl_bit + 1))
2765 return 0;
2766
2767 if (STRICT_READ_CHECKS &&
2768 !usage(curr, this, excl_bit + 1,
2769 state_name(new_bit + 1)))
2770 return 0;
2771 }
2772
2773 if (state_verbose(new_bit, hlock_class(this)))
2774 return 2;
2775
2776 return 1;
2777}
2778
2779enum mark_type {
2780#define LOCKDEP_STATE(__STATE) __STATE,
2781#include "lockdep_states.h"
2782#undef LOCKDEP_STATE
2783};
2784
2785/*
2786 * Mark all held locks with a usage bit:
2787 */
2788static int
2789mark_held_locks(struct task_struct *curr, enum mark_type mark)
2790{
2791 enum lock_usage_bit usage_bit;
2792 struct held_lock *hlock;
2793 int i;
2794
2795 for (i = 0; i < curr->lockdep_depth; i++) {
2796 hlock = curr->held_locks + i;
2797
2798 usage_bit = 2 + (mark << 2); /* ENABLED */
2799 if (hlock->read)
2800 usage_bit += 1; /* READ */
2801
2802 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2803
2804 if (!hlock->check)
2805 continue;
2806
2807 if (!mark_lock(curr, hlock, usage_bit))
2808 return 0;
2809 }
2810
2811 return 1;
2812}
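
/*
 * Example of the usage_bit arithmetic above (a sketch): mark == SOFTIRQ
 * (1) yields 2 + (1 << 2) == 6 == LOCK_ENABLED_SOFTIRQ, plus 1 when the
 * lock is read-held, i.e. LOCK_ENABLED_SOFTIRQ_READ.
 */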
2813
2814/*
2815 * Hardirqs will be enabled:
2816 */
2817static void __trace_hardirqs_on_caller(unsigned long ip)
2818{
2819 struct task_struct *curr = current;
2820
2821 /* we'll do an OFF -> ON transition: */
2822 curr->hardirqs_enabled = 1;
2823
2824 /*
2825 * We are going to turn hardirqs on, so set the
2826 * usage bit for all held locks:
2827 */
2828 if (!mark_held_locks(curr, HARDIRQ))
2829 return;
2830 /*
2831 * If we have softirqs enabled, then set the usage
2832 * bit for all held locks. (disabled hardirqs prevented
2833 * this bit from being set before)
2834 */
2835 if (curr->softirqs_enabled)
2836 if (!mark_held_locks(curr, SOFTIRQ))
2837 return;
2838
2839 curr->hardirq_enable_ip = ip;
2840 curr->hardirq_enable_event = ++curr->irq_events;
2841 debug_atomic_inc(hardirqs_on_events);
2842}
2843
2844__visible void trace_hardirqs_on_caller(unsigned long ip)
2845{
2846 time_hardirqs_on(CALLER_ADDR0, ip);
2847
2848 if (unlikely(!debug_locks || current->lockdep_recursion))
2849 return;
2850
2851 if (unlikely(current->hardirqs_enabled)) {
2852 /*
2853 * Neither irq nor preemption are disabled here
2854 * so this is racy by nature but losing one hit
2855 * in a stat is not a big deal.
2856 */
2857 __debug_atomic_inc(redundant_hardirqs_on);
2858 return;
2859 }
2860
2861 /*
2862 * We're enabling irqs and according to our state above irqs weren't
2863 * already enabled, yet we find the hardware thinks they are in fact
2864 * enabled.. someone messed up their IRQ state tracing.
2865 */
2866 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2867 return;
2868
2869 /*
2870 * See the fine text that goes along with this variable definition.
2871 */
2872 if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2873 return;
2874
2875 /*
2876 * Can't allow enabling interrupts while in an interrupt handler,
2877	 * that's generally bad form and such. Recursion, limited stack etc.
2878 */
2879 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2880 return;
2881
2882 current->lockdep_recursion = 1;
2883 __trace_hardirqs_on_caller(ip);
2884 current->lockdep_recursion = 0;
2885}
2886EXPORT_SYMBOL(trace_hardirqs_on_caller);
2887
2888void trace_hardirqs_on(void)
2889{
2890 trace_hardirqs_on_caller(CALLER_ADDR0);
2891}
2892EXPORT_SYMBOL(trace_hardirqs_on);
2893
2894/*
2895 * Hardirqs were disabled:
2896 */
2897__visible void trace_hardirqs_off_caller(unsigned long ip)
2898{
2899 struct task_struct *curr = current;
2900
2901 time_hardirqs_off(CALLER_ADDR0, ip);
2902
2903 if (unlikely(!debug_locks || current->lockdep_recursion))
2904 return;
2905
2906 /*
2907 * So we're supposed to get called after you mask local IRQs, but for
2908 * some reason the hardware doesn't quite think you did a proper job.
2909 */
2910 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2911 return;
2912
2913 if (curr->hardirqs_enabled) {
2914 /*
2915 * We have done an ON -> OFF transition:
2916 */
2917 curr->hardirqs_enabled = 0;
2918 curr->hardirq_disable_ip = ip;
2919 curr->hardirq_disable_event = ++curr->irq_events;
2920 debug_atomic_inc(hardirqs_off_events);
2921 } else
2922 debug_atomic_inc(redundant_hardirqs_off);
2923}
2924EXPORT_SYMBOL(trace_hardirqs_off_caller);
2925
2926void trace_hardirqs_off(void)
2927{
2928 trace_hardirqs_off_caller(CALLER_ADDR0);
2929}
2930EXPORT_SYMBOL(trace_hardirqs_off);
2931
2932/*
2933 * Softirqs will be enabled:
2934 */
2935void trace_softirqs_on(unsigned long ip)
2936{
2937 struct task_struct *curr = current;
2938
2939 if (unlikely(!debug_locks || current->lockdep_recursion))
2940 return;
2941
2942 /*
2943 * We fancy IRQs being disabled here, see softirq.c, avoids
2944 * funny state and nesting things.
2945 */
2946 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2947 return;
2948
2949 if (curr->softirqs_enabled) {
2950 debug_atomic_inc(redundant_softirqs_on);
2951 return;
2952 }
2953
2954 current->lockdep_recursion = 1;
2955 /*
2956 * We'll do an OFF -> ON transition:
2957 */
2958 curr->softirqs_enabled = 1;
2959 curr->softirq_enable_ip = ip;
2960 curr->softirq_enable_event = ++curr->irq_events;
2961 debug_atomic_inc(softirqs_on_events);
2962 /*
2963 * We are going to turn softirqs on, so set the
2964 * usage bit for all held locks, if hardirqs are
2965 * enabled too:
2966 */
2967 if (curr->hardirqs_enabled)
2968 mark_held_locks(curr, SOFTIRQ);
2969 current->lockdep_recursion = 0;
2970}
2971
2972/*
2973 * Softirqs were disabled:
2974 */
2975void trace_softirqs_off(unsigned long ip)
2976{
2977 struct task_struct *curr = current;
2978
2979 if (unlikely(!debug_locks || current->lockdep_recursion))
2980 return;
2981
2982 /*
2983 * We fancy IRQs being disabled here, see softirq.c
2984 */
2985 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2986 return;
2987
2988 if (curr->softirqs_enabled) {
2989 /*
2990 * We have done an ON -> OFF transition:
2991 */
2992 curr->softirqs_enabled = 0;
2993 curr->softirq_disable_ip = ip;
2994 curr->softirq_disable_event = ++curr->irq_events;
2995 debug_atomic_inc(softirqs_off_events);
2996 /*
2997 * Whoops, we wanted softirqs off, so why aren't they?
2998 */
2999 DEBUG_LOCKS_WARN_ON(!softirq_count());
3000 } else
3001 debug_atomic_inc(redundant_softirqs_off);
3002}
3003
3004static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
3005{
3006 /*
3007 * If non-trylock use in a hardirq or softirq context, then
3008 * mark the lock as used in these contexts:
3009 */
3010 if (!hlock->trylock) {
3011 if (hlock->read) {
3012 if (curr->hardirq_context)
3013 if (!mark_lock(curr, hlock,
3014 LOCK_USED_IN_HARDIRQ_READ))
3015 return 0;
3016 if (curr->softirq_context)
3017 if (!mark_lock(curr, hlock,
3018 LOCK_USED_IN_SOFTIRQ_READ))
3019 return 0;
3020 } else {
3021 if (curr->hardirq_context)
3022 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
3023 return 0;
3024 if (curr->softirq_context)
3025 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
3026 return 0;
3027 }
3028 }
3029 if (!hlock->hardirqs_off) {
3030 if (hlock->read) {
3031 if (!mark_lock(curr, hlock,
3032 LOCK_ENABLED_HARDIRQ_READ))
3033 return 0;
3034 if (curr->softirqs_enabled)
3035 if (!mark_lock(curr, hlock,
3036 LOCK_ENABLED_SOFTIRQ_READ))
3037 return 0;
3038 } else {
3039 if (!mark_lock(curr, hlock,
3040 LOCK_ENABLED_HARDIRQ))
3041 return 0;
3042 if (curr->softirqs_enabled)
3043 if (!mark_lock(curr, hlock,
3044 LOCK_ENABLED_SOFTIRQ))
3045 return 0;
3046 }
3047 }
3048
3049 return 1;
3050}
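
/*
 * Net effect of the marking above (a sketch): taking a lock inside a
 * hardirq marks its class USED_IN_HARDIRQ; taking it with hardirqs
 * enabled marks it ENABLED_HARDIRQ. A class that ever collects both is
 * the classic irq-deadlock candidate that mark_lock_irq() rejects.
 */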
3051
3052static inline unsigned int task_irq_context(struct task_struct *task)
3053{
3054 return 2 * !!task->hardirq_context + !!task->softirq_context;
3055}
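
/*
 * I.e. (sketch): 0 in plain process context, 1 in a softirq, 2 in a
 * hardirq, and 3 for a hardirq that interrupted a softirq.
 */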
3056
3057static int separate_irq_context(struct task_struct *curr,
3058 struct held_lock *hlock)
3059{
3060 unsigned int depth = curr->lockdep_depth;
3061
3062 /*
3063 * Keep track of points where we cross into an interrupt context:
3064 */
3065 if (depth) {
3066 struct held_lock *prev_hlock;
3067
3068 prev_hlock = curr->held_locks + depth-1;
3069 /*
3070 * If we cross into another context, reset the
3071 * hash key (this also prevents the checking and the
3072 * adding of the dependency to 'prev'):
3073 */
3074 if (prev_hlock->irq_context != hlock->irq_context)
3075 return 1;
3076 }
3077 return 0;
3078}
3079
3080#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3081
3082static inline
3083int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3084 enum lock_usage_bit new_bit)
3085{
3086	WARN_ON(1); /* Impossible, innit? We don't have TRACE_IRQFLAGS here. */
3087 return 1;
3088}
3089
3090static inline int mark_irqflags(struct task_struct *curr,
3091 struct held_lock *hlock)
3092{
3093 return 1;
3094}
3095
3096static inline unsigned int task_irq_context(struct task_struct *task)
3097{
3098 return 0;
3099}
3100
3101static inline int separate_irq_context(struct task_struct *curr,
3102 struct held_lock *hlock)
3103{
3104 return 0;
3105}
3106
3107#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3108
3109/*
3110 * Mark a lock with a usage bit, and validate the state transition:
3111 */
3112static int mark_lock(struct task_struct *curr, struct held_lock *this,
3113 enum lock_usage_bit new_bit)
3114{
3115 unsigned int new_mask = 1 << new_bit, ret = 1;
3116
3117 /*
3118 * If already set then do not dirty the cacheline,
3119 * nor do any checks:
3120 */
3121 if (likely(hlock_class(this)->usage_mask & new_mask))
3122 return 1;
3123
3124 if (!graph_lock())
3125 return 0;
3126 /*
3127 * Make sure we didn't race:
3128 */
3129 if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3130 graph_unlock();
3131 return 1;
3132 }
3133
3134 hlock_class(this)->usage_mask |= new_mask;
3135
3136 if (!save_trace(hlock_class(this)->usage_traces + new_bit))
3137 return 0;
3138
3139 switch (new_bit) {
3140#define LOCKDEP_STATE(__STATE) \
3141 case LOCK_USED_IN_##__STATE: \
3142 case LOCK_USED_IN_##__STATE##_READ: \
3143 case LOCK_ENABLED_##__STATE: \
3144 case LOCK_ENABLED_##__STATE##_READ:
3145#include "lockdep_states.h"
3146#undef LOCKDEP_STATE
3147 ret = mark_lock_irq(curr, this, new_bit);
3148 if (!ret)
3149 return 0;
3150 break;
3151 case LOCK_USED:
3152 debug_atomic_dec(nr_unused_locks);
3153 break;
3154 default:
3155 if (!debug_locks_off_graph_unlock())
3156 return 0;
3157 WARN_ON(1);
3158 return 0;
3159 }
3160
3161 graph_unlock();
3162
3163 /*
3164 * We must printk outside of the graph_lock:
3165 */
3166 if (ret == 2) {
3167 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3168 print_lock(this);
3169 print_irqtrace_events(curr);
3170 dump_stack();
3171 }
3172
3173 return ret;
3174}
3175
3176/*
3177 * Initialize a lock instance's lock-class mapping info:
3178 */
3179static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
3180 struct lock_class_key *key, int subclass)
3181{
3182 int i;
3183
3184 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3185 lock->class_cache[i] = NULL;
3186
3187#ifdef CONFIG_LOCK_STAT
3188 lock->cpu = raw_smp_processor_id();
3189#endif
3190
3191 /*
3192 * Can't be having no nameless bastards around this place!
3193 */
3194 if (DEBUG_LOCKS_WARN_ON(!name)) {
3195 lock->name = "NULL";
3196 return;
3197 }
3198
3199 lock->name = name;
3200
3201 /*
3202 * No key, no joy, we need to hash something.
3203 */
3204 if (DEBUG_LOCKS_WARN_ON(!key))
3205 return;
3206 /*
3207 * Sanity check, the lock-class key must be persistent:
3208 */
3209 if (!static_obj(key)) {
3210 printk("BUG: key %px not in .data!\n", key);
3211 /*
3212 * What it says above ^^^^^, I suggest you read it.
3213 */
3214 DEBUG_LOCKS_WARN_ON(1);
3215 return;
3216 }
3217 lock->key = key;
3218
3219 if (unlikely(!debug_locks))
3220 return;
3221
3222 if (subclass) {
3223 unsigned long flags;
3224
3225 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3226 return;
3227
3228 raw_local_irq_save(flags);
3229 current->lockdep_recursion = 1;
3230 register_lock_class(lock, subclass, 1);
3231 current->lockdep_recursion = 0;
3232 raw_local_irq_restore(flags);
3233 }
3234}
3235
3236void lockdep_init_map(struct lockdep_map *lock, const char *name,
3237 struct lock_class_key *key, int subclass)
3238{
3239 __lockdep_init_map(lock, name, key, subclass);
3240}
3241EXPORT_SYMBOL_GPL(lockdep_init_map);
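
/*
 * Typical use (a sketch; the "dev" structure and field names are made up,
 * the static-key pattern is the standard one):
 *
 *	static struct lock_class_key my_dev_lock_key;
 *
 *	lockdep_init_map(&dev->dep_map, "my_dev_lock", &my_dev_lock_key, 0);
 *
 * The key must live in static storage (see the static_obj() check above)
 * so that all instances initialized this way share one lock class.
 */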
3242
3243struct lock_class_key __lockdep_no_validate__;
3244EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
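
/*
 * This key is normally attached via lockdep_set_novalidate_class(), for
 * locks (the driver core's per-device mutex, for one) whose ordering is
 * too dynamic to validate; __lock_acquire() below clears 'check' for it
 * and only tracks such locks without proving anything about them.
 */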
3245
3246static int
3247print_lock_nested_lock_not_held(struct task_struct *curr,
3248 struct held_lock *hlock,
3249 unsigned long ip)
3250{
3251 if (!debug_locks_off())
3252 return 0;
3253 if (debug_locks_silent)
3254 return 0;
3255
3256 pr_warn("\n");
3257 pr_warn("==================================\n");
3258 pr_warn("WARNING: Nested lock was not taken\n");
3259 print_kernel_ident();
3260 pr_warn("----------------------------------\n");
3261
3262 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3263 print_lock(hlock);
3264
3265 pr_warn("\nbut this task is not holding:\n");
3266 pr_warn("%s\n", hlock->nest_lock->name);
3267
3271 pr_warn("\nother info that might help us debug this:\n");
3272 lockdep_print_held_locks(curr);
3273
3274 pr_warn("\nstack backtrace:\n");
3275 dump_stack();
3276
3277 return 0;
3278}
3279
3280static int __lock_is_held(const struct lockdep_map *lock, int read);
3281
3282/*
3283 * This gets called for every mutex_lock*()/spin_lock*() operation.
3284 * We maintain the dependency maps and validate the locking attempt:
3285 */
3286static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3287 int trylock, int read, int check, int hardirqs_off,
3288 struct lockdep_map *nest_lock, unsigned long ip,
3289 int references, int pin_count)
3290{
3291 struct task_struct *curr = current;
3292 struct lock_class *class = NULL;
3293 struct held_lock *hlock;
3294 unsigned int depth;
3295 int chain_head = 0;
3296 int class_idx;
3297 u64 chain_key;
3298
3299 if (unlikely(!debug_locks))
3300 return 0;
3301
3302 /*
3303 * Lockdep should run with IRQs disabled, otherwise we could
3304 * get an interrupt which would want to take locks, which would
3305 * end up in lockdep and have you got a head-ache already?
3306 */
3307 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3308 return 0;
3309
3310 if (!prove_locking || lock->key == &__lockdep_no_validate__)
3311 check = 0;
3312
3313 if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3314 class = lock->class_cache[subclass];
3315 /*
3316 * Not cached?
3317 */
3318 if (unlikely(!class)) {
3319 class = register_lock_class(lock, subclass, 0);
3320 if (!class)
3321 return 0;
3322 }
3323 atomic_inc((atomic_t *)&class->ops);
3324 if (very_verbose(class)) {
3325 printk("\nacquire class [%px] %s", class->key, class->name);
3326 if (class->name_version > 1)
3327 printk(KERN_CONT "#%d", class->name_version);
3328 printk(KERN_CONT "\n");
3329 dump_stack();
3330 }
3331
3332 /*
3333 * Add the lock to the list of currently held locks.
3334 * (we dont increase the depth just yet, up until the
3335 * dependency checks are done)
3336 */
3337 depth = curr->lockdep_depth;
3338 /*
3339 * Ran out of static storage for our per-task lock stack again have we?
3340 */
3341 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3342 return 0;
3343
3344 class_idx = class - lock_classes + 1;
3345
3346 if (depth) {
3347 hlock = curr->held_locks + depth - 1;
3348 if (hlock->class_idx == class_idx && nest_lock) {
3349 if (hlock->references) {
3350 /*
3351 * Check: unsigned int references:12, overflow.
3352 */
3353 if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
3354 return 0;
3355
3356 hlock->references++;
3357 } else {
3358 hlock->references = 2;
3359 }
3360
3361 return 1;
3362 }
3363 }
3364
3365 hlock = curr->held_locks + depth;
3366 /*
3367 * Plain impossible, we just registered it and checked it weren't no
3368 * NULL like.. I bet this mushroom I ate was good!
3369 */
3370 if (DEBUG_LOCKS_WARN_ON(!class))
3371 return 0;
3372 hlock->class_idx = class_idx;
3373 hlock->acquire_ip = ip;
3374 hlock->instance = lock;
3375 hlock->nest_lock = nest_lock;
3376 hlock->irq_context = task_irq_context(curr);
3377 hlock->trylock = trylock;
3378 hlock->read = read;
3379 hlock->check = check;
3380 hlock->hardirqs_off = !!hardirqs_off;
3381 hlock->references = references;
3382#ifdef CONFIG_LOCK_STAT
3383 hlock->waittime_stamp = 0;
3384 hlock->holdtime_stamp = lockstat_clock();
3385#endif
3386 hlock->pin_count = pin_count;
3387
3388 if (check && !mark_irqflags(curr, hlock))
3389 return 0;
3390
3391 /* mark it as used: */
3392 if (!mark_lock(curr, hlock, LOCK_USED))
3393 return 0;
3394
3395 /*
3396 * Calculate the chain hash: it's the combined hash of all the
3397 * lock keys along the dependency chain. We save the hash value
3398 * at every step so that we can get the current hash easily
3399 * after unlock. The chain hash is then used to cache dependency
3400 * results.
3401 *
3402	 * We drive the hash with the 'key ID' rather than class->key,
3403	 * since it is the most compact key value.
3404 */
3405 /*
3406 * Whoops, we did it again.. ran straight out of our static allocation.
3407 */
3408 if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
3409 return 0;
3410
3411 chain_key = curr->curr_chain_key;
3412 if (!depth) {
3413 /*
3414 * How can we have a chain hash when we ain't got no keys?!
3415 */
3416 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3417 return 0;
3418 chain_head = 1;
3419 }
3420
3421 hlock->prev_chain_key = chain_key;
3422 if (separate_irq_context(curr, hlock)) {
3423 chain_key = 0;
3424 chain_head = 1;
3425 }
3426 chain_key = iterate_chain_key(chain_key, class_idx);
3427
3428 if (nest_lock && !__lock_is_held(nest_lock, -1))
3429 return print_lock_nested_lock_not_held(curr, hlock, ip);
3430
3431 if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3432 return 0;
3433
3434 curr->curr_chain_key = chain_key;
3435 curr->lockdep_depth++;
3436 check_chain_key(curr);
3437#ifdef CONFIG_DEBUG_LOCKDEP
3438 if (unlikely(!debug_locks))
3439 return 0;
3440#endif
3441 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3442 debug_locks_off();
3443 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3444 printk(KERN_DEBUG "depth: %i max: %lu!\n",
3445 curr->lockdep_depth, MAX_LOCK_DEPTH);
3446
3447 lockdep_print_held_locks(current);
3448 debug_show_all_locks();
3449 dump_stack();
3450
3451 return 0;
3452 }
3453
3454 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3455 max_lockdep_depth = curr->lockdep_depth;
3456
3457 return 1;
3458}
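
/*
 * For orientation, the usual route here from a plain spinlock (a sketch;
 * the exact macro spellings live in the spinlock/lockdep headers):
 *
 *	spin_lock(&s)
 *	  -> spin_acquire(&s->dep_map, 0, 0, _RET_IP_)
 *	    -> lock_acquire(map, subclass=0, trylock=0, read=0, check=1,
 *			    nest_lock=NULL, ip)
 *	      -> __lock_acquire(..., references=0, pin_count=0)
 */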
3459
3460static int
3461print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3462 unsigned long ip)
3463{
3464 if (!debug_locks_off())
3465 return 0;
3466 if (debug_locks_silent)
3467 return 0;
3468
3469 pr_warn("\n");
3470 pr_warn("=====================================\n");
3471 pr_warn("WARNING: bad unlock balance detected!\n");
3472 print_kernel_ident();
3473 pr_warn("-------------------------------------\n");
3474 pr_warn("%s/%d is trying to release lock (",
3475 curr->comm, task_pid_nr(curr));
3476 print_lockdep_cache(lock);
3477 pr_cont(") at:\n");
3478 print_ip_sym(ip);
3479 pr_warn("but there are no more locks to release!\n");
3480 pr_warn("\nother info that might help us debug this:\n");
3481 lockdep_print_held_locks(curr);
3482
3483 pr_warn("\nstack backtrace:\n");
3484 dump_stack();
3485
3486 return 0;
3487}
3488
3489static int match_held_lock(const struct held_lock *hlock,
3490 const struct lockdep_map *lock)
3491{
3492 if (hlock->instance == lock)
3493 return 1;
3494
3495 if (hlock->references) {
3496 const struct lock_class *class = lock->class_cache[0];
3497
3498 if (!class)
3499 class = look_up_lock_class(lock, 0);
3500
3501 /*
3502 * If look_up_lock_class() failed to find a class, we're trying
3503 * to test if we hold a lock that has never yet been acquired.
3504 * Clearly if the lock hasn't been acquired _ever_, we're not
3505 * holding it either, so report failure.
3506 */
3507 if (!class)
3508 return 0;
3509
3510 /*
3511 * References, but not a lock we're actually ref-counting?
3512 * State got messed up, follow the sites that change ->references
3513 * and try to make sense of it.
3514 */
3515 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3516 return 0;
3517
3518 if (hlock->class_idx == class - lock_classes + 1)
3519 return 1;
3520 }
3521
3522 return 0;
3523}
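
/*
 * Example of the references case (a sketch): acquiring the same map twice
 * with a nest_lock makes __lock_acquire() keep a single held_lock with
 * references == 2 rather than two stack entries; such an entry is matched
 * here by class instead of by instance.
 */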
3524
3525/* @depth must not be zero */
3526static struct held_lock *find_held_lock(struct task_struct *curr,
3527 struct lockdep_map *lock,
3528 unsigned int depth, int *idx)
3529{
3530 struct held_lock *ret, *hlock, *prev_hlock;
3531 int i;
3532
3533 i = depth - 1;
3534 hlock = curr->held_locks + i;
3535 ret = hlock;
3536 if (match_held_lock(hlock, lock))
3537 goto out;
3538
3539 ret = NULL;
3540 for (i--, prev_hlock = hlock--;
3541 i >= 0;
3542 i--, prev_hlock = hlock--) {
3543 /*
3544 * We must not cross into another context:
3545 */
3546 if (prev_hlock->irq_context != hlock->irq_context) {
3547 ret = NULL;
3548 break;
3549 }
3550 if (match_held_lock(hlock, lock)) {
3551 ret = hlock;
3552 break;
3553 }
3554 }
3555
3556out:
3557 *idx = i;
3558 return ret;
3559}
3560
3561static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
3562 int idx)
3563{
3564 struct held_lock *hlock;
3565
3566 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
3567 if (!__lock_acquire(hlock->instance,
3568 hlock_class(hlock)->subclass,
3569 hlock->trylock,
3570 hlock->read, hlock->check,
3571 hlock->hardirqs_off,
3572 hlock->nest_lock, hlock->acquire_ip,
3573 hlock->references, hlock->pin_count))
3574 return 1;
3575 }
3576 return 0;
3577}
3578
3579static int
3580__lock_set_class(struct lockdep_map *lock, const char *name,
3581 struct lock_class_key *key, unsigned int subclass,
3582 unsigned long ip)
3583{
3584 struct task_struct *curr = current;
3585 struct held_lock *hlock;
3586 struct lock_class *class;
3587 unsigned int depth;
3588 int i;
3589
3590 depth = curr->lockdep_depth;
3591 /*
3592 * This function is about (re)setting the class of a held lock,
3593 * yet we're not actually holding any locks. Naughty user!
3594 */
3595 if (DEBUG_LOCKS_WARN_ON(!depth))
3596 return 0;
3597
3598 hlock = find_held_lock(curr, lock, depth, &i);
3599 if (!hlock)
3600 return print_unlock_imbalance_bug(curr, lock, ip);
3601
3602 lockdep_init_map(lock, name, key, 0);
3603 class = register_lock_class(lock, subclass, 0);
3604 hlock->class_idx = class - lock_classes + 1;
3605
3606 curr->lockdep_depth = i;
3607 curr->curr_chain_key = hlock->prev_chain_key;
3608
3609 if (reacquire_held_locks(curr, depth, i))
3610 return 0;
3611
3612 /*
3613 * I took it apart and put it back together again, except now I have
3614 * these 'spare' parts.. where shall I put them.
3615 */
3616 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3617 return 0;
3618 return 1;
3619}
3620
3621static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3622{
3623 struct task_struct *curr = current;
3624 struct held_lock *hlock;
3625 unsigned int depth;
3626 int i;
3627
3628 depth = curr->lockdep_depth;
3629 /*
3630 * This function is about (re)setting the class of a held lock,
3631 * yet we're not actually holding any locks. Naughty user!
3632 */
3633 if (DEBUG_LOCKS_WARN_ON(!depth))
3634 return 0;
3635
3636 hlock = find_held_lock(curr, lock, depth, &i);
3637 if (!hlock)
3638 return print_unlock_imbalance_bug(curr, lock, ip);
3639
3640 curr->lockdep_depth = i;
3641 curr->curr_chain_key = hlock->prev_chain_key;
3642
3643 WARN(hlock->read, "downgrading a read lock");
3644 hlock->read = 1;
3645 hlock->acquire_ip = ip;
3646
3647 if (reacquire_held_locks(curr, depth, i))
3648 return 0;
3649
3650 /*
3651 * I took it apart and put it back together again, except now I have
3652 * these 'spare' parts.. where shall I put them.
3653 */
3654 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3655 return 0;
3656 return 1;
3657}
3658
3659/*
3660 * Remove the lock from the list of currently held locks - this gets
3661 * called on mutex_unlock()/spin_unlock*() (or on a failed
3662 * mutex_lock_interruptible()).
3663 *
3664 * @nested is an hysterical artifact, needs a tree wide cleanup.
3665 */
3666static int
3667__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3668{
3669 struct task_struct *curr = current;
3670 struct held_lock *hlock;
3671 unsigned int depth;
3672 int i;
3673
3674 if (unlikely(!debug_locks))
3675 return 0;
3676
3677 depth = curr->lockdep_depth;
3678 /*
3679 * So we're all set to release this lock.. wait what lock? We don't
3680 * own any locks, you've been drinking again?
3681 */
3682 if (DEBUG_LOCKS_WARN_ON(depth <= 0))
3683 return print_unlock_imbalance_bug(curr, lock, ip);
3684
3685 /*
3686 * Check whether the lock exists in the current stack
3687 * of held locks:
3688 */
3689 hlock = find_held_lock(curr, lock, depth, &i);
3690 if (!hlock)
3691 return print_unlock_imbalance_bug(curr, lock, ip);
3692
3693 if (hlock->instance == lock)
3694 lock_release_holdtime(hlock);
3695
3696 WARN(hlock->pin_count, "releasing a pinned lock\n");
3697
3698 if (hlock->references) {
3699 hlock->references--;
3700 if (hlock->references) {
3701 /*
3702 * We had, and after removing one, still have
3703 * references, the current lock stack is still
3704 * valid. We're done!
3705 */
3706 return 1;
3707 }
3708 }
3709
3710 /*
3711 * We have the right lock to unlock, 'hlock' points to it.
3712 * Now we remove it from the stack, and add back the other
3713 * entries (if any), recalculating the hash along the way:
3714 */
3715
3716 curr->lockdep_depth = i;
3717 curr->curr_chain_key = hlock->prev_chain_key;
3718
3719 if (reacquire_held_locks(curr, depth, i + 1))
3720 return 0;
3721
3722 /*
3723 * We had N bottles of beer on the wall, we drank one, but now
3724 * there's not N-1 bottles of beer left on the wall...
3725 */
3726 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3727 return 0;
3728
3729 return 1;
3730}
3731
3732static int __lock_is_held(const struct lockdep_map *lock, int read)
3733{
3734 struct task_struct *curr = current;
3735 int i;
3736
3737 for (i = 0; i < curr->lockdep_depth; i++) {
3738 struct held_lock *hlock = curr->held_locks + i;
3739
3740 if (match_held_lock(hlock, lock)) {
3741 if (read == -1 || hlock->read == read)
3742 return 1;
3743
3744 return 0;
3745 }
3746 }
3747
3748 return 0;
3749}
3750
3751static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
3752{
3753 struct pin_cookie cookie = NIL_COOKIE;
3754 struct task_struct *curr = current;
3755 int i;
3756
3757 if (unlikely(!debug_locks))
3758 return cookie;
3759
3760 for (i = 0; i < curr->lockdep_depth; i++) {
3761 struct held_lock *hlock = curr->held_locks + i;
3762
3763 if (match_held_lock(hlock, lock)) {
3764 /*
3765 * Grab 16bits of randomness; this is sufficient to not
3766 * be guessable and still allows some pin nesting in
3767 * our u32 pin_count.
3768 */
3769 cookie.val = 1 + (prandom_u32() >> 16);
3770 hlock->pin_count += cookie.val;
3771 return cookie;
3772 }
3773 }
3774
3775 WARN(1, "pinning an unheld lock\n");
3776 return cookie;
3777}
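
/*
 * Usage shape (a sketch, local names made up):
 *
 *	struct pin_cookie c = lock_pin_lock(&lock->dep_map);
 *	...				(lock stays held throughout)
 *	lock_unpin_lock(&lock->dep_map, c);
 *
 * Because the cookie is random, an unbalanced or forged unpin surfaces
 * as "pin count corrupted" / "unpinning an unpinned lock" instead of
 * silently balancing out.
 */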
3778
3779static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3780{
3781 struct task_struct *curr = current;
3782 int i;
3783
3784 if (unlikely(!debug_locks))
3785 return;
3786
3787 for (i = 0; i < curr->lockdep_depth; i++) {
3788 struct held_lock *hlock = curr->held_locks + i;
3789
3790 if (match_held_lock(hlock, lock)) {
3791 hlock->pin_count += cookie.val;
3792 return;
3793 }
3794 }
3795
3796 WARN(1, "pinning an unheld lock\n");
3797}
3798
3799static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3800{
3801 struct task_struct *curr = current;
3802 int i;
3803
3804 if (unlikely(!debug_locks))
3805 return;
3806
3807 for (i = 0; i < curr->lockdep_depth; i++) {
3808 struct held_lock *hlock = curr->held_locks + i;
3809
3810 if (match_held_lock(hlock, lock)) {
3811 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
3812 return;
3813
3814 hlock->pin_count -= cookie.val;
3815
3816 if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
3817 hlock->pin_count = 0;
3818
3819 return;
3820 }
3821 }
3822
3823 WARN(1, "unpinning an unheld lock\n");
3824}
3825
3826/*
3827 * Check whether we follow the irq-flags state precisely:
3828 */
3829static void check_flags(unsigned long flags)
3830{
3831#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3832 defined(CONFIG_TRACE_IRQFLAGS)
3833 if (!debug_locks)
3834 return;
3835
3836 if (irqs_disabled_flags(flags)) {
3837 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3838 printk("possible reason: unannotated irqs-off.\n");
3839 }
3840 } else {
3841 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3842 printk("possible reason: unannotated irqs-on.\n");
3843 }
3844 }
3845
3846 /*
3847 * We dont accurately track softirq state in e.g.
3848 * hardirq contexts (such as on 4KSTACKS), so only
3849 * check if not in hardirq contexts:
3850 */
3851 if (!hardirq_count()) {
3852 if (softirq_count()) {
3853 /* like the above, but with softirqs */
3854 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3855 } else {
3856 /* lick the above, does it taste good? */
3857 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3858 }
3859 }
3860
3861 if (!debug_locks)
3862 print_irqtrace_events(current);
3863#endif
3864}
3865
3866void lock_set_class(struct lockdep_map *lock, const char *name,
3867 struct lock_class_key *key, unsigned int subclass,
3868 unsigned long ip)
3869{
3870 unsigned long flags;
3871
3872 if (unlikely(current->lockdep_recursion))
3873 return;
3874
3875 raw_local_irq_save(flags);
3876 current->lockdep_recursion = 1;
3877 check_flags(flags);
3878 if (__lock_set_class(lock, name, key, subclass, ip))
3879 check_chain_key(current);
3880 current->lockdep_recursion = 0;
3881 raw_local_irq_restore(flags);
3882}
3883EXPORT_SYMBOL_GPL(lock_set_class);
3884
3885void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3886{
3887 unsigned long flags;
3888
3889 if (unlikely(current->lockdep_recursion))
3890 return;
3891
3892 raw_local_irq_save(flags);
3893 current->lockdep_recursion = 1;
3894 check_flags(flags);
3895 if (__lock_downgrade(lock, ip))
3896 check_chain_key(current);
3897 current->lockdep_recursion = 0;
3898 raw_local_irq_restore(flags);
3899}
3900EXPORT_SYMBOL_GPL(lock_downgrade);
3901
3902/*
3903 * We are not always called with irqs disabled - do that here,
3904 * and also avoid lockdep recursion:
3905 */
3906void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3907 int trylock, int read, int check,
3908 struct lockdep_map *nest_lock, unsigned long ip)
3909{
3910 unsigned long flags;
3911
3912 if (unlikely(current->lockdep_recursion))
3913 return;
3914
3915 raw_local_irq_save(flags);
3916 check_flags(flags);
3917
3918 current->lockdep_recursion = 1;
3919 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3920 __lock_acquire(lock, subclass, trylock, read, check,
3921 irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
3922 current->lockdep_recursion = 0;
3923 raw_local_irq_restore(flags);
3924}
3925EXPORT_SYMBOL_GPL(lock_acquire);
3926
3927void lock_release(struct lockdep_map *lock, int nested,
3928 unsigned long ip)
3929{
3930 unsigned long flags;
3931
3932 if (unlikely(current->lockdep_recursion))
3933 return;
3934
3935 raw_local_irq_save(flags);
3936 check_flags(flags);
3937 current->lockdep_recursion = 1;
3938 trace_lock_release(lock, ip);
3939 if (__lock_release(lock, nested, ip))
3940 check_chain_key(current);
3941 current->lockdep_recursion = 0;
3942 raw_local_irq_restore(flags);
3943}
3944EXPORT_SYMBOL_GPL(lock_release);
3945
3946int lock_is_held_type(const struct lockdep_map *lock, int read)
3947{
3948 unsigned long flags;
3949 int ret = 0;
3950
3951 if (unlikely(current->lockdep_recursion))
3952 return 1; /* avoid false negative lockdep_assert_held() */
3953
3954 raw_local_irq_save(flags);
3955 check_flags(flags);
3956
3957 current->lockdep_recursion = 1;
3958 ret = __lock_is_held(lock, read);
3959 current->lockdep_recursion = 0;
3960 raw_local_irq_restore(flags);
3961
3962 return ret;
3963}
3964EXPORT_SYMBOL_GPL(lock_is_held_type);
3965
3966struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
3967{
3968 struct pin_cookie cookie = NIL_COOKIE;
3969 unsigned long flags;
3970
3971 if (unlikely(current->lockdep_recursion))
3972 return cookie;
3973
3974 raw_local_irq_save(flags);
3975 check_flags(flags);
3976
3977 current->lockdep_recursion = 1;
3978 cookie = __lock_pin_lock(lock);
3979 current->lockdep_recursion = 0;
3980 raw_local_irq_restore(flags);
3981
3982 return cookie;
3983}
3984EXPORT_SYMBOL_GPL(lock_pin_lock);
3985
3986void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3987{
3988 unsigned long flags;
3989
3990 if (unlikely(current->lockdep_recursion))
3991 return;
3992
3993 raw_local_irq_save(flags);
3994 check_flags(flags);
3995
3996 current->lockdep_recursion = 1;
3997 __lock_repin_lock(lock, cookie);
3998 current->lockdep_recursion = 0;
3999 raw_local_irq_restore(flags);
4000}
4001EXPORT_SYMBOL_GPL(lock_repin_lock);
4002
4003void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4004{
4005 unsigned long flags;
4006
4007 if (unlikely(current->lockdep_recursion))
4008 return;
4009
4010 raw_local_irq_save(flags);
4011 check_flags(flags);
4012
4013 current->lockdep_recursion = 1;
4014 __lock_unpin_lock(lock, cookie);
4015 current->lockdep_recursion = 0;
4016 raw_local_irq_restore(flags);
4017}
4018EXPORT_SYMBOL_GPL(lock_unpin_lock);

#ifdef CONFIG_LOCK_STAT
static int
print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
			  unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("=================================\n");
	pr_warn("WARNING: bad contention detected!\n");
	print_kernel_ident();
	pr_warn("---------------------------------\n");
	pr_warn("%s/%d is trying to contend lock (",
		curr->comm, task_pid_nr(curr));
	print_lockdep_cache(lock);
	pr_cont(") at:\n");
	print_ip_sym(ip);
	pr_warn("but there are no locks held!\n");
	pr_warn("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static void
__lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	struct lock_class_stats *stats;
	unsigned int depth;
	int i, contention_point, contending_point;

	depth = curr->lockdep_depth;
	/*
	 * Whee, we contended on this lock, except it seems we're not
	 * actually trying to acquire anything much at all..
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock) {
		print_lock_contention_bug(curr, lock, ip);
		return;
	}

	if (hlock->instance != lock)
		return;

	hlock->waittime_stamp = lockstat_clock();

	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
	contending_point = lock_point(hlock_class(hlock)->contending_point,
				      lock->ip);

	stats = get_lock_stats(hlock_class(hlock));
	if (contention_point < LOCKSTAT_POINTS)
		stats->contention_point[contention_point]++;
	if (contending_point < LOCKSTAT_POINTS)
		stats->contending_point[contending_point]++;
	if (lock->cpu != smp_processor_id())
		stats->bounces[bounce_contended + !!hlock->read]++;
	put_lock_stats(stats);
}

static void
__lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	struct lock_class_stats *stats;
	unsigned int depth;
	u64 now, waittime = 0;
	int i, cpu;

	depth = curr->lockdep_depth;
	/*
	 * Yay, we acquired ownership of this lock we didn't try to
	 * acquire, how the heck did that happen?
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock) {
		print_lock_contention_bug(curr, lock, _RET_IP_);
		return;
	}

	if (hlock->instance != lock)
		return;

	cpu = smp_processor_id();
	if (hlock->waittime_stamp) {
		now = lockstat_clock();
		waittime = now - hlock->waittime_stamp;
		hlock->holdtime_stamp = now;
	}

	trace_lock_acquired(lock, ip);

	stats = get_lock_stats(hlock_class(hlock));
	if (waittime) {
		if (hlock->read)
			lock_time_inc(&stats->read_waittime, waittime);
		else
			lock_time_inc(&stats->write_waittime, waittime);
	}
	if (lock->cpu != cpu)
		stats->bounces[bounce_acquired + !!hlock->read]++;
	put_lock_stats(stats);

	lock->cpu = cpu;
	lock->ip = ip;
}
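/*
 * Timeline of the lock-stat stamps above (illustrative, not from the
 * original source): a contended acquisition records waittime_stamp in
 * __lock_contended(), __lock_acquired() turns the delta into read/write
 * waittime, and holdtime_stamp is (re)set on acquisition so that
 * lock_release_holdtime() can account the hold time on release:
 *
 *   lock_contended()  t0:  waittime_stamp = t0
 *   lock_acquired()   t1:  waittime = t1 - t0, holdtime_stamp = t1
 *   lock_release()    t2:  holdtime = t2 - t1
 */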

void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(!lock_stat))
		return;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	trace_lock_contended(lock, ip);
	__lock_contended(lock, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_contended);

void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(!lock_stat))
		return;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	__lock_acquired(lock, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquired);
#endif

/*
 * Used by the testsuite, sanitize the validator state
 * after a simulated failure:
 */

void lockdep_reset(void)
{
	unsigned long flags;
	int i;

	raw_local_irq_save(flags);
	current->curr_chain_key = 0;
	current->lockdep_depth = 0;
	current->lockdep_recursion = 0;
	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
	nr_hardirq_chains = 0;
	nr_softirq_chains = 0;
	nr_process_chains = 0;
	debug_locks = 1;
	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_HLIST_HEAD(chainhash_table + i);
	raw_local_irq_restore(flags);
}

static void zap_class(struct lock_class *class)
{
	int i;

	/*
	 * Remove all dependencies this lock is
	 * involved in:
	 */
	for (i = 0; i < nr_list_entries; i++) {
		if (list_entries[i].class == class)
			list_del_rcu(&list_entries[i].entry);
	}
	/*
	 * Unhash the class and remove it from the all_lock_classes list:
	 */
	hlist_del_rcu(&class->hash_entry);
	list_del_rcu(&class->lock_entry);

	RCU_INIT_POINTER(class->key, NULL);
	RCU_INIT_POINTER(class->name, NULL);
}

static inline int within(const void *addr, void *start, unsigned long size)
{
	return addr >= start && addr < start + size;
}
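/*
 * Note the half-open interval: [start, start + size). An illustrative
 * check (not from the original source): with start == 0x1000 and
 * size == 0x100, within() accepts 0x1000..0x10ff and rejects 0x1100.
 */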

/*
 * Used in module.c to remove lock classes from memory that is going to be
 * freed; and possibly re-used by other modules.
 *
 * We will have had one sync_sched() before getting here, so we're guaranteed
 * nobody will look up these exact classes -- they're properly dead but still
 * allocated.
 */
void lockdep_free_key_range(void *start, unsigned long size)
{
	struct lock_class *class;
	struct hlist_head *head;
	unsigned long flags;
	int i;
	int locked;

	raw_local_irq_save(flags);
	locked = graph_lock();

	/*
	 * Unhash all classes that were created by this module:
	 */
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		hlist_for_each_entry_rcu(class, head, hash_entry) {
			if (within(class->key, start, size))
				zap_class(class);
			else if (within(class->name, start, size))
				zap_class(class);
		}
	}

	if (locked)
		graph_unlock();
	raw_local_irq_restore(flags);
	/*
	 * Wait for any possible iterators from look_up_lock_class() to pass
	 * before continuing to free the memory they refer to.
	 *
	 * synchronize_sched() is sufficient because the read side runs with
	 * IRQs disabled.
	 */
	synchronize_sched();

	/*
	 * XXX at this point we could return the resources to the pool;
	 * instead we leak them. We would need to change to bitmap allocators
	 * instead of the linear allocators we have now.
	 */
}
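/*
 * Illustrative only -- roughly how the module unload path is expected to
 * call this (the field names are assumptions and may not match the exact
 * struct module layout of this tree):
 */
#if 0
static void example_free_module(struct module *mod)
{
	/* module.c does one synchronize_sched() before this point */
	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
}
#endif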

void lockdep_reset_lock(struct lockdep_map *lock)
{
	struct lock_class *class;
	struct hlist_head *head;
	unsigned long flags;
	int i, j;
	int locked;

	raw_local_irq_save(flags);

	/*
	 * Remove all classes this lock might have:
	 */
	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
		/*
		 * If the class exists we look it up and zap it:
		 */
		class = look_up_lock_class(lock, j);
		if (class)
			zap_class(class);
	}
	/*
	 * Debug check: in the end all mapped classes should
	 * be gone.
	 */
	locked = graph_lock();
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		hlist_for_each_entry_rcu(class, head, hash_entry) {
			int match = 0;

			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
				match |= class == lock->class_cache[j];

			if (unlikely(match)) {
				if (debug_locks_off_graph_unlock()) {
					/*
					 * We just zapped everything this lock
					 * maps to, how did it still match?
					 */
					WARN_ON(1);
				}
				goto out_restore;
			}
		}
	}
	if (locked)
		graph_unlock();

out_restore:
	raw_local_irq_restore(flags);
}

void __init lockdep_info(void)
{
	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);

	printk(" memory used by lock dependency info: %lu kB\n",
		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
		sizeof(struct list_head) * CLASSHASH_SIZE +
		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
		sizeof(struct list_head) * CHAINHASH_SIZE
#ifdef CONFIG_PROVE_LOCKING
		+ sizeof(struct circular_queue)
#endif
		) / 1024
		);

	printk(" per task-struct memory footprint: %lu bytes\n",
		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
}

static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
		     const void *mem_to, struct held_lock *hlock)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("=========================\n");
	pr_warn("WARNING: held lock freed!\n");
	print_kernel_ident();
	pr_warn("-------------------------\n");
	pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
	print_lock(hlock);
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();
}

static inline int not_in_range(const void* mem_from, unsigned long mem_len,
				const void* lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
		mem_from + mem_len <= lock_from;
}
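/*
 * Illustrative only: not_in_range() is the standard "no interval overlap"
 * test. With mem = [0x1000, 0x1100) and a lock object at [0x10f8, 0x1120),
 * neither disjointness condition holds, so the ranges overlap and the
 * freed-while-held check below fires.
 */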

/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned long flags;
	int i;

	if (unlikely(!debug_locks))
		return;

	local_irq_save(flags);
	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		if (not_in_range(mem_from, mem_len, hlock->instance,
				 sizeof(*hlock->instance)))
			continue;

		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
		break;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
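/*
 * Illustrative only -- a hedged sketch of the class of bug this check
 * catches; 'struct foo' and the free path are hypothetical, and the exact
 * allocator hooks that invoke the check depend on the configuration:
 */
#if 0
static void example_buggy_free(struct foo *obj)
{
	spin_lock(&obj->lock);
	/* bug: freeing an object whose embedded lock is still held */
	debug_check_no_locks_freed(obj, sizeof(*obj));	/* would warn here */
	kfree(obj);
}
#endif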

static void print_held_locks_bug(void)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("====================================\n");
	pr_warn("WARNING: %s/%d still has locks held!\n",
		current->comm, task_pid_nr(current));
	print_kernel_ident();
	pr_warn("------------------------------------\n");
	lockdep_print_held_locks(current);
	pr_warn("\nstack backtrace:\n");
	dump_stack();
}

void debug_check_no_locks_held(void)
{
	if (unlikely(current->lockdep_depth > 0))
		print_held_locks_bug();
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_held);

#ifdef __KERNEL__
void debug_show_all_locks(void)
{
	struct task_struct *g, *p;
	int count = 10;
	int unlock = 1;

	if (unlikely(!debug_locks)) {
		pr_warn("INFO: lockdep is turned off.\n");
		return;
	}
	pr_warn("\nShowing all locks held in the system:\n");

	/*
	 * Here we try hard to get the tasklist_lock: if we can't get it
	 * within 2 seconds we proceed without it (but keep trying to take
	 * it as we iterate). This enables a debug printout even when a
	 * tasklist_lock-holding task has deadlocked or crashed.
	 */
retry:
	if (!read_trylock(&tasklist_lock)) {
		if (count == 10)
			pr_warn("hm, tasklist_lock locked, retrying... ");
		if (count) {
			count--;
			pr_cont(" #%d", 10-count);
			mdelay(200);
			goto retry;
		}
		pr_cont(" ignoring it.\n");
		unlock = 0;
	} else {
		if (count != 10)
			pr_cont(" locked it.\n");
	}

	do_each_thread(g, p) {
		/*
		 * It's not reliable to print a task's held locks if it's
		 * currently running and isn't the current task:
		 */
		if (p->state == TASK_RUNNING && p != current)
			continue;
		if (p->lockdep_depth)
			lockdep_print_held_locks(p);
		if (!unlock)
			if (read_trylock(&tasklist_lock))
				unlock = 1;
		touch_nmi_watchdog();
	} while_each_thread(g, p);

	pr_warn("\n");
	pr_warn("=============================================\n\n");

	if (unlock)
		read_unlock(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(debug_show_all_locks);
#endif

/*
 * Careful: only use this function if you are sure that
 * the task cannot run in parallel!
 */
void debug_show_held_locks(struct task_struct *task)
{
	if (unlikely(!debug_locks)) {
		printk("INFO: lockdep is turned off.\n");
		return;
	}
	lockdep_print_held_locks(task);
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);

asmlinkage __visible void lockdep_sys_exit(void)
{
	struct task_struct *curr = current;

	if (unlikely(curr->lockdep_depth)) {
		if (!debug_locks_off())
			return;
		pr_warn("\n");
		pr_warn("================================================\n");
		pr_warn("WARNING: lock held when returning to user space!\n");
		print_kernel_ident();
		pr_warn("------------------------------------------------\n");
		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
				curr->comm, curr->pid);
		lockdep_print_held_locks(curr);
	}

	/*
	 * The lock history for each syscall should be independent. So wipe the
	 * slate clean on return to userspace.
	 */
	lockdep_invariant_state(false);
}

void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
	struct task_struct *curr = current;

	/* Note: the following can be executed concurrently, so be careful. */
	pr_warn("\n");
	pr_warn("=============================\n");
	pr_warn("WARNING: suspicious RCU usage\n");
	print_kernel_ident();
	pr_warn("-----------------------------\n");
	pr_warn("%s:%d %s!\n", file, line, s);
	pr_warn("\nother info that might help us debug this:\n\n");
	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
	       !rcu_lockdep_current_cpu_online()
			? "RCU used illegally from offline CPU!\n"
			: !rcu_is_watching()
				? "RCU used illegally from idle CPU!\n"
				: "",
	       rcu_scheduler_active, debug_locks);

	/*
	 * If a CPU is in the RCU-free window in idle (i.e. in the section
	 * between rcu_idle_enter() and rcu_idle_exit()), then RCU
	 * considers that CPU to be in an "extended quiescent state",
	 * which means that RCU will be completely ignoring that CPU.
	 * Therefore, rcu_read_lock() and friends have absolutely no
	 * effect on a CPU running in that state. In other words, even if
	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
	 * delete data structures out from under it. RCU really has no
	 * choice here: we need to keep an RCU-free window in idle where
	 * the CPU may possibly enter into low power mode. This way, other
	 * CPUs that have started a grace period can notice the extended
	 * quiescent state; otherwise we would delay every grace period
	 * for as long as we run in the idle task.
	 *
	 * So complain bitterly if someone does call rcu_read_lock(),
	 * rcu_read_lock_bh() and so on from extended quiescent states.
	 */
	if (!rcu_is_watching())
		pr_warn("RCU used illegally from extended quiescent state!\n");

	lockdep_print_held_locks(curr);
	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
1/*
2 * kernel/lockdep.c
3 *
4 * Runtime locking correctness validator
5 *
6 * Started by Ingo Molnar:
7 *
8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
10 *
11 * this code maps all the lock dependencies as they occur in a live kernel
12 * and will warn about the following classes of locking bugs:
13 *
14 * - lock inversion scenarios
15 * - circular lock dependencies
16 * - hardirq/softirq safe/unsafe locking bugs
17 *
18 * Bugs are reported even if the current locking scenario does not cause
19 * any deadlock at this point.
20 *
21 * I.e. if anytime in the past two locks were taken in a different order,
22 * even if it happened for another task, even if those were different
23 * locks (but of the same class as this lock), this code will detect it.
24 *
25 * Thanks to Arjan van de Ven for coming up with the initial idea of
26 * mapping lock dependencies runtime.
27 */
28#define DISABLE_BRANCH_PROFILING
29#include <linux/mutex.h>
30#include <linux/sched.h>
31#include <linux/delay.h>
32#include <linux/module.h>
33#include <linux/proc_fs.h>
34#include <linux/seq_file.h>
35#include <linux/spinlock.h>
36#include <linux/kallsyms.h>
37#include <linux/interrupt.h>
38#include <linux/stacktrace.h>
39#include <linux/debug_locks.h>
40#include <linux/irqflags.h>
41#include <linux/utsname.h>
42#include <linux/hash.h>
43#include <linux/ftrace.h>
44#include <linux/stringify.h>
45#include <linux/bitops.h>
46#include <linux/gfp.h>
47#include <linux/kmemcheck.h>
48#include <linux/random.h>
49#include <linux/jhash.h>
50
51#include <asm/sections.h>
52
53#include "lockdep_internals.h"
54
55#define CREATE_TRACE_POINTS
56#include <trace/events/lock.h>
57
58#ifdef CONFIG_PROVE_LOCKING
59int prove_locking = 1;
60module_param(prove_locking, int, 0644);
61#else
62#define prove_locking 0
63#endif
64
65#ifdef CONFIG_LOCK_STAT
66int lock_stat = 1;
67module_param(lock_stat, int, 0644);
68#else
69#define lock_stat 0
70#endif
71
72/*
73 * lockdep_lock: protects the lockdep graph, the hashes and the
74 * class/list/hash allocators.
75 *
76 * This is one of the rare exceptions where it's justified
77 * to use a raw spinlock - we really dont want the spinlock
78 * code to recurse back into the lockdep code...
79 */
80static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
81
82static int graph_lock(void)
83{
84 arch_spin_lock(&lockdep_lock);
85 /*
86 * Make sure that if another CPU detected a bug while
87 * walking the graph we dont change it (while the other
88 * CPU is busy printing out stuff with the graph lock
89 * dropped already)
90 */
91 if (!debug_locks) {
92 arch_spin_unlock(&lockdep_lock);
93 return 0;
94 }
95 /* prevent any recursions within lockdep from causing deadlocks */
96 current->lockdep_recursion++;
97 return 1;
98}
99
100static inline int graph_unlock(void)
101{
102 if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
103 /*
104 * The lockdep graph lock isn't locked while we expect it to
105 * be, we're confused now, bye!
106 */
107 return DEBUG_LOCKS_WARN_ON(1);
108 }
109
110 current->lockdep_recursion--;
111 arch_spin_unlock(&lockdep_lock);
112 return 0;
113}
114
115/*
116 * Turn lock debugging off and return with 0 if it was off already,
117 * and also release the graph lock:
118 */
119static inline int debug_locks_off_graph_unlock(void)
120{
121 int ret = debug_locks_off();
122
123 arch_spin_unlock(&lockdep_lock);
124
125 return ret;
126}
127
128unsigned long nr_list_entries;
129static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
130
131/*
132 * All data structures here are protected by the global debug_lock.
133 *
134 * Mutex key structs only get allocated, once during bootup, and never
135 * get freed - this significantly simplifies the debugging code.
136 */
137unsigned long nr_lock_classes;
138static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
139
140static inline struct lock_class *hlock_class(struct held_lock *hlock)
141{
142 if (!hlock->class_idx) {
143 /*
144 * Someone passed in garbage, we give up.
145 */
146 DEBUG_LOCKS_WARN_ON(1);
147 return NULL;
148 }
149 return lock_classes + hlock->class_idx - 1;
150}
151
152#ifdef CONFIG_LOCK_STAT
153static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
154
155static inline u64 lockstat_clock(void)
156{
157 return local_clock();
158}
159
160static int lock_point(unsigned long points[], unsigned long ip)
161{
162 int i;
163
164 for (i = 0; i < LOCKSTAT_POINTS; i++) {
165 if (points[i] == 0) {
166 points[i] = ip;
167 break;
168 }
169 if (points[i] == ip)
170 break;
171 }
172
173 return i;
174}
175
176static void lock_time_inc(struct lock_time *lt, u64 time)
177{
178 if (time > lt->max)
179 lt->max = time;
180
181 if (time < lt->min || !lt->nr)
182 lt->min = time;
183
184 lt->total += time;
185 lt->nr++;
186}
187
188static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
189{
190 if (!src->nr)
191 return;
192
193 if (src->max > dst->max)
194 dst->max = src->max;
195
196 if (src->min < dst->min || !dst->nr)
197 dst->min = src->min;
198
199 dst->total += src->total;
200 dst->nr += src->nr;
201}
202
203struct lock_class_stats lock_stats(struct lock_class *class)
204{
205 struct lock_class_stats stats;
206 int cpu, i;
207
208 memset(&stats, 0, sizeof(struct lock_class_stats));
209 for_each_possible_cpu(cpu) {
210 struct lock_class_stats *pcs =
211 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
212
213 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
214 stats.contention_point[i] += pcs->contention_point[i];
215
216 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
217 stats.contending_point[i] += pcs->contending_point[i];
218
219 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
220 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
221
222 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
223 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
224
225 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
226 stats.bounces[i] += pcs->bounces[i];
227 }
228
229 return stats;
230}
231
232void clear_lock_stats(struct lock_class *class)
233{
234 int cpu;
235
236 for_each_possible_cpu(cpu) {
237 struct lock_class_stats *cpu_stats =
238 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
239
240 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
241 }
242 memset(class->contention_point, 0, sizeof(class->contention_point));
243 memset(class->contending_point, 0, sizeof(class->contending_point));
244}
245
246static struct lock_class_stats *get_lock_stats(struct lock_class *class)
247{
248 return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
249}
250
251static void put_lock_stats(struct lock_class_stats *stats)
252{
253 put_cpu_var(cpu_lock_stats);
254}
255
256static void lock_release_holdtime(struct held_lock *hlock)
257{
258 struct lock_class_stats *stats;
259 u64 holdtime;
260
261 if (!lock_stat)
262 return;
263
264 holdtime = lockstat_clock() - hlock->holdtime_stamp;
265
266 stats = get_lock_stats(hlock_class(hlock));
267 if (hlock->read)
268 lock_time_inc(&stats->read_holdtime, holdtime);
269 else
270 lock_time_inc(&stats->write_holdtime, holdtime);
271 put_lock_stats(stats);
272}
273#else
274static inline void lock_release_holdtime(struct held_lock *hlock)
275{
276}
277#endif
278
279/*
280 * We keep a global list of all lock classes. The list only grows,
281 * never shrinks. The list is only accessed with the lockdep
282 * spinlock lock held.
283 */
284LIST_HEAD(all_lock_classes);
285
286/*
287 * The lockdep classes are in a hash-table as well, for fast lookup:
288 */
289#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
290#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
291#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
292#define classhashentry(key) (classhash_table + __classhashfn((key)))
293
294static struct hlist_head classhash_table[CLASSHASH_SIZE];
295
296/*
297 * We put the lock dependency chains into a hash-table as well, to cache
298 * their existence:
299 */
300#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
301#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
302#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
303#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
304
305static struct hlist_head chainhash_table[CHAINHASH_SIZE];
306
307/*
308 * The hash key of the lock dependency chains is a hash itself too:
309 * it's a hash of all locks taken up to that lock, including that lock.
310 * It's a 64-bit hash, because it's important for the keys to be
311 * unique.
312 */
313static inline u64 iterate_chain_key(u64 key, u32 idx)
314{
315 u32 k0 = key, k1 = key >> 32;
316
317 __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
318
319 return k0 | (u64)k1 << 32;
320}
321
322void lockdep_off(void)
323{
324 current->lockdep_recursion++;
325}
326EXPORT_SYMBOL(lockdep_off);
327
328void lockdep_on(void)
329{
330 current->lockdep_recursion--;
331}
332EXPORT_SYMBOL(lockdep_on);
333
334/*
335 * Debugging switches:
336 */
337
338#define VERBOSE 0
339#define VERY_VERBOSE 0
340
341#if VERBOSE
342# define HARDIRQ_VERBOSE 1
343# define SOFTIRQ_VERBOSE 1
344# define RECLAIM_VERBOSE 1
345#else
346# define HARDIRQ_VERBOSE 0
347# define SOFTIRQ_VERBOSE 0
348# define RECLAIM_VERBOSE 0
349#endif
350
351#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
352/*
353 * Quick filtering for interesting events:
354 */
355static int class_filter(struct lock_class *class)
356{
357#if 0
358 /* Example */
359 if (class->name_version == 1 &&
360 !strcmp(class->name, "lockname"))
361 return 1;
362 if (class->name_version == 1 &&
363 !strcmp(class->name, "&struct->lockfield"))
364 return 1;
365#endif
366 /* Filter everything else. 1 would be to allow everything else */
367 return 0;
368}
369#endif
370
371static int verbose(struct lock_class *class)
372{
373#if VERBOSE
374 return class_filter(class);
375#endif
376 return 0;
377}
378
379/*
380 * Stack-trace: tightly packed array of stack backtrace
381 * addresses. Protected by the graph_lock.
382 */
383unsigned long nr_stack_trace_entries;
384static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
385
386static void print_lockdep_off(const char *bug_msg)
387{
388 printk(KERN_DEBUG "%s\n", bug_msg);
389 printk(KERN_DEBUG "turning off the locking correctness validator.\n");
390#ifdef CONFIG_LOCK_STAT
391 printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
392#endif
393}
394
395static int save_trace(struct stack_trace *trace)
396{
397 trace->nr_entries = 0;
398 trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
399 trace->entries = stack_trace + nr_stack_trace_entries;
400
401 trace->skip = 3;
402
403 save_stack_trace(trace);
404
405 /*
406 * Some daft arches put -1 at the end to indicate its a full trace.
407 *
408 * <rant> this is buggy anyway, since it takes a whole extra entry so a
409 * complete trace that maxes out the entries provided will be reported
410 * as incomplete, friggin useless </rant>
411 */
412 if (trace->nr_entries != 0 &&
413 trace->entries[trace->nr_entries-1] == ULONG_MAX)
414 trace->nr_entries--;
415
416 trace->max_entries = trace->nr_entries;
417
418 nr_stack_trace_entries += trace->nr_entries;
419
420 if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
421 if (!debug_locks_off_graph_unlock())
422 return 0;
423
424 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
425 dump_stack();
426
427 return 0;
428 }
429
430 return 1;
431}
432
433unsigned int nr_hardirq_chains;
434unsigned int nr_softirq_chains;
435unsigned int nr_process_chains;
436unsigned int max_lockdep_depth;
437
438#ifdef CONFIG_DEBUG_LOCKDEP
439/*
440 * Various lockdep statistics:
441 */
442DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
443#endif
444
445/*
446 * Locking printouts:
447 */
448
449#define __USAGE(__STATE) \
450 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
451 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
452 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
453 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
454
455static const char *usage_str[] =
456{
457#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
458#include "lockdep_states.h"
459#undef LOCKDEP_STATE
460 [LOCK_USED] = "INITIAL USE",
461};
462
463const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
464{
465 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
466}
467
468static inline unsigned long lock_flag(enum lock_usage_bit bit)
469{
470 return 1UL << bit;
471}
472
473static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
474{
475 char c = '.';
476
477 if (class->usage_mask & lock_flag(bit + 2))
478 c = '+';
479 if (class->usage_mask & lock_flag(bit)) {
480 c = '-';
481 if (class->usage_mask & lock_flag(bit + 2))
482 c = '?';
483 }
484
485 return c;
486}
487
488void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
489{
490 int i = 0;
491
492#define LOCKDEP_STATE(__STATE) \
493 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
494 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
495#include "lockdep_states.h"
496#undef LOCKDEP_STATE
497
498 usage[i] = '\0';
499}
500
501static void __print_lock_name(struct lock_class *class)
502{
503 char str[KSYM_NAME_LEN];
504 const char *name;
505
506 name = class->name;
507 if (!name) {
508 name = __get_key_name(class->key, str);
509 printk(KERN_CONT "%s", name);
510 } else {
511 printk(KERN_CONT "%s", name);
512 if (class->name_version > 1)
513 printk(KERN_CONT "#%d", class->name_version);
514 if (class->subclass)
515 printk(KERN_CONT "/%d", class->subclass);
516 }
517}
518
519static void print_lock_name(struct lock_class *class)
520{
521 char usage[LOCK_USAGE_CHARS];
522
523 get_usage_chars(class, usage);
524
525 printk(KERN_CONT " (");
526 __print_lock_name(class);
527 printk(KERN_CONT "){%s}", usage);
528}
529
530static void print_lockdep_cache(struct lockdep_map *lock)
531{
532 const char *name;
533 char str[KSYM_NAME_LEN];
534
535 name = lock->name;
536 if (!name)
537 name = __get_key_name(lock->key->subkeys, str);
538
539 printk(KERN_CONT "%s", name);
540}
541
542static void print_lock(struct held_lock *hlock)
543{
544 /*
545 * We can be called locklessly through debug_show_all_locks() so be
546 * extra careful, the hlock might have been released and cleared.
547 */
548 unsigned int class_idx = hlock->class_idx;
549
550 /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
551 barrier();
552
553 if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
554 printk(KERN_CONT "<RELEASED>\n");
555 return;
556 }
557
558 print_lock_name(lock_classes + class_idx - 1);
559 printk(KERN_CONT ", at: [<%p>] %pS\n",
560 (void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
561}
562
563static void lockdep_print_held_locks(struct task_struct *curr)
564{
565 int i, depth = curr->lockdep_depth;
566
567 if (!depth) {
568 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
569 return;
570 }
571 printk("%d lock%s held by %s/%d:\n",
572 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
573
574 for (i = 0; i < depth; i++) {
575 printk(" #%d: ", i);
576 print_lock(curr->held_locks + i);
577 }
578}
579
580static void print_kernel_ident(void)
581{
582 printk("%s %.*s %s\n", init_utsname()->release,
583 (int)strcspn(init_utsname()->version, " "),
584 init_utsname()->version,
585 print_tainted());
586}
587
588static int very_verbose(struct lock_class *class)
589{
590#if VERY_VERBOSE
591 return class_filter(class);
592#endif
593 return 0;
594}
595
596/*
597 * Is this the address of a static object:
598 */
599#ifdef __KERNEL__
600static int static_obj(void *obj)
601{
602 unsigned long start = (unsigned long) &_stext,
603 end = (unsigned long) &_end,
604 addr = (unsigned long) obj;
605
606 /*
607 * static variable?
608 */
609 if ((addr >= start) && (addr < end))
610 return 1;
611
612 if (arch_is_kernel_data(addr))
613 return 1;
614
615 /*
616 * in-kernel percpu var?
617 */
618 if (is_kernel_percpu_address(addr))
619 return 1;
620
621 /*
622 * module static or percpu var?
623 */
624 return is_module_address(addr) || is_module_percpu_address(addr);
625}
626#endif
627
628/*
629 * To make lock name printouts unique, we calculate a unique
630 * class->name_version generation counter:
631 */
632static int count_matching_names(struct lock_class *new_class)
633{
634 struct lock_class *class;
635 int count = 0;
636
637 if (!new_class->name)
638 return 0;
639
640 list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
641 if (new_class->key - new_class->subclass == class->key)
642 return class->name_version;
643 if (class->name && !strcmp(class->name, new_class->name))
644 count = max(count, class->name_version);
645 }
646
647 return count + 1;
648}
649
650/*
651 * Register a lock's class in the hash-table, if the class is not present
652 * yet. Otherwise we look it up. We cache the result in the lock object
653 * itself, so actual lookup of the hash should be once per lock object.
654 */
655static inline struct lock_class *
656look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
657{
658 struct lockdep_subclass_key *key;
659 struct hlist_head *hash_head;
660 struct lock_class *class;
661
662 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
663 debug_locks_off();
664 printk(KERN_ERR
665 "BUG: looking up invalid subclass: %u\n", subclass);
666 printk(KERN_ERR
667 "turning off the locking correctness validator.\n");
668 dump_stack();
669 return NULL;
670 }
671
672 /*
673 * Static locks do not have their class-keys yet - for them the key
674 * is the lock object itself:
675 */
676 if (unlikely(!lock->key))
677 lock->key = (void *)lock;
678
679 /*
680 * NOTE: the class-key must be unique. For dynamic locks, a static
681 * lock_class_key variable is passed in through the mutex_init()
682 * (or spin_lock_init()) call - which acts as the key. For static
683 * locks we use the lock object itself as the key.
684 */
685 BUILD_BUG_ON(sizeof(struct lock_class_key) >
686 sizeof(struct lockdep_map));
687
688 key = lock->key->subkeys + subclass;
689
690 hash_head = classhashentry(key);
691
692 /*
693 * We do an RCU walk of the hash, see lockdep_free_key_range().
694 */
695 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
696 return NULL;
697
698 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
699 if (class->key == key) {
700 /*
701 * Huh! same key, different name? Did someone trample
702 * on some memory? We're most confused.
703 */
704 WARN_ON_ONCE(class->name != lock->name);
705 return class;
706 }
707 }
708
709 return NULL;
710}
711
712/*
713 * Register a lock's class in the hash-table, if the class is not present
714 * yet. Otherwise we look it up. We cache the result in the lock object
715 * itself, so actual lookup of the hash should be once per lock object.
716 */
717static struct lock_class *
718register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
719{
720 struct lockdep_subclass_key *key;
721 struct hlist_head *hash_head;
722 struct lock_class *class;
723
724 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
725
726 class = look_up_lock_class(lock, subclass);
727 if (likely(class))
728 goto out_set_class_cache;
729
730 /*
731 * Debug-check: all keys must be persistent!
732 */
733 if (!static_obj(lock->key)) {
734 debug_locks_off();
735 printk("INFO: trying to register non-static key.\n");
736 printk("the code is fine but needs lockdep annotation.\n");
737 printk("turning off the locking correctness validator.\n");
738 dump_stack();
739
740 return NULL;
741 }
742
743 key = lock->key->subkeys + subclass;
744 hash_head = classhashentry(key);
745
746 if (!graph_lock()) {
747 return NULL;
748 }
749 /*
750 * We have to do the hash-walk again, to avoid races
751 * with another CPU:
752 */
753 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
754 if (class->key == key)
755 goto out_unlock_set;
756 }
757
758 /*
759 * Allocate a new key from the static array, and add it to
760 * the hash:
761 */
762 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
763 if (!debug_locks_off_graph_unlock()) {
764 return NULL;
765 }
766
767 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
768 dump_stack();
769 return NULL;
770 }
771 class = lock_classes + nr_lock_classes++;
772 debug_atomic_inc(nr_unused_locks);
773 class->key = key;
774 class->name = lock->name;
775 class->subclass = subclass;
776 INIT_LIST_HEAD(&class->lock_entry);
777 INIT_LIST_HEAD(&class->locks_before);
778 INIT_LIST_HEAD(&class->locks_after);
779 class->name_version = count_matching_names(class);
780 /*
781 * We use RCU's safe list-add method to make
782 * parallel walking of the hash-list safe:
783 */
784 hlist_add_head_rcu(&class->hash_entry, hash_head);
785 /*
786 * Add it to the global list of classes:
787 */
788 list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
789
790 if (verbose(class)) {
791 graph_unlock();
792
793 printk("\nnew class %p: %s", class->key, class->name);
794 if (class->name_version > 1)
795 printk(KERN_CONT "#%d", class->name_version);
796 printk(KERN_CONT "\n");
797 dump_stack();
798
799 if (!graph_lock()) {
800 return NULL;
801 }
802 }
803out_unlock_set:
804 graph_unlock();
805
806out_set_class_cache:
807 if (!subclass || force)
808 lock->class_cache[0] = class;
809 else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
810 lock->class_cache[subclass] = class;
811
812 /*
813 * Hash collision, did we smoke some? We found a class with a matching
814 * hash but the subclass -- which is hashed in -- didn't match.
815 */
816 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
817 return NULL;
818
819 return class;
820}
821
822#ifdef CONFIG_PROVE_LOCKING
823/*
824 * Allocate a lockdep entry. (assumes the graph_lock held, returns
825 * with NULL on failure)
826 */
827static struct lock_list *alloc_list_entry(void)
828{
829 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
830 if (!debug_locks_off_graph_unlock())
831 return NULL;
832
833 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
834 dump_stack();
835 return NULL;
836 }
837 return list_entries + nr_list_entries++;
838}
839
840/*
841 * Add a new dependency to the head of the list:
842 */
843static int add_lock_to_list(struct lock_class *this, struct list_head *head,
844 unsigned long ip, int distance,
845 struct stack_trace *trace)
846{
847 struct lock_list *entry;
848 /*
849 * Lock not present yet - get a new dependency struct and
850 * add it to the list:
851 */
852 entry = alloc_list_entry();
853 if (!entry)
854 return 0;
855
856 entry->class = this;
857 entry->distance = distance;
858 entry->trace = *trace;
859 /*
860 * Both allocation and removal are done under the graph lock; but
861 * iteration is under RCU-sched; see look_up_lock_class() and
862 * lockdep_free_key_range().
863 */
864 list_add_tail_rcu(&entry->entry, head);
865
866 return 1;
867}
868
869/*
870 * For good efficiency of modular, we use power of 2
871 */
872#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
873#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
874
875/*
876 * The circular_queue and helpers is used to implement the
877 * breadth-first search(BFS)algorithem, by which we can build
878 * the shortest path from the next lock to be acquired to the
879 * previous held lock if there is a circular between them.
880 */
881struct circular_queue {
882 unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
883 unsigned int front, rear;
884};
885
886static struct circular_queue lock_cq;
887
888unsigned int max_bfs_queue_depth;
889
890static unsigned int lockdep_dependency_gen_id;
891
892static inline void __cq_init(struct circular_queue *cq)
893{
894 cq->front = cq->rear = 0;
895 lockdep_dependency_gen_id++;
896}
897
898static inline int __cq_empty(struct circular_queue *cq)
899{
900 return (cq->front == cq->rear);
901}
902
903static inline int __cq_full(struct circular_queue *cq)
904{
905 return ((cq->rear + 1) & CQ_MASK) == cq->front;
906}
907
908static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
909{
910 if (__cq_full(cq))
911 return -1;
912
913 cq->element[cq->rear] = elem;
914 cq->rear = (cq->rear + 1) & CQ_MASK;
915 return 0;
916}
917
918static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
919{
920 if (__cq_empty(cq))
921 return -1;
922
923 *elem = cq->element[cq->front];
924 cq->front = (cq->front + 1) & CQ_MASK;
925 return 0;
926}
927
928static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
929{
930 return (cq->rear - cq->front) & CQ_MASK;
931}
932
933static inline void mark_lock_accessed(struct lock_list *lock,
934 struct lock_list *parent)
935{
936 unsigned long nr;
937
938 nr = lock - list_entries;
939 WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
940 lock->parent = parent;
941 lock->class->dep_gen_id = lockdep_dependency_gen_id;
942}
943
944static inline unsigned long lock_accessed(struct lock_list *lock)
945{
946 unsigned long nr;
947
948 nr = lock - list_entries;
949 WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
950 return lock->class->dep_gen_id == lockdep_dependency_gen_id;
951}
952
953static inline struct lock_list *get_lock_parent(struct lock_list *child)
954{
955 return child->parent;
956}
957
958static inline int get_lock_depth(struct lock_list *child)
959{
960 int depth = 0;
961 struct lock_list *parent;
962
963 while ((parent = get_lock_parent(child))) {
964 child = parent;
965 depth++;
966 }
967 return depth;
968}
969
970static int __bfs(struct lock_list *source_entry,
971 void *data,
972 int (*match)(struct lock_list *entry, void *data),
973 struct lock_list **target_entry,
974 int forward)
975{
976 struct lock_list *entry;
977 struct list_head *head;
978 struct circular_queue *cq = &lock_cq;
979 int ret = 1;
980
981 if (match(source_entry, data)) {
982 *target_entry = source_entry;
983 ret = 0;
984 goto exit;
985 }
986
987 if (forward)
988 head = &source_entry->class->locks_after;
989 else
990 head = &source_entry->class->locks_before;
991
992 if (list_empty(head))
993 goto exit;
994
995 __cq_init(cq);
996 __cq_enqueue(cq, (unsigned long)source_entry);
997
998 while (!__cq_empty(cq)) {
999 struct lock_list *lock;
1000
1001 __cq_dequeue(cq, (unsigned long *)&lock);
1002
1003 if (!lock->class) {
1004 ret = -2;
1005 goto exit;
1006 }
1007
1008 if (forward)
1009 head = &lock->class->locks_after;
1010 else
1011 head = &lock->class->locks_before;
1012
1013 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1014
1015 list_for_each_entry_rcu(entry, head, entry) {
1016 if (!lock_accessed(entry)) {
1017 unsigned int cq_depth;
1018 mark_lock_accessed(entry, lock);
1019 if (match(entry, data)) {
1020 *target_entry = entry;
1021 ret = 0;
1022 goto exit;
1023 }
1024
1025 if (__cq_enqueue(cq, (unsigned long)entry)) {
1026 ret = -1;
1027 goto exit;
1028 }
1029 cq_depth = __cq_get_elem_count(cq);
1030 if (max_bfs_queue_depth < cq_depth)
1031 max_bfs_queue_depth = cq_depth;
1032 }
1033 }
1034 }
1035exit:
1036 return ret;
1037}
1038
1039static inline int __bfs_forwards(struct lock_list *src_entry,
1040 void *data,
1041 int (*match)(struct lock_list *entry, void *data),
1042 struct lock_list **target_entry)
1043{
1044 return __bfs(src_entry, data, match, target_entry, 1);
1045
1046}
1047
1048static inline int __bfs_backwards(struct lock_list *src_entry,
1049 void *data,
1050 int (*match)(struct lock_list *entry, void *data),
1051 struct lock_list **target_entry)
1052{
1053 return __bfs(src_entry, data, match, target_entry, 0);
1054
1055}
1056
1057/*
1058 * Recursive, forwards-direction lock-dependency checking, used for
1059 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1060 * checking.
1061 */
1062
1063/*
1064 * Print a dependency chain entry (this is only done when a deadlock
1065 * has been detected):
1066 */
1067static noinline int
1068print_circular_bug_entry(struct lock_list *target, int depth)
1069{
1070 if (debug_locks_silent)
1071 return 0;
1072 printk("\n-> #%u", depth);
1073 print_lock_name(target->class);
1074 printk(KERN_CONT ":\n");
1075 print_stack_trace(&target->trace, 6);
1076
1077 return 0;
1078}
1079
1080static void
1081print_circular_lock_scenario(struct held_lock *src,
1082 struct held_lock *tgt,
1083 struct lock_list *prt)
1084{
1085 struct lock_class *source = hlock_class(src);
1086 struct lock_class *target = hlock_class(tgt);
1087 struct lock_class *parent = prt->class;
1088
1089 /*
1090 * A direct locking problem where unsafe_class lock is taken
1091 * directly by safe_class lock, then all we need to show
1092 * is the deadlock scenario, as it is obvious that the
1093 * unsafe lock is taken under the safe lock.
1094 *
1095 * But if there is a chain instead, where the safe lock takes
1096 * an intermediate lock (middle_class) where this lock is
1097 * not the same as the safe lock, then the lock chain is
1098 * used to describe the problem. Otherwise we would need
1099 * to show a different CPU case for each link in the chain
1100 * from the safe_class lock to the unsafe_class lock.
1101 */
1102 if (parent != source) {
1103 printk("Chain exists of:\n ");
1104 __print_lock_name(source);
1105 printk(KERN_CONT " --> ");
1106 __print_lock_name(parent);
1107 printk(KERN_CONT " --> ");
1108 __print_lock_name(target);
1109 printk(KERN_CONT "\n\n");
1110 }
1111
1112 printk(" Possible unsafe locking scenario:\n\n");
1113 printk(" CPU0 CPU1\n");
1114 printk(" ---- ----\n");
1115 printk(" lock(");
1116 __print_lock_name(target);
1117 printk(KERN_CONT ");\n");
1118 printk(" lock(");
1119 __print_lock_name(parent);
1120 printk(KERN_CONT ");\n");
1121 printk(" lock(");
1122 __print_lock_name(target);
1123 printk(KERN_CONT ");\n");
1124 printk(" lock(");
1125 __print_lock_name(source);
1126 printk(KERN_CONT ");\n");
1127 printk("\n *** DEADLOCK ***\n\n");
1128}
1129
1130/*
1131 * When a circular dependency is detected, print the
1132 * header first:
1133 */
1134static noinline int
1135print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1136 struct held_lock *check_src,
1137 struct held_lock *check_tgt)
1138{
1139 struct task_struct *curr = current;
1140
1141 if (debug_locks_silent)
1142 return 0;
1143
1144 printk("\n");
1145 printk("======================================================\n");
1146 printk("[ INFO: possible circular locking dependency detected ]\n");
1147 print_kernel_ident();
1148 printk("-------------------------------------------------------\n");
1149 printk("%s/%d is trying to acquire lock:\n",
1150 curr->comm, task_pid_nr(curr));
1151 print_lock(check_src);
1152 printk("\nbut task is already holding lock:\n");
1153 print_lock(check_tgt);
1154 printk("\nwhich lock already depends on the new lock.\n\n");
1155 printk("\nthe existing dependency chain (in reverse order) is:\n");
1156
1157 print_circular_bug_entry(entry, depth);
1158
1159 return 0;
1160}
1161
1162static inline int class_equal(struct lock_list *entry, void *data)
1163{
1164 return entry->class == data;
1165}
1166
1167static noinline int print_circular_bug(struct lock_list *this,
1168 struct lock_list *target,
1169 struct held_lock *check_src,
1170 struct held_lock *check_tgt)
1171{
1172 struct task_struct *curr = current;
1173 struct lock_list *parent;
1174 struct lock_list *first_parent;
1175 int depth;
1176
1177 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1178 return 0;
1179
1180 if (!save_trace(&this->trace))
1181 return 0;
1182
1183 depth = get_lock_depth(target);
1184
1185 print_circular_bug_header(target, depth, check_src, check_tgt);
1186
1187 parent = get_lock_parent(target);
1188 first_parent = parent;
1189
1190 while (parent) {
1191 print_circular_bug_entry(parent, --depth);
1192 parent = get_lock_parent(parent);
1193 }
1194
1195 printk("\nother info that might help us debug this:\n\n");
1196 print_circular_lock_scenario(check_src, check_tgt,
1197 first_parent);
1198
1199 lockdep_print_held_locks(curr);
1200
1201 printk("\nstack backtrace:\n");
1202 dump_stack();
1203
1204 return 0;
1205}
1206
1207static noinline int print_bfs_bug(int ret)
1208{
1209 if (!debug_locks_off_graph_unlock())
1210 return 0;
1211
1212 /*
1213 * Breadth-first-search failed, graph got corrupted?
1214 */
1215 WARN(1, "lockdep bfs error:%d\n", ret);
1216
1217 return 0;
1218}
1219
1220static int noop_count(struct lock_list *entry, void *data)
1221{
1222 (*(unsigned long *)data)++;
1223 return 0;
1224}
1225
1226static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1227{
1228 unsigned long count = 0;
1229 struct lock_list *uninitialized_var(target_entry);
1230
1231 __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1232
1233 return count;
1234}
1235unsigned long lockdep_count_forward_deps(struct lock_class *class)
1236{
1237 unsigned long ret, flags;
1238 struct lock_list this;
1239
1240 this.parent = NULL;
1241 this.class = class;
1242
1243 local_irq_save(flags);
1244 arch_spin_lock(&lockdep_lock);
1245 ret = __lockdep_count_forward_deps(&this);
1246 arch_spin_unlock(&lockdep_lock);
1247 local_irq_restore(flags);
1248
1249 return ret;
1250}
1251
1252static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1253{
1254 unsigned long count = 0;
1255 struct lock_list *uninitialized_var(target_entry);
1256
1257 __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1258
1259 return count;
1260}
1261
1262unsigned long lockdep_count_backward_deps(struct lock_class *class)
1263{
1264 unsigned long ret, flags;
1265 struct lock_list this;
1266
1267 this.parent = NULL;
1268 this.class = class;
1269
1270 local_irq_save(flags);
1271 arch_spin_lock(&lockdep_lock);
1272 ret = __lockdep_count_backward_deps(&this);
1273 arch_spin_unlock(&lockdep_lock);
1274 local_irq_restore(flags);
1275
1276 return ret;
1277}
1278
1279/*
1280 * Prove that the dependency graph starting at <entry> can not
1281 * lead to <target>. Print an error and return 0 if it does.
1282 */
1283static noinline int
1284check_noncircular(struct lock_list *root, struct lock_class *target,
1285 struct lock_list **target_entry)
1286{
1287 int result;
1288
1289 debug_atomic_inc(nr_cyclic_checks);
1290
1291 result = __bfs_forwards(root, target, class_equal, target_entry);
1292
1293 return result;
1294}
1295
1296#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1297/*
1298 * Forwards and backwards subgraph searching, for the purposes of
1299 * proving that two subgraphs can be connected by a new dependency
1300 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1301 */
1302
1303static inline int usage_match(struct lock_list *entry, void *bit)
1304{
1305 return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1306}
1307
1308
1309
1310/*
1311 * Find a node in the forwards-direction dependency sub-graph starting
1312 * at @root->class that matches @bit.
1313 *
1314 * Return 0 if such a node exists in the subgraph, and put that node
1315 * into *@target_entry.
1316 *
1317 * Return 1 otherwise and keep *@target_entry unchanged.
1318 * Return <0 on error.
1319 */
1320static int
1321find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1322 struct lock_list **target_entry)
1323{
1324 int result;
1325
1326 debug_atomic_inc(nr_find_usage_forwards_checks);
1327
1328 result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1329
1330 return result;
1331}
1332
1333/*
1334 * Find a node in the backwards-direction dependency sub-graph starting
1335 * at @root->class that matches @bit.
1336 *
1337 * Return 0 if such a node exists in the subgraph, and put that node
1338 * into *@target_entry.
1339 *
1340 * Return 1 otherwise and keep *@target_entry unchanged.
1341 * Return <0 on error.
1342 */
1343static int
1344find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1345 struct lock_list **target_entry)
1346{
1347 int result;
1348
1349 debug_atomic_inc(nr_find_usage_backwards_checks);
1350
1351 result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1352
1353 return result;
1354}
1355
1356static void print_lock_class_header(struct lock_class *class, int depth)
1357{
1358 int bit;
1359
1360 printk("%*s->", depth, "");
1361 print_lock_name(class);
1362 printk(KERN_CONT " ops: %lu", class->ops);
1363 printk(KERN_CONT " {\n");
1364
1365 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1366 if (class->usage_mask & (1 << bit)) {
1367 int len = depth;
1368
1369 len += printk("%*s %s", depth, "", usage_str[bit]);
1370 len += printk(KERN_CONT " at:\n");
1371 print_stack_trace(class->usage_traces + bit, len);
1372 }
1373 }
1374 printk("%*s }\n", depth, "");
1375
1376 printk("%*s ... key at: [<%p>] %pS\n",
1377 depth, "", class->key, class->key);
1378}
1379
1380/*
1381 * printk the shortest lock dependencies from @start to @end in reverse order:
1382 */
1383static void __used
1384print_shortest_lock_dependencies(struct lock_list *leaf,
1385 struct lock_list *root)
1386{
1387 struct lock_list *entry = leaf;
1388 int depth;
1389
1390 /*compute depth from generated tree by BFS*/
1391 depth = get_lock_depth(leaf);
1392
1393 do {
1394 print_lock_class_header(entry->class, depth);
1395 printk("%*s ... acquired at:\n", depth, "");
1396 print_stack_trace(&entry->trace, 2);
1397 printk("\n");
1398
1399 if (depth == 0 && (entry != root)) {
1400 printk("lockdep:%s bad path found in chain graph\n", __func__);
1401 break;
1402 }
1403
1404 entry = get_lock_parent(entry);
1405 depth--;
1406 } while (entry && (depth >= 0));
1407
1408 return;
1409}
1410
1411static void
1412print_irq_lock_scenario(struct lock_list *safe_entry,
1413 struct lock_list *unsafe_entry,
1414 struct lock_class *prev_class,
1415 struct lock_class *next_class)
1416{
1417 struct lock_class *safe_class = safe_entry->class;
1418 struct lock_class *unsafe_class = unsafe_entry->class;
1419 struct lock_class *middle_class = prev_class;
1420
1421 if (middle_class == safe_class)
1422 middle_class = next_class;
1423
1424 /*
1425 * A direct locking problem where unsafe_class lock is taken
1426 * directly by safe_class lock, then all we need to show
1427 * is the deadlock scenario, as it is obvious that the
1428 * unsafe lock is taken under the safe lock.
1429 *
1430 * But if there is a chain instead, where the safe lock takes
1431 * an intermediate lock (middle_class) where this lock is
1432 * not the same as the safe lock, then the lock chain is
1433 * used to describe the problem. Otherwise we would need
1434 * to show a different CPU case for each link in the chain
1435 * from the safe_class lock to the unsafe_class lock.
1436 */
1437 if (middle_class != unsafe_class) {
1438 printk("Chain exists of:\n ");
1439 __print_lock_name(safe_class);
1440 printk(KERN_CONT " --> ");
1441 __print_lock_name(middle_class);
1442 printk(KERN_CONT " --> ");
1443 __print_lock_name(unsafe_class);
1444 printk(KERN_CONT "\n\n");
1445 }
1446
1447 printk(" Possible interrupt unsafe locking scenario:\n\n");
1448 printk(" CPU0 CPU1\n");
1449 printk(" ---- ----\n");
1450 printk(" lock(");
1451 __print_lock_name(unsafe_class);
1452 printk(KERN_CONT ");\n");
1453 printk(" local_irq_disable();\n");
1454 printk(" lock(");
1455 __print_lock_name(safe_class);
1456 printk(KERN_CONT ");\n");
1457 printk(" lock(");
1458 __print_lock_name(middle_class);
1459 printk(KERN_CONT ");\n");
1460 printk(" <Interrupt>\n");
1461 printk(" lock(");
1462 __print_lock_name(safe_class);
1463 printk(KERN_CONT ");\n");
1464 printk("\n *** DEADLOCK ***\n\n");
1465}
1466
1467static int
1468print_bad_irq_dependency(struct task_struct *curr,
1469 struct lock_list *prev_root,
1470 struct lock_list *next_root,
1471 struct lock_list *backwards_entry,
1472 struct lock_list *forwards_entry,
1473 struct held_lock *prev,
1474 struct held_lock *next,
1475 enum lock_usage_bit bit1,
1476 enum lock_usage_bit bit2,
1477 const char *irqclass)
1478{
1479 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1480 return 0;
1481
1482 printk("\n");
1483 printk("======================================================\n");
1484 printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1485 irqclass, irqclass);
1486 print_kernel_ident();
1487 printk("------------------------------------------------------\n");
1488 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1489 curr->comm, task_pid_nr(curr),
1490 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1491 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1492 curr->hardirqs_enabled,
1493 curr->softirqs_enabled);
1494 print_lock(next);
1495
1496 printk("\nand this task is already holding:\n");
1497 print_lock(prev);
1498 printk("which would create a new lock dependency:\n");
1499 print_lock_name(hlock_class(prev));
1500 printk(KERN_CONT " ->");
1501 print_lock_name(hlock_class(next));
1502 printk(KERN_CONT "\n");
1503
1504 printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1505 irqclass);
1506 print_lock_name(backwards_entry->class);
1507 printk("\n... which became %s-irq-safe at:\n", irqclass);
1508
1509 print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1510
1511 printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1512 print_lock_name(forwards_entry->class);
1513 printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1514 printk("...");
1515
1516 print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1517
1518 printk("\nother info that might help us debug this:\n\n");
1519 print_irq_lock_scenario(backwards_entry, forwards_entry,
1520 hlock_class(prev), hlock_class(next));
1521
1522 lockdep_print_held_locks(curr);
1523
1524 printk("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
1525 if (!save_trace(&prev_root->trace))
1526 return 0;
1527 print_shortest_lock_dependencies(backwards_entry, prev_root);
1528
1529 printk("\nthe dependencies between the lock to be acquired");
1530 printk(" and %s-irq-unsafe lock:\n", irqclass);
1531 if (!save_trace(&next_root->trace))
1532 return 0;
1533 print_shortest_lock_dependencies(forwards_entry, next_root);
1534
1535 printk("\nstack backtrace:\n");
1536 dump_stack();
1537
1538 return 0;
1539}
1540
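/*
 * Prove that the new <prev> -> <next> dependency would not connect a
 * lock that already has <bit_backwards> usage (found by searching the
 * backwards-subgraph starting at <prev>) to a lock that has
 * <bit_forwards> usage (found by searching the forwards-subgraph
 * starting at <next>). If both exist, report the bad irq dependency;
 * returns 1 when the new dependency is safe.
 */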
1541static int
1542check_usage(struct task_struct *curr, struct held_lock *prev,
1543 struct held_lock *next, enum lock_usage_bit bit_backwards,
1544 enum lock_usage_bit bit_forwards, const char *irqclass)
1545{
1546 int ret;
1547 struct lock_list this, that;
1548 struct lock_list *uninitialized_var(target_entry);
1549 struct lock_list *uninitialized_var(target_entry1);
1550
1551 this.parent = NULL;
1552
1553 this.class = hlock_class(prev);
1554 ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1555 if (ret < 0)
1556 return print_bfs_bug(ret);
1557 if (ret == 1)
1558 return ret;
1559
1560 that.parent = NULL;
1561 that.class = hlock_class(next);
1562 ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1563 if (ret < 0)
1564 return print_bfs_bug(ret);
1565 if (ret == 1)
1566 return ret;
1567
1568 return print_bad_irq_dependency(curr, &this, &that,
1569 target_entry, target_entry1,
1570 prev, next,
1571 bit_backwards, bit_forwards, irqclass);
1572}
1573
1574static const char *state_names[] = {
1575#define LOCKDEP_STATE(__STATE) \
1576 __stringify(__STATE),
1577#include "lockdep_states.h"
1578#undef LOCKDEP_STATE
1579};
1580
1581static const char *state_rnames[] = {
1582#define LOCKDEP_STATE(__STATE) \
1583 __stringify(__STATE)"-READ",
1584#include "lockdep_states.h"
1585#undef LOCKDEP_STATE
1586};
1587
1588static inline const char *state_name(enum lock_usage_bit bit)
1589{
1590 return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1591}
1592
1593static int exclusive_bit(int new_bit)
1594{
1595 /*
1596 * USED_IN
1597 * USED_IN_READ
1598 * ENABLED
1599 * ENABLED_READ
1600 *
1601 * bit 0 - write/read
1602 * bit 1 - used_in/enabled
1603 * bit 2+ state
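	 *
	 * e.g. exclusive_bit(LOCK_USED_IN_HARDIRQ) == LOCK_ENABLED_HARDIRQ
	 * and exclusive_bit(LOCK_ENABLED_HARDIRQ_READ) == LOCK_USED_IN_HARDIRQ:
	 * the exclusive counterpart of a usage bit is the write-mode usage
	 * in the opposite direction.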
1604 */
1605
1606 int state = new_bit & ~3;
1607 int dir = new_bit & 2;
1608
	/*
	 * Keep the state, flip the direction bit and strip the read bit.
	 */
1612 return state | (dir ^ 2);
1613}
1614
1615static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1616 struct held_lock *next, enum lock_usage_bit bit)
1617{
1618 /*
1619 * Prove that the new dependency does not connect a hardirq-safe
1620 * lock with a hardirq-unsafe lock - to achieve this we search
1621 * the backwards-subgraph starting at <prev>, and the
1622 * forwards-subgraph starting at <next>:
1623 */
1624 if (!check_usage(curr, prev, next, bit,
1625 exclusive_bit(bit), state_name(bit)))
1626 return 0;
1627
1628 bit++; /* _READ */
1629
1630 /*
1631 * Prove that the new dependency does not connect a hardirq-safe-read
1632 * lock with a hardirq-unsafe lock - to achieve this we search
1633 * the backwards-subgraph starting at <prev>, and the
1634 * forwards-subgraph starting at <next>:
1635 */
1636 if (!check_usage(curr, prev, next, bit,
1637 exclusive_bit(bit), state_name(bit)))
1638 return 0;
1639
1640 return 1;
1641}
1642
1643static int
1644check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1645 struct held_lock *next)
1646{
1647#define LOCKDEP_STATE(__STATE) \
1648 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1649 return 0;
1650#include "lockdep_states.h"
1651#undef LOCKDEP_STATE
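	/*
	 * The include above expands, for each state in lockdep_states.h
	 * (HARDIRQ, SOFTIRQ, RECLAIM_FS), to e.g.:
	 *
	 *	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ))
	 *		return 0;
	 */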
1652
1653 return 1;
1654}
1655
1656static void inc_chains(void)
1657{
1658 if (current->hardirq_context)
1659 nr_hardirq_chains++;
1660 else {
1661 if (current->softirq_context)
1662 nr_softirq_chains++;
1663 else
1664 nr_process_chains++;
1665 }
1666}
1667
1668#else
1669
1670static inline int
1671check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1672 struct held_lock *next)
1673{
1674 return 1;
1675}
1676
1677static inline void inc_chains(void)
1678{
1679 nr_process_chains++;
1680}
1681
1682#endif
1683
1684static void
1685print_deadlock_scenario(struct held_lock *nxt,
1686 struct held_lock *prv)
1687{
1688 struct lock_class *next = hlock_class(nxt);
1689 struct lock_class *prev = hlock_class(prv);
1690
1691 printk(" Possible unsafe locking scenario:\n\n");
1692 printk(" CPU0\n");
1693 printk(" ----\n");
1694 printk(" lock(");
1695 __print_lock_name(prev);
1696 printk(KERN_CONT ");\n");
1697 printk(" lock(");
1698 __print_lock_name(next);
1699 printk(KERN_CONT ");\n");
1700 printk("\n *** DEADLOCK ***\n\n");
1701 printk(" May be due to missing lock nesting notation\n\n");
1702}
1703
1704static int
1705print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1706 struct held_lock *next)
1707{
1708 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1709 return 0;
1710
1711 printk("\n");
1712 printk("=============================================\n");
1713 printk("[ INFO: possible recursive locking detected ]\n");
1714 print_kernel_ident();
1715 printk("---------------------------------------------\n");
1716 printk("%s/%d is trying to acquire lock:\n",
1717 curr->comm, task_pid_nr(curr));
1718 print_lock(next);
1719 printk("\nbut task is already holding lock:\n");
1720 print_lock(prev);
1721
1722 printk("\nother info that might help us debug this:\n");
1723 print_deadlock_scenario(next, prev);
1724 lockdep_print_held_locks(curr);
1725
1726 printk("\nstack backtrace:\n");
1727 dump_stack();
1728
1729 return 0;
1730}
1731
1732/*
1733 * Check whether we are holding such a class already.
1734 *
1735 * (Note that this has to be done separately, because the graph cannot
1736 * detect such classes of deadlocks.)
1737 *
1738 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1739 */
1740static int
1741check_deadlock(struct task_struct *curr, struct held_lock *next,
1742 struct lockdep_map *next_instance, int read)
1743{
1744 struct held_lock *prev;
1745 struct held_lock *nest = NULL;
1746 int i;
1747
1748 for (i = 0; i < curr->lockdep_depth; i++) {
1749 prev = curr->held_locks + i;
1750
1751 if (prev->instance == next->nest_lock)
1752 nest = prev;
1753
1754 if (hlock_class(prev) != hlock_class(next))
1755 continue;
1756
1757 /*
1758 * Allow read-after-read recursion of the same
1759 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1760 */
1761 if ((read == 2) && prev->read)
1762 return 2;
1763
1764 /*
1765 * We're holding the nest_lock, which serializes this lock's
1766 * nesting behaviour.
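		 *
		 * e.g. (with illustrative names):
		 *	mutex_lock(&parent->mutex);
		 *	mutex_lock_nest_lock(&child1->mutex, &parent->mutex);
		 *	mutex_lock_nest_lock(&child2->mutex, &parent->mutex);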
1767 */
1768 if (nest)
1769 return 2;
1770
1771 return print_deadlock_bug(curr, prev, next);
1772 }
1773 return 1;
1774}
1775
1776/*
1777 * There was a chain-cache miss, and we are about to add a new dependency
1778 * to a previous lock. We recursively validate the following rules:
1779 *
1780 * - would the adding of the <prev> -> <next> dependency create a
1781 * circular dependency in the graph? [== circular deadlock]
1782 *
1783 * - does the new prev->next dependency connect any hardirq-safe lock
1784 * (in the full backwards-subgraph starting at <prev>) with any
1785 * hardirq-unsafe lock (in the full forwards-subgraph starting at
1786 * <next>)? [== illegal lock inversion with hardirq contexts]
1787 *
1788 * - does the new prev->next dependency connect any softirq-safe lock
1789 * (in the full backwards-subgraph starting at <prev>) with any
1790 * softirq-unsafe lock (in the full forwards-subgraph starting at
1791 * <next>)? [== illegal lock inversion with softirq contexts]
1792 *
1793 * any of these scenarios could lead to a deadlock.
1794 *
1795 * Then if all the validations pass, we add the forwards and backwards
1796 * dependency.
1797 */
1798static int
1799check_prev_add(struct task_struct *curr, struct held_lock *prev,
1800 struct held_lock *next, int distance, int *stack_saved)
1801{
1802 struct lock_list *entry;
1803 int ret;
1804 struct lock_list this;
1805 struct lock_list *uninitialized_var(target_entry);
1806 /*
1807 * Static variable, serialized by the graph_lock().
1808 *
1809 * We use this static variable to save the stack trace in case
1810 * we call into this function multiple times due to encountering
1811 * trylocks in the held lock stack.
1812 */
1813 static struct stack_trace trace;
1814
1815 /*
1816 * Prove that the new <prev> -> <next> dependency would not
1817 * create a circular dependency in the graph. (We do this by
1818 * forward-recursing into the graph starting at <next>, and
1819 * checking whether we can reach <prev>.)
1820 *
1821 * We are using global variables to control the recursion, to
1822 * keep the stackframe size of the recursive functions low:
1823 */
1824 this.class = hlock_class(next);
1825 this.parent = NULL;
1826 ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1827 if (unlikely(!ret))
1828 return print_circular_bug(&this, target_entry, next, prev);
1829 else if (unlikely(ret < 0))
1830 return print_bfs_bug(ret);
1831
1832 if (!check_prev_add_irq(curr, prev, next))
1833 return 0;
1834
1835 /*
1836 * For recursive read-locks we do all the dependency checks,
1837 * but we dont store read-triggered dependencies (only
1838 * write-triggered dependencies). This ensures that only the
1839 * write-side dependencies matter, and that if for example a
1840 * write-lock never takes any other locks, then the reads are
1841 * equivalent to a NOP.
1842 */
1843 if (next->read == 2 || prev->read == 2)
1844 return 1;
1845 /*
1846 * Is the <prev> -> <next> dependency already present?
1847 *
1848 * (this may occur even though this is a new chain: consider
1849 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1850 * chains - the second one will be new, but L1 already has
1851 * L2 added to its dependency list, due to the first chain.)
1852 */
1853 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1854 if (entry->class == hlock_class(next)) {
1855 if (distance == 1)
1856 entry->distance = 1;
1857 return 2;
1858 }
1859 }
1860
1861 if (!*stack_saved) {
1862 if (!save_trace(&trace))
1863 return 0;
1864 *stack_saved = 1;
1865 }
1866
1867 /*
1868 * Ok, all validations passed, add the new lock
1869 * to the previous lock's dependency list:
1870 */
1871 ret = add_lock_to_list(hlock_class(next),
1872 &hlock_class(prev)->locks_after,
1873 next->acquire_ip, distance, &trace);
1874
1875 if (!ret)
1876 return 0;
1877
1878 ret = add_lock_to_list(hlock_class(prev),
1879 &hlock_class(next)->locks_before,
1880 next->acquire_ip, distance, &trace);
1881 if (!ret)
1882 return 0;
1883
1884 /*
1885 * Debugging printouts:
1886 */
1887 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1888 /* We drop graph lock, so another thread can overwrite trace. */
1889 *stack_saved = 0;
1890 graph_unlock();
1891 printk("\n new dependency: ");
1892 print_lock_name(hlock_class(prev));
1893 printk(KERN_CONT " => ");
1894 print_lock_name(hlock_class(next));
1895 printk(KERN_CONT "\n");
1896 dump_stack();
1897 return graph_lock();
1898 }
1899 return 1;
1900}
1901
1902/*
1903 * Add the dependency to all directly-previous locks that are 'relevant'.
1904 * The ones that are relevant are (in increasing distance from curr):
1905 * all consecutive trylock entries and the final non-trylock entry - or
1906 * the end of this context's lock-chain - whichever comes first.
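 *
 * E.g. with a held-lock stack of A (non-trylock), B (trylock), C
 * (trylock) and a new lock D, this adds C -> D, B -> D and A -> D,
 * stopping at A, the first non-trylock entry.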
1907 */
1908static int
1909check_prevs_add(struct task_struct *curr, struct held_lock *next)
1910{
1911 int depth = curr->lockdep_depth;
1912 int stack_saved = 0;
1913 struct held_lock *hlock;
1914
1915 /*
1916 * Debugging checks.
1917 *
1918 * Depth must not be zero for a non-head lock:
1919 */
1920 if (!depth)
1921 goto out_bug;
1922 /*
1923 * At least two relevant locks must exist for this
1924 * to be a head:
1925 */
1926 if (curr->held_locks[depth].irq_context !=
1927 curr->held_locks[depth-1].irq_context)
1928 goto out_bug;
1929
1930 for (;;) {
1931 int distance = curr->lockdep_depth - depth + 1;
1932 hlock = curr->held_locks + depth - 1;
1933 /*
1934 * Only non-recursive-read entries get new dependencies
1935 * added:
1936 */
1937 if (hlock->read != 2 && hlock->check) {
1938 if (!check_prev_add(curr, hlock, next,
1939 distance, &stack_saved))
1940 return 0;
1941 /*
1942 * Stop after the first non-trylock entry,
1943 * as non-trylock entries have added their
1944 * own direct dependencies already, so this
1945 * lock is connected to them indirectly:
1946 */
1947 if (!hlock->trylock)
1948 break;
1949 }
1950 depth--;
1951 /*
1952 * End of lock-stack?
1953 */
1954 if (!depth)
1955 break;
1956 /*
1957 * Stop the search if we cross into another context:
1958 */
1959 if (curr->held_locks[depth].irq_context !=
1960 curr->held_locks[depth-1].irq_context)
1961 break;
1962 }
1963 return 1;
1964out_bug:
1965 if (!debug_locks_off_graph_unlock())
1966 return 0;
1967
	/*
	 * Clearly we all shouldn't be here, but since we made it we
	 * can reliably say we messed up our state. See the above two
	 * gotos for reasons why we could possibly end up here.
	 */
1973 WARN_ON(1);
1974
1975 return 0;
1976}
1977
1978unsigned long nr_lock_chains;
1979struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1980int nr_chain_hlocks;
1981static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
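
/*
 * Each chain occupies chain->depth consecutive slots in chain_hlocks[],
 * starting at chain->base; every slot holds the class index of one held
 * lock in the chain (see lock_chain_get_class() and the chain setup in
 * lookup_chain_cache()).
 */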
1982
1983struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1984{
1985 return lock_classes + chain_hlocks[chain->base + i];
1986}
1987
1988/*
1989 * Returns the index of the first held_lock of the current chain
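 * (i.e. of the first held lock in the current irq context - locks taken
 * in an earlier context belong to a separate chain)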
1990 */
1991static inline int get_first_held_lock(struct task_struct *curr,
1992 struct held_lock *hlock)
1993{
1994 int i;
1995 struct held_lock *hlock_curr;
1996
1997 for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1998 hlock_curr = curr->held_locks + i;
1999 if (hlock_curr->irq_context != hlock->irq_context)
2000 break;
2001
2002 }
2003
2004 return ++i;
2005}
2006
2007#ifdef CONFIG_DEBUG_LOCKDEP
2008/*
2009 * Returns the next chain_key iteration
2010 */
2011static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2012{
2013 u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2014
2015 printk(" class_idx:%d -> chain_key:%016Lx",
2016 class_idx,
2017 (unsigned long long)new_chain_key);
2018 return new_chain_key;
2019}
2020
2021static void
2022print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2023{
2024 struct held_lock *hlock;
2025 u64 chain_key = 0;
2026 int depth = curr->lockdep_depth;
2027 int i;
2028
2029 printk("depth: %u\n", depth + 1);
2030 for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
2031 hlock = curr->held_locks + i;
2032 chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2033
2034 print_lock(hlock);
2035 }
2036
2037 print_chain_key_iteration(hlock_next->class_idx, chain_key);
2038 print_lock(hlock_next);
2039}
2040
2041static void print_chain_keys_chain(struct lock_chain *chain)
2042{
2043 int i;
2044 u64 chain_key = 0;
2045 int class_id;
2046
2047 printk("depth: %u\n", chain->depth);
2048 for (i = 0; i < chain->depth; i++) {
2049 class_id = chain_hlocks[chain->base + i];
2050 chain_key = print_chain_key_iteration(class_id + 1, chain_key);
2051
2052 print_lock_name(lock_classes + class_id);
2053 printk("\n");
2054 }
2055}
2056
2057static void print_collision(struct task_struct *curr,
2058 struct held_lock *hlock_next,
2059 struct lock_chain *chain)
2060{
2061 printk("\n");
2062 printk("======================\n");
2063 printk("[chain_key collision ]\n");
2064 print_kernel_ident();
2065 printk("----------------------\n");
2066 printk("%s/%d: ", current->comm, task_pid_nr(current));
2067 printk("Hash chain already cached but the contents don't match!\n");
2068
2069 printk("Held locks:");
2070 print_chain_keys_held_locks(curr, hlock_next);
2071
2072 printk("Locks in cached chain:");
2073 print_chain_keys_chain(chain);
2074
2075 printk("\nstack backtrace:\n");
2076 dump_stack();
2077}
2078#endif
2079
2080/*
2081 * Checks whether the chain and the current held locks are consistent
 * in depth and also in content. If they are not, it most likely means
2083 * that there was a collision during the calculation of the chain_key.
2084 * Returns: 0 not passed, 1 passed
2085 */
2086static int check_no_collision(struct task_struct *curr,
2087 struct held_lock *hlock,
2088 struct lock_chain *chain)
2089{
2090#ifdef CONFIG_DEBUG_LOCKDEP
2091 int i, j, id;
2092
2093 i = get_first_held_lock(curr, hlock);
2094
2095 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2096 print_collision(curr, hlock, chain);
2097 return 0;
2098 }
2099
2100 for (j = 0; j < chain->depth - 1; j++, i++) {
2101 id = curr->held_locks[i].class_idx - 1;
2102
2103 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2104 print_collision(curr, hlock, chain);
2105 return 0;
2106 }
2107 }
2108#endif
2109 return 1;
2110}
2111
2112/*
2113 * Look up a dependency chain. If the key is not present yet then
2114 * add it and return 1 - in this case the new dependency chain is
2115 * validated. If the key is already hashed, return 0.
2116 * (On return with 1 graph_lock is held.)
2117 */
2118static inline int lookup_chain_cache(struct task_struct *curr,
2119 struct held_lock *hlock,
2120 u64 chain_key)
2121{
2122 struct lock_class *class = hlock_class(hlock);
2123 struct hlist_head *hash_head = chainhashentry(chain_key);
2124 struct lock_chain *chain;
2125 int i, j;
2126
2127 /*
	 * We might need to take the graph lock, so ensure we've got IRQs
	 * disabled to make this an IRQ-safe lock. For recursion reasons
	 * lockdep won't complain about its own locking errors.
2131 */
2132 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2133 return 0;
2134 /*
2135 * We can walk it lock-free, because entries only get added
2136 * to the hash:
2137 */
2138 hlist_for_each_entry_rcu(chain, hash_head, entry) {
2139 if (chain->chain_key == chain_key) {
2140cache_hit:
2141 debug_atomic_inc(chain_lookup_hits);
2142 if (!check_no_collision(curr, hlock, chain))
2143 return 0;
2144
2145 if (very_verbose(class))
2146 printk("\nhash chain already cached, key: "
2147 "%016Lx tail class: [%p] %s\n",
2148 (unsigned long long)chain_key,
2149 class->key, class->name);
2150 return 0;
2151 }
2152 }
2153 if (very_verbose(class))
2154 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
2155 (unsigned long long)chain_key, class->key, class->name);
2156 /*
2157 * Allocate a new chain entry from the static array, and add
2158 * it to the hash:
2159 */
2160 if (!graph_lock())
2161 return 0;
2162 /*
2163 * We have to walk the chain again locked - to avoid duplicates:
2164 */
2165 hlist_for_each_entry(chain, hash_head, entry) {
2166 if (chain->chain_key == chain_key) {
2167 graph_unlock();
2168 goto cache_hit;
2169 }
2170 }
2171 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2172 if (!debug_locks_off_graph_unlock())
2173 return 0;
2174
2175 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2176 dump_stack();
2177 return 0;
2178 }
2179 chain = lock_chains + nr_lock_chains++;
2180 chain->chain_key = chain_key;
2181 chain->irq_context = hlock->irq_context;
2182 i = get_first_held_lock(curr, hlock);
2183 chain->depth = curr->lockdep_depth + 1 - i;
2184
2185 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2186 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
2187 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2188
2189 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2190 chain->base = nr_chain_hlocks;
2191 for (j = 0; j < chain->depth - 1; j++, i++) {
2192 int lock_id = curr->held_locks[i].class_idx - 1;
2193 chain_hlocks[chain->base + j] = lock_id;
2194 }
2195 chain_hlocks[chain->base + j] = class - lock_classes;
2196 }
2197
2198 if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
2199 nr_chain_hlocks += chain->depth;
2200
2201#ifdef CONFIG_DEBUG_LOCKDEP
2202 /*
2203 * Important for check_no_collision().
2204 */
2205 if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
		if (!debug_locks_off_graph_unlock())
2207 return 0;
2208
2209 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2210 dump_stack();
2211 return 0;
2212 }
2213#endif
2214
2215 hlist_add_head_rcu(&chain->entry, hash_head);
2216 debug_atomic_inc(chain_lookup_misses);
2217 inc_chains();
2218
2219 return 1;
2220}
2221
2222static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2223 struct held_lock *hlock, int chain_head, u64 chain_key)
2224{
2225 /*
2226 * Trylock needs to maintain the stack of held locks, but it
2227 * does not add new dependencies, because trylock can be done
2228 * in any order.
2229 *
2230 * We look up the chain_key and do the O(N^2) check and update of
2231 * the dependencies only if this is a new dependency chain.
2232 * (If lookup_chain_cache() returns with 1 it acquires
2233 * graph_lock for us)
2234 */
2235 if (!hlock->trylock && hlock->check &&
2236 lookup_chain_cache(curr, hlock, chain_key)) {
2237 /*
2238 * Check whether last held lock:
2239 *
2240 * - is irq-safe, if this lock is irq-unsafe
2241 * - is softirq-safe, if this lock is hardirq-unsafe
2242 *
2243 * And check whether the new lock's dependency graph
2244 * could lead back to the previous lock.
2245 *
		 * Any of these scenarios could lead to a deadlock. If all
		 * validations pass, we then add the prev -> next dependency.
2248 */
2249 int ret = check_deadlock(curr, hlock, lock, hlock->read);
2250
2251 if (!ret)
2252 return 0;
2253 /*
2254 * Mark recursive read, as we jump over it when
2255 * building dependencies (just like we jump over
2256 * trylock entries):
2257 */
2258 if (ret == 2)
2259 hlock->read = 2;
2260 /*
2261 * Add dependency only if this lock is not the head
2262 * of the chain, and if it's not a secondary read-lock:
2263 */
2264 if (!chain_head && ret != 2)
2265 if (!check_prevs_add(curr, hlock))
2266 return 0;
2267 graph_unlock();
2268 } else
2269 /* after lookup_chain_cache(): */
2270 if (unlikely(!debug_locks))
2271 return 0;
2272
2273 return 1;
2274}
2275#else
2276static inline int validate_chain(struct task_struct *curr,
2277 struct lockdep_map *lock, struct held_lock *hlock,
2278 int chain_head, u64 chain_key)
2279{
2280 return 1;
2281}
2282#endif
2283
2284/*
2285 * We are building curr_chain_key incrementally, so double-check
2286 * it from scratch, to make sure that it's done correctly:
2287 */
2288static void check_chain_key(struct task_struct *curr)
2289{
2290#ifdef CONFIG_DEBUG_LOCKDEP
2291 struct held_lock *hlock, *prev_hlock = NULL;
2292 unsigned int i;
2293 u64 chain_key = 0;
2294
2295 for (i = 0; i < curr->lockdep_depth; i++) {
2296 hlock = curr->held_locks + i;
2297 if (chain_key != hlock->prev_chain_key) {
2298 debug_locks_off();
			/*
			 * We got mighty confused, our chain keys don't match
			 * what we expect - did someone trample on our task state?
			 */
2303 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2304 curr->lockdep_depth, i,
2305 (unsigned long long)chain_key,
2306 (unsigned long long)hlock->prev_chain_key);
2307 return;
2308 }
2309 /*
2310 * Whoops ran out of static storage again?
2311 */
2312 if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2313 return;
2314
2315 if (prev_hlock && (prev_hlock->irq_context !=
2316 hlock->irq_context))
2317 chain_key = 0;
2318 chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2319 prev_hlock = hlock;
2320 }
2321 if (chain_key != curr->curr_chain_key) {
2322 debug_locks_off();
2323 /*
2324 * More smoking hash instead of calculating it, damn see these
2325 * numbers float.. I bet that a pink elephant stepped on my memory.
2326 */
2327 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2328 curr->lockdep_depth, i,
2329 (unsigned long long)chain_key,
2330 (unsigned long long)curr->curr_chain_key);
2331 }
2332#endif
2333}
2334
2335static void
2336print_usage_bug_scenario(struct held_lock *lock)
2337{
2338 struct lock_class *class = hlock_class(lock);
2339
2340 printk(" Possible unsafe locking scenario:\n\n");
2341 printk(" CPU0\n");
2342 printk(" ----\n");
2343 printk(" lock(");
2344 __print_lock_name(class);
2345 printk(KERN_CONT ");\n");
2346 printk(" <Interrupt>\n");
2347 printk(" lock(");
2348 __print_lock_name(class);
2349 printk(KERN_CONT ");\n");
2350 printk("\n *** DEADLOCK ***\n\n");
2351}
2352
2353static int
2354print_usage_bug(struct task_struct *curr, struct held_lock *this,
2355 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2356{
2357 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2358 return 0;
2359
2360 printk("\n");
2361 printk("=================================\n");
2362 printk("[ INFO: inconsistent lock state ]\n");
2363 print_kernel_ident();
2364 printk("---------------------------------\n");
2365
2366 printk("inconsistent {%s} -> {%s} usage.\n",
2367 usage_str[prev_bit], usage_str[new_bit]);
2368
2369 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2370 curr->comm, task_pid_nr(curr),
2371 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2372 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2373 trace_hardirqs_enabled(curr),
2374 trace_softirqs_enabled(curr));
2375 print_lock(this);
2376
2377 printk("{%s} state was registered at:\n", usage_str[prev_bit]);
2378 print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2379
2380 print_irqtrace_events(curr);
2381 printk("\nother info that might help us debug this:\n");
2382 print_usage_bug_scenario(this);
2383
2384 lockdep_print_held_locks(curr);
2385
2386 printk("\nstack backtrace:\n");
2387 dump_stack();
2388
2389 return 0;
2390}
2391
2392/*
2393 * Print out an error if an invalid bit is set:
2394 */
2395static inline int
2396valid_state(struct task_struct *curr, struct held_lock *this,
2397 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2398{
2399 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2400 return print_usage_bug(curr, this, bad_bit, new_bit);
2401 return 1;
2402}
2403
2404static int mark_lock(struct task_struct *curr, struct held_lock *this,
2405 enum lock_usage_bit new_bit);
2406
2407#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2408
2409/*
2410 * print irq inversion bug:
2411 */
2412static int
2413print_irq_inversion_bug(struct task_struct *curr,
2414 struct lock_list *root, struct lock_list *other,
2415 struct held_lock *this, int forwards,
2416 const char *irqclass)
2417{
2418 struct lock_list *entry = other;
2419 struct lock_list *middle = NULL;
2420 int depth;
2421
2422 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2423 return 0;
2424
2425 printk("\n");
2426 printk("=========================================================\n");
2427 printk("[ INFO: possible irq lock inversion dependency detected ]\n");
2428 print_kernel_ident();
2429 printk("---------------------------------------------------------\n");
2430 printk("%s/%d just changed the state of lock:\n",
2431 curr->comm, task_pid_nr(curr));
2432 print_lock(this);
2433 if (forwards)
2434 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2435 else
2436 printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2437 print_lock_name(other->class);
2438 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2439
2440 printk("\nother info that might help us debug this:\n");
2441
2442 /* Find a middle lock (if one exists) */
2443 depth = get_lock_depth(other);
2444 do {
2445 if (depth == 0 && (entry != root)) {
2446 printk("lockdep:%s bad path found in chain graph\n", __func__);
2447 break;
2448 }
2449 middle = entry;
2450 entry = get_lock_parent(entry);
2451 depth--;
2452 } while (entry && entry != root && (depth >= 0));
2453 if (forwards)
2454 print_irq_lock_scenario(root, other,
2455 middle ? middle->class : root->class, other->class);
2456 else
2457 print_irq_lock_scenario(other, root,
2458 middle ? middle->class : other->class, root->class);
2459
2460 lockdep_print_held_locks(curr);
2461
2462 printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2463 if (!save_trace(&root->trace))
2464 return 0;
2465 print_shortest_lock_dependencies(other, root);
2466
2467 printk("\nstack backtrace:\n");
2468 dump_stack();
2469
2470 return 0;
2471}
2472
2473/*
2474 * Prove that in the forwards-direction subgraph starting at <this>
2475 * there is no lock matching <mask>:
2476 */
2477static int
2478check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2479 enum lock_usage_bit bit, const char *irqclass)
2480{
2481 int ret;
2482 struct lock_list root;
2483 struct lock_list *uninitialized_var(target_entry);
2484
2485 root.parent = NULL;
2486 root.class = hlock_class(this);
2487 ret = find_usage_forwards(&root, bit, &target_entry);
2488 if (ret < 0)
2489 return print_bfs_bug(ret);
2490 if (ret == 1)
2491 return ret;
2492
2493 return print_irq_inversion_bug(curr, &root, target_entry,
2494 this, 1, irqclass);
2495}
2496
2497/*
2498 * Prove that in the backwards-direction subgraph starting at <this>
2499 * there is no lock matching <mask>:
2500 */
2501static int
2502check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2503 enum lock_usage_bit bit, const char *irqclass)
2504{
2505 int ret;
2506 struct lock_list root;
2507 struct lock_list *uninitialized_var(target_entry);
2508
2509 root.parent = NULL;
2510 root.class = hlock_class(this);
2511 ret = find_usage_backwards(&root, bit, &target_entry);
2512 if (ret < 0)
2513 return print_bfs_bug(ret);
2514 if (ret == 1)
2515 return ret;
2516
2517 return print_irq_inversion_bug(curr, &root, target_entry,
2518 this, 0, irqclass);
2519}
2520
2521void print_irqtrace_events(struct task_struct *curr)
2522{
2523 printk("irq event stamp: %u\n", curr->irq_events);
2524 printk("hardirqs last enabled at (%u): [<%p>] %pS\n",
2525 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
2526 (void *)curr->hardirq_enable_ip);
2527 printk("hardirqs last disabled at (%u): [<%p>] %pS\n",
2528 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
2529 (void *)curr->hardirq_disable_ip);
2530 printk("softirqs last enabled at (%u): [<%p>] %pS\n",
2531 curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
2532 (void *)curr->softirq_enable_ip);
2533 printk("softirqs last disabled at (%u): [<%p>] %pS\n",
2534 curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
2535 (void *)curr->softirq_disable_ip);
2536}
2537
2538static int HARDIRQ_verbose(struct lock_class *class)
2539{
2540#if HARDIRQ_VERBOSE
2541 return class_filter(class);
2542#endif
2543 return 0;
2544}
2545
2546static int SOFTIRQ_verbose(struct lock_class *class)
2547{
2548#if SOFTIRQ_VERBOSE
2549 return class_filter(class);
2550#endif
2551 return 0;
2552}
2553
2554static int RECLAIM_FS_verbose(struct lock_class *class)
2555{
2556#if RECLAIM_VERBOSE
2557 return class_filter(class);
2558#endif
2559 return 0;
2560}
2561
2562#define STRICT_READ_CHECKS 1
2563
2564static int (*state_verbose_f[])(struct lock_class *class) = {
2565#define LOCKDEP_STATE(__STATE) \
2566 __STATE##_verbose,
2567#include "lockdep_states.h"
2568#undef LOCKDEP_STATE
2569};
2570
2571static inline int state_verbose(enum lock_usage_bit bit,
2572 struct lock_class *class)
2573{
2574 return state_verbose_f[bit >> 2](class);
2575}
2576
2577typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2578 enum lock_usage_bit bit, const char *name);
2579
2580static int
2581mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2582 enum lock_usage_bit new_bit)
2583{
2584 int excl_bit = exclusive_bit(new_bit);
2585 int read = new_bit & 1;
2586 int dir = new_bit & 2;
2587
2588 /*
2589 * mark USED_IN has to look forwards -- to ensure no dependency
2590 * has ENABLED state, which would allow recursion deadlocks.
2591 *
2592 * mark ENABLED has to look backwards -- to ensure no dependee
2593 * has USED_IN state, which, again, would allow recursion deadlocks.
2594 */
2595 check_usage_f usage = dir ?
2596 check_usage_backwards : check_usage_forwards;
2597
2598 /*
2599 * Validate that this particular lock does not have conflicting
2600 * usage states.
2601 */
2602 if (!valid_state(curr, this, new_bit, excl_bit))
2603 return 0;
2604
2605 /*
2606 * Validate that the lock dependencies don't have conflicting usage
2607 * states.
2608 */
2609 if ((!read || !dir || STRICT_READ_CHECKS) &&
2610 !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2611 return 0;
2612
2613 /*
2614 * Check for read in write conflicts
2615 */
2616 if (!read) {
2617 if (!valid_state(curr, this, new_bit, excl_bit + 1))
2618 return 0;
2619
2620 if (STRICT_READ_CHECKS &&
2621 !usage(curr, this, excl_bit + 1,
2622 state_name(new_bit + 1)))
2623 return 0;
2624 }
2625
2626 if (state_verbose(new_bit, hlock_class(this)))
2627 return 2;
2628
2629 return 1;
2630}
2631
2632enum mark_type {
2633#define LOCKDEP_STATE(__STATE) __STATE,
2634#include "lockdep_states.h"
2635#undef LOCKDEP_STATE
2636};
2637
2638/*
2639 * Mark all held locks with a usage bit:
2640 */
2641static int
2642mark_held_locks(struct task_struct *curr, enum mark_type mark)
2643{
2644 enum lock_usage_bit usage_bit;
2645 struct held_lock *hlock;
2646 int i;
2647
2648 for (i = 0; i < curr->lockdep_depth; i++) {
2649 hlock = curr->held_locks + i;
2650
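		/*
		 * e.g. mark == HARDIRQ gives LOCK_ENABLED_HARDIRQ, or
		 * LOCK_ENABLED_HARDIRQ_READ when the lock is read-held:
		 */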
2651 usage_bit = 2 + (mark << 2); /* ENABLED */
2652 if (hlock->read)
2653 usage_bit += 1; /* READ */
2654
2655 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2656
2657 if (!hlock->check)
2658 continue;
2659
2660 if (!mark_lock(curr, hlock, usage_bit))
2661 return 0;
2662 }
2663
2664 return 1;
2665}
2666
2667/*
2668 * Hardirqs will be enabled:
2669 */
2670static void __trace_hardirqs_on_caller(unsigned long ip)
2671{
2672 struct task_struct *curr = current;
2673
2674 /* we'll do an OFF -> ON transition: */
2675 curr->hardirqs_enabled = 1;
2676
2677 /*
2678 * We are going to turn hardirqs on, so set the
2679 * usage bit for all held locks:
2680 */
2681 if (!mark_held_locks(curr, HARDIRQ))
2682 return;
2683 /*
2684 * If we have softirqs enabled, then set the usage
2685 * bit for all held locks. (disabled hardirqs prevented
2686 * this bit from being set before)
2687 */
2688 if (curr->softirqs_enabled)
2689 if (!mark_held_locks(curr, SOFTIRQ))
2690 return;
2691
2692 curr->hardirq_enable_ip = ip;
2693 curr->hardirq_enable_event = ++curr->irq_events;
2694 debug_atomic_inc(hardirqs_on_events);
2695}
2696
2697__visible void trace_hardirqs_on_caller(unsigned long ip)
2698{
2699 time_hardirqs_on(CALLER_ADDR0, ip);
2700
2701 if (unlikely(!debug_locks || current->lockdep_recursion))
2702 return;
2703
2704 if (unlikely(current->hardirqs_enabled)) {
2705 /*
2706 * Neither irq nor preemption are disabled here
2707 * so this is racy by nature but losing one hit
2708 * in a stat is not a big deal.
2709 */
2710 __debug_atomic_inc(redundant_hardirqs_on);
2711 return;
2712 }
2713
2714 /*
2715 * We're enabling irqs and according to our state above irqs weren't
2716 * already enabled, yet we find the hardware thinks they are in fact
2717 * enabled.. someone messed up their IRQ state tracing.
2718 */
2719 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2720 return;
2721
2722 /*
2723 * See the fine text that goes along with this variable definition.
2724 */
2725 if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2726 return;
2727
2728 /*
2729 * Can't allow enabling interrupts while in an interrupt handler,
2730 * that's general bad form and such. Recursion, limited stack etc..
2731 */
2732 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2733 return;
2734
2735 current->lockdep_recursion = 1;
2736 __trace_hardirqs_on_caller(ip);
2737 current->lockdep_recursion = 0;
2738}
2739EXPORT_SYMBOL(trace_hardirqs_on_caller);
2740
2741void trace_hardirqs_on(void)
2742{
2743 trace_hardirqs_on_caller(CALLER_ADDR0);
2744}
2745EXPORT_SYMBOL(trace_hardirqs_on);
2746
2747/*
2748 * Hardirqs were disabled:
2749 */
2750__visible void trace_hardirqs_off_caller(unsigned long ip)
2751{
2752 struct task_struct *curr = current;
2753
2754 time_hardirqs_off(CALLER_ADDR0, ip);
2755
2756 if (unlikely(!debug_locks || current->lockdep_recursion))
2757 return;
2758
2759 /*
2760 * So we're supposed to get called after you mask local IRQs, but for
2761 * some reason the hardware doesn't quite think you did a proper job.
2762 */
2763 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2764 return;
2765
2766 if (curr->hardirqs_enabled) {
2767 /*
2768 * We have done an ON -> OFF transition:
2769 */
2770 curr->hardirqs_enabled = 0;
2771 curr->hardirq_disable_ip = ip;
2772 curr->hardirq_disable_event = ++curr->irq_events;
2773 debug_atomic_inc(hardirqs_off_events);
2774 } else
2775 debug_atomic_inc(redundant_hardirqs_off);
2776}
2777EXPORT_SYMBOL(trace_hardirqs_off_caller);
2778
2779void trace_hardirqs_off(void)
2780{
2781 trace_hardirqs_off_caller(CALLER_ADDR0);
2782}
2783EXPORT_SYMBOL(trace_hardirqs_off);
2784
2785/*
2786 * Softirqs will be enabled:
2787 */
2788void trace_softirqs_on(unsigned long ip)
2789{
2790 struct task_struct *curr = current;
2791
2792 if (unlikely(!debug_locks || current->lockdep_recursion))
2793 return;
2794
2795 /*
	 * We fancy IRQs being disabled here, see softirq.c - it avoids
	 * funny state and nesting things.
2798 */
2799 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2800 return;
2801
2802 if (curr->softirqs_enabled) {
2803 debug_atomic_inc(redundant_softirqs_on);
2804 return;
2805 }
2806
2807 current->lockdep_recursion = 1;
2808 /*
2809 * We'll do an OFF -> ON transition:
2810 */
2811 curr->softirqs_enabled = 1;
2812 curr->softirq_enable_ip = ip;
2813 curr->softirq_enable_event = ++curr->irq_events;
2814 debug_atomic_inc(softirqs_on_events);
2815 /*
2816 * We are going to turn softirqs on, so set the
2817 * usage bit for all held locks, if hardirqs are
2818 * enabled too:
2819 */
2820 if (curr->hardirqs_enabled)
2821 mark_held_locks(curr, SOFTIRQ);
2822 current->lockdep_recursion = 0;
2823}
2824
2825/*
2826 * Softirqs were disabled:
2827 */
2828void trace_softirqs_off(unsigned long ip)
2829{
2830 struct task_struct *curr = current;
2831
2832 if (unlikely(!debug_locks || current->lockdep_recursion))
2833 return;
2834
2835 /*
2836 * We fancy IRQs being disabled here, see softirq.c
2837 */
2838 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2839 return;
2840
2841 if (curr->softirqs_enabled) {
2842 /*
2843 * We have done an ON -> OFF transition:
2844 */
2845 curr->softirqs_enabled = 0;
2846 curr->softirq_disable_ip = ip;
2847 curr->softirq_disable_event = ++curr->irq_events;
2848 debug_atomic_inc(softirqs_off_events);
2849 /*
2850 * Whoops, we wanted softirqs off, so why aren't they?
2851 */
2852 DEBUG_LOCKS_WARN_ON(!softirq_count());
2853 } else
2854 debug_atomic_inc(redundant_softirqs_off);
2855}
2856
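/*
 * We're about to perform a __GFP_FS allocation: mark all currently held
 * locks as RECLAIM_FS-enabled, i.e. held over an allocation that may
 * recurse into fs reclaim, so that taking any of them from within
 * reclaim is later flagged as an inversion.
 */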
2857static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2858{
2859 struct task_struct *curr = current;
2860
2861 if (unlikely(!debug_locks))
2862 return;
2863
2864 /* no reclaim without waiting on it */
2865 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
2866 return;
2867
2868 /* this guy won't enter reclaim */
2869 if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2870 return;
2871
	/* We're only interested in __GFP_FS allocations for now */
2873 if (!(gfp_mask & __GFP_FS))
2874 return;
2875
2876 /*
2877 * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
2878 */
2879 if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2880 return;
2881
2882 mark_held_locks(curr, RECLAIM_FS);
2883}
2884
2885static void check_flags(unsigned long flags);
2886
2887void lockdep_trace_alloc(gfp_t gfp_mask)
2888{
2889 unsigned long flags;
2890
2891 if (unlikely(current->lockdep_recursion))
2892 return;
2893
2894 raw_local_irq_save(flags);
2895 check_flags(flags);
2896 current->lockdep_recursion = 1;
2897 __lockdep_trace_alloc(gfp_mask, flags);
2898 current->lockdep_recursion = 0;
2899 raw_local_irq_restore(flags);
2900}
2901
2902static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2903{
2904 /*
2905 * If non-trylock use in a hardirq or softirq context, then
2906 * mark the lock as used in these contexts:
2907 */
2908 if (!hlock->trylock) {
2909 if (hlock->read) {
2910 if (curr->hardirq_context)
2911 if (!mark_lock(curr, hlock,
2912 LOCK_USED_IN_HARDIRQ_READ))
2913 return 0;
2914 if (curr->softirq_context)
2915 if (!mark_lock(curr, hlock,
2916 LOCK_USED_IN_SOFTIRQ_READ))
2917 return 0;
2918 } else {
2919 if (curr->hardirq_context)
2920 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2921 return 0;
2922 if (curr->softirq_context)
2923 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2924 return 0;
2925 }
2926 }
2927 if (!hlock->hardirqs_off) {
2928 if (hlock->read) {
2929 if (!mark_lock(curr, hlock,
2930 LOCK_ENABLED_HARDIRQ_READ))
2931 return 0;
2932 if (curr->softirqs_enabled)
2933 if (!mark_lock(curr, hlock,
2934 LOCK_ENABLED_SOFTIRQ_READ))
2935 return 0;
2936 } else {
2937 if (!mark_lock(curr, hlock,
2938 LOCK_ENABLED_HARDIRQ))
2939 return 0;
2940 if (curr->softirqs_enabled)
2941 if (!mark_lock(curr, hlock,
2942 LOCK_ENABLED_SOFTIRQ))
2943 return 0;
2944 }
2945 }
2946
2947 /*
	 * We reuse the irq context infrastructure more broadly as general
	 * context checking code. This tests GFP_FS recursion (a lock taken
2950 * during reclaim for a GFP_FS allocation is held over a GFP_FS
2951 * allocation).
2952 */
2953 if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2954 if (hlock->read) {
2955 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2956 return 0;
2957 } else {
2958 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2959 return 0;
2960 }
2961 }
2962
2963 return 1;
2964}
2965
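/*
 * Encode the current irq context: 0 for process context, 1 for softirq,
 * 2 for hardirq, 3 for a hardirq that hit while in softirq context.
 */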
2966static inline unsigned int task_irq_context(struct task_struct *task)
2967{
2968 return 2 * !!task->hardirq_context + !!task->softirq_context;
2969}
2970
2971static int separate_irq_context(struct task_struct *curr,
2972 struct held_lock *hlock)
2973{
2974 unsigned int depth = curr->lockdep_depth;
2975
2976 /*
2977 * Keep track of points where we cross into an interrupt context:
2978 */
2979 if (depth) {
2980 struct held_lock *prev_hlock;
2981
2982 prev_hlock = curr->held_locks + depth-1;
2983 /*
2984 * If we cross into another context, reset the
2985 * hash key (this also prevents the checking and the
2986 * adding of the dependency to 'prev'):
2987 */
2988 if (prev_hlock->irq_context != hlock->irq_context)
2989 return 1;
2990 }
2991 return 0;
2992}
2993
2994#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
2995
2996static inline
2997int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2998 enum lock_usage_bit new_bit)
2999{
	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAGS */
3001 return 1;
3002}
3003
3004static inline int mark_irqflags(struct task_struct *curr,
3005 struct held_lock *hlock)
3006{
3007 return 1;
3008}
3009
3010static inline unsigned int task_irq_context(struct task_struct *task)
3011{
3012 return 0;
3013}
3014
3015static inline int separate_irq_context(struct task_struct *curr,
3016 struct held_lock *hlock)
3017{
3018 return 0;
3019}
3020
3021void lockdep_trace_alloc(gfp_t gfp_mask)
3022{
3023}
3024
3025#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3026
3027/*
3028 * Mark a lock with a usage bit, and validate the state transition:
3029 */
3030static int mark_lock(struct task_struct *curr, struct held_lock *this,
3031 enum lock_usage_bit new_bit)
3032{
3033 unsigned int new_mask = 1 << new_bit, ret = 1;
3034
3035 /*
3036 * If already set then do not dirty the cacheline,
3037 * nor do any checks:
3038 */
3039 if (likely(hlock_class(this)->usage_mask & new_mask))
3040 return 1;
3041
3042 if (!graph_lock())
3043 return 0;
3044 /*
3045 * Make sure we didn't race:
3046 */
3047 if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3048 graph_unlock();
3049 return 1;
3050 }
3051
3052 hlock_class(this)->usage_mask |= new_mask;
3053
3054 if (!save_trace(hlock_class(this)->usage_traces + new_bit))
3055 return 0;
3056
3057 switch (new_bit) {
3058#define LOCKDEP_STATE(__STATE) \
3059 case LOCK_USED_IN_##__STATE: \
3060 case LOCK_USED_IN_##__STATE##_READ: \
3061 case LOCK_ENABLED_##__STATE: \
3062 case LOCK_ENABLED_##__STATE##_READ:
3063#include "lockdep_states.h"
3064#undef LOCKDEP_STATE
3065 ret = mark_lock_irq(curr, this, new_bit);
3066 if (!ret)
3067 return 0;
3068 break;
3069 case LOCK_USED:
3070 debug_atomic_dec(nr_unused_locks);
3071 break;
3072 default:
3073 if (!debug_locks_off_graph_unlock())
3074 return 0;
3075 WARN_ON(1);
3076 return 0;
3077 }
3078
3079 graph_unlock();
3080
3081 /*
3082 * We must printk outside of the graph_lock:
3083 */
3084 if (ret == 2) {
3085 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3086 print_lock(this);
3087 print_irqtrace_events(curr);
3088 dump_stack();
3089 }
3090
3091 return ret;
3092}
3093
3094/*
3095 * Initialize a lock instance's lock-class mapping info:
3096 */
3097void lockdep_init_map(struct lockdep_map *lock, const char *name,
3098 struct lock_class_key *key, int subclass)
3099{
3100 int i;
3101
3102 kmemcheck_mark_initialized(lock, sizeof(*lock));
3103
3104 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3105 lock->class_cache[i] = NULL;
3106
3107#ifdef CONFIG_LOCK_STAT
3108 lock->cpu = raw_smp_processor_id();
3109#endif
3110
3111 /*
3112 * Can't be having no nameless bastards around this place!
3113 */
3114 if (DEBUG_LOCKS_WARN_ON(!name)) {
3115 lock->name = "NULL";
3116 return;
3117 }
3118
3119 lock->name = name;
3120
3121 /*
3122 * No key, no joy, we need to hash something.
3123 */
3124 if (DEBUG_LOCKS_WARN_ON(!key))
3125 return;
3126 /*
3127 * Sanity check, the lock-class key must be persistent:
3128 */
3129 if (!static_obj(key)) {
3130 printk("BUG: key %p not in .data!\n", key);
3131 /*
3132 * What it says above ^^^^^, I suggest you read it.
3133 */
3134 DEBUG_LOCKS_WARN_ON(1);
3135 return;
3136 }
3137 lock->key = key;
3138
3139 if (unlikely(!debug_locks))
3140 return;
3141
3142 if (subclass) {
3143 unsigned long flags;
3144
3145 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3146 return;
3147
3148 raw_local_irq_save(flags);
3149 current->lockdep_recursion = 1;
3150 register_lock_class(lock, subclass, 1);
3151 current->lockdep_recursion = 0;
3152 raw_local_irq_restore(flags);
3153 }
3154}
3155EXPORT_SYMBOL_GPL(lockdep_init_map);
3156
3157struct lock_class_key __lockdep_no_validate__;
3158EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3159
3160static int
3161print_lock_nested_lock_not_held(struct task_struct *curr,
3162 struct held_lock *hlock,
3163 unsigned long ip)
3164{
3165 if (!debug_locks_off())
3166 return 0;
3167 if (debug_locks_silent)
3168 return 0;
3169
3170 printk("\n");
3171 printk("==================================\n");
3172 printk("[ BUG: Nested lock was not taken ]\n");
3173 print_kernel_ident();
3174 printk("----------------------------------\n");
3175
3176 printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3177 print_lock(hlock);
3178
3179 printk("\nbut this task is not holding:\n");
3180 printk("%s\n", hlock->nest_lock->name);
3181
3185 printk("\nother info that might help us debug this:\n");
3186 lockdep_print_held_locks(curr);
3187
3188 printk("\nstack backtrace:\n");
3189 dump_stack();
3190
3191 return 0;
3192}
3193
3194static int __lock_is_held(struct lockdep_map *lock, int read);
3195
3196/*
3197 * This gets called for every mutex_lock*()/spin_lock*() operation.
3198 * We maintain the dependency maps and validate the locking attempt:
3199 */
3200static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3201 int trylock, int read, int check, int hardirqs_off,
3202 struct lockdep_map *nest_lock, unsigned long ip,
3203 int references, int pin_count)
3204{
3205 struct task_struct *curr = current;
3206 struct lock_class *class = NULL;
3207 struct held_lock *hlock;
3208 unsigned int depth;
3209 int chain_head = 0;
3210 int class_idx;
3211 u64 chain_key;
3212
3213 if (unlikely(!debug_locks))
3214 return 0;
3215
3216 /*
3217 * Lockdep should run with IRQs disabled, otherwise we could
3218 * get an interrupt which would want to take locks, which would
3219 * end up in lockdep and have you got a head-ache already?
3220 */
3221 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3222 return 0;
3223
3224 if (!prove_locking || lock->key == &__lockdep_no_validate__)
3225 check = 0;
3226
3227 if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3228 class = lock->class_cache[subclass];
3229 /*
3230 * Not cached?
3231 */
3232 if (unlikely(!class)) {
3233 class = register_lock_class(lock, subclass, 0);
3234 if (!class)
3235 return 0;
3236 }
3237 atomic_inc((atomic_t *)&class->ops);
3238 if (very_verbose(class)) {
3239 printk("\nacquire class [%p] %s", class->key, class->name);
3240 if (class->name_version > 1)
3241 printk(KERN_CONT "#%d", class->name_version);
3242 printk(KERN_CONT "\n");
3243 dump_stack();
3244 }
3245
3246 /*
3247 * Add the lock to the list of currently held locks.
3248 * (we dont increase the depth just yet, up until the
3249 * dependency checks are done)
3250 */
3251 depth = curr->lockdep_depth;
3252 /*
3253 * Ran out of static storage for our per-task lock stack again have we?
3254 */
3255 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3256 return 0;
3257
3258 class_idx = class - lock_classes + 1;
3259
3260 if (depth) {
3261 hlock = curr->held_locks + depth - 1;
3262 if (hlock->class_idx == class_idx && nest_lock) {
3263 if (hlock->references)
3264 hlock->references++;
3265 else
3266 hlock->references = 2;
3267
3268 return 1;
3269 }
3270 }
3271
3272 hlock = curr->held_locks + depth;
3273 /*
3274 * Plain impossible, we just registered it and checked it weren't no
3275 * NULL like.. I bet this mushroom I ate was good!
3276 */
3277 if (DEBUG_LOCKS_WARN_ON(!class))
3278 return 0;
3279 hlock->class_idx = class_idx;
3280 hlock->acquire_ip = ip;
3281 hlock->instance = lock;
3282 hlock->nest_lock = nest_lock;
3283 hlock->irq_context = task_irq_context(curr);
3284 hlock->trylock = trylock;
3285 hlock->read = read;
3286 hlock->check = check;
3287 hlock->hardirqs_off = !!hardirqs_off;
3288 hlock->references = references;
3289#ifdef CONFIG_LOCK_STAT
3290 hlock->waittime_stamp = 0;
3291 hlock->holdtime_stamp = lockstat_clock();
3292#endif
3293 hlock->pin_count = pin_count;
3294
3295 if (check && !mark_irqflags(curr, hlock))
3296 return 0;
3297
3298 /* mark it as used: */
3299 if (!mark_lock(curr, hlock, LOCK_USED))
3300 return 0;
3301
3302 /*
3303 * Calculate the chain hash: it's the combined hash of all the
3304 * lock keys along the dependency chain. We save the hash value
3305 * at every step so that we can get the current hash easily
3306 * after unlock. The chain hash is then used to cache dependency
3307 * results.
3308 *
	 * The 'key ID' (the class index) is the most compact key value
	 * to drive the hash - we use it rather than class->key.
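	 *
	 * E.g. after acquiring classes A and then B in the same irq context:
	 *
	 *	chain_key = iterate_chain_key(iterate_chain_key(0, A_idx), B_idx)
	 *
	 * where A_idx and B_idx are the class indices (illustrative names).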
3311 */
3312 /*
3313 * Whoops, we did it again.. ran straight out of our static allocation.
3314 */
3315 if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
3316 return 0;
3317
3318 chain_key = curr->curr_chain_key;
3319 if (!depth) {
3320 /*
3321 * How can we have a chain hash when we ain't got no keys?!
3322 */
3323 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3324 return 0;
3325 chain_head = 1;
3326 }
3327
3328 hlock->prev_chain_key = chain_key;
3329 if (separate_irq_context(curr, hlock)) {
3330 chain_key = 0;
3331 chain_head = 1;
3332 }
3333 chain_key = iterate_chain_key(chain_key, class_idx);
3334
3335 if (nest_lock && !__lock_is_held(nest_lock, -1))
3336 return print_lock_nested_lock_not_held(curr, hlock, ip);
3337
3338 if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3339 return 0;
3340
3341 curr->curr_chain_key = chain_key;
3342 curr->lockdep_depth++;
3343 check_chain_key(curr);
3344#ifdef CONFIG_DEBUG_LOCKDEP
3345 if (unlikely(!debug_locks))
3346 return 0;
3347#endif
3348 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3349 debug_locks_off();
3350 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3351 printk(KERN_DEBUG "depth: %i max: %lu!\n",
3352 curr->lockdep_depth, MAX_LOCK_DEPTH);
3353
3354 lockdep_print_held_locks(current);
3355 debug_show_all_locks();
3356 dump_stack();
3357
3358 return 0;
3359 }
3360
3361 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3362 max_lockdep_depth = curr->lockdep_depth;
3363
3364 return 1;
3365}
3366
3367static int
3368print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3369 unsigned long ip)
3370{
3371 if (!debug_locks_off())
3372 return 0;
3373 if (debug_locks_silent)
3374 return 0;
3375
3376 printk("\n");
3377 printk("=====================================\n");
3378 printk("[ BUG: bad unlock balance detected! ]\n");
3379 print_kernel_ident();
3380 printk("-------------------------------------\n");
3381 printk("%s/%d is trying to release lock (",
3382 curr->comm, task_pid_nr(curr));
3383 print_lockdep_cache(lock);
3384 printk(KERN_CONT ") at:\n");
3385 print_ip_sym(ip);
3386 printk("but there are no more locks to release!\n");
3387 printk("\nother info that might help us debug this:\n");
3388 lockdep_print_held_locks(curr);
3389
3390 printk("\nstack backtrace:\n");
3391 dump_stack();
3392
3393 return 0;
3394}
3395
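/*
 * Check whether @hlock covers @lock: either it is @lock itself, or it
 * is a reference-counted acquisition (see __lock_acquire()) of the same
 * class that @lock maps to.
 */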
3396static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
3397{
3398 if (hlock->instance == lock)
3399 return 1;
3400
3401 if (hlock->references) {
3402 struct lock_class *class = lock->class_cache[0];
3403
3404 if (!class)
3405 class = look_up_lock_class(lock, 0);
3406
3407 /*
3408 * If look_up_lock_class() failed to find a class, we're trying
3409 * to test if we hold a lock that has never yet been acquired.
3410 * Clearly if the lock hasn't been acquired _ever_, we're not
3411 * holding it either, so report failure.
3412 */
3413 if (!class)
3414 return 0;
3415
3416 /*
3417 * References, but not a lock we're actually ref-counting?
3418 * State got messed up, follow the sites that change ->references
3419 * and try to make sense of it.
3420 */
3421 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3422 return 0;
3423
3424 if (hlock->class_idx == class - lock_classes + 1)
3425 return 1;
3426 }
3427
3428 return 0;
3429}
3430
3431static int
3432__lock_set_class(struct lockdep_map *lock, const char *name,
3433 struct lock_class_key *key, unsigned int subclass,
3434 unsigned long ip)
3435{
3436 struct task_struct *curr = current;
3437 struct held_lock *hlock, *prev_hlock;
3438 struct lock_class *class;
3439 unsigned int depth;
3440 int i;
3441
3442 depth = curr->lockdep_depth;
3443 /*
3444 * This function is about (re)setting the class of a held lock,
3445 * yet we're not actually holding any locks. Naughty user!
3446 */
3447 if (DEBUG_LOCKS_WARN_ON(!depth))
3448 return 0;
3449
3450 prev_hlock = NULL;
3451 for (i = depth-1; i >= 0; i--) {
3452 hlock = curr->held_locks + i;
3453 /*
3454 * We must not cross into another context:
3455 */
3456 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3457 break;
3458 if (match_held_lock(hlock, lock))
3459 goto found_it;
3460 prev_hlock = hlock;
3461 }
3462 return print_unlock_imbalance_bug(curr, lock, ip);
3463
3464found_it:
3465 lockdep_init_map(lock, name, key, 0);
3466 class = register_lock_class(lock, subclass, 0);
3467 hlock->class_idx = class - lock_classes + 1;
3468
3469 curr->lockdep_depth = i;
3470 curr->curr_chain_key = hlock->prev_chain_key;
3471
3472 for (; i < depth; i++) {
3473 hlock = curr->held_locks + i;
3474 if (!__lock_acquire(hlock->instance,
3475 hlock_class(hlock)->subclass, hlock->trylock,
3476 hlock->read, hlock->check, hlock->hardirqs_off,
3477 hlock->nest_lock, hlock->acquire_ip,
3478 hlock->references, hlock->pin_count))
3479 return 0;
3480 }
3481
3482 /*
3483 * I took it apart and put it back together again, except now I have
3484 * these 'spare' parts.. where shall I put them.
3485 */
3486 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3487 return 0;
3488 return 1;
3489}
3490
3491/*
 * Remove the lock from the list of currently held locks - this gets
3493 * called on mutex_unlock()/spin_unlock*() (or on a failed
3494 * mutex_lock_interruptible()).
3495 *
3496 * @nested is an hysterical artifact, needs a tree wide cleanup.
3497 */
static int
__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
        struct task_struct *curr = current;
        struct held_lock *hlock, *prev_hlock;
        unsigned int depth;
        int i;

        if (unlikely(!debug_locks))
                return 0;

        depth = curr->lockdep_depth;
        /*
         * So we're all set to release this lock.. wait what lock? We don't
         * own any locks, you've been drinking again?
         */
        if (DEBUG_LOCKS_WARN_ON(depth <= 0))
                return print_unlock_imbalance_bug(curr, lock, ip);

        /*
         * Check whether the lock exists in the current stack
         * of held locks:
         */
        prev_hlock = NULL;
        for (i = depth-1; i >= 0; i--) {
                hlock = curr->held_locks + i;
                /*
                 * We must not cross into another context:
                 */
                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
                        break;
                if (match_held_lock(hlock, lock))
                        goto found_it;
                prev_hlock = hlock;
        }
        return print_unlock_imbalance_bug(curr, lock, ip);

found_it:
        if (hlock->instance == lock)
                lock_release_holdtime(hlock);

        WARN(hlock->pin_count, "releasing a pinned lock\n");

        if (hlock->references) {
                hlock->references--;
                if (hlock->references) {
                        /*
                         * We had more than one reference; after dropping
                         * one we still have references left, so the
                         * current lock stack is still valid. We're done!
                         */
                        return 1;
                }
        }

        /*
         * We have the right lock to unlock, 'hlock' points to it.
         * Now we remove it from the stack, and add back the other
         * entries (if any), recalculating the hash along the way:
         */

        curr->lockdep_depth = i;
        curr->curr_chain_key = hlock->prev_chain_key;

        for (i++; i < depth; i++) {
                hlock = curr->held_locks + i;
                if (!__lock_acquire(hlock->instance,
                                hlock_class(hlock)->subclass, hlock->trylock,
                                hlock->read, hlock->check, hlock->hardirqs_off,
                                hlock->nest_lock, hlock->acquire_ip,
                                hlock->references, hlock->pin_count))
                        return 0;
        }

        /*
         * We had N bottles of beer on the wall, we drank one, but now
         * there's not N-1 bottles of beer left on the wall...
         */
        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
                return 0;

        return 1;
}

static int __lock_is_held(struct lockdep_map *lock, int read)
{
        struct task_struct *curr = current;
        int i;

        for (i = 0; i < curr->lockdep_depth; i++) {
                struct held_lock *hlock = curr->held_locks + i;

                if (match_held_lock(hlock, lock)) {
                        if (read == -1 || hlock->read == read)
                                return 1;

                        return 0;
                }
        }

        return 0;
}

static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
{
        struct pin_cookie cookie = NIL_COOKIE;
        struct task_struct *curr = current;
        int i;

        if (unlikely(!debug_locks))
                return cookie;

        for (i = 0; i < curr->lockdep_depth; i++) {
                struct held_lock *hlock = curr->held_locks + i;

                if (match_held_lock(hlock, lock)) {
                        /*
                         * Grab 16 bits of randomness; this is sufficient to
                         * not be guessable and still allows some pin nesting
                         * in our u32 pin_count.
                         */
                        cookie.val = 1 + (prandom_u32() >> 16);
                        hlock->pin_count += cookie.val;
                        return cookie;
                }
        }

        WARN(1, "pinning an unheld lock\n");
        return cookie;
}

static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
        struct task_struct *curr = current;
        int i;

        if (unlikely(!debug_locks))
                return;

        for (i = 0; i < curr->lockdep_depth; i++) {
                struct held_lock *hlock = curr->held_locks + i;

                if (match_held_lock(hlock, lock)) {
                        hlock->pin_count += cookie.val;
                        return;
                }
        }

        WARN(1, "pinning an unheld lock\n");
}

static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
        struct task_struct *curr = current;
        int i;

        if (unlikely(!debug_locks))
                return;

        for (i = 0; i < curr->lockdep_depth; i++) {
                struct held_lock *hlock = curr->held_locks + i;

                if (match_held_lock(hlock, lock)) {
                        if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
                                return;

                        hlock->pin_count -= cookie.val;

                        if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
                                hlock->pin_count = 0;

                        return;
                }
        }

        WARN(1, "unpinning an unheld lock\n");
}

/*
 * Check whether we follow the irq-flags state precisely:
 */
static void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
    defined(CONFIG_TRACE_IRQFLAGS)
        if (!debug_locks)
                return;

        if (irqs_disabled_flags(flags)) {
                if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
                        printk("possible reason: unannotated irqs-off.\n");
                }
        } else {
                if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
                        printk("possible reason: unannotated irqs-on.\n");
                }
        }

        /*
         * We don't accurately track softirq state in e.g.
         * hardirq contexts (such as on 4KSTACKS), so only
         * check if not in hardirq contexts:
         */
        if (!hardirq_count()) {
                if (softirq_count()) {
                        /* like the above, but with softirqs */
                        DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
                } else {
                        /* like the above, but softirqs should be enabled */
                        DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
                }
        }

        if (!debug_locks)
                print_irqtrace_events(current);
#endif
}
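
/*
 * What trips the "unannotated irqs-off" warning above, as a sketch:
 * raw_local_irq_disable() bypasses the irq-flags tracing hooks, so
 * lockdep still believes hardirqs are enabled when the next lock
 * operation reaches check_flags(). Illustrative only - real code
 * should use local_irq_disable(), which keeps the tracing in sync:
 */
static __maybe_unused void example_unannotated_irqs_off(spinlock_t *lock)
{
        raw_local_irq_disable();        /* lockdep is not told about this */
        spin_lock(lock);                /* check_flags() complains here */
        spin_unlock(lock);
        raw_local_irq_enable();
}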

void lock_set_class(struct lockdep_map *lock, const char *name,
                    struct lock_class_key *key, unsigned int subclass,
                    unsigned long ip)
{
        unsigned long flags;

        if (unlikely(current->lockdep_recursion))
                return;

        raw_local_irq_save(flags);
        current->lockdep_recursion = 1;
        check_flags(flags);
        if (__lock_set_class(lock, name, key, subclass, ip))
                check_chain_key(current);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_set_class);
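
/*
 * Callers usually reach this through the lock_set_subclass() wrapper in
 * <linux/lockdep.h>, which re-tags an already-held lock with a new
 * subclass without dropping it; the scheduler does this when unwinding
 * a nested double-rq-lock. A sketch (assuming the lock behind @dep_map
 * is currently held at a non-zero subclass):
 */
static __maybe_unused void example_drop_nesting(struct lockdep_map *dep_map)
{
        /* back to subclass 0 now that the nested partner is released */
        lock_set_subclass(dep_map, 0, _RET_IP_);
}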

/*
 * We are not always called with irqs disabled - do that here,
 * and also avoid lockdep recursion:
 */
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                          int trylock, int read, int check,
                          struct lockdep_map *nest_lock, unsigned long ip)
{
        unsigned long flags;

        if (unlikely(current->lockdep_recursion))
                return;

        raw_local_irq_save(flags);
        check_flags(flags);

        current->lockdep_recursion = 1;
        trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
        __lock_acquire(lock, subclass, trylock, read, check,
                       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);

void lock_release(struct lockdep_map *lock, int nested,
                          unsigned long ip)
{
        unsigned long flags;

        if (unlikely(current->lockdep_recursion))
                return;

        raw_local_irq_save(flags);
        check_flags(flags);
        current->lockdep_recursion = 1;
        trace_lock_release(lock, ip);
        if (__lock_release(lock, nested, ip))
                check_chain_key(current);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_release);
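
/*
 * lock_acquire()/lock_release() are the raw annotation API; lock
 * implementations wrap them via helpers such as spin_acquire() and
 * spin_release() from <linux/lockdep.h>. A sketch of how a home-grown
 * primitive would annotate itself ('struct demo_raw_lock' and both
 * functions are invented for the example; dep_map is assumed to have
 * been set up with lockdep_init_map() at init time):
 */
struct demo_raw_lock {
        arch_spinlock_t         raw;
        struct lockdep_map      dep_map;
};

static __maybe_unused void demo_raw_lock_acquire(struct demo_raw_lock *l)
{
        /* subclass 0, no trylock, exclusive (read=0), full checking */
        lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
        arch_spin_lock(&l->raw);
}

static __maybe_unused void demo_raw_lock_release(struct demo_raw_lock *l)
{
        lock_release(&l->dep_map, 0, _RET_IP_);
        arch_spin_unlock(&l->raw);
}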

int lock_is_held_type(struct lockdep_map *lock, int read)
{
        unsigned long flags;
        int ret = 0;

        if (unlikely(current->lockdep_recursion))
                return 1; /* avoid false negative lockdep_assert_held() */

        raw_local_irq_save(flags);
        check_flags(flags);

        current->lockdep_recursion = 1;
        ret = __lock_is_held(lock, read);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);

        return ret;
}
EXPORT_SYMBOL_GPL(lock_is_held_type);
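
/*
 * Most users reach this through lockdep_assert_held() and friends,
 * which turn a locking precondition into a checked annotation. A
 * sketch (assuming CONFIG_DEBUG_LOCK_ALLOC so spinlock_t carries a
 * dep_map; example_counter_inc() is made up):
 */
static __maybe_unused void example_counter_inc(spinlock_t *lock, u64 *ctr)
{
        lockdep_assert_held(lock);      /* caller must hold @lock */
        (*ctr)++;
}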

struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
{
        struct pin_cookie cookie = NIL_COOKIE;
        unsigned long flags;

        if (unlikely(current->lockdep_recursion))
                return cookie;

        raw_local_irq_save(flags);
        check_flags(flags);

        current->lockdep_recursion = 1;
        cookie = __lock_pin_lock(lock);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);

        return cookie;
}
EXPORT_SYMBOL_GPL(lock_pin_lock);

void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
        unsigned long flags;

        if (unlikely(current->lockdep_recursion))
                return;

        raw_local_irq_save(flags);
        check_flags(flags);

        current->lockdep_recursion = 1;
        __lock_repin_lock(lock, cookie);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_repin_lock);

void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
        unsigned long flags;

        if (unlikely(current->lockdep_recursion))
                return;

        raw_local_irq_save(flags);
        check_flags(flags);

        current->lockdep_recursion = 1;
        __lock_unpin_lock(lock, cookie);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_unpin_lock);
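
/*
 * Pinning asserts that a held lock must not be released, even briefly,
 * until the matching unpin; the random cookie makes an unbalanced unpin
 * stand out. The scheduler pins rq->lock this way via the
 * lockdep_pin_lock()/lockdep_unpin_lock() wrappers. A sketch (again
 * assuming CONFIG_DEBUG_LOCK_ALLOC for the spinlock_t dep_map):
 */
static __maybe_unused void example_pinned_section(spinlock_t *lock)
{
        struct pin_cookie cookie;

        spin_lock(lock);
        cookie = lockdep_pin_lock(lock);        /* nobody may drop it now */

        /* ... code that must not (transitively) release @lock ... */

        lockdep_unpin_lock(lock, cookie);
        spin_unlock(lock);
}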

void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
{
        current->lockdep_reclaim_gfp = gfp_mask;
}

void lockdep_clear_current_reclaim_state(void)
{
        current->lockdep_reclaim_gfp = 0;
}
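
/*
 * These two mark a task as running the page reclaim path, so that
 * PROVE_LOCKING can flag locks taken both under a GFP_KERNEL
 * allocation and inside reclaim - a potential recursion deadlock.
 * kswapd and direct reclaim bracket themselves like this (a sketch of
 * the pattern used in mm/vmscan.c):
 */
static __maybe_unused void example_reclaim_section(void)
{
        lockdep_set_current_reclaim_state(GFP_KERNEL);

        /* ... shrink caches, write back pages, take reclaim-side locks ... */

        lockdep_clear_current_reclaim_state();
}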

#ifdef CONFIG_LOCK_STAT
static int
print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
                           unsigned long ip)
{
        if (!debug_locks_off())
                return 0;
        if (debug_locks_silent)
                return 0;

        printk("\n");
        printk("=================================\n");
        printk("[ BUG: bad contention detected! ]\n");
        print_kernel_ident();
        printk("---------------------------------\n");
        printk("%s/%d is trying to contend lock (",
                curr->comm, task_pid_nr(curr));
        print_lockdep_cache(lock);
        printk(KERN_CONT ") at:\n");
        print_ip_sym(ip);
        printk("but there are no locks held!\n");
        printk("\nother info that might help us debug this:\n");
        lockdep_print_held_locks(curr);

        printk("\nstack backtrace:\n");
        dump_stack();

        return 0;
}

static void
__lock_contended(struct lockdep_map *lock, unsigned long ip)
{
        struct task_struct *curr = current;
        struct held_lock *hlock, *prev_hlock;
        struct lock_class_stats *stats;
        unsigned int depth;
        int i, contention_point, contending_point;

        depth = curr->lockdep_depth;
        /*
         * Whee, we contended on this lock, except it seems we're not
         * actually trying to acquire anything much at all..
         */
        if (DEBUG_LOCKS_WARN_ON(!depth))
                return;

        prev_hlock = NULL;
        for (i = depth-1; i >= 0; i--) {
                hlock = curr->held_locks + i;
                /*
                 * We must not cross into another context:
                 */
                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
                        break;
                if (match_held_lock(hlock, lock))
                        goto found_it;
                prev_hlock = hlock;
        }
        print_lock_contention_bug(curr, lock, ip);
        return;

found_it:
        if (hlock->instance != lock)
                return;

        hlock->waittime_stamp = lockstat_clock();

        contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
        contending_point = lock_point(hlock_class(hlock)->contending_point,
                                      lock->ip);

        stats = get_lock_stats(hlock_class(hlock));
        if (contention_point < LOCKSTAT_POINTS)
                stats->contention_point[contention_point]++;
        if (contending_point < LOCKSTAT_POINTS)
                stats->contending_point[contending_point]++;
        if (lock->cpu != smp_processor_id())
                stats->bounces[bounce_contended + !!hlock->read]++;
        put_lock_stats(stats);
}

static void
__lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
        struct task_struct *curr = current;
        struct held_lock *hlock, *prev_hlock;
        struct lock_class_stats *stats;
        unsigned int depth;
        u64 now, waittime = 0;
        int i, cpu;

        depth = curr->lockdep_depth;
        /*
         * Yay, we acquired ownership of this lock we didn't try to
         * acquire, how the heck did that happen?
         */
        if (DEBUG_LOCKS_WARN_ON(!depth))
                return;

        prev_hlock = NULL;
        for (i = depth-1; i >= 0; i--) {
                hlock = curr->held_locks + i;
                /*
                 * We must not cross into another context:
                 */
                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
                        break;
                if (match_held_lock(hlock, lock))
                        goto found_it;
                prev_hlock = hlock;
        }
        print_lock_contention_bug(curr, lock, _RET_IP_);
        return;

found_it:
        if (hlock->instance != lock)
                return;

        cpu = smp_processor_id();
        if (hlock->waittime_stamp) {
                now = lockstat_clock();
                waittime = now - hlock->waittime_stamp;
                hlock->holdtime_stamp = now;
        }

        trace_lock_acquired(lock, ip);

        stats = get_lock_stats(hlock_class(hlock));
        if (waittime) {
                if (hlock->read)
                        lock_time_inc(&stats->read_waittime, waittime);
                else
                        lock_time_inc(&stats->write_waittime, waittime);
        }
        if (lock->cpu != cpu)
                stats->bounces[bounce_acquired + !!hlock->read]++;
        put_lock_stats(stats);

        lock->cpu = cpu;
        lock->ip = ip;
}

void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
        unsigned long flags;

        if (unlikely(!lock_stat))
                return;

        if (unlikely(current->lockdep_recursion))
                return;

        raw_local_irq_save(flags);
        check_flags(flags);
        current->lockdep_recursion = 1;
        trace_lock_contended(lock, ip);
        __lock_contended(lock, ip);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_contended);

void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
        unsigned long flags;

        if (unlikely(!lock_stat))
                return;

        if (unlikely(current->lockdep_recursion))
                return;

        raw_local_irq_save(flags);
        check_flags(flags);
        current->lockdep_recursion = 1;
        __lock_acquired(lock, ip);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquired);
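
/*
 * Lock implementations drive the two hooks above through the
 * LOCK_CONTENDED() macro in <linux/lockdep.h>: lock_contended() fires
 * when the fast path fails, lock_acquired() once the lock is finally
 * taken, and the interval between them becomes the wait time shown in
 * /proc/lock_stat. Open-coded sketch with an invented primitive
 * ('demo_stat_lock' and friends are made up):
 */
struct demo_stat_lock {
        arch_spinlock_t         raw;
        struct lockdep_map      dep_map;
};

static int demo_stat_trylock(struct demo_stat_lock *l)
{
        return arch_spin_trylock(&l->raw);
}

static __maybe_unused void demo_stat_lock_acquire(struct demo_stat_lock *l)
{
        if (!demo_stat_trylock(l)) {
                lock_contended(&l->dep_map, _RET_IP_);  /* fast path lost */
                arch_spin_lock(&l->raw);                /* block for it */
        }
        lock_acquired(&l->dep_map, _RET_IP_);           /* got it; stamp it */
}
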
#endif

/*
 * Used by the testsuite to sanitize the validator state
 * after a simulated failure:
 */

void lockdep_reset(void)
{
        unsigned long flags;
        int i;

        raw_local_irq_save(flags);
        current->curr_chain_key = 0;
        current->lockdep_depth = 0;
        current->lockdep_recursion = 0;
        memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
        nr_hardirq_chains = 0;
        nr_softirq_chains = 0;
        nr_process_chains = 0;
        debug_locks = 1;
        for (i = 0; i < CHAINHASH_SIZE; i++)
                INIT_HLIST_HEAD(chainhash_table + i);
        raw_local_irq_restore(flags);
}
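
/*
 * Only the locking self-tests (lib/locking-selftest.c) should use this:
 * each simulated failure turns debug_locks off, and the harness resets
 * the validator before running the next case. Sketch of that cycle
 * ('run_one_testcase' is a made-up callback):
 */
static __maybe_unused void example_selftest_cycle(void (*run_one_testcase)(void))
{
        run_one_testcase();     /* may trip a (simulated) lockdep failure */
        lockdep_reset();        /* sanitize state before the next testcase */
}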

static void zap_class(struct lock_class *class)
{
        int i;

        /*
         * Remove all dependencies this lock is
         * involved in:
         */
        for (i = 0; i < nr_list_entries; i++) {
                if (list_entries[i].class == class)
                        list_del_rcu(&list_entries[i].entry);
        }
        /*
         * Unhash the class and remove it from the all_lock_classes list:
         */
        hlist_del_rcu(&class->hash_entry);
        list_del_rcu(&class->lock_entry);

        RCU_INIT_POINTER(class->key, NULL);
        RCU_INIT_POINTER(class->name, NULL);
}

static inline int within(const void *addr, void *start, unsigned long size)
{
        return addr >= start && addr < start + size;
}

/*
 * Used in module.c to remove lock classes from memory that is going to be
 * freed, and possibly re-used by other modules.
 *
 * We will have had one sync_sched() before getting here, so we're guaranteed
 * nobody will look up these exact classes -- they're properly dead but still
 * allocated.
 */
void lockdep_free_key_range(void *start, unsigned long size)
{
        struct lock_class *class;
        struct hlist_head *head;
        unsigned long flags;
        int i;
        int locked;

        raw_local_irq_save(flags);
        locked = graph_lock();

        /*
         * Unhash all classes that were created by this module:
         */
        for (i = 0; i < CLASSHASH_SIZE; i++) {
                head = classhash_table + i;
                hlist_for_each_entry_rcu(class, head, hash_entry) {
                        if (within(class->key, start, size))
                                zap_class(class);
                        else if (within(class->name, start, size))
                                zap_class(class);
                }
        }

        if (locked)
                graph_unlock();
        raw_local_irq_restore(flags);

        /*
         * Wait for any possible iterators from look_up_lock_class() to pass
         * before continuing to free the memory they refer to.
         *
         * sync_sched() is sufficient because the read-side runs with IRQs
         * disabled.
         */
        synchronize_sched();

        /*
         * XXX at this point we could return the resources to the pool;
         * instead we leak them. We would need to change to bitmap allocators
         * instead of the linear allocators we have now.
         */
}
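
/*
 * The module loader is the expected caller, invoking this over each
 * region that held module text/data before the memory is reused;
 * roughly (a sketch; 'mod' is a struct module *):
 *
 *      lockdep_free_key_range(mod->core_layout.base,
 *                             mod->core_layout.size);
 */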

void lockdep_reset_lock(struct lockdep_map *lock)
{
        struct lock_class *class;
        struct hlist_head *head;
        unsigned long flags;
        int i, j;
        int locked;

        raw_local_irq_save(flags);

        /*
         * Remove all classes this lock might have:
         */
        for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
                /*
                 * If the class exists we look it up and zap it:
                 */
                class = look_up_lock_class(lock, j);
                if (class)
                        zap_class(class);
        }
        /*
         * Debug check: in the end all mapped classes should
         * be gone.
         */
        locked = graph_lock();
        for (i = 0; i < CLASSHASH_SIZE; i++) {
                head = classhash_table + i;
                hlist_for_each_entry_rcu(class, head, hash_entry) {
                        int match = 0;

                        for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
                                match |= class == lock->class_cache[j];

                        if (unlikely(match)) {
                                if (debug_locks_off_graph_unlock()) {
                                        /*
                                         * We just reset everything; how
                                         * did it match?
                                         */
                                        WARN_ON(1);
                                }
                                goto out_restore;
                        }
                }
        }
        if (locked)
                graph_unlock();

out_restore:
        raw_local_irq_restore(flags);
}

void __init lockdep_info(void)
{
        printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

        printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
        printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
        printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
        printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
        printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
        printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
        printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);

        printk(" memory used by lock dependency info: %lu kB\n",
                (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
                sizeof(struct list_head) * CLASSHASH_SIZE +
                sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
                sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
                sizeof(struct list_head) * CHAINHASH_SIZE
#ifdef CONFIG_PROVE_LOCKING
                + sizeof(struct circular_queue)
#endif
                ) / 1024
                );

        printk(" per task-struct memory footprint: %lu bytes\n",
                sizeof(struct held_lock) * MAX_LOCK_DEPTH);
}

static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
                     const void *mem_to, struct held_lock *hlock)
{
        if (!debug_locks_off())
                return;
        if (debug_locks_silent)
                return;

        printk("\n");
        printk("=========================\n");
        printk("[ BUG: held lock freed! ]\n");
        print_kernel_ident();
        printk("-------------------------\n");
        printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
                curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
        print_lock(hlock);
        lockdep_print_held_locks(curr);

        printk("\nstack backtrace:\n");
        dump_stack();
}

static inline int not_in_range(const void* mem_from, unsigned long mem_len,
                                const void* lock_from, unsigned long lock_len)
{
        return lock_from + lock_len <= mem_from ||
                mem_from + mem_len <= lock_from;
}

/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
        struct task_struct *curr = current;
        struct held_lock *hlock;
        unsigned long flags;
        int i;

        if (unlikely(!debug_locks))
                return;

        local_irq_save(flags);
        for (i = 0; i < curr->lockdep_depth; i++) {
                hlock = curr->held_locks + i;

                if (not_in_range(mem_from, mem_len, hlock->instance,
                                        sizeof(*hlock->instance)))
                        continue;

                print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
                break;
        }
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
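
/*
 * The slab and page allocators call this on every free, which is how
 * "BUG: held lock freed!" catches objects freed while an embedded lock
 * is still held. A sketch of the buggy pattern it flags ('struct
 * demo_obj' is invented; the direct call stands in for what a free of
 * the object would do internally):
 */
struct demo_obj {
        spinlock_t      lock;
        int             data;
};

static __maybe_unused void example_free_while_held(struct demo_obj *obj)
{
        spin_lock(&obj->lock);
        /* a kfree(obj) here would run this check and report the bug: */
        debug_check_no_locks_freed(obj, sizeof(*obj));
        spin_unlock(&obj->lock);
}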

static void print_held_locks_bug(void)
{
        if (!debug_locks_off())
                return;
        if (debug_locks_silent)
                return;

        printk("\n");
        printk("=====================================\n");
        printk("[ BUG: %s/%d still has locks held! ]\n",
               current->comm, task_pid_nr(current));
        print_kernel_ident();
        printk("-------------------------------------\n");
        lockdep_print_held_locks(current);
        printk("\nstack backtrace:\n");
        dump_stack();
}

void debug_check_no_locks_held(void)
{
        if (unlikely(current->lockdep_depth > 0))
                print_held_locks_bug();
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
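
/*
 * do_exit() uses this so that no task can die with locks still held;
 * long-running kthreads can make the same assertion before returning.
 * A sketch (example_kthread_fn() is made up):
 */
static __maybe_unused int example_kthread_fn(void *data)
{
        /* ... main work loop runs until asked to stop ... */

        debug_check_no_locks_held();    /* exiting with a lock held is a bug */
        return 0;
}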

#ifdef __KERNEL__
void debug_show_all_locks(void)
{
        struct task_struct *g, *p;
        int count = 10;
        int unlock = 1;

        if (unlikely(!debug_locks)) {
                printk("INFO: lockdep is turned off.\n");
                return;
        }
        printk("\nShowing all locks held in the system:\n");

        /*
         * Here we try to get the tasklist_lock as hard as possible;
         * if we can't get it after 2 seconds we ignore it (but keep
         * trying). This is to enable a debug printout even if a
         * tasklist_lock-holding task deadlocks or crashes.
         */
retry:
        if (!read_trylock(&tasklist_lock)) {
                if (count == 10)
                        printk("hm, tasklist_lock locked, retrying... ");
                if (count) {
                        count--;
                        printk(" #%d", 10-count);
                        mdelay(200);
                        goto retry;
                }
                printk(" ignoring it.\n");
                unlock = 0;
        } else {
                if (count != 10)
                        printk(KERN_CONT " locked it.\n");
        }

        do_each_thread(g, p) {
                /*
                 * It's not reliable to print a task's held locks
                 * if it's not sleeping (or if it's not the current
                 * task):
                 */
                if (p->state == TASK_RUNNING && p != current)
                        continue;
                if (p->lockdep_depth)
                        lockdep_print_held_locks(p);
                if (!unlock)
                        if (read_trylock(&tasklist_lock))
                                unlock = 1;
        } while_each_thread(g, p);

        printk("\n");
        printk("=============================================\n\n");

        if (unlock)
                read_unlock(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(debug_show_all_locks);
#endif

/*
 * Careful: only use this function if you are sure that
 * the task cannot run in parallel!
 */
void debug_show_held_locks(struct task_struct *task)
{
        if (unlikely(!debug_locks)) {
                printk("INFO: lockdep is turned off.\n");
                return;
        }
        lockdep_print_held_locks(task);
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);

asmlinkage __visible void lockdep_sys_exit(void)
{
        struct task_struct *curr = current;

        if (unlikely(curr->lockdep_depth)) {
                if (!debug_locks_off())
                        return;
                printk("\n");
                printk("================================================\n");
                printk("[ BUG: lock held when returning to user space! ]\n");
                print_kernel_ident();
                printk("------------------------------------------------\n");
                printk("%s/%d is leaving the kernel with locks still held!\n",
                                curr->comm, curr->pid);
                lockdep_print_held_locks(curr);
        }
}

void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
        struct task_struct *curr = current;

#ifndef CONFIG_PROVE_RCU_REPEATEDLY
        if (!debug_locks_off())
                return;
#endif /* #ifndef CONFIG_PROVE_RCU_REPEATEDLY */
        /* Note: the following can be executed concurrently, so be careful. */
        printk("\n");
        printk("===============================\n");
        printk("[ INFO: suspicious RCU usage. ]\n");
        print_kernel_ident();
        printk("-------------------------------\n");
        printk("%s:%d %s!\n", file, line, s);
        printk("\nother info that might help us debug this:\n\n");
        printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
               !rcu_lockdep_current_cpu_online()
                        ? "RCU used illegally from offline CPU!\n"
                        : !rcu_is_watching()
                                ? "RCU used illegally from idle CPU!\n"
                                : "",
               rcu_scheduler_active, debug_locks);

        /*
         * If a CPU is in the RCU-free window in idle (i.e. in the section
         * between rcu_idle_enter() and rcu_idle_exit()), then RCU
         * considers that CPU to be in an "extended quiescent state",
         * which means that RCU will be completely ignoring that CPU.
         * Therefore, rcu_read_lock() and friends have absolutely no
         * effect on a CPU running in that state. In other words, even if
         * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
         * delete data structures out from under it. RCU really has no
         * choice here: we need to keep an RCU-free window in idle where
         * the CPU may possibly enter into low power mode. This way, other
         * CPUs that have started a grace period can notice the extended
         * quiescent state; otherwise we would delay every grace period
         * for as long as we run in the idle task.
         *
         * So complain bitterly if someone does call rcu_read_lock(),
         * rcu_read_lock_bh() and so on from extended quiescent states.
         */
        if (!rcu_is_watching())
                printk("RCU used illegally from extended quiescent state!\n");

        lockdep_print_held_locks(curr);
        printk("\nstack backtrace:\n");
        dump_stack();
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
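
/*
 * RCU's lockdep glue funnels into lockdep_rcu_suspicious() through the
 * RCU_LOCKDEP_WARN() macro. A typical assertion guarding a dereference
 * looks like this sketch (example_rcu_fetch() is made up; the check is
 * what rcu_dereference_check() would do for you):
 */
static __maybe_unused void *example_rcu_fetch(void __rcu **slot)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                         "example_rcu_fetch() used outside RCU read-side section");
        return rcu_dereference_raw(*slot);
}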