   1/*
   2 * kernel/lockdep.c
   3 *
   4 * Runtime locking correctness validator
   5 *
   6 * Started by Ingo Molnar:
   7 *
   8 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   9 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  10 *
  11 * this code maps all the lock dependencies as they occur in a live kernel
  12 * and will warn about the following classes of locking bugs:
  13 *
  14 * - lock inversion scenarios
  15 * - circular lock dependencies
  16 * - hardirq/softirq safe/unsafe locking bugs
  17 *
  18 * Bugs are reported even if the current locking scenario does not cause
  19 * any deadlock at this point.
  20 *
  21 * I.e. if anytime in the past two locks were taken in a different order,
  22 * even if it happened for another task, even if those were different
  23 * locks (but of the same class as this lock), this code will detect it.
  24 *
  25 * Thanks to Arjan van de Ven for coming up with the initial idea of
  26 * mapping lock dependencies at runtime.
  27 */
  28#define DISABLE_BRANCH_PROFILING
  29#include <linux/mutex.h>
  30#include <linux/sched.h>
  31#include <linux/delay.h>
  32#include <linux/module.h>
  33#include <linux/proc_fs.h>
  34#include <linux/seq_file.h>
  35#include <linux/spinlock.h>
  36#include <linux/kallsyms.h>
  37#include <linux/interrupt.h>
  38#include <linux/stacktrace.h>
  39#include <linux/debug_locks.h>
  40#include <linux/irqflags.h>
  41#include <linux/utsname.h>
  42#include <linux/hash.h>
  43#include <linux/ftrace.h>
  44#include <linux/stringify.h>
  45#include <linux/bitops.h>
  46#include <linux/gfp.h>
  47#include <linux/kmemcheck.h>
  48#include <linux/random.h>
  49#include <linux/jhash.h>
  50
  51#include <asm/sections.h>
  52
  53#include "lockdep_internals.h"
  54
  55#define CREATE_TRACE_POINTS
  56#include <trace/events/lock.h>
  57
  58#ifdef CONFIG_PROVE_LOCKING
  59int prove_locking = 1;
  60module_param(prove_locking, int, 0644);
  61#else
  62#define prove_locking 0
  63#endif
  64
  65#ifdef CONFIG_LOCK_STAT
  66int lock_stat = 1;
  67module_param(lock_stat, int, 0644);
  68#else
  69#define lock_stat 0
  70#endif
  71
  72/*
  73 * lockdep_lock: protects the lockdep graph, the hashes and the
  74 *               class/list/hash allocators.
  75 *
  76 * This is one of the rare exceptions where it's justified
  77 * to use a raw spinlock - we really don't want the spinlock
  78 * code to recurse back into the lockdep code...
  79 */
  80static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
  81
  82static int graph_lock(void)
  83{
  84	arch_spin_lock(&lockdep_lock);
  85	/*
  86	 * Make sure that if another CPU detected a bug while
  87	 * walking the graph we dont change it (while the other
  88	 * CPU is busy printing out stuff with the graph lock
  89	 * dropped already)
  90	 */
  91	if (!debug_locks) {
  92		arch_spin_unlock(&lockdep_lock);
  93		return 0;
  94	}
  95	/* prevent any recursions within lockdep from causing deadlocks */
  96	current->lockdep_recursion++;
  97	return 1;
  98}
  99
 100static inline int graph_unlock(void)
 101{
 102	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
 103		/*
 104		 * The lockdep graph lock isn't locked while we expect it to
 105		 * be, we're confused now, bye!
 106		 */
 107		return DEBUG_LOCKS_WARN_ON(1);
 108	}
 109
 110	current->lockdep_recursion--;
 111	arch_spin_unlock(&lockdep_lock);
 112	return 0;
 113}
 114
 115/*
 116 * Turn lock debugging off and return with 0 if it was off already,
 117 * and also release the graph lock:
 118 */
 119static inline int debug_locks_off_graph_unlock(void)
 120{
 121	int ret = debug_locks_off();
 122
 123	arch_spin_unlock(&lockdep_lock);
 124
 125	return ret;
 126}
 127
 128unsigned long nr_list_entries;
 129static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 130
 131/*
 132 * All data structures here are protected by the global debug_lock.
 133 *
 134 * Mutex key structs only get allocated once, during bootup, and never
 135 * get freed - this significantly simplifies the debugging code.
 136 */
 137unsigned long nr_lock_classes;
 138static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 139
 140static inline struct lock_class *hlock_class(struct held_lock *hlock)
 141{
 142	if (!hlock->class_idx) {
 143		/*
 144		 * Someone passed in garbage, we give up.
 145		 */
 146		DEBUG_LOCKS_WARN_ON(1);
 147		return NULL;
 148	}
 149	return lock_classes + hlock->class_idx - 1;
 150}
 151
 152#ifdef CONFIG_LOCK_STAT
 153static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
 154
 155static inline u64 lockstat_clock(void)
 156{
 157	return local_clock();
 158}
 159
 160static int lock_point(unsigned long points[], unsigned long ip)
 161{
 162	int i;
 163
 164	for (i = 0; i < LOCKSTAT_POINTS; i++) {
 165		if (points[i] == 0) {
 166			points[i] = ip;
 167			break;
 168		}
 169		if (points[i] == ip)
 170			break;
 171	}
 172
 173	return i;
 174}
 175
 176static void lock_time_inc(struct lock_time *lt, u64 time)
 177{
 178	if (time > lt->max)
 179		lt->max = time;
 180
 181	if (time < lt->min || !lt->nr)
 182		lt->min = time;
 183
 184	lt->total += time;
 185	lt->nr++;
 186}
 187
 188static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 189{
 190	if (!src->nr)
 191		return;
 192
 193	if (src->max > dst->max)
 194		dst->max = src->max;
 195
 196	if (src->min < dst->min || !dst->nr)
 197		dst->min = src->min;
 198
 199	dst->total += src->total;
 200	dst->nr += src->nr;
 201}
 202
 203struct lock_class_stats lock_stats(struct lock_class *class)
 204{
 205	struct lock_class_stats stats;
 206	int cpu, i;
 207
 208	memset(&stats, 0, sizeof(struct lock_class_stats));
 209	for_each_possible_cpu(cpu) {
 210		struct lock_class_stats *pcs =
 211			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 212
 213		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
 214			stats.contention_point[i] += pcs->contention_point[i];
 215
 216		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
 217			stats.contending_point[i] += pcs->contending_point[i];
 218
 219		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
 220		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
 221
 222		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
 223		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
 224
 225		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
 226			stats.bounces[i] += pcs->bounces[i];
 227	}
 228
 229	return stats;
 230}
 231
 232void clear_lock_stats(struct lock_class *class)
 233{
 234	int cpu;
 235
 236	for_each_possible_cpu(cpu) {
 237		struct lock_class_stats *cpu_stats =
 238			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 239
 240		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
 241	}
 242	memset(class->contention_point, 0, sizeof(class->contention_point));
 243	memset(class->contending_point, 0, sizeof(class->contending_point));
 244}
 245
 246static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 247{
 248	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
 249}
 250
 251static void put_lock_stats(struct lock_class_stats *stats)
 252{
 253	put_cpu_var(cpu_lock_stats);
 254}
 255
 256static void lock_release_holdtime(struct held_lock *hlock)
 257{
 258	struct lock_class_stats *stats;
 259	u64 holdtime;
 260
 261	if (!lock_stat)
 262		return;
 263
 264	holdtime = lockstat_clock() - hlock->holdtime_stamp;
 265
 266	stats = get_lock_stats(hlock_class(hlock));
 267	if (hlock->read)
 268		lock_time_inc(&stats->read_holdtime, holdtime);
 269	else
 270		lock_time_inc(&stats->write_holdtime, holdtime);
 271	put_lock_stats(stats);
 272}
 273#else
 274static inline void lock_release_holdtime(struct held_lock *hlock)
 275{
 276}
 277#endif
 278
 279/*
 280 * We keep a global list of all lock classes. The list only grows,
 281 * never shrinks. The list is only accessed with the lockdep
 282 * spinlock lock held.
 283 */
 284LIST_HEAD(all_lock_classes);
 285
 286/*
 287 * The lockdep classes are in a hash-table as well, for fast lookup:
 288 */
 289#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
 290#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
 291#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
 292#define classhashentry(key)	(classhash_table + __classhashfn((key)))
 293
 294static struct hlist_head classhash_table[CLASSHASH_SIZE];
 295
 296/*
 297 * We put the lock dependency chains into a hash-table as well, to cache
 298 * their existence:
 299 */
 300#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
 301#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
 302#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
 303#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
 304
 305static struct hlist_head chainhash_table[CHAINHASH_SIZE];
 306
 307/*
 308 * The hash key of the lock dependency chains is a hash itself too:
 309 * it's a hash of all locks taken up to that lock, including that lock.
 310 * It's a 64-bit hash, because it's important for the keys to be
 311 * unique.
 312 */
 313static inline u64 iterate_chain_key(u64 key, u32 idx)
 314{
 315	u32 k0 = key, k1 = key >> 32;
 316
 317	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
 318
 319	return k0 | (u64)k1 << 32;
 320}
 321
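/*
 * Illustrative sketch (not from the original source): the chain key for a
 * stack of held locks is built up incrementally, one class index at a time,
 * roughly like this (hlock1/hlock2 stand for successive held locks):
 *
 *	u64 chain_key = 0;
 *	chain_key = iterate_chain_key(chain_key, hlock1->class_idx);
 *	chain_key = iterate_chain_key(chain_key, hlock2->class_idx);
 *
 * so two lock stacks hash to the same chain key only if they contain the
 * same class indices in the same order (modulo 64-bit hash collisions).
 * See print_chain_keys_held_locks() further down for the real loop.
 */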
 322void lockdep_off(void)
 323{
 324	current->lockdep_recursion++;
 325}
 326EXPORT_SYMBOL(lockdep_off);
 327
 328void lockdep_on(void)
 329{
 330	current->lockdep_recursion--;
 331}
 332EXPORT_SYMBOL(lockdep_on);
 333
 334/*
 335 * Debugging switches:
 336 */
 337
 338#define VERBOSE			0
 339#define VERY_VERBOSE		0
 340
 341#if VERBOSE
 342# define HARDIRQ_VERBOSE	1
 343# define SOFTIRQ_VERBOSE	1
 344# define RECLAIM_VERBOSE	1
 345#else
 346# define HARDIRQ_VERBOSE	0
 347# define SOFTIRQ_VERBOSE	0
 348# define RECLAIM_VERBOSE	0
 349#endif
 350
 351#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
 352/*
 353 * Quick filtering for interesting events:
 354 */
 355static int class_filter(struct lock_class *class)
 356{
 357#if 0
 358	/* Example */
 359	if (class->name_version == 1 &&
 360			!strcmp(class->name, "lockname"))
 361		return 1;
 362	if (class->name_version == 1 &&
 363			!strcmp(class->name, "&struct->lockfield"))
 364		return 1;
 365#endif
 366	/* Filter everything else. Returning 1 would allow everything else */
 367	return 0;
 368}
 369#endif
 370
 371static int verbose(struct lock_class *class)
 372{
 373#if VERBOSE
 374	return class_filter(class);
 375#endif
 376	return 0;
 377}
 378
 379/*
 380 * Stack-trace: tightly packed array of stack backtrace
 381 * addresses. Protected by the graph_lock.
 382 */
 383unsigned long nr_stack_trace_entries;
 384static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
 385
 386static void print_lockdep_off(const char *bug_msg)
 387{
 388	printk(KERN_DEBUG "%s\n", bug_msg);
 389	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
 390#ifdef CONFIG_LOCK_STAT
 391	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
 392#endif
 393}
 394
 395static int save_trace(struct stack_trace *trace)
 396{
 397	trace->nr_entries = 0;
 398	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
 399	trace->entries = stack_trace + nr_stack_trace_entries;
 400
 401	trace->skip = 3;
 402
 403	save_stack_trace(trace);
 404
 405	/*
 406	 * Some daft arches put -1 at the end to indicate it's a full trace.
 407	 *
 408	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
 409	 * complete trace that maxes out the entries provided will be reported
 410	 * as incomplete, friggin useless </rant>
 411	 */
 412	if (trace->nr_entries != 0 &&
 413	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
 414		trace->nr_entries--;
 415
 416	trace->max_entries = trace->nr_entries;
 417
 418	nr_stack_trace_entries += trace->nr_entries;
 419
 420	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
 421		if (!debug_locks_off_graph_unlock())
 422			return 0;
 423
 424		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
 425		dump_stack();
 426
 427		return 0;
 428	}
 429
 430	return 1;
 431}
 432
 433unsigned int nr_hardirq_chains;
 434unsigned int nr_softirq_chains;
 435unsigned int nr_process_chains;
 436unsigned int max_lockdep_depth;
 437
 438#ifdef CONFIG_DEBUG_LOCKDEP
 439/*
 440 * Various lockdep statistics:
 441 */
 442DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
 443#endif
 444
 445/*
 446 * Locking printouts:
 447 */
 448
 449#define __USAGE(__STATE)						\
 450	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
 451	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
 452	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
 453	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
 454
 455static const char *usage_str[] =
 456{
 457#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
 458#include "lockdep_states.h"
 459#undef LOCKDEP_STATE
 460	[LOCK_USED] = "INITIAL USE",
 461};
 462
 463const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
 464{
 465	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 466}
 467
 468static inline unsigned long lock_flag(enum lock_usage_bit bit)
 469{
 470	return 1UL << bit;
 471}
 472
 473static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
 474{
 475	char c = '.';
 476
 477	if (class->usage_mask & lock_flag(bit + 2))
 478		c = '+';
 479	if (class->usage_mask & lock_flag(bit)) {
 480		c = '-';
 481		if (class->usage_mask & lock_flag(bit + 2))
 482			c = '?';
 483	}
 484
 485	return c;
 486}
 487
 488void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
 489{
 490	int i = 0;
 491
 492#define LOCKDEP_STATE(__STATE) 						\
 493	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
 494	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
 495#include "lockdep_states.h"
 496#undef LOCKDEP_STATE
 497
 498	usage[i] = '\0';
 499}
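/*
 * Illustrative note (not from the original source): per get_usage_char()
 * above, each character in the usage string means:
 *
 *	'.'  this class was never used in this irq state
 *	'+'  the irq state was enabled while the lock was held (ENABLED)
 *	'-'  the lock was acquired in that irq context (USED_IN)
 *	'?'  both of the above, i.e. the potentially deadly combination
 */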
 500
 501static void __print_lock_name(struct lock_class *class)
 502{
 503	char str[KSYM_NAME_LEN];
 504	const char *name;
 505
 506	name = class->name;
 507	if (!name) {
 508		name = __get_key_name(class->key, str);
 509		printk(KERN_CONT "%s", name);
 510	} else {
 511		printk(KERN_CONT "%s", name);
 512		if (class->name_version > 1)
 513			printk(KERN_CONT "#%d", class->name_version);
 514		if (class->subclass)
 515			printk(KERN_CONT "/%d", class->subclass);
 516	}
 517}
 518
 519static void print_lock_name(struct lock_class *class)
 520{
 521	char usage[LOCK_USAGE_CHARS];
 522
 523	get_usage_chars(class, usage);
 524
 525	printk(KERN_CONT " (");
 526	__print_lock_name(class);
 527	printk(KERN_CONT "){%s}", usage);
 528}
 529
 530static void print_lockdep_cache(struct lockdep_map *lock)
 531{
 532	const char *name;
 533	char str[KSYM_NAME_LEN];
 534
 535	name = lock->name;
 536	if (!name)
 537		name = __get_key_name(lock->key->subkeys, str);
 538
 539	printk(KERN_CONT "%s", name);
 540}
 541
 542static void print_lock(struct held_lock *hlock)
 543{
 544	/*
 545	 * We can be called locklessly through debug_show_all_locks() so be
 546	 * extra careful, the hlock might have been released and cleared.
 547	 */
 548	unsigned int class_idx = hlock->class_idx;
 549
 550	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
 551	barrier();
 552
 553	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
 554		printk(KERN_CONT "<RELEASED>\n");
 555		return;
 556	}
 557
 558	print_lock_name(lock_classes + class_idx - 1);
 559	printk(KERN_CONT ", at: [<%p>] %pS\n",
 560		(void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
 561}
 562
 563static void lockdep_print_held_locks(struct task_struct *curr)
 564{
 565	int i, depth = curr->lockdep_depth;
 566
 567	if (!depth) {
 568		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
 569		return;
 570	}
 571	printk("%d lock%s held by %s/%d:\n",
 572		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
 573
 574	for (i = 0; i < depth; i++) {
 575		printk(" #%d: ", i);
 576		print_lock(curr->held_locks + i);
 577	}
 578}
 579
 580static void print_kernel_ident(void)
 581{
 582	printk("%s %.*s %s\n", init_utsname()->release,
 583		(int)strcspn(init_utsname()->version, " "),
 584		init_utsname()->version,
 585		print_tainted());
 586}
 587
 588static int very_verbose(struct lock_class *class)
 589{
 590#if VERY_VERBOSE
 591	return class_filter(class);
 592#endif
 593	return 0;
 594}
 595
 596/*
 597 * Is this the address of a static object:
 598 */
 599#ifdef __KERNEL__
 600static int static_obj(void *obj)
 601{
 602	unsigned long start = (unsigned long) &_stext,
 603		      end   = (unsigned long) &_end,
 604		      addr  = (unsigned long) obj;
 605
 606	/*
 607	 * static variable?
 608	 */
 609	if ((addr >= start) && (addr < end))
 610		return 1;
 611
 612	if (arch_is_kernel_data(addr))
 613		return 1;
 614
 615	/*
 616	 * in-kernel percpu var?
 617	 */
 618	if (is_kernel_percpu_address(addr))
 619		return 1;
 620
 621	/*
 622	 * module static or percpu var?
 623	 */
 624	return is_module_address(addr) || is_module_percpu_address(addr);
 625}
 626#endif
 627
 628/*
 629 * To make lock name printouts unique, we calculate a unique
 630 * class->name_version generation counter:
 631 */
 632static int count_matching_names(struct lock_class *new_class)
 633{
 634	struct lock_class *class;
 635	int count = 0;
 636
 637	if (!new_class->name)
 638		return 0;
 639
 640	list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
 641		if (new_class->key - new_class->subclass == class->key)
 642			return class->name_version;
 643		if (class->name && !strcmp(class->name, new_class->name))
 644			count = max(count, class->name_version);
 645	}
 646
 647	return count + 1;
 648}
 649
 650/*
 651 * Register a lock's class in the hash-table, if the class is not present
 652 * yet. Otherwise we look it up. We cache the result in the lock object
 653 * itself, so actual lookup of the hash should be once per lock object.
 654 */
 655static inline struct lock_class *
 656look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 657{
 658	struct lockdep_subclass_key *key;
 659	struct hlist_head *hash_head;
 660	struct lock_class *class;
 661
 662	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 663		debug_locks_off();
 664		printk(KERN_ERR
 665			"BUG: looking up invalid subclass: %u\n", subclass);
 666		printk(KERN_ERR
 667			"turning off the locking correctness validator.\n");
 668		dump_stack();
 669		return NULL;
 670	}
 671
 672	/*
 673	 * Static locks do not have their class-keys yet - for them the key
 674	 * is the lock object itself:
 675	 */
 676	if (unlikely(!lock->key))
 677		lock->key = (void *)lock;
 678
 679	/*
 680	 * NOTE: the class-key must be unique. For dynamic locks, a static
 681	 * lock_class_key variable is passed in through the mutex_init()
 682	 * (or spin_lock_init()) call - which acts as the key. For static
 683	 * locks we use the lock object itself as the key.
 684	 */
 685	BUILD_BUG_ON(sizeof(struct lock_class_key) >
 686			sizeof(struct lockdep_map));
 687
 688	key = lock->key->subkeys + subclass;
 689
 690	hash_head = classhashentry(key);
 691
 692	/*
 693	 * We do an RCU walk of the hash, see lockdep_free_key_range().
 694	 */
 695	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 696		return NULL;
 697
 698	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
 699		if (class->key == key) {
 700			/*
 701			 * Huh! same key, different name? Did someone trample
 702			 * on some memory? We're most confused.
 703			 */
 704			WARN_ON_ONCE(class->name != lock->name);
 705			return class;
 706		}
 707	}
 708
 709	return NULL;
 710}
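/*
 * Illustrative sketch (not from the original source): the class key for a
 * dynamically allocated lock normally comes from its init macro; with
 * CONFIG_DEBUG_SPINLOCK, raw_spin_lock_init() expands to roughly:
 *
 *	do {
 *		static struct lock_class_key __key;
 *		__raw_spin_lock_init((lock), #lock, &__key);
 *	} while (0)
 *
 * so every init call site contributes its own static key (and thus its own
 * class). A lock that was never initialized this way has no key and falls
 * back to using its own address, which is only legitimate for static
 * objects - see static_obj() and the check in register_lock_class() below.
 */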
 711
 712/*
 713 * Register a lock's class in the hash-table, if the class is not present
 714 * yet. Otherwise we look it up. We cache the result in the lock object
 715 * itself, so actual lookup of the hash should be once per lock object.
 716 */
 717static struct lock_class *
 718register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 719{
 720	struct lockdep_subclass_key *key;
 721	struct hlist_head *hash_head;
 722	struct lock_class *class;
 723
 724	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 725
 726	class = look_up_lock_class(lock, subclass);
 727	if (likely(class))
 728		goto out_set_class_cache;
 729
 730	/*
 731	 * Debug-check: all keys must be persistent!
 732 	 */
 733	if (!static_obj(lock->key)) {
 734		debug_locks_off();
 735		printk("INFO: trying to register non-static key.\n");
 736		printk("the code is fine but needs lockdep annotation.\n");
 737		printk("turning off the locking correctness validator.\n");
 738		dump_stack();
 739
 740		return NULL;
 741	}
 742
 743	key = lock->key->subkeys + subclass;
 744	hash_head = classhashentry(key);
 745
 746	if (!graph_lock()) {
 747		return NULL;
 748	}
 749	/*
 750	 * We have to do the hash-walk again, to avoid races
 751	 * with another CPU:
 752	 */
 753	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
 754		if (class->key == key)
 755			goto out_unlock_set;
 756	}
 757
 758	/*
 759	 * Allocate a new key from the static array, and add it to
 760	 * the hash:
 761	 */
 762	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
 763		if (!debug_locks_off_graph_unlock()) {
 764			return NULL;
 765		}
 766
 767		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
 768		dump_stack();
 769		return NULL;
 770	}
 771	class = lock_classes + nr_lock_classes++;
 772	debug_atomic_inc(nr_unused_locks);
 773	class->key = key;
 774	class->name = lock->name;
 775	class->subclass = subclass;
 776	INIT_LIST_HEAD(&class->lock_entry);
 777	INIT_LIST_HEAD(&class->locks_before);
 778	INIT_LIST_HEAD(&class->locks_after);
 779	class->name_version = count_matching_names(class);
 780	/*
 781	 * We use RCU's safe list-add method to make
 782	 * parallel walking of the hash-list safe:
 783	 */
 784	hlist_add_head_rcu(&class->hash_entry, hash_head);
 785	/*
 786	 * Add it to the global list of classes:
 787	 */
 788	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
 789
 790	if (verbose(class)) {
 791		graph_unlock();
 792
 793		printk("\nnew class %p: %s", class->key, class->name);
 794		if (class->name_version > 1)
 795			printk(KERN_CONT "#%d", class->name_version);
 796		printk(KERN_CONT "\n");
 797		dump_stack();
 798
 799		if (!graph_lock()) {
 800			return NULL;
 801		}
 802	}
 803out_unlock_set:
 804	graph_unlock();
 805
 806out_set_class_cache:
 807	if (!subclass || force)
 808		lock->class_cache[0] = class;
 809	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
 810		lock->class_cache[subclass] = class;
 811
 812	/*
 813	 * Hash collision, did we smoke some? We found a class with a matching
 814	 * hash but the subclass -- which is hashed in -- didn't match.
 815	 */
 816	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
 817		return NULL;
 818
 819	return class;
 820}
 821
 822#ifdef CONFIG_PROVE_LOCKING
 823/*
 824 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 825 * with NULL on failure)
 826 */
 827static struct lock_list *alloc_list_entry(void)
 828{
 829	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
 830		if (!debug_locks_off_graph_unlock())
 831			return NULL;
 832
 833		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
 834		dump_stack();
 835		return NULL;
 836	}
 837	return list_entries + nr_list_entries++;
 838}
 839
 840/*
 841 * Add a new dependency to the head of the list:
 842 */
 843static int add_lock_to_list(struct lock_class *this, struct list_head *head,
 844			    unsigned long ip, int distance,
 845			    struct stack_trace *trace)
 846{
 847	struct lock_list *entry;
 848	/*
 849	 * Lock not present yet - get a new dependency struct and
 850	 * add it to the list:
 851	 */
 852	entry = alloc_list_entry();
 853	if (!entry)
 854		return 0;
 855
 856	entry->class = this;
 857	entry->distance = distance;
 858	entry->trace = *trace;
 859	/*
 860	 * Both allocation and removal are done under the graph lock; but
 861	 * iteration is under RCU-sched; see look_up_lock_class() and
 862	 * lockdep_free_key_range().
 863	 */
 864	list_add_tail_rcu(&entry->entry, head);
 865
 866	return 1;
 867}
 868
 869/*
 870 * For efficiency of the modulo operation, we use a power of 2
 871 */
 872#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
 873#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
 874
 875/*
 876 * The circular_queue and its helpers are used to implement the
 877 * breadth-first search (BFS) algorithm, by which we can build
 878 * the shortest path from the next lock to be acquired to a
 879 * previously held lock, if there is a circular dependency between them.
 880 */
 881struct circular_queue {
 882	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
 883	unsigned int  front, rear;
 884};
 885
 886static struct circular_queue lock_cq;
 887
 888unsigned int max_bfs_queue_depth;
 889
 890static unsigned int lockdep_dependency_gen_id;
 891
 892static inline void __cq_init(struct circular_queue *cq)
 893{
 894	cq->front = cq->rear = 0;
 895	lockdep_dependency_gen_id++;
 896}
 897
 898static inline int __cq_empty(struct circular_queue *cq)
 899{
 900	return (cq->front == cq->rear);
 901}
 902
 903static inline int __cq_full(struct circular_queue *cq)
 904{
 905	return ((cq->rear + 1) & CQ_MASK) == cq->front;
 906}
 907
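/*
 * Illustrative note (not from the original source): with
 * MAX_CIRCULAR_QUEUE_SIZE = 4096 the queue holds at most 4095 elements,
 * because __cq_full() treats ((rear + 1) & CQ_MASK) == front as "full";
 * one slot is always kept free to distinguish a full queue from an empty
 * one (front == rear).
 */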
 908static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
 909{
 910	if (__cq_full(cq))
 911		return -1;
 912
 913	cq->element[cq->rear] = elem;
 914	cq->rear = (cq->rear + 1) & CQ_MASK;
 915	return 0;
 916}
 917
 918static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
 919{
 920	if (__cq_empty(cq))
 921		return -1;
 922
 923	*elem = cq->element[cq->front];
 924	cq->front = (cq->front + 1) & CQ_MASK;
 925	return 0;
 926}
 927
 928static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
 929{
 930	return (cq->rear - cq->front) & CQ_MASK;
 931}
 932
 933static inline void mark_lock_accessed(struct lock_list *lock,
 934					struct lock_list *parent)
 935{
 936	unsigned long nr;
 937
 938	nr = lock - list_entries;
 939	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
 940	lock->parent = parent;
 941	lock->class->dep_gen_id = lockdep_dependency_gen_id;
 942}
 943
 944static inline unsigned long lock_accessed(struct lock_list *lock)
 945{
 946	unsigned long nr;
 947
 948	nr = lock - list_entries;
 949	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
 950	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
 951}
 952
 953static inline struct lock_list *get_lock_parent(struct lock_list *child)
 954{
 955	return child->parent;
 956}
 957
 958static inline int get_lock_depth(struct lock_list *child)
 959{
 960	int depth = 0;
 961	struct lock_list *parent;
 962
 963	while ((parent = get_lock_parent(child))) {
 964		child = parent;
 965		depth++;
 966	}
 967	return depth;
 968}
 969
 970static int __bfs(struct lock_list *source_entry,
 971		 void *data,
 972		 int (*match)(struct lock_list *entry, void *data),
 973		 struct lock_list **target_entry,
 974		 int forward)
 975{
 976	struct lock_list *entry;
 977	struct list_head *head;
 978	struct circular_queue *cq = &lock_cq;
 979	int ret = 1;
 980
 981	if (match(source_entry, data)) {
 982		*target_entry = source_entry;
 983		ret = 0;
 984		goto exit;
 985	}
 986
 987	if (forward)
 988		head = &source_entry->class->locks_after;
 989	else
 990		head = &source_entry->class->locks_before;
 991
 992	if (list_empty(head))
 993		goto exit;
 994
 995	__cq_init(cq);
 996	__cq_enqueue(cq, (unsigned long)source_entry);
 997
 998	while (!__cq_empty(cq)) {
 999		struct lock_list *lock;
1000
1001		__cq_dequeue(cq, (unsigned long *)&lock);
1002
1003		if (!lock->class) {
1004			ret = -2;
1005			goto exit;
1006		}
1007
1008		if (forward)
1009			head = &lock->class->locks_after;
1010		else
1011			head = &lock->class->locks_before;
1012
1013		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1014
1015		list_for_each_entry_rcu(entry, head, entry) {
1016			if (!lock_accessed(entry)) {
1017				unsigned int cq_depth;
1018				mark_lock_accessed(entry, lock);
1019				if (match(entry, data)) {
1020					*target_entry = entry;
1021					ret = 0;
1022					goto exit;
1023				}
1024
1025				if (__cq_enqueue(cq, (unsigned long)entry)) {
1026					ret = -1;
1027					goto exit;
1028				}
1029				cq_depth = __cq_get_elem_count(cq);
1030				if (max_bfs_queue_depth < cq_depth)
1031					max_bfs_queue_depth = cq_depth;
1032			}
1033		}
1034	}
1035exit:
1036	return ret;
1037}
1038
1039static inline int __bfs_forwards(struct lock_list *src_entry,
1040			void *data,
1041			int (*match)(struct lock_list *entry, void *data),
1042			struct lock_list **target_entry)
1043{
1044	return __bfs(src_entry, data, match, target_entry, 1);
1045
1046}
1047
1048static inline int __bfs_backwards(struct lock_list *src_entry,
1049			void *data,
1050			int (*match)(struct lock_list *entry, void *data),
1051			struct lock_list **target_entry)
1052{
1053	return __bfs(src_entry, data, match, target_entry, 0);
1054
1055}
1056
1057/*
1058 * Recursive, forwards-direction lock-dependency checking, used for
1059 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1060 * checking.
1061 */
1062
1063/*
1064 * Print a dependency chain entry (this is only done when a deadlock
1065 * has been detected):
1066 */
1067static noinline int
1068print_circular_bug_entry(struct lock_list *target, int depth)
1069{
1070	if (debug_locks_silent)
1071		return 0;
1072	printk("\n-> #%u", depth);
1073	print_lock_name(target->class);
1074	printk(KERN_CONT ":\n");
1075	print_stack_trace(&target->trace, 6);
1076
1077	return 0;
1078}
1079
1080static void
1081print_circular_lock_scenario(struct held_lock *src,
1082			     struct held_lock *tgt,
1083			     struct lock_list *prt)
1084{
1085	struct lock_class *source = hlock_class(src);
1086	struct lock_class *target = hlock_class(tgt);
1087	struct lock_class *parent = prt->class;
1088
1089	/*
1090	 * A direct locking problem where unsafe_class lock is taken
1091	 * directly by safe_class lock, then all we need to show
1092	 * is the deadlock scenario, as it is obvious that the
1093	 * unsafe lock is taken under the safe lock.
1094	 *
1095	 * But if there is a chain instead, where the safe lock takes
1096	 * an intermediate lock (middle_class) where this lock is
1097	 * not the same as the safe lock, then the lock chain is
1098	 * used to describe the problem. Otherwise we would need
1099	 * to show a different CPU case for each link in the chain
1100	 * from the safe_class lock to the unsafe_class lock.
1101	 */
1102	if (parent != source) {
1103		printk("Chain exists of:\n  ");
1104		__print_lock_name(source);
1105		printk(KERN_CONT " --> ");
1106		__print_lock_name(parent);
1107		printk(KERN_CONT " --> ");
1108		__print_lock_name(target);
1109		printk(KERN_CONT "\n\n");
1110	}
1111
1112	printk(" Possible unsafe locking scenario:\n\n");
1113	printk("       CPU0                    CPU1\n");
1114	printk("       ----                    ----\n");
1115	printk("  lock(");
1116	__print_lock_name(target);
1117	printk(KERN_CONT ");\n");
1118	printk("                               lock(");
1119	__print_lock_name(parent);
1120	printk(KERN_CONT ");\n");
1121	printk("                               lock(");
1122	__print_lock_name(target);
1123	printk(KERN_CONT ");\n");
1124	printk("  lock(");
1125	__print_lock_name(source);
1126	printk(KERN_CONT ");\n");
1127	printk("\n *** DEADLOCK ***\n\n");
1128}
1129
1130/*
1131 * When a circular dependency is detected, print the
1132 * header first:
1133 */
1134static noinline int
1135print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1136			struct held_lock *check_src,
1137			struct held_lock *check_tgt)
1138{
1139	struct task_struct *curr = current;
1140
1141	if (debug_locks_silent)
1142		return 0;
1143
1144	printk("\n");
1145	printk("======================================================\n");
1146	printk("[ INFO: possible circular locking dependency detected ]\n");
1147	print_kernel_ident();
1148	printk("-------------------------------------------------------\n");
1149	printk("%s/%d is trying to acquire lock:\n",
1150		curr->comm, task_pid_nr(curr));
1151	print_lock(check_src);
1152	printk("\nbut task is already holding lock:\n");
1153	print_lock(check_tgt);
1154	printk("\nwhich lock already depends on the new lock.\n\n");
1155	printk("\nthe existing dependency chain (in reverse order) is:\n");
1156
1157	print_circular_bug_entry(entry, depth);
1158
1159	return 0;
1160}
1161
1162static inline int class_equal(struct lock_list *entry, void *data)
1163{
1164	return entry->class == data;
1165}
1166
1167static noinline int print_circular_bug(struct lock_list *this,
1168				struct lock_list *target,
1169				struct held_lock *check_src,
1170				struct held_lock *check_tgt)
1171{
1172	struct task_struct *curr = current;
1173	struct lock_list *parent;
1174	struct lock_list *first_parent;
1175	int depth;
1176
1177	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1178		return 0;
1179
1180	if (!save_trace(&this->trace))
1181		return 0;
1182
1183	depth = get_lock_depth(target);
1184
1185	print_circular_bug_header(target, depth, check_src, check_tgt);
1186
1187	parent = get_lock_parent(target);
1188	first_parent = parent;
1189
1190	while (parent) {
1191		print_circular_bug_entry(parent, --depth);
1192		parent = get_lock_parent(parent);
1193	}
1194
1195	printk("\nother info that might help us debug this:\n\n");
1196	print_circular_lock_scenario(check_src, check_tgt,
1197				     first_parent);
1198
1199	lockdep_print_held_locks(curr);
1200
1201	printk("\nstack backtrace:\n");
1202	dump_stack();
1203
1204	return 0;
1205}
1206
1207static noinline int print_bfs_bug(int ret)
1208{
1209	if (!debug_locks_off_graph_unlock())
1210		return 0;
1211
1212	/*
1213	 * Breadth-first-search failed, graph got corrupted?
1214	 */
1215	WARN(1, "lockdep bfs error:%d\n", ret);
1216
1217	return 0;
1218}
1219
1220static int noop_count(struct lock_list *entry, void *data)
1221{
1222	(*(unsigned long *)data)++;
1223	return 0;
1224}
1225
1226static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1227{
1228	unsigned long  count = 0;
1229	struct lock_list *uninitialized_var(target_entry);
1230
1231	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1232
1233	return count;
1234}
1235unsigned long lockdep_count_forward_deps(struct lock_class *class)
1236{
1237	unsigned long ret, flags;
1238	struct lock_list this;
1239
1240	this.parent = NULL;
1241	this.class = class;
1242
1243	local_irq_save(flags);
1244	arch_spin_lock(&lockdep_lock);
1245	ret = __lockdep_count_forward_deps(&this);
1246	arch_spin_unlock(&lockdep_lock);
1247	local_irq_restore(flags);
1248
1249	return ret;
1250}
1251
1252static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1253{
1254	unsigned long  count = 0;
1255	struct lock_list *uninitialized_var(target_entry);
1256
1257	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1258
1259	return count;
1260}
1261
1262unsigned long lockdep_count_backward_deps(struct lock_class *class)
1263{
1264	unsigned long ret, flags;
1265	struct lock_list this;
1266
1267	this.parent = NULL;
1268	this.class = class;
1269
1270	local_irq_save(flags);
1271	arch_spin_lock(&lockdep_lock);
1272	ret = __lockdep_count_backward_deps(&this);
1273	arch_spin_unlock(&lockdep_lock);
1274	local_irq_restore(flags);
1275
1276	return ret;
1277}
1278
1279/*
1280 * Prove that the dependency graph starting at <entry> can not
1281 * lead to <target>. Print an error and return 0 if it does.
1282 */
1283static noinline int
1284check_noncircular(struct lock_list *root, struct lock_class *target,
1285		struct lock_list **target_entry)
1286{
1287	int result;
1288
1289	debug_atomic_inc(nr_cyclic_checks);
1290
1291	result = __bfs_forwards(root, target, class_equal, target_entry);
1292
1293	return result;
1294}
1295
1296#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1297/*
1298 * Forwards and backwards subgraph searching, for the purposes of
1299 * proving that two subgraphs can be connected by a new dependency
1300 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1301 */
1302
1303static inline int usage_match(struct lock_list *entry, void *bit)
1304{
1305	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1306}
1307
1308
1309
1310/*
1311 * Find a node in the forwards-direction dependency sub-graph starting
1312 * at @root->class that matches @bit.
1313 *
1314 * Return 0 if such a node exists in the subgraph, and put that node
1315 * into *@target_entry.
1316 *
1317 * Return 1 otherwise and keep *@target_entry unchanged.
1318 * Return <0 on error.
1319 */
1320static int
1321find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1322			struct lock_list **target_entry)
1323{
1324	int result;
1325
1326	debug_atomic_inc(nr_find_usage_forwards_checks);
1327
1328	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1329
1330	return result;
1331}
1332
1333/*
1334 * Find a node in the backwards-direction dependency sub-graph starting
1335 * at @root->class that matches @bit.
1336 *
1337 * Return 0 if such a node exists in the subgraph, and put that node
1338 * into *@target_entry.
1339 *
1340 * Return 1 otherwise and keep *@target_entry unchanged.
1341 * Return <0 on error.
1342 */
1343static int
1344find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1345			struct lock_list **target_entry)
1346{
1347	int result;
1348
1349	debug_atomic_inc(nr_find_usage_backwards_checks);
1350
1351	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1352
1353	return result;
1354}
1355
1356static void print_lock_class_header(struct lock_class *class, int depth)
1357{
1358	int bit;
1359
1360	printk("%*s->", depth, "");
1361	print_lock_name(class);
1362	printk(KERN_CONT " ops: %lu", class->ops);
1363	printk(KERN_CONT " {\n");
1364
1365	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1366		if (class->usage_mask & (1 << bit)) {
1367			int len = depth;
1368
1369			len += printk("%*s   %s", depth, "", usage_str[bit]);
1370			len += printk(KERN_CONT " at:\n");
1371			print_stack_trace(class->usage_traces + bit, len);
1372		}
1373	}
1374	printk("%*s }\n", depth, "");
1375
1376	printk("%*s ... key      at: [<%p>] %pS\n",
1377		depth, "", class->key, class->key);
1378}
1379
1380/*
1381 * printk the shortest lock dependencies from @start to @end in reverse order:
1382 */
1383static void __used
1384print_shortest_lock_dependencies(struct lock_list *leaf,
1385				struct lock_list *root)
1386{
1387	struct lock_list *entry = leaf;
1388	int depth;
1389
1390	/* compute depth from the tree generated by BFS */
1391	depth = get_lock_depth(leaf);
1392
1393	do {
1394		print_lock_class_header(entry->class, depth);
1395		printk("%*s ... acquired at:\n", depth, "");
1396		print_stack_trace(&entry->trace, 2);
1397		printk("\n");
1398
1399		if (depth == 0 && (entry != root)) {
1400			printk("lockdep:%s bad path found in chain graph\n", __func__);
1401			break;
1402		}
1403
1404		entry = get_lock_parent(entry);
1405		depth--;
1406	} while (entry && (depth >= 0));
1407
1408	return;
1409}
1410
1411static void
1412print_irq_lock_scenario(struct lock_list *safe_entry,
1413			struct lock_list *unsafe_entry,
1414			struct lock_class *prev_class,
1415			struct lock_class *next_class)
1416{
1417	struct lock_class *safe_class = safe_entry->class;
1418	struct lock_class *unsafe_class = unsafe_entry->class;
1419	struct lock_class *middle_class = prev_class;
1420
1421	if (middle_class == safe_class)
1422		middle_class = next_class;
1423
1424	/*
1425	 * A direct locking problem where unsafe_class lock is taken
1426	 * directly by safe_class lock, then all we need to show
1427	 * is the deadlock scenario, as it is obvious that the
1428	 * unsafe lock is taken under the safe lock.
1429	 *
1430	 * But if there is a chain instead, where the safe lock takes
1431	 * an intermediate lock (middle_class) where this lock is
1432	 * not the same as the safe lock, then the lock chain is
1433	 * used to describe the problem. Otherwise we would need
1434	 * to show a different CPU case for each link in the chain
1435	 * from the safe_class lock to the unsafe_class lock.
1436	 */
1437	if (middle_class != unsafe_class) {
1438		printk("Chain exists of:\n  ");
1439		__print_lock_name(safe_class);
1440		printk(KERN_CONT " --> ");
1441		__print_lock_name(middle_class);
1442		printk(KERN_CONT " --> ");
1443		__print_lock_name(unsafe_class);
1444		printk(KERN_CONT "\n\n");
1445	}
1446
1447	printk(" Possible interrupt unsafe locking scenario:\n\n");
1448	printk("       CPU0                    CPU1\n");
1449	printk("       ----                    ----\n");
1450	printk("  lock(");
1451	__print_lock_name(unsafe_class);
1452	printk(KERN_CONT ");\n");
1453	printk("                               local_irq_disable();\n");
1454	printk("                               lock(");
1455	__print_lock_name(safe_class);
1456	printk(KERN_CONT ");\n");
1457	printk("                               lock(");
1458	__print_lock_name(middle_class);
1459	printk(KERN_CONT ");\n");
1460	printk("  <Interrupt>\n");
1461	printk("    lock(");
1462	__print_lock_name(safe_class);
1463	printk(KERN_CONT ");\n");
1464	printk("\n *** DEADLOCK ***\n\n");
1465}
1466
1467static int
1468print_bad_irq_dependency(struct task_struct *curr,
1469			 struct lock_list *prev_root,
1470			 struct lock_list *next_root,
1471			 struct lock_list *backwards_entry,
1472			 struct lock_list *forwards_entry,
1473			 struct held_lock *prev,
1474			 struct held_lock *next,
1475			 enum lock_usage_bit bit1,
1476			 enum lock_usage_bit bit2,
1477			 const char *irqclass)
1478{
1479	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1480		return 0;
1481
1482	printk("\n");
1483	printk("======================================================\n");
1484	printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1485		irqclass, irqclass);
1486	print_kernel_ident();
1487	printk("------------------------------------------------------\n");
1488	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1489		curr->comm, task_pid_nr(curr),
1490		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1491		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1492		curr->hardirqs_enabled,
1493		curr->softirqs_enabled);
1494	print_lock(next);
1495
1496	printk("\nand this task is already holding:\n");
1497	print_lock(prev);
1498	printk("which would create a new lock dependency:\n");
1499	print_lock_name(hlock_class(prev));
1500	printk(KERN_CONT " ->");
1501	print_lock_name(hlock_class(next));
1502	printk(KERN_CONT "\n");
1503
1504	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1505		irqclass);
1506	print_lock_name(backwards_entry->class);
1507	printk("\n... which became %s-irq-safe at:\n", irqclass);
1508
1509	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1510
1511	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1512	print_lock_name(forwards_entry->class);
1513	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1514	printk("...");
1515
1516	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1517
1518	printk("\nother info that might help us debug this:\n\n");
1519	print_irq_lock_scenario(backwards_entry, forwards_entry,
1520				hlock_class(prev), hlock_class(next));
1521
1522	lockdep_print_held_locks(curr);
1523
1524	printk("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
1525	if (!save_trace(&prev_root->trace))
1526		return 0;
1527	print_shortest_lock_dependencies(backwards_entry, prev_root);
1528
1529	printk("\nthe dependencies between the lock to be acquired");
1530	printk(" and %s-irq-unsafe lock:\n", irqclass);
1531	if (!save_trace(&next_root->trace))
1532		return 0;
1533	print_shortest_lock_dependencies(forwards_entry, next_root);
1534
1535	printk("\nstack backtrace:\n");
1536	dump_stack();
1537
1538	return 0;
1539}
1540
1541static int
1542check_usage(struct task_struct *curr, struct held_lock *prev,
1543	    struct held_lock *next, enum lock_usage_bit bit_backwards,
1544	    enum lock_usage_bit bit_forwards, const char *irqclass)
1545{
1546	int ret;
1547	struct lock_list this, that;
1548	struct lock_list *uninitialized_var(target_entry);
1549	struct lock_list *uninitialized_var(target_entry1);
1550
1551	this.parent = NULL;
1552
1553	this.class = hlock_class(prev);
1554	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1555	if (ret < 0)
1556		return print_bfs_bug(ret);
1557	if (ret == 1)
1558		return ret;
1559
1560	that.parent = NULL;
1561	that.class = hlock_class(next);
1562	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1563	if (ret < 0)
1564		return print_bfs_bug(ret);
1565	if (ret == 1)
1566		return ret;
1567
1568	return print_bad_irq_dependency(curr, &this, &that,
1569			target_entry, target_entry1,
1570			prev, next,
1571			bit_backwards, bit_forwards, irqclass);
1572}
1573
1574static const char *state_names[] = {
1575#define LOCKDEP_STATE(__STATE) \
1576	__stringify(__STATE),
1577#include "lockdep_states.h"
1578#undef LOCKDEP_STATE
1579};
1580
1581static const char *state_rnames[] = {
1582#define LOCKDEP_STATE(__STATE) \
1583	__stringify(__STATE)"-READ",
1584#include "lockdep_states.h"
1585#undef LOCKDEP_STATE
1586};
1587
1588static inline const char *state_name(enum lock_usage_bit bit)
1589{
1590	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1591}
1592
1593static int exclusive_bit(int new_bit)
1594{
1595	/*
1596	 * USED_IN
1597	 * USED_IN_READ
1598	 * ENABLED
1599	 * ENABLED_READ
1600	 *
1601	 * bit 0 - write/read
1602	 * bit 1 - used_in/enabled
1603	 * bit 2+  state
1604	 */
1605
1606	int state = new_bit & ~3;
1607	int dir = new_bit & 2;
1608
1609	/*
1610	 * keep state, bit flip the direction and strip read.
1611	 */
1612	return state | (dir ^ 2);
1613}
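/*
 * Illustrative example (not from the original source), assuming the usual
 * bit layout from lockdep.h (USED_IN, USED_IN_READ, ENABLED, ENABLED_READ
 * per state):
 *
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ)      == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_ENABLED_HARDIRQ_READ) == LOCK_USED_IN_HARDIRQ
 *
 * i.e. the exclusive bit names the same state with the direction flipped
 * and the read bit stripped; check_usage() then searches for that usage on
 * the other side of the new dependency.
 */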
1614
1615static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1616			   struct held_lock *next, enum lock_usage_bit bit)
1617{
1618	/*
1619	 * Prove that the new dependency does not connect a hardirq-safe
1620	 * lock with a hardirq-unsafe lock - to achieve this we search
1621	 * the backwards-subgraph starting at <prev>, and the
1622	 * forwards-subgraph starting at <next>:
1623	 */
1624	if (!check_usage(curr, prev, next, bit,
1625			   exclusive_bit(bit), state_name(bit)))
1626		return 0;
1627
1628	bit++; /* _READ */
1629
1630	/*
1631	 * Prove that the new dependency does not connect a hardirq-safe-read
1632	 * lock with a hardirq-unsafe lock - to achieve this we search
1633	 * the backwards-subgraph starting at <prev>, and the
1634	 * forwards-subgraph starting at <next>:
1635	 */
1636	if (!check_usage(curr, prev, next, bit,
1637			   exclusive_bit(bit), state_name(bit)))
1638		return 0;
1639
1640	return 1;
1641}
1642
1643static int
1644check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1645		struct held_lock *next)
1646{
1647#define LOCKDEP_STATE(__STATE)						\
1648	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
1649		return 0;
1650#include "lockdep_states.h"
1651#undef LOCKDEP_STATE
1652
1653	return 1;
1654}
1655
1656static void inc_chains(void)
1657{
1658	if (current->hardirq_context)
1659		nr_hardirq_chains++;
1660	else {
1661		if (current->softirq_context)
1662			nr_softirq_chains++;
1663		else
1664			nr_process_chains++;
1665	}
1666}
1667
1668#else
1669
1670static inline int
1671check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1672		struct held_lock *next)
1673{
1674	return 1;
1675}
1676
1677static inline void inc_chains(void)
1678{
1679	nr_process_chains++;
1680}
1681
1682#endif
1683
1684static void
1685print_deadlock_scenario(struct held_lock *nxt,
1686			     struct held_lock *prv)
1687{
1688	struct lock_class *next = hlock_class(nxt);
1689	struct lock_class *prev = hlock_class(prv);
1690
1691	printk(" Possible unsafe locking scenario:\n\n");
1692	printk("       CPU0\n");
1693	printk("       ----\n");
1694	printk("  lock(");
1695	__print_lock_name(prev);
1696	printk(KERN_CONT ");\n");
1697	printk("  lock(");
1698	__print_lock_name(next);
1699	printk(KERN_CONT ");\n");
1700	printk("\n *** DEADLOCK ***\n\n");
1701	printk(" May be due to missing lock nesting notation\n\n");
1702}
1703
1704static int
1705print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1706		   struct held_lock *next)
1707{
1708	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1709		return 0;
1710
1711	printk("\n");
1712	printk("=============================================\n");
1713	printk("[ INFO: possible recursive locking detected ]\n");
1714	print_kernel_ident();
1715	printk("---------------------------------------------\n");
1716	printk("%s/%d is trying to acquire lock:\n",
1717		curr->comm, task_pid_nr(curr));
1718	print_lock(next);
1719	printk("\nbut task is already holding lock:\n");
1720	print_lock(prev);
1721
1722	printk("\nother info that might help us debug this:\n");
1723	print_deadlock_scenario(next, prev);
1724	lockdep_print_held_locks(curr);
1725
1726	printk("\nstack backtrace:\n");
1727	dump_stack();
1728
1729	return 0;
1730}
1731
1732/*
1733 * Check whether we are holding such a class already.
1734 *
1735 * (Note that this has to be done separately, because the graph cannot
1736 * detect such classes of deadlocks.)
1737 *
1738 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1739 */
1740static int
1741check_deadlock(struct task_struct *curr, struct held_lock *next,
1742	       struct lockdep_map *next_instance, int read)
1743{
1744	struct held_lock *prev;
1745	struct held_lock *nest = NULL;
1746	int i;
1747
1748	for (i = 0; i < curr->lockdep_depth; i++) {
1749		prev = curr->held_locks + i;
1750
1751		if (prev->instance == next->nest_lock)
1752			nest = prev;
1753
1754		if (hlock_class(prev) != hlock_class(next))
1755			continue;
1756
1757		/*
1758		 * Allow read-after-read recursion of the same
1759		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1760		 */
1761		if ((read == 2) && prev->read)
1762			return 2;
1763
1764		/*
1765		 * We're holding the nest_lock, which serializes this lock's
1766		 * nesting behaviour.
1767		 */
1768		if (nest)
1769			return 2;
1770
1771		return print_deadlock_bug(curr, prev, next);
1772	}
1773	return 1;
1774}
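/*
 * Illustrative example (not from the original source): the read == 2 case
 * above is what makes recursive read locking such as
 *
 *	read_lock(&tasklist_lock);
 *	read_lock(&tasklist_lock);
 *
 * legal, while the nest_lock case covers annotations like
 * mutex_lock_nest_lock(child_mutex, parent_mutex) - hypothetical names -
 * where taking several same-class locks is serialized by an outer lock.
 */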
1775
1776/*
1777 * There was a chain-cache miss, and we are about to add a new dependency
1778 * to a previous lock. We recursively validate the following rules:
1779 *
1780 *  - would the adding of the <prev> -> <next> dependency create a
1781 *    circular dependency in the graph? [== circular deadlock]
1782 *
1783 *  - does the new prev->next dependency connect any hardirq-safe lock
1784 *    (in the full backwards-subgraph starting at <prev>) with any
1785 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1786 *    <next>)? [== illegal lock inversion with hardirq contexts]
1787 *
1788 *  - does the new prev->next dependency connect any softirq-safe lock
1789 *    (in the full backwards-subgraph starting at <prev>) with any
1790 *    softirq-unsafe lock (in the full forwards-subgraph starting at
1791 *    <next>)? [== illegal lock inversion with softirq contexts]
1792 *
1793 * any of these scenarios could lead to a deadlock.
1794 *
1795 * Then if all the validations pass, we add the forwards and backwards
1796 * dependency.
1797 */
1798static int
1799check_prev_add(struct task_struct *curr, struct held_lock *prev,
1800	       struct held_lock *next, int distance, int *stack_saved)
1801{
1802	struct lock_list *entry;
1803	int ret;
1804	struct lock_list this;
1805	struct lock_list *uninitialized_var(target_entry);
1806	/*
1807	 * Static variable, serialized by the graph_lock().
1808	 *
1809	 * We use this static variable to save the stack trace in case
1810	 * we call into this function multiple times due to encountering
1811	 * trylocks in the held lock stack.
1812	 */
1813	static struct stack_trace trace;
1814
1815	/*
1816	 * Prove that the new <prev> -> <next> dependency would not
1817	 * create a circular dependency in the graph. (We do this by
1818	 * forward-recursing into the graph starting at <next>, and
1819	 * checking whether we can reach <prev>.)
1820	 *
1821	 * We are using global variables to control the recursion, to
1822	 * keep the stackframe size of the recursive functions low:
1823	 */
1824	this.class = hlock_class(next);
1825	this.parent = NULL;
1826	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1827	if (unlikely(!ret))
1828		return print_circular_bug(&this, target_entry, next, prev);
1829	else if (unlikely(ret < 0))
1830		return print_bfs_bug(ret);
1831
1832	if (!check_prev_add_irq(curr, prev, next))
1833		return 0;
1834
1835	/*
1836	 * For recursive read-locks we do all the dependency checks,
1837	 * but we dont store read-triggered dependencies (only
1838	 * write-triggered dependencies). This ensures that only the
1839	 * write-side dependencies matter, and that if for example a
1840	 * write-lock never takes any other locks, then the reads are
1841	 * equivalent to a NOP.
1842	 */
1843	if (next->read == 2 || prev->read == 2)
1844		return 1;
1845	/*
1846	 * Is the <prev> -> <next> dependency already present?
1847	 *
1848	 * (this may occur even though this is a new chain: consider
1849	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1850	 *  chains - the second one will be new, but L1 already has
1851	 *  L2 added to its dependency list, due to the first chain.)
1852	 */
1853	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1854		if (entry->class == hlock_class(next)) {
1855			if (distance == 1)
1856				entry->distance = 1;
1857			return 2;
1858		}
1859	}
1860
1861	if (!*stack_saved) {
1862		if (!save_trace(&trace))
1863			return 0;
1864		*stack_saved = 1;
1865	}
1866
1867	/*
1868	 * Ok, all validations passed, add the new lock
1869	 * to the previous lock's dependency list:
1870	 */
1871	ret = add_lock_to_list(hlock_class(next),
1872			       &hlock_class(prev)->locks_after,
1873			       next->acquire_ip, distance, &trace);
1874
1875	if (!ret)
1876		return 0;
1877
1878	ret = add_lock_to_list(hlock_class(prev),
1879			       &hlock_class(next)->locks_before,
1880			       next->acquire_ip, distance, &trace);
1881	if (!ret)
1882		return 0;
1883
1884	/*
1885	 * Debugging printouts:
1886	 */
1887	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1888		/* We drop graph lock, so another thread can overwrite trace. */
1889		*stack_saved = 0;
1890		graph_unlock();
1891		printk("\n new dependency: ");
1892		print_lock_name(hlock_class(prev));
1893		printk(KERN_CONT " => ");
1894		print_lock_name(hlock_class(next));
1895		printk(KERN_CONT "\n");
1896		dump_stack();
1897		return graph_lock();
1898	}
1899	return 1;
1900}
1901
1902/*
1903 * Add the dependency to all directly-previous locks that are 'relevant'.
1904 * The ones that are relevant are (in increasing distance from curr):
1905 * all consecutive trylock entries and the final non-trylock entry - or
1906 * the end of this context's lock-chain - whichever comes first.
1907 */
1908static int
1909check_prevs_add(struct task_struct *curr, struct held_lock *next)
1910{
 
1911	int depth = curr->lockdep_depth;
1912	int stack_saved = 0;
1913	struct held_lock *hlock;
1914
1915	/*
1916	 * Debugging checks.
1917	 *
1918	 * Depth must not be zero for a non-head lock:
1919	 */
1920	if (!depth)
1921		goto out_bug;
1922	/*
1923	 * At least two relevant locks must exist for this
1924	 * to be a head:
1925	 */
1926	if (curr->held_locks[depth].irq_context !=
1927			curr->held_locks[depth-1].irq_context)
1928		goto out_bug;
1929
1930	for (;;) {
1931		int distance = curr->lockdep_depth - depth + 1;
1932		hlock = curr->held_locks + depth - 1;
 
1933		/*
1934		 * Only non-recursive-read entries get new dependencies
1935		 * added:
1936		 */
1937		if (hlock->read != 2 && hlock->check) {
1938			if (!check_prev_add(curr, hlock, next,
1939						distance, &stack_saved))
 
1940				return 0;
 
1941			/*
1942			 * Stop after the first non-trylock entry,
1943			 * as non-trylock entries have added their
1944			 * own direct dependencies already, so this
1945			 * lock is connected to them indirectly:
1946			 */
1947			if (!hlock->trylock)
1948				break;
1949		}
 
1950		depth--;
1951		/*
1952		 * End of lock-stack?
1953		 */
1954		if (!depth)
1955			break;
1956		/*
1957		 * Stop the search if we cross into another context:
1958		 */
1959		if (curr->held_locks[depth].irq_context !=
1960				curr->held_locks[depth-1].irq_context)
1961			break;
1962	}
1963	return 1;
1964out_bug:
1965	if (!debug_locks_off_graph_unlock())
1966		return 0;
1967
1968	/*
1969	 * Clearly we all shouldn't be here, but since we made it we
1970	 * can reliably say we messed up our state. See the above two
1971	 * gotos for reasons why we could possibly end up here.
1972	 */
1973	WARN_ON(1);
1974
1975	return 0;
1976}
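
/*
 * Worked example (lock names made up): suppose the held-lock stack is,
 * oldest first,
 *
 *	A		(regular lock)
 *	B		(trylock)
 *	C		(trylock)
 *
 * and the task now acquires D.  check_prevs_add() walks backwards from
 * the top and records C -> D (distance 1), B -> D (distance 2) and
 * A -> D (distance 3), then stops at A because A is the first
 * non-trylock entry: everything older is already linked to A directly,
 * so it reaches D transitively.
 */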
1977
1978unsigned long nr_lock_chains;
1979struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
 
1980int nr_chain_hlocks;
1981static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1982
1983struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1984{
1985	return lock_classes + chain_hlocks[chain->base + i];
1986}
1987
1988/*
1989 * Returns the index of the first held_lock of the current chain
1990 */
1991static inline int get_first_held_lock(struct task_struct *curr,
1992					struct held_lock *hlock)
1993{
1994	int i;
1995	struct held_lock *hlock_curr;
1996
1997	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1998		hlock_curr = curr->held_locks + i;
1999		if (hlock_curr->irq_context != hlock->irq_context)
2000			break;
2001
2002	}
2003
2004	return ++i;
2005}
2006
2007#ifdef CONFIG_DEBUG_LOCKDEP
2008/*
2009 * Returns the next chain_key iteration
2010 */
2011static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2012{
2013	u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2014
2015	printk(" class_idx:%d -> chain_key:%016Lx",
2016		class_idx,
2017		(unsigned long long)new_chain_key);
2018	return new_chain_key;
2019}
2020
2021static void
2022print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2023{
2024	struct held_lock *hlock;
2025	u64 chain_key = 0;
2026	int depth = curr->lockdep_depth;
2027	int i;
2028
2029	printk("depth: %u\n", depth + 1);
2030	for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
 
2031		hlock = curr->held_locks + i;
2032		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2033
2034		print_lock(hlock);
2035	}
2036
2037	print_chain_key_iteration(hlock_next->class_idx, chain_key);
2038	print_lock(hlock_next);
2039}
2040
2041static void print_chain_keys_chain(struct lock_chain *chain)
2042{
2043	int i;
2044	u64 chain_key = 0;
2045	int class_id;
2046
2047	printk("depth: %u\n", chain->depth);
2048	for (i = 0; i < chain->depth; i++) {
2049		class_id = chain_hlocks[chain->base + i];
2050		chain_key = print_chain_key_iteration(class_id + 1, chain_key);
2051
2052		print_lock_name(lock_classes + class_id);
2053		printk("\n");
2054	}
2055}
2056
2057static void print_collision(struct task_struct *curr,
2058			struct held_lock *hlock_next,
2059			struct lock_chain *chain)
2060{
2061	printk("\n");
2062	printk("======================\n");
2063	printk("[chain_key collision ]\n");
2064	print_kernel_ident();
2065	printk("----------------------\n");
2066	printk("%s/%d: ", current->comm, task_pid_nr(current));
2067	printk("Hash chain already cached but the contents don't match!\n");
2068
2069	printk("Held locks:");
2070	print_chain_keys_held_locks(curr, hlock_next);
2071
2072	printk("Locks in cached chain:");
2073	print_chain_keys_chain(chain);
2074
2075	printk("\nstack backtrace:\n");
2076	dump_stack();
2077}
2078#endif
2079
2080/*
2081 * Checks whether the chain and the current held locks are consistent
2082 * in depth and also in content. If they are not it most likely means
2083 * that there was a collision during the calculation of the chain_key.
2084 * Returns: 0 not passed, 1 passed
2085 */
2086static int check_no_collision(struct task_struct *curr,
2087			struct held_lock *hlock,
2088			struct lock_chain *chain)
2089{
2090#ifdef CONFIG_DEBUG_LOCKDEP
2091	int i, j, id;
2092
2093	i = get_first_held_lock(curr, hlock);
2094
2095	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2096		print_collision(curr, hlock, chain);
2097		return 0;
2098	}
2099
2100	for (j = 0; j < chain->depth - 1; j++, i++) {
2101		id = curr->held_locks[i].class_idx - 1;
2102
2103		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2104			print_collision(curr, hlock, chain);
2105			return 0;
2106		}
2107	}
2108#endif
2109	return 1;
2110}
2111
2112/*
2113 * Look up a dependency chain. If the key is not present yet then
2114 * add it and return 1 - in this case the new dependency chain is
2115 * validated. If the key is already hashed, return 0.
2116 * (On return with 1 graph_lock is held.)
2117 */
2118static inline int lookup_chain_cache(struct task_struct *curr,
2119				     struct held_lock *hlock,
2120				     u64 chain_key)
2121{
2122	struct lock_class *class = hlock_class(hlock);
2123	struct hlist_head *hash_head = chainhashentry(chain_key);
2124	struct lock_chain *chain;
2125	int i, j;
2126
2127	/*
2128	 * We might need to take the graph lock, ensure we've got IRQs
2129	 * disabled to make this an IRQ-safe lock.. for recursion reasons
2130	 * lockdep won't complain about its own locking errors.
2131	 */
2132	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2133		return 0;
2134	/*
2135	 * We can walk it lock-free, because entries only get added
2136	 * to the hash:
2137	 */
2138	hlist_for_each_entry_rcu(chain, hash_head, entry) {
2139		if (chain->chain_key == chain_key) {
2140cache_hit:
2141			debug_atomic_inc(chain_lookup_hits);
2142			if (!check_no_collision(curr, hlock, chain))
2143				return 0;
2144
2145			if (very_verbose(class))
2146				printk("\nhash chain already cached, key: "
2147					"%016Lx tail class: [%p] %s\n",
2148					(unsigned long long)chain_key,
2149					class->key, class->name);
2150			return 0;
2151		}
2152	}
2153	if (very_verbose(class))
2154		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
2155			(unsigned long long)chain_key, class->key, class->name);
2156	/*
2157	 * Allocate a new chain entry from the static array, and add
2158	 * it to the hash:
2159	 */
2160	if (!graph_lock())
2161		return 0;
2162	/*
2163	 * We have to walk the chain again locked - to avoid duplicates:
2164	 */
2165	hlist_for_each_entry(chain, hash_head, entry) {
2166		if (chain->chain_key == chain_key) {
2167			graph_unlock();
2168			goto cache_hit;
2169		}
2170	}
2171	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2172		if (!debug_locks_off_graph_unlock())
2173			return 0;
2174
2175		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2176		dump_stack();
2177		return 0;
2178	}
2179	chain = lock_chains + nr_lock_chains++;
2180	chain->chain_key = chain_key;
2181	chain->irq_context = hlock->irq_context;
2182	i = get_first_held_lock(curr, hlock);
2183	chain->depth = curr->lockdep_depth + 1 - i;
2184
2185	BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2186	BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
2187	BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2188
2189	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2190		chain->base = nr_chain_hlocks;
2191		for (j = 0; j < chain->depth - 1; j++, i++) {
2192			int lock_id = curr->held_locks[i].class_idx - 1;
2193			chain_hlocks[chain->base + j] = lock_id;
2194		}
2195		chain_hlocks[chain->base + j] = class - lock_classes;
2196	}
2197
2198	if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
2199		nr_chain_hlocks += chain->depth;
2200
2201#ifdef CONFIG_DEBUG_LOCKDEP
2202	/*
2203	 * Important for check_no_collision().
2204	 */
2205	if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
2206		if (!debug_locks_off_graph_unlock())
2207			return 0;
2208
2209		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2210		dump_stack();
2211		return 0;
2212	}
2213#endif
2214
2215	hlist_add_head_rcu(&chain->entry, hash_head);
2216	debug_atomic_inc(chain_lookup_misses);
2217	inc_chains();
2218
2219	return 1;
2220}
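
/*
 * Sketch of what the chain cache buys us (lock names made up): the first
 * time some task acquires A, then B, then C, the chain keys for (A),
 * (A, B) and (A, B, C) are not in the hash, so each acquisition runs the
 * full graph checks and the chains are cached.  Any later acquisition of
 * the same classes, in the same order and irq context, hashes to a
 * cached chain and skips the expensive validation in validate_chain().
 */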
2221
2222static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2223		struct held_lock *hlock, int chain_head, u64 chain_key)
2224{
2225	/*
2226	 * Trylock needs to maintain the stack of held locks, but it
2227	 * does not add new dependencies, because trylock can be done
2228	 * in any order.
2229	 *
2230	 * We look up the chain_key and do the O(N^2) check and update of
2231	 * the dependencies only if this is a new dependency chain.
2232	 * (If lookup_chain_cache() returns with 1 it acquires
2233	 * graph_lock for us)
2234	 */
2235	if (!hlock->trylock && hlock->check &&
2236	    lookup_chain_cache(curr, hlock, chain_key)) {
2237		/*
2238		 * Check whether last held lock:
2239		 *
2240		 * - is irq-safe, if this lock is irq-unsafe
2241		 * - is softirq-safe, if this lock is hardirq-unsafe
2242		 *
2243		 * And check whether the new lock's dependency graph
2244		 * could lead back to the previous lock.
2245		 *
2246		 * any of these scenarios could lead to a deadlock. If any
2247		 * of the checks below fails, we bail out and report it.
2248		 */
2249		int ret = check_deadlock(curr, hlock, lock, hlock->read);
2250
2251		if (!ret)
2252			return 0;
2253		/*
2254		 * Mark recursive read, as we jump over it when
2255		 * building dependencies (just like we jump over
2256		 * trylock entries):
2257		 */
2258		if (ret == 2)
2259			hlock->read = 2;
2260		/*
2261		 * Add dependency only if this lock is not the head
2262		 * of the chain, and if it's not a secondary read-lock:
2263		 */
2264		if (!chain_head && ret != 2)
2265			if (!check_prevs_add(curr, hlock))
2266				return 0;
2267		graph_unlock();
2268	} else
2269		/* after lookup_chain_cache(): */
2270		if (unlikely(!debug_locks))
2271			return 0;
 
2272
2273	return 1;
2274}
2275#else
2276static inline int validate_chain(struct task_struct *curr,
2277	       	struct lockdep_map *lock, struct held_lock *hlock,
2278		int chain_head, u64 chain_key)
2279{
2280	return 1;
2281}
2282#endif
2283
2284/*
2285 * We are building curr_chain_key incrementally, so double-check
2286 * it from scratch, to make sure that it's done correctly:
2287 */
2288static void check_chain_key(struct task_struct *curr)
2289{
2290#ifdef CONFIG_DEBUG_LOCKDEP
2291	struct held_lock *hlock, *prev_hlock = NULL;
2292	unsigned int i;
2293	u64 chain_key = 0;
2294
2295	for (i = 0; i < curr->lockdep_depth; i++) {
2296		hlock = curr->held_locks + i;
2297		if (chain_key != hlock->prev_chain_key) {
2298			debug_locks_off();
2299			/*
2300			 * We got mighty confused, our chain keys don't match
2301			 * with what we expect, did someone trample on our task state?
2302			 */
2303			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2304				curr->lockdep_depth, i,
2305				(unsigned long long)chain_key,
2306				(unsigned long long)hlock->prev_chain_key);
2307			return;
2308		}
 
2309		/*
2310		 * Whoops, ran out of static storage again?
2311		 */
2312		if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2313			return;
2314
2315		if (prev_hlock && (prev_hlock->irq_context !=
2316							hlock->irq_context))
2317			chain_key = 0;
2318		chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2319		prev_hlock = hlock;
2320	}
2321	if (chain_key != curr->curr_chain_key) {
2322		debug_locks_off();
2323		/*
2324		 * More smoking hash instead of calculating it, damn see these
2325		 * numbers float.. I bet that a pink elephant stepped on my memory.
2326		 */
2327		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2328			curr->lockdep_depth, i,
2329			(unsigned long long)chain_key,
2330			(unsigned long long)curr->curr_chain_key);
2331	}
2332#endif
2333}
2334
2335static void
2336print_usage_bug_scenario(struct held_lock *lock)
2337{
2338	struct lock_class *class = hlock_class(lock);
2339
2340	printk(" Possible unsafe locking scenario:\n\n");
2341	printk("       CPU0\n");
2342	printk("       ----\n");
2343	printk("  lock(");
2344	__print_lock_name(class);
2345	printk(KERN_CONT ");\n");
2346	printk("  <Interrupt>\n");
2347	printk("    lock(");
2348	__print_lock_name(class);
2349	printk(KERN_CONT ");\n");
2350	printk("\n *** DEADLOCK ***\n\n");
2351}
2352
2353static int
2354print_usage_bug(struct task_struct *curr, struct held_lock *this,
2355		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2356{
2357	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2358		return 0;
2359
2360	printk("\n");
2361	printk("=================================\n");
2362	printk("[ INFO: inconsistent lock state ]\n");
2363	print_kernel_ident();
2364	printk("---------------------------------\n");
2365
2366	printk("inconsistent {%s} -> {%s} usage.\n",
2367		usage_str[prev_bit], usage_str[new_bit]);
2368
2369	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2370		curr->comm, task_pid_nr(curr),
2371		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2372		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2373		trace_hardirqs_enabled(curr),
2374		trace_softirqs_enabled(curr));
2375	print_lock(this);
2376
2377	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
2378	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2379
2380	print_irqtrace_events(curr);
2381	printk("\nother info that might help us debug this:\n");
2382	print_usage_bug_scenario(this);
2383
2384	lockdep_print_held_locks(curr);
2385
2386	printk("\nstack backtrace:\n");
2387	dump_stack();
2388
2389	return 0;
2390}
2391
2392/*
2393 * Print out an error if an invalid bit is set:
2394 */
2395static inline int
2396valid_state(struct task_struct *curr, struct held_lock *this,
2397	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2398{
2399	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2400		return print_usage_bug(curr, this, bad_bit, new_bit);
2401	return 1;
2402}
2403
2404static int mark_lock(struct task_struct *curr, struct held_lock *this,
2405		     enum lock_usage_bit new_bit);
2406
2407#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2408
2409/*
2410 * print irq inversion bug:
2411 */
2412static int
2413print_irq_inversion_bug(struct task_struct *curr,
2414			struct lock_list *root, struct lock_list *other,
2415			struct held_lock *this, int forwards,
2416			const char *irqclass)
2417{
2418	struct lock_list *entry = other;
2419	struct lock_list *middle = NULL;
2420	int depth;
2421
2422	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2423		return 0;
2424
2425	printk("\n");
2426	printk("=========================================================\n");
2427	printk("[ INFO: possible irq lock inversion dependency detected ]\n");
2428	print_kernel_ident();
2429	printk("---------------------------------------------------------\n");
2430	printk("%s/%d just changed the state of lock:\n",
2431		curr->comm, task_pid_nr(curr));
2432	print_lock(this);
2433	if (forwards)
2434		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2435	else
2436		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2437	print_lock_name(other->class);
2438	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2439
2440	printk("\nother info that might help us debug this:\n");
2441
2442	/* Find a middle lock (if one exists) */
2443	depth = get_lock_depth(other);
2444	do {
2445		if (depth == 0 && (entry != root)) {
2446			printk("lockdep:%s bad path found in chain graph\n", __func__);
2447			break;
2448		}
2449		middle = entry;
2450		entry = get_lock_parent(entry);
2451		depth--;
2452	} while (entry && entry != root && (depth >= 0));
2453	if (forwards)
2454		print_irq_lock_scenario(root, other,
2455			middle ? middle->class : root->class, other->class);
2456	else
2457		print_irq_lock_scenario(other, root,
2458			middle ? middle->class : other->class, root->class);
2459
2460	lockdep_print_held_locks(curr);
2461
2462	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2463	if (!save_trace(&root->trace))
2464		return 0;
 
2465	print_shortest_lock_dependencies(other, root);
2466
2467	printk("\nstack backtrace:\n");
2468	dump_stack();
2469
2470	return 0;
2471}
2472
2473/*
2474 * Prove that in the forwards-direction subgraph starting at <this>
2475 * there is no lock matching <mask>:
2476 */
2477static int
2478check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2479		     enum lock_usage_bit bit, const char *irqclass)
2480{
2481	int ret;
2482	struct lock_list root;
2483	struct lock_list *uninitialized_var(target_entry);
2484
2485	root.parent = NULL;
2486	root.class = hlock_class(this);
2487	ret = find_usage_forwards(&root, bit, &target_entry);
2488	if (ret < 0)
2489		return print_bfs_bug(ret);
2490	if (ret == 1)
2491		return ret;
2492
2493	return print_irq_inversion_bug(curr, &root, target_entry,
2494					this, 1, irqclass);
 
2495}
2496
2497/*
2498 * Prove that in the backwards-direction subgraph starting at <this>
2499 * there is no lock matching <mask>:
2500 */
2501static int
2502check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2503		      enum lock_usage_bit bit, const char *irqclass)
2504{
2505	int ret;
2506	struct lock_list root;
2507	struct lock_list *uninitialized_var(target_entry);
2508
2509	root.parent = NULL;
2510	root.class = hlock_class(this);
2511	ret = find_usage_backwards(&root, bit, &target_entry);
2512	if (ret < 0)
2513		return print_bfs_bug(ret);
2514	if (ret == 1)
2515		return ret;
2516
2517	return print_irq_inversion_bug(curr, &root, target_entry,
2518					this, 0, irqclass);
 
2519}
2520
2521void print_irqtrace_events(struct task_struct *curr)
2522{
2523	printk("irq event stamp: %u\n", curr->irq_events);
2524	printk("hardirqs last  enabled at (%u): [<%p>] %pS\n",
2525		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
2526		(void *)curr->hardirq_enable_ip);
2527	printk("hardirqs last disabled at (%u): [<%p>] %pS\n",
2528		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
2529		(void *)curr->hardirq_disable_ip);
2530	printk("softirqs last  enabled at (%u): [<%p>] %pS\n",
2531		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
2532		(void *)curr->softirq_enable_ip);
2533	printk("softirqs last disabled at (%u): [<%p>] %pS\n",
2534		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
2535		(void *)curr->softirq_disable_ip);
2536}
2537
2538static int HARDIRQ_verbose(struct lock_class *class)
2539{
2540#if HARDIRQ_VERBOSE
2541	return class_filter(class);
2542#endif
2543	return 0;
2544}
2545
2546static int SOFTIRQ_verbose(struct lock_class *class)
2547{
2548#if SOFTIRQ_VERBOSE
2549	return class_filter(class);
2550#endif
2551	return 0;
2552}
2553
2554static int RECLAIM_FS_verbose(struct lock_class *class)
2555{
2556#if RECLAIM_VERBOSE
2557	return class_filter(class);
2558#endif
2559	return 0;
2560}
2561
2562#define STRICT_READ_CHECKS	1
2563
2564static int (*state_verbose_f[])(struct lock_class *class) = {
2565#define LOCKDEP_STATE(__STATE) \
2566	__STATE##_verbose,
2567#include "lockdep_states.h"
2568#undef LOCKDEP_STATE
2569};
2570
2571static inline int state_verbose(enum lock_usage_bit bit,
2572				struct lock_class *class)
2573{
2574	return state_verbose_f[bit >> 2](class);
2575}
2576
2577typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2578			     enum lock_usage_bit bit, const char *name);
2579
2580static int
2581mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2582		enum lock_usage_bit new_bit)
2583{
2584	int excl_bit = exclusive_bit(new_bit);
2585	int read = new_bit & 1;
2586	int dir = new_bit & 2;
2587
2588	/*
2589	 * mark USED_IN has to look forwards -- to ensure no dependency
2590	 * has ENABLED state, which would allow recursion deadlocks.
2591	 *
2592	 * mark ENABLED has to look backwards -- to ensure no dependee
2593	 * has USED_IN state, which, again, would allow  recursion deadlocks.
2594	 */
2595	check_usage_f usage = dir ?
2596		check_usage_backwards : check_usage_forwards;
2597
2598	/*
2599	 * Validate that this particular lock does not have conflicting
2600	 * usage states.
2601	 */
2602	if (!valid_state(curr, this, new_bit, excl_bit))
2603		return 0;
2604
2605	/*
2606	 * Validate that the lock dependencies don't have conflicting usage
2607	 * states.
2608	 */
2609	if ((!read || !dir || STRICT_READ_CHECKS) &&
2610			!usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2611		return 0;
2612
2613	/*
2614	 * Check for read in write conflicts
2615	 */
2616	if (!read) {
2617		if (!valid_state(curr, this, new_bit, excl_bit + 1))
2618			return 0;
2619
2620		if (STRICT_READ_CHECKS &&
2621			!usage(curr, this, excl_bit + 1,
2622				state_name(new_bit + 1)))
2623			return 0;
2624	}
2625
2626	if (state_verbose(new_bit, hlock_class(this)))
2627		return 2;
2628
2629	return 1;
2630}
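
/*
 * Worked example of the bit layout decoded above: with the per-state
 * ordering USED_IN, USED_IN_READ, ENABLED, ENABLED_READ,
 * LOCK_ENABLED_SOFTIRQ_READ == 7 == 0b111 gives read = 7 & 1 = 1,
 * dir = 7 & 2 = 2 (an ENABLED bit, so we search backwards) and
 * state = 7 >> 2 = 1 (SOFTIRQ).  exclusive_bit() keeps the state bits,
 * flips the direction and strips the read bit, yielding
 * LOCK_USED_IN_SOFTIRQ == 4.
 */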
2631
2632enum mark_type {
2633#define LOCKDEP_STATE(__STATE)	__STATE,
2634#include "lockdep_states.h"
2635#undef LOCKDEP_STATE
2636};
2637
2638/*
2639 * Mark all held locks with a usage bit:
2640 */
2641static int
2642mark_held_locks(struct task_struct *curr, enum mark_type mark)
2643{
2644	enum lock_usage_bit usage_bit;
2645	struct held_lock *hlock;
2646	int i;
2647
2648	for (i = 0; i < curr->lockdep_depth; i++) {
 
2649		hlock = curr->held_locks + i;
2650
2651		usage_bit = 2 + (mark << 2); /* ENABLED */
2652		if (hlock->read)
2653			usage_bit += 1; /* READ */
2654
2655		BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2656
2657		if (!hlock->check)
2658			continue;
2659
2660		if (!mark_lock(curr, hlock, usage_bit))
2661			return 0;
2662	}
2663
2664	return 1;
2665}
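
/*
 * Example of the usage_bit arithmetic above: for mark == SOFTIRQ (state
 * index 1) a write-held lock gets 2 + (1 << 2) == 6 == LOCK_ENABLED_SOFTIRQ,
 * and a read-held lock gets 6 + 1 == 7 == LOCK_ENABLED_SOFTIRQ_READ.
 */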
2666
2667/*
2668 * Hardirqs will be enabled:
2669 */
2670static void __trace_hardirqs_on_caller(unsigned long ip)
2671{
2672	struct task_struct *curr = current;
2673
2674	/* we'll do an OFF -> ON transition: */
2675	curr->hardirqs_enabled = 1;
2676
2677	/*
2678	 * We are going to turn hardirqs on, so set the
2679	 * usage bit for all held locks:
2680	 */
2681	if (!mark_held_locks(curr, HARDIRQ))
2682		return;
2683	/*
2684	 * If we have softirqs enabled, then set the usage
2685	 * bit for all held locks. (disabled hardirqs prevented
2686	 * this bit from being set before)
2687	 */
2688	if (curr->softirqs_enabled)
2689		if (!mark_held_locks(curr, SOFTIRQ))
2690			return;
2691
2692	curr->hardirq_enable_ip = ip;
2693	curr->hardirq_enable_event = ++curr->irq_events;
2694	debug_atomic_inc(hardirqs_on_events);
2695}
2696
2697__visible void trace_hardirqs_on_caller(unsigned long ip)
2698{
2699	time_hardirqs_on(CALLER_ADDR0, ip);
2700
2701	if (unlikely(!debug_locks || current->lockdep_recursion))
2702		return;
2703
2704	if (unlikely(current->hardirqs_enabled)) {
2705		/*
2706		 * Neither irq nor preemption are disabled here
2707		 * so this is racy by nature but losing one hit
2708		 * in a stat is not a big deal.
2709		 */
2710		__debug_atomic_inc(redundant_hardirqs_on);
2711		return;
2712	}
2713
2714	/*
2715	 * We're enabling irqs and according to our state above irqs weren't
2716	 * already enabled, yet we find the hardware thinks they are in fact
2717	 * enabled.. someone messed up their IRQ state tracing.
2718	 */
2719	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2720		return;
2721
2722	/*
2723	 * See the fine text that goes along with this variable definition.
2724	 */
2725	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2726		return;
2727
2728	/*
2729	 * Can't allow enabling interrupts while in an interrupt handler,
2730	 * that's general bad form and such. Recursion, limited stack etc..
2731	 */
2732	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2733		return;
2734
2735	current->lockdep_recursion = 1;
2736	__trace_hardirqs_on_caller(ip);
2737	current->lockdep_recursion = 0;
2738}
2739EXPORT_SYMBOL(trace_hardirqs_on_caller);
2740
2741void trace_hardirqs_on(void)
2742{
2743	trace_hardirqs_on_caller(CALLER_ADDR0);
2744}
2745EXPORT_SYMBOL(trace_hardirqs_on);
2746
2747/*
2748 * Hardirqs were disabled:
2749 */
2750__visible void trace_hardirqs_off_caller(unsigned long ip)
2751{
2752	struct task_struct *curr = current;
2753
2754	time_hardirqs_off(CALLER_ADDR0, ip);
2755
2756	if (unlikely(!debug_locks || current->lockdep_recursion))
2757		return;
2758
2759	/*
2760	 * So we're supposed to get called after you mask local IRQs, but for
2761	 * some reason the hardware doesn't quite think you did a proper job.
2762	 */
2763	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2764		return;
2765
2766	if (curr->hardirqs_enabled) {
2767		/*
2768		 * We have done an ON -> OFF transition:
2769		 */
2770		curr->hardirqs_enabled = 0;
2771		curr->hardirq_disable_ip = ip;
2772		curr->hardirq_disable_event = ++curr->irq_events;
2773		debug_atomic_inc(hardirqs_off_events);
2774	} else
2775		debug_atomic_inc(redundant_hardirqs_off);
2776}
2777EXPORT_SYMBOL(trace_hardirqs_off_caller);
2778
2779void trace_hardirqs_off(void)
2780{
2781	trace_hardirqs_off_caller(CALLER_ADDR0);
2782}
2783EXPORT_SYMBOL(trace_hardirqs_off);
2784
2785/*
2786 * Softirqs will be enabled:
2787 */
2788void trace_softirqs_on(unsigned long ip)
2789{
2790	struct task_struct *curr = current;
2791
2792	if (unlikely(!debug_locks || current->lockdep_recursion))
2793		return;
2794
2795	/*
2796	 * We fancy IRQs being disabled here, see softirq.c, avoids
2797	 * funny state and nesting things.
2798	 */
2799	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2800		return;
2801
2802	if (curr->softirqs_enabled) {
2803		debug_atomic_inc(redundant_softirqs_on);
2804		return;
2805	}
2806
2807	current->lockdep_recursion = 1;
2808	/*
2809	 * We'll do an OFF -> ON transition:
2810	 */
2811	curr->softirqs_enabled = 1;
2812	curr->softirq_enable_ip = ip;
2813	curr->softirq_enable_event = ++curr->irq_events;
2814	debug_atomic_inc(softirqs_on_events);
2815	/*
2816	 * We are going to turn softirqs on, so set the
2817	 * usage bit for all held locks, if hardirqs are
2818	 * enabled too:
2819	 */
2820	if (curr->hardirqs_enabled)
2821		mark_held_locks(curr, SOFTIRQ);
2822	current->lockdep_recursion = 0;
2823}
2824
2825/*
2826 * Softirqs were disabled:
2827 */
2828void trace_softirqs_off(unsigned long ip)
2829{
2830	struct task_struct *curr = current;
2831
2832	if (unlikely(!debug_locks || current->lockdep_recursion))
2833		return;
2834
2835	/*
2836	 * We fancy IRQs being disabled here, see softirq.c
2837	 */
2838	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2839		return;
2840
2841	if (curr->softirqs_enabled) {
2842		/*
2843		 * We have done an ON -> OFF transition:
2844		 */
2845		curr->softirqs_enabled = 0;
2846		curr->softirq_disable_ip = ip;
2847		curr->softirq_disable_event = ++curr->irq_events;
2848		debug_atomic_inc(softirqs_off_events);
2849		/*
2850		 * Whoops, we wanted softirqs off, so why aren't they?
2851		 */
2852		DEBUG_LOCKS_WARN_ON(!softirq_count());
2853	} else
2854		debug_atomic_inc(redundant_softirqs_off);
2855}
2856
2857static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2858{
2859	struct task_struct *curr = current;
2860
2861	if (unlikely(!debug_locks))
2862		return;
2863
2864	/* no reclaim without waiting on it */
2865	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
2866		return;
2867
2868	/* this guy won't enter reclaim */
2869	if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2870		return;
2871
2872	/* We're only interested in __GFP_FS allocations for now */
2873	if (!(gfp_mask & __GFP_FS))
2874		return;
2875
2876	/*
2877	 * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
2878	 */
2879	if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2880		return;
2881
2882	mark_held_locks(curr, RECLAIM_FS);
2883}
2884
2885static void check_flags(unsigned long flags);
2886
2887void lockdep_trace_alloc(gfp_t gfp_mask)
2888{
2889	unsigned long flags;
 
2890
2891	if (unlikely(current->lockdep_recursion))
2892		return;
2893
2894	raw_local_irq_save(flags);
2895	check_flags(flags);
2896	current->lockdep_recursion = 1;
2897	__lockdep_trace_alloc(gfp_mask, flags);
2898	current->lockdep_recursion = 0;
2899	raw_local_irq_restore(flags);
2900}
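
/*
 * Sketch of the inversion this tracks (lock name made up): a filesystem
 * that holds fs_lock across a GFP_KERNEL (__GFP_FS) allocation gets
 * fs_lock marked ENABLED_RECLAIM_FS here.  If fs_lock is ever taken
 * while the task is inside reclaim (USED_IN_RECLAIM_FS, set via
 * mark_irqflags() when lockdep_reclaim_gfp is active), the two usage
 * bits conflict and lockdep reports the allocation-vs-reclaim deadlock
 * before it can actually happen.
 */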
2901
2902static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2903{
2904	/*
2905	 * If non-trylock use in a hardirq or softirq context, then
2906	 * mark the lock as used in these contexts:
2907	 */
2908	if (!hlock->trylock) {
2909		if (hlock->read) {
2910			if (curr->hardirq_context)
2911				if (!mark_lock(curr, hlock,
2912						LOCK_USED_IN_HARDIRQ_READ))
2913					return 0;
2914			if (curr->softirq_context)
2915				if (!mark_lock(curr, hlock,
2916						LOCK_USED_IN_SOFTIRQ_READ))
2917					return 0;
2918		} else {
2919			if (curr->hardirq_context)
2920				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2921					return 0;
2922			if (curr->softirq_context)
2923				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2924					return 0;
2925		}
2926	}
2927	if (!hlock->hardirqs_off) {
2928		if (hlock->read) {
2929			if (!mark_lock(curr, hlock,
2930					LOCK_ENABLED_HARDIRQ_READ))
2931				return 0;
2932			if (curr->softirqs_enabled)
2933				if (!mark_lock(curr, hlock,
2934						LOCK_ENABLED_SOFTIRQ_READ))
2935					return 0;
2936		} else {
2937			if (!mark_lock(curr, hlock,
2938					LOCK_ENABLED_HARDIRQ))
2939				return 0;
2940			if (curr->softirqs_enabled)
2941				if (!mark_lock(curr, hlock,
2942						LOCK_ENABLED_SOFTIRQ))
2943					return 0;
2944		}
2945	}
2946
2947	/*
2948	 * We reuse the irq context infrastructure more broadly as a general
2949	 * context checking code. This tests GFP_FS recursion (a lock taken
2950	 * during reclaim for a GFP_FS allocation is held over a GFP_FS
2951	 * allocation).
2952	 */
2953	if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2954		if (hlock->read) {
2955			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2956					return 0;
2957		} else {
2958			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2959					return 0;
2960		}
2961	}
2962
2963	return 1;
2964}
2965
2966static inline unsigned int task_irq_context(struct task_struct *task)
2967{
2968	return 2 * !!task->hardirq_context + !!task->softirq_context;
2969}
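
/*
 * For illustration, the encoding above gives: 0 == process context,
 * 1 == softirq, 2 == hardirq, 3 == hardirq that interrupted a softirq.
 * held_lock::irq_context stores this value so that dependency chains
 * are cut whenever a lock sequence crosses a context boundary.
 */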
2970
2971static int separate_irq_context(struct task_struct *curr,
2972		struct held_lock *hlock)
2973{
2974	unsigned int depth = curr->lockdep_depth;
2975
2976	/*
2977	 * Keep track of points where we cross into an interrupt context:
2978	 */
2979	if (depth) {
2980		struct held_lock *prev_hlock;
2981
2982		prev_hlock = curr->held_locks + depth-1;
2983		/*
2984		 * If we cross into another context, reset the
2985		 * hash key (this also prevents the checking and the
2986		 * adding of the dependency to 'prev'):
2987		 */
2988		if (prev_hlock->irq_context != hlock->irq_context)
2989			return 1;
2990	}
2991	return 0;
2992}
2993
2994#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
2995
2996static inline
2997int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2998		enum lock_usage_bit new_bit)
2999{
3000	WARN_ON(1); /* Impossible innit? When we don't have TRACE_IRQFLAGS */
3001	return 1;
3002}
3003
3004static inline int mark_irqflags(struct task_struct *curr,
3005		struct held_lock *hlock)
3006{
3007	return 1;
3008}
3009
3010static inline unsigned int task_irq_context(struct task_struct *task)
3011{
3012	return 0;
3013}
3014
3015static inline int separate_irq_context(struct task_struct *curr,
3016		struct held_lock *hlock)
3017{
3018	return 0;
3019}
3020
3021void lockdep_trace_alloc(gfp_t gfp_mask)
3022{
3023}
3024
3025#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3026
3027/*
3028 * Mark a lock with a usage bit, and validate the state transition:
3029 */
3030static int mark_lock(struct task_struct *curr, struct held_lock *this,
3031			     enum lock_usage_bit new_bit)
3032{
3033	unsigned int new_mask = 1 << new_bit, ret = 1;
3034
3035	/*
3036	 * If already set then do not dirty the cacheline,
3037	 * nor do any checks:
3038	 */
3039	if (likely(hlock_class(this)->usage_mask & new_mask))
3040		return 1;
3041
3042	if (!graph_lock())
3043		return 0;
3044	/*
3045	 * Make sure we didn't race:
3046	 */
3047	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3048		graph_unlock();
3049		return 1;
3050	}
3051
3052	hlock_class(this)->usage_mask |= new_mask;
3053
3054	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
3055		return 0;
3056
3057	switch (new_bit) {
3058#define LOCKDEP_STATE(__STATE)			\
3059	case LOCK_USED_IN_##__STATE:		\
3060	case LOCK_USED_IN_##__STATE##_READ:	\
3061	case LOCK_ENABLED_##__STATE:		\
3062	case LOCK_ENABLED_##__STATE##_READ:
3063#include "lockdep_states.h"
3064#undef LOCKDEP_STATE
3065		ret = mark_lock_irq(curr, this, new_bit);
3066		if (!ret)
3067			return 0;
3068		break;
3069	case LOCK_USED:
3070		debug_atomic_dec(nr_unused_locks);
3071		break;
3072	default:
3073		if (!debug_locks_off_graph_unlock())
 
3074			return 0;
3075		WARN_ON(1);
3076		return 0;
3077	}
3078
3079	graph_unlock();
3080
3081	/*
3082	 * We must printk outside of the graph_lock:
3083	 */
3084	if (ret == 2) {
3085		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3086		print_lock(this);
3087		print_irqtrace_events(curr);
3088		dump_stack();
3089	}
3090
3091	return ret;
3092}
3093
3094/*
3095 * Initialize a lock instance's lock-class mapping info:
3096 */
3097void lockdep_init_map(struct lockdep_map *lock, const char *name,
3098		      struct lock_class_key *key, int subclass)
3099{
3100	int i;
3101
3102	kmemcheck_mark_initialized(lock, sizeof(*lock));
3103
3104	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3105		lock->class_cache[i] = NULL;
3106
3107#ifdef CONFIG_LOCK_STAT
3108	lock->cpu = raw_smp_processor_id();
3109#endif
3110
3111	/*
3112	 * Can't be having no nameless bastards around this place!
3113	 */
3114	if (DEBUG_LOCKS_WARN_ON(!name)) {
3115		lock->name = "NULL";
3116		return;
3117	}
3118
3119	lock->name = name;
3120
3121	/*
3122	 * No key, no joy, we need to hash something.
3123	 */
3124	if (DEBUG_LOCKS_WARN_ON(!key))
3125		return;
3126	/*
3127	 * Sanity check, the lock-class key must be persistent:
 
3128	 */
3129	if (!static_obj(key)) {
3130		printk("BUG: key %p not in .data!\n", key);
3131		/*
3132		 * What it says above ^^^^^, I suggest you read it.
3133		 */
3134		DEBUG_LOCKS_WARN_ON(1);
3135		return;
3136	}
3137	lock->key = key;
3138
3139	if (unlikely(!debug_locks))
3140		return;
3141
3142	if (subclass) {
3143		unsigned long flags;
3144
3145		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3146			return;
3147
3148		raw_local_irq_save(flags);
3149		current->lockdep_recursion = 1;
3150		register_lock_class(lock, subclass, 1);
3151		current->lockdep_recursion = 0;
3152		raw_local_irq_restore(flags);
3153	}
3154}
3155EXPORT_SYMBOL_GPL(lockdep_init_map);
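
/*
 * Typical usage sketch for an open-coded lockdep_map (the names below
 * are made up, not part of this file).  The key must be static data,
 * as checked above:
 *
 *	static struct lock_class_key my_key;
 *	struct lockdep_map my_map;
 *
 *	lockdep_init_map(&my_map, "my_map", &my_key, 0);
 *	...
 *	lock_map_acquire(&my_map);
 *	...
 *	lock_map_release(&my_map);
 *
 * This is how e.g. the workqueue code annotates work items that behave
 * like locks without being one.
 */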
3156
3157struct lock_class_key __lockdep_no_validate__;
3158EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
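
/*
 * For illustration: a lock whose class key is set to
 * __lockdep_no_validate__ (normally via lockdep_set_novalidate_class())
 * still appears in held-lock reports, but 'check' is forced to 0 in
 * __lock_acquire() below, so no dependency or usage validation is done
 * for it.  The driver core uses this for device->mutex, whose locking
 * order is too dynamic for class-based checking.
 */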
3159
3160static int
3161print_lock_nested_lock_not_held(struct task_struct *curr,
3162				struct held_lock *hlock,
3163				unsigned long ip)
3164{
3165	if (!debug_locks_off())
3166		return 0;
3167	if (debug_locks_silent)
3168		return 0;
3169
3170	printk("\n");
3171	printk("==================================\n");
3172	printk("[ BUG: Nested lock was not taken ]\n");
3173	print_kernel_ident();
3174	printk("----------------------------------\n");
3175
3176	printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3177	print_lock(hlock);
3178
3179	printk("\nbut this task is not holding:\n");
3180	printk("%s\n", hlock->nest_lock->name);
3181
3184
3185	printk("\nother info that might help us debug this:\n");
3186	lockdep_print_held_locks(curr);
3187
3188	printk("\nstack backtrace:\n");
3189	dump_stack();
3190
3191	return 0;
3192}
3193
3194static int __lock_is_held(struct lockdep_map *lock, int read);
3195
3196/*
3197 * This gets called for every mutex_lock*()/spin_lock*() operation.
3198 * We maintain the dependency maps and validate the locking attempt:
3199 */
3200static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3201			  int trylock, int read, int check, int hardirqs_off,
3202			  struct lockdep_map *nest_lock, unsigned long ip,
3203			  int references, int pin_count)
3204{
3205	struct task_struct *curr = current;
3206	struct lock_class *class = NULL;
3207	struct held_lock *hlock;
3208	unsigned int depth;
3209	int chain_head = 0;
3210	int class_idx;
3211	u64 chain_key;
3212
3213	if (unlikely(!debug_locks))
3214		return 0;
3215
3216	/*
3217	 * Lockdep should run with IRQs disabled, otherwise we could
3218	 * get an interrupt which would want to take locks, which would
3219	 * end up in lockdep and have you got a head-ache already?
3220	 */
3221	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3222		return 0;
3223
3224	if (!prove_locking || lock->key == &__lockdep_no_validate__)
3225		check = 0;
3226
3227	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3228		class = lock->class_cache[subclass];
3229	/*
3230	 * Not cached?
3231	 */
3232	if (unlikely(!class)) {
3233		class = register_lock_class(lock, subclass, 0);
3234		if (!class)
3235			return 0;
3236	}
3237	atomic_inc((atomic_t *)&class->ops);
3238	if (very_verbose(class)) {
3239		printk("\nacquire class [%p] %s", class->key, class->name);
3240		if (class->name_version > 1)
3241			printk(KERN_CONT "#%d", class->name_version);
3242		printk(KERN_CONT "\n");
3243		dump_stack();
3244	}
3245
3246	/*
3247	 * Add the lock to the list of currently held locks.
3248	 * (we dont increase the depth just yet, up until the
3249	 * dependency checks are done)
3250	 */
3251	depth = curr->lockdep_depth;
3252	/*
3253	 * Ran out of static storage for our per-task lock stack again have we?
3254	 */
3255	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3256		return 0;
3257
3258	class_idx = class - lock_classes + 1;
3259
3260	if (depth) {
3261		hlock = curr->held_locks + depth - 1;
3262		if (hlock->class_idx == class_idx && nest_lock) {
3263			if (hlock->references)
3264				hlock->references++;
3265			else
3266				hlock->references = 2;
3267
3268			return 1;
3269		}
3270	}
3271
3272	hlock = curr->held_locks + depth;
3273	/*
3274	 * Plain impossible, we just registered it and checked it weren't no
3275	 * NULL like.. I bet this mushroom I ate was good!
3276	 */
3277	if (DEBUG_LOCKS_WARN_ON(!class))
3278		return 0;
3279	hlock->class_idx = class_idx;
3280	hlock->acquire_ip = ip;
3281	hlock->instance = lock;
3282	hlock->nest_lock = nest_lock;
3283	hlock->irq_context = task_irq_context(curr);
3284	hlock->trylock = trylock;
3285	hlock->read = read;
3286	hlock->check = check;
3287	hlock->hardirqs_off = !!hardirqs_off;
3288	hlock->references = references;
3289#ifdef CONFIG_LOCK_STAT
3290	hlock->waittime_stamp = 0;
3291	hlock->holdtime_stamp = lockstat_clock();
3292#endif
3293	hlock->pin_count = pin_count;
3294
3295	if (check && !mark_irqflags(curr, hlock))
3296		return 0;
3297
3298	/* mark it as used: */
3299	if (!mark_lock(curr, hlock, LOCK_USED))
3300		return 0;
3301
3302	/*
3303	 * Calculate the chain hash: it's the combined hash of all the
3304	 * lock keys along the dependency chain. We save the hash value
3305	 * at every step so that we can get the current hash easily
3306	 * after unlock. The chain hash is then used to cache dependency
3307	 * results.
3308	 *
3309	 * The 'key ID' (the class index) is the most compact key value to
3310	 * drive the hash with, not class->key.
3311	 */
3312	/*
3313	 * Whoops, we did it again.. ran straight out of our static allocation.
3314	 */
3315	if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
3316		return 0;
3317
3318	chain_key = curr->curr_chain_key;
3319	if (!depth) {
3320		/*
3321		 * How can we have a chain hash when we ain't got no keys?!
3322		 */
3323		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3324			return 0;
3325		chain_head = 1;
3326	}
3327
3328	hlock->prev_chain_key = chain_key;
3329	if (separate_irq_context(curr, hlock)) {
3330		chain_key = 0;
3331		chain_head = 1;
3332	}
3333	chain_key = iterate_chain_key(chain_key, class_idx);
3334
3335	if (nest_lock && !__lock_is_held(nest_lock, -1))
3336		return print_lock_nested_lock_not_held(curr, hlock, ip);
3337
3338	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3339		return 0;
3340
3341	curr->curr_chain_key = chain_key;
3342	curr->lockdep_depth++;
3343	check_chain_key(curr);
3344#ifdef CONFIG_DEBUG_LOCKDEP
3345	if (unlikely(!debug_locks))
3346		return 0;
3347#endif
3348	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3349		debug_locks_off();
3350		print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3351		printk(KERN_DEBUG "depth: %i  max: %lu!\n",
3352		       curr->lockdep_depth, MAX_LOCK_DEPTH);
3353
3354		lockdep_print_held_locks(current);
3355		debug_show_all_locks();
3356		dump_stack();
3357
3358		return 0;
3359	}
3360
3361	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3362		max_lockdep_depth = curr->lockdep_depth;
3363
3364	return 1;
3365}
3366
3367static int
3368print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3369			   unsigned long ip)
3370{
3371	if (!debug_locks_off())
3372		return 0;
3373	if (debug_locks_silent)
3374		return 0;
3375
3376	printk("\n");
3377	printk("=====================================\n");
3378	printk("[ BUG: bad unlock balance detected! ]\n");
3379	print_kernel_ident();
3380	printk("-------------------------------------\n");
3381	printk("%s/%d is trying to release lock (",
3382		curr->comm, task_pid_nr(curr));
3383	print_lockdep_cache(lock);
3384	printk(KERN_CONT ") at:\n");
3385	print_ip_sym(ip);
3386	printk("but there are no more locks to release!\n");
3387	printk("\nother info that might help us debug this:\n");
3388	lockdep_print_held_locks(curr);
3389
3390	printk("\nstack backtrace:\n");
3391	dump_stack();
3392
3393	return 0;
3394}
3395
3396static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 
3397{
3398	if (hlock->instance == lock)
3399		return 1;
3400
3401	if (hlock->references) {
3402		struct lock_class *class = lock->class_cache[0];
3403
3404		if (!class)
3405			class = look_up_lock_class(lock, 0);
3406
3407		/*
3408		 * If look_up_lock_class() failed to find a class, we're trying
3409		 * to test if we hold a lock that has never yet been acquired.
3410		 * Clearly if the lock hasn't been acquired _ever_, we're not
3411		 * holding it either, so report failure.
3412		 */
3413		if (!class)
3414			return 0;
3415
3416		/*
3417		 * References, but not a lock we're actually ref-counting?
3418		 * State got messed up, follow the sites that change ->references
3419		 * and try to make sense of it.
3420		 */
3421		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3422			return 0;
3423
3424		if (hlock->class_idx == class - lock_classes + 1)
3425			return 1;
3426	}
3427
3428	return 0;
3429}
3430
3431static int
3432__lock_set_class(struct lockdep_map *lock, const char *name,
3433		 struct lock_class_key *key, unsigned int subclass,
3434		 unsigned long ip)
3435{
3436	struct task_struct *curr = current;
3437	struct held_lock *hlock, *prev_hlock;
 
3438	struct lock_class *class;
3439	unsigned int depth;
3440	int i;
3441
3442	depth = curr->lockdep_depth;
3443	/*
3444	 * This function is about (re)setting the class of a held lock,
3445	 * yet we're not actually holding any locks. Naughty user!
3446	 */
3447	if (DEBUG_LOCKS_WARN_ON(!depth))
3448		return 0;
3449
3450	prev_hlock = NULL;
3451	for (i = depth-1; i >= 0; i--) {
3452		hlock = curr->held_locks + i;
3453		/*
3454		 * We must not cross into another context:
3455		 */
3456		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3457			break;
3458		if (match_held_lock(hlock, lock))
3459			goto found_it;
3460		prev_hlock = hlock;
3461	}
3462	return print_unlock_imbalance_bug(curr, lock, ip);
3463
3464found_it:
3465	lockdep_init_map(lock, name, key, 0);
3466	class = register_lock_class(lock, subclass, 0);
3467	hlock->class_idx = class - lock_classes + 1;
3468
3469	curr->lockdep_depth = i;
3470	curr->curr_chain_key = hlock->prev_chain_key;
3471
3472	for (; i < depth; i++) {
3473		hlock = curr->held_locks + i;
3474		if (!__lock_acquire(hlock->instance,
3475			hlock_class(hlock)->subclass, hlock->trylock,
3476				hlock->read, hlock->check, hlock->hardirqs_off,
3477				hlock->nest_lock, hlock->acquire_ip,
3478				hlock->references, hlock->pin_count))
3479			return 0;
3480	}
3481
3482	/*
3483	 * I took it apart and put it back together again, except now I have
3484	 * these 'spare' parts.. where shall I put them.
3485	 */
3486	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3487		return 0;
 
3488	return 1;
3489}
3490
3491/*
3492 * Remove the lock from the list of currently held locks - this gets
3493 * called on mutex_unlock()/spin_unlock*() (or on a failed
3494 * mutex_lock_interruptible()).
3495 *
3496 * @nested is an hysterical artifact, needs a tree wide cleanup.
3497 */
3498static int
3499__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3500{
3501	struct task_struct *curr = current;
3502	struct held_lock *hlock, *prev_hlock;
3503	unsigned int depth;
3504	int i;
3505
3506	if (unlikely(!debug_locks))
3507		return 0;
3508
3509	depth = curr->lockdep_depth;
3510	/*
3511	 * So we're all set to release this lock.. wait what lock? We don't
3512	 * own any locks, you've been drinking again?
3513	 */
3514	if (DEBUG_LOCKS_WARN_ON(depth <= 0))
3515		 return print_unlock_imbalance_bug(curr, lock, ip);
3516
3517	/*
3518	 * Check whether the lock exists in the current stack
3519	 * of held locks:
3520	 */
3521	prev_hlock = NULL;
3522	for (i = depth-1; i >= 0; i--) {
3523		hlock = curr->held_locks + i;
3524		/*
3525		 * We must not cross into another context:
3526		 */
3527		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3528			break;
3529		if (match_held_lock(hlock, lock))
3530			goto found_it;
3531		prev_hlock = hlock;
3532	}
3533	return print_unlock_imbalance_bug(curr, lock, ip);
3534
3535found_it:
3536	if (hlock->instance == lock)
3537		lock_release_holdtime(hlock);
3538
3539	WARN(hlock->pin_count, "releasing a pinned lock\n");
3540
3541	if (hlock->references) {
3542		hlock->references--;
3543		if (hlock->references) {
3544			/*
3545			 * We had, and after removing one, still have
3546			 * references, the current lock stack is still
3547			 * valid. We're done!
3548			 */
3549			return 1;
3550		}
3551	}
3552
3553	/*
3554	 * We have the right lock to unlock, 'hlock' points to it.
3555	 * Now we remove it from the stack, and add back the other
3556	 * entries (if any), recalculating the hash along the way:
3557	 */
3558
3559	curr->lockdep_depth = i;
3560	curr->curr_chain_key = hlock->prev_chain_key;
3561
3562	for (i++; i < depth; i++) {
3563		hlock = curr->held_locks + i;
3564		if (!__lock_acquire(hlock->instance,
3565			hlock_class(hlock)->subclass, hlock->trylock,
3566				hlock->read, hlock->check, hlock->hardirqs_off,
3567				hlock->nest_lock, hlock->acquire_ip,
3568				hlock->references, hlock->pin_count))
3569			return 0;
3570	}
3571
3572	/*
3573	 * We had N bottles of beer on the wall, we drank one, but now
3574	 * there's not N-1 bottles of beer left on the wall...
 
3575	 */
3576	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3577		return 0;
3578
3579	return 1;
3580}
3581
3582static int __lock_is_held(struct lockdep_map *lock, int read)
 
3583{
3584	struct task_struct *curr = current;
3585	int i;
3586
3587	for (i = 0; i < curr->lockdep_depth; i++) {
3588		struct held_lock *hlock = curr->held_locks + i;
3589
3590		if (match_held_lock(hlock, lock)) {
3591			if (read == -1 || hlock->read == read)
3592				return 1;
3593
3594			return 0;
3595		}
3596	}
3597
3598	return 0;
3599}
3600
3601static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
3602{
3603	struct pin_cookie cookie = NIL_COOKIE;
3604	struct task_struct *curr = current;
3605	int i;
3606
3607	if (unlikely(!debug_locks))
3608		return cookie;
3609
3610	for (i = 0; i < curr->lockdep_depth; i++) {
3611		struct held_lock *hlock = curr->held_locks + i;
3612
3613		if (match_held_lock(hlock, lock)) {
3614			/*
3615			 * Grab 16bits of randomness; this is sufficient to not
3616			 * be guessable and still allows some pin nesting in
3617			 * our u32 pin_count.
3618			 */
3619			cookie.val = 1 + (prandom_u32() >> 16);
3620			hlock->pin_count += cookie.val;
3621			return cookie;
3622		}
3623	}
3624
3625	WARN(1, "pinning an unheld lock\n");
3626	return cookie;
3627}
3628
3629static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3630{
3631	struct task_struct *curr = current;
3632	int i;
3633
3634	if (unlikely(!debug_locks))
3635		return;
3636
3637	for (i = 0; i < curr->lockdep_depth; i++) {
3638		struct held_lock *hlock = curr->held_locks + i;
3639
3640		if (match_held_lock(hlock, lock)) {
3641			hlock->pin_count += cookie.val;
3642			return;
3643		}
3644	}
3645
3646	WARN(1, "pinning an unheld lock\n");
3647}
3648
3649static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3650{
3651	struct task_struct *curr = current;
3652	int i;
3653
3654	if (unlikely(!debug_locks))
3655		return;
3656
3657	for (i = 0; i < curr->lockdep_depth; i++) {
3658		struct held_lock *hlock = curr->held_locks + i;
3659
3660		if (match_held_lock(hlock, lock)) {
3661			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
3662				return;
3663
3664			hlock->pin_count -= cookie.val;
3665
3666			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
3667				hlock->pin_count = 0;
3668
3669			return;
3670		}
3671	}
3672
3673	WARN(1, "unpinning an unheld lock\n");
3674}
3675
3676/*
3677 * Check whether we follow the irq-flags state precisely:
3678 */
3679static void check_flags(unsigned long flags)
3680{
3681#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3682    defined(CONFIG_TRACE_IRQFLAGS)
3683	if (!debug_locks)
3684		return;
3685
3686	if (irqs_disabled_flags(flags)) {
3687		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3688			printk("possible reason: unannotated irqs-off.\n");
3689		}
3690	} else {
3691		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3692			printk("possible reason: unannotated irqs-on.\n");
3693		}
3694	}
3695
3696	/*
3697	 * We dont accurately track softirq state in e.g.
3698	 * hardirq contexts (such as on 4KSTACKS), so only
3699	 * check if not in hardirq contexts:
3700	 */
3701	if (!hardirq_count()) {
3702		if (softirq_count()) {
3703			/* like the above, but with softirqs */
3704			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3705		} else {
3706			/* lick the above, does it taste good? */
3707			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3708		}
3709	}
3710
3711	if (!debug_locks)
3712		print_irqtrace_events(current);
3713#endif
3714}
3715
3716void lock_set_class(struct lockdep_map *lock, const char *name,
3717		    struct lock_class_key *key, unsigned int subclass,
3718		    unsigned long ip)
3719{
3720	unsigned long flags;
3721
3722	if (unlikely(current->lockdep_recursion))
3723		return;
3724
3725	raw_local_irq_save(flags);
3726	current->lockdep_recursion = 1;
3727	check_flags(flags);
3728	if (__lock_set_class(lock, name, key, subclass, ip))
3729		check_chain_key(current);
3730	current->lockdep_recursion = 0;
3731	raw_local_irq_restore(flags);
3732}
3733EXPORT_SYMBOL_GPL(lock_set_class);
3734
3735/*
3736 * We are not always called with irqs disabled - do that here,
3737 * and also avoid lockdep recursion:
3738 */
3739void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3740			  int trylock, int read, int check,
3741			  struct lockdep_map *nest_lock, unsigned long ip)
3742{
3743	unsigned long flags;
3744
3745	if (unlikely(current->lockdep_recursion))
3746		return;
3747
3748	raw_local_irq_save(flags);
3749	check_flags(flags);
3750
3751	current->lockdep_recursion = 1;
3752	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3753	__lock_acquire(lock, subclass, trylock, read, check,
3754		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
3755	current->lockdep_recursion = 0;
3756	raw_local_irq_restore(flags);
3757}
3758EXPORT_SYMBOL_GPL(lock_acquire);
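
/*
 * For reference, a rough sketch of how the locking primitives reach this
 * entry point (the wrapper macros live in include/linux/lockdep.h):
 * spin_lock() ends up doing roughly
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *
 * i.e. subclass 0, not a trylock, a write acquisition, fully checked.
 * spin_trylock() passes trylock == 1, down_read() passes read == 1 and
 * read_lock() (a recursive reader) passes read == 2.
 */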
3759
3760void lock_release(struct lockdep_map *lock, int nested,
3761			  unsigned long ip)
3762{
3763	unsigned long flags;
3764
3765	if (unlikely(current->lockdep_recursion))
3766		return;
3767
3768	raw_local_irq_save(flags);
3769	check_flags(flags);
3770	current->lockdep_recursion = 1;
3771	trace_lock_release(lock, ip);
3772	if (__lock_release(lock, nested, ip))
3773		check_chain_key(current);
3774	current->lockdep_recursion = 0;
3775	raw_local_irq_restore(flags);
3776}
3777EXPORT_SYMBOL_GPL(lock_release);
3778
3779int lock_is_held_type(struct lockdep_map *lock, int read)
3780{
3781	unsigned long flags;
3782	int ret = 0;
3783
3784	if (unlikely(current->lockdep_recursion))
3785		return 1; /* avoid false negative lockdep_assert_held() */
3786
3787	raw_local_irq_save(flags);
3788	check_flags(flags);
3789
3790	current->lockdep_recursion = 1;
3791	ret = __lock_is_held(lock, read);
3792	current->lockdep_recursion = 0;
3793	raw_local_irq_restore(flags);
3794
3795	return ret;
3796}
3797EXPORT_SYMBOL_GPL(lock_is_held_type);
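
/*
 * Usage sketch: lockdep_assert_held(&obj->lock) (names made up) boils
 * down to roughly
 *
 *	WARN_ON(debug_locks && !lock_is_held_type(&obj->lock.dep_map, -1));
 *
 * i.e. "held in any mode".  Passing read == 0 instead requires a write
 * hold and read == 1 requires a matching reader hold, per the 'read'
 * value stored by __lock_acquire().
 */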
 
3798
3799struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
3800{
3801	struct pin_cookie cookie = NIL_COOKIE;
3802	unsigned long flags;
3803
3804	if (unlikely(current->lockdep_recursion))
3805		return cookie;
3806
3807	raw_local_irq_save(flags);
3808	check_flags(flags);
3809
3810	current->lockdep_recursion = 1;
3811	cookie = __lock_pin_lock(lock);
3812	current->lockdep_recursion = 0;
3813	raw_local_irq_restore(flags);
3814
3815	return cookie;
3816}
3817EXPORT_SYMBOL_GPL(lock_pin_lock);
3818
3819void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3820{
3821	unsigned long flags;
3822
3823	if (unlikely(current->lockdep_recursion))
3824		return;
3825
3826	raw_local_irq_save(flags);
3827	check_flags(flags);
3828
3829	current->lockdep_recursion = 1;
3830	__lock_repin_lock(lock, cookie);
3831	current->lockdep_recursion = 0;
3832	raw_local_irq_restore(flags);
3833}
3834EXPORT_SYMBOL_GPL(lock_repin_lock);
3835
3836void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3837{
3838	unsigned long flags;
3839
3840	if (unlikely(current->lockdep_recursion))
3841		return;
3842
3843	raw_local_irq_save(flags);
3844	check_flags(flags);
3845
3846	current->lockdep_recursion = 1;
3847	__lock_unpin_lock(lock, cookie);
3848	current->lockdep_recursion = 0;
3849	raw_local_irq_restore(flags);
3850}
3851EXPORT_SYMBOL_GPL(lock_unpin_lock);
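
/*
 * Usage sketch of the pin API (the scheduler's rq->lock is the main
 * user; names below are made up):
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&foo->lock);
 *	... call code that must not drop foo->lock behind our back ...
 *	lockdep_unpin_lock(&foo->lock, cookie);
 *
 * If the callee releases the lock anyway, __lock_release() warns about
 * releasing a pinned lock, and the later unpin with a now-stale cookie
 * trips the pin-count checks above.
 */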
3852
3853void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
3854{
3855	current->lockdep_reclaim_gfp = gfp_mask;
3856}
3857
3858void lockdep_clear_current_reclaim_state(void)
3859{
3860	current->lockdep_reclaim_gfp = 0;
3861}
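
/*
 * Sketch of how these hooks are used by the page allocator: direct
 * reclaim brackets itself with
 *
 *	lockdep_set_current_reclaim_state(gfp_mask);
 *	... shrink caches, write back pages ...
 *	lockdep_clear_current_reclaim_state();
 *
 * so that locks acquired in between are marked USED_IN_RECLAIM_FS by
 * mark_irqflags() (for a __GFP_FS reclaim mask) and can later be
 * cross-checked against locks held over __GFP_FS allocations (see
 * lockdep_trace_alloc()).
 */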
3862
3863#ifdef CONFIG_LOCK_STAT
3864static int
3865print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
3866			   unsigned long ip)
3867{
3868	if (!debug_locks_off())
3869		return 0;
3870	if (debug_locks_silent)
3871		return 0;
3872
3873	printk("\n");
3874	printk("=================================\n");
3875	printk("[ BUG: bad contention detected! ]\n");
3876	print_kernel_ident();
3877	printk("---------------------------------\n");
3878	printk("%s/%d is trying to contend lock (",
3879		curr->comm, task_pid_nr(curr));
3880	print_lockdep_cache(lock);
3881	printk(KERN_CONT ") at:\n");
3882	print_ip_sym(ip);
3883	printk("but there are no locks held!\n");
3884	printk("\nother info that might help us debug this:\n");
3885	lockdep_print_held_locks(curr);
3886
3887	printk("\nstack backtrace:\n");
3888	dump_stack();
3889
3890	return 0;
3891}
3892
3893static void
3894__lock_contended(struct lockdep_map *lock, unsigned long ip)
3895{
3896	struct task_struct *curr = current;
3897	struct held_lock *hlock, *prev_hlock;
3898	struct lock_class_stats *stats;
3899	unsigned int depth;
3900	int i, contention_point, contending_point;
3901
3902	depth = curr->lockdep_depth;
3903	/*
3904	 * Whee, we contended on this lock, except it seems we're not
3905	 * actually trying to acquire anything much at all..
3906	 */
3907	if (DEBUG_LOCKS_WARN_ON(!depth))
3908		return;
3909
3910	prev_hlock = NULL;
3911	for (i = depth-1; i >= 0; i--) {
3912		hlock = curr->held_locks + i;
3913		/*
3914		 * We must not cross into another context:
3915		 */
3916		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3917			break;
3918		if (match_held_lock(hlock, lock))
3919			goto found_it;
3920		prev_hlock = hlock;
3921	}
3922	print_lock_contention_bug(curr, lock, ip);
3923	return;
3924
3925found_it:
3926	if (hlock->instance != lock)
3927		return;
3928
3929	hlock->waittime_stamp = lockstat_clock();
3930
3931	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3932	contending_point = lock_point(hlock_class(hlock)->contending_point,
3933				      lock->ip);
3934
3935	stats = get_lock_stats(hlock_class(hlock));
3936	if (contention_point < LOCKSTAT_POINTS)
3937		stats->contention_point[contention_point]++;
3938	if (contending_point < LOCKSTAT_POINTS)
3939		stats->contending_point[contending_point]++;
3940	if (lock->cpu != smp_processor_id())
3941		stats->bounces[bounce_contended + !!hlock->read]++;
3942	put_lock_stats(stats);
3943}
3944
3945static void
3946__lock_acquired(struct lockdep_map *lock, unsigned long ip)
3947{
3948	struct task_struct *curr = current;
3949	struct held_lock *hlock, *prev_hlock;
3950	struct lock_class_stats *stats;
3951	unsigned int depth;
3952	u64 now, waittime = 0;
3953	int i, cpu;
3954
3955	depth = curr->lockdep_depth;
3956	/*
3957	 * Yay, we acquired ownership of this lock we didn't try to
3958	 * acquire, how the heck did that happen?
3959	 */
3960	if (DEBUG_LOCKS_WARN_ON(!depth))
3961		return;
3962
3963	prev_hlock = NULL;
3964	for (i = depth-1; i >= 0; i--) {
3965		hlock = curr->held_locks + i;
3966		/*
3967		 * We must not cross into another context:
3968		 */
3969		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3970			break;
3971		if (match_held_lock(hlock, lock))
3972			goto found_it;
3973		prev_hlock = hlock;
3974	}
3975	print_lock_contention_bug(curr, lock, _RET_IP_);
3976	return;
3977
3978found_it:
3979	if (hlock->instance != lock)
3980		return;
3981
3982	cpu = smp_processor_id();
3983	if (hlock->waittime_stamp) {
3984		now = lockstat_clock();
3985		waittime = now - hlock->waittime_stamp;
3986		hlock->holdtime_stamp = now;
3987	}
3988
3989	trace_lock_acquired(lock, ip);
3990
3991	stats = get_lock_stats(hlock_class(hlock));
3992	if (waittime) {
3993		if (hlock->read)
3994			lock_time_inc(&stats->read_waittime, waittime);
3995		else
3996			lock_time_inc(&stats->write_waittime, waittime);
3997	}
3998	if (lock->cpu != cpu)
3999		stats->bounces[bounce_acquired + !!hlock->read]++;
4000	put_lock_stats(stats);
4001
4002	lock->cpu = cpu;
4003	lock->ip = ip;
4004}
4005
4006void lock_contended(struct lockdep_map *lock, unsigned long ip)
4007{
4008	unsigned long flags;
4009
4010	if (unlikely(!lock_stat))
4011		return;
4012
4013	if (unlikely(current->lockdep_recursion))
4014		return;
4015
4016	raw_local_irq_save(flags);
4017	check_flags(flags);
4018	current->lockdep_recursion = 1;
4019	trace_lock_contended(lock, ip);
4020	__lock_contended(lock, ip);
4021	current->lockdep_recursion = 0;
4022	raw_local_irq_restore(flags);
4023}
4024EXPORT_SYMBOL_GPL(lock_contended);
4025
4026void lock_acquired(struct lockdep_map *lock, unsigned long ip)
4027{
4028	unsigned long flags;
4029
4030	if (unlikely(!lock_stat))
4031		return;
4032
4033	if (unlikely(current->lockdep_recursion))
4034		return;
4035
4036	raw_local_irq_save(flags);
4037	check_flags(flags);
4038	current->lockdep_recursion = 1;
4039	__lock_acquired(lock, ip);
4040	current->lockdep_recursion = 0;
4041	raw_local_irq_restore(flags);
4042}
4043EXPORT_SYMBOL_GPL(lock_acquired);
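/*
 * Illustrative sketch, not part of lockdep.c: lock_contended()/lock_acquired()
 * are normally emitted by the LOCK_CONTENDED() wrapper that the lock
 * implementations use, defined in <linux/lockdep.h> approximately as:
 *
 *	#define LOCK_CONTENDED(_lock, try, lock)			\
 *	do {								\
 *		if (!try(_lock)) {					\
 *			lock_contended(&(_lock)->dep_map, _RET_IP_);	\
 *			lock(_lock);					\
 *		}							\
 *		lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
 *	} while (0)
 *
 * so contention is only recorded when the trylock fast path fails, and the
 * wait time computed in __lock_contended()/__lock_acquired() above spans the
 * blocking lock(_lock) call.
 */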
4044#endif
4045
4046/*
4047 * Used by the testsuite, sanitize the validator state
4048 * after a simulated failure:
4049 */
4050
4051void lockdep_reset(void)
4052{
4053	unsigned long flags;
4054	int i;
4055
4056	raw_local_irq_save(flags);
4057	current->curr_chain_key = 0;
4058	current->lockdep_depth = 0;
4059	current->lockdep_recursion = 0;
4060	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
4061	nr_hardirq_chains = 0;
4062	nr_softirq_chains = 0;
4063	nr_process_chains = 0;
4064	debug_locks = 1;
4065	for (i = 0; i < CHAINHASH_SIZE; i++)
4066		INIT_HLIST_HEAD(chainhash_table + i);
4067	raw_local_irq_restore(flags);
4068}
4069
4070static void zap_class(struct lock_class *class)
4071{
4072	int i;
4073
4074	/*
4075	 * Remove all dependencies this lock is
4076	 * involved in:
4077	 */
4078	for (i = 0; i < nr_list_entries; i++) {
4079		if (list_entries[i].class == class)
4080			list_del_rcu(&list_entries[i].entry);
4081	}
4082	/*
4083	 * Unhash the class and remove it from the all_lock_classes list:
4084	 */
4085	hlist_del_rcu(&class->hash_entry);
4086	list_del_rcu(&class->lock_entry);
4087
4088	RCU_INIT_POINTER(class->key, NULL);
4089	RCU_INIT_POINTER(class->name, NULL);
4090}
4091
4092static inline int within(const void *addr, void *start, unsigned long size)
4093{
4094	return addr >= start && addr < start + size;
4095}
4096
4097/*
4098 * Used in module.c to remove lock classes from memory that is going to be
4099 * freed; and possibly re-used by other modules.
4100 *
4101 * We will have had one sync_sched() before getting here, so we're guaranteed
4102 * nobody will look up these exact classes -- they're properly dead but still
4103 * allocated.
4104 */
4105void lockdep_free_key_range(void *start, unsigned long size)
4106{
4107	struct lock_class *class;
4108	struct hlist_head *head;
4109	unsigned long flags;
4110	int i;
4111	int locked;
4112
4113	raw_local_irq_save(flags);
4114	locked = graph_lock();
4115
4116	/*
4117	 * Unhash all classes that were created by this module:
4118	 */
4119	for (i = 0; i < CLASSHASH_SIZE; i++) {
4120		head = classhash_table + i;
4121		hlist_for_each_entry_rcu(class, head, hash_entry) {
4122			if (within(class->key, start, size))
4123				zap_class(class);
4124			else if (within(class->name, start, size))
4125				zap_class(class);
4126		}
4127	}
4128
4129	if (locked)
4130		graph_unlock();
4131	raw_local_irq_restore(flags);
4132
4133	/*
4134	 * Wait for any possible iterators from look_up_lock_class() to pass
4135	 * before continuing to free the memory they refer to.
4136	 *
4137	 * sync_sched() is sufficient because the read-side runs with IRQs disabled.
4138	 */
4139	synchronize_sched();
4140
4141	/*
4142	 * XXX at this point we could return the resources to the pool;
4143	 * instead we leak them. We would need to change to bitmap allocators
4144	 * instead of the linear allocators we have now.
4145	 */
4146}
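/*
 * Illustrative sketch, not part of lockdep.c: the main caller is the module
 * unload path, which hands lockdep the address range of the module image
 * about to be freed, roughly as in kernel/module.c's free_module() (field
 * names approximate for this kernel version):
 *
 *	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
 */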
4147
4148void lockdep_reset_lock(struct lockdep_map *lock)
4149{
4150	struct lock_class *class;
4151	struct hlist_head *head;
4152	unsigned long flags;
4153	int i, j;
4154	int locked;
4155
4156	raw_local_irq_save(flags);
4157
4158	/*
4159	 * Remove all classes this lock might have:
4160	 */
4161	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
4162		/*
4163		 * If the class exists we look it up and zap it:
4164		 */
4165		class = look_up_lock_class(lock, j);
4166		if (class)
4167			zap_class(class);
4168	}
4169	/*
4170	 * Debug check: in the end all mapped classes should
4171	 * be gone.
4172	 */
4173	locked = graph_lock();
4174	for (i = 0; i < CLASSHASH_SIZE; i++) {
4175		head = classhash_table + i;
4176		hlist_for_each_entry_rcu(class, head, hash_entry) {
4177			int match = 0;
4178
4179			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4180				match |= class == lock->class_cache[j];
4181
4182			if (unlikely(match)) {
4183				if (debug_locks_off_graph_unlock()) {
4184					/*
4185					 * We all just reset everything, how did it match?
4186					 */
4187					WARN_ON(1);
4188				}
4189				goto out_restore;
4190			}
4191		}
4192	}
4193	if (locked)
4194		graph_unlock();
4195
4196out_restore:
4197	raw_local_irq_restore(flags);
4198}
4199
4200void __init lockdep_info(void)
4201{
4202	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
4203
4204	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
4205	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
4206	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
4207	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
4208	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
4209	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
4210	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
4211
4212	printk(" memory used by lock dependency info: %lu kB\n",
4213		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
4214		sizeof(struct list_head) * CLASSHASH_SIZE +
4215		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
4216		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
4217		sizeof(struct list_head) * CHAINHASH_SIZE
4218#ifdef CONFIG_PROVE_LOCKING
4219		+ sizeof(struct circular_queue)
4220#endif
4221		) / 1024
4222		);
4223
4224	printk(" per task-struct memory footprint: %lu bytes\n",
4225		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
4226}
4227
4228static void
4229print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
4230		     const void *mem_to, struct held_lock *hlock)
4231{
4232	if (!debug_locks_off())
4233		return;
4234	if (debug_locks_silent)
4235		return;
4236
4237	printk("\n");
4238	printk("=========================\n");
4239	printk("[ BUG: held lock freed! ]\n");
4240	print_kernel_ident();
4241	printk("-------------------------\n");
4242	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
4243		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
4244	print_lock(hlock);
4245	lockdep_print_held_locks(curr);
4246
4247	printk("\nstack backtrace:\n");
4248	dump_stack();
4249}
4250
4251static inline int not_in_range(const void* mem_from, unsigned long mem_len,
4252				const void* lock_from, unsigned long lock_len)
4253{
4254	return lock_from + lock_len <= mem_from ||
4255		mem_from + mem_len <= lock_from;
4256}
4257
4258/*
4259 * Called when kernel memory is freed (or unmapped), or if a lock
4260 * is destroyed or reinitialized - this code checks whether there is
4261 * any held lock in the memory range of <from> to <to>:
4262 */
4263void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4264{
4265	struct task_struct *curr = current;
4266	struct held_lock *hlock;
4267	unsigned long flags;
4268	int i;
4269
4270	if (unlikely(!debug_locks))
4271		return;
4272
4273	local_irq_save(flags);
4274	for (i = 0; i < curr->lockdep_depth; i++) {
4275		hlock = curr->held_locks + i;
4276
4277		if (not_in_range(mem_from, mem_len, hlock->instance,
4278					sizeof(*hlock->instance)))
4279			continue;
4280
4281		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
4282		break;
4283	}
4284	local_irq_restore(flags);
4285}
4286EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
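/*
 * Illustrative sketch, not part of lockdep.c: allocator free paths call this
 * with the region being released; code that frees objects embedding locks can
 * do the same.  The baz_* names below are hypothetical.
 */
struct baz {
	spinlock_t lock;
	/* ... */
};

static void baz_destroy(struct baz *z)
{
	/* complain if any task still holds a lock living inside *z */
	debug_check_no_locks_freed(z, sizeof(*z));
	kfree(z);
}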
4287
4288static void print_held_locks_bug(void)
4289{
4290	if (!debug_locks_off())
4291		return;
4292	if (debug_locks_silent)
4293		return;
4294
4295	printk("\n");
4296	printk("=====================================\n");
4297	printk("[ BUG: %s/%d still has locks held! ]\n",
4298	       current->comm, task_pid_nr(current));
4299	print_kernel_ident();
4300	printk("-------------------------------------\n");
4301	lockdep_print_held_locks(current);
4302	printk("\nstack backtrace:\n");
4303	dump_stack();
4304}
4305
4306void debug_check_no_locks_held(void)
4307{
4308	if (unlikely(current->lockdep_depth > 0))
4309		print_held_locks_bug();
4310}
4311EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
4312
4313#ifdef __KERNEL__
4314void debug_show_all_locks(void)
4315{
4316	struct task_struct *g, *p;
4317	int count = 10;
4318	int unlock = 1;
4319
4320	if (unlikely(!debug_locks)) {
4321		printk("INFO: lockdep is turned off.\n");
4322		return;
4323	}
4324	printk("\nShowing all locks held in the system:\n");
4325
4326	/*
4327	 * Here we try to get the tasklist_lock as hard as possible,
4328	 * if not successful after 2 seconds we ignore it (but keep
4329	 * trying). This is to enable a debug printout even if a
4330	 * tasklist_lock-holding task deadlocks or crashes.
4331	 */
4332retry:
4333	if (!read_trylock(&tasklist_lock)) {
4334		if (count == 10)
4335			printk("hm, tasklist_lock locked, retrying... ");
4336		if (count) {
4337			count--;
4338			printk(" #%d", 10-count);
4339			mdelay(200);
4340			goto retry;
4341		}
4342		printk(" ignoring it.\n");
4343		unlock = 0;
4344	} else {
4345		if (count != 10)
4346			printk(KERN_CONT " locked it.\n");
4347	}
4348
4349	do_each_thread(g, p) {
4350		/*
4351		 * It's not reliable to print a task's held locks
4352		 * if it's not sleeping (or if it's not the current
4353		 * task):
4354		 */
4355		if (p->state == TASK_RUNNING && p != current)
4356			continue;
4357		if (p->lockdep_depth)
4358			lockdep_print_held_locks(p);
4359		if (!unlock)
4360			if (read_trylock(&tasklist_lock))
4361				unlock = 1;
4362	} while_each_thread(g, p);
4363
4364	printk("\n");
4365	printk("=============================================\n\n");
4366
4367	if (unlock)
4368		read_unlock(&tasklist_lock);
4369}
4370EXPORT_SYMBOL_GPL(debug_show_all_locks);
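/*
 * Illustrative sketch, not part of lockdep.c: the usual trigger for
 * debug_show_all_locks() is the SysRq 'd' handler, roughly as in
 * drivers/tty/sysrq.c:
 *
 *	static void sysrq_handle_showlocks(int key)
 *	{
 *		debug_show_all_locks();
 *	}
 */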
4371#endif
4372
4373/*
4374 * Careful: only use this function if you are sure that
4375 * the task cannot run in parallel!
4376 */
4377void debug_show_held_locks(struct task_struct *task)
4378{
4379	if (unlikely(!debug_locks)) {
4380		printk("INFO: lockdep is turned off.\n");
4381		return;
4382	}
4383	lockdep_print_held_locks(task);
4384}
4385EXPORT_SYMBOL_GPL(debug_show_held_locks);
4386
4387asmlinkage __visible void lockdep_sys_exit(void)
4388{
4389	struct task_struct *curr = current;
4390
4391	if (unlikely(curr->lockdep_depth)) {
4392		if (!debug_locks_off())
4393			return;
4394		printk("\n");
4395		printk("================================================\n");
4396		printk("[ BUG: lock held when returning to user space! ]\n");
4397		print_kernel_ident();
4398		printk("------------------------------------------------\n");
4399		printk("%s/%d is leaving the kernel with locks still held!\n",
4400				curr->comm, curr->pid);
4401		lockdep_print_held_locks(curr);
4402	}
4403}
4404
4405void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4406{
4407	struct task_struct *curr = current;
4408
4409#ifndef CONFIG_PROVE_RCU_REPEATEDLY
4410	if (!debug_locks_off())
4411		return;
4412#endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
4413	/* Note: the following can be executed concurrently, so be careful. */
4414	printk("\n");
4415	printk("===============================\n");
4416	printk("[ INFO: suspicious RCU usage. ]\n");
4417	print_kernel_ident();
4418	printk("-------------------------------\n");
4419	printk("%s:%d %s!\n", file, line, s);
4420	printk("\nother info that might help us debug this:\n\n");
4421	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
4422	       !rcu_lockdep_current_cpu_online()
4423			? "RCU used illegally from offline CPU!\n"
4424			: !rcu_is_watching()
4425				? "RCU used illegally from idle CPU!\n"
4426				: "",
4427	       rcu_scheduler_active, debug_locks);
4428
4429	/*
4430	 * If a CPU is in the RCU-free window in idle (ie: in the section
4431	 * between rcu_idle_enter() and rcu_idle_exit(), then RCU
4432	 * considers that CPU to be in an "extended quiescent state",
4433	 * which means that RCU will be completely ignoring that CPU.
4434	 * Therefore, rcu_read_lock() and friends have absolutely no
4435	 * effect on a CPU running in that state. In other words, even if
4436	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
4437	 * delete data structures out from under it.  RCU really has no
4438	 * choice here: we need to keep an RCU-free window in idle where
4439	 * the CPU may possibly enter into low power mode. This way we can
4440	 * report an extended quiescent state to other CPUs that started a grace
4441	 * period. Otherwise we would delay any grace period as long as we run
4442	 * in the idle task.
4443	 *
4444	 * So complain bitterly if someone does call rcu_read_lock(),
4445	 * rcu_read_lock_bh() and so on from extended quiescent states.
4446	 */
4447	if (!rcu_is_watching())
4448		printk("RCU used illegally from extended quiescent state!\n");
4449
4450	lockdep_print_held_locks(curr);
4451	printk("\nstack backtrace:\n");
4452	dump_stack();
4453}
4454EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
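/*
 * Illustrative sketch, not part of lockdep.c: lockdep_rcu_suspicious() is
 * normally reached through RCU_LOCKDEP_WARN() in <linux/rcupdate.h>, which is
 * approximately:
 *
 *	#define RCU_LOCKDEP_WARN(c, s)					\
 *	do {								\
 *		static bool __warned;					\
 *		if (debug_lockdep_rcu_enabled() && !__warned && (c)) {	\
 *			__warned = true;				\
 *			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
 *		}							\
 *	} while (0)
 *
 * so e.g. rcu_dereference_check(p, cond) ends up here when neither cond nor
 * rcu_read_lock_held() protects the access.
 */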
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * kernel/lockdep.c
   4 *
   5 * Runtime locking correctness validator
   6 *
   7 * Started by Ingo Molnar:
   8 *
   9 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  10 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  11 *
  12 * this code maps all the lock dependencies as they occur in a live kernel
  13 * and will warn about the following classes of locking bugs:
  14 *
  15 * - lock inversion scenarios
  16 * - circular lock dependencies
  17 * - hardirq/softirq safe/unsafe locking bugs
  18 *
  19 * Bugs are reported even if the current locking scenario does not cause
  20 * any deadlock at this point.
  21 *
  22 * I.e. if anytime in the past two locks were taken in a different order,
  23 * even if it happened for another task, even if those were different
  24 * locks (but of the same class as this lock), this code will detect it.
  25 *
  26 * Thanks to Arjan van de Ven for coming up with the initial idea of
  27 * mapping lock dependencies runtime.
  28 */
  29#define DISABLE_BRANCH_PROFILING
  30#include <linux/mutex.h>
  31#include <linux/sched.h>
  32#include <linux/sched/clock.h>
  33#include <linux/sched/task.h>
  34#include <linux/sched/mm.h>
  35#include <linux/delay.h>
  36#include <linux/module.h>
  37#include <linux/proc_fs.h>
  38#include <linux/seq_file.h>
  39#include <linux/spinlock.h>
  40#include <linux/kallsyms.h>
  41#include <linux/interrupt.h>
  42#include <linux/stacktrace.h>
  43#include <linux/debug_locks.h>
  44#include <linux/irqflags.h>
  45#include <linux/utsname.h>
  46#include <linux/hash.h>
  47#include <linux/ftrace.h>
  48#include <linux/stringify.h>
  49#include <linux/bitmap.h>
  50#include <linux/bitops.h>
  51#include <linux/gfp.h>
  52#include <linux/random.h>
  53#include <linux/jhash.h>
  54#include <linux/nmi.h>
  55#include <linux/rcupdate.h>
  56#include <linux/kprobes.h>
  57
  58#include <asm/sections.h>
  59
  60#include "lockdep_internals.h"
  61
  62#define CREATE_TRACE_POINTS
  63#include <trace/events/lock.h>
  64
  65#ifdef CONFIG_PROVE_LOCKING
  66int prove_locking = 1;
  67module_param(prove_locking, int, 0644);
  68#else
  69#define prove_locking 0
  70#endif
  71
  72#ifdef CONFIG_LOCK_STAT
  73int lock_stat = 1;
  74module_param(lock_stat, int, 0644);
  75#else
  76#define lock_stat 0
  77#endif
  78
  79/*
  80 * lockdep_lock: protects the lockdep graph, the hashes and the
  81 *               class/list/hash allocators.
  82 *
  83 * This is one of the rare exceptions where it's justified
  84 * to use a raw spinlock - we really dont want the spinlock
  85 * code to recurse back into the lockdep code...
  86 */
  87static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
  88static struct task_struct *lockdep_selftest_task_struct;
  89
  90static int graph_lock(void)
  91{
  92	arch_spin_lock(&lockdep_lock);
  93	/*
  94	 * Make sure that if another CPU detected a bug while
  95	 * walking the graph we dont change it (while the other
  96	 * CPU is busy printing out stuff with the graph lock
  97	 * dropped already)
  98	 */
  99	if (!debug_locks) {
 100		arch_spin_unlock(&lockdep_lock);
 101		return 0;
 102	}
 103	/* prevent any recursions within lockdep from causing deadlocks */
 104	current->lockdep_recursion++;
 105	return 1;
 106}
 107
 108static inline int graph_unlock(void)
 109{
 110	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
 111		/*
 112		 * The lockdep graph lock isn't locked while we expect it to
 113		 * be, we're confused now, bye!
 114		 */
 115		return DEBUG_LOCKS_WARN_ON(1);
 116	}
 117
 118	current->lockdep_recursion--;
 119	arch_spin_unlock(&lockdep_lock);
 120	return 0;
 121}
 122
 123/*
 124 * Turn lock debugging off and return with 0 if it was off already,
 125 * and also release the graph lock:
 126 */
 127static inline int debug_locks_off_graph_unlock(void)
 128{
 129	int ret = debug_locks_off();
 130
 131	arch_spin_unlock(&lockdep_lock);
 132
 133	return ret;
 134}
 135
 136unsigned long nr_list_entries;
 137static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 138static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
 139
 140/*
 141 * All data structures here are protected by the global debug_lock.
 142 *
 143 * nr_lock_classes is the number of elements of lock_classes[] that are
 144 * in use.
 145 */
 146#define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
 147#define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
 148static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
 149unsigned long nr_lock_classes;
 150#ifndef CONFIG_DEBUG_LOCKDEP
 151static
 152#endif
 153struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 154static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
 155
 156static inline struct lock_class *hlock_class(struct held_lock *hlock)
 157{
 158	unsigned int class_idx = hlock->class_idx;
 159
 160	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
 161	barrier();
 162
 163	if (!test_bit(class_idx, lock_classes_in_use)) {
 164		/*
 165		 * Someone passed in garbage, we give up.
 166		 */
 167		DEBUG_LOCKS_WARN_ON(1);
 168		return NULL;
 169	}
 170
 171	/*
 172	 * At this point, if the passed hlock->class_idx is still garbage,
 173	 * we just have to live with it
 174	 */
 175	return lock_classes + class_idx;
 176}
 177
 178#ifdef CONFIG_LOCK_STAT
 179static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
 180
 181static inline u64 lockstat_clock(void)
 182{
 183	return local_clock();
 184}
 185
 186static int lock_point(unsigned long points[], unsigned long ip)
 187{
 188	int i;
 189
 190	for (i = 0; i < LOCKSTAT_POINTS; i++) {
 191		if (points[i] == 0) {
 192			points[i] = ip;
 193			break;
 194		}
 195		if (points[i] == ip)
 196			break;
 197	}
 198
 199	return i;
 200}
 201
 202static void lock_time_inc(struct lock_time *lt, u64 time)
 203{
 204	if (time > lt->max)
 205		lt->max = time;
 206
 207	if (time < lt->min || !lt->nr)
 208		lt->min = time;
 209
 210	lt->total += time;
 211	lt->nr++;
 212}
 213
 214static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 215{
 216	if (!src->nr)
 217		return;
 218
 219	if (src->max > dst->max)
 220		dst->max = src->max;
 221
 222	if (src->min < dst->min || !dst->nr)
 223		dst->min = src->min;
 224
 225	dst->total += src->total;
 226	dst->nr += src->nr;
 227}
 228
 229struct lock_class_stats lock_stats(struct lock_class *class)
 230{
 231	struct lock_class_stats stats;
 232	int cpu, i;
 233
 234	memset(&stats, 0, sizeof(struct lock_class_stats));
 235	for_each_possible_cpu(cpu) {
 236		struct lock_class_stats *pcs =
 237			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 238
 239		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
 240			stats.contention_point[i] += pcs->contention_point[i];
 241
 242		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
 243			stats.contending_point[i] += pcs->contending_point[i];
 244
 245		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
 246		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
 247
 248		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
 249		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
 250
 251		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
 252			stats.bounces[i] += pcs->bounces[i];
 253	}
 254
 255	return stats;
 256}
 257
 258void clear_lock_stats(struct lock_class *class)
 259{
 260	int cpu;
 261
 262	for_each_possible_cpu(cpu) {
 263		struct lock_class_stats *cpu_stats =
 264			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 265
 266		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
 267	}
 268	memset(class->contention_point, 0, sizeof(class->contention_point));
 269	memset(class->contending_point, 0, sizeof(class->contending_point));
 270}
 271
 272static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 273{
 274	return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
 275}
 276
 277static void lock_release_holdtime(struct held_lock *hlock)
 278{
 279	struct lock_class_stats *stats;
 280	u64 holdtime;
 281
 282	if (!lock_stat)
 283		return;
 284
 285	holdtime = lockstat_clock() - hlock->holdtime_stamp;
 286
 287	stats = get_lock_stats(hlock_class(hlock));
 288	if (hlock->read)
 289		lock_time_inc(&stats->read_holdtime, holdtime);
 290	else
 291		lock_time_inc(&stats->write_holdtime, holdtime);
 292}
 293#else
 294static inline void lock_release_holdtime(struct held_lock *hlock)
 295{
 296}
 297#endif
 298
 299/*
 300 * We keep a global list of all lock classes. The list is only accessed with
 301 * the lockdep spinlock held. free_lock_classes is a list with free
 302 * elements. These elements are linked together by the lock_entry member in
 303 * struct lock_class.
 304 */
 305LIST_HEAD(all_lock_classes);
 306static LIST_HEAD(free_lock_classes);
 307
 308/**
 309 * struct pending_free - information about data structures about to be freed
 310 * @zapped: Head of a list with struct lock_class elements.
 311 * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
 312 *	are about to be freed.
 313 */
 314struct pending_free {
 315	struct list_head zapped;
 316	DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
 317};
 318
 319/**
 320 * struct delayed_free - data structures used for delayed freeing
 321 *
 322 * A data structure for delayed freeing of data structures that may be
 323 * accessed by RCU readers at the time these were freed.
 324 *
 325 * @rcu_head:  Used to schedule an RCU callback for freeing data structures.
 326 * @index:     Index of @pf to which freed data structures are added.
 327 * @scheduled: Whether or not an RCU callback has been scheduled.
 328 * @pf:        Array with information about data structures about to be freed.
 329 */
 330static struct delayed_free {
 331	struct rcu_head		rcu_head;
 332	int			index;
 333	int			scheduled;
 334	struct pending_free	pf[2];
 335} delayed_free;
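/*
 * Illustrative sketch, not part of lockdep.c: the two pf[] elements act as a
 * double buffer.  Zapped classes are queued on pf[delayed_free.index] under
 * the graph lock, a single RCU callback at a time is scheduled to return them
 * to the free lists once readers are gone, and new zaps accumulate in the
 * other element meanwhile.  With placeholder names (the real helpers appear
 * later in this file) the shape is:
 *
 *	static void example_free_zapped_rcu(struct rcu_head *rh)
 *	{
 *		// under the graph lock: move the completed pending_free's
 *		// zapped classes and chains back to the free lists, then
 *		// re-arm if the other element filled up in the meantime.
 *	}
 *
 *	static void example_schedule_free(void)
 *	{
 *		if (delayed_free.scheduled)	// one callback in flight
 *			return;
 *		delayed_free.scheduled = true;
 *		call_rcu(&delayed_free.rcu_head, example_free_zapped_rcu);
 *	}
 */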
 336
 337/*
 338 * The lockdep classes are in a hash-table as well, for fast lookup:
 339 */
 340#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
 341#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
 342#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
 343#define classhashentry(key)	(classhash_table + __classhashfn((key)))
 344
 345static struct hlist_head classhash_table[CLASSHASH_SIZE];
 346
 347/*
 348 * We put the lock dependency chains into a hash-table as well, to cache
 349 * their existence:
 350 */
 351#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
 352#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
 353#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
 354#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
 355
 356static struct hlist_head chainhash_table[CHAINHASH_SIZE];
 357
 358/*
 359 * The hash key of the lock dependency chains is a hash itself too:
 360 * it's a hash of all locks taken up to that lock, including that lock.
 361 * It's a 64-bit hash, because it's important for the keys to be
 362 * unique.
 363 */
 364static inline u64 iterate_chain_key(u64 key, u32 idx)
 365{
 366	u32 k0 = key, k1 = key >> 32;
 367
 368	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
 369
 370	return k0 | (u64)k1 << 32;
 371}
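/*
 * Illustrative sketch, not part of lockdep.c: a chain key is built by folding
 * in an identifier of each held lock in acquisition order, roughly what the
 * chain-key validation further down does:
 *
 *	u64 chain_key = INITIAL_CHAIN_KEY;
 *	int i;
 *
 *	for (i = 0; i < curr->lockdep_depth; i++)
 *		chain_key = iterate_chain_key(chain_key,
 *					      curr->held_locks[i].class_idx);
 *
 * Two tasks that take the same classes in the same order thus compute the
 * same key and hit the cached lock chain instead of re-validating it.
 */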
 372
 373void lockdep_init_task(struct task_struct *task)
 374{
 375	task->lockdep_depth = 0; /* no locks held yet */
 376	task->curr_chain_key = INITIAL_CHAIN_KEY;
 377	task->lockdep_recursion = 0;
 378}
 379
 380void lockdep_off(void)
 381{
 382	current->lockdep_recursion++;
 383}
 384EXPORT_SYMBOL(lockdep_off);
 385
 386void lockdep_on(void)
 387{
 388	current->lockdep_recursion--;
 389}
 390EXPORT_SYMBOL(lockdep_on);
 391
 392void lockdep_set_selftest_task(struct task_struct *task)
 393{
 394	lockdep_selftest_task_struct = task;
 395}
 396
 397/*
 398 * Debugging switches:
 399 */
 400
 401#define VERBOSE			0
 402#define VERY_VERBOSE		0
 403
 404#if VERBOSE
 405# define HARDIRQ_VERBOSE	1
 406# define SOFTIRQ_VERBOSE	1
 407#else
 408# define HARDIRQ_VERBOSE	0
 409# define SOFTIRQ_VERBOSE	0
 410#endif
 411
 412#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
 413/*
 414 * Quick filtering for interesting events:
 415 */
 416static int class_filter(struct lock_class *class)
 417{
 418#if 0
 419	/* Example */
 420	if (class->name_version == 1 &&
 421			!strcmp(class->name, "lockname"))
 422		return 1;
 423	if (class->name_version == 1 &&
 424			!strcmp(class->name, "&struct->lockfield"))
 425		return 1;
 426#endif
 427	/* Filter everything else. Returning 1 would allow everything else */
 428	return 0;
 429}
 430#endif
 431
 432static int verbose(struct lock_class *class)
 433{
 434#if VERBOSE
 435	return class_filter(class);
 436#endif
 437	return 0;
 438}
 439
 440static void print_lockdep_off(const char *bug_msg)
 441{
 442	printk(KERN_DEBUG "%s\n", bug_msg);
 443	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
 444#ifdef CONFIG_LOCK_STAT
 445	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
 446#endif
 447}
 448
 449unsigned long nr_stack_trace_entries;
 450
 451#ifdef CONFIG_PROVE_LOCKING
 452/**
 453 * struct lock_trace - single stack backtrace
 454 * @hash_entry:	Entry in a stack_trace_hash[] list.
 455 * @hash:	jhash() of @entries.
 456 * @nr_entries:	Number of entries in @entries.
 457 * @entries:	Actual stack backtrace.
 458 */
 459struct lock_trace {
 460	struct hlist_node	hash_entry;
 461	u32			hash;
 462	u32			nr_entries;
 463	unsigned long		entries[0] __aligned(sizeof(unsigned long));
 464};
 465#define LOCK_TRACE_SIZE_IN_LONGS				\
 466	(sizeof(struct lock_trace) / sizeof(unsigned long))
 467/*
 468 * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
 469 */
 470static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
 471static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];
 472
 473static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
 474{
 475	return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
 476		memcmp(t1->entries, t2->entries,
 477		       t1->nr_entries * sizeof(t1->entries[0])) == 0;
 478}
 479
 480static struct lock_trace *save_trace(void)
 481{
 482	struct lock_trace *trace, *t2;
 483	struct hlist_head *hash_head;
 484	u32 hash;
 485	unsigned int max_entries;
 486
 487	BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
 488	BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
 489
 490	trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
 491	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
 492		LOCK_TRACE_SIZE_IN_LONGS;
 493	trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
 494
 495	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES -
 496	    LOCK_TRACE_SIZE_IN_LONGS - 1) {
 497		if (!debug_locks_off_graph_unlock())
 498			return NULL;
 499
 500		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
 501		dump_stack();
 502
 503		return NULL;
 504	}
 505
 506	hash = jhash(trace->entries, trace->nr_entries *
 507		     sizeof(trace->entries[0]), 0);
 508	trace->hash = hash;
 509	hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
 510	hlist_for_each_entry(t2, hash_head, hash_entry) {
 511		if (traces_identical(trace, t2))
 512			return t2;
 513	}
 514	nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
 515	hlist_add_head(&trace->hash_entry, hash_head);
 516
 517	return trace;
 518}
 519
 520/* Return the number of stack traces in the stack_trace[] array. */
 521u64 lockdep_stack_trace_count(void)
 522{
 523	struct lock_trace *trace;
 524	u64 c = 0;
 525	int i;
 526
 527	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
 528		hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
 529			c++;
 530		}
 531	}
 532
 533	return c;
 534}
 535
 536/* Return the number of stack hash chains that have at least one stack trace. */
 537u64 lockdep_stack_hash_count(void)
 538{
 539	u64 c = 0;
 540	int i;
 541
 542	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
 543		if (!hlist_empty(&stack_trace_hash[i]))
 544			c++;
 545
 546	return c;
 547}
 548#endif
 549
 550unsigned int nr_hardirq_chains;
 551unsigned int nr_softirq_chains;
 552unsigned int nr_process_chains;
 553unsigned int max_lockdep_depth;
 554
 555#ifdef CONFIG_DEBUG_LOCKDEP
 556/*
 557 * Various lockdep statistics:
 558 */
 559DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
 560#endif
 561
 562#ifdef CONFIG_PROVE_LOCKING
 563/*
 564 * Locking printouts:
 565 */
 566
 567#define __USAGE(__STATE)						\
 568	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
 569	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
 570	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
 571	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
 572
 573static const char *usage_str[] =
 574{
 575#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
 576#include "lockdep_states.h"
 577#undef LOCKDEP_STATE
 578	[LOCK_USED] = "INITIAL USE",
 579};
 580#endif
 581
 582const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
 583{
 584	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 585}
 586
 587static inline unsigned long lock_flag(enum lock_usage_bit bit)
 588{
 589	return 1UL << bit;
 590}
 591
 592static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
 593{
 594	/*
 595	 * The usage character defaults to '.' (i.e., irqs disabled and not in
 596	 * irq context), which is the safest usage category.
 597	 */
 598	char c = '.';
 599
 600	/*
 601	 * The order of the following usage checks matters, which will
 602	 * result in the outcome character as follows:
 603	 *
 604	 * - '+': irq is enabled and not in irq context
 605	 * - '-': in irq context and irq is disabled
 606	 * - '?': in irq context and irq is enabled
 607	 */
 608	if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
 609		c = '+';
 610		if (class->usage_mask & lock_flag(bit))
 611			c = '?';
 612	} else if (class->usage_mask & lock_flag(bit))
 613		c = '-';
 614
 615	return c;
 616}
 617
 618void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
 619{
 620	int i = 0;
 621
 622#define LOCKDEP_STATE(__STATE) 						\
 623	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
 624	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
 625#include "lockdep_states.h"
 626#undef LOCKDEP_STATE
 627
 628	usage[i] = '\0';
 629}
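/*
 * Illustrative example, not taken from a real report: the four characters are
 * HARDIRQ-W, HARDIRQ-R, SOFTIRQ-W and SOFTIRQ-R.  A mutex only ever taken in
 * process context with interrupts enabled prints as
 *
 *	(&foo->lock){+.+.}
 *
 * whereas a lock that is also taken from hardirq context shows '-' (or '?' if
 * it was additionally held with hardirqs enabled) in the HARDIRQ-W slot.
 */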
 630
 631static void __print_lock_name(struct lock_class *class)
 632{
 633	char str[KSYM_NAME_LEN];
 634	const char *name;
 635
 636	name = class->name;
 637	if (!name) {
 638		name = __get_key_name(class->key, str);
 639		printk(KERN_CONT "%s", name);
 640	} else {
 641		printk(KERN_CONT "%s", name);
 642		if (class->name_version > 1)
 643			printk(KERN_CONT "#%d", class->name_version);
 644		if (class->subclass)
 645			printk(KERN_CONT "/%d", class->subclass);
 646	}
 647}
 648
 649static void print_lock_name(struct lock_class *class)
 650{
 651	char usage[LOCK_USAGE_CHARS];
 652
 653	get_usage_chars(class, usage);
 654
 655	printk(KERN_CONT " (");
 656	__print_lock_name(class);
 657	printk(KERN_CONT "){%s}", usage);
 658}
 659
 660static void print_lockdep_cache(struct lockdep_map *lock)
 661{
 662	const char *name;
 663	char str[KSYM_NAME_LEN];
 664
 665	name = lock->name;
 666	if (!name)
 667		name = __get_key_name(lock->key->subkeys, str);
 668
 669	printk(KERN_CONT "%s", name);
 670}
 671
 672static void print_lock(struct held_lock *hlock)
 673{
 674	/*
 675	 * We can be called locklessly through debug_show_all_locks() so be
 676	 * extra careful, the hlock might have been released and cleared.
 677	 *
 678	 * If this indeed happens, let's pretend it does not hurt to continue
 679	 * to print the lock unless the hlock class_idx does not point to a
 680	 * registered class. The rationale here is: since we don't attempt
 681	 * to distinguish whether we are in this situation, if it just
 682	 * happened we can't count on class_idx to tell either.
 683	 */
 684	struct lock_class *lock = hlock_class(hlock);
 685
 686	if (!lock) {
 687		printk(KERN_CONT "<RELEASED>\n");
 688		return;
 689	}
 690
 691	printk(KERN_CONT "%px", hlock->instance);
 692	print_lock_name(lock);
 693	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
 694}
 695
 696static void lockdep_print_held_locks(struct task_struct *p)
 697{
 698	int i, depth = READ_ONCE(p->lockdep_depth);
 699
 700	if (!depth)
 701		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
 702	else
 703		printk("%d lock%s held by %s/%d:\n", depth,
 704		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
 705	/*
 706	 * It's not reliable to print a task's held locks if it's not sleeping
 707	 * and it's not the current task.
 708	 */
 709	if (p->state == TASK_RUNNING && p != current)
 710		return;
 711	for (i = 0; i < depth; i++) {
 712		printk(" #%d: ", i);
 713		print_lock(p->held_locks + i);
 714	}
 715}
 716
 717static void print_kernel_ident(void)
 718{
 719	printk("%s %.*s %s\n", init_utsname()->release,
 720		(int)strcspn(init_utsname()->version, " "),
 721		init_utsname()->version,
 722		print_tainted());
 723}
 724
 725static int very_verbose(struct lock_class *class)
 726{
 727#if VERY_VERBOSE
 728	return class_filter(class);
 729#endif
 730	return 0;
 731}
 732
 733/*
 734 * Is this the address of a static object:
 735 */
 736#ifdef __KERNEL__
 737static int static_obj(const void *obj)
 738{
 739	unsigned long start = (unsigned long) &_stext,
 740		      end   = (unsigned long) &_end,
 741		      addr  = (unsigned long) obj;
 742
 743	if (arch_is_kernel_initmem_freed(addr))
 744		return 0;
 745
 746	/*
 747	 * static variable?
 748	 */
 749	if ((addr >= start) && (addr < end))
 750		return 1;
 751
 752	if (arch_is_kernel_data(addr))
 753		return 1;
 754
 755	/*
 756	 * in-kernel percpu var?
 757	 */
 758	if (is_kernel_percpu_address(addr))
 759		return 1;
 760
 761	/*
 762	 * module static or percpu var?
 763	 */
 764	return is_module_address(addr) || is_module_percpu_address(addr);
 765}
 766#endif
 767
 768/*
 769 * To make lock name printouts unique, we calculate a unique
 770 * class->name_version generation counter. The caller must hold the graph
 771 * lock.
 772 */
 773static int count_matching_names(struct lock_class *new_class)
 774{
 775	struct lock_class *class;
 776	int count = 0;
 777
 778	if (!new_class->name)
 779		return 0;
 780
 781	list_for_each_entry(class, &all_lock_classes, lock_entry) {
 782		if (new_class->key - new_class->subclass == class->key)
 783			return class->name_version;
 784		if (class->name && !strcmp(class->name, new_class->name))
 785			count = max(count, class->name_version);
 786	}
 787
 788	return count + 1;
 789}
 790
 791static inline struct lock_class *
 792look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 793{
 794	struct lockdep_subclass_key *key;
 795	struct hlist_head *hash_head;
 796	struct lock_class *class;
 797
 798	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 799		debug_locks_off();
 800		printk(KERN_ERR
 801			"BUG: looking up invalid subclass: %u\n", subclass);
 802		printk(KERN_ERR
 803			"turning off the locking correctness validator.\n");
 804		dump_stack();
 805		return NULL;
 806	}
 807
 808	/*
 809	 * If it is not initialised then it has never been locked,
 810	 * so it won't be present in the hash table.
 811	 */
 812	if (unlikely(!lock->key))
 813		return NULL;
 814
 815	/*
 816	 * NOTE: the class-key must be unique. For dynamic locks, a static
 817	 * lock_class_key variable is passed in through the mutex_init()
 818	 * (or spin_lock_init()) call - which acts as the key. For static
 819	 * locks we use the lock object itself as the key.
 820	 */
 821	BUILD_BUG_ON(sizeof(struct lock_class_key) >
 822			sizeof(struct lockdep_map));
 823
 824	key = lock->key->subkeys + subclass;
 825
 826	hash_head = classhashentry(key);
 827
 828	/*
 829	 * We do an RCU walk of the hash, see lockdep_free_key_range().
 830	 */
 831	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 832		return NULL;
 833
 834	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
 835		if (class->key == key) {
 836			/*
 837			 * Huh! same key, different name? Did someone trample
 838			 * on some memory? We're most confused.
 839			 */
 840			WARN_ON_ONCE(class->name != lock->name &&
 841				     lock->key != &__lockdep_no_validate__);
 842			return class;
 843		}
 844	}
 845
 846	return NULL;
 847}
 848
 849/*
 850 * Static locks do not have their class-keys yet - for them the key is
 851 * the lock object itself. If the lock is in the per cpu area, the
 852 * canonical address of the lock (per cpu offset removed) is used.
 853 */
 854static bool assign_lock_key(struct lockdep_map *lock)
 855{
 856	unsigned long can_addr, addr = (unsigned long)lock;
 857
 858#ifdef __KERNEL__
 859	/*
 860	 * lockdep_free_key_range() assumes that struct lock_class_key
 861	 * objects do not overlap. Since we use the address of lock
 862	 * objects as class key for static objects, check whether the
 863	 * size of lock_class_key objects does not exceed the size of
 864	 * the smallest lock object.
 865	 */
 866	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
 867#endif
 868
 869	if (__is_kernel_percpu_address(addr, &can_addr))
 870		lock->key = (void *)can_addr;
 871	else if (__is_module_percpu_address(addr, &can_addr))
 872		lock->key = (void *)can_addr;
 873	else if (static_obj(lock))
 874		lock->key = (void *)lock;
 875	else {
 876		/* Debug-check: all keys must be persistent! */
 877		debug_locks_off();
 878		pr_err("INFO: trying to register non-static key.\n");
 879		pr_err("the code is fine but needs lockdep annotation.\n");
 880		pr_err("turning off the locking correctness validator.\n");
 881		dump_stack();
 882		return false;
 883	}
 884
 885	return true;
 886}
 887
 888#ifdef CONFIG_DEBUG_LOCKDEP
 889
 890/* Check whether element @e occurs in list @h */
 891static bool in_list(struct list_head *e, struct list_head *h)
 892{
 893	struct list_head *f;
 894
 895	list_for_each(f, h) {
 896		if (e == f)
 897			return true;
 898	}
 899
 900	return false;
 901}
 902
 903/*
 904 * Check whether entry @e occurs in any of the locks_after or locks_before
 905 * lists.
 906 */
 907static bool in_any_class_list(struct list_head *e)
 908{
 909	struct lock_class *class;
 910	int i;
 911
 912	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
 913		class = &lock_classes[i];
 914		if (in_list(e, &class->locks_after) ||
 915		    in_list(e, &class->locks_before))
 916			return true;
 917	}
 918	return false;
 919}
 920
 921static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
 922{
 923	struct lock_list *e;
 924
 925	list_for_each_entry(e, h, entry) {
 926		if (e->links_to != c) {
 927			printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
 928			       c->name ? : "(?)",
 929			       (unsigned long)(e - list_entries),
 930			       e->links_to && e->links_to->name ?
 931			       e->links_to->name : "(?)",
 932			       e->class && e->class->name ? e->class->name :
 933			       "(?)");
 934			return false;
 935		}
 936	}
 937	return true;
 938}
 939
 940#ifdef CONFIG_PROVE_LOCKING
 941static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
 942#endif
 943
 944static bool check_lock_chain_key(struct lock_chain *chain)
 945{
 946#ifdef CONFIG_PROVE_LOCKING
 947	u64 chain_key = INITIAL_CHAIN_KEY;
 948	int i;
 949
 950	for (i = chain->base; i < chain->base + chain->depth; i++)
 951		chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
 952	/*
 953	 * The 'unsigned long long' casts avoid a compiler warning when
 954	 * building tools/lib/lockdep.
 955	 */
 956	if (chain->chain_key != chain_key) {
 957		printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
 958		       (unsigned long long)(chain - lock_chains),
 959		       (unsigned long long)chain->chain_key,
 960		       (unsigned long long)chain_key);
 961		return false;
 962	}
 963#endif
 964	return true;
 965}
 966
 967static bool in_any_zapped_class_list(struct lock_class *class)
 968{
 969	struct pending_free *pf;
 970	int i;
 971
 972	for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
 973		if (in_list(&class->lock_entry, &pf->zapped))
 974			return true;
 975	}
 976
 977	return false;
 978}
 979
 980static bool __check_data_structures(void)
 981{
 982	struct lock_class *class;
 983	struct lock_chain *chain;
 984	struct hlist_head *head;
 985	struct lock_list *e;
 986	int i;
 987
 988	/* Check whether all classes occur in a lock list. */
 989	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
 990		class = &lock_classes[i];
 991		if (!in_list(&class->lock_entry, &all_lock_classes) &&
 992		    !in_list(&class->lock_entry, &free_lock_classes) &&
 993		    !in_any_zapped_class_list(class)) {
 994			printk(KERN_INFO "class %px/%s is not in any class list\n",
 995			       class, class->name ? : "(?)");
 996			return false;
 997		}
 998	}
 999
1000	/* Check whether all classes have valid lock lists. */
1001	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1002		class = &lock_classes[i];
1003		if (!class_lock_list_valid(class, &class->locks_before))
1004			return false;
1005		if (!class_lock_list_valid(class, &class->locks_after))
1006			return false;
1007	}
1008
1009	/* Check the chain_key of all lock chains. */
1010	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
1011		head = chainhash_table + i;
1012		hlist_for_each_entry_rcu(chain, head, entry) {
1013			if (!check_lock_chain_key(chain))
1014				return false;
1015		}
1016	}
1017
1018	/*
1019	 * Check whether all list entries that are in use occur in a class
1020	 * lock list.
1021	 */
1022	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1023		e = list_entries + i;
1024		if (!in_any_class_list(&e->entry)) {
1025			printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
1026			       (unsigned int)(e - list_entries),
1027			       e->class->name ? : "(?)",
1028			       e->links_to->name ? : "(?)");
1029			return false;
1030		}
1031	}
1032
1033	/*
1034	 * Check whether all list entries that are not in use do not occur in
1035	 * a class lock list.
1036	 */
1037	for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1038		e = list_entries + i;
1039		if (in_any_class_list(&e->entry)) {
1040			printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
1041			       (unsigned int)(e - list_entries),
1042			       e->class && e->class->name ? e->class->name :
1043			       "(?)",
1044			       e->links_to && e->links_to->name ?
1045			       e->links_to->name : "(?)");
1046			return false;
1047		}
1048	}
1049
1050	return true;
1051}
1052
1053int check_consistency = 0;
1054module_param(check_consistency, int, 0644);
1055
1056static void check_data_structures(void)
1057{
1058	static bool once = false;
1059
1060	if (check_consistency && !once) {
1061		if (!__check_data_structures()) {
1062			once = true;
1063			WARN_ON(once);
1064		}
1065	}
1066}
1067
1068#else /* CONFIG_DEBUG_LOCKDEP */
1069
1070static inline void check_data_structures(void) { }
1071
1072#endif /* CONFIG_DEBUG_LOCKDEP */
1073
1074/*
1075 * Initialize the lock_classes[] array elements, the free_lock_classes list
1076 * and also the delayed_free structure.
1077 */
1078static void init_data_structures_once(void)
1079{
1080	static bool ds_initialized, rcu_head_initialized;
1081	int i;
1082
1083	if (likely(rcu_head_initialized))
1084		return;
1085
1086	if (system_state >= SYSTEM_SCHEDULING) {
1087		init_rcu_head(&delayed_free.rcu_head);
1088		rcu_head_initialized = true;
1089	}
1090
1091	if (ds_initialized)
1092		return;
1093
1094	ds_initialized = true;
1095
1096	INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
1097	INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
1098
1099	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1100		list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
1101		INIT_LIST_HEAD(&lock_classes[i].locks_after);
1102		INIT_LIST_HEAD(&lock_classes[i].locks_before);
1103	}
1104}
1105
1106static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
1107{
1108	unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
1109
1110	return lock_keys_hash + hash;
1111}
1112
1113/* Register a dynamically allocated key. */
1114void lockdep_register_key(struct lock_class_key *key)
1115{
1116	struct hlist_head *hash_head;
1117	struct lock_class_key *k;
1118	unsigned long flags;
1119
1120	if (WARN_ON_ONCE(static_obj(key)))
1121		return;
1122	hash_head = keyhashentry(key);
1123
1124	raw_local_irq_save(flags);
1125	if (!graph_lock())
1126		goto restore_irqs;
1127	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1128		if (WARN_ON_ONCE(k == key))
1129			goto out_unlock;
1130	}
1131	hlist_add_head_rcu(&key->hash_entry, hash_head);
1132out_unlock:
1133	graph_unlock();
1134restore_irqs:
1135	raw_local_irq_restore(flags);
1136}
1137EXPORT_SYMBOL_GPL(lockdep_register_key);
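/*
 * Illustrative sketch, not part of lockdep.c: a dynamically allocated key is
 * registered before first use and unregistered before the memory holding it
 * is freed (lockdep_unregister_key() is the counterpart defined elsewhere).
 * The foo_* names below are hypothetical.
 */
struct foo {
	spinlock_t		lock;
	struct lock_class_key	key;
};

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	lockdep_register_key(&f->key);
	spin_lock_init(&f->lock);
	lockdep_set_class(&f->lock, &f->key);
	return f;
}

static void foo_destroy(struct foo *f)
{
	lockdep_unregister_key(&f->key);
	kfree(f);
}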
1138
1139/* Check whether a key has been registered as a dynamic key. */
1140static bool is_dynamic_key(const struct lock_class_key *key)
1141{
1142	struct hlist_head *hash_head;
1143	struct lock_class_key *k;
1144	bool found = false;
1145
1146	if (WARN_ON_ONCE(static_obj(key)))
1147		return false;
1148
1149	/*
1150	 * If lock debugging is disabled lock_keys_hash[] may contain
1151	 * pointers to memory that has already been freed. Avoid triggering
1152	 * a use-after-free in that case by returning early.
1153	 */
1154	if (!debug_locks)
1155		return true;
1156
1157	hash_head = keyhashentry(key);
1158
1159	rcu_read_lock();
1160	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1161		if (k == key) {
1162			found = true;
1163			break;
1164		}
1165	}
1166	rcu_read_unlock();
1167
1168	return found;
1169}
1170
1171/*
1172 * Register a lock's class in the hash-table, if the class is not present
1173 * yet. Otherwise we look it up. We cache the result in the lock object
1174 * itself, so actual lookup of the hash should be once per lock object.
1175 */
1176static struct lock_class *
1177register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1178{
1179	struct lockdep_subclass_key *key;
1180	struct hlist_head *hash_head;
1181	struct lock_class *class;
1182
1183	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1184
1185	class = look_up_lock_class(lock, subclass);
1186	if (likely(class))
1187		goto out_set_class_cache;
1188
1189	if (!lock->key) {
1190		if (!assign_lock_key(lock))
1191			return NULL;
1192	} else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
1193		return NULL;
1194	}
1195
1196	key = lock->key->subkeys + subclass;
1197	hash_head = classhashentry(key);
1198
1199	if (!graph_lock()) {
1200		return NULL;
1201	}
1202	/*
1203	 * We have to do the hash-walk again, to avoid races
1204	 * with another CPU:
1205	 */
1206	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
1207		if (class->key == key)
1208			goto out_unlock_set;
1209	}
1210
1211	init_data_structures_once();
1212
1213	/* Allocate a new lock class and add it to the hash. */
1214	class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
1215					 lock_entry);
1216	if (!class) {
1217		if (!debug_locks_off_graph_unlock()) {
1218			return NULL;
1219		}
1220
1221		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
1222		dump_stack();
1223		return NULL;
1224	}
1225	nr_lock_classes++;
1226	__set_bit(class - lock_classes, lock_classes_in_use);
1227	debug_atomic_inc(nr_unused_locks);
1228	class->key = key;
1229	class->name = lock->name;
1230	class->subclass = subclass;
1231	WARN_ON_ONCE(!list_empty(&class->locks_before));
1232	WARN_ON_ONCE(!list_empty(&class->locks_after));
1233	class->name_version = count_matching_names(class);
1234	/*
1235	 * We use RCU's safe list-add method to make
1236	 * parallel walking of the hash-list safe:
1237	 */
1238	hlist_add_head_rcu(&class->hash_entry, hash_head);
1239	/*
1240	 * Remove the class from the free list and add it to the global list
1241	 * of classes.
1242	 */
1243	list_move_tail(&class->lock_entry, &all_lock_classes);
1244
1245	if (verbose(class)) {
1246		graph_unlock();
1247
1248		printk("\nnew class %px: %s", class->key, class->name);
1249		if (class->name_version > 1)
1250			printk(KERN_CONT "#%d", class->name_version);
1251		printk(KERN_CONT "\n");
1252		dump_stack();
1253
1254		if (!graph_lock()) {
1255			return NULL;
1256		}
1257	}
1258out_unlock_set:
1259	graph_unlock();
1260
1261out_set_class_cache:
1262	if (!subclass || force)
1263		lock->class_cache[0] = class;
1264	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
1265		lock->class_cache[subclass] = class;
1266
1267	/*
1268	 * Hash collision, did we smoke some? We found a class with a matching
1269	 * hash but the subclass -- which is hashed in -- didn't match.
1270	 */
1271	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
1272		return NULL;
1273
1274	return class;
1275}
1276
1277#ifdef CONFIG_PROVE_LOCKING
1278/*
1279 * Allocate a lockdep entry. (assumes the graph_lock held, returns
1280 * with NULL on failure)
1281 */
1282static struct lock_list *alloc_list_entry(void)
1283{
1284	int idx = find_first_zero_bit(list_entries_in_use,
1285				      ARRAY_SIZE(list_entries));
1286
1287	if (idx >= ARRAY_SIZE(list_entries)) {
1288		if (!debug_locks_off_graph_unlock())
1289			return NULL;
1290
1291		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
1292		dump_stack();
1293		return NULL;
1294	}
1295	nr_list_entries++;
1296	__set_bit(idx, list_entries_in_use);
1297	return list_entries + idx;
1298}
1299
1300/*
1301 * Add a new dependency to the head of the list:
1302 */
1303static int add_lock_to_list(struct lock_class *this,
1304			    struct lock_class *links_to, struct list_head *head,
1305			    unsigned long ip, int distance,
1306			    const struct lock_trace *trace)
1307{
1308	struct lock_list *entry;
1309	/*
1310	 * Lock not present yet - get a new dependency struct and
1311	 * add it to the list:
1312	 */
1313	entry = alloc_list_entry();
1314	if (!entry)
1315		return 0;
1316
1317	entry->class = this;
1318	entry->links_to = links_to;
1319	entry->distance = distance;
1320	entry->trace = trace;
1321	/*
1322	 * Both allocation and removal are done under the graph lock; but
1323	 * iteration is under RCU-sched; see look_up_lock_class() and
1324	 * lockdep_free_key_range().
1325	 */
1326	list_add_tail_rcu(&entry->entry, head);
1327
1328	return 1;
1329}
1330
1331/*
1332 * For efficient modulo arithmetic, we use a power of 2
1333 */
1334#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
1335#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
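/*
 * Illustrative note (a sketch, not tied to any particular configuration):
 * with MAX_CIRCULAR_QUEUE_SIZE == 4096, CQ_MASK == 0xfff, so advancing a
 * ring index is a cheap AND instead of a division:
 *
 *	rear = 4095;
 *	rear = (rear + 1) & CQ_MASK;	-> rear == 0, wrapped around
 *
 * which is why the queue size must remain a power of 2.
 */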
1336
1337/*
1338 * The circular_queue and helpers are used to implement graph
1339 * breadth-first search (BFS) algorithm, by which we can determine
1340 * whether there is a path from a lock to another. In deadlock checks,
1341 * a path from the next lock to be acquired to a previous held lock
1342 * indicates that adding the <prev> -> <next> lock dependency will
1343 * produce a circle in the graph. Breadth-first search instead of
1344 * depth-first search is used in order to find the shortest (circular)
1345 * path.
1346 */
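/*
 * A small worked example (illustrative only): suppose the graph already
 * records L2 -> L3 and L3 -> L1, and a task holding L1 now tries to take
 * L2.  Before the new L1 -> L2 dependency is added, a BFS is run from L2
 * (the lock to be acquired) looking for L1 (the held lock).  It finds the
 * path L2 -> L3 -> L1, so adding L1 -> L2 would close the cycle
 * L1 -> L2 -> L3 -> L1, and a circular-dependency report is emitted
 * instead of adding the dependency.
 */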
1347struct circular_queue {
1348	struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
1349	unsigned int  front, rear;
1350};
1351
1352static struct circular_queue lock_cq;
1353
1354unsigned int max_bfs_queue_depth;
1355
1356static unsigned int lockdep_dependency_gen_id;
1357
1358static inline void __cq_init(struct circular_queue *cq)
1359{
1360	cq->front = cq->rear = 0;
1361	lockdep_dependency_gen_id++;
1362}
1363
1364static inline int __cq_empty(struct circular_queue *cq)
1365{
1366	return (cq->front == cq->rear);
1367}
1368
1369static inline int __cq_full(struct circular_queue *cq)
1370{
1371	return ((cq->rear + 1) & CQ_MASK) == cq->front;
1372}
1373
1374static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
1375{
1376	if (__cq_full(cq))
1377		return -1;
1378
1379	cq->element[cq->rear] = elem;
1380	cq->rear = (cq->rear + 1) & CQ_MASK;
1381	return 0;
1382}
1383
1384/*
1385 * Dequeue an element from the circular_queue, return a lock_list if
1386 * the queue is not empty, or NULL otherwise.
1387 */
1388static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
1389{
1390	struct lock_list * lock;
1391
1392	if (__cq_empty(cq))
1393		return NULL;
1394
1395	lock = cq->element[cq->front];
1396	cq->front = (cq->front + 1) & CQ_MASK;
1397
1398	return lock;
1399}
1400
1401static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
1402{
1403	return (cq->rear - cq->front) & CQ_MASK;
1404}
1405
1406static inline void mark_lock_accessed(struct lock_list *lock,
1407					struct lock_list *parent)
1408{
1409	unsigned long nr;
1410
1411	nr = lock - list_entries;
1412	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
1413	lock->parent = parent;
1414	lock->class->dep_gen_id = lockdep_dependency_gen_id;
1415}
1416
1417static inline unsigned long lock_accessed(struct lock_list *lock)
1418{
1419	unsigned long nr;
1420
1421	nr = lock - list_entries;
1422	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
1423	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
1424}
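/*
 * Informal note on the generation counter: instead of clearing a
 * "visited" flag on every class before each search, __cq_init() bumps
 * lockdep_dependency_gen_id and mark_lock_accessed() stamps the class
 * with the current value; lock_accessed() then treats a class as visited
 * only if its stamp equals the current generation. Roughly:
 *
 *	__cq_init(&lock_cq);			generation becomes N+1
 *	lock_accessed(entry);			false, stamp is still <= N
 *	mark_lock_accessed(entry, parent);	stamp becomes N+1
 *	lock_accessed(entry);			true, for this search only
 */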
1425
1426static inline struct lock_list *get_lock_parent(struct lock_list *child)
1427{
1428	return child->parent;
1429}
1430
1431static inline int get_lock_depth(struct lock_list *child)
1432{
1433	int depth = 0;
1434	struct lock_list *parent;
1435
1436	while ((parent = get_lock_parent(child))) {
1437		child = parent;
1438		depth++;
1439	}
1440	return depth;
1441}
1442
1443/*
1444 * Return the forward or backward dependency list.
1445 *
1446 * @lock:   the lock_list whose class's dependency list is wanted
1447 * @offset: the offset into struct lock_class that selects either
1448 *          locks_after or locks_before
1449 */
1450static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
1451{
1452	void *lock_class = lock->class;
1453
1454	return lock_class + offset;
1455}
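/*
 * For example, the forward search below passes
 * offsetof(struct lock_class, locks_after), so get_dep_list() evaluates
 * to &lock->class->locks_after; the backward search passes the offset of
 * locks_before instead.
 */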
1456
1457/*
1458 * Forward- or backward-dependency search, used for both circular dependency
1459 * checking and hardirq-unsafe/softirq-unsafe checking.
1460 */
1461static int __bfs(struct lock_list *source_entry,
1462		 void *data,
1463		 int (*match)(struct lock_list *entry, void *data),
1464		 struct lock_list **target_entry,
1465		 int offset)
1466{
1467	struct lock_list *entry;
1468	struct lock_list *lock;
1469	struct list_head *head;
1470	struct circular_queue *cq = &lock_cq;
1471	int ret = 1;
1472
1473	if (match(source_entry, data)) {
1474		*target_entry = source_entry;
1475		ret = 0;
1476		goto exit;
1477	}
1478
1479	head = get_dep_list(source_entry, offset);
1480	if (list_empty(head))
1481		goto exit;
1482
1483	__cq_init(cq);
1484	__cq_enqueue(cq, source_entry);
1485
1486	while ((lock = __cq_dequeue(cq))) {
1487
1488		if (!lock->class) {
1489			ret = -2;
1490			goto exit;
1491		}
1492
1493		head = get_dep_list(lock, offset);
1494
1495		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1496
1497		list_for_each_entry_rcu(entry, head, entry) {
1498			if (!lock_accessed(entry)) {
1499				unsigned int cq_depth;
1500				mark_lock_accessed(entry, lock);
1501				if (match(entry, data)) {
1502					*target_entry = entry;
1503					ret = 0;
1504					goto exit;
1505				}
1506
1507				if (__cq_enqueue(cq, entry)) {
1508					ret = -1;
1509					goto exit;
1510				}
1511				cq_depth = __cq_get_elem_count(cq);
1512				if (max_bfs_queue_depth < cq_depth)
1513					max_bfs_queue_depth = cq_depth;
1514			}
1515		}
1516	}
1517exit:
1518	return ret;
1519}
1520
1521static inline int __bfs_forwards(struct lock_list *src_entry,
1522			void *data,
1523			int (*match)(struct lock_list *entry, void *data),
1524			struct lock_list **target_entry)
1525{
1526	return __bfs(src_entry, data, match, target_entry,
1527		     offsetof(struct lock_class, locks_after));
1528
1529}
1530
1531static inline int __bfs_backwards(struct lock_list *src_entry,
1532			void *data,
1533			int (*match)(struct lock_list *entry, void *data),
1534			struct lock_list **target_entry)
1535{
1536	return __bfs(src_entry, data, match, target_entry,
1537		     offsetof(struct lock_class, locks_before));
1538
1539}
1540
1541static void print_lock_trace(const struct lock_trace *trace,
1542			     unsigned int spaces)
1543{
1544	stack_trace_print(trace->entries, trace->nr_entries, spaces);
1545}
1546
1547/*
1548 * Print a dependency chain entry (this is only done when a deadlock
1549 * has been detected):
1550 */
1551static noinline void
1552print_circular_bug_entry(struct lock_list *target, int depth)
1553{
1554	if (debug_locks_silent)
1555		return;
1556	printk("\n-> #%u", depth);
1557	print_lock_name(target->class);
1558	printk(KERN_CONT ":\n");
1559	print_lock_trace(target->trace, 6);
1560}
1561
1562static void
1563print_circular_lock_scenario(struct held_lock *src,
1564			     struct held_lock *tgt,
1565			     struct lock_list *prt)
1566{
1567	struct lock_class *source = hlock_class(src);
1568	struct lock_class *target = hlock_class(tgt);
1569	struct lock_class *parent = prt->class;
1570
1571	/*
1572	 * For a direct locking problem, where the unsafe_class lock is
1573	 * taken directly under the safe_class lock, all we need to show
1574	 * is the deadlock scenario, as it is obvious that the
1575	 * unsafe lock is taken under the safe lock.
1576	 *
1577	 * But if there is a chain instead, where the safe lock takes
1578	 * an intermediate lock (middle_class) that is not the same as
1579	 * the safe lock, then the lock chain is used to describe the
1580	 * problem. Otherwise we would need to show a different CPU case
1581	 * for each link in the chain from the safe_class lock to the
1582	 * unsafe_class lock.
1583	 */
1584	if (parent != source) {
1585		printk("Chain exists of:\n  ");
1586		__print_lock_name(source);
1587		printk(KERN_CONT " --> ");
1588		__print_lock_name(parent);
1589		printk(KERN_CONT " --> ");
1590		__print_lock_name(target);
1591		printk(KERN_CONT "\n\n");
1592	}
1593
1594	printk(" Possible unsafe locking scenario:\n\n");
1595	printk("       CPU0                    CPU1\n");
1596	printk("       ----                    ----\n");
1597	printk("  lock(");
1598	__print_lock_name(target);
1599	printk(KERN_CONT ");\n");
1600	printk("                               lock(");
1601	__print_lock_name(parent);
1602	printk(KERN_CONT ");\n");
1603	printk("                               lock(");
1604	__print_lock_name(target);
1605	printk(KERN_CONT ");\n");
1606	printk("  lock(");
1607	__print_lock_name(source);
1608	printk(KERN_CONT ");\n");
1609	printk("\n *** DEADLOCK ***\n\n");
1610}
1611
1612/*
1613 * When a circular dependency is detected, print the
1614 * header first:
1615 */
1616static noinline void
1617print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1618			struct held_lock *check_src,
1619			struct held_lock *check_tgt)
1620{
1621	struct task_struct *curr = current;
1622
1623	if (debug_locks_silent)
1624		return;
1625
1626	pr_warn("\n");
1627	pr_warn("======================================================\n");
1628	pr_warn("WARNING: possible circular locking dependency detected\n");
1629	print_kernel_ident();
1630	pr_warn("------------------------------------------------------\n");
1631	pr_warn("%s/%d is trying to acquire lock:\n",
1632		curr->comm, task_pid_nr(curr));
1633	print_lock(check_src);
1634
1635	pr_warn("\nbut task is already holding lock:\n");
1636
1637	print_lock(check_tgt);
1638	pr_warn("\nwhich lock already depends on the new lock.\n\n");
1639	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1640
1641	print_circular_bug_entry(entry, depth);
1642}
1643
1644static inline int class_equal(struct lock_list *entry, void *data)
1645{
1646	return entry->class == data;
1647}
1648
1649static noinline void print_circular_bug(struct lock_list *this,
1650					struct lock_list *target,
1651					struct held_lock *check_src,
1652					struct held_lock *check_tgt)
1653{
1654	struct task_struct *curr = current;
1655	struct lock_list *parent;
1656	struct lock_list *first_parent;
1657	int depth;
1658
1659	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1660		return;
1661
1662	this->trace = save_trace();
1663	if (!this->trace)
1664		return;
1665
1666	depth = get_lock_depth(target);
1667
1668	print_circular_bug_header(target, depth, check_src, check_tgt);
1669
1670	parent = get_lock_parent(target);
1671	first_parent = parent;
1672
1673	while (parent) {
1674		print_circular_bug_entry(parent, --depth);
1675		parent = get_lock_parent(parent);
1676	}
1677
1678	printk("\nother info that might help us debug this:\n\n");
1679	print_circular_lock_scenario(check_src, check_tgt,
1680				     first_parent);
1681
1682	lockdep_print_held_locks(curr);
1683
1684	printk("\nstack backtrace:\n");
1685	dump_stack();
1686}
1687
1688static noinline void print_bfs_bug(int ret)
1689{
1690	if (!debug_locks_off_graph_unlock())
1691		return;
1692
1693	/*
1694	 * Breadth-first-search failed, graph got corrupted?
1695	 */
1696	WARN(1, "lockdep bfs error:%d\n", ret);
1697}
1698
1699static int noop_count(struct lock_list *entry, void *data)
1700{
1701	(*(unsigned long *)data)++;
1702	return 0;
1703}
1704
1705static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1706{
1707	unsigned long  count = 0;
1708	struct lock_list *uninitialized_var(target_entry);
1709
1710	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1711
1712	return count;
1713}
1714unsigned long lockdep_count_forward_deps(struct lock_class *class)
1715{
1716	unsigned long ret, flags;
1717	struct lock_list this;
1718
1719	this.parent = NULL;
1720	this.class = class;
1721
1722	raw_local_irq_save(flags);
1723	arch_spin_lock(&lockdep_lock);
1724	ret = __lockdep_count_forward_deps(&this);
1725	arch_spin_unlock(&lockdep_lock);
1726	raw_local_irq_restore(flags);
1727
1728	return ret;
1729}
1730
1731static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1732{
1733	unsigned long  count = 0;
1734	struct lock_list *uninitialized_var(target_entry);
1735
1736	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1737
1738	return count;
1739}
1740
1741unsigned long lockdep_count_backward_deps(struct lock_class *class)
1742{
1743	unsigned long ret, flags;
1744	struct lock_list this;
1745
1746	this.parent = NULL;
1747	this.class = class;
1748
1749	raw_local_irq_save(flags);
1750	arch_spin_lock(&lockdep_lock);
1751	ret = __lockdep_count_backward_deps(&this);
1752	arch_spin_unlock(&lockdep_lock);
1753	raw_local_irq_restore(flags);
1754
1755	return ret;
1756}
1757
1758/*
1759 * Check whether the dependency graph starting at <src> can lead to
1760 * <target>. Return 0 if it can, 1 if it cannot, <0 on a BFS error.
1761 */
1762static noinline int
1763check_path(struct lock_class *target, struct lock_list *src_entry,
1764	   struct lock_list **target_entry)
1765{
1766	int ret;
1767
1768	ret = __bfs_forwards(src_entry, (void *)target, class_equal,
1769			     target_entry);
1770
1771	if (unlikely(ret < 0))
1772		print_bfs_bug(ret);
1773
1774	return ret;
1775}
1776
1777/*
1778 * Prove that the dependency graph starting at <src> cannot
1779 * lead to <target>. If it can, adding the <target> -> <src>
1780 * dependency would create a cycle.
1781 *
1782 * Print an error and return 0 if it does.
1783 */
1784static noinline int
1785check_noncircular(struct held_lock *src, struct held_lock *target,
1786		  struct lock_trace **const trace)
1787{
1788	int ret;
1789	struct lock_list *uninitialized_var(target_entry);
1790	struct lock_list src_entry = {
1791		.class = hlock_class(src),
1792		.parent = NULL,
1793	};
1794
1795	debug_atomic_inc(nr_cyclic_checks);
1796
1797	ret = check_path(hlock_class(target), &src_entry, &target_entry);
1798
1799	if (unlikely(!ret)) {
1800		if (!*trace) {
1801			/*
1802			 * If save_trace fails here, the printing might
1803			 * trigger a WARN but because of the !nr_entries it
1804			 * should not do bad things.
1805			 */
1806			*trace = save_trace();
1807		}
1808
1809		print_circular_bug(&src_entry, target_entry, src, target);
1810	}
1811
1812	return ret;
1813}
1814
1815#ifdef CONFIG_LOCKDEP_SMALL
1816/*
1817 * Check whether the dependency graph starting at <src> can lead to
1818 * <target>. If it can, the <src> -> <target> dependency is already
1819 * in the graph.
1820 *
1821 * Return 2 if it does, 1 if it does not, and 0 on a BFS error.
1822 */
1823static noinline int
1824check_redundant(struct held_lock *src, struct held_lock *target)
1825{
1826	int ret;
1827	struct lock_list *uninitialized_var(target_entry);
1828	struct lock_list src_entry = {
1829		.class = hlock_class(src),
1830		.parent = NULL,
1831	};
1832
1833	debug_atomic_inc(nr_redundant_checks);
1834
1835	ret = check_path(hlock_class(target), &src_entry, &target_entry);
1836
1837	if (!ret) {
1838		debug_atomic_inc(nr_redundant);
1839		ret = 2;
1840	} else if (ret < 0)
1841		ret = 0;
1842
1843	return ret;
1844}
1845#endif
1846
1847#ifdef CONFIG_TRACE_IRQFLAGS
1848
1849static inline int usage_accumulate(struct lock_list *entry, void *mask)
1850{
1851	*(unsigned long *)mask |= entry->class->usage_mask;
1852
1853	return 0;
1854}
1855
1856/*
1857 * Forwards and backwards subgraph searching, for the purposes of
1858 * proving that two subgraphs can be connected by a new dependency
1859 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1860 */
1861
1862static inline int usage_match(struct lock_list *entry, void *mask)
1863{
1864	return entry->class->usage_mask & *(unsigned long *)mask;
1865}
1866
1867/*
1868 * Find a node in the forwards-direction dependency sub-graph starting
1869 * at @root->class that matches @usage_mask.
1870 *
1871 * Return 0 if such a node exists in the subgraph, and put that node
1872 * into *@target_entry.
1873 *
1874 * Return 1 otherwise and keep *@target_entry unchanged.
1875 * Return <0 on error.
1876 */
1877static int
1878find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
1879			struct lock_list **target_entry)
1880{
1881	int result;
1882
1883	debug_atomic_inc(nr_find_usage_forwards_checks);
1884
1885	result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
1886
1887	return result;
1888}
1889
1890/*
1891 * Find a node in the backwards-direction dependency sub-graph starting
1892 * at @root->class that matches @usage_mask.
1893 *
1894 * Return 0 if such a node exists in the subgraph, and put that node
1895 * into *@target_entry.
1896 *
1897 * Return 1 otherwise and keep *@target_entry unchanged.
1898 * Return <0 on error.
1899 */
1900static int
1901find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
1902			struct lock_list **target_entry)
1903{
1904	int result;
1905
1906	debug_atomic_inc(nr_find_usage_backwards_checks);
1907
1908	result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
1909
1910	return result;
1911}
1912
1913static void print_lock_class_header(struct lock_class *class, int depth)
1914{
1915	int bit;
1916
1917	printk("%*s->", depth, "");
1918	print_lock_name(class);
1919#ifdef CONFIG_DEBUG_LOCKDEP
1920	printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
1921#endif
1922	printk(KERN_CONT " {\n");
1923
1924	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1925		if (class->usage_mask & (1 << bit)) {
1926			int len = depth;
1927
1928			len += printk("%*s   %s", depth, "", usage_str[bit]);
1929			len += printk(KERN_CONT " at:\n");
1930			print_lock_trace(class->usage_traces[bit], len);
1931		}
1932	}
1933	printk("%*s }\n", depth, "");
1934
1935	printk("%*s ... key      at: [<%px>] %pS\n",
1936		depth, "", class->key, class->key);
1937}
1938
1939/*
1940 * printk the shortest lock dependencies from @leaf to @root in reverse order:
1941 */
1942static void __used
1943print_shortest_lock_dependencies(struct lock_list *leaf,
1944				 struct lock_list *root)
1945{
1946	struct lock_list *entry = leaf;
1947	int depth;
1948
1949	/*compute depth from generated tree by BFS*/
1950	depth = get_lock_depth(leaf);
1951
1952	do {
1953		print_lock_class_header(entry->class, depth);
1954		printk("%*s ... acquired at:\n", depth, "");
1955		print_lock_trace(entry->trace, 2);
1956		printk("\n");
1957
1958		if (depth == 0 && (entry != root)) {
1959			printk("lockdep:%s bad path found in chain graph\n", __func__);
1960			break;
1961		}
1962
1963		entry = get_lock_parent(entry);
1964		depth--;
1965	} while (entry && (depth >= 0));
1966}
1967
1968static void
1969print_irq_lock_scenario(struct lock_list *safe_entry,
1970			struct lock_list *unsafe_entry,
1971			struct lock_class *prev_class,
1972			struct lock_class *next_class)
1973{
1974	struct lock_class *safe_class = safe_entry->class;
1975	struct lock_class *unsafe_class = unsafe_entry->class;
1976	struct lock_class *middle_class = prev_class;
1977
1978	if (middle_class == safe_class)
1979		middle_class = next_class;
1980
1981	/*
1982	 * For a direct locking problem, where the unsafe_class lock is
1983	 * taken directly under the safe_class lock, all we need to show
1984	 * is the deadlock scenario, as it is obvious that the
1985	 * unsafe lock is taken under the safe lock.
1986	 *
1987	 * But if there is a chain instead, where the safe lock takes
1988	 * an intermediate lock (middle_class) that is not the same as
1989	 * the safe lock, then the lock chain is used to describe the
1990	 * problem. Otherwise we would need to show a different CPU case
1991	 * for each link in the chain from the safe_class lock to the
1992	 * unsafe_class lock.
1993	 */
1994	if (middle_class != unsafe_class) {
1995		printk("Chain exists of:\n  ");
1996		__print_lock_name(safe_class);
1997		printk(KERN_CONT " --> ");
1998		__print_lock_name(middle_class);
1999		printk(KERN_CONT " --> ");
2000		__print_lock_name(unsafe_class);
2001		printk(KERN_CONT "\n\n");
2002	}
2003
2004	printk(" Possible interrupt unsafe locking scenario:\n\n");
2005	printk("       CPU0                    CPU1\n");
2006	printk("       ----                    ----\n");
2007	printk("  lock(");
2008	__print_lock_name(unsafe_class);
2009	printk(KERN_CONT ");\n");
2010	printk("                               local_irq_disable();\n");
2011	printk("                               lock(");
2012	__print_lock_name(safe_class);
2013	printk(KERN_CONT ");\n");
2014	printk("                               lock(");
2015	__print_lock_name(middle_class);
2016	printk(KERN_CONT ");\n");
2017	printk("  <Interrupt>\n");
2018	printk("    lock(");
2019	__print_lock_name(safe_class);
2020	printk(KERN_CONT ");\n");
2021	printk("\n *** DEADLOCK ***\n\n");
2022}
2023
2024static void
2025print_bad_irq_dependency(struct task_struct *curr,
2026			 struct lock_list *prev_root,
2027			 struct lock_list *next_root,
2028			 struct lock_list *backwards_entry,
2029			 struct lock_list *forwards_entry,
2030			 struct held_lock *prev,
2031			 struct held_lock *next,
2032			 enum lock_usage_bit bit1,
2033			 enum lock_usage_bit bit2,
2034			 const char *irqclass)
2035{
2036	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2037		return;
2038
2039	pr_warn("\n");
2040	pr_warn("=====================================================\n");
2041	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
2042		irqclass, irqclass);
2043	print_kernel_ident();
2044	pr_warn("-----------------------------------------------------\n");
2045	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
2046		curr->comm, task_pid_nr(curr),
2047		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
2048		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
2049		curr->hardirqs_enabled,
2050		curr->softirqs_enabled);
2051	print_lock(next);
2052
2053	pr_warn("\nand this task is already holding:\n");
2054	print_lock(prev);
2055	pr_warn("which would create a new lock dependency:\n");
2056	print_lock_name(hlock_class(prev));
2057	pr_cont(" ->");
2058	print_lock_name(hlock_class(next));
2059	pr_cont("\n");
2060
2061	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
2062		irqclass);
2063	print_lock_name(backwards_entry->class);
2064	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
2065
2066	print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
2067
2068	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
2069	print_lock_name(forwards_entry->class);
2070	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
2071	pr_warn("...");
2072
2073	print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
2074
2075	pr_warn("\nother info that might help us debug this:\n\n");
2076	print_irq_lock_scenario(backwards_entry, forwards_entry,
2077				hlock_class(prev), hlock_class(next));
2078
2079	lockdep_print_held_locks(curr);
2080
2081	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
2082	prev_root->trace = save_trace();
2083	if (!prev_root->trace)
2084		return;
2085	print_shortest_lock_dependencies(backwards_entry, prev_root);
2086
2087	pr_warn("\nthe dependencies between the lock to be acquired");
2088	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
2089	next_root->trace = save_trace();
2090	if (!next_root->trace)
2091		return;
2092	print_shortest_lock_dependencies(forwards_entry, next_root);
2093
2094	pr_warn("\nstack backtrace:\n");
2095	dump_stack();
2096}
2097
2098static const char *state_names[] = {
2099#define LOCKDEP_STATE(__STATE) \
2100	__stringify(__STATE),
2101#include "lockdep_states.h"
2102#undef LOCKDEP_STATE
2103};
2104
2105static const char *state_rnames[] = {
2106#define LOCKDEP_STATE(__STATE) \
2107	__stringify(__STATE)"-READ",
2108#include "lockdep_states.h"
2109#undef LOCKDEP_STATE
2110};
2111
2112static inline const char *state_name(enum lock_usage_bit bit)
2113{
2114	if (bit & LOCK_USAGE_READ_MASK)
2115		return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
2116	else
2117		return state_names[bit >> LOCK_USAGE_DIR_MASK];
2118}
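/*
 * For instance (assuming the usual HARDIRQ/SOFTIRQ ordering from
 * lockdep_states.h): state_name(LOCK_USED_IN_HARDIRQ) and
 * state_name(LOCK_ENABLED_HARDIRQ) both yield "HARDIRQ", while
 * state_name(LOCK_USED_IN_SOFTIRQ_READ) yields "SOFTIRQ-READ".
 */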
2119
2120/*
2121 * The bit number is encoded like:
2122 *
2123 *  bit0: 0 exclusive, 1 read lock
2124 *  bit1: 0 used in irq, 1 irq enabled
2125 *  bit2-n: state
2126 */
2127static int exclusive_bit(int new_bit)
2128{
2129	int state = new_bit & LOCK_USAGE_STATE_MASK;
2130	int dir = new_bit & LOCK_USAGE_DIR_MASK;
2131
2132	/*
2133	 * keep state, bit flip the direction and strip read.
2134	 */
2135	return state | (dir ^ LOCK_USAGE_DIR_MASK);
2136}
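/*
 * Worked example (assuming the usual bit values, with HARDIRQ as the
 * first state): LOCK_USED_IN_HARDIRQ_READ has dir == 0 and the read bit
 * set, so exclusive_bit() flips the direction and drops the read bit,
 * yielding LOCK_ENABLED_HARDIRQ. Likewise LOCK_ENABLED_SOFTIRQ maps back
 * to LOCK_USED_IN_SOFTIRQ.
 */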
2137
2138/*
2139 * Observe that when given a bitmask where each bitnr is encoded as above, a
2140 * right shift of the mask transforms the individual bitnrs as -1 and
2141 * conversely, a left shift transforms into +1 for the individual bitnrs.
2142 *
2143 * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can
2144 * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
2145 * instead by subtracting the bit number by 2, or shifting the mask right by 2.
2146 *
2147 * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
2148 *
2149 * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
2150 * all bits set) and recompose with bitnr1 flipped.
2151 */
2152static unsigned long invert_dir_mask(unsigned long mask)
2153{
2154	unsigned long excl = 0;
2155
2156	/* Invert dir */
2157	excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
2158	excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
2159
2160	return excl;
2161}
2162
2163/*
2164 * As above, we clear bitnr0 (LOCK_*_READ off) with bitmask ops. First, for all
2165 * bits with bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*).
2166 * And then mask out all bitnr0.
2167 */
2168static unsigned long exclusive_mask(unsigned long mask)
2169{
2170	unsigned long excl = invert_dir_mask(mask);
2171
2172	/* Strip read */
2173	excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2174	excl &= ~LOCKF_IRQ_READ;
2175
2176	return excl;
2177}
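/*
 * Concrete sketch (using the usual LOCKF_* bit layout): a mask with only
 * LOCKF_USED_IN_HARDIRQ set is turned by exclusive_mask() into a mask
 * with only LOCKF_ENABLED_HARDIRQ set, and LOCKF_ENABLED_SOFTIRQ_READ
 * becomes LOCKF_USED_IN_SOFTIRQ: the direction bit is flipped and the
 * read bit stripped, mirroring what exclusive_bit() does for a single
 * bit number.
 */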
2178
2179/*
2180 * Retrieve the _possible_ original mask to which @mask is
2181 * exclusive. Ie: this is the opposite of exclusive_mask().
2182 * Note that 2 possible original bits can match an exclusive
2183 * bit: one has LOCK_USAGE_READ_MASK set, the other has it
2184 * cleared. So both are returned for each exclusive bit.
2185 */
2186static unsigned long original_mask(unsigned long mask)
2187{
2188	unsigned long excl = invert_dir_mask(mask);
2189
2190	/* Include read in existing usages */
2191	excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2192
2193	return excl;
2194}
2195
2196/*
2197 * Find the first pair of bit match between an original
2198 * usage mask and an exclusive usage mask.
2199 */
2200static int find_exclusive_match(unsigned long mask,
2201				unsigned long excl_mask,
2202				enum lock_usage_bit *bitp,
2203				enum lock_usage_bit *excl_bitp)
2204{
2205	int bit, excl;
2206
2207	for_each_set_bit(bit, &mask, LOCK_USED) {
2208		excl = exclusive_bit(bit);
2209		if (excl_mask & lock_flag(excl)) {
2210			*bitp = bit;
2211			*excl_bitp = excl;
2212			return 0;
2213		}
2214	}
2215	return -1;
2216}
2217
2218/*
2219 * Prove that the new dependency does not connect a hardirq-safe(-read)
2220 * lock with a hardirq-unsafe lock - to achieve this we search
2221 * the backwards-subgraph starting at <prev>, and the
2222 * forwards-subgraph starting at <next>:
2223 */
2224static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
2225			   struct held_lock *next)
2226{
2227	unsigned long usage_mask = 0, forward_mask, backward_mask;
2228	enum lock_usage_bit forward_bit = 0, backward_bit = 0;
2229	struct lock_list *uninitialized_var(target_entry1);
2230	struct lock_list *uninitialized_var(target_entry);
2231	struct lock_list this, that;
2232	int ret;
2233
2234	/*
2235	 * Step 1: gather all hard/soft IRQs usages backward in an
2236	 * accumulated usage mask.
2237	 */
2238	this.parent = NULL;
2239	this.class = hlock_class(prev);
2240
2241	ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
2242	if (ret < 0) {
2243		print_bfs_bug(ret);
2244		return 0;
2245	}
2246
2247	usage_mask &= LOCKF_USED_IN_IRQ_ALL;
2248	if (!usage_mask)
2249		return 1;
2250
2251	/*
2252	 * Step 2: find exclusive uses forward that match the previous
2253	 * backward accumulated mask.
2254	 */
2255	forward_mask = exclusive_mask(usage_mask);
2256
2257	that.parent = NULL;
2258	that.class = hlock_class(next);
2259
2260	ret = find_usage_forwards(&that, forward_mask, &target_entry1);
2261	if (ret < 0) {
2262		print_bfs_bug(ret);
2263		return 0;
2264	}
2265	if (ret == 1)
2266		return ret;
2267
2268	/*
2269	 * Step 3: we found a bad match! Now retrieve a lock from the backward
2270	 * list whose usage mask matches the exclusive usage mask from the
2271	 * lock found on the forward list.
2272	 */
2273	backward_mask = original_mask(target_entry1->class->usage_mask);
2274
2275	ret = find_usage_backwards(&this, backward_mask, &target_entry);
2276	if (ret < 0) {
2277		print_bfs_bug(ret);
2278		return 0;
2279	}
2280	if (DEBUG_LOCKS_WARN_ON(ret == 1))
2281		return 1;
2282
2283	/*
2284	 * Step 4: narrow down to a pair of incompatible usage bits
2285	 * and report it.
2286	 */
2287	ret = find_exclusive_match(target_entry->class->usage_mask,
2288				   target_entry1->class->usage_mask,
2289				   &backward_bit, &forward_bit);
2290	if (DEBUG_LOCKS_WARN_ON(ret == -1))
2291		return 1;
2292
2293	print_bad_irq_dependency(curr, &this, &that,
2294				 target_entry, target_entry1,
2295				 prev, next,
2296				 backward_bit, forward_bit,
2297				 state_name(backward_bit));
2298
2299	return 0;
2300}
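/*
 * Rough example of what the above catches (illustrative): if some lock
 * reachable backwards from <prev> is taken in hardirq context
 * (USED_IN_HARDIRQ), and some lock reachable forwards from <next> is
 * taken with hardirqs enabled (ENABLED_HARDIRQ), then linking
 * <prev> -> <next> would allow an interrupt to spin on the irq-safe lock
 * while the irq-unsafe one is held in the middle of the same chain, so
 * the dependency is rejected and reported.
 */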
2301
2302static void inc_chains(void)
2303{
2304	if (current->hardirq_context)
2305		nr_hardirq_chains++;
2306	else {
2307		if (current->softirq_context)
2308			nr_softirq_chains++;
2309		else
2310			nr_process_chains++;
2311	}
2312}
2313
2314#else
2315
2316static inline int check_irq_usage(struct task_struct *curr,
2317				  struct held_lock *prev, struct held_lock *next)
2318{
2319	return 1;
2320}
2321
2322static inline void inc_chains(void)
2323{
2324	nr_process_chains++;
2325}
2326
2327#endif /* CONFIG_TRACE_IRQFLAGS */
2328
2329static void
2330print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
2331{
2332	struct lock_class *next = hlock_class(nxt);
2333	struct lock_class *prev = hlock_class(prv);
2334
2335	printk(" Possible unsafe locking scenario:\n\n");
2336	printk("       CPU0\n");
2337	printk("       ----\n");
2338	printk("  lock(");
2339	__print_lock_name(prev);
2340	printk(KERN_CONT ");\n");
2341	printk("  lock(");
2342	__print_lock_name(next);
2343	printk(KERN_CONT ");\n");
2344	printk("\n *** DEADLOCK ***\n\n");
2345	printk(" May be due to missing lock nesting notation\n\n");
2346}
2347
2348static void
2349print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
2350		   struct held_lock *next)
2351{
2352	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2353		return;
2354
2355	pr_warn("\n");
2356	pr_warn("============================================\n");
2357	pr_warn("WARNING: possible recursive locking detected\n");
2358	print_kernel_ident();
2359	pr_warn("--------------------------------------------\n");
2360	pr_warn("%s/%d is trying to acquire lock:\n",
2361		curr->comm, task_pid_nr(curr));
2362	print_lock(next);
2363	pr_warn("\nbut task is already holding lock:\n");
2364	print_lock(prev);
2365
2366	pr_warn("\nother info that might help us debug this:\n");
2367	print_deadlock_scenario(next, prev);
2368	lockdep_print_held_locks(curr);
2369
2370	pr_warn("\nstack backtrace:\n");
2371	dump_stack();
2372}
2373
2374/*
2375 * Check whether we are holding such a class already.
2376 *
2377 * (Note that this has to be done separately, because the graph cannot
2378 * detect such classes of deadlocks.)
2379 *
2380 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
2381 */
2382static int
2383check_deadlock(struct task_struct *curr, struct held_lock *next)
2384{
2385	struct held_lock *prev;
2386	struct held_lock *nest = NULL;
2387	int i;
2388
2389	for (i = 0; i < curr->lockdep_depth; i++) {
2390		prev = curr->held_locks + i;
2391
2392		if (prev->instance == next->nest_lock)
2393			nest = prev;
2394
2395		if (hlock_class(prev) != hlock_class(next))
2396			continue;
2397
2398		/*
2399		 * Allow read-after-read recursion of the same
2400		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
2401		 */
2402		if ((next->read == 2) && prev->read)
2403			return 2;
2404
2405		/*
2406		 * We're holding the nest_lock, which serializes this lock's
2407		 * nesting behaviour.
2408		 */
2409		if (nest)
2410			return 2;
2411
2412		print_deadlock_bug(curr, prev, next);
2413		return 0;
2414	}
2415	return 1;
2416}
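/*
 * For example (illustrative): read_lock(&x); read_lock(&x); is accepted
 * above (return 2, recursive read), and so is taking a second lock of the
 * same class under a matching nest_lock annotation (e.g. via
 * spin_lock_nest_lock()). A plain spin_lock(&a); spin_lock(&b); where a
 * and b share a lock class and carry no nesting annotation is reported as
 * possible recursive locking.
 */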
2417
2418/*
2419 * There was a chain-cache miss, and we are about to add a new dependency
2420 * to a previous lock. We validate the following rules:
2421 *
2422 *  - would the adding of the <prev> -> <next> dependency create a
2423 *    circular dependency in the graph? [== circular deadlock]
2424 *
2425 *  - does the new prev->next dependency connect any hardirq-safe lock
2426 *    (in the full backwards-subgraph starting at <prev>) with any
2427 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
2428 *    <next>)? [== illegal lock inversion with hardirq contexts]
2429 *
2430 *  - does the new prev->next dependency connect any softirq-safe lock
2431 *    (in the full backwards-subgraph starting at <prev>) with any
2432 *    softirq-unsafe lock (in the full forwards-subgraph starting at
2433 *    <next>)? [== illegal lock inversion with softirq contexts]
2434 *
2435 * any of these scenarios could lead to a deadlock.
2436 *
2437 * Then if all the validations pass, we add the forwards and backwards
2438 * dependency.
2439 */
2440static int
2441check_prev_add(struct task_struct *curr, struct held_lock *prev,
2442	       struct held_lock *next, int distance,
2443	       struct lock_trace **const trace)
2444{
2445	struct lock_list *entry;
2446	int ret;
2447
2448	if (!hlock_class(prev)->key || !hlock_class(next)->key) {
2449		/*
2450		 * The warning statements below may trigger a use-after-free
2451		 * of the class name. It is better to trigger a use-after free
2452		 * of the class name. It is better to trigger a use-after-free
2453		 * having the class name available.
2454		 */
2455		WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
2456			  "Detected use-after-free of lock class %px/%s\n",
2457			  hlock_class(prev),
2458			  hlock_class(prev)->name);
2459		WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
2460			  "Detected use-after-free of lock class %px/%s\n",
2461			  hlock_class(next),
2462			  hlock_class(next)->name);
2463		return 2;
2464	}
2465
2466	/*
2467	 * Prove that the new <prev> -> <next> dependency would not
2468	 * create a circular dependency in the graph. (We do this by
2469	 * a breadth-first search into the graph starting at <next>,
2470	 * and check whether we can reach <prev>.)
2471	 *
2472	 * The search is limited by the size of the circular queue (i.e.,
2473	 * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
2474	 * in the graph whose neighbours are to be checked.
2475	 */
2476	ret = check_noncircular(next, prev, trace);
2477	if (unlikely(ret <= 0))
2478		return 0;
2479
2480	if (!check_irq_usage(curr, prev, next))
2481		return 0;
2482
2483	/*
2484	 * For recursive read-locks we do all the dependency checks,
2485	 * but we don't store read-triggered dependencies (only
2486	 * write-triggered dependencies). This ensures that only the
2487	 * write-side dependencies matter, and that if for example a
2488	 * write-lock never takes any other locks, then the reads are
2489	 * equivalent to a NOP.
2490	 */
2491	if (next->read == 2 || prev->read == 2)
2492		return 1;
2493	/*
2494	 * Is the <prev> -> <next> dependency already present?
2495	 *
2496	 * (this may occur even though this is a new chain: consider
2497	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
2498	 *  chains - the second one will be new, but L1 already has
2499	 *  L2 added to its dependency list, due to the first chain.)
2500	 */
2501	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
2502		if (entry->class == hlock_class(next)) {
2503			if (distance == 1)
2504				entry->distance = 1;
2505			return 1;
2506		}
2507	}
2508
2509#ifdef CONFIG_LOCKDEP_SMALL
2510	/*
2511	 * Is the <prev> -> <next> link redundant?
2512	 */
2513	ret = check_redundant(prev, next);
2514	if (ret != 1)
2515		return ret;
2516#endif
2517
2518	if (!*trace) {
2519		*trace = save_trace();
2520		if (!*trace)
2521			return 0;
2522	}
2523
2524	/*
2525	 * Ok, all validations passed, add the new lock
2526	 * to the previous lock's dependency list:
2527	 */
2528	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
2529			       &hlock_class(prev)->locks_after,
2530			       next->acquire_ip, distance, *trace);
2531
2532	if (!ret)
2533		return 0;
2534
2535	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
2536			       &hlock_class(next)->locks_before,
2537			       next->acquire_ip, distance, *trace);
2538	if (!ret)
2539		return 0;
2540
2541	return 2;
2542}
2543
2544/*
2545 * Add the dependency to all directly-previous locks that are 'relevant'.
2546 * The ones that are relevant are (in increasing distance from curr):
2547 * all consecutive trylock entries and the final non-trylock entry - or
2548 * the end of this context's lock-chain - whichever comes first.
2549 */
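/*
 * For instance (illustrative): with the held-lock stack A, B, C(trylock),
 * D(trylock) and a new lock E being acquired, the loop below visits D, C
 * and then B, adding D -> E, C -> E and B -> E, and stops at B because it
 * is the first non-trylock entry; A is already connected to E indirectly
 * through B.
 */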
2550static int
2551check_prevs_add(struct task_struct *curr, struct held_lock *next)
2552{
2553	struct lock_trace *trace = NULL;
2554	int depth = curr->lockdep_depth;
2555	struct held_lock *hlock;
2556
2557	/*
2558	 * Debugging checks.
2559	 *
2560	 * Depth must not be zero for a non-head lock:
2561	 */
2562	if (!depth)
2563		goto out_bug;
2564	/*
2565	 * At least two relevant locks must exist for this
2566	 * to be a head:
2567	 */
2568	if (curr->held_locks[depth].irq_context !=
2569			curr->held_locks[depth-1].irq_context)
2570		goto out_bug;
2571
2572	for (;;) {
2573		int distance = curr->lockdep_depth - depth + 1;
2574		hlock = curr->held_locks + depth - 1;
2575
2576		/*
2577		 * Only non-recursive-read entries get new dependencies
2578		 * added:
2579		 */
2580		if (hlock->read != 2 && hlock->check) {
2581			int ret = check_prev_add(curr, hlock, next, distance,
2582						 &trace);
2583			if (!ret)
2584				return 0;
2585
2586			/*
2587			 * Stop after the first non-trylock entry,
2588			 * as non-trylock entries have added their
2589			 * own direct dependencies already, so this
2590			 * lock is connected to them indirectly:
2591			 */
2592			if (!hlock->trylock)
2593				break;
2594		}
2595
2596		depth--;
2597		/*
2598		 * End of lock-stack?
2599		 */
2600		if (!depth)
2601			break;
2602		/*
2603		 * Stop the search if we cross into another context:
2604		 */
2605		if (curr->held_locks[depth].irq_context !=
2606				curr->held_locks[depth-1].irq_context)
2607			break;
2608	}
2609	return 1;
2610out_bug:
2611	if (!debug_locks_off_graph_unlock())
2612		return 0;
2613
2614	/*
2615	 * Clearly we shouldn't be here, but since we made it we
2616	 * can reliably say we messed up our state. See the above two
2617	 * gotos for reasons why we could possibly end up here.
2618	 */
2619	WARN_ON(1);
2620
2621	return 0;
2622}
2623
2624struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
2625static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
2626int nr_chain_hlocks;
2627static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
2628
2629struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
2630{
2631	return lock_classes + chain_hlocks[chain->base + i];
2632}
2633
2634/*
2635 * Returns the index of the first held_lock of the current chain
2636 */
2637static inline int get_first_held_lock(struct task_struct *curr,
2638					struct held_lock *hlock)
2639{
2640	int i;
2641	struct held_lock *hlock_curr;
2642
2643	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2644		hlock_curr = curr->held_locks + i;
2645		if (hlock_curr->irq_context != hlock->irq_context)
2646			break;
2647
2648	}
2649
2650	return ++i;
2651}
2652
2653#ifdef CONFIG_DEBUG_LOCKDEP
2654/*
2655 * Returns the next chain_key iteration
2656 */
2657static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2658{
2659	u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2660
2661	printk(" class_idx:%d -> chain_key:%016Lx",
2662		class_idx,
2663		(unsigned long long)new_chain_key);
2664	return new_chain_key;
2665}
2666
2667static void
2668print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2669{
2670	struct held_lock *hlock;
2671	u64 chain_key = INITIAL_CHAIN_KEY;
2672	int depth = curr->lockdep_depth;
2673	int i = get_first_held_lock(curr, hlock_next);
2674
2675	printk("depth: %u (irq_context %u)\n", depth - i + 1,
2676		hlock_next->irq_context);
2677	for (; i < depth; i++) {
2678		hlock = curr->held_locks + i;
2679		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2680
2681		print_lock(hlock);
2682	}
2683
2684	print_chain_key_iteration(hlock_next->class_idx, chain_key);
2685	print_lock(hlock_next);
2686}
2687
2688static void print_chain_keys_chain(struct lock_chain *chain)
2689{
2690	int i;
2691	u64 chain_key = INITIAL_CHAIN_KEY;
2692	int class_id;
2693
2694	printk("depth: %u\n", chain->depth);
2695	for (i = 0; i < chain->depth; i++) {
2696		class_id = chain_hlocks[chain->base + i];
2697		chain_key = print_chain_key_iteration(class_id, chain_key);
2698
2699		print_lock_name(lock_classes + class_id);
2700		printk("\n");
2701	}
2702}
2703
2704static void print_collision(struct task_struct *curr,
2705			struct held_lock *hlock_next,
2706			struct lock_chain *chain)
2707{
2708	pr_warn("\n");
2709	pr_warn("============================\n");
2710	pr_warn("WARNING: chain_key collision\n");
2711	print_kernel_ident();
2712	pr_warn("----------------------------\n");
2713	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
2714	pr_warn("Hash chain already cached but the contents don't match!\n");
2715
2716	pr_warn("Held locks:");
2717	print_chain_keys_held_locks(curr, hlock_next);
2718
2719	pr_warn("Locks in cached chain:");
2720	print_chain_keys_chain(chain);
2721
2722	pr_warn("\nstack backtrace:\n");
2723	dump_stack();
2724}
2725#endif
2726
2727/*
2728 * Checks whether the chain and the current held locks are consistent
2729 * in depth and also in content. If they are not it most likely means
2730 * that there was a collision during the calculation of the chain_key.
2731 * Returns: 0 not passed, 1 passed
2732 */
2733static int check_no_collision(struct task_struct *curr,
2734			struct held_lock *hlock,
2735			struct lock_chain *chain)
2736{
2737#ifdef CONFIG_DEBUG_LOCKDEP
2738	int i, j, id;
2739
2740	i = get_first_held_lock(curr, hlock);
2741
2742	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2743		print_collision(curr, hlock, chain);
2744		return 0;
2745	}
2746
2747	for (j = 0; j < chain->depth - 1; j++, i++) {
2748		id = curr->held_locks[i].class_idx;
2749
2750		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2751			print_collision(curr, hlock, chain);
2752			return 0;
2753		}
2754	}
2755#endif
2756	return 1;
2757}
2758
2759/*
2760 * Given an index that is >= -1, return the index of the next lock chain.
2761 * Return -2 if there is no next lock chain.
2762 */
2763long lockdep_next_lockchain(long i)
2764{
2765	i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
2766	return i < ARRAY_SIZE(lock_chains) ? i : -2;
2767}
2768
2769unsigned long lock_chain_count(void)
2770{
2771	return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
2772}
2773
2774/* Must be called with the graph lock held. */
2775static struct lock_chain *alloc_lock_chain(void)
2776{
2777	int idx = find_first_zero_bit(lock_chains_in_use,
2778				      ARRAY_SIZE(lock_chains));
2779
2780	if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
2781		return NULL;
2782	__set_bit(idx, lock_chains_in_use);
2783	return lock_chains + idx;
2784}
2785
2786/*
2787 * Adds a dependency chain into the chain hashtable. Must be called with
2788 * graph_lock held.
2789 *
2790 * Return 0 on failure, in which case graph_lock is released.
2791 * Return 1 on success, with graph_lock still held.
2792 */
2793static inline int add_chain_cache(struct task_struct *curr,
2794				  struct held_lock *hlock,
2795				  u64 chain_key)
2796{
2797	struct lock_class *class = hlock_class(hlock);
2798	struct hlist_head *hash_head = chainhashentry(chain_key);
2799	struct lock_chain *chain;
2800	int i, j;
2801
2802	/*
2803	 * The caller must hold the graph lock and have IRQs disabled, so
2804	 * that taking the graph lock here is IRQ-safe. For recursion reasons,
2805	 * lockdep won't complain about its own locking errors.
2806	 */
2807	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2808		return 0;
2809
2810	chain = alloc_lock_chain();
2811	if (!chain) {
2812		if (!debug_locks_off_graph_unlock())
2813			return 0;
2814
2815		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2816		dump_stack();
2817		return 0;
2818	}
2819	chain->chain_key = chain_key;
2820	chain->irq_context = hlock->irq_context;
2821	i = get_first_held_lock(curr, hlock);
2822	chain->depth = curr->lockdep_depth + 1 - i;
2823
2824	BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2825	BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
2826	BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2827
2828	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2829		chain->base = nr_chain_hlocks;
2830		for (j = 0; j < chain->depth - 1; j++, i++) {
2831			int lock_id = curr->held_locks[i].class_idx;
2832			chain_hlocks[chain->base + j] = lock_id;
2833		}
2834		chain_hlocks[chain->base + j] = class - lock_classes;
2835		nr_chain_hlocks += chain->depth;
2836	} else {
2837		if (!debug_locks_off_graph_unlock())
2838			return 0;
2839
2840		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2841		dump_stack();
2842		return 0;
2843	}
2844
2845	hlist_add_head_rcu(&chain->entry, hash_head);
2846	debug_atomic_inc(chain_lookup_misses);
2847	inc_chains();
2848
2849	return 1;
2850}
2851
2852/*
2853 * Look up a dependency chain. Must be called with either the graph lock or
2854 * the RCU read lock held.
2855 */
2856static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
2857{
2858	struct hlist_head *hash_head = chainhashentry(chain_key);
2859	struct lock_chain *chain;
2860
2861	hlist_for_each_entry_rcu(chain, hash_head, entry) {
2862		if (READ_ONCE(chain->chain_key) == chain_key) {
2863			debug_atomic_inc(chain_lookup_hits);
2864			return chain;
2865		}
2866	}
2867	return NULL;
2868}
2869
2870/*
2871 * If the key is not yet present in the dependency chain cache then
2872 * add it and return 1 - in this case the new dependency chain is
2873 * validated. If the key is already hashed, return 0.
2874 * (On a return value of 1, graph_lock is held.)
2875 */
2876static inline int lookup_chain_cache_add(struct task_struct *curr,
2877					 struct held_lock *hlock,
2878					 u64 chain_key)
2879{
2880	struct lock_class *class = hlock_class(hlock);
2881	struct lock_chain *chain = lookup_chain_cache(chain_key);
2882
2883	if (chain) {
2884cache_hit:
2885		if (!check_no_collision(curr, hlock, chain))
2886			return 0;
2887
2888		if (very_verbose(class)) {
2889			printk("\nhash chain already cached, key: "
2890					"%016Lx tail class: [%px] %s\n",
2891					(unsigned long long)chain_key,
2892					class->key, class->name);
2893		}
2894
2895		return 0;
2896	}
2897
2898	if (very_verbose(class)) {
2899		printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
2900			(unsigned long long)chain_key, class->key, class->name);
2901	}
2902
2903	if (!graph_lock())
2904		return 0;
2905
2906	/*
2907	 * We have to walk the chain again locked - to avoid duplicates:
2908	 */
2909	chain = lookup_chain_cache(chain_key);
2910	if (chain) {
2911		graph_unlock();
2912		goto cache_hit;
2913	}
2914
2915	if (!add_chain_cache(curr, hlock, chain_key))
2916		return 0;
2917
2918	return 1;
2919}
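/*
 * The lookup/add dance above is a double-checked pattern; roughly:
 *
 *	if (lookup_chain_cache(key))		lockless, under RCU
 *		return 0;			hit, nothing to add
 *	if (!graph_lock())
 *		return 0;
 *	if (lookup_chain_cache(key)) {		re-check under the lock
 *		graph_unlock();			someone else added it
 *		return 0;
 *	}
 *	return add_chain_cache(curr, hlock, key);
 *
 * so a chain is inserted only once even when several CPUs race on the
 * same chain_key.
 */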
2920
2921static int validate_chain(struct task_struct *curr,
2922			  struct held_lock *hlock,
2923			  int chain_head, u64 chain_key)
2924{
2925	/*
2926	 * Trylock needs to maintain the stack of held locks, but it
2927	 * does not add new dependencies, because trylock can be done
2928	 * in any order.
2929	 *
2930	 * We look up the chain_key and do the O(N^2) check and update of
2931	 * the dependencies only if this is a new dependency chain.
2932	 * (If lookup_chain_cache_add() returns 1, it acquires
2933	 * graph_lock for us)
2934	 */
2935	if (!hlock->trylock && hlock->check &&
2936	    lookup_chain_cache_add(curr, hlock, chain_key)) {
2937		/*
2938		 * Check whether last held lock:
2939		 *
2940		 * - is irq-safe, if this lock is irq-unsafe
2941		 * - is softirq-safe, if this lock is hardirq-unsafe
2942		 *
2943		 * And check whether the new lock's dependency graph
2944		 * could lead back to the previous lock:
2945		 *
2946		 * - within the current held-lock stack
2947		 * - across our accumulated lock dependency records
2948		 *
2949		 * any of these scenarios could lead to a deadlock.
2950		 */
2951		/*
2952		 * The simple case: does the current task hold the same lock
2953		 * already?
2954		 */
2955		int ret = check_deadlock(curr, hlock);
2956
2957		if (!ret)
2958			return 0;
2959		/*
2960		 * Mark recursive read, as we jump over it when
2961		 * building dependencies (just like we jump over
2962		 * trylock entries):
2963		 */
2964		if (ret == 2)
2965			hlock->read = 2;
2966		/*
2967		 * Add dependency only if this lock is not the head
2968		 * of the chain, and if it's not a secondary read-lock:
2969		 */
2970		if (!chain_head && ret != 2) {
2971			if (!check_prevs_add(curr, hlock))
2972				return 0;
2973		}
2974
2975		graph_unlock();
2976	} else {
2977		/* after lookup_chain_cache_add(): */
2978		if (unlikely(!debug_locks))
2979			return 0;
2980	}
2981
2982	return 1;
2983}
2984#else
2985static inline int validate_chain(struct task_struct *curr,
2986				 struct held_lock *hlock,
2987				 int chain_head, u64 chain_key)
2988{
2989	return 1;
2990}
2991#endif /* CONFIG_PROVE_LOCKING */
2992
2993/*
2994 * We are building curr_chain_key incrementally, so double-check
2995 * it from scratch, to make sure that it's done correctly:
2996 */
2997static void check_chain_key(struct task_struct *curr)
2998{
2999#ifdef CONFIG_DEBUG_LOCKDEP
3000	struct held_lock *hlock, *prev_hlock = NULL;
3001	unsigned int i;
3002	u64 chain_key = INITIAL_CHAIN_KEY;
3003
3004	for (i = 0; i < curr->lockdep_depth; i++) {
3005		hlock = curr->held_locks + i;
3006		if (chain_key != hlock->prev_chain_key) {
3007			debug_locks_off();
3008			/*
3009			 * We got mighty confused: our chain keys don't match
3010			 * what we expect. Did someone trample on our task state?
3011			 */
3012			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
3013				curr->lockdep_depth, i,
3014				(unsigned long long)chain_key,
3015				(unsigned long long)hlock->prev_chain_key);
3016			return;
3017		}
3018
3019		/*
3020		 * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
3021		 * it a registered lock class index?
3022		 */
3023		if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
3024			return;
3025
3026		if (prev_hlock && (prev_hlock->irq_context !=
3027							hlock->irq_context))
3028			chain_key = INITIAL_CHAIN_KEY;
3029		chain_key = iterate_chain_key(chain_key, hlock->class_idx);
3030		prev_hlock = hlock;
3031	}
3032	if (chain_key != curr->curr_chain_key) {
3033		debug_locks_off();
3034		/*
3035		 * More smoking hash instead of calculating it, damn see these
3036		 * numbers float.. I bet that a pink elephant stepped on my memory.
3037		 */
3038		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
3039			curr->lockdep_depth, i,
3040			(unsigned long long)chain_key,
3041			(unsigned long long)curr->curr_chain_key);
3042	}
3043#endif
3044}
3045
3046#ifdef CONFIG_PROVE_LOCKING
3047static int mark_lock(struct task_struct *curr, struct held_lock *this,
3048		     enum lock_usage_bit new_bit);
3049
3050static void print_usage_bug_scenario(struct held_lock *lock)
3051{
3052	struct lock_class *class = hlock_class(lock);
3053
3054	printk(" Possible unsafe locking scenario:\n\n");
3055	printk("       CPU0\n");
3056	printk("       ----\n");
3057	printk("  lock(");
3058	__print_lock_name(class);
3059	printk(KERN_CONT ");\n");
3060	printk("  <Interrupt>\n");
3061	printk("    lock(");
3062	__print_lock_name(class);
3063	printk(KERN_CONT ");\n");
3064	printk("\n *** DEADLOCK ***\n\n");
3065}
3066
3067static void
3068print_usage_bug(struct task_struct *curr, struct held_lock *this,
3069		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
3070{
3071	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
3072		return;
3073
3074	pr_warn("\n");
3075	pr_warn("================================\n");
3076	pr_warn("WARNING: inconsistent lock state\n");
3077	print_kernel_ident();
3078	pr_warn("--------------------------------\n");
3079
3080	pr_warn("inconsistent {%s} -> {%s} usage.\n",
3081		usage_str[prev_bit], usage_str[new_bit]);
3082
3083	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
3084		curr->comm, task_pid_nr(curr),
3085		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
3086		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
3087		trace_hardirqs_enabled(curr),
3088		trace_softirqs_enabled(curr));
3089	print_lock(this);
3090
3091	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
3092	print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
3093
3094	print_irqtrace_events(curr);
3095	pr_warn("\nother info that might help us debug this:\n");
3096	print_usage_bug_scenario(this);
3097
3098	lockdep_print_held_locks(curr);
3099
3100	pr_warn("\nstack backtrace:\n");
3101	dump_stack();
3102}
3103
3104/*
3105 * Print out an error if an invalid bit is set:
3106 */
3107static inline int
3108valid_state(struct task_struct *curr, struct held_lock *this,
3109	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
3110{
3111	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
3112		print_usage_bug(curr, this, bad_bit, new_bit);
3113		return 0;
3114	}
3115	return 1;
3116}
3117
3118
3119/*
3120 * print irq inversion bug:
3121 */
3122static void
3123print_irq_inversion_bug(struct task_struct *curr,
3124			struct lock_list *root, struct lock_list *other,
3125			struct held_lock *this, int forwards,
3126			const char *irqclass)
3127{
3128	struct lock_list *entry = other;
3129	struct lock_list *middle = NULL;
3130	int depth;
3131
3132	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
3133		return;
3134
3135	pr_warn("\n");
3136	pr_warn("========================================================\n");
3137	pr_warn("WARNING: possible irq lock inversion dependency detected\n");
3138	print_kernel_ident();
3139	pr_warn("--------------------------------------------------------\n");
3140	pr_warn("%s/%d just changed the state of lock:\n",
3141		curr->comm, task_pid_nr(curr));
3142	print_lock(this);
3143	if (forwards)
3144		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
3145	else
3146		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
3147	print_lock_name(other->class);
3148	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
3149
3150	pr_warn("\nother info that might help us debug this:\n");
3151
3152	/* Find a middle lock (if one exists) */
3153	depth = get_lock_depth(other);
3154	do {
3155		if (depth == 0 && (entry != root)) {
3156			pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
3157			break;
3158		}
3159		middle = entry;
3160		entry = get_lock_parent(entry);
3161		depth--;
3162	} while (entry && entry != root && (depth >= 0));
3163	if (forwards)
3164		print_irq_lock_scenario(root, other,
3165			middle ? middle->class : root->class, other->class);
3166	else
3167		print_irq_lock_scenario(other, root,
3168			middle ? middle->class : other->class, root->class);
3169
3170	lockdep_print_held_locks(curr);
3171
3172	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
3173	root->trace = save_trace();
3174	if (!root->trace)
3175		return;
3176	print_shortest_lock_dependencies(other, root);
3177
3178	pr_warn("\nstack backtrace:\n");
3179	dump_stack();
3180}
3181
3182/*
3183 * Prove that in the forwards-direction subgraph starting at <this>
3184 * there is no lock matching <mask>:
3185 */
3186static int
3187check_usage_forwards(struct task_struct *curr, struct held_lock *this,
3188		     enum lock_usage_bit bit, const char *irqclass)
3189{
3190	int ret;
3191	struct lock_list root;
3192	struct lock_list *uninitialized_var(target_entry);
3193
3194	root.parent = NULL;
3195	root.class = hlock_class(this);
3196	ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
3197	if (ret < 0) {
3198		print_bfs_bug(ret);
3199		return 0;
3200	}
3201	if (ret == 1)
3202		return ret;
3203
3204	print_irq_inversion_bug(curr, &root, target_entry,
3205				this, 1, irqclass);
3206	return 0;
3207}
3208
3209/*
3210 * Prove that in the backwards-direction subgraph starting at <this>
3211 * there is no lock matching <mask>:
3212 */
3213static int
3214check_usage_backwards(struct task_struct *curr, struct held_lock *this,
3215		      enum lock_usage_bit bit, const char *irqclass)
3216{
3217	int ret;
3218	struct lock_list root;
3219	struct lock_list *uninitialized_var(target_entry);
3220
3221	root.parent = NULL;
3222	root.class = hlock_class(this);
3223	ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
3224	if (ret < 0) {
3225		print_bfs_bug(ret);
3226		return 0;
3227	}
3228	if (ret == 1)
3229		return ret;
3230
3231	print_irq_inversion_bug(curr, &root, target_entry,
3232				this, 0, irqclass);
3233	return 0;
3234}
3235
3236void print_irqtrace_events(struct task_struct *curr)
3237{
3238	printk("irq event stamp: %u\n", curr->irq_events);
3239	printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
3240		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
3241		(void *)curr->hardirq_enable_ip);
3242	printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
3243		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
3244		(void *)curr->hardirq_disable_ip);
3245	printk("softirqs last  enabled at (%u): [<%px>] %pS\n",
3246		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
3247		(void *)curr->softirq_enable_ip);
3248	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
3249		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
3250		(void *)curr->softirq_disable_ip);
3251}
3252
3253static int HARDIRQ_verbose(struct lock_class *class)
3254{
3255#if HARDIRQ_VERBOSE
3256	return class_filter(class);
3257#endif
3258	return 0;
3259}
3260
3261static int SOFTIRQ_verbose(struct lock_class *class)
3262{
3263#if SOFTIRQ_VERBOSE
3264	return class_filter(class);
3265#endif
3266	return 0;
3267}
3268
3269#define STRICT_READ_CHECKS	1
3270
3271static int (*state_verbose_f[])(struct lock_class *class) = {
3272#define LOCKDEP_STATE(__STATE) \
3273	__STATE##_verbose,
3274#include "lockdep_states.h"
3275#undef LOCKDEP_STATE
3276};
3277
3278static inline int state_verbose(enum lock_usage_bit bit,
3279				struct lock_class *class)
3280{
3281	return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
3282}
3283
3284typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
3285			     enum lock_usage_bit bit, const char *name);
3286
3287static int
3288mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3289		enum lock_usage_bit new_bit)
3290{
3291	int excl_bit = exclusive_bit(new_bit);
3292	int read = new_bit & LOCK_USAGE_READ_MASK;
3293	int dir = new_bit & LOCK_USAGE_DIR_MASK;
3294
3295	/*
3296	 * mark USED_IN has to look forwards -- to ensure no dependency
3297	 * has ENABLED state, which would allow recursion deadlocks.
3298	 *
3299	 * mark ENABLED has to look backwards -- to ensure no dependee
3300	 * has USED_IN state, which, again, would allow recursion deadlocks.
3301	 */
3302	check_usage_f usage = dir ?
3303		check_usage_backwards : check_usage_forwards;
3304
3305	/*
3306	 * Validate that this particular lock does not have conflicting
3307	 * usage states.
3308	 */
3309	if (!valid_state(curr, this, new_bit, excl_bit))
3310		return 0;
3311
3312	/*
3313	 * Validate that the lock dependencies don't have conflicting usage
3314	 * states.
3315	 */
3316	if ((!read || STRICT_READ_CHECKS) &&
3317			!usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
3318		return 0;
3319
3320	/*
3321	 * Check for read in write conflicts
3322	 */
3323	if (!read) {
3324		if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
3325			return 0;
3326
3327		if (STRICT_READ_CHECKS &&
3328			!usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
3329				state_name(new_bit + LOCK_USAGE_READ_MASK)))
3330			return 0;
3331	}
3332
3333	if (state_verbose(new_bit, hlock_class(this)))
3334		return 2;
3335
3336	return 1;
3337}
3338
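/*
 * Illustrative sketch, not part of the validator itself (lock and
 * handler names are made up): the state conflict that the directional
 * checks above are hunting for.  A lock taken from a hardirq handler
 * gets USED_IN_HARDIRQ, and the same class taken with interrupts
 * enabled gets ENABLED_HARDIRQ -- an interrupt arriving between the two
 * acquisitions would self-deadlock:
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		spin_lock(&foo_lock);		marks LOCK_USED_IN_HARDIRQ
 *		...
 *		spin_unlock(&foo_lock);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void foo_update(void)
 *	{
 *		spin_lock(&foo_lock);		irqs still on: LOCK_ENABLED_HARDIRQ
 *		...
 *		spin_unlock(&foo_lock);
 *	}
 *
 * The combination is rejected by valid_state()/mark_lock_irq() even if
 * the interrupt never actually fires in the window; the usual fix is
 * spin_lock_irqsave() in the process-context path.
 */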
3339/*
3340 * Mark all held locks with a usage bit:
3341 */
3342static int
3343mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
3344{
3345	struct held_lock *hlock;
3346	int i;
3347
3348	for (i = 0; i < curr->lockdep_depth; i++) {
3349		enum lock_usage_bit hlock_bit = base_bit;
3350		hlock = curr->held_locks + i;
3351
3352		if (hlock->read)
3353			hlock_bit += LOCK_USAGE_READ_MASK;
3354
3355		BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
3356
3357		if (!hlock->check)
3358			continue;
3359
3360		if (!mark_lock(curr, hlock, hlock_bit))
3361			return 0;
3362	}
3363
3364	return 1;
3365}
3366
3367/*
3368 * Hardirqs will be enabled:
3369 */
3370static void __trace_hardirqs_on_caller(unsigned long ip)
3371{
3372	struct task_struct *curr = current;
3373
3374	/* we'll do an OFF -> ON transition: */
3375	curr->hardirqs_enabled = 1;
3376
3377	/*
3378	 * We are going to turn hardirqs on, so set the
3379	 * usage bit for all held locks:
3380	 */
3381	if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
3382		return;
3383	/*
3384	 * If we have softirqs enabled, then set the usage
3385	 * bit for all held locks. (disabled hardirqs prevented
3386	 * this bit from being set before)
3387	 */
3388	if (curr->softirqs_enabled)
3389		if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
3390			return;
3391
3392	curr->hardirq_enable_ip = ip;
3393	curr->hardirq_enable_event = ++curr->irq_events;
3394	debug_atomic_inc(hardirqs_on_events);
3395}
3396
3397void lockdep_hardirqs_on(unsigned long ip)
3398{
3399	if (unlikely(!debug_locks || current->lockdep_recursion))
3400		return;
3401
3402	if (unlikely(current->hardirqs_enabled)) {
3403		/*
3404		 * Neither irq nor preemption are disabled here
3405		 * so this is racy by nature but losing one hit
3406		 * in a stat is not a big deal.
3407		 */
3408		__debug_atomic_inc(redundant_hardirqs_on);
3409		return;
3410	}
3411
3412	/*
3413	 * We're enabling irqs and according to our state above irqs weren't
3414	 * already enabled, yet we find the hardware thinks they are in fact
3415	 * enabled.. someone messed up their IRQ state tracing.
3416	 */
3417	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3418		return;
3419
3420	/*
3421	 * See the fine text that goes along with this variable definition.
3422	 */
3423	if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
3424		return;
3425
3426	/*
3427	 * Can't allow enabling interrupts while in an interrupt handler,
3428	 * that's general bad form and such. Recursion, limited stack etc..
3429	 */
3430	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
3431		return;
3432
3433	current->lockdep_recursion = 1;
3434	__trace_hardirqs_on_caller(ip);
3435	current->lockdep_recursion = 0;
3436}
3437NOKPROBE_SYMBOL(lockdep_hardirqs_on);
3438
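/*
 * How this entry point is reached (a rough sketch; the real wiring is
 * in <linux/irqflags.h> and the arch entry code, and differs between
 * versions): with CONFIG_TRACE_IRQFLAGS the generic irq-enable helper
 * expands to roughly
 *
 *	#define local_irq_enable()		\
 *	do {					\
 *		trace_hardirqs_on();		\
 *		raw_local_irq_enable();		\
 *	} while (0)
 *
 * and trace_hardirqs_on() ends up in lockdep_hardirqs_on() above.  The
 * software notion of "irqs enabled" is therefore updated just before
 * the hardware flag changes, which is why the
 * DEBUG_LOCKS_WARN_ON(!irqs_disabled()) check above still expects
 * interrupts to be off at this point.
 */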
3439/*
3440 * Hardirqs were disabled:
3441 */
3442void lockdep_hardirqs_off(unsigned long ip)
3443{
3444	struct task_struct *curr = current;
3445
3446	if (unlikely(!debug_locks || current->lockdep_recursion))
3447		return;
3448
3449	/*
3450	 * So we're supposed to get called after you mask local IRQs, but for
3451	 * some reason the hardware doesn't quite think you did a proper job.
3452	 */
3453	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3454		return;
3455
3456	if (curr->hardirqs_enabled) {
3457		/*
3458		 * We have done an ON -> OFF transition:
3459		 */
3460		curr->hardirqs_enabled = 0;
3461		curr->hardirq_disable_ip = ip;
3462		curr->hardirq_disable_event = ++curr->irq_events;
3463		debug_atomic_inc(hardirqs_off_events);
3464	} else
3465		debug_atomic_inc(redundant_hardirqs_off);
3466}
3467NOKPROBE_SYMBOL(lockdep_hardirqs_off);
3468
3469/*
3470 * Softirqs will be enabled:
3471 */
3472void trace_softirqs_on(unsigned long ip)
3473{
3474	struct task_struct *curr = current;
3475
3476	if (unlikely(!debug_locks || current->lockdep_recursion))
3477		return;
3478
3479	/*
3480	 * We fancy IRQs being disabled here, see softirq.c, avoids
3481	 * funny state and nesting things.
3482	 */
3483	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3484		return;
3485
3486	if (curr->softirqs_enabled) {
3487		debug_atomic_inc(redundant_softirqs_on);
3488		return;
3489	}
3490
3491	current->lockdep_recursion = 1;
3492	/*
3493	 * We'll do an OFF -> ON transition:
3494	 */
3495	curr->softirqs_enabled = 1;
3496	curr->softirq_enable_ip = ip;
3497	curr->softirq_enable_event = ++curr->irq_events;
3498	debug_atomic_inc(softirqs_on_events);
3499	/*
3500	 * We are going to turn softirqs on, so set the
3501	 * usage bit for all held locks, if hardirqs are
3502	 * enabled too:
3503	 */
3504	if (curr->hardirqs_enabled)
3505		mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
3506	current->lockdep_recursion = 0;
3507}
3508
3509/*
3510 * Softirqs were disabled:
3511 */
3512void trace_softirqs_off(unsigned long ip)
3513{
3514	struct task_struct *curr = current;
3515
3516	if (unlikely(!debug_locks || current->lockdep_recursion))
3517		return;
3518
3519	/*
3520	 * We fancy IRQs being disabled here, see softirq.c
3521	 */
3522	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3523		return;
3524
3525	if (curr->softirqs_enabled) {
3526		/*
3527		 * We have done an ON -> OFF transition:
3528		 */
3529		curr->softirqs_enabled = 0;
3530		curr->softirq_disable_ip = ip;
3531		curr->softirq_disable_event = ++curr->irq_events;
3532		debug_atomic_inc(softirqs_off_events);
3533		/*
3534		 * Whoops, we wanted softirqs off, so why aren't they?
3535		 */
3536		DEBUG_LOCKS_WARN_ON(!softirq_count());
3537	} else
3538		debug_atomic_inc(redundant_softirqs_off);
3539}
3540
3541static int
3542mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
3543{
3544	if (!check)
3545		goto lock_used;
3546
3547	/*
3548	 * If non-trylock use in a hardirq or softirq context, then
3549	 * mark the lock as used in these contexts:
3550	 */
3551	if (!hlock->trylock) {
3552		if (hlock->read) {
3553			if (curr->hardirq_context)
3554				if (!mark_lock(curr, hlock,
3555						LOCK_USED_IN_HARDIRQ_READ))
3556					return 0;
3557			if (curr->softirq_context)
3558				if (!mark_lock(curr, hlock,
3559						LOCK_USED_IN_SOFTIRQ_READ))
3560					return 0;
3561		} else {
3562			if (curr->hardirq_context)
3563				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
3564					return 0;
3565			if (curr->softirq_context)
3566				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
3567					return 0;
3568		}
3569	}
3570	if (!hlock->hardirqs_off) {
3571		if (hlock->read) {
3572			if (!mark_lock(curr, hlock,
3573					LOCK_ENABLED_HARDIRQ_READ))
3574				return 0;
3575			if (curr->softirqs_enabled)
3576				if (!mark_lock(curr, hlock,
3577						LOCK_ENABLED_SOFTIRQ_READ))
3578					return 0;
3579		} else {
3580			if (!mark_lock(curr, hlock,
3581					LOCK_ENABLED_HARDIRQ))
3582				return 0;
3583			if (curr->softirqs_enabled)
3584				if (!mark_lock(curr, hlock,
3585						LOCK_ENABLED_SOFTIRQ))
3586					return 0;
3587		}
3588	}
3589
3590lock_used:
3591	/* mark it as used: */
3592	if (!mark_lock(curr, hlock, LOCK_USED))
3593		return 0;
3594
3595	return 1;
3596}
3597
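/*
 * Illustration (made-up lock, not from this file) of why the !trylock
 * test in mark_usage() matters: a trylock can never block, so using one
 * from hardirq context does not brand the class as irq-safe and does
 * not force every other user to disable interrupts:
 *
 *	static DEFINE_SPINLOCK(stats_lock);
 *
 *	static irqreturn_t stats_irq(int irq, void *data)
 *	{
 *		if (spin_trylock(&stats_lock)) {	no USED_IN_HARDIRQ mark
 *			...
 *			spin_unlock(&stats_lock);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 * A plain spin_lock() in the same handler would set LOCK_USED_IN_HARDIRQ
 * and turn every irqs-enabled acquisition of the class elsewhere into a
 * reported inversion.
 */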
3598static inline unsigned int task_irq_context(struct task_struct *task)
3599{
3600	return 2 * !!task->hardirq_context + !!task->softirq_context;
3601}
3602
3603static int separate_irq_context(struct task_struct *curr,
3604		struct held_lock *hlock)
3605{
3606	unsigned int depth = curr->lockdep_depth;
3607
3608	/*
3609	 * Keep track of points where we cross into an interrupt context:
3610	 */
3611	if (depth) {
3612		struct held_lock *prev_hlock;
3613
3614		prev_hlock = curr->held_locks + depth-1;
3615		/*
3616		 * If we cross into another context, reset the
3617		 * hash key (this also prevents the checking and the
3618		 * adding of the dependency to 'prev'):
3619		 */
3620		if (prev_hlock->irq_context != hlock->irq_context)
3621			return 1;
3622	}
3623	return 0;
3624}
3625
3626/*
3627 * Mark a lock with a usage bit, and validate the state transition:
3628 */
3629static int mark_lock(struct task_struct *curr, struct held_lock *this,
3630			     enum lock_usage_bit new_bit)
3631{
3632	unsigned int new_mask = 1 << new_bit, ret = 1;
3633
3634	if (new_bit >= LOCK_USAGE_STATES) {
3635		DEBUG_LOCKS_WARN_ON(1);
3636		return 0;
3637	}
3638
3639	/*
3640	 * If already set then do not dirty the cacheline,
3641	 * nor do any checks:
3642	 */
3643	if (likely(hlock_class(this)->usage_mask & new_mask))
3644		return 1;
3645
3646	if (!graph_lock())
3647		return 0;
3648	/*
3649	 * Make sure we didn't race:
3650	 */
3651	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3652		graph_unlock();
3653		return 1;
3654	}
3655
3656	hlock_class(this)->usage_mask |= new_mask;
3657
3658	if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
3659		return 0;
3660
3661	switch (new_bit) {
3662	case LOCK_USED:
3663		debug_atomic_dec(nr_unused_locks);
3664		break;
3665	default:
3666		ret = mark_lock_irq(curr, this, new_bit);
3667		if (!ret)
3668			return 0;
3669	}
3670
3671	graph_unlock();
3672
3673	/*
3674	 * We must printk outside of the graph_lock:
3675	 */
3676	if (ret == 2) {
3677		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3678		print_lock(this);
3679		print_irqtrace_events(curr);
3680		dump_stack();
3681	}
3682
3683	return ret;
3684}
3685
3686#else /* CONFIG_PROVE_LOCKING */
3687
3688static inline int
3689mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
3690{
3691	return 1;
3692}
3693
3694static inline unsigned int task_irq_context(struct task_struct *task)
3695{
3696	return 0;
3697}
3698
3699static inline int separate_irq_context(struct task_struct *curr,
3700		struct held_lock *hlock)
3701{
3702	return 0;
3703}
3704
3705#endif /* CONFIG_PROVE_LOCKING */
3706
3707/*
3708 * Initialize a lock instance's lock-class mapping info:
3709 */
3710void lockdep_init_map(struct lockdep_map *lock, const char *name,
3711		      struct lock_class_key *key, int subclass)
3712{
3713	int i;
3714
3715	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3716		lock->class_cache[i] = NULL;
3717
3718#ifdef CONFIG_LOCK_STAT
3719	lock->cpu = raw_smp_processor_id();
3720#endif
3721
3722	/*
3723	 * Can't be having no nameless bastards around this place!
3724	 */
3725	if (DEBUG_LOCKS_WARN_ON(!name)) {
3726		lock->name = "NULL";
3727		return;
3728	}
3729
3730	lock->name = name;
3731
3732	/*
3733	 * No key, no joy, we need to hash something.
3734	 */
3735	if (DEBUG_LOCKS_WARN_ON(!key))
3736		return;
3737	/*
3738	 * Sanity check, the lock-class key must either have been allocated
3739	 * statically or must have been registered as a dynamic key.
3740	 */
3741	if (!static_obj(key) && !is_dynamic_key(key)) {
3742		if (debug_locks)
3743			printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
3744		DEBUG_LOCKS_WARN_ON(1);
3745		return;
3746	}
3747	lock->key = key;
3748
3749	if (unlikely(!debug_locks))
3750		return;
3751
3752	if (subclass) {
3753		unsigned long flags;
3754
3755		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3756			return;
3757
3758		raw_local_irq_save(flags);
3759		current->lockdep_recursion = 1;
3760		register_lock_class(lock, subclass, 1);
3761		current->lockdep_recursion = 0;
3762		raw_local_irq_restore(flags);
3763	}
3764}
3765EXPORT_SYMBOL_GPL(lockdep_init_map);
3766
3767struct lock_class_key __lockdep_no_validate__;
3768EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3769
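/*
 * Typical users (hypothetical driver shown; the real helpers are the
 * macros in <linux/lockdep.h>): lockdep_init_map() is rarely called
 * directly.  Lock initializers invoke it behind the scenes, and
 * re-keying or opting out of validation is done via wrappers:
 *
 *	static struct lock_class_key foo_dev_lock_key;
 *
 *	static void foo_dev_init(struct foo_dev *fdev)
 *	{
 *		spin_lock_init(&fdev->lock);
 *		lockdep_set_class(&fdev->lock, &foo_dev_lock_key);
 *
 *		mutex_init(&fdev->cmd_mutex);
 *		lockdep_set_novalidate_class(&fdev->cmd_mutex);
 *	}
 *
 * lockdep_set_class() re-runs lockdep_init_map() on the embedded
 * dep_map with the new key, and lockdep_set_novalidate_class() points
 * the map at __lockdep_no_validate__ above, which makes
 * __lock_acquire() force check = 0 for that lock.
 */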
3770static void
3771print_lock_nested_lock_not_held(struct task_struct *curr,
3772				struct held_lock *hlock,
3773				unsigned long ip)
3774{
3775	if (!debug_locks_off())
3776		return;
3777	if (debug_locks_silent)
3778		return;
3779
3780	pr_warn("\n");
3781	pr_warn("==================================\n");
3782	pr_warn("WARNING: Nested lock was not taken\n");
3783	print_kernel_ident();
3784	pr_warn("----------------------------------\n");
3785
3786	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3787	print_lock(hlock);
3788
3789	pr_warn("\nbut this task is not holding:\n");
3790	pr_warn("%s\n", hlock->nest_lock->name);
3791
3795	pr_warn("\nother info that might help us debug this:\n");
3796	lockdep_print_held_locks(curr);
3797
3798	pr_warn("\nstack backtrace:\n");
3799	dump_stack();
3800}
3801
3802static int __lock_is_held(const struct lockdep_map *lock, int read);
3803
3804/*
3805 * This gets called for every mutex_lock*()/spin_lock*() operation.
3806 * We maintain the dependency maps and validate the locking attempt:
3807 *
3808 * The callers must make sure that IRQs are disabled before calling it,
3809 * otherwise we could get an interrupt which would want to take locks,
3810 * which would end up in lockdep again.
3811 */
3812static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3813			  int trylock, int read, int check, int hardirqs_off,
3814			  struct lockdep_map *nest_lock, unsigned long ip,
3815			  int references, int pin_count)
3816{
3817	struct task_struct *curr = current;
3818	struct lock_class *class = NULL;
3819	struct held_lock *hlock;
3820	unsigned int depth;
3821	int chain_head = 0;
3822	int class_idx;
3823	u64 chain_key;
3824
3825	if (unlikely(!debug_locks))
3826		return 0;
3827
3828	if (!prove_locking || lock->key == &__lockdep_no_validate__)
3829		check = 0;
3830
3831	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3832		class = lock->class_cache[subclass];
3833	/*
3834	 * Not cached?
3835	 */
3836	if (unlikely(!class)) {
3837		class = register_lock_class(lock, subclass, 0);
3838		if (!class)
3839			return 0;
3840	}
3841
3842	debug_class_ops_inc(class);
3843
3844	if (very_verbose(class)) {
3845		printk("\nacquire class [%px] %s", class->key, class->name);
3846		if (class->name_version > 1)
3847			printk(KERN_CONT "#%d", class->name_version);
3848		printk(KERN_CONT "\n");
3849		dump_stack();
3850	}
3851
3852	/*
3853	 * Add the lock to the list of currently held locks.
3854	 * (we dont increase the depth just yet, up until the
3855	 * dependency checks are done)
3856	 */
3857	depth = curr->lockdep_depth;
3858	/*
3859	 * Ran out of static storage for our per-task lock stack again have we?
3860	 */
3861	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3862		return 0;
3863
3864	class_idx = class - lock_classes;
3865
3866	if (depth) {
3867		hlock = curr->held_locks + depth - 1;
3868		if (hlock->class_idx == class_idx && nest_lock) {
3869			if (!references)
3870				references++;
3871
3872			if (!hlock->references)
3873				hlock->references++;
3874
3875			hlock->references += references;
3876
3877			/* Overflow */
3878			if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
3879				return 0;
3880
3881			return 2;
3882		}
3883	}
3884
3885	hlock = curr->held_locks + depth;
3886	/*
3887	 * Plain impossible, we just registered it and checked it weren't no
3888	 * NULL like.. I bet this mushroom I ate was good!
3889	 */
3890	if (DEBUG_LOCKS_WARN_ON(!class))
3891		return 0;
3892	hlock->class_idx = class_idx;
3893	hlock->acquire_ip = ip;
3894	hlock->instance = lock;
3895	hlock->nest_lock = nest_lock;
3896	hlock->irq_context = task_irq_context(curr);
3897	hlock->trylock = trylock;
3898	hlock->read = read;
3899	hlock->check = check;
3900	hlock->hardirqs_off = !!hardirqs_off;
3901	hlock->references = references;
3902#ifdef CONFIG_LOCK_STAT
3903	hlock->waittime_stamp = 0;
3904	hlock->holdtime_stamp = lockstat_clock();
3905#endif
3906	hlock->pin_count = pin_count;
3907
3908	/* Initialize the lock usage bit */
3909	if (!mark_usage(curr, hlock, check))
3910		return 0;
3911
3912	/*
3913	 * Calculate the chain hash: it's the combined hash of all the
3914	 * lock keys along the dependency chain. We save the hash value
3915	 * at every step so that we can get the current hash easily
3916	 * after unlock. The chain hash is then used to cache dependency
3917	 * results.
3918	 *
3919	 * The 'key ID' is what is the most compact key value to drive
3920	 * the hash, not class->key.
3921	 */
3922	/*
3923	 * Whoops, we did it again.. class_idx is invalid.
3924	 */
3925	if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
3926		return 0;
3927
3928	chain_key = curr->curr_chain_key;
3929	if (!depth) {
3930		/*
3931		 * How can we have a chain hash when we ain't got no keys?!
3932		 */
3933		if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
3934			return 0;
3935		chain_head = 1;
3936	}
3937
3938	hlock->prev_chain_key = chain_key;
3939	if (separate_irq_context(curr, hlock)) {
3940		chain_key = INITIAL_CHAIN_KEY;
3941		chain_head = 1;
3942	}
3943	chain_key = iterate_chain_key(chain_key, class_idx);
3944
3945	if (nest_lock && !__lock_is_held(nest_lock, -1)) {
3946		print_lock_nested_lock_not_held(curr, hlock, ip);
3947		return 0;
3948	}
3949
3950	if (!debug_locks_silent) {
3951		WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
3952		WARN_ON_ONCE(!hlock_class(hlock)->key);
3953	}
3954
3955	if (!validate_chain(curr, hlock, chain_head, chain_key))
3956		return 0;
3957
3958	curr->curr_chain_key = chain_key;
3959	curr->lockdep_depth++;
3960	check_chain_key(curr);
3961#ifdef CONFIG_DEBUG_LOCKDEP
3962	if (unlikely(!debug_locks))
3963		return 0;
3964#endif
3965	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3966		debug_locks_off();
3967		print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3968		printk(KERN_DEBUG "depth: %i  max: %lu!\n",
3969		       curr->lockdep_depth, MAX_LOCK_DEPTH);
3970
3971		lockdep_print_held_locks(current);
3972		debug_show_all_locks();
3973		dump_stack();
3974
3975		return 0;
3976	}
3977
3978	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3979		max_lockdep_depth = curr->lockdep_depth;
3980
3981	return 1;
3982}
3983
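/*
 * A sketch of the nest_lock/references path above (hypothetical parent
 * and child objects): when many locks of one class are taken under a
 * single outer lock, annotating them with that outer lock collapses the
 * repeats into one held_lock entry whose ->references is bumped (the
 * "return 2" case), instead of blowing through MAX_LOCK_DEPTH:
 *
 *	mutex_lock(&parent->big_lock);
 *	list_for_each_entry(child, &parent->children, node)
 *		mutex_lock_nest_lock(&child->lock, &parent->big_lock);
 *
 * mutex_lock_nest_lock() hands the outer lock's dep_map to
 * lock_acquire() as the nest_lock argument, which is exactly what the
 * "hlock->class_idx == class_idx && nest_lock" test keys off.
 */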
3984static void print_unlock_imbalance_bug(struct task_struct *curr,
3985				       struct lockdep_map *lock,
3986				       unsigned long ip)
3987{
3988	if (!debug_locks_off())
3989		return;
3990	if (debug_locks_silent)
3991		return;
3992
3993	pr_warn("\n");
3994	pr_warn("=====================================\n");
3995	pr_warn("WARNING: bad unlock balance detected!\n");
3996	print_kernel_ident();
3997	pr_warn("-------------------------------------\n");
3998	pr_warn("%s/%d is trying to release lock (",
3999		curr->comm, task_pid_nr(curr));
4000	print_lockdep_cache(lock);
4001	pr_cont(") at:\n");
4002	print_ip_sym(ip);
4003	pr_warn("but there are no more locks to release!\n");
4004	pr_warn("\nother info that might help us debug this:\n");
4005	lockdep_print_held_locks(curr);
4006
4007	pr_warn("\nstack backtrace:\n");
4008	dump_stack();
4009}
4010
4011static int match_held_lock(const struct held_lock *hlock,
4012					const struct lockdep_map *lock)
4013{
4014	if (hlock->instance == lock)
4015		return 1;
4016
4017	if (hlock->references) {
4018		const struct lock_class *class = lock->class_cache[0];
4019
4020		if (!class)
4021			class = look_up_lock_class(lock, 0);
4022
4023		/*
4024		 * If look_up_lock_class() failed to find a class, we're trying
4025		 * to test if we hold a lock that has never yet been acquired.
4026		 * Clearly if the lock hasn't been acquired _ever_, we're not
4027		 * holding it either, so report failure.
4028		 */
4029		if (!class)
4030			return 0;
4031
4032		/*
4033		 * References, but not a lock we're actually ref-counting?
4034		 * State got messed up, follow the sites that change ->references
4035		 * and try to make sense of it.
4036		 */
4037		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
4038			return 0;
4039
4040		if (hlock->class_idx == class - lock_classes)
4041			return 1;
4042	}
4043
4044	return 0;
4045}
4046
4047/* @depth must not be zero */
4048static struct held_lock *find_held_lock(struct task_struct *curr,
4049					struct lockdep_map *lock,
4050					unsigned int depth, int *idx)
4051{
4052	struct held_lock *ret, *hlock, *prev_hlock;
4053	int i;
4054
4055	i = depth - 1;
4056	hlock = curr->held_locks + i;
4057	ret = hlock;
4058	if (match_held_lock(hlock, lock))
4059		goto out;
4060
4061	ret = NULL;
4062	for (i--, prev_hlock = hlock--;
4063	     i >= 0;
4064	     i--, prev_hlock = hlock--) {
4065		/*
4066		 * We must not cross into another context:
4067		 */
4068		if (prev_hlock->irq_context != hlock->irq_context) {
4069			ret = NULL;
4070			break;
4071		}
4072		if (match_held_lock(hlock, lock)) {
4073			ret = hlock;
4074			break;
4075		}
4076	}
4077
4078out:
4079	*idx = i;
4080	return ret;
4081}
4082
4083static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
4084				int idx, unsigned int *merged)
4085{
4086	struct held_lock *hlock;
4087	int first_idx = idx;
4088
4089	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4090		return 0;
4091
4092	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
4093		switch (__lock_acquire(hlock->instance,
4094				    hlock_class(hlock)->subclass,
4095				    hlock->trylock,
4096				    hlock->read, hlock->check,
4097				    hlock->hardirqs_off,
4098				    hlock->nest_lock, hlock->acquire_ip,
4099				    hlock->references, hlock->pin_count)) {
4100		case 0:
4101			return 1;
4102		case 1:
4103			break;
4104		case 2:
4105			*merged += (idx == first_idx);
4106			break;
4107		default:
4108			WARN_ON(1);
4109			return 0;
4110		}
4111	}
4112	return 0;
4113}
4114
4115static int
4116__lock_set_class(struct lockdep_map *lock, const char *name,
4117		 struct lock_class_key *key, unsigned int subclass,
4118		 unsigned long ip)
4119{
4120	struct task_struct *curr = current;
4121	unsigned int depth, merged = 0;
4122	struct held_lock *hlock;
4123	struct lock_class *class;
4124	int i;
4125
4126	if (unlikely(!debug_locks))
4127		return 0;
4128
4129	depth = curr->lockdep_depth;
4130	/*
4131	 * This function is about (re)setting the class of a held lock,
4132	 * yet we're not actually holding any locks. Naughty user!
4133	 */
4134	if (DEBUG_LOCKS_WARN_ON(!depth))
4135		return 0;
4136
4137	hlock = find_held_lock(curr, lock, depth, &i);
4138	if (!hlock) {
4139		print_unlock_imbalance_bug(curr, lock, ip);
4140		return 0;
4141	}
4142
4143	lockdep_init_map(lock, name, key, 0);
4144	class = register_lock_class(lock, subclass, 0);
4145	hlock->class_idx = class - lock_classes;
4146
4147	curr->lockdep_depth = i;
4148	curr->curr_chain_key = hlock->prev_chain_key;
4149
4150	if (reacquire_held_locks(curr, depth, i, &merged))
4151		return 0;
4152
4153	/*
4154	 * I took it apart and put it back together again, except now I have
4155	 * these 'spare' parts.. where shall I put them.
4156	 */
4157	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
4158		return 0;
4159	return 1;
4160}
4161
4162static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
4163{
4164	struct task_struct *curr = current;
4165	unsigned int depth, merged = 0;
4166	struct held_lock *hlock;
4167	int i;
4168
4169	if (unlikely(!debug_locks))
4170		return 0;
4171
4172	depth = curr->lockdep_depth;
4173	/*
4174	 * This function is about (re)setting the class of a held lock,
4175	 * yet we're not actually holding any locks. Naughty user!
4176	 */
4177	if (DEBUG_LOCKS_WARN_ON(!depth))
4178		return 0;
4179
4180	hlock = find_held_lock(curr, lock, depth, &i);
4181	if (!hlock) {
4182		print_unlock_imbalance_bug(curr, lock, ip);
4183		return 0;
4184	}
4185
4186	curr->lockdep_depth = i;
4187	curr->curr_chain_key = hlock->prev_chain_key;
4188
4189	WARN(hlock->read, "downgrading a read lock");
4190	hlock->read = 1;
4191	hlock->acquire_ip = ip;
4192
4193	if (reacquire_held_locks(curr, depth, i, &merged))
4194		return 0;
4195
4196	/* Merging can't happen with unchanged classes.. */
4197	if (DEBUG_LOCKS_WARN_ON(merged))
4198		return 0;
4199
4200	/*
4201	 * I took it apart and put it back together again, except now I have
4202	 * these 'spare' parts.. where shall I put them.
4203	 */
4204	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
4205		return 0;
4206
4207	return 1;
4208}
4209
4210/*
4211 * Remove the lock to the list of currently held locks - this gets
4212 * called on mutex_unlock()/spin_unlock*() (or on a failed
4213 * mutex_lock_interruptible()).
4214 *
4215 * @nested is an hysterical artifact, needs a tree wide cleanup.
4216 */
4217static int
4218__lock_release(struct lockdep_map *lock, unsigned long ip)
4219{
4220	struct task_struct *curr = current;
4221	unsigned int depth, merged = 1;
4222	struct held_lock *hlock;
4223	int i;
4224
4225	if (unlikely(!debug_locks))
4226		return 0;
4227
4228	depth = curr->lockdep_depth;
4229	/*
4230	 * So we're all set to release this lock.. wait what lock? We don't
4231	 * own any locks, you've been drinking again?
4232	 */
4233	if (depth <= 0) {
4234		print_unlock_imbalance_bug(curr, lock, ip);
4235		return 0;
4236	}
4237
4238	/*
4239	 * Check whether the lock exists in the current stack
4240	 * of held locks:
4241	 */
4242	hlock = find_held_lock(curr, lock, depth, &i);
4243	if (!hlock) {
4244		print_unlock_imbalance_bug(curr, lock, ip);
4245		return 0;
4246	}
4247
4248	if (hlock->instance == lock)
4249		lock_release_holdtime(hlock);
4250
4251	WARN(hlock->pin_count, "releasing a pinned lock\n");
4252
4253	if (hlock->references) {
4254		hlock->references--;
4255		if (hlock->references) {
4256			/*
4257			 * We had, and after removing one, still have
4258			 * references, the current lock stack is still
4259			 * valid. We're done!
4260			 */
4261			return 1;
4262		}
4263	}
4264
4265	/*
4266	 * We have the right lock to unlock, 'hlock' points to it.
4267	 * Now we remove it from the stack, and add back the other
4268	 * entries (if any), recalculating the hash along the way:
4269	 */
4270
4271	curr->lockdep_depth = i;
4272	curr->curr_chain_key = hlock->prev_chain_key;
4273
4274	/*
4275	 * The most likely case is when the unlock is on the innermost
4276	 * lock. In this case, we are done!
4277	 */
4278	if (i == depth-1)
4279		return 1;
4280
4281	if (reacquire_held_locks(curr, depth, i + 1, &merged))
4282		return 0;
4283
4284	/*
4285	 * We had N bottles of beer on the wall, we drank one, but now
4286	 * there's not N-1 bottles of beer left on the wall...
4287	 * Pouring two of the bottles together is acceptable.
4288	 */
4289	DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);
4290
4291	/*
4292	 * Since reacquire_held_locks() would have called check_chain_key()
4293	 * indirectly via __lock_acquire(), we don't need to do it again
4294	 * on return.
4295	 */
4296	return 0;
4297}
4298
4299static nokprobe_inline
4300int __lock_is_held(const struct lockdep_map *lock, int read)
4301{
4302	struct task_struct *curr = current;
4303	int i;
4304
4305	for (i = 0; i < curr->lockdep_depth; i++) {
4306		struct held_lock *hlock = curr->held_locks + i;
4307
4308		if (match_held_lock(hlock, lock)) {
4309			if (read == -1 || hlock->read == read)
4310				return 1;
4311
4312			return 0;
4313		}
4314	}
4315
4316	return 0;
4317}
4318
4319static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
4320{
4321	struct pin_cookie cookie = NIL_COOKIE;
4322	struct task_struct *curr = current;
4323	int i;
4324
4325	if (unlikely(!debug_locks))
4326		return cookie;
4327
4328	for (i = 0; i < curr->lockdep_depth; i++) {
4329		struct held_lock *hlock = curr->held_locks + i;
4330
4331		if (match_held_lock(hlock, lock)) {
4332			/*
4333			 * Grab 16bits of randomness; this is sufficient to not
4334			 * be guessable and still allows some pin nesting in
4335			 * our u32 pin_count.
4336			 */
4337			cookie.val = 1 + (prandom_u32() >> 16);
4338			hlock->pin_count += cookie.val;
4339			return cookie;
4340		}
4341	}
4342
4343	WARN(1, "pinning an unheld lock\n");
4344	return cookie;
4345}
4346
4347static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4348{
4349	struct task_struct *curr = current;
4350	int i;
4351
4352	if (unlikely(!debug_locks))
4353		return;
4354
4355	for (i = 0; i < curr->lockdep_depth; i++) {
4356		struct held_lock *hlock = curr->held_locks + i;
4357
4358		if (match_held_lock(hlock, lock)) {
4359			hlock->pin_count += cookie.val;
4360			return;
4361		}
4362	}
4363
4364	WARN(1, "pinning an unheld lock\n");
4365}
4366
4367static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4368{
4369	struct task_struct *curr = current;
4370	int i;
4371
4372	if (unlikely(!debug_locks))
4373		return;
4374
4375	for (i = 0; i < curr->lockdep_depth; i++) {
4376		struct held_lock *hlock = curr->held_locks + i;
4377
4378		if (match_held_lock(hlock, lock)) {
4379			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
4380				return;
4381
4382			hlock->pin_count -= cookie.val;
4383
4384			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
4385				hlock->pin_count = 0;
4386
4387			return;
4388		}
4389	}
4390
4391	WARN(1, "unpinning an unheld lock\n");
4392}
4393
4394/*
4395 * Check whether we follow the irq-flags state precisely:
4396 */
4397static void check_flags(unsigned long flags)
4398{
4399#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
4400	if (!debug_locks)
4401		return;
4402
4403	if (irqs_disabled_flags(flags)) {
4404		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
4405			printk("possible reason: unannotated irqs-off.\n");
4406		}
4407	} else {
4408		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
4409			printk("possible reason: unannotated irqs-on.\n");
4410		}
4411	}
4412
4413	/*
4414	 * We dont accurately track softirq state in e.g.
4415	 * hardirq contexts (such as on 4KSTACKS), so only
4416	 * check if not in hardirq contexts:
4417	 */
4418	if (!hardirq_count()) {
4419		if (softirq_count()) {
4420			/* like the above, but with softirqs */
4421			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
4422		} else {
4423			/* lick the above, does it taste good? */
4424			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
4425		}
4426	}
4427
4428	if (!debug_locks)
4429		print_irqtrace_events(current);
4430#endif
4431}
4432
4433void lock_set_class(struct lockdep_map *lock, const char *name,
4434		    struct lock_class_key *key, unsigned int subclass,
4435		    unsigned long ip)
4436{
4437	unsigned long flags;
4438
4439	if (unlikely(current->lockdep_recursion))
4440		return;
4441
4442	raw_local_irq_save(flags);
4443	current->lockdep_recursion = 1;
4444	check_flags(flags);
4445	if (__lock_set_class(lock, name, key, subclass, ip))
4446		check_chain_key(current);
4447	current->lockdep_recursion = 0;
4448	raw_local_irq_restore(flags);
4449}
4450EXPORT_SYMBOL_GPL(lock_set_class);
4451
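/*
 * Normal use is through the lock_set_subclass() wrapper in
 * <linux/lockdep.h>, which re-annotates an already-held lock with a
 * different subclass.  A rough, made-up example with a raw spinlock:
 *
 *	raw_spin_lock(&q->lock);
 *	...
 *	lock_set_subclass(&q->lock.dep_map, SINGLE_DEPTH_NESTING, _RET_IP_);
 *
 * from here on the held lock is re-registered under the new subclass
 * and revalidated, as if it had been acquired nested from the start.
 */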
4452void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
4453{
4454	unsigned long flags;
4455
4456	if (unlikely(current->lockdep_recursion))
4457		return;
4458
4459	raw_local_irq_save(flags);
4460	current->lockdep_recursion = 1;
4461	check_flags(flags);
4462	if (__lock_downgrade(lock, ip))
4463		check_chain_key(current);
4464	current->lockdep_recursion = 0;
4465	raw_local_irq_restore(flags);
4466}
4467EXPORT_SYMBOL_GPL(lock_downgrade);
4468
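/*
 * The main user is the rwsem write-to-read downgrade; the call site in
 * kernel/locking/rwsem.c looks roughly like:
 *
 *	void downgrade_write(struct rw_semaphore *sem)
 *	{
 *		lock_downgrade(&sem->dep_map, _RET_IP_);
 *		__downgrade_write(sem);
 *	}
 *
 * so the held_lock flips to ->read = 1 without ever leaving the
 * holder's lock stack.
 */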
4469/*
4470 * We are not always called with irqs disabled - do that here,
4471 * and also avoid lockdep recursion:
4472 */
4473void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
4474			  int trylock, int read, int check,
4475			  struct lockdep_map *nest_lock, unsigned long ip)
4476{
4477	unsigned long flags;
4478
4479	if (unlikely(current->lockdep_recursion))
4480		return;
4481
4482	raw_local_irq_save(flags);
4483	check_flags(flags);
4484
4485	current->lockdep_recursion = 1;
4486	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
4487	__lock_acquire(lock, subclass, trylock, read, check,
4488		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
4489	current->lockdep_recursion = 0;
4490	raw_local_irq_restore(flags);
4491}
4492EXPORT_SYMBOL_GPL(lock_acquire);
4493
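/*
 * How the locking primitives reach this entry point (rough sketch; the
 * exact macros live in <linux/lockdep.h> and the spinlock/mutex headers
 * and vary a little between versions):
 *
 *	spin_lock(&lock)
 *	  -> spin_acquire(&lock->dep_map, 0, 0, _RET_IP_)
 *	       -> lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_)
 *	  -> the architecture lock operation itself
 *
 * read is 0 for exclusive acquisitions, 1 for ordinary readers and 2
 * for recursive readers (e.g. read_lock()), and trylock is 1 when the
 * caller cannot block.
 */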
4494void lock_release(struct lockdep_map *lock, int nested,
4495			  unsigned long ip)
4496{
4497	unsigned long flags;
4498
4499	if (unlikely(current->lockdep_recursion))
4500		return;
4501
4502	raw_local_irq_save(flags);
4503	check_flags(flags);
4504	current->lockdep_recursion = 1;
4505	trace_lock_release(lock, ip);
4506	if (__lock_release(lock, ip))
4507		check_chain_key(current);
4508	current->lockdep_recursion = 0;
4509	raw_local_irq_restore(flags);
4510}
4511EXPORT_SYMBOL_GPL(lock_release);
4512
4513int lock_is_held_type(const struct lockdep_map *lock, int read)
4514{
4515	unsigned long flags;
4516	int ret = 0;
4517
4518	if (unlikely(current->lockdep_recursion))
4519		return 1; /* avoid false negative lockdep_assert_held() */
4520
4521	raw_local_irq_save(flags);
4522	check_flags(flags);
4523
4524	current->lockdep_recursion = 1;
4525	ret = __lock_is_held(lock, read);
4526	current->lockdep_recursion = 0;
4527	raw_local_irq_restore(flags);
4528
4529	return ret;
4530}
4531EXPORT_SYMBOL_GPL(lock_is_held_type);
4532NOKPROBE_SYMBOL(lock_is_held_type);
4533
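/*
 * This is the backend of the lockdep_assert_held*() family.  A made-up
 * example of a subsystem asserting its locking contract:
 *
 *	static void foo_update_state(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);
 *		...
 *	}
 *
 * lockdep_assert_held() warns unless the lock is held in any mode
 * (read == -1); variants exist that pass read == 1 or read == 0 to
 * insist on a particular mode.  The early "return 1" while in
 * lockdep_recursion above keeps such assertions from misfiring inside
 * lockdep itself.
 */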
4534struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
4535{
4536	struct pin_cookie cookie = NIL_COOKIE;
4537	unsigned long flags;
4538
4539	if (unlikely(current->lockdep_recursion))
4540		return cookie;
4541
4542	raw_local_irq_save(flags);
4543	check_flags(flags);
4544
4545	current->lockdep_recursion = 1;
4546	cookie = __lock_pin_lock(lock);
4547	current->lockdep_recursion = 0;
4548	raw_local_irq_restore(flags);
4549
4550	return cookie;
4551}
4552EXPORT_SYMBOL_GPL(lock_pin_lock);
4553
4554void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4555{
4556	unsigned long flags;
4557
4558	if (unlikely(current->lockdep_recursion))
4559		return;
4560
4561	raw_local_irq_save(flags);
4562	check_flags(flags);
4563
4564	current->lockdep_recursion = 1;
4565	__lock_repin_lock(lock, cookie);
4566	current->lockdep_recursion = 0;
4567	raw_local_irq_restore(flags);
4568}
4569EXPORT_SYMBOL_GPL(lock_repin_lock);
4570
4571void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4572{
4573	unsigned long flags;
4574
4575	if (unlikely(current->lockdep_recursion))
4576		return;
4577
4578	raw_local_irq_save(flags);
4579	check_flags(flags);
4580
4581	current->lockdep_recursion = 1;
4582	__lock_unpin_lock(lock, cookie);
4583	current->lockdep_recursion = 0;
4584	raw_local_irq_restore(flags);
4585}
4586EXPORT_SYMBOL_GPL(lock_unpin_lock);
4587
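/*
 * Sketch of the pin/unpin API (hypothetical caller; the scheduler is
 * the main real user, pinning rq->lock across callbacks that must not
 * drop it):
 *
 *	struct pin_cookie cookie;
 *
 *	raw_spin_lock(&rq->lock);
 *	cookie = lockdep_pin_lock(&rq->lock);
 *	...call code that is not allowed to release rq->lock...
 *	lockdep_unpin_lock(&rq->lock, cookie);
 *	raw_spin_unlock(&rq->lock);
 *
 * If the callee does drop the lock anyway, the pin_count check in
 * __lock_release() ("releasing a pinned lock") fires, and the random
 * cookie value makes it hard to paper over by unpinning from the wrong
 * place.
 */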
4588#ifdef CONFIG_LOCK_STAT
4589static void print_lock_contention_bug(struct task_struct *curr,
4590				      struct lockdep_map *lock,
4591				      unsigned long ip)
4592{
4593	if (!debug_locks_off())
4594		return;
4595	if (debug_locks_silent)
4596		return;
4597
4598	pr_warn("\n");
4599	pr_warn("=================================\n");
4600	pr_warn("WARNING: bad contention detected!\n");
4601	print_kernel_ident();
4602	pr_warn("---------------------------------\n");
4603	pr_warn("%s/%d is trying to contend lock (",
4604		curr->comm, task_pid_nr(curr));
4605	print_lockdep_cache(lock);
4606	pr_cont(") at:\n");
4607	print_ip_sym(ip);
4608	pr_warn("but there are no locks held!\n");
4609	pr_warn("\nother info that might help us debug this:\n");
4610	lockdep_print_held_locks(curr);
4611
4612	pr_warn("\nstack backtrace:\n");
4613	dump_stack();
4614}
4615
4616static void
4617__lock_contended(struct lockdep_map *lock, unsigned long ip)
4618{
4619	struct task_struct *curr = current;
4620	struct held_lock *hlock;
4621	struct lock_class_stats *stats;
4622	unsigned int depth;
4623	int i, contention_point, contending_point;
4624
4625	depth = curr->lockdep_depth;
4626	/*
4627	 * Whee, we contended on this lock, except it seems we're not
4628	 * actually trying to acquire anything much at all..
4629	 */
4630	if (DEBUG_LOCKS_WARN_ON(!depth))
4631		return;
4632
4633	hlock = find_held_lock(curr, lock, depth, &i);
4634	if (!hlock) {
4635		print_lock_contention_bug(curr, lock, ip);
4636		return;
4637	}
4638
4639	if (hlock->instance != lock)
4640		return;
4641
4642	hlock->waittime_stamp = lockstat_clock();
4643
4644	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
4645	contending_point = lock_point(hlock_class(hlock)->contending_point,
4646				      lock->ip);
4647
4648	stats = get_lock_stats(hlock_class(hlock));
4649	if (contention_point < LOCKSTAT_POINTS)
4650		stats->contention_point[contention_point]++;
4651	if (contending_point < LOCKSTAT_POINTS)
4652		stats->contending_point[contending_point]++;
4653	if (lock->cpu != smp_processor_id())
4654		stats->bounces[bounce_contended + !!hlock->read]++;
4655}
4656
4657static void
4658__lock_acquired(struct lockdep_map *lock, unsigned long ip)
4659{
4660	struct task_struct *curr = current;
4661	struct held_lock *hlock;
4662	struct lock_class_stats *stats;
4663	unsigned int depth;
4664	u64 now, waittime = 0;
4665	int i, cpu;
4666
4667	depth = curr->lockdep_depth;
4668	/*
4669	 * Yay, we acquired ownership of this lock we didn't try to
4670	 * acquire, how the heck did that happen?
4671	 */
4672	if (DEBUG_LOCKS_WARN_ON(!depth))
4673		return;
4674
4675	hlock = find_held_lock(curr, lock, depth, &i);
4676	if (!hlock) {
4677		print_lock_contention_bug(curr, lock, _RET_IP_);
4678		return;
4679	}
4680
4681	if (hlock->instance != lock)
4682		return;
4683
4684	cpu = smp_processor_id();
4685	if (hlock->waittime_stamp) {
4686		now = lockstat_clock();
4687		waittime = now - hlock->waittime_stamp;
4688		hlock->holdtime_stamp = now;
4689	}
4690
4691	trace_lock_acquired(lock, ip);
4692
4693	stats = get_lock_stats(hlock_class(hlock));
4694	if (waittime) {
4695		if (hlock->read)
4696			lock_time_inc(&stats->read_waittime, waittime);
4697		else
4698			lock_time_inc(&stats->write_waittime, waittime);
4699	}
4700	if (lock->cpu != cpu)
4701		stats->bounces[bounce_acquired + !!hlock->read]++;
4702
4703	lock->cpu = cpu;
4704	lock->ip = ip;
4705}
4706
4707void lock_contended(struct lockdep_map *lock, unsigned long ip)
4708{
4709	unsigned long flags;
4710
4711	if (unlikely(!lock_stat || !debug_locks))
4712		return;
4713
4714	if (unlikely(current->lockdep_recursion))
4715		return;
4716
4717	raw_local_irq_save(flags);
4718	check_flags(flags);
4719	current->lockdep_recursion = 1;
4720	trace_lock_contended(lock, ip);
4721	__lock_contended(lock, ip);
4722	current->lockdep_recursion = 0;
4723	raw_local_irq_restore(flags);
4724}
4725EXPORT_SYMBOL_GPL(lock_contended);
4726
4727void lock_acquired(struct lockdep_map *lock, unsigned long ip)
4728{
4729	unsigned long flags;
4730
4731	if (unlikely(!lock_stat || !debug_locks))
4732		return;
4733
4734	if (unlikely(current->lockdep_recursion))
4735		return;
4736
4737	raw_local_irq_save(flags);
4738	check_flags(flags);
4739	current->lockdep_recursion = 1;
4740	__lock_acquired(lock, ip);
4741	current->lockdep_recursion = 0;
4742	raw_local_irq_restore(flags);
4743}
4744EXPORT_SYMBOL_GPL(lock_acquired);
4745#endif
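/*
 * The contention hooks above are driven by the LOCK_CONTENDED() wrapper
 * used by the mutex/rwsem/spinlock slow paths when CONFIG_LOCK_STAT=y;
 * simplified from <linux/lockdep.h>:
 *
 *	#define LOCK_CONTENDED(_lock, try, lock)			\
 *	do {								\
 *		if (!try(_lock)) {					\
 *			lock_contended(&(_lock)->dep_map, _RET_IP_);	\
 *			lock(_lock);					\
 *		}							\
 *		lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
 *	} while (0)
 *
 * so waittime is measured from the failed trylock to the eventual
 * acquisition, and the per-class results are exported via
 * /proc/lock_stat.
 */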
4746
4747/*
4748 * Used by the testsuite, sanitize the validator state
4749 * after a simulated failure:
4750 */
4751
4752void lockdep_reset(void)
4753{
4754	unsigned long flags;
4755	int i;
4756
4757	raw_local_irq_save(flags);
4758	lockdep_init_task(current);
4759	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
4760	nr_hardirq_chains = 0;
4761	nr_softirq_chains = 0;
4762	nr_process_chains = 0;
4763	debug_locks = 1;
4764	for (i = 0; i < CHAINHASH_SIZE; i++)
4765		INIT_HLIST_HEAD(chainhash_table + i);
4766	raw_local_irq_restore(flags);
4767}
4768
4769/* Remove a class from a lock chain. Must be called with the graph lock held. */
4770static void remove_class_from_lock_chain(struct pending_free *pf,
4771					 struct lock_chain *chain,
4772					 struct lock_class *class)
4773{
4774#ifdef CONFIG_PROVE_LOCKING
4775	struct lock_chain *new_chain;
4776	u64 chain_key;
4777	int i;
4778
4779	for (i = chain->base; i < chain->base + chain->depth; i++) {
4780		if (chain_hlocks[i] != class - lock_classes)
4781			continue;
4782		/* The code below leaks one chain_hlock[] entry. */
4783		if (--chain->depth > 0) {
4784			memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
4785				(chain->base + chain->depth - i) *
4786				sizeof(chain_hlocks[0]));
4787		}
4788		/*
4789		 * Each lock class occurs at most once in a lock chain so once
4790		 * we found a match we can break out of this loop.
4791		 */
4792		goto recalc;
4793	}
4794	/* Since the chain has not been modified, return. */
4795	return;
4796
4797recalc:
4798	chain_key = INITIAL_CHAIN_KEY;
4799	for (i = chain->base; i < chain->base + chain->depth; i++)
4800		chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
4801	if (chain->depth && chain->chain_key == chain_key)
4802		return;
4803	/* Overwrite the chain key for concurrent RCU readers. */
4804	WRITE_ONCE(chain->chain_key, chain_key);
4805	/*
4806	 * Note: calling hlist_del_rcu() from inside a
4807	 * hlist_for_each_entry_rcu() loop is safe.
4808	 */
4809	hlist_del_rcu(&chain->entry);
4810	__set_bit(chain - lock_chains, pf->lock_chains_being_freed);
4811	if (chain->depth == 0)
4812		return;
4813	/*
4814	 * If the modified lock chain matches an existing lock chain, drop
4815	 * the modified lock chain.
4816	 */
4817	if (lookup_chain_cache(chain_key))
4818		return;
4819	new_chain = alloc_lock_chain();
4820	if (WARN_ON_ONCE(!new_chain)) {
4821		debug_locks_off();
4822		return;
4823	}
4824	*new_chain = *chain;
4825	hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
4826#endif
4827}
4828
4829/* Must be called with the graph lock held. */
4830static void remove_class_from_lock_chains(struct pending_free *pf,
4831					  struct lock_class *class)
4832{
4833	struct lock_chain *chain;
4834	struct hlist_head *head;
4835	int i;
4836
4837	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
4838		head = chainhash_table + i;
4839		hlist_for_each_entry_rcu(chain, head, entry) {
4840			remove_class_from_lock_chain(pf, chain, class);
4841		}
4842	}
4843}
4844
4845/*
4846 * Remove all references to a lock class. The caller must hold the graph lock.
4847 */
4848static void zap_class(struct pending_free *pf, struct lock_class *class)
4849{
4850	struct lock_list *entry;
4851	int i;
4852
4853	WARN_ON_ONCE(!class->key);
4854
4855	/*
4856	 * Remove all dependencies this lock is
4857	 * involved in:
4858	 */
4859	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
4860		entry = list_entries + i;
4861		if (entry->class != class && entry->links_to != class)
4862			continue;
4863		__clear_bit(i, list_entries_in_use);
4864		nr_list_entries--;
4865		list_del_rcu(&entry->entry);
4866	}
4867	if (list_empty(&class->locks_after) &&
4868	    list_empty(&class->locks_before)) {
4869		list_move_tail(&class->lock_entry, &pf->zapped);
4870		hlist_del_rcu(&class->hash_entry);
4871		WRITE_ONCE(class->key, NULL);
4872		WRITE_ONCE(class->name, NULL);
4873		nr_lock_classes--;
4874		__clear_bit(class - lock_classes, lock_classes_in_use);
4875	} else {
4876		WARN_ONCE(true, "%s() failed for class %s\n", __func__,
4877			  class->name);
4878	}
4879
4880	remove_class_from_lock_chains(pf, class);
4881}
4882
4883static void reinit_class(struct lock_class *class)
4884{
4885	void *const p = class;
4886	const unsigned int offset = offsetof(struct lock_class, key);
4887
4888	WARN_ON_ONCE(!class->lock_entry.next);
4889	WARN_ON_ONCE(!list_empty(&class->locks_after));
4890	WARN_ON_ONCE(!list_empty(&class->locks_before));
4891	memset(p + offset, 0, sizeof(*class) - offset);
4892	WARN_ON_ONCE(!class->lock_entry.next);
4893	WARN_ON_ONCE(!list_empty(&class->locks_after));
4894	WARN_ON_ONCE(!list_empty(&class->locks_before));
4895}
4896
4897static inline int within(const void *addr, void *start, unsigned long size)
4898{
4899	return addr >= start && addr < start + size;
4900}
4901
4902static bool inside_selftest(void)
4903{
4904	return current == lockdep_selftest_task_struct;
4905}
4906
4907/* The caller must hold the graph lock. */
4908static struct pending_free *get_pending_free(void)
4909{
4910	return delayed_free.pf + delayed_free.index;
4911}
4912
4913static void free_zapped_rcu(struct rcu_head *cb);
4914
4915/*
4916 * Schedule an RCU callback if no RCU callback is pending. Must be called with
4917 * the graph lock held.
4918 */
4919static void call_rcu_zapped(struct pending_free *pf)
4920{
4921	WARN_ON_ONCE(inside_selftest());
4922
4923	if (list_empty(&pf->zapped))
4924		return;
4925
4926	if (delayed_free.scheduled)
4927		return;
4928
4929	delayed_free.scheduled = true;
4930
4931	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
4932	delayed_free.index ^= 1;
4933
4934	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
4935}
4936
4937/* The caller must hold the graph lock. May be called from RCU context. */
4938static void __free_zapped_classes(struct pending_free *pf)
4939{
4940	struct lock_class *class;
4941
4942	check_data_structures();
4943
4944	list_for_each_entry(class, &pf->zapped, lock_entry)
4945		reinit_class(class);
4946
4947	list_splice_init(&pf->zapped, &free_lock_classes);
4948
4949#ifdef CONFIG_PROVE_LOCKING
4950	bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
4951		      pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
4952	bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
4953#endif
4954}
4955
4956static void free_zapped_rcu(struct rcu_head *ch)
4957{
4958	struct pending_free *pf;
4959	unsigned long flags;
4960
4961	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
4962		return;
4963
4964	raw_local_irq_save(flags);
4965	arch_spin_lock(&lockdep_lock);
4966	current->lockdep_recursion = 1;
4967
4968	/* closed head */
4969	pf = delayed_free.pf + (delayed_free.index ^ 1);
4970	__free_zapped_classes(pf);
4971	delayed_free.scheduled = false;
4972
4973	/*
4974	 * If there's anything on the open list, close and start a new callback.
4975	 */
4976	call_rcu_zapped(delayed_free.pf + delayed_free.index);
4977
4978	current->lockdep_recursion = 0;
4979	arch_spin_unlock(&lockdep_lock);
4980	raw_local_irq_restore(flags);
4981}
4982
4983/*
4984 * Remove all lock classes from the class hash table and from the
4985 * all_lock_classes list whose key or name is in the address range [start,
4986 * start + size). Move these lock classes to the zapped_classes list. Must
4987 * be called with the graph lock held.
4988 */
4989static void __lockdep_free_key_range(struct pending_free *pf, void *start,
4990				     unsigned long size)
4991{
4992	struct lock_class *class;
4993	struct hlist_head *head;
4994	int i;
4995
4996	/* Unhash all classes that were created by a module. */
4997	for (i = 0; i < CLASSHASH_SIZE; i++) {
4998		head = classhash_table + i;
4999		hlist_for_each_entry_rcu(class, head, hash_entry) {
5000			if (!within(class->key, start, size) &&
5001			    !within(class->name, start, size))
5002				continue;
5003			zap_class(pf, class);
5004		}
5005	}
5006}
5007
5008/*
5009 * Used in module.c to remove lock classes from memory that is going to be
5010 * freed; and possibly re-used by other modules.
5011 *
5012 * We will have had one synchronize_rcu() before getting here, so we're
5013 * guaranteed nobody will look up these exact classes -- they're properly dead
5014 * but still allocated.
5015 */
5016static void lockdep_free_key_range_reg(void *start, unsigned long size)
5017{
5018	struct pending_free *pf;
5019	unsigned long flags;
5020
5021	init_data_structures_once();
5022
5023	raw_local_irq_save(flags);
5024	arch_spin_lock(&lockdep_lock);
5025	current->lockdep_recursion = 1;
5026	pf = get_pending_free();
5027	__lockdep_free_key_range(pf, start, size);
5028	call_rcu_zapped(pf);
5029	current->lockdep_recursion = 0;
5030	arch_spin_unlock(&lockdep_lock);
5031	raw_local_irq_restore(flags);
5032
5033	/*
5034	 * Wait for any possible iterators from look_up_lock_class() to pass
5035	 * before continuing to free the memory they refer to.
5036	 */
5037	synchronize_rcu();
5038}
5039
5040/*
5041 * Free all lockdep keys in the range [start, start+size). Does not sleep.
5042 * Ignores debug_locks. Must only be used by the lockdep selftests.
5043 */
5044static void lockdep_free_key_range_imm(void *start, unsigned long size)
5045{
5046	struct pending_free *pf = delayed_free.pf;
5047	unsigned long flags;
5048
5049	init_data_structures_once();
5050
5051	raw_local_irq_save(flags);
5052	arch_spin_lock(&lockdep_lock);
5053	__lockdep_free_key_range(pf, start, size);
5054	__free_zapped_classes(pf);
5055	arch_spin_unlock(&lockdep_lock);
5056	raw_local_irq_restore(flags);
5057}
5058
5059void lockdep_free_key_range(void *start, unsigned long size)
5060{
5061	init_data_structures_once();
5062
5063	if (inside_selftest())
5064		lockdep_free_key_range_imm(start, size);
5065	else
5066		lockdep_free_key_range_reg(start, size);
5067}
5068
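/*
 * The _reg variant above is what module unload relies on:
 * kernel/module.c removes every class whose key or name lives inside
 * the module image before that memory is freed, along the lines of
 *
 *	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
 *
 * (field names as in recent kernels; older trees used module_core and
 * core_size), so stale classes can no longer be reached through the
 * class hash once the module is gone.
 */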
5069/*
5070 * Check whether any element of the @lock->class_cache[] array refers to a
5071 * registered lock class. The caller must hold either the graph lock or the
5072 * RCU read lock.
5073 */
5074static bool lock_class_cache_is_registered(struct lockdep_map *lock)
5075{
5076	struct lock_class *class;
5077	struct hlist_head *head;
5078	int i, j;
5079
5080	for (i = 0; i < CLASSHASH_SIZE; i++) {
5081		head = classhash_table + i;
5082		hlist_for_each_entry_rcu(class, head, hash_entry) {
5083			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
5084				if (lock->class_cache[j] == class)
5085					return true;
5086		}
5087	}
5088	return false;
5089}
5090
5091/* The caller must hold the graph lock. Does not sleep. */
5092static void __lockdep_reset_lock(struct pending_free *pf,
5093				 struct lockdep_map *lock)
5094{
5095	struct lock_class *class;
5096	int j;
5097
5098	/*
5099	 * Remove all classes this lock might have:
5100	 */
5101	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
5102		/*
5103		 * If the class exists we look it up and zap it:
5104		 */
5105		class = look_up_lock_class(lock, j);
5106		if (class)
5107			zap_class(pf, class);
5108	}
5109	/*
5110	 * Debug check: in the end all mapped classes should
5111	 * be gone.
5112	 */
5113	if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
5114		debug_locks_off();
5115}
5116
5117/*
5118 * Remove all information lockdep has about a lock if debug_locks == 1. Free
5119 * released data structures from RCU context.
5120 */
5121static void lockdep_reset_lock_reg(struct lockdep_map *lock)
5122{
5123	struct pending_free *pf;
5124	unsigned long flags;
5125	int locked;
5126
5127	raw_local_irq_save(flags);
5128	locked = graph_lock();
5129	if (!locked)
5130		goto out_irq;
5131
5132	pf = get_pending_free();
5133	__lockdep_reset_lock(pf, lock);
5134	call_rcu_zapped(pf);
5135
5136	graph_unlock();
5137out_irq:
5138	raw_local_irq_restore(flags);
5139}
5140
5141/*
5142 * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
5143 * lockdep selftests.
5144 */
5145static void lockdep_reset_lock_imm(struct lockdep_map *lock)
5146{
5147	struct pending_free *pf = delayed_free.pf;
5148	unsigned long flags;
5149
5150	raw_local_irq_save(flags);
5151	arch_spin_lock(&lockdep_lock);
5152	__lockdep_reset_lock(pf, lock);
5153	__free_zapped_classes(pf);
5154	arch_spin_unlock(&lockdep_lock);
5155	raw_local_irq_restore(flags);
5156}
5157
5158void lockdep_reset_lock(struct lockdep_map *lock)
5159{
5160	init_data_structures_once();
5161
5162	if (inside_selftest())
5163		lockdep_reset_lock_imm(lock);
5164	else
5165		lockdep_reset_lock_reg(lock);
5166}
5167
5168/* Unregister a dynamically allocated key. */
5169void lockdep_unregister_key(struct lock_class_key *key)
5170{
5171	struct hlist_head *hash_head = keyhashentry(key);
5172	struct lock_class_key *k;
5173	struct pending_free *pf;
5174	unsigned long flags;
5175	bool found = false;
5176
5177	might_sleep();
5178
5179	if (WARN_ON_ONCE(static_obj(key)))
5180		return;
5181
5182	raw_local_irq_save(flags);
5183	if (!graph_lock())
5184		goto out_irq;
5185
5186	pf = get_pending_free();
5187	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
5188		if (k == key) {
5189			hlist_del_rcu(&k->hash_entry);
5190			found = true;
5191			break;
5192		}
5193	}
5194	WARN_ON_ONCE(!found);
5195	__lockdep_free_key_range(pf, key, 1);
5196	call_rcu_zapped(pf);
5197	graph_unlock();
5198out_irq:
5199	raw_local_irq_restore(flags);
5200
5201	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
5202	synchronize_rcu();
5203}
5204EXPORT_SYMBOL_GPL(lockdep_unregister_key);
5205
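/*
 * Sketch of the dynamic-key lifecycle (hypothetical object): a key that
 * lives in heap-allocated memory must be registered before first use
 * and unregistered before the memory is freed:
 *
 *	struct foo {
 *		struct lock_class_key key;
 *		spinlock_t lock;
 *	};
 *
 *	f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	lockdep_register_key(&f->key);
 *	spin_lock_init(&f->lock);
 *	lockdep_set_class(&f->lock, &f->key);
 *	...
 *	lockdep_unregister_key(&f->key);	may sleep, see might_sleep()
 *	kfree(f);
 *
 * Skipping the registration trips the "key %px has not been registered"
 * check in lockdep_init_map(); skipping the unregistration leaves a
 * freed key reachable through the key hash.
 */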
5206void __init lockdep_init(void)
5207{
5208	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
5209
5210	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
5211	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
5212	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
5213	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
5214	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
5215	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
5216	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
5217
5218	printk(" memory used by lock dependency info: %zu kB\n",
5219	       (sizeof(lock_classes) +
5220		sizeof(lock_classes_in_use) +
5221		sizeof(classhash_table) +
5222		sizeof(list_entries) +
5223		sizeof(list_entries_in_use) +
5224		sizeof(chainhash_table) +
5225		sizeof(delayed_free)
5226#ifdef CONFIG_PROVE_LOCKING
5227		+ sizeof(lock_cq)
5228		+ sizeof(lock_chains)
5229		+ sizeof(lock_chains_in_use)
5230		+ sizeof(chain_hlocks)
5231#endif
5232		) / 1024
5233		);
5234
5235#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
5236	printk(" memory used for stack traces: %zu kB\n",
5237	       (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024
5238	       );
5239#endif
5240
5241	printk(" per task-struct memory footprint: %zu bytes\n",
5242	       sizeof(((struct task_struct *)NULL)->held_locks));
5243}
5244
5245static void
5246print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
5247		     const void *mem_to, struct held_lock *hlock)
5248{
5249	if (!debug_locks_off())
5250		return;
5251	if (debug_locks_silent)
5252		return;
5253
5254	pr_warn("\n");
5255	pr_warn("=========================\n");
5256	pr_warn("WARNING: held lock freed!\n");
5257	print_kernel_ident();
5258	pr_warn("-------------------------\n");
5259	pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
5260		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
5261	print_lock(hlock);
5262	lockdep_print_held_locks(curr);
5263
5264	pr_warn("\nstack backtrace:\n");
5265	dump_stack();
5266}
5267
5268static inline int not_in_range(const void* mem_from, unsigned long mem_len,
5269				const void* lock_from, unsigned long lock_len)
5270{
5271	return lock_from + lock_len <= mem_from ||
5272		mem_from + mem_len <= lock_from;
5273}
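
/*
 * Editor's note: not_in_range() treats both the freed memory and the lock's
 * storage as half-open intervals; they are disjoint exactly when one ends at
 * or before the other begins. E.g. freeing [0x1000, 0x1100) while a held
 * spinlock occupies [0x10f0, 0x10f4) overlaps, so the function returns 0 and
 * debug_check_no_locks_freed() below reports a "held lock freed!" bug.
 */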
5274
5275/*
5276 * Called when kernel memory is freed (or unmapped), or if a lock
5277 * is destroyed or reinitialized - this code checks whether there is
5278 * any held lock in the memory range [mem_from, mem_from + mem_len):
5279 */
5280void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
5281{
5282	struct task_struct *curr = current;
5283	struct held_lock *hlock;
5284	unsigned long flags;
5285	int i;
5286
5287	if (unlikely(!debug_locks))
5288		return;
5289
5290	raw_local_irq_save(flags);
5291	for (i = 0; i < curr->lockdep_depth; i++) {
5292		hlock = curr->held_locks + i;
5293
5294		if (not_in_range(mem_from, mem_len, hlock->instance,
5295					sizeof(*hlock->instance)))
5296			continue;
5297
5298		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
5299		break;
5300	}
5301	raw_local_irq_restore(flags);
5302}
5303EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
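
/*
 * Example (editor's sketch): the memory allocators call the check above when
 * pages or objects are returned to them; a subsystem that recycles its own
 * buffers can do the same before reuse. "my_pool_free" is an invented name;
 * only debug_check_no_locks_freed() is the real API.
 */
#if 0
static void my_pool_free(void *buf, unsigned long size)
{
	/* Complain if current still holds a lock that lives inside buf. */
	debug_check_no_locks_freed(buf, size);
	/* ... hand buf back to the pool ... */
}
#endif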
5304
5305static void print_held_locks_bug(void)
5306{
5307	if (!debug_locks_off())
5308		return;
5309	if (debug_locks_silent)
5310		return;
5311
5312	pr_warn("\n");
5313	pr_warn("====================================\n");
5314	pr_warn("WARNING: %s/%d still has locks held!\n",
5315	       current->comm, task_pid_nr(current));
5316	print_kernel_ident();
5317	pr_warn("------------------------------------\n");
5318	lockdep_print_held_locks(current);
5319	pr_warn("\nstack backtrace:\n");
5320	dump_stack();
5321}
5322
5323void debug_check_no_locks_held(void)
5324{
5325	if (unlikely(current->lockdep_depth > 0))
5326		print_held_locks_bug();
5327}
5328EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
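
/*
 * Example (editor's sketch): debug_check_no_locks_held() asserts that the
 * current task holds no locks at all; the task-exit path uses it, and the
 * same assertion can be dropped into any point with that expectation.
 * "my_worker_fn" is an invented name; kthread_should_stop() and the check
 * itself are real APIs.
 */
#if 0
static int my_worker_fn(void *arg)
{
	while (!kthread_should_stop()) {
		/* ... take and release locks while doing one work item ... */

		/* Each iteration must end with an empty held_locks[]. */
		debug_check_no_locks_held();
	}
	return 0;
}
#endif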
5329
5330#ifdef __KERNEL__
5331void debug_show_all_locks(void)
5332{
5333	struct task_struct *g, *p;
5334
5335	if (unlikely(!debug_locks)) {
5336		pr_warn("INFO: lockdep is turned off.\n");
5337		return;
5338	}
5339	pr_warn("\nShowing all locks held in the system:\n");
5340
5341	rcu_read_lock();
5342	for_each_process_thread(g, p) {
5343		if (!p->lockdep_depth)
5344			continue;
5345		lockdep_print_held_locks(p);
5346		touch_nmi_watchdog();
5347		touch_all_softlockup_watchdogs();
5348	}
5349	rcu_read_unlock();
5350
5351	pr_warn("\n");
5352	pr_warn("=============================================\n\n");
5353}
5354EXPORT_SYMBOL_GPL(debug_show_all_locks);
5355#endif
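
/*
 * Example (editor's sketch): debug_show_all_locks() is meant to be called
 * from debugging hooks (the SysRq 'd' handler is one in-tree user) when a
 * human wants a system-wide dump. "my_debug_trigger" is an invented name.
 */
#if 0
static void my_debug_trigger(void)
{
	/* Walk every task and print whatever locks it currently holds. */
	debug_show_all_locks();
}
#endif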
5356
5357/*
5358 * Careful: only use this function if you are sure that
5359 * the task cannot run in parallel!
5360 */
5361void debug_show_held_locks(struct task_struct *task)
5362{
5363	if (unlikely(!debug_locks)) {
5364		printk("INFO: lockdep is turned off.\n");
5365		return;
5366	}
5367	lockdep_print_held_locks(task);
5368}
5369EXPORT_SYMBOL_GPL(debug_show_held_locks);
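
/*
 * Example (editor's sketch): respecting the "cannot run in parallel" rule
 * above, a watchdog-style caller only dumps a task it has already observed
 * to be blocked, much as the hung-task detector does for tasks stuck in
 * uninterruptible sleep. "report_stuck_task" is an invented name;
 * debug_show_held_locks() and sched_show_task() are real APIs.
 */
#if 0
static void report_stuck_task(struct task_struct *t)
{
	pr_err("task %s/%d appears to be stuck:\n", t->comm, task_pid_nr(t));
	/* Safe: t is blocked and cannot concurrently modify held_locks[]. */
	debug_show_held_locks(t);
	sched_show_task(t);
}
#endif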
5370
5371asmlinkage __visible void lockdep_sys_exit(void)
5372{
5373	struct task_struct *curr = current;
5374
5375	if (unlikely(curr->lockdep_depth)) {
5376		if (!debug_locks_off())
5377			return;
5378		pr_warn("\n");
5379		pr_warn("================================================\n");
5380		pr_warn("WARNING: lock held when returning to user space!\n");
5381		print_kernel_ident();
5382		pr_warn("------------------------------------------------\n");
5383		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
5384				curr->comm, curr->pid);
5385		lockdep_print_held_locks(curr);
5386	}
5387
5388	/*
5389	 * The lock history for each syscall should be independent. So wipe the
5390	 * slate clean on return to userspace.
5391	 */
5392	lockdep_invariant_state(false);
5393}
5394
5395void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
5396{
5397	struct task_struct *curr = current;
5398
5399	/* Note: the following can be executed concurrently, so be careful. */
5400	pr_warn("\n");
5401	pr_warn("=============================\n");
5402	pr_warn("WARNING: suspicious RCU usage\n");
5403	print_kernel_ident();
5404	pr_warn("-----------------------------\n");
5405	pr_warn("%s:%d %s!\n", file, line, s);
5406	pr_warn("\nother info that might help us debug this:\n\n");
5407	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
5408	       !rcu_lockdep_current_cpu_online()
5409			? "RCU used illegally from offline CPU!\n"
5410			: !rcu_is_watching()
5411				? "RCU used illegally from idle CPU!\n"
5412				: "",
5413	       rcu_scheduler_active, debug_locks);
5414
5415	/*
5416	 * If a CPU is in the RCU-free window in idle (i.e. in the section
5417	 * between rcu_idle_enter() and rcu_idle_exit()), then RCU
5418	 * considers that CPU to be in an "extended quiescent state",
5419	 * which means that RCU will be completely ignoring that CPU.
5420	 * Therefore, rcu_read_lock() and friends have absolutely no
5421	 * effect on a CPU running in that state. In other words, even if
5422	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
5423	 * delete data structures out from under it.  RCU really has no
5424	 * choice here: we need to keep an RCU-free window in idle where
5425	 * the CPU may possibly enter into low power mode. This way other
5426	 * CPUs that have started a grace period can notice the extended
5427	 * quiescent state. Otherwise we would delay any grace period for as
5428	 * long as we run in the idle task.
5429	 *
5430	 * So complain bitterly if someone does call rcu_read_lock(),
5431	 * rcu_read_lock_bh() and so on from extended quiescent states.
5432	 */
5433	if (!rcu_is_watching())
5434		pr_warn("RCU used illegally from extended quiescent state!\n");
5435
5436	lockdep_print_held_locks(curr);
5437	pr_warn("\nstack backtrace:\n");
5438	dump_stack();
5439}
5440EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
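
/*
 * Example (editor's sketch): code that would typically end up in
 * lockdep_rcu_suspicious(). With CONFIG_PROVE_RCU, rcu_dereference()
 * expands to a check that the caller is in an RCU read-side critical
 * section; dereferencing outside one, as below, trips RCU_LOCKDEP_WARN(),
 * which calls the function above. struct my_data, my_ptr and
 * read_without_protection are invented names.
 */
#if 0
struct my_data {
	int val;
};
static struct my_data __rcu *my_ptr;

static int read_without_protection(void)
{
	/*
	 * No rcu_read_lock() held and no lock passed to a _check() variant:
	 * this is exactly the "suspicious RCU usage" case warned about above.
	 */
	return rcu_dereference(my_ptr)->val;
}
#endif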