/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
		return DEBUG_LOCKS_WARN_ON(1);

	current->lockdep_recursion--;
	arch_spin_unlock(&lockdep_lock);
	return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	arch_spin_unlock(&lockdep_lock);

	return ret;
}

static int lockdep_initialized;

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated once, during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
		      cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}
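
/*
 * Editor's note: if every slot is already occupied by a different ip,
 * the loop above falls through and lock_point() returns
 * LOCKSTAT_POINTS; callers are expected to range-check the returned
 * index before using it to account a contention event.
 */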

static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}

struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)
{
	put_cpu_var(cpu_lock_stats);
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
	put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct list_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct list_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
	(key2))
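
/*
 * Editor's sketch of how the chain key evolves (id_A/id_B are
 * hypothetical class indices; MAX_LOCKDEP_KEYS_BITS is 13 per the
 * lockdep headers at this time): each acquisition rotates the
 * running 64-bit key left by that many bits and XORs in the new
 * value, roughly
 *
 *	u64 key = 0;
 *	key = iterate_chain_key(key, id_A);
 *	key = iterate_chain_key(key, id_B);
 *
 * so taking the same locks in a different order yields a different
 * chain key, which is what makes the chain cache order-sensitive.
 */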

void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);
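
/*
 * Editor's usage sketch (hypothetical caller): code that must not be
 * tracked by lockdep - e.g. a debugging facility whose own locking
 * would only confuse the validator - brackets itself like:
 *
 *	lockdep_off();
 *	... acquire/release locks without lockdep tracking ...
 *	lockdep_on();
 *
 * The calls nest, since they just bump current->lockdep_recursion.
 */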

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
# define RECLAIM_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
# define RECLAIM_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. Returning 1 here would allow everything. */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->entries = stack_trace + nr_stack_trace_entries;

	trace->skip = 3;

	save_stack_trace(trace);

	/*
	 * Some daft arches put -1 at the end to indicate it's a full trace.
	 *
	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
	 * complete trace that maxes out the entries provided will be reported
	 * as incomplete, friggin useless </rant>
	 */
	if (trace->nr_entries != 0 &&
	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
		trace->nr_entries--;

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return 0;
	}

	return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code. Not even early_printk()
 * might work. So we mark any initialization errors and printk
 * about it later on, in lockdep_info().
 */
static int lockdep_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
	.entries = lockdep_init_trace_data,
};

/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	char c = '.';

	if (class->usage_mask & lock_flag(bit + 2))
		c = '+';
	if (class->usage_mask & lock_flag(bit)) {
		c = '-';
		if (class->usage_mask & lock_flag(bit + 2))
			c = '?';
	}

	return c;
}
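
/*
 * Editor's note on the characters produced above:
 *   '.'  neither the USED_IN nor the ENABLED bit is set for this state
 *   '-'  the class was acquired in this irq context (USED_IN)
 *   '+'  the class was acquired with this irq state enabled (ENABLED)
 *   '?'  both of the above - the dangerous combination
 */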

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE)						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}

static int __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name)
		name = __get_key_name(class->key, str);

	return printk("%s", name);
}

static void print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
	const char *name;

	get_usage_chars(class, usage);

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(" (%s", name);
	} else {
		printk(" (%s", name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		if (class->subclass)
			printk("/%d", class->subclass);
	}
	printk("){%s}", usage);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk("%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	print_lock_name(hlock_class(hlock));
	printk(", at: ");
	print_ip_sym(hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *curr)
{
	int i, depth = curr->lockdep_depth;

	if (!depth) {
		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
		return;
	}
	printk("%d lock%s held by %s/%d:\n",
		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));

	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(curr->held_locks + i);
	}
}

static void print_kernel_version(void)
{
	printk("%s %.*s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
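
/*
 * Editor's sketch of why this matters: lock-class keys must never
 * live on the stack or in freed memory. A statically allocated lock,
 * e.g.
 *
 *	static DEFINE_SPINLOCK(hypothetical_lock);
 *
 * passes static_obj() and can use its own address as the class key,
 * while a lock embedded in dynamically allocated memory must be set
 * up via spin_lock_init()/mutex_init(), whose static lock_class_key
 * satisfies the persistence check in register_lock_class() below.
 */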

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}

/*
 * Look up a lock's class in the hash-table. The result is cached in
 * the lock object itself (by register_lock_class() below), so the
 * actual hash lookup should happen only once per lock object.
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
	/*
	 * If the architecture calls into lockdep before initializing
	 * the hashes then we'll warn about it later. (we cannot printk
	 * right now)
	 */
	if (unlikely(!lockdep_initialized)) {
		lockdep_init();
		lockdep_init_error = 1;
		save_stack_trace(&lockdep_init_trace);
	}
#endif

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	/*
	 * Static locks do not have their class-keys yet - for them the key
	 * is the lock object itself:
	 */
	if (unlikely(!lock->key))
		lock->key = (void *)lock;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We can walk the hash lockfree, because the hash only
	 * grows, and we are careful when adding entries to the end:
	 */
	list_for_each_entry(class, hash_head, hash_entry) {
		if (class->key == key) {
			WARN_ON_ONCE(class->name != lock->name);
			return class;
		}
	}

	return NULL;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;
	unsigned long flags;

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		return class;

	/*
	 * Debug-check: all keys must be persistent!
	 */
	if (!static_obj(lock->key)) {
		debug_locks_off();
		printk("INFO: trying to register non-static key.\n");
		printk("the code is fine but needs lockdep annotation.\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	raw_local_irq_save(flags);
	if (!graph_lock()) {
		raw_local_irq_restore(flags);
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	list_for_each_entry(class, hash_head, hash_entry)
		if (class->key == key)
			goto out_unlock_set;
	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
		raw_local_irq_restore(flags);

		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	debug_atomic_inc(nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	list_add_tail_rcu(&class->hash_entry, hash_head);
	/*
	 * Add it to the global list of classes:
	 */
	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		graph_unlock();
		raw_local_irq_restore(flags);

		printk("\nnew class %p: %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();

		raw_local_irq_save(flags);
		if (!graph_lock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();
	raw_local_irq_restore(flags);

	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
			    struct list_head *head, unsigned long ip,
			    int distance, struct stack_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->distance = distance;
	entry->trace = *trace;
	/*
	 * Since we never remove from the dependency list, the list can
	 * be walked lockless by other CPUs, it's only allocation
	 * that must be protected by the spinlock. But this also means
	 * we must make new entries visible only once writes to the
	 * entry become visible - hence the RCU op:
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}

/*
 * For good efficiency of the modulo operation, we use a power of 2.
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to a
 * previously held lock, if there is a circular dependency
 * between them.
 */
struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}
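
/*
 * Editor's worked example of the masked arithmetic: with
 * MAX_CIRCULAR_QUEUE_SIZE == 4096, front == 4095 and rear == 3
 * (i.e. the indices have wrapped), the element count is
 * (3 - 4095) & 4095 == 4, which is correct: the unsigned
 * subtraction plus the power-of-two mask makes wrap-around free.
 */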

static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	lock->parent = parent;
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}

static int __bfs(struct lock_list *source_entry,
		 void *data,
		 int (*match)(struct lock_list *entry, void *data),
		 struct lock_list **target_entry,
		 int forward)
{
	struct lock_list *entry;
	struct list_head *head;
	struct circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	if (forward)
		head = &source_entry->class->locks_after;
	else
		head = &source_entry->class->locks_before;

	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)source_entry);

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, (unsigned long *)&lock);

		if (!lock->class) {
			ret = -2;
			goto exit;
		}

		if (forward)
			head = &lock->class->locks_after;
		else
			head = &lock->class->locks_before;

		list_for_each_entry(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;
				mark_lock_accessed(entry, lock);
				if (match(entry, data)) {
					*target_entry = entry;
					ret = 0;
					goto exit;
				}

				if (__cq_enqueue(cq, (unsigned long)entry)) {
					ret = -1;
					goto exit;
				}
				cq_depth = __cq_get_elem_count(cq);
				if (max_bfs_queue_depth < cq_depth)
					max_bfs_queue_depth = cq_depth;
			}
		}
	}
exit:
	return ret;
}
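
/*
 * Editor's note on __bfs() return values, as used by the callers
 * below: 1 means no match was found; 0 means a match was found and
 * *target_entry points at it (its ->parent chain then encodes the
 * shortest path back to the source); -1 means the search queue
 * overflowed; -2 means a corrupted entry (NULL class) was hit.
 */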

static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 1);
}

static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 0);
}

/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
	if (debug_locks_silent)
		return 0;
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	printk(":\n");
	print_stack_trace(&target->trace, 6);

	return 0;
}

static void
print_circular_lock_scenario(struct held_lock *src,
			     struct held_lock *tgt,
			     struct lock_list *prt)
{
	struct lock_class *source = hlock_class(src);
	struct lock_class *target = hlock_class(tgt);
	struct lock_class *parent = prt->class;

	/*
	 * When there is a direct locking problem, where the unsafe_class
	 * lock is taken directly under the safe_class lock, all we need
	 * to show is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) that is not the same as
	 * the safe lock, then the lock chain is used to describe the
	 * problem. Otherwise we would need to show a different CPU case
	 * for each link in the chain from the safe_class lock to the
	 * unsafe_class lock.
	 */
	if (parent != source) {
		printk("Chain exists of:\n  ");
		__print_lock_name(source);
		printk(" --> ");
		__print_lock_name(parent);
		printk(" --> ");
		__print_lock_name(target);
		printk("\n\n");
	}

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(target);
	printk(");\n");
	printk("                               lock(");
	__print_lock_name(parent);
	printk(");\n");
	printk("                               lock(");
	__print_lock_name(target);
	printk(");\n");
	printk("  lock(");
	__print_lock_name(source);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
			struct held_lock *check_src,
			struct held_lock *check_tgt)
{
	struct task_struct *curr = current;

	if (debug_locks_silent)
		return 0;

	printk("\n=======================================================\n");
	printk(  "[ INFO: possible circular locking dependency detected ]\n");
	print_kernel_version();
	printk(  "-------------------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);
	printk("\nbut task is already holding lock:\n");
	print_lock(check_tgt);
	printk("\nwhich lock already depends on the new lock.\n\n");
	printk("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);

	return 0;
}

static inline int class_equal(struct lock_list *entry, void *data)
{
	return entry->class == data;
}

static noinline int print_circular_bug(struct lock_list *this,
				struct lock_list *target,
				struct held_lock *check_src,
				struct held_lock *check_tgt)
{
	struct task_struct *curr = current;
	struct lock_list *parent;
	struct lock_list *first_parent;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	if (!save_trace(&this->trace))
		return 0;

	depth = get_lock_depth(target);

	print_circular_bug_header(target, depth, check_src, check_tgt);

	parent = get_lock_parent(target);
	first_parent = parent;

	while (parent) {
		print_circular_bug_entry(parent, --depth);
		parent = get_lock_parent(parent);
	}

	printk("\nother info that might help us debug this:\n\n");
	print_circular_lock_scenario(check_src, check_tgt,
				     first_parent);

	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static noinline int print_bfs_bug(int ret)
{
	if (!debug_locks_off_graph_unlock())
		return 0;

	WARN(1, "lockdep bfs error:%d\n", ret);

	return 0;
}

static int noop_count(struct lock_list *entry, void *data)
{
	(*(unsigned long *)data)++;
	return 0;
}

unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_forward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_backward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

/*
 * Prove that the dependency graph starting at <root> cannot
 * lead to <target>. Return 0 if it does.
 */
static noinline int
check_noncircular(struct lock_list *root, struct lock_class *target,
		struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_cyclic_checks);

	result = __bfs_forwards(root, target, class_equal, target_entry);

	return result;
}
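
/*
 * Editor's note: check_noncircular() itself does no printing - it
 * only reports the __bfs() result, where 0 means a path to <target>
 * exists, i.e. adding the new dependency would create a cycle; the
 * caller (check_prev_add() below) then prints the circular bug.
 */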

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */

static inline int usage_match(struct lock_list *entry, void *bit)
{
	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
}

/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_forwards_checks);

	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_backwards_checks);

	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

static void print_lock_class_header(struct lock_class *class, int depth)
{
	int bit;

	printk("%*s->", depth, "");
	print_lock_name(class);
	printk(" ops: %lu", class->ops);
	printk(" {\n");

	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
		if (class->usage_mask & (1 << bit)) {
			int len = depth;

			len += printk("%*s   %s", depth, "", usage_str[bit]);
			len += printk(" at:\n");
			print_stack_trace(class->usage_traces + bit, len);
		}
	}
	printk("%*s }\n", depth, "");

	printk("%*s ... key      at: ", depth, "");
	print_ip_sym((unsigned long)class->key);
}

/*
 * printk the shortest lock dependencies from @leaf to @root in reverse order:
 */
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
				struct lock_list *root)
{
	struct lock_list *entry = leaf;
	int depth;

	/* compute the depth from the tree generated by the BFS */
	depth = get_lock_depth(leaf);

	do {
		print_lock_class_header(entry->class, depth);
		printk("%*s ... acquired at:\n", depth, "");
		print_stack_trace(&entry->trace, 2);
		printk("\n");

		if (depth == 0 && (entry != root)) {
			printk("lockdep:%s bad path found in chain graph\n", __func__);
			break;
		}

		entry = get_lock_parent(entry);
		depth--;
	} while (entry && (depth >= 0));

	return;
}

static void
print_irq_lock_scenario(struct lock_list *safe_entry,
			struct lock_list *unsafe_entry,
			struct lock_class *prev_class,
			struct lock_class *next_class)
{
	struct lock_class *safe_class = safe_entry->class;
	struct lock_class *unsafe_class = unsafe_entry->class;
	struct lock_class *middle_class = prev_class;

	if (middle_class == safe_class)
		middle_class = next_class;

	/*
	 * When there is a direct locking problem, where the unsafe_class
	 * lock is taken directly under the safe_class lock, all we need
	 * to show is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) that is not the same as
	 * the safe lock, then the lock chain is used to describe the
	 * problem. Otherwise we would need to show a different CPU case
	 * for each link in the chain from the safe_class lock to the
	 * unsafe_class lock.
	 */
	if (middle_class != unsafe_class) {
		printk("Chain exists of:\n  ");
		__print_lock_name(safe_class);
		printk(" --> ");
		__print_lock_name(middle_class);
		printk(" --> ");
		__print_lock_name(unsafe_class);
		printk("\n\n");
	}

	printk(" Possible interrupt unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(unsafe_class);
	printk(");\n");
	printk("                               local_irq_disable();\n");
	printk("                               lock(");
	__print_lock_name(safe_class);
	printk(");\n");
	printk("                               lock(");
	__print_lock_name(middle_class);
	printk(");\n");
	printk("  <Interrupt>\n");
	printk("    lock(");
	__print_lock_name(safe_class);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

static int
print_bad_irq_dependency(struct task_struct *curr,
			 struct lock_list *prev_root,
			 struct lock_list *next_root,
			 struct lock_list *backwards_entry,
			 struct lock_list *forwards_entry,
			 struct held_lock *prev,
			 struct held_lock *next,
			 enum lock_usage_bit bit1,
			 enum lock_usage_bit bit2,
			 const char *irqclass)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n======================================================\n");
	printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
		irqclass, irqclass);
	print_kernel_version();
	printk(  "------------------------------------------------------\n");
	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
		curr->comm, task_pid_nr(curr),
		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
		curr->hardirqs_enabled,
		curr->softirqs_enabled);
	print_lock(next);

	printk("\nand this task is already holding:\n");
	print_lock(prev);
	printk("which would create a new lock dependency:\n");
	print_lock_name(hlock_class(prev));
	printk(" ->");
	print_lock_name(hlock_class(next));
	printk("\n");

	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
		irqclass);
	print_lock_name(backwards_entry->class);
	printk("\n... which became %s-irq-safe at:\n", irqclass);

	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);

	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
	print_lock_name(forwards_entry->class);
	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
	printk("...");

	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);

	printk("\nother info that might help us debug this:\n\n");
	print_irq_lock_scenario(backwards_entry, forwards_entry,
				hlock_class(prev), hlock_class(next));

	lockdep_print_held_locks(curr);

	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
	printk(" and the holding lock:\n");
	if (!save_trace(&prev_root->trace))
		return 0;
	print_shortest_lock_dependencies(backwards_entry, prev_root);

	printk("\nthe dependencies between the lock to be acquired");
	printk(" and %s-irq-unsafe lock:\n", irqclass);
	if (!save_trace(&next_root->trace))
		return 0;
	print_shortest_lock_dependencies(forwards_entry, next_root);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static int
check_usage(struct task_struct *curr, struct held_lock *prev,
	    struct held_lock *next, enum lock_usage_bit bit_backwards,
	    enum lock_usage_bit bit_forwards, const char *irqclass)
{
	int ret;
	struct lock_list this, that;
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list *uninitialized_var(target_entry1);

	this.parent = NULL;

	this.class = hlock_class(prev);
	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	that.parent = NULL;
	that.class = hlock_class(next);
	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_bad_irq_dependency(curr, &this, &that,
			target_entry, target_entry1,
			prev, next,
			bit_backwards, bit_forwards, irqclass);
}

static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE),
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline const char *state_name(enum lock_usage_bit bit)
{
	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
}

static int exclusive_bit(int new_bit)
{
	/*
	 * USED_IN
	 * USED_IN_READ
	 * ENABLED
	 * ENABLED_READ
	 *
	 * bit 0  - write/read
	 * bit 1  - used_in/enabled
	 * bits 2+ - state
	 */

	int state = new_bit & ~3;
	int dir = new_bit & 2;

	/*
	 * keep state, bit flip the direction and strip read.
	 */
	return state | (dir ^ 2);
}
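
/*
 * Editor's worked example, following the bit layout documented
 * above: exclusive_bit(LOCK_USED_IN_HARDIRQ) yields
 * LOCK_ENABLED_HARDIRQ and vice versa. The _READ variants also map
 * to the write-side bit, since bit 0 is always cleared in the
 * result - read usage conflicts with the write-side exclusive bit.
 */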

static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
			   struct held_lock *next, enum lock_usage_bit bit)
{
	/*
	 * Prove that the new dependency does not connect a hardirq-safe
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	bit++; /* _READ */

	/*
	 * Prove that the new dependency does not connect a hardirq-safe-read
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	return 1;
}

static int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
#define LOCKDEP_STATE(__STATE)						\
	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
		return 0;
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	return 1;
}

static void inc_chains(void)
{
	if (current->hardirq_context)
		nr_hardirq_chains++;
	else {
		if (current->softirq_context)
			nr_softirq_chains++;
		else
			nr_process_chains++;
	}
}

#else

static inline int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
	return 1;
}

static inline void inc_chains(void)
{
	nr_process_chains++;
}

#endif

static void
print_deadlock_scenario(struct held_lock *nxt,
			     struct held_lock *prv)
{
	struct lock_class *next = hlock_class(nxt);
	struct lock_class *prev = hlock_class(prv);

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0\n");
	printk("       ----\n");
	printk("  lock(");
	__print_lock_name(prev);
	printk(");\n");
	printk("  lock(");
	__print_lock_name(next);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
	printk(" May be due to missing lock nesting notation\n\n");
}

static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
		   struct held_lock *next)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n=============================================\n");
	printk(  "[ INFO: possible recursive locking detected ]\n");
	print_kernel_version();
	printk(  "---------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(next);
	printk("\nbut task is already holding lock:\n");
	print_lock(prev);

	printk("\nother info that might help us debug this:\n");
	print_deadlock_scenario(next, prev);
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
	       struct lockdep_map *next_instance, int read)
{
	struct held_lock *prev;
	struct held_lock *nest = NULL;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		prev = curr->held_locks + i;

		if (prev->instance == next->nest_lock)
			nest = prev;

		if (hlock_class(prev) != hlock_class(next))
			continue;

		/*
		 * Allow read-after-read recursion of the same
		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
		 */
		if ((read == 2) && prev->read)
			return 2;

		/*
		 * We're holding the nest_lock, which serializes this lock's
		 * nesting behaviour.
		 */
		if (nest)
			return 2;

		return print_deadlock_bug(curr, prev, next);
	}
	return 1;
}
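
/*
 * Editor's illustration: recursive-read acquisition of the same
 * class is fine,
 *
 *	read_lock(&rwlock);
 *	read_lock(&rwlock);	/- read == 2 and prev->read set: returns 2 -/
 *
 * while taking the same class twice for write, without a nest_lock
 * annotation, is reported through print_deadlock_bug().
 */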

/*
 * There was a chain-cache miss, and we are about to add a new dependency
 * to a previous lock. We recursively validate the following rules:
 *
 *  - would the adding of the <prev> -> <next> dependency create a
 *    circular dependency in the graph? [== circular deadlock]
 *
 *  - does the new prev->next dependency connect any hardirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with hardirq contexts]
 *
 *  - does the new prev->next dependency connect any softirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with softirq contexts]
 *
 * Any of these scenarios could lead to a deadlock.
 *
 * Then if all the validations pass, we add the forwards and backwards
 * dependency.
 */
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
	       struct held_lock *next, int distance, int trylock_loop)
{
	struct lock_list *entry;
	int ret;
	struct lock_list this;
	struct lock_list *uninitialized_var(target_entry);
	/*
	 * Static variable, serialized by the graph_lock().
	 *
	 * We use this static variable to save the stack trace in case
	 * we call into this function multiple times due to encountering
	 * trylocks in the held lock stack.
	 */
	static struct stack_trace trace;

	/*
	 * Prove that the new <prev> -> <next> dependency would not
	 * create a circular dependency in the graph. (We do this by
	 * forward-recursing into the graph starting at <next>, and
	 * checking whether we can reach <prev>.)
	 *
	 * We are using global variables to control the recursion, to
	 * keep the stackframe size of the recursive functions low:
	 */
	this.class = hlock_class(next);
	this.parent = NULL;
	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
	if (unlikely(!ret))
		return print_circular_bug(&this, target_entry, next, prev);
	else if (unlikely(ret < 0))
		return print_bfs_bug(ret);

	if (!check_prev_add_irq(curr, prev, next))
		return 0;

	/*
	 * For recursive read-locks we do all the dependency checks,
	 * but we don't store read-triggered dependencies (only
	 * write-triggered dependencies). This ensures that only the
	 * write-side dependencies matter, and that if for example a
	 * write-lock never takes any other locks, then the reads are
	 * equivalent to a NOP.
	 */
	if (next->read == 2 || prev->read == 2)
		return 1;
	/*
	 * Is the <prev> -> <next> dependency already present?
	 *
	 * (this may occur even though this is a new chain: consider
	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
	 *  chains - the second one will be new, but L1 already has
	 *  L2 added to its dependency list, due to the first chain.)
	 */
	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
		if (entry->class == hlock_class(next)) {
			if (distance == 1)
				entry->distance = 1;
			return 2;
		}
	}

	if (!trylock_loop && !save_trace(&trace))
		return 0;

	/*
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
			       &hlock_class(prev)->locks_after,
			       next->acquire_ip, distance, &trace);

	if (!ret)
		return 0;

	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
			       &hlock_class(next)->locks_before,
			       next->acquire_ip, distance, &trace);
	if (!ret)
		return 0;

	/*
	 * Debugging printouts:
	 */
	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
		graph_unlock();
		printk("\n new dependency: ");
		print_lock_name(hlock_class(prev));
		printk(" => ");
		print_lock_name(hlock_class(next));
		printk("\n");
		dump_stack();
		return graph_lock();
	}
	return 1;
}

/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
	int depth = curr->lockdep_depth;
	int trylock_loop = 0;
	struct held_lock *hlock;

	/*
	 * Debugging checks.
	 *
	 * Depth must not be zero for a non-head lock:
	 */
	if (!depth)
		goto out_bug;
	/*
	 * At least two relevant locks must exist for this
	 * to be a head:
	 */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;

	for (;;) {
		int distance = curr->lockdep_depth - depth + 1;
		hlock = curr->held_locks + depth-1;
		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2) {
			if (!check_prev_add(curr, hlock, next,
						distance, trylock_loop))
				return 0;
			/*
			 * Stop after the first non-trylock entry,
			 * as non-trylock entries have added their
			 * own direct dependencies already, so this
			 * lock is connected to them indirectly:
			 */
			if (!hlock->trylock)
				break;
		}
		depth--;
		/*
		 * End of lock-stack?
		 */
		if (!depth)
			break;
		/*
		 * Stop the search if we cross into another context:
		 */
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
		trylock_loop = 1;
	}
	return 1;
out_bug:
	if (!debug_locks_off_graph_unlock())
		return 0;

	WARN_ON(1);

	return 0;
}
1951
1952unsigned long nr_lock_chains;
1953struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1954int nr_chain_hlocks;
1955static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1956
1957struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1958{
1959	return lock_classes + chain_hlocks[chain->base + i];
1960}
1961
1962/*
1963 * Look up a dependency chain. If the key is not present yet then
1964 * add it and return 1 - in this case the new dependency chain is
1965 * validated. If the key is already hashed, return 0.
1966 * (On return with 1 graph_lock is held.)
1967 */
1968static inline int lookup_chain_cache(struct task_struct *curr,
1969				     struct held_lock *hlock,
1970				     u64 chain_key)
1971{
1972	struct lock_class *class = hlock_class(hlock);
1973	struct list_head *hash_head = chainhashentry(chain_key);
1974	struct lock_chain *chain;
1975	struct held_lock *hlock_curr, *hlock_next;
1976	int i, j;
1977
1978	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1979		return 0;
1980	/*
1981	 * We can walk it lock-free, because entries only get added
1982	 * to the hash:
1983	 */
1984	list_for_each_entry(chain, hash_head, entry) {
1985		if (chain->chain_key == chain_key) {
1986cache_hit:
1987			debug_atomic_inc(chain_lookup_hits);
1988			if (very_verbose(class))
1989				printk("\nhash chain already cached, key: "
1990					"%016Lx tail class: [%p] %s\n",
1991					(unsigned long long)chain_key,
1992					class->key, class->name);
1993			return 0;
1994		}
1995	}
1996	if (very_verbose(class))
1997		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1998			(unsigned long long)chain_key, class->key, class->name);
1999	/*
2000	 * Allocate a new chain entry from the static array, and add
2001	 * it to the hash:
2002	 */
2003	if (!graph_lock())
2004		return 0;
2005	/*
2006	 * We have to walk the chain again locked - to avoid duplicates:
2007	 */
2008	list_for_each_entry(chain, hash_head, entry) {
2009		if (chain->chain_key == chain_key) {
2010			graph_unlock();
2011			goto cache_hit;
2012		}
2013	}
2014	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2015		if (!debug_locks_off_graph_unlock())
2016			return 0;
2017
2018		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
2019		printk("turning off the locking correctness validator.\n");
2020		dump_stack();
2021		return 0;
2022	}
2023	chain = lock_chains + nr_lock_chains++;
2024	chain->chain_key = chain_key;
2025	chain->irq_context = hlock->irq_context;
2026	/* Find the first held_lock of current chain */
2027	hlock_next = hlock;
2028	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2029		hlock_curr = curr->held_locks + i;
2030		if (hlock_curr->irq_context != hlock_next->irq_context)
2031			break;
2032		hlock_next = hlock;
2033	}
2034	i++;
2035	chain->depth = curr->lockdep_depth + 1 - i;
2036	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2037		chain->base = nr_chain_hlocks;
2038		nr_chain_hlocks += chain->depth;
2039		for (j = 0; j < chain->depth - 1; j++, i++) {
2040			int lock_id = curr->held_locks[i].class_idx - 1;
2041			chain_hlocks[chain->base + j] = lock_id;
2042		}
2043		chain_hlocks[chain->base + j] = class - lock_classes;
2044	}
2045	list_add_tail_rcu(&chain->entry, hash_head);
2046	debug_atomic_inc(chain_lookup_misses);
2047	inc_chains();
2048
2049	return 1;
2050}
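/*
 * A sketch of the chain-key idea (hypothetical classes A, B, C):
 * acquiring A, then B, then C in one context yields
 *
 *	chain_key = iterate_chain_key(iterate_chain_key(
 *			iterate_chain_key(0, id_A), id_B), id_C)
 *
 * so an identical sequence of classes always hashes to the same key,
 * and a cache hit above means this exact chain was validated before.
 */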
2051
2052static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2053		struct held_lock *hlock, int chain_head, u64 chain_key)
2054{
2055	/*
2056	 * Trylock needs to maintain the stack of held locks, but it
2057	 * does not add new dependencies, because trylock can be done
2058	 * in any order.
2059	 *
2060	 * We look up the chain_key and do the O(N^2) check and update of
2061	 * the dependencies only if this is a new dependency chain.
2062	 * (If lookup_chain_cache() returns with 1 it acquires
2063	 * graph_lock for us)
2064	 */
2065	if (!hlock->trylock && (hlock->check == 2) &&
2066	    lookup_chain_cache(curr, hlock, chain_key)) {
2067		/*
2068		 * Check whether last held lock:
2069		 *
2070		 * - is irq-safe, if this lock is irq-unsafe
2071		 * - is softirq-safe, if this lock is hardirq-unsafe
2072		 *
2073		 * And check whether the new lock's dependency graph
2074		 * could lead back to the previous lock.
2075		 *
2076		 * Any of these scenarios could lead to a deadlock. If
2077		 * all validations pass, add the new dependencies below.
2078		 */
2079		int ret = check_deadlock(curr, hlock, lock, hlock->read);
2080
2081		if (!ret)
2082			return 0;
2083		/*
2084		 * Mark recursive read, as we jump over it when
2085		 * building dependencies (just like we jump over
2086		 * trylock entries):
2087		 */
2088		if (ret == 2)
2089			hlock->read = 2;
2090		/*
2091		 * Add dependency only if this lock is not the head
2092		 * of the chain, and if it's not a secondary read-lock:
2093		 */
2094		if (!chain_head && ret != 2)
2095			if (!check_prevs_add(curr, hlock))
2096				return 0;
2097		graph_unlock();
2098	} else
2099		/* after lookup_chain_cache(): */
2100		if (unlikely(!debug_locks))
2101			return 0;
2102
2103	return 1;
2104}
2105#else
2106static inline int validate_chain(struct task_struct *curr,
2107	       	struct lockdep_map *lock, struct held_lock *hlock,
2108		int chain_head, u64 chain_key)
2109{
2110	return 1;
2111}
2112#endif
2113
2114/*
2115 * We are building curr_chain_key incrementally, so double-check
2116 * it from scratch, to make sure that it's done correctly:
2117 */
2118static void check_chain_key(struct task_struct *curr)
2119{
2120#ifdef CONFIG_DEBUG_LOCKDEP
2121	struct held_lock *hlock, *prev_hlock = NULL;
2122	unsigned int i, id;
2123	u64 chain_key = 0;
2124
2125	for (i = 0; i < curr->lockdep_depth; i++) {
2126		hlock = curr->held_locks + i;
2127		if (chain_key != hlock->prev_chain_key) {
2128			debug_locks_off();
2129			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2130				curr->lockdep_depth, i,
2131				(unsigned long long)chain_key,
2132				(unsigned long long)hlock->prev_chain_key);
2133			return;
2134		}
2135		id = hlock->class_idx - 1;
2136		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2137			return;
2138
2139		if (prev_hlock && (prev_hlock->irq_context !=
2140							hlock->irq_context))
2141			chain_key = 0;
2142		chain_key = iterate_chain_key(chain_key, id);
2143		prev_hlock = hlock;
2144	}
2145	if (chain_key != curr->curr_chain_key) {
2146		debug_locks_off();
2147		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2148			curr->lockdep_depth, i,
2149			(unsigned long long)chain_key,
2150			(unsigned long long)curr->curr_chain_key);
2151	}
2152#endif
2153}
2154
2155static void
2156print_usage_bug_scenario(struct held_lock *lock)
2157{
2158	struct lock_class *class = hlock_class(lock);
2159
2160	printk(" Possible unsafe locking scenario:\n\n");
2161	printk("       CPU0\n");
2162	printk("       ----\n");
2163	printk("  lock(");
2164	__print_lock_name(class);
2165	printk(");\n");
2166	printk("  <Interrupt>\n");
2167	printk("    lock(");
2168	__print_lock_name(class);
2169	printk(");\n");
2170	printk("\n *** DEADLOCK ***\n\n");
2171}
2172
2173static int
2174print_usage_bug(struct task_struct *curr, struct held_lock *this,
2175		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2176{
2177	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2178		return 0;
2179
2180	printk("\n=================================\n");
2181	printk(  "[ INFO: inconsistent lock state ]\n");
2182	print_kernel_version();
2183	printk(  "---------------------------------\n");
2184
2185	printk("inconsistent {%s} -> {%s} usage.\n",
2186		usage_str[prev_bit], usage_str[new_bit]);
2187
2188	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2189		curr->comm, task_pid_nr(curr),
2190		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2191		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2192		trace_hardirqs_enabled(curr),
2193		trace_softirqs_enabled(curr));
2194	print_lock(this);
2195
2196	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
2197	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2198
2199	print_irqtrace_events(curr);
2200	printk("\nother info that might help us debug this:\n");
2201	print_usage_bug_scenario(this);
2202
2203	lockdep_print_held_locks(curr);
2204
2205	printk("\nstack backtrace:\n");
2206	dump_stack();
2207
2208	return 0;
2209}
2210
2211/*
2212 * Print out an error if an invalid bit is set:
2213 */
2214static inline int
2215valid_state(struct task_struct *curr, struct held_lock *this,
2216	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2217{
2218	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2219		return print_usage_bug(curr, this, bad_bit, new_bit);
2220	return 1;
2221}
2222
2223static int mark_lock(struct task_struct *curr, struct held_lock *this,
2224		     enum lock_usage_bit new_bit);
2225
2226#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2227
2228/*
2229 * print irq inversion bug:
2230 */
2231static int
2232print_irq_inversion_bug(struct task_struct *curr,
2233			struct lock_list *root, struct lock_list *other,
2234			struct held_lock *this, int forwards,
2235			const char *irqclass)
2236{
2237	struct lock_list *entry = other;
2238	struct lock_list *middle = NULL;
2239	int depth;
2240
2241	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2242		return 0;
2243
2244	printk("\n=========================================================\n");
2245	printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
2246	print_kernel_version();
2247	printk(  "---------------------------------------------------------\n");
2248	printk("%s/%d just changed the state of lock:\n",
2249		curr->comm, task_pid_nr(curr));
2250	print_lock(this);
2251	if (forwards)
2252		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2253	else
2254		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2255	print_lock_name(other->class);
2256	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2257
2258	printk("\nother info that might help us debug this:\n");
2259
2260	/* Find a middle lock (if one exists) */
2261	depth = get_lock_depth(other);
2262	do {
2263		if (depth == 0 && (entry != root)) {
2264			printk("lockdep:%s bad path found in chain graph\n", __func__);
2265			break;
2266		}
2267		middle = entry;
2268		entry = get_lock_parent(entry);
2269		depth--;
2270	} while (entry && entry != root && (depth >= 0));
2271	if (forwards)
2272		print_irq_lock_scenario(root, other,
2273			middle ? middle->class : root->class, other->class);
2274	else
2275		print_irq_lock_scenario(other, root,
2276			middle ? middle->class : other->class, root->class);
2277
2278	lockdep_print_held_locks(curr);
2279
2280	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2281	if (!save_trace(&root->trace))
2282		return 0;
2283	print_shortest_lock_dependencies(other, root);
2284
2285	printk("\nstack backtrace:\n");
2286	dump_stack();
2287
2288	return 0;
2289}
2290
2291/*
2292 * Prove that in the forwards-direction subgraph starting at <this>
2293 * there is no lock matching <mask>:
2294 */
2295static int
2296check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2297		     enum lock_usage_bit bit, const char *irqclass)
2298{
2299	int ret;
2300	struct lock_list root;
2301	struct lock_list *uninitialized_var(target_entry);
2302
2303	root.parent = NULL;
2304	root.class = hlock_class(this);
2305	ret = find_usage_forwards(&root, bit, &target_entry);
2306	if (ret < 0)
2307		return print_bfs_bug(ret);
2308	if (ret == 1)
2309		return ret;
2310
2311	return print_irq_inversion_bug(curr, &root, target_entry,
2312					this, 1, irqclass);
2313}
2314
2315/*
2316 * Prove that in the backwards-direction subgraph starting at <this>
2317 * there is no lock matching <mask>:
2318 */
2319static int
2320check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2321		      enum lock_usage_bit bit, const char *irqclass)
2322{
2323	int ret;
2324	struct lock_list root;
2325	struct lock_list *uninitialized_var(target_entry);
2326
2327	root.parent = NULL;
2328	root.class = hlock_class(this);
2329	ret = find_usage_backwards(&root, bit, &target_entry);
2330	if (ret < 0)
2331		return print_bfs_bug(ret);
2332	if (ret == 1)
2333		return ret;
2334
2335	return print_irq_inversion_bug(curr, &root, target_entry,
2336					this, 0, irqclass);
2337}
2338
2339void print_irqtrace_events(struct task_struct *curr)
2340{
2341	printk("irq event stamp: %u\n", curr->irq_events);
2342	printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
2343	print_ip_sym(curr->hardirq_enable_ip);
2344	printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
2345	print_ip_sym(curr->hardirq_disable_ip);
2346	printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
2347	print_ip_sym(curr->softirq_enable_ip);
2348	printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
2349	print_ip_sym(curr->softirq_disable_ip);
2350}
2351
2352static int HARDIRQ_verbose(struct lock_class *class)
2353{
2354#if HARDIRQ_VERBOSE
2355	return class_filter(class);
2356#endif
2357	return 0;
2358}
2359
2360static int SOFTIRQ_verbose(struct lock_class *class)
2361{
2362#if SOFTIRQ_VERBOSE
2363	return class_filter(class);
2364#endif
2365	return 0;
2366}
2367
2368static int RECLAIM_FS_verbose(struct lock_class *class)
2369{
2370#if RECLAIM_VERBOSE
2371	return class_filter(class);
2372#endif
2373	return 0;
2374}
2375
2376#define STRICT_READ_CHECKS	1
2377
2378static int (*state_verbose_f[])(struct lock_class *class) = {
2379#define LOCKDEP_STATE(__STATE) \
2380	__STATE##_verbose,
2381#include "lockdep_states.h"
2382#undef LOCKDEP_STATE
2383};
2384
2385static inline int state_verbose(enum lock_usage_bit bit,
2386				struct lock_class *class)
2387{
2388	return state_verbose_f[bit >> 2](class);
2389}
2390
2391typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2392			     enum lock_usage_bit bit, const char *name);
2393
2394static int
2395mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2396		enum lock_usage_bit new_bit)
2397{
2398	int excl_bit = exclusive_bit(new_bit);
2399	int read = new_bit & 1;
2400	int dir = new_bit & 2;
2401
2402	/*
2403	 * mark USED_IN has to look forwards -- to ensure no dependency
2404	 * has ENABLED state, which would allow recursion deadlocks.
2405	 *
2406	 * mark ENABLED has to look backwards -- to ensure no dependee
2407	 * has USED_IN state, which, again, would allow recursion deadlocks.
2408	 */
2409	check_usage_f usage = dir ?
2410		check_usage_backwards : check_usage_forwards;
2411
2412	/*
2413	 * Validate that this particular lock does not have conflicting
2414	 * usage states.
2415	 */
2416	if (!valid_state(curr, this, new_bit, excl_bit))
2417		return 0;
2418
2419	/*
2420	 * Validate that the lock dependencies don't have conflicting usage
2421	 * states.
2422	 */
2423	if ((!read || !dir || STRICT_READ_CHECKS) &&
2424			!usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2425		return 0;
2426
2427	/*
2428	 * Check for read in write conflicts
2429	 */
2430	if (!read) {
2431		if (!valid_state(curr, this, new_bit, excl_bit + 1))
2432			return 0;
2433
2434		if (STRICT_READ_CHECKS &&
2435			!usage(curr, this, excl_bit + 1,
2436				state_name(new_bit + 1)))
2437			return 0;
2438	}
2439
2440	if (state_verbose(new_bit, hlock_class(this)))
2441		return 2;
2442
2443	return 1;
2444}
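/*
 * A worked example of the bit arithmetic above, assuming the usual
 * four-bits-per-state layout of enum lock_usage_bit (USED_IN,
 * USED_IN_READ, ENABLED, ENABLED_READ): for
 * new_bit == LOCK_USED_IN_SOFTIRQ_READ we get read == 1, dir == 0
 * (a USED_IN bit), and excl_bit == LOCK_ENABLED_SOFTIRQ - so the
 * forwards search must prove that no lock reachable from this one in
 * the dependency graph has ever been held with softirqs enabled.
 */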
2445
2446enum mark_type {
2447#define LOCKDEP_STATE(__STATE)	__STATE,
2448#include "lockdep_states.h"
2449#undef LOCKDEP_STATE
2450};
2451
2452/*
2453 * Mark all held locks with a usage bit:
2454 */
2455static int
2456mark_held_locks(struct task_struct *curr, enum mark_type mark)
2457{
2458	enum lock_usage_bit usage_bit;
2459	struct held_lock *hlock;
2460	int i;
2461
2462	for (i = 0; i < curr->lockdep_depth; i++) {
2463		hlock = curr->held_locks + i;
2464
2465		usage_bit = 2 + (mark << 2); /* ENABLED */
2466		if (hlock->read)
2467			usage_bit += 1; /* READ */
2468
2469		BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2470
2471		if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys)
2472			continue;
2473
2474		if (!mark_lock(curr, hlock, usage_bit))
2475			return 0;
2476	}
2477
2478	return 1;
2479}
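/*
 * The usage_bit computation above is plain index arithmetic in the
 * same four-bits-per-state layout: 2 + (mark << 2) selects the
 * ENABLED bit of the given state, e.g. mark == SOFTIRQ (1) gives
 * 2 + 4 == LOCK_ENABLED_SOFTIRQ, and the +1 for readers selects the
 * corresponding _READ variant.
 */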
2480
2481/*
2482 * Hardirqs will be enabled:
2483 */
2484static void __trace_hardirqs_on_caller(unsigned long ip)
2485{
2486	struct task_struct *curr = current;
2487
2488	/* we'll do an OFF -> ON transition: */
2489	curr->hardirqs_enabled = 1;
2490
2491	/*
2492	 * We are going to turn hardirqs on, so set the
2493	 * usage bit for all held locks:
2494	 */
2495	if (!mark_held_locks(curr, HARDIRQ))
2496		return;
2497	/*
2498	 * If we have softirqs enabled, then set the usage
2499	 * bit for all held locks. (disabled hardirqs prevented
2500	 * this bit from being set before)
2501	 */
2502	if (curr->softirqs_enabled)
2503		if (!mark_held_locks(curr, SOFTIRQ))
2504			return;
2505
2506	curr->hardirq_enable_ip = ip;
2507	curr->hardirq_enable_event = ++curr->irq_events;
2508	debug_atomic_inc(hardirqs_on_events);
2509}
2510
2511void trace_hardirqs_on_caller(unsigned long ip)
2512{
2513	time_hardirqs_on(CALLER_ADDR0, ip);
2514
2515	if (unlikely(!debug_locks || current->lockdep_recursion))
2516		return;
2517
2518	if (unlikely(current->hardirqs_enabled)) {
2519		/*
2520		 * Neither irq nor preemption are disabled here
2521		 * so this is racy by nature but losing one hit
2522		 * in a stat is not a big deal.
2523		 */
2524		__debug_atomic_inc(redundant_hardirqs_on);
2525		return;
2526	}
2527
2528	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2529		return;
2530
2531	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2532		return;
2533
2534	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2535		return;
2536
2537	current->lockdep_recursion = 1;
2538	__trace_hardirqs_on_caller(ip);
2539	current->lockdep_recursion = 0;
2540}
2541EXPORT_SYMBOL(trace_hardirqs_on_caller);
2542
2543void trace_hardirqs_on(void)
2544{
2545	trace_hardirqs_on_caller(CALLER_ADDR0);
2546}
2547EXPORT_SYMBOL(trace_hardirqs_on);
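/*
 * Note that lock users never call these tracers directly; the
 * irq-flags wrappers do, roughly along the lines of the
 * include/linux/irqflags.h convention (annotate, then really enable):
 *
 *	#define local_irq_enable() \
 *		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
 */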
2548
2549/*
2550 * Hardirqs were disabled:
2551 */
2552void trace_hardirqs_off_caller(unsigned long ip)
2553{
2554	struct task_struct *curr = current;
2555
2556	time_hardirqs_off(CALLER_ADDR0, ip);
2557
2558	if (unlikely(!debug_locks || current->lockdep_recursion))
2559		return;
2560
2561	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2562		return;
2563
2564	if (curr->hardirqs_enabled) {
2565		/*
2566		 * We have done an ON -> OFF transition:
2567		 */
2568		curr->hardirqs_enabled = 0;
2569		curr->hardirq_disable_ip = ip;
2570		curr->hardirq_disable_event = ++curr->irq_events;
2571		debug_atomic_inc(hardirqs_off_events);
2572	} else
2573		debug_atomic_inc(redundant_hardirqs_off);
2574}
2575EXPORT_SYMBOL(trace_hardirqs_off_caller);
2576
2577void trace_hardirqs_off(void)
2578{
2579	trace_hardirqs_off_caller(CALLER_ADDR0);
2580}
2581EXPORT_SYMBOL(trace_hardirqs_off);
2582
2583/*
2584 * Softirqs will be enabled:
2585 */
2586void trace_softirqs_on(unsigned long ip)
2587{
2588	struct task_struct *curr = current;
2589
2590	if (unlikely(!debug_locks || current->lockdep_recursion))
2591		return;
2592
2593	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2594		return;
2595
2596	if (curr->softirqs_enabled) {
2597		debug_atomic_inc(redundant_softirqs_on);
2598		return;
2599	}
2600
2601	current->lockdep_recursion = 1;
2602	/*
2603	 * We'll do an OFF -> ON transition:
2604	 */
2605	curr->softirqs_enabled = 1;
2606	curr->softirq_enable_ip = ip;
2607	curr->softirq_enable_event = ++curr->irq_events;
2608	debug_atomic_inc(softirqs_on_events);
2609	/*
2610	 * We are going to turn softirqs on, so set the
2611	 * usage bit for all held locks, if hardirqs are
2612	 * enabled too:
2613	 */
2614	if (curr->hardirqs_enabled)
2615		mark_held_locks(curr, SOFTIRQ);
2616	current->lockdep_recursion = 0;
2617}
2618
2619/*
2620 * Softirqs were disabled:
2621 */
2622void trace_softirqs_off(unsigned long ip)
2623{
2624	struct task_struct *curr = current;
2625
2626	if (unlikely(!debug_locks || current->lockdep_recursion))
2627		return;
2628
2629	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2630		return;
2631
2632	if (curr->softirqs_enabled) {
2633		/*
2634		 * We have done an ON -> OFF transition:
2635		 */
2636		curr->softirqs_enabled = 0;
2637		curr->softirq_disable_ip = ip;
2638		curr->softirq_disable_event = ++curr->irq_events;
2639		debug_atomic_inc(softirqs_off_events);
2640		DEBUG_LOCKS_WARN_ON(!softirq_count());
2641	} else
2642		debug_atomic_inc(redundant_softirqs_off);
2643}
2644
2645static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2646{
2647	struct task_struct *curr = current;
2648
2649	if (unlikely(!debug_locks))
2650		return;
2651
2652	/* no reclaim without waiting on it */
2653	if (!(gfp_mask & __GFP_WAIT))
2654		return;
2655
2656	/* this guy won't enter reclaim */
2657	if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2658		return;
2659
2660	/* We're only interested in __GFP_FS allocations for now */
2661	if (!(gfp_mask & __GFP_FS))
2662		return;
2663
2664	if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2665		return;
2666
2667	mark_held_locks(curr, RECLAIM_FS);
2668}
2669
2670static void check_flags(unsigned long flags);
2671
2672void lockdep_trace_alloc(gfp_t gfp_mask)
2673{
2674	unsigned long flags;
2675
2676	if (unlikely(current->lockdep_recursion))
2677		return;
2678
2679	raw_local_irq_save(flags);
2680	check_flags(flags);
2681	current->lockdep_recursion = 1;
2682	__lockdep_trace_alloc(gfp_mask, flags);
2683	current->lockdep_recursion = 0;
2684	raw_local_irq_restore(flags);
2685}
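/*
 * A sketch of the intended call site (the slab and page allocators
 * invoke this hook on every allocation entry, details elided):
 *
 *	void *kmalloc(size_t size, gfp_t flags)
 *	{
 *		lockdep_trace_alloc(flags);
 *		...
 *	}
 *
 * so holding a reclaim-unsafe lock over a GFP_FS allocation is
 * reported even if direct reclaim never actually runs.
 */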
2686
2687static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2688{
2689	/*
2690	 * If non-trylock use in a hardirq or softirq context, then
2691	 * mark the lock as used in these contexts:
2692	 */
2693	if (!hlock->trylock) {
2694		if (hlock->read) {
2695			if (curr->hardirq_context)
2696				if (!mark_lock(curr, hlock,
2697						LOCK_USED_IN_HARDIRQ_READ))
2698					return 0;
2699			if (curr->softirq_context)
2700				if (!mark_lock(curr, hlock,
2701						LOCK_USED_IN_SOFTIRQ_READ))
2702					return 0;
2703		} else {
2704			if (curr->hardirq_context)
2705				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2706					return 0;
2707			if (curr->softirq_context)
2708				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2709					return 0;
2710		}
2711	}
2712	if (!hlock->hardirqs_off) {
2713		if (hlock->read) {
2714			if (!mark_lock(curr, hlock,
2715					LOCK_ENABLED_HARDIRQ_READ))
2716				return 0;
2717			if (curr->softirqs_enabled)
2718				if (!mark_lock(curr, hlock,
2719						LOCK_ENABLED_SOFTIRQ_READ))
2720					return 0;
2721		} else {
2722			if (!mark_lock(curr, hlock,
2723					LOCK_ENABLED_HARDIRQ))
2724				return 0;
2725			if (curr->softirqs_enabled)
2726				if (!mark_lock(curr, hlock,
2727						LOCK_ENABLED_SOFTIRQ))
2728					return 0;
2729		}
2730	}
2731
2732	/*
2733	 * We reuse the irq-context infrastructure more broadly, as general
2734	 * context-checking code. This tests for GFP_FS recursion (a lock
2735	 * taken during reclaim for a GFP_FS allocation being held over
2736	 * another GFP_FS allocation).
2737	 */
2738	if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2739		if (hlock->read) {
2740			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2741					return 0;
2742		} else {
2743			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2744					return 0;
2745		}
2746	}
2747
2748	return 1;
2749}
2750
2751static int separate_irq_context(struct task_struct *curr,
2752		struct held_lock *hlock)
2753{
2754	unsigned int depth = curr->lockdep_depth;
2755
2756	/*
2757	 * Keep track of points where we cross into an interrupt context:
2758	 */
2759	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2760				curr->softirq_context;
2761	if (depth) {
2762		struct held_lock *prev_hlock;
2763
2764		prev_hlock = curr->held_locks + depth-1;
2765		/*
2766		 * If we cross into another context, reset the
2767		 * hash key (this also prevents the checking and the
2768		 * adding of the dependency to 'prev'):
2769		 */
2770		if (prev_hlock->irq_context != hlock->irq_context)
2771			return 1;
2772	}
2773	return 0;
2774}
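/*
 * The encoding above is worth spelling out: 2*hardirq + softirq
 * yields 0 in plain process context, 1 inside a softirq, and 2 (or 3,
 * for a hardirq that interrupted a softirq) inside a hardirq. Any
 * change in this value starts a fresh, independent lock chain.
 */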
2775
2776#else
2777
2778static inline
2779int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2780		enum lock_usage_bit new_bit)
2781{
2782	WARN_ON(1);
2783	return 1;
2784}
2785
2786static inline int mark_irqflags(struct task_struct *curr,
2787		struct held_lock *hlock)
2788{
2789	return 1;
2790}
2791
2792static inline int separate_irq_context(struct task_struct *curr,
2793		struct held_lock *hlock)
2794{
2795	return 0;
2796}
2797
2798void lockdep_trace_alloc(gfp_t gfp_mask)
2799{
2800}
2801
2802#endif
2803
2804/*
2805 * Mark a lock with a usage bit, and validate the state transition:
2806 */
2807static int mark_lock(struct task_struct *curr, struct held_lock *this,
2808			     enum lock_usage_bit new_bit)
2809{
2810	unsigned int new_mask = 1 << new_bit, ret = 1;
2811
2812	/*
2813	 * If already set then do not dirty the cacheline,
2814	 * nor do any checks:
2815	 */
2816	if (likely(hlock_class(this)->usage_mask & new_mask))
2817		return 1;
2818
2819	if (!graph_lock())
2820		return 0;
2821	/*
2822	 * Make sure we didn't race:
2823	 */
2824	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2825		graph_unlock();
2826		return 1;
2827	}
2828
2829	hlock_class(this)->usage_mask |= new_mask;
2830
2831	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2832		return 0;
2833
2834	switch (new_bit) {
2835#define LOCKDEP_STATE(__STATE)			\
2836	case LOCK_USED_IN_##__STATE:		\
2837	case LOCK_USED_IN_##__STATE##_READ:	\
2838	case LOCK_ENABLED_##__STATE:		\
2839	case LOCK_ENABLED_##__STATE##_READ:
2840#include "lockdep_states.h"
2841#undef LOCKDEP_STATE
2842		ret = mark_lock_irq(curr, this, new_bit);
2843		if (!ret)
2844			return 0;
2845		break;
2846	case LOCK_USED:
2847		debug_atomic_dec(nr_unused_locks);
2848		break;
2849	default:
2850		if (!debug_locks_off_graph_unlock())
2851			return 0;
2852		WARN_ON(1);
2853		return 0;
2854	}
2855
2856	graph_unlock();
2857
2858	/*
2859	 * We must printk outside of the graph_lock:
2860	 */
2861	if (ret == 2) {
2862		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2863		print_lock(this);
2864		print_irqtrace_events(curr);
2865		dump_stack();
2866	}
2867
2868	return ret;
2869}
2870
2871/*
2872 * Initialize a lock instance's lock-class mapping info:
2873 */
2874void lockdep_init_map(struct lockdep_map *lock, const char *name,
2875		      struct lock_class_key *key, int subclass)
2876{
2877	memset(lock, 0, sizeof(*lock));
2878
2879#ifdef CONFIG_LOCK_STAT
2880	lock->cpu = raw_smp_processor_id();
2881#endif
2882
2883	if (DEBUG_LOCKS_WARN_ON(!name)) {
2884		lock->name = "NULL";
2885		return;
2886	}
2887
2888	lock->name = name;
2889
2890	if (DEBUG_LOCKS_WARN_ON(!key))
2891		return;
2892	/*
2893	 * Sanity check, the lock-class key must be persistent:
2894	 */
2895	if (!static_obj(key)) {
2896		printk("BUG: key %p not in .data!\n", key);
2897		DEBUG_LOCKS_WARN_ON(1);
2898		return;
2899	}
2900	lock->key = key;
2901
2902	if (unlikely(!debug_locks))
2903		return;
2904
2905	if (subclass)
2906		register_lock_class(lock, subclass, 1);
2907}
2908EXPORT_SYMBOL_GPL(lockdep_init_map);
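/*
 * A minimal (hypothetical) open-coded use - most callers reach this
 * through spin_lock_init()/mutex_init() and friends, which supply a
 * static key automatically:
 *
 *	static struct lock_class_key mydev_lock_key;
 *
 *	lockdep_init_map(&mydev->dep_map, "mydev_lock",
 *			 &mydev_lock_key, 0);
 *
 * The key must live in static storage so the class stays valid for
 * the kernel's lifetime (see the static_obj() check above).
 */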
2909
2910struct lock_class_key __lockdep_no_validate__;
2911
2912/*
2913 * This gets called for every mutex_lock*()/spin_lock*() operation.
2914 * We maintain the dependency maps and validate the locking attempt:
2915 */
2916static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2917			  int trylock, int read, int check, int hardirqs_off,
2918			  struct lockdep_map *nest_lock, unsigned long ip,
2919			  int references)
2920{
2921	struct task_struct *curr = current;
2922	struct lock_class *class = NULL;
2923	struct held_lock *hlock;
2924	unsigned int depth, id;
2925	int chain_head = 0;
2926	int class_idx;
2927	u64 chain_key;
2928
2929	if (!prove_locking)
2930		check = 1;
2931
2932	if (unlikely(!debug_locks))
2933		return 0;
2934
2935	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2936		return 0;
2937
2938	if (lock->key == &__lockdep_no_validate__)
2939		check = 1;
2940
2941	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
2942		class = lock->class_cache[subclass];
2943	/*
2944	 * Not cached?
2945	 */
2946	if (unlikely(!class)) {
2947		class = register_lock_class(lock, subclass, 0);
2948		if (!class)
2949			return 0;
2950	}
2951	atomic_inc((atomic_t *)&class->ops);
2952	if (very_verbose(class)) {
2953		printk("\nacquire class [%p] %s", class->key, class->name);
2954		if (class->name_version > 1)
2955			printk("#%d", class->name_version);
2956		printk("\n");
2957		dump_stack();
2958	}
2959
2960	/*
2961	 * Add the lock to the list of currently held locks.
2962	 * (we don't increase the depth just yet, up until the
2963	 * dependency checks are done)
2964	 */
2965	depth = curr->lockdep_depth;
2966	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2967		return 0;
2968
2969	class_idx = class - lock_classes + 1;
2970
2971	if (depth) {
2972		hlock = curr->held_locks + depth - 1;
2973		if (hlock->class_idx == class_idx && nest_lock) {
2974			if (hlock->references)
2975				hlock->references++;
2976			else
2977				hlock->references = 2;
2978
2979			return 1;
2980		}
2981	}
2982
2983	hlock = curr->held_locks + depth;
2984	if (DEBUG_LOCKS_WARN_ON(!class))
2985		return 0;
2986	hlock->class_idx = class_idx;
2987	hlock->acquire_ip = ip;
2988	hlock->instance = lock;
2989	hlock->nest_lock = nest_lock;
2990	hlock->trylock = trylock;
2991	hlock->read = read;
2992	hlock->check = check;
2993	hlock->hardirqs_off = !!hardirqs_off;
2994	hlock->references = references;
2995#ifdef CONFIG_LOCK_STAT
2996	hlock->waittime_stamp = 0;
2997	hlock->holdtime_stamp = lockstat_clock();
2998#endif
2999
3000	if (check == 2 && !mark_irqflags(curr, hlock))
3001		return 0;
3002
3003	/* mark it as used: */
3004	if (!mark_lock(curr, hlock, LOCK_USED))
3005		return 0;
3006
3007	/*
3008	 * Calculate the chain hash: it's the combined hash of all the
3009	 * lock keys along the dependency chain. We save the hash value
3010	 * at every step so that we can get the current hash easily
3011	 * after unlock. The chain hash is then used to cache dependency
3012	 * results.
3013	 *
3014	 * The 'key ID' is the most compact key value to drive
3015	 * the hash, not class->key.
3016	 */
3017	id = class - lock_classes;
3018	if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
3019		return 0;
3020
3021	chain_key = curr->curr_chain_key;
3022	if (!depth) {
3023		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3024			return 0;
3025		chain_head = 1;
3026	}
3027
3028	hlock->prev_chain_key = chain_key;
3029	if (separate_irq_context(curr, hlock)) {
3030		chain_key = 0;
3031		chain_head = 1;
3032	}
3033	chain_key = iterate_chain_key(chain_key, id);
3034
3035	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3036		return 0;
3037
3038	curr->curr_chain_key = chain_key;
3039	curr->lockdep_depth++;
3040	check_chain_key(curr);
3041#ifdef CONFIG_DEBUG_LOCKDEP
3042	if (unlikely(!debug_locks))
3043		return 0;
3044#endif
3045	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3046		debug_locks_off();
3047		printk("BUG: MAX_LOCK_DEPTH too low!\n");
3048		printk("turning off the locking correctness validator.\n");
3049		dump_stack();
3050		return 0;
3051	}
3052
3053	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3054		max_lockdep_depth = curr->lockdep_depth;
3055
3056	return 1;
3057}
3058
3059static int
3060print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3061			   unsigned long ip)
3062{
3063	if (!debug_locks_off())
3064		return 0;
3065	if (debug_locks_silent)
3066		return 0;
3067
3068	printk("\n=====================================\n");
3069	printk(  "[ BUG: bad unlock balance detected! ]\n");
3070	printk(  "-------------------------------------\n");
3071	printk("%s/%d is trying to release lock (",
3072		curr->comm, task_pid_nr(curr));
3073	print_lockdep_cache(lock);
3074	printk(") at:\n");
3075	print_ip_sym(ip);
3076	printk("but there are no more locks to release!\n");
3077	printk("\nother info that might help us debug this:\n");
3078	lockdep_print_held_locks(curr);
3079
3080	printk("\nstack backtrace:\n");
3081	dump_stack();
3082
3083	return 0;
3084}
3085
3086/*
3087 * Common debugging checks for both nested and non-nested unlock:
3088 */
3089static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
3090			unsigned long ip)
3091{
3092	if (unlikely(!debug_locks))
3093		return 0;
3094	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3095		return 0;
3096
3097	if (curr->lockdep_depth <= 0)
3098		return print_unlock_inbalance_bug(curr, lock, ip);
3099
3100	return 1;
3101}
3102
3103static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
3104{
3105	if (hlock->instance == lock)
3106		return 1;
3107
3108	if (hlock->references) {
3109		struct lock_class *class = lock->class_cache[0];
3110
3111		if (!class)
3112			class = look_up_lock_class(lock, 0);
3113
3114		/*
3115		 * If look_up_lock_class() failed to find a class, we're trying
3116		 * to test if we hold a lock that has never yet been acquired.
3117		 * Clearly if the lock hasn't been acquired _ever_, we're not
3118		 * holding it either, so report failure.
3119		 */
3120		if (!class)
3121			return 0;
3122
3123		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3124			return 0;
3125
3126		if (hlock->class_idx == class - lock_classes + 1)
3127			return 1;
3128	}
3129
3130	return 0;
3131}
3132
3133static int
3134__lock_set_class(struct lockdep_map *lock, const char *name,
3135		 struct lock_class_key *key, unsigned int subclass,
3136		 unsigned long ip)
3137{
3138	struct task_struct *curr = current;
3139	struct held_lock *hlock, *prev_hlock;
3140	struct lock_class *class;
3141	unsigned int depth;
3142	int i;
3143
3144	depth = curr->lockdep_depth;
3145	if (DEBUG_LOCKS_WARN_ON(!depth))
3146		return 0;
3147
3148	prev_hlock = NULL;
3149	for (i = depth-1; i >= 0; i--) {
3150		hlock = curr->held_locks + i;
3151		/*
3152		 * We must not cross into another context:
3153		 */
3154		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3155			break;
3156		if (match_held_lock(hlock, lock))
3157			goto found_it;
3158		prev_hlock = hlock;
3159	}
3160	return print_unlock_inbalance_bug(curr, lock, ip);
3161
3162found_it:
3163	lockdep_init_map(lock, name, key, 0);
3164	class = register_lock_class(lock, subclass, 0);
3165	hlock->class_idx = class - lock_classes + 1;
3166
3167	curr->lockdep_depth = i;
3168	curr->curr_chain_key = hlock->prev_chain_key;
3169
3170	for (; i < depth; i++) {
3171		hlock = curr->held_locks + i;
3172		if (!__lock_acquire(hlock->instance,
3173			hlock_class(hlock)->subclass, hlock->trylock,
3174				hlock->read, hlock->check, hlock->hardirqs_off,
3175				hlock->nest_lock, hlock->acquire_ip,
3176				hlock->references))
3177			return 0;
3178	}
3179
3180	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3181		return 0;
3182	return 1;
3183}
3184
3185/*
3186 * Remove the lock from the list of currently held locks in a
3187 * potentially non-nested (out of order) manner. This is a
3188 * relatively rare operation, as all the unlock APIs default
3189 * to nested mode (which uses lock_release()):
3190 */
3191static int
3192lock_release_non_nested(struct task_struct *curr,
3193			struct lockdep_map *lock, unsigned long ip)
3194{
3195	struct held_lock *hlock, *prev_hlock;
3196	unsigned int depth;
3197	int i;
3198
3199	/*
3200	 * Check whether the lock exists in the current stack
3201	 * of held locks:
3202	 */
3203	depth = curr->lockdep_depth;
3204	if (DEBUG_LOCKS_WARN_ON(!depth))
3205		return 0;
3206
3207	prev_hlock = NULL;
3208	for (i = depth-1; i >= 0; i--) {
3209		hlock = curr->held_locks + i;
3210		/*
3211		 * We must not cross into another context:
3212		 */
3213		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3214			break;
3215		if (match_held_lock(hlock, lock))
3216			goto found_it;
3217		prev_hlock = hlock;
3218	}
3219	return print_unlock_inbalance_bug(curr, lock, ip);
3220
3221found_it:
3222	if (hlock->instance == lock)
3223		lock_release_holdtime(hlock);
3224
3225	if (hlock->references) {
3226		hlock->references--;
3227		if (hlock->references) {
3228			/*
3229			 * We had, and after removing one, still have
3230			 * references, the current lock stack is still
3231			 * valid. We're done!
3232			 */
3233			return 1;
3234		}
3235	}
3236
3237	/*
3238	 * We have the right lock to unlock, 'hlock' points to it.
3239	 * Now we remove it from the stack, and add back the other
3240	 * entries (if any), recalculating the hash along the way:
3241	 */
3242
3243	curr->lockdep_depth = i;
3244	curr->curr_chain_key = hlock->prev_chain_key;
3245
3246	for (i++; i < depth; i++) {
3247		hlock = curr->held_locks + i;
3248		if (!__lock_acquire(hlock->instance,
3249			hlock_class(hlock)->subclass, hlock->trylock,
3250				hlock->read, hlock->check, hlock->hardirqs_off,
3251				hlock->nest_lock, hlock->acquire_ip,
3252				hlock->references))
3253			return 0;
3254	}
3255
3256	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3257		return 0;
3258	return 1;
3259}
3260
3261/*
3262 * Remove the lock from the list of currently held locks - this gets
3263 * called on mutex_unlock()/spin_unlock*() (or on a failed
3264 * mutex_lock_interruptible()). This is done for unlocks that nest
3265 * perfectly. (i.e. the current top of the lock-stack is unlocked)
3266 */
3267static int lock_release_nested(struct task_struct *curr,
3268			       struct lockdep_map *lock, unsigned long ip)
3269{
3270	struct held_lock *hlock;
3271	unsigned int depth;
3272
3273	/*
3274	 * Pop off the top of the lock stack:
3275	 */
3276	depth = curr->lockdep_depth - 1;
3277	hlock = curr->held_locks + depth;
3278
3279	/*
3280	 * Is the unlock non-nested:
3281	 */
3282	if (hlock->instance != lock || hlock->references)
3283		return lock_release_non_nested(curr, lock, ip);
3284	curr->lockdep_depth--;
3285
3286	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
3287		return 0;
3288
3289	curr->curr_chain_key = hlock->prev_chain_key;
3290
3291	lock_release_holdtime(hlock);
3292
3293#ifdef CONFIG_DEBUG_LOCKDEP
3294	hlock->prev_chain_key = 0;
3295	hlock->class_idx = 0;
3296	hlock->acquire_ip = 0;
3297	hlock->irq_context = 0;
3298#endif
3299	return 1;
3300}
3301
3302/*
3303 * Remove the lock from the list of currently held locks - this gets
3304 * called on mutex_unlock()/spin_unlock*() (or on a failed
3305 * mutex_lock_interruptible()). This is done for unlocks that nest
3306 * perfectly. (i.e. the current top of the lock-stack is unlocked)
3307 */
3308static void
3309__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3310{
3311	struct task_struct *curr = current;
3312
3313	if (!check_unlock(curr, lock, ip))
3314		return;
3315
3316	if (nested) {
3317		if (!lock_release_nested(curr, lock, ip))
3318			return;
3319	} else {
3320		if (!lock_release_non_nested(curr, lock, ip))
3321			return;
3322	}
3323
3324	check_chain_key(curr);
3325}
3326
3327static int __lock_is_held(struct lockdep_map *lock)
3328{
3329	struct task_struct *curr = current;
3330	int i;
3331
3332	for (i = 0; i < curr->lockdep_depth; i++) {
3333		struct held_lock *hlock = curr->held_locks + i;
3334
3335		if (match_held_lock(hlock, lock))
3336			return 1;
3337	}
3338
3339	return 0;
3340}
3341
3342/*
3343 * Check whether we follow the irq-flags state precisely:
3344 */
3345static void check_flags(unsigned long flags)
3346{
3347#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3348    defined(CONFIG_TRACE_IRQFLAGS)
3349	if (!debug_locks)
3350		return;
3351
3352	if (irqs_disabled_flags(flags)) {
3353		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3354			printk("possible reason: unannotated irqs-off.\n");
3355		}
3356	} else {
3357		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3358			printk("possible reason: unannotated irqs-on.\n");
3359		}
3360	}
3361
3362	/*
3363	 * We don't accurately track softirq state in e.g.
3364	 * hardirq contexts (such as on 4KSTACKS), so only
3365	 * check if not in hardirq contexts:
3366	 */
3367	if (!hardirq_count()) {
3368		if (softirq_count())
3369			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3370		else
3371			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3372	}
3373
3374	if (!debug_locks)
3375		print_irqtrace_events(current);
3376#endif
3377}
3378
3379void lock_set_class(struct lockdep_map *lock, const char *name,
3380		    struct lock_class_key *key, unsigned int subclass,
3381		    unsigned long ip)
3382{
3383	unsigned long flags;
3384
3385	if (unlikely(current->lockdep_recursion))
3386		return;
3387
3388	raw_local_irq_save(flags);
3389	current->lockdep_recursion = 1;
3390	check_flags(flags);
3391	if (__lock_set_class(lock, name, key, subclass, ip))
3392		check_chain_key(current);
3393	current->lockdep_recursion = 0;
3394	raw_local_irq_restore(flags);
3395}
3396EXPORT_SYMBOL_GPL(lock_set_class);
3397
3398/*
3399 * We are not always called with irqs disabled - do that here,
3400 * and also avoid lockdep recursion:
3401 */
3402void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3403			  int trylock, int read, int check,
3404			  struct lockdep_map *nest_lock, unsigned long ip)
3405{
3406	unsigned long flags;
3407
3408	if (unlikely(current->lockdep_recursion))
3409		return;
3410
3411	raw_local_irq_save(flags);
3412	check_flags(flags);
3413
3414	current->lockdep_recursion = 1;
3415	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3416	__lock_acquire(lock, subclass, trylock, read, check,
3417		       irqs_disabled_flags(flags), nest_lock, ip, 0);
3418	current->lockdep_recursion = 0;
3419	raw_local_irq_restore(flags);
3420}
3421EXPORT_SYMBOL_GPL(lock_acquire);
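/*
 * lock_acquire() is normally reached via the acquire macros in
 * lockdep.h; a sketch of the spinlock mapping (exact wrappers vary
 * by configuration):
 *
 *	#define spin_acquire(l, s, t, i) \
 *		lock_acquire(l, s, t, 0, 2, NULL, i)
 *
 * i.e. an exclusive (read == 0), fully checked (check == 2)
 * acquisition with no nest_lock.
 */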
3422
3423void lock_release(struct lockdep_map *lock, int nested,
3424			  unsigned long ip)
3425{
3426	unsigned long flags;
3427
3428	if (unlikely(current->lockdep_recursion))
3429		return;
3430
3431	raw_local_irq_save(flags);
3432	check_flags(flags);
3433	current->lockdep_recursion = 1;
3434	trace_lock_release(lock, ip);
3435	__lock_release(lock, nested, ip);
3436	current->lockdep_recursion = 0;
3437	raw_local_irq_restore(flags);
3438}
3439EXPORT_SYMBOL_GPL(lock_release);
3440
3441int lock_is_held(struct lockdep_map *lock)
3442{
3443	unsigned long flags;
3444	int ret = 0;
3445
3446	if (unlikely(current->lockdep_recursion))
3447		return 1; /* avoid false negative lockdep_assert_held() */
3448
3449	raw_local_irq_save(flags);
3450	check_flags(flags);
3451
3452	current->lockdep_recursion = 1;
3453	ret = __lock_is_held(lock);
3454	current->lockdep_recursion = 0;
3455	raw_local_irq_restore(flags);
3456
3457	return ret;
3458}
3459EXPORT_SYMBOL_GPL(lock_is_held);
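/*
 * The typical consumer is the held-lock assertion; a sketch of the
 * lockdep.h helpers built on top of this:
 *
 *	#define lockdep_is_held(l)	lock_is_held(&(l)->dep_map)
 *	#define lockdep_assert_held(l)	\
 *		WARN_ON(debug_locks && !lockdep_is_held(l))
 *
 * which is why the recursion check above returns 1: a false "not
 * held" warning from within lockdep itself would be misleading.
 */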
3460
3461void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
3462{
3463	current->lockdep_reclaim_gfp = gfp_mask;
3464}
3465
3466void lockdep_clear_current_reclaim_state(void)
3467{
3468	current->lockdep_reclaim_gfp = 0;
3469}
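/*
 * These two bracket direct reclaim; a simplified sketch of how the
 * page allocator uses them:
 *
 *	lockdep_set_current_reclaim_state(gfp_mask);
 *	progress = try_to_free_pages(zonelist, order, gfp_mask, ...);
 *	lockdep_clear_current_reclaim_state();
 *
 * While the reclaim state is set, mark_irqflags() tags every
 * non-trylock lock acquired under a GFP_FS allocation as
 * LOCK_USED_IN_RECLAIM_FS.
 */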
3470
3471#ifdef CONFIG_LOCK_STAT
3472static int
3473print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
3474			   unsigned long ip)
3475{
3476	if (!debug_locks_off())
3477		return 0;
3478	if (debug_locks_silent)
3479		return 0;
3480
3481	printk("\n=================================\n");
3482	printk(  "[ BUG: bad contention detected! ]\n");
3483	printk(  "---------------------------------\n");
3484	printk("%s/%d is trying to contend lock (",
3485		curr->comm, task_pid_nr(curr));
3486	print_lockdep_cache(lock);
3487	printk(") at:\n");
3488	print_ip_sym(ip);
3489	printk("but there are no locks held!\n");
3490	printk("\nother info that might help us debug this:\n");
3491	lockdep_print_held_locks(curr);
3492
3493	printk("\nstack backtrace:\n");
3494	dump_stack();
3495
3496	return 0;
3497}
3498
3499static void
3500__lock_contended(struct lockdep_map *lock, unsigned long ip)
3501{
3502	struct task_struct *curr = current;
3503	struct held_lock *hlock, *prev_hlock;
3504	struct lock_class_stats *stats;
3505	unsigned int depth;
3506	int i, contention_point, contending_point;
3507
3508	depth = curr->lockdep_depth;
3509	if (DEBUG_LOCKS_WARN_ON(!depth))
3510		return;
3511
3512	prev_hlock = NULL;
3513	for (i = depth-1; i >= 0; i--) {
3514		hlock = curr->held_locks + i;
3515		/*
3516		 * We must not cross into another context:
3517		 */
3518		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3519			break;
3520		if (match_held_lock(hlock, lock))
3521			goto found_it;
3522		prev_hlock = hlock;
3523	}
3524	print_lock_contention_bug(curr, lock, ip);
3525	return;
3526
3527found_it:
3528	if (hlock->instance != lock)
3529		return;
3530
3531	hlock->waittime_stamp = lockstat_clock();
3532
3533	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3534	contending_point = lock_point(hlock_class(hlock)->contending_point,
3535				      lock->ip);
3536
3537	stats = get_lock_stats(hlock_class(hlock));
3538	if (contention_point < LOCKSTAT_POINTS)
3539		stats->contention_point[contention_point]++;
3540	if (contending_point < LOCKSTAT_POINTS)
3541		stats->contending_point[contending_point]++;
3542	if (lock->cpu != smp_processor_id())
3543		stats->bounces[bounce_contended + !!hlock->read]++;
3544	put_lock_stats(stats);
3545}
3546
3547static void
3548__lock_acquired(struct lockdep_map *lock, unsigned long ip)
3549{
3550	struct task_struct *curr = current;
3551	struct held_lock *hlock, *prev_hlock;
3552	struct lock_class_stats *stats;
3553	unsigned int depth;
3554	u64 now, waittime = 0;
3555	int i, cpu;
3556
3557	depth = curr->lockdep_depth;
3558	if (DEBUG_LOCKS_WARN_ON(!depth))
3559		return;
3560
3561	prev_hlock = NULL;
3562	for (i = depth-1; i >= 0; i--) {
3563		hlock = curr->held_locks + i;
3564		/*
3565		 * We must not cross into another context:
3566		 */
3567		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3568			break;
3569		if (match_held_lock(hlock, lock))
3570			goto found_it;
3571		prev_hlock = hlock;
3572	}
3573	print_lock_contention_bug(curr, lock, _RET_IP_);
3574	return;
3575
3576found_it:
3577	if (hlock->instance != lock)
3578		return;
3579
3580	cpu = smp_processor_id();
3581	if (hlock->waittime_stamp) {
3582		now = lockstat_clock();
3583		waittime = now - hlock->waittime_stamp;
3584		hlock->holdtime_stamp = now;
3585	}
3586
3587	trace_lock_acquired(lock, ip);
3588
3589	stats = get_lock_stats(hlock_class(hlock));
3590	if (waittime) {
3591		if (hlock->read)
3592			lock_time_inc(&stats->read_waittime, waittime);
3593		else
3594			lock_time_inc(&stats->write_waittime, waittime);
3595	}
3596	if (lock->cpu != cpu)
3597		stats->bounces[bounce_acquired + !!hlock->read]++;
3598	put_lock_stats(stats);
3599
3600	lock->cpu = cpu;
3601	lock->ip = ip;
3602}
3603
3604void lock_contended(struct lockdep_map *lock, unsigned long ip)
3605{
3606	unsigned long flags;
3607
3608	if (unlikely(!lock_stat))
3609		return;
3610
3611	if (unlikely(current->lockdep_recursion))
3612		return;
3613
3614	raw_local_irq_save(flags);
3615	check_flags(flags);
3616	current->lockdep_recursion = 1;
3617	trace_lock_contended(lock, ip);
3618	__lock_contended(lock, ip);
3619	current->lockdep_recursion = 0;
3620	raw_local_irq_restore(flags);
3621}
3622EXPORT_SYMBOL_GPL(lock_contended);
3623
3624void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3625{
3626	unsigned long flags;
3627
3628	if (unlikely(!lock_stat))
3629		return;
3630
3631	if (unlikely(current->lockdep_recursion))
3632		return;
3633
3634	raw_local_irq_save(flags);
3635	check_flags(flags);
3636	current->lockdep_recursion = 1;
3637	__lock_acquired(lock, ip);
3638	current->lockdep_recursion = 0;
3639	raw_local_irq_restore(flags);
3640}
3641EXPORT_SYMBOL_GPL(lock_acquired);
3642#endif
3643
3644/*
3645 * Used by the testsuite, sanitize the validator state
3646 * after a simulated failure:
3647 */
3648
3649void lockdep_reset(void)
3650{
3651	unsigned long flags;
3652	int i;
3653
3654	raw_local_irq_save(flags);
3655	current->curr_chain_key = 0;
3656	current->lockdep_depth = 0;
3657	current->lockdep_recursion = 0;
3658	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3659	nr_hardirq_chains = 0;
3660	nr_softirq_chains = 0;
3661	nr_process_chains = 0;
3662	debug_locks = 1;
3663	for (i = 0; i < CHAINHASH_SIZE; i++)
3664		INIT_LIST_HEAD(chainhash_table + i);
3665	raw_local_irq_restore(flags);
3666}
3667
3668static void zap_class(struct lock_class *class)
3669{
3670	int i;
3671
3672	/*
3673	 * Remove all dependencies this lock is
3674	 * involved in:
3675	 */
3676	for (i = 0; i < nr_list_entries; i++) {
3677		if (list_entries[i].class == class)
3678			list_del_rcu(&list_entries[i].entry);
3679	}
3680	/*
3681	 * Unhash the class and remove it from the all_lock_classes list:
3682	 */
3683	list_del_rcu(&class->hash_entry);
3684	list_del_rcu(&class->lock_entry);
3685
3686	class->key = NULL;
3687}
3688
3689static inline int within(const void *addr, void *start, unsigned long size)
3690{
3691	return addr >= start && addr < start + size;
3692}
3693
3694void lockdep_free_key_range(void *start, unsigned long size)
3695{
3696	struct lock_class *class, *next;
3697	struct list_head *head;
3698	unsigned long flags;
3699	int i;
3700	int locked;
3701
3702	raw_local_irq_save(flags);
3703	locked = graph_lock();
3704
3705	/*
3706	 * Unhash all classes that were created by this module:
3707	 */
3708	for (i = 0; i < CLASSHASH_SIZE; i++) {
3709		head = classhash_table + i;
3710		if (list_empty(head))
3711			continue;
3712		list_for_each_entry_safe(class, next, head, hash_entry) {
3713			if (within(class->key, start, size))
3714				zap_class(class);
3715			else if (within(class->name, start, size))
3716				zap_class(class);
3717		}
3718	}
3719
3720	if (locked)
3721		graph_unlock();
3722	raw_local_irq_restore(flags);
3723}
3724
3725void lockdep_reset_lock(struct lockdep_map *lock)
3726{
3727	struct lock_class *class, *next;
3728	struct list_head *head;
3729	unsigned long flags;
3730	int i, j;
3731	int locked;
3732
3733	raw_local_irq_save(flags);
3734
3735	/*
3736	 * Remove all classes this lock might have:
3737	 */
3738	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3739		/*
3740		 * If the class exists we look it up and zap it:
3741		 */
3742		class = look_up_lock_class(lock, j);
3743		if (class)
3744			zap_class(class);
3745	}
3746	/*
3747	 * Debug check: in the end all mapped classes should
3748	 * be gone.
3749	 */
3750	locked = graph_lock();
3751	for (i = 0; i < CLASSHASH_SIZE; i++) {
3752		head = classhash_table + i;
3753		if (list_empty(head))
3754			continue;
3755		list_for_each_entry_safe(class, next, head, hash_entry) {
3756			int match = 0;
3757
3758			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
3759				match |= class == lock->class_cache[j];
3760
3761			if (unlikely(match)) {
3762				if (debug_locks_off_graph_unlock())
3763					WARN_ON(1);
3764				goto out_restore;
3765			}
3766		}
3767	}
3768	if (locked)
3769		graph_unlock();
3770
3771out_restore:
3772	raw_local_irq_restore(flags);
3773}
3774
3775void lockdep_init(void)
3776{
3777	int i;
3778
3779	/*
3780	 * Some architectures have their own start_kernel()
3781	 * code which calls lockdep_init(), while we also
3782	 * call lockdep_init() from the start_kernel() itself,
3783	 * and we want to initialize the hashes only once:
3784	 */
3785	if (lockdep_initialized)
3786		return;
3787
3788	for (i = 0; i < CLASSHASH_SIZE; i++)
3789		INIT_LIST_HEAD(classhash_table + i);
3790
3791	for (i = 0; i < CHAINHASH_SIZE; i++)
3792		INIT_LIST_HEAD(chainhash_table + i);
3793
3794	lockdep_initialized = 1;
3795}
3796
3797void __init lockdep_info(void)
3798{
3799	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3800
3801	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
3802	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
3803	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
3804	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
3805	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
3806	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
3807	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
3808
3809	printk(" memory used by lock dependency info: %lu kB\n",
3810		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3811		sizeof(struct list_head) * CLASSHASH_SIZE +
3812		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3813		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3814		sizeof(struct list_head) * CHAINHASH_SIZE
3815#ifdef CONFIG_PROVE_LOCKING
3816		+ sizeof(struct circular_queue)
3817#endif
3818		) / 1024
3819		);
3820
3821	printk(" per task-struct memory footprint: %lu bytes\n",
3822		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3823
3824#ifdef CONFIG_DEBUG_LOCKDEP
3825	if (lockdep_init_error) {
3826		printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3827		printk("Call stack leading to lockdep invocation was:\n");
3828		print_stack_trace(&lockdep_init_trace, 0);
3829	}
3830#endif
3831}
3832
3833static void
3834print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3835		     const void *mem_to, struct held_lock *hlock)
3836{
3837	if (!debug_locks_off())
3838		return;
3839	if (debug_locks_silent)
3840		return;
3841
3842	printk("\n=========================\n");
3843	printk(  "[ BUG: held lock freed! ]\n");
3844	printk(  "-------------------------\n");
3845	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
3846		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
3847	print_lock(hlock);
3848	lockdep_print_held_locks(curr);
3849
3850	printk("\nstack backtrace:\n");
3851	dump_stack();
3852}
3853
3854static inline int not_in_range(const void* mem_from, unsigned long mem_len,
3855				const void* lock_from, unsigned long lock_len)
3856{
3857	return lock_from + lock_len <= mem_from ||
3858		mem_from + mem_len <= lock_from;
3859}
3860
3861/*
3862 * Called when kernel memory is freed (or unmapped), or if a lock
3863 * is destroyed or reinitialized - this code checks whether there is
3864 * any held lock in the memory range of <from> to <to>:
3865 */
3866void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3867{
3868	struct task_struct *curr = current;
3869	struct held_lock *hlock;
3870	unsigned long flags;
3871	int i;
3872
3873	if (unlikely(!debug_locks))
3874		return;
3875
3876	local_irq_save(flags);
3877	for (i = 0; i < curr->lockdep_depth; i++) {
3878		hlock = curr->held_locks + i;
3879
3880		if (not_in_range(mem_from, mem_len, hlock->instance,
3881					sizeof(*hlock->instance)))
3882			continue;
3883
3884		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
3885		break;
3886	}
3887	local_irq_restore(flags);
3888}
3889EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
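/*
 * The memory allocators are the callers here; e.g. kfree() runs,
 * roughly (sketch, argument names vary):
 *
 *	debug_check_no_locks_freed(objp, obj_size);
 *
 * before handing a region back, so freeing a structure that embeds a
 * currently-held lock is caught at the free site instead of
 * surfacing later as a use-after-free.
 */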
3890
3891static void print_held_locks_bug(struct task_struct *curr)
3892{
3893	if (!debug_locks_off())
3894		return;
3895	if (debug_locks_silent)
3896		return;
3897
3898	printk("\n=====================================\n");
3899	printk(  "[ BUG: lock held at task exit time! ]\n");
3900	printk(  "-------------------------------------\n");
3901	printk("%s/%d is exiting with locks still held!\n",
3902		curr->comm, task_pid_nr(curr));
3903	lockdep_print_held_locks(curr);
3904
3905	printk("\nstack backtrace:\n");
3906	dump_stack();
3907}
3908
3909void debug_check_no_locks_held(struct task_struct *task)
3910{
3911	if (unlikely(task->lockdep_depth > 0))
3912		print_held_locks_bug(task);
3913}
3914
3915void debug_show_all_locks(void)
3916{
3917	struct task_struct *g, *p;
3918	int count = 10;
3919	int unlock = 1;
3920
3921	if (unlikely(!debug_locks)) {
3922		printk("INFO: lockdep is turned off.\n");
3923		return;
3924	}
3925	printk("\nShowing all locks held in the system:\n");
3926
3927	/*
3928	 * Here we try to get the tasklist_lock as hard as possible;
3929	 * if not successful after 2 seconds we ignore it (but keep
3930	 * trying). This is to enable a debug printout even if a
3931	 * tasklist_lock-holding task deadlocks or crashes.
3932	 */
3933retry:
3934	if (!read_trylock(&tasklist_lock)) {
3935		if (count == 10)
3936			printk("hm, tasklist_lock locked, retrying... ");
3937		if (count) {
3938			count--;
3939			printk(" #%d", 10-count);
3940			mdelay(200);
3941			goto retry;
3942		}
3943		printk(" ignoring it.\n");
3944		unlock = 0;
3945	} else {
3946		if (count != 10)
3947			printk(KERN_CONT " locked it.\n");
3948	}
3949
3950	do_each_thread(g, p) {
3951		/*
3952		 * It's not reliable to print a task's held locks
3953		 * if it's not sleeping (or if it's not the current
3954		 * task):
3955		 */
3956		if (p->state == TASK_RUNNING && p != current)
3957			continue;
3958		if (p->lockdep_depth)
3959			lockdep_print_held_locks(p);
3960		if (!unlock)
3961			if (read_trylock(&tasklist_lock))
3962				unlock = 1;
3963	} while_each_thread(g, p);
3964
3965	printk("\n");
3966	printk("=============================================\n\n");
3967
3968	if (unlock)
3969		read_unlock(&tasklist_lock);
3970}
3971EXPORT_SYMBOL_GPL(debug_show_all_locks);
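
/*
 * Typical caller (a sketch modelled on the SysRq-'d' handler in
 * drivers/tty/sysrq.c): dump every held lock in the system when the
 * machine looks deadlocked.
 */
static void example_sysrq_show_all_locks(int key)
{
	debug_show_all_locks();
}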
3972
3973/*
3974 * Careful: only use this function if you are sure that
3975 * the task cannot run in parallel!
3976 */
3977void debug_show_held_locks(struct task_struct *task)
3978{
3979	if (unlikely(!debug_locks)) {
3980		printk("INFO: lockdep is turned off.\n");
3981		return;
3982	}
3983	lockdep_print_held_locks(task);
3984}
3985EXPORT_SYMBOL_GPL(debug_show_held_locks);
3986
3987void lockdep_sys_exit(void)
3988{
3989	struct task_struct *curr = current;
3990
3991	if (unlikely(curr->lockdep_depth)) {
3992		if (!debug_locks_off())
3993			return;
3994		printk("\n================================================\n");
3995		printk(  "[ BUG: lock held when returning to user space! ]\n");
3996		printk(  "------------------------------------------------\n");
3997		printk("%s/%d is leaving the kernel with locks still held!\n",
3998				curr->comm, curr->pid);
3999		lockdep_print_held_locks(curr);
4000	}
4001}
4002
4003void lockdep_rcu_dereference(const char *file, const int line)
4004{
4005	struct task_struct *curr = current;
4006
4007#ifndef CONFIG_PROVE_RCU_REPEATEDLY
4008	if (!debug_locks_off())
4009		return;
4010#endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
4011	/* Note: the following can be executed concurrently, so be careful. */
4012	printk("\n===================================================\n");
4013	printk(  "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
4014	printk(  "---------------------------------------------------\n");
4015	printk("%s:%d invoked rcu_dereference_check() without protection!\n",
4016			file, line);
4017	printk("\nother info that might help us debug this:\n\n");
4018	printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
4019	lockdep_print_held_locks(curr);
4020	printk("\nstack backtrace:\n");
4021	dump_stack();
4022}
4023EXPORT_SYMBOL_GPL(lockdep_rcu_dereference);
v3.5.6
 117static inline int debug_locks_off_graph_unlock(void)
 118{
 119	int ret = debug_locks_off();
 120
 121	arch_spin_unlock(&lockdep_lock);
 122
 123	return ret;
 124}
 125
 126static int lockdep_initialized;
 127
 128unsigned long nr_list_entries;
 129static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 130
 131/*
 132 * All data structures here are protected by the global debug_lock.
 133 *
 134 * Mutex key structs only get allocated, once during bootup, and never
 135 * get freed - this significantly simplifies the debugging code.
 136 */
 137unsigned long nr_lock_classes;
 138static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 139
 140static inline struct lock_class *hlock_class(struct held_lock *hlock)
 141{
 142	if (!hlock->class_idx) {
 143		/*
 144		 * Someone passed in garbage, we give up.
 145		 */
 146		DEBUG_LOCKS_WARN_ON(1);
 147		return NULL;
 148	}
 149	return lock_classes + hlock->class_idx - 1;
 150}
 151
 152#ifdef CONFIG_LOCK_STAT
 153static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
 154		      cpu_lock_stats);
 155
 156static inline u64 lockstat_clock(void)
 157{
 158	return local_clock();
 159}
 160
 161static int lock_point(unsigned long points[], unsigned long ip)
 162{
 163	int i;
 164
 165	for (i = 0; i < LOCKSTAT_POINTS; i++) {
 166		if (points[i] == 0) {
 167			points[i] = ip;
 168			break;
 169		}
 170		if (points[i] == ip)
 171			break;
 172	}
 173
 174	return i;
 175}
 176
 177static void lock_time_inc(struct lock_time *lt, u64 time)
 178{
 179	if (time > lt->max)
 180		lt->max = time;
 181
 182	if (time < lt->min || !lt->nr)
 183		lt->min = time;
 184
 185	lt->total += time;
 186	lt->nr++;
 187}
 188
 189static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 190{
 191	if (!src->nr)
 192		return;
 193
 194	if (src->max > dst->max)
 195		dst->max = src->max;
 196
 197	if (src->min < dst->min || !dst->nr)
 198		dst->min = src->min;
 199
 200	dst->total += src->total;
 201	dst->nr += src->nr;
 202}
 203
 204struct lock_class_stats lock_stats(struct lock_class *class)
 205{
 206	struct lock_class_stats stats;
 207	int cpu, i;
 208
 209	memset(&stats, 0, sizeof(struct lock_class_stats));
 210	for_each_possible_cpu(cpu) {
 211		struct lock_class_stats *pcs =
 212			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 213
 214		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
 215			stats.contention_point[i] += pcs->contention_point[i];
 216
 217		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
 218			stats.contending_point[i] += pcs->contending_point[i];
 219
 220		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
 221		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
 222
 223		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
 224		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
 225
 226		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
 227			stats.bounces[i] += pcs->bounces[i];
 228	}
 229
 230	return stats;
 231}
 232
 233void clear_lock_stats(struct lock_class *class)
 234{
 235	int cpu;
 236
 237	for_each_possible_cpu(cpu) {
 238		struct lock_class_stats *cpu_stats =
 239			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 240
 241		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
 242	}
 243	memset(class->contention_point, 0, sizeof(class->contention_point));
 244	memset(class->contending_point, 0, sizeof(class->contending_point));
 245}
 246
 247static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 248{
 249	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
 250}
 251
 252static void put_lock_stats(struct lock_class_stats *stats)
 253{
 254	put_cpu_var(cpu_lock_stats);
 255}
 256
 257static void lock_release_holdtime(struct held_lock *hlock)
 258{
 259	struct lock_class_stats *stats;
 260	u64 holdtime;
 261
 262	if (!lock_stat)
 263		return;
 264
 265	holdtime = lockstat_clock() - hlock->holdtime_stamp;
 266
 267	stats = get_lock_stats(hlock_class(hlock));
 268	if (hlock->read)
 269		lock_time_inc(&stats->read_holdtime, holdtime);
 270	else
 271		lock_time_inc(&stats->write_holdtime, holdtime);
 272	put_lock_stats(stats);
 273}
 274#else
 275static inline void lock_release_holdtime(struct held_lock *hlock)
 276{
 277}
 278#endif
 279
 280/*
 281 * We keep a global list of all lock classes. The list only grows,
 282 * never shrinks. The list is only accessed with the lockdep
 283 * spinlock lock held.
 284 */
 285LIST_HEAD(all_lock_classes);
 286
 287/*
 288 * The lockdep classes are in a hash-table as well, for fast lookup:
 289 */
 290#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
 291#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
 292#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
 293#define classhashentry(key)	(classhash_table + __classhashfn((key)))
 294
 295static struct list_head classhash_table[CLASSHASH_SIZE];
 296
 297/*
 298 * We put the lock dependency chains into a hash-table as well, to cache
 299 * their existence:
 300 */
 301#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
 302#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
 303#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
 304#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
 305
 306static struct list_head chainhash_table[CHAINHASH_SIZE];
 307
 308/*
 309 * The hash key of the lock dependency chains is a hash itself too:
 310 * it's a hash of all locks taken up to that lock, including that lock.
 311 * It's a 64-bit hash, because it's important for the keys to be
 312 * unique.
 313 */
 314#define iterate_chain_key(key1, key2) \
 315	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
 316	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
 317	(key2))
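
/*
 * Illustrative sketch (hypothetical class indices - the real caller
 * feeds in hlock->class_idx): the per-context chain key accumulates one
 * class index per acquisition, so taking the same classes in a different
 * order yields a different 64-bit key and thus a different chain-cache
 * entry.
 */
static u64 example_chain_key(void)
{
	u64 chain_key = 0;				/* empty held-lock stack */

	chain_key = iterate_chain_key(chain_key, 17);	/* class #17 taken */
	chain_key = iterate_chain_key(chain_key, 42);	/* class #42 taken */
	return chain_key;
}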
 318
 319void lockdep_off(void)
 320{
 321	current->lockdep_recursion++;
 322}
 323EXPORT_SYMBOL(lockdep_off);
 324
 325void lockdep_on(void)
 326{
 327	current->lockdep_recursion--;
 328}
 329EXPORT_SYMBOL(lockdep_on);
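
/*
 * Usage sketch (hypothetical caller): code whose locking cannot be
 * modelled by the validator can opt out temporarily. The calls nest via
 * the per-task lockdep_recursion counter, so they must stay balanced.
 */
static void example_opt_out(void)
{
	lockdep_off();
	/* acquisitions here are invisible to lockdep */
	lockdep_on();
}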
 330
 331/*
 332 * Debugging switches:
 333 */
 334
 335#define VERBOSE			0
 336#define VERY_VERBOSE		0
 337
 338#if VERBOSE
 339# define HARDIRQ_VERBOSE	1
 340# define SOFTIRQ_VERBOSE	1
 341# define RECLAIM_VERBOSE	1
 342#else
 343# define HARDIRQ_VERBOSE	0
 344# define SOFTIRQ_VERBOSE	0
 345# define RECLAIM_VERBOSE	0
 346#endif
 347
 348#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
 349/*
 350 * Quick filtering for interesting events:
 351 */
 352static int class_filter(struct lock_class *class)
 353{
 354#if 0
 355	/* Example */
 356	if (class->name_version == 1 &&
 357			!strcmp(class->name, "lockname"))
 358		return 1;
 359	if (class->name_version == 1 &&
 360			!strcmp(class->name, "&struct->lockfield"))
 361		return 1;
 362#endif
 363	/* Filter everything else. Return 1 to allow everything else */
 364	return 0;
 365}
 366#endif
 367
 368static int verbose(struct lock_class *class)
 369{
 370#if VERBOSE
 371	return class_filter(class);
 372#endif
 373	return 0;
 374}
 375
 376/*
 377 * Stack-trace: tightly packed array of stack backtrace
 378 * addresses. Protected by the graph_lock.
 379 */
 380unsigned long nr_stack_trace_entries;
 381static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
 382
 383static int save_trace(struct stack_trace *trace)
 384{
 385	trace->nr_entries = 0;
 386	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
 387	trace->entries = stack_trace + nr_stack_trace_entries;
 388
 389	trace->skip = 3;
 390
 391	save_stack_trace(trace);
 392
 393	/*
 394	 * Some daft arches put -1 at the end to indicate it's a full trace.
 395	 *
 396	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
 397	 * complete trace that maxes out the entries provided will be reported
 398	 * as incomplete, friggin useless </rant>
 399	 */
 400	if (trace->nr_entries != 0 &&
 401	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
 402		trace->nr_entries--;
 403
 404	trace->max_entries = trace->nr_entries;
 405
 406	nr_stack_trace_entries += trace->nr_entries;
 407
 408	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
 409		if (!debug_locks_off_graph_unlock())
 410			return 0;
 411
 412		printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
 413		printk("turning off the locking correctness validator.\n");
 414		dump_stack();
 415
 416		return 0;
 417	}
 418
 419	return 1;
 420}
 421
 422unsigned int nr_hardirq_chains;
 423unsigned int nr_softirq_chains;
 424unsigned int nr_process_chains;
 425unsigned int max_lockdep_depth;
 426
 427#ifdef CONFIG_DEBUG_LOCKDEP
 428/*
 429 * We cannot printk in early bootup code. Even early_printk()
 430 * might not work. So we mark any initialization errors and printk
 431 * about it later on, in lockdep_info().
 432 */
 433static int lockdep_init_error;
 434static const char *lock_init_error;
 435static unsigned long lockdep_init_trace_data[20];
 436static struct stack_trace lockdep_init_trace = {
 437	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
 438	.entries = lockdep_init_trace_data,
 439};
 440
 441/*
 442 * Various lockdep statistics:
 443 */
 444DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
 445#endif
 446
 447/*
 448 * Locking printouts:
 449 */
 450
 451#define __USAGE(__STATE)						\
 452	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
 453	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
 454	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
 455	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
 456
 457static const char *usage_str[] =
 458{
 459#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
 460#include "lockdep_states.h"
 461#undef LOCKDEP_STATE
 462	[LOCK_USED] = "INITIAL USE",
 463};
 464
 465const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
 466{
 467	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 468}
 469
 470static inline unsigned long lock_flag(enum lock_usage_bit bit)
 471{
 472	return 1UL << bit;
 473}
 474
 475static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
 476{
 477	char c = '.';
 478
 479	if (class->usage_mask & lock_flag(bit + 2))
 480		c = '+';
 481	if (class->usage_mask & lock_flag(bit)) {
 482		c = '-';
 483		if (class->usage_mask & lock_flag(bit + 2))
 484			c = '?';
 485	}
 486
 487	return c;
 488}
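
/*
 * Reading the four values of 'c' above, taking the HARDIRQ pair as an
 * example (bit == LOCK_USED_IN_HARDIRQ, bit + 2 == LOCK_ENABLED_HARDIRQ):
 *
 *   '.'  never used in hardirq context, never held with hardirqs enabled
 *   '+'  held with hardirqs enabled at least once
 *   '-'  acquired in hardirq context at least once
 *   '?'  both of the above - the potentially deadly combination
 */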
 489
 490void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
 491{
 492	int i = 0;
 493
 494#define LOCKDEP_STATE(__STATE) 						\
 495	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
 496	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
 497#include "lockdep_states.h"
 498#undef LOCKDEP_STATE
 499
 500	usage[i] = '\0';
 501}
 502
 503static void __print_lock_name(struct lock_class *class)
 504{
 505	char str[KSYM_NAME_LEN];
 506	const char *name;
 507
 508	name = class->name;
 509	if (!name) {
 510		name = __get_key_name(class->key, str);
 511		printk("%s", name);
 512	} else {
 513		printk("%s", name);
 514		if (class->name_version > 1)
 515			printk("#%d", class->name_version);
 516		if (class->subclass)
 517			printk("/%d", class->subclass);
 518	}
 519}
 520
 521static void print_lock_name(struct lock_class *class)
 522{
 523	char usage[LOCK_USAGE_CHARS];
 524
 525	get_usage_chars(class, usage);
 526
 527	printk(" (");
 528	__print_lock_name(class);
 529	printk("){%s}", usage);
 530}
 531
 532static void print_lockdep_cache(struct lockdep_map *lock)
 533{
 534	const char *name;
 535	char str[KSYM_NAME_LEN];
 536
 537	name = lock->name;
 538	if (!name)
 539		name = __get_key_name(lock->key->subkeys, str);
 540
 541	printk("%s", name);
 542}
 543
 544static void print_lock(struct held_lock *hlock)
 545{
 546	print_lock_name(hlock_class(hlock));
 547	printk(", at: ");
 548	print_ip_sym(hlock->acquire_ip);
 549}
 550
 551static void lockdep_print_held_locks(struct task_struct *curr)
 552{
 553	int i, depth = curr->lockdep_depth;
 554
 555	if (!depth) {
 556		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
 557		return;
 558	}
 559	printk("%d lock%s held by %s/%d:\n",
 560		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
 561
 562	for (i = 0; i < depth; i++) {
 563		printk(" #%d: ", i);
 564		print_lock(curr->held_locks + i);
 565	}
 566}
 567
 568static void print_kernel_ident(void)
 569{
 570	printk("%s %.*s %s\n", init_utsname()->release,
 571		(int)strcspn(init_utsname()->version, " "),
 572		init_utsname()->version,
 573		print_tainted());
 574}
 575
 576static int very_verbose(struct lock_class *class)
 577{
 578#if VERY_VERBOSE
 579	return class_filter(class);
 580#endif
 581	return 0;
 582}
 583
 584/*
 585 * Is this the address of a static object:
 586 */
 587static int static_obj(void *obj)
 588{
 589	unsigned long start = (unsigned long) &_stext,
 590		      end   = (unsigned long) &_end,
 591		      addr  = (unsigned long) obj;
 592
 593	/*
 594	 * static variable?
 595	 */
 596	if ((addr >= start) && (addr < end))
 597		return 1;
 598
 599	if (arch_is_kernel_data(addr))
 600		return 1;
 601
 602	/*
 603	 * in-kernel percpu var?
 604	 */
 605	if (is_kernel_percpu_address(addr))
 606		return 1;
 607
 608	/*
 609	 * module static or percpu var?
 610	 */
 611	return is_module_address(addr) || is_module_percpu_address(addr);
 612}
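
/*
 * Why non-static lock *objects* are still fine - a sketch (hypothetical
 * code): for a lock living in kmalloc()ed memory, the *_init() macro at
 * the call site supplies a static lock_class_key, so the static_obj()
 * check on the key succeeds when the class is registered below.
 */
struct bar {
	spinlock_t lock;
};

static struct bar *make_bar(gfp_t gfp)
{
	struct bar *b = kmalloc(sizeof(*b), gfp);

	if (b)
		spin_lock_init(&b->lock);	/* static key, dynamic lock */
	return b;
}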
 613
 614/*
 615 * To make lock name printouts unique, we calculate a per-name
 616 * class->name_version generation counter:
 617 */
 618static int count_matching_names(struct lock_class *new_class)
 619{
 620	struct lock_class *class;
 621	int count = 0;
 622
 623	if (!new_class->name)
 624		return 0;
 625
 626	list_for_each_entry(class, &all_lock_classes, lock_entry) {
 627		if (new_class->key - new_class->subclass == class->key)
 628			return class->name_version;
 629		if (class->name && !strcmp(class->name, new_class->name))
 630			count = max(count, class->name_version);
 631	}
 632
 633	return count + 1;
 634}
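
/*
 * E.g. (sketch): if two unrelated classes both print as "&dev->lock",
 * the second one registered gets name_version 2 and appears in reports
 * as "&dev->lock#2" (see __print_lock_name() above).
 */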
 635
 636/*
 637 * Register a lock's class in the hash-table, if the class is not present
 638 * yet. Otherwise we look it up. We cache the result in the lock object
 639 * itself, so actual lookup of the hash should be once per lock object.
 640 */
 641static inline struct lock_class *
 642look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 643{
 644	struct lockdep_subclass_key *key;
 645	struct list_head *hash_head;
 646	struct lock_class *class;
 647
 648#ifdef CONFIG_DEBUG_LOCKDEP
 649	/*
 650	 * If the architecture calls into lockdep before initializing
 651	 * the hashes then we'll warn about it later. (we cannot printk
 652	 * right now)
 653	 */
 654	if (unlikely(!lockdep_initialized)) {
 655		lockdep_init();
 656		lockdep_init_error = 1;
 657		lock_init_error = lock->name;
 658		save_stack_trace(&lockdep_init_trace);
 659	}
 660#endif
 661
 662	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 663		debug_locks_off();
 664		printk(KERN_ERR
 665			"BUG: looking up invalid subclass: %u\n", subclass);
 666		printk(KERN_ERR
 667			"turning off the locking correctness validator.\n");
 668		dump_stack();
 669		return NULL;
 670	}
 671
 672	/*
 673	 * Static locks do not have their class-keys yet - for them the key
 674	 * is the lock object itself:
 675	 */
 676	if (unlikely(!lock->key))
 677		lock->key = (void *)lock;
 678
 679	/*
 680	 * NOTE: the class-key must be unique. For dynamic locks, a static
 681	 * lock_class_key variable is passed in through the mutex_init()
 682	 * (or spin_lock_init()) call - which acts as the key. For static
 683	 * locks we use the lock object itself as the key.
 684	 */
 685	BUILD_BUG_ON(sizeof(struct lock_class_key) >
 686			sizeof(struct lockdep_map));
 687
 688	key = lock->key->subkeys + subclass;
 689
 690	hash_head = classhashentry(key);
 691
 692	/*
 693	 * We can walk the hash lockfree, because the hash only
 694	 * grows, and we are careful when adding entries to the end:
 695	 */
 696	list_for_each_entry(class, hash_head, hash_entry) {
 697		if (class->key == key) {
 698			/*
 699			 * Huh! same key, different name? Did someone trample
 700			 * on some memory? We're most confused.
 701			 */
 702			WARN_ON_ONCE(class->name != lock->name);
 703			return class;
 704		}
 705	}
 706
 707	return NULL;
 708}
 709
 710/*
 711 * Register a lock's class in the hash-table, if the class is not present
 712 * yet. Otherwise we look it up. We cache the result in the lock object
 713 * itself, so actual lookup of the hash should be once per lock object.
 714 */
 715static inline struct lock_class *
 716register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 717{
 718	struct lockdep_subclass_key *key;
 719	struct list_head *hash_head;
 720	struct lock_class *class;
 721	unsigned long flags;
 722
 723	class = look_up_lock_class(lock, subclass);
 724	if (likely(class))
 725		goto out_set_class_cache;
 726
 727	/*
 728	 * Debug-check: all keys must be persistent!
 729 	 */
 730	if (!static_obj(lock->key)) {
 731		debug_locks_off();
 732		printk("INFO: trying to register non-static key.\n");
 733		printk("the code is fine but needs lockdep annotation.\n");
 734		printk("turning off the locking correctness validator.\n");
 735		dump_stack();
 736
 737		return NULL;
 738	}
 739
 740	key = lock->key->subkeys + subclass;
 741	hash_head = classhashentry(key);
 742
 743	raw_local_irq_save(flags);
 744	if (!graph_lock()) {
 745		raw_local_irq_restore(flags);
 746		return NULL;
 747	}
 748	/*
 749	 * We have to do the hash-walk again, to avoid races
 750	 * with another CPU:
 751	 */
 752	list_for_each_entry(class, hash_head, hash_entry)
 753		if (class->key == key)
 754			goto out_unlock_set;
 755	/*
 756	 * Allocate a new key from the static array, and add it to
 757	 * the hash:
 758	 */
 759	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
 760		if (!debug_locks_off_graph_unlock()) {
 761			raw_local_irq_restore(flags);
 762			return NULL;
 763		}
 764		raw_local_irq_restore(flags);
 765
 766		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
 767		printk("turning off the locking correctness validator.\n");
 768		dump_stack();
 769		return NULL;
 770	}
 771	class = lock_classes + nr_lock_classes++;
 772	debug_atomic_inc(nr_unused_locks);
 773	class->key = key;
 774	class->name = lock->name;
 775	class->subclass = subclass;
 776	INIT_LIST_HEAD(&class->lock_entry);
 777	INIT_LIST_HEAD(&class->locks_before);
 778	INIT_LIST_HEAD(&class->locks_after);
 779	class->name_version = count_matching_names(class);
 780	/*
 781	 * We use RCU's safe list-add method to make
 782	 * parallel walking of the hash-list safe:
 783	 */
 784	list_add_tail_rcu(&class->hash_entry, hash_head);
 785	/*
 786	 * Add it to the global list of classes:
 787	 */
 788	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
 789
 790	if (verbose(class)) {
 791		graph_unlock();
 792		raw_local_irq_restore(flags);
 793
 794		printk("\nnew class %p: %s", class->key, class->name);
 795		if (class->name_version > 1)
 796			printk("#%d", class->name_version);
 797		printk("\n");
 798		dump_stack();
 799
 800		raw_local_irq_save(flags);
 801		if (!graph_lock()) {
 802			raw_local_irq_restore(flags);
 803			return NULL;
 804		}
 805	}
 806out_unlock_set:
 807	graph_unlock();
 808	raw_local_irq_restore(flags);
 809
 810out_set_class_cache:
 811	if (!subclass || force)
 812		lock->class_cache[0] = class;
 813	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
 814		lock->class_cache[subclass] = class;
 815
 816	/*
 817	 * Hash collision, did we smoke some? We found a class with a matching
 818	 * hash but the subclass -- which is hashed in -- didn't match.
 819	 */
 820	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
 821		return NULL;
 822
 823	return class;
 824}
 825
 826#ifdef CONFIG_PROVE_LOCKING
 827/*
 828 * Allocate a lockdep entry. (assumes the graph_lock is held, returns
 829 * with NULL on failure)
 830 */
 831static struct lock_list *alloc_list_entry(void)
 832{
 833	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
 834		if (!debug_locks_off_graph_unlock())
 835			return NULL;
 836
 837		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
 838		printk("turning off the locking correctness validator.\n");
 839		dump_stack();
 840		return NULL;
 841	}
 842	return list_entries + nr_list_entries++;
 843}
 844
 845/*
 846 * Add a new dependency to the head of the list:
 847 */
 848static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 849			    struct list_head *head, unsigned long ip,
 850			    int distance, struct stack_trace *trace)
 851{
 852	struct lock_list *entry;
 853	/*
 854	 * Lock not present yet - get a new dependency struct and
 855	 * add it to the list:
 856	 */
 857	entry = alloc_list_entry();
 858	if (!entry)
 859		return 0;
 860
 861	entry->class = this;
 862	entry->distance = distance;
 863	entry->trace = *trace;
 864	/*
 865	 * Since we never remove from the dependency list, the list can
 866	 * be walked lockless by other CPUs, it's only allocation
 867	 * that must be protected by the spinlock. But this also means
 868	 * we must make new entries visible only once writes to the
 869	 * entry become visible - hence the RCU op:
 870	 */
 871	list_add_tail_rcu(&entry->entry, head);
 872
 873	return 1;
 874}
 875
 876/*
 877 * For good efficiency of the modulo operation, we use a power of 2
 878 */
 879#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
 880#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
 881
 882/*
 883 * The circular_queue and helpers are used to implement the
 884 * breadth-first search (BFS) algorithm, by which we can build
 885 * the shortest path from the next lock to be acquired to a
 886 * previously held lock, if there is a cycle between them.
 887 */
 888struct circular_queue {
 889	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
 890	unsigned int  front, rear;
 891};
 892
 893static struct circular_queue lock_cq;
 894
 895unsigned int max_bfs_queue_depth;
 896
 897static unsigned int lockdep_dependency_gen_id;
 898
 899static inline void __cq_init(struct circular_queue *cq)
 900{
 901	cq->front = cq->rear = 0;
 902	lockdep_dependency_gen_id++;
 903}
 904
 905static inline int __cq_empty(struct circular_queue *cq)
 906{
 907	return (cq->front == cq->rear);
 908}
 909
 910static inline int __cq_full(struct circular_queue *cq)
 911{
 912	return ((cq->rear + 1) & CQ_MASK) == cq->front;
 913}
 914
 915static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
 916{
 917	if (__cq_full(cq))
 918		return -1;
 919
 920	cq->element[cq->rear] = elem;
 921	cq->rear = (cq->rear + 1) & CQ_MASK;
 922	return 0;
 923}
 924
 925static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
 926{
 927	if (__cq_empty(cq))
 928		return -1;
 929
 930	*elem = cq->element[cq->front];
 931	cq->front = (cq->front + 1) & CQ_MASK;
 932	return 0;
 933}
 934
 935static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
 936{
 937	return (cq->rear - cq->front) & CQ_MASK;
 938}
 939
 940static inline void mark_lock_accessed(struct lock_list *lock,
 941					struct lock_list *parent)
 942{
 943	unsigned long nr;
 944
 945	nr = lock - list_entries;
 946	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
 947	lock->parent = parent;
 948	lock->class->dep_gen_id = lockdep_dependency_gen_id;
 949}
 950
 951static inline unsigned long lock_accessed(struct lock_list *lock)
 952{
 953	unsigned long nr;
 954
 955	nr = lock - list_entries;
 956	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
 957	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
 958}
 959
 960static inline struct lock_list *get_lock_parent(struct lock_list *child)
 961{
 962	return child->parent;
 963}
 964
 965static inline int get_lock_depth(struct lock_list *child)
 966{
 967	int depth = 0;
 968	struct lock_list *parent;
 969
 970	while ((parent = get_lock_parent(child))) {
 971		child = parent;
 972		depth++;
 973	}
 974	return depth;
 975}
 976
 977static int __bfs(struct lock_list *source_entry,
 978		 void *data,
 979		 int (*match)(struct lock_list *entry, void *data),
 980		 struct lock_list **target_entry,
 981		 int forward)
 982{
 983	struct lock_list *entry;
 984	struct list_head *head;
 985	struct circular_queue *cq = &lock_cq;
 986	int ret = 1;
 987
 988	if (match(source_entry, data)) {
 989		*target_entry = source_entry;
 990		ret = 0;
 991		goto exit;
 992	}
 993
 994	if (forward)
 995		head = &source_entry->class->locks_after;
 996	else
 997		head = &source_entry->class->locks_before;
 998
 999	if (list_empty(head))
1000		goto exit;
1001
1002	__cq_init(cq);
1003	__cq_enqueue(cq, (unsigned long)source_entry);
1004
1005	while (!__cq_empty(cq)) {
1006		struct lock_list *lock;
1007
1008		__cq_dequeue(cq, (unsigned long *)&lock);
1009
1010		if (!lock->class) {
1011			ret = -2;
1012			goto exit;
1013		}
1014
1015		if (forward)
1016			head = &lock->class->locks_after;
1017		else
1018			head = &lock->class->locks_before;
1019
1020		list_for_each_entry(entry, head, entry) {
1021			if (!lock_accessed(entry)) {
1022				unsigned int cq_depth;
1023				mark_lock_accessed(entry, lock);
1024				if (match(entry, data)) {
1025					*target_entry = entry;
1026					ret = 0;
1027					goto exit;
1028				}
1029
1030				if (__cq_enqueue(cq, (unsigned long)entry)) {
1031					ret = -1;
1032					goto exit;
1033				}
1034				cq_depth = __cq_get_elem_count(cq);
1035				if (max_bfs_queue_depth < cq_depth)
1036					max_bfs_queue_depth = cq_depth;
1037			}
1038		}
1039	}
1040exit:
1041	return ret;
1042}
1043
1044static inline int __bfs_forwards(struct lock_list *src_entry,
1045			void *data,
1046			int (*match)(struct lock_list *entry, void *data),
1047			struct lock_list **target_entry)
1048{
1049	return __bfs(src_entry, data, match, target_entry, 1);
1050
1051}
1052
1053static inline int __bfs_backwards(struct lock_list *src_entry,
1054			void *data,
1055			int (*match)(struct lock_list *entry, void *data),
1056			struct lock_list **target_entry)
1057{
1058	return __bfs(src_entry, data, match, target_entry, 0);
1059
1060}
1061
1062/*
1063 * Breadth-first, forwards-direction lock-dependency checking, used for
1064 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1065 * checking.
1066 */
1067
1068/*
1069 * Print a dependency chain entry (this is only done when a deadlock
1070 * has been detected):
1071 */
1072static noinline int
1073print_circular_bug_entry(struct lock_list *target, int depth)
1074{
1075	if (debug_locks_silent)
1076		return 0;
1077	printk("\n-> #%u", depth);
1078	print_lock_name(target->class);
1079	printk(":\n");
1080	print_stack_trace(&target->trace, 6);
1081
1082	return 0;
1083}
1084
1085static void
1086print_circular_lock_scenario(struct held_lock *src,
1087			     struct held_lock *tgt,
1088			     struct lock_list *prt)
1089{
1090	struct lock_class *source = hlock_class(src);
1091	struct lock_class *target = hlock_class(tgt);
1092	struct lock_class *parent = prt->class;
1093
1094	/*
1095	 * A direct locking problem where unsafe_class lock is taken
1096	 * directly by safe_class lock, then all we need to show
1097	 * is the deadlock scenario, as it is obvious that the
1098	 * unsafe lock is taken under the safe lock.
1099	 *
1100	 * But if there is a chain instead, where the safe lock takes
1101	 * an intermediate lock (middle_class) where this lock is
1102	 * not the same as the safe lock, then the lock chain is
1103	 * used to describe the problem. Otherwise we would need
1104	 * to show a different CPU case for each link in the chain
1105	 * from the safe_class lock to the unsafe_class lock.
1106	 */
1107	if (parent != source) {
1108		printk("Chain exists of:\n  ");
1109		__print_lock_name(source);
1110		printk(" --> ");
1111		__print_lock_name(parent);
1112		printk(" --> ");
1113		__print_lock_name(target);
1114		printk("\n\n");
1115	}
1116
1117	printk(" Possible unsafe locking scenario:\n\n");
1118	printk("       CPU0                    CPU1\n");
1119	printk("       ----                    ----\n");
1120	printk("  lock(");
1121	__print_lock_name(target);
1122	printk(");\n");
1123	printk("                               lock(");
1124	__print_lock_name(parent);
1125	printk(");\n");
1126	printk("                               lock(");
1127	__print_lock_name(target);
1128	printk(");\n");
1129	printk("  lock(");
1130	__print_lock_name(source);
1131	printk(");\n");
1132	printk("\n *** DEADLOCK ***\n\n");
1133}
1134
1135/*
1136 * When a circular dependency is detected, print the
1137 * header first:
1138 */
1139static noinline int
1140print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1141			struct held_lock *check_src,
1142			struct held_lock *check_tgt)
1143{
1144	struct task_struct *curr = current;
1145
1146	if (debug_locks_silent)
1147		return 0;
1148
1149	printk("\n");
1150	printk("======================================================\n");
1151	printk("[ INFO: possible circular locking dependency detected ]\n");
1152	print_kernel_ident();
1153	printk("-------------------------------------------------------\n");
1154	printk("%s/%d is trying to acquire lock:\n",
1155		curr->comm, task_pid_nr(curr));
1156	print_lock(check_src);
1157	printk("\nbut task is already holding lock:\n");
1158	print_lock(check_tgt);
1159	printk("\nwhich lock already depends on the new lock.\n\n");
1160	printk("\nthe existing dependency chain (in reverse order) is:\n");
1161
1162	print_circular_bug_entry(entry, depth);
1163
1164	return 0;
1165}
1166
1167static inline int class_equal(struct lock_list *entry, void *data)
1168{
1169	return entry->class == data;
1170}
1171
1172static noinline int print_circular_bug(struct lock_list *this,
1173				struct lock_list *target,
1174				struct held_lock *check_src,
1175				struct held_lock *check_tgt)
1176{
1177	struct task_struct *curr = current;
1178	struct lock_list *parent;
1179	struct lock_list *first_parent;
1180	int depth;
1181
1182	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1183		return 0;
1184
1185	if (!save_trace(&this->trace))
1186		return 0;
1187
1188	depth = get_lock_depth(target);
1189
1190	print_circular_bug_header(target, depth, check_src, check_tgt);
1191
1192	parent = get_lock_parent(target);
1193	first_parent = parent;
1194
1195	while (parent) {
1196		print_circular_bug_entry(parent, --depth);
1197		parent = get_lock_parent(parent);
1198	}
1199
1200	printk("\nother info that might help us debug this:\n\n");
1201	print_circular_lock_scenario(check_src, check_tgt,
1202				     first_parent);
1203
1204	lockdep_print_held_locks(curr);
1205
1206	printk("\nstack backtrace:\n");
1207	dump_stack();
1208
1209	return 0;
1210}
1211
1212static noinline int print_bfs_bug(int ret)
1213{
1214	if (!debug_locks_off_graph_unlock())
1215		return 0;
1216
1217	/*
1218	 * Breadth-first-search failed, graph got corrupted?
1219	 */
1220	WARN(1, "lockdep bfs error:%d\n", ret);
1221
1222	return 0;
1223}
1224
1225static int noop_count(struct lock_list *entry, void *data)
1226{
1227	(*(unsigned long *)data)++;
1228	return 0;
1229}
1230
1231unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1232{
1233	unsigned long  count = 0;
1234	struct lock_list *uninitialized_var(target_entry);
1235
1236	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1237
1238	return count;
1239}
1240unsigned long lockdep_count_forward_deps(struct lock_class *class)
1241{
1242	unsigned long ret, flags;
1243	struct lock_list this;
1244
1245	this.parent = NULL;
1246	this.class = class;
1247
1248	local_irq_save(flags);
1249	arch_spin_lock(&lockdep_lock);
1250	ret = __lockdep_count_forward_deps(&this);
1251	arch_spin_unlock(&lockdep_lock);
1252	local_irq_restore(flags);
1253
1254	return ret;
1255}
1256
1257unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1258{
1259	unsigned long  count = 0;
1260	struct lock_list *uninitialized_var(target_entry);
1261
1262	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1263
1264	return count;
1265}
1266
1267unsigned long lockdep_count_backward_deps(struct lock_class *class)
1268{
1269	unsigned long ret, flags;
1270	struct lock_list this;
1271
1272	this.parent = NULL;
1273	this.class = class;
1274
1275	local_irq_save(flags);
1276	arch_spin_lock(&lockdep_lock);
1277	ret = __lockdep_count_backward_deps(&this);
1278	arch_spin_unlock(&lockdep_lock);
1279	local_irq_restore(flags);
1280
1281	return ret;
1282}
1283
1284/*
1285 * Prove that the dependency graph starting at <entry> can not
1286 * lead to <target>. Print an error and return 0 if it does.
1287 */
1288static noinline int
1289check_noncircular(struct lock_list *root, struct lock_class *target,
1290		struct lock_list **target_entry)
1291{
1292	int result;
1293
1294	debug_atomic_inc(nr_cyclic_checks);
1295
1296	result = __bfs_forwards(root, target, class_equal, target_entry);
1297
1298	return result;
1299}
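
/*
 * The classic case this catches - an ABBA inversion (sketch):
 *
 *	CPU0: lock(A); lock(B);		// records the A -> B dependency
 *	CPU1: lock(B); lock(A);		// would record B -> A
 *
 * With A -> B already in the graph, the forwards BFS rooted at A reaches
 * B, so adding B -> A would close a cycle and the bug is reported before
 * any real deadlock has to happen.
 */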
1300
1301#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1302/*
1303 * Forwards and backwards subgraph searching, for the purposes of
1304 * proving that two subgraphs can be connected by a new dependency
1305 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1306 */
1307
1308static inline int usage_match(struct lock_list *entry, void *bit)
1309{
1310	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1311}
1312
1313
1314
1315/*
1316 * Find a node in the forwards-direction dependency sub-graph starting
1317 * at @root->class that matches @bit.
1318 *
1319 * Return 0 if such a node exists in the subgraph, and put that node
1320 * into *@target_entry.
1321 *
1322 * Return 1 otherwise and keep *@target_entry unchanged.
1323 * Return <0 on error.
1324 */
1325static int
1326find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1327			struct lock_list **target_entry)
1328{
1329	int result;
1330
1331	debug_atomic_inc(nr_find_usage_forwards_checks);
1332
1333	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1334
1335	return result;
1336}
1337
1338/*
1339 * Find a node in the backwards-direction dependency sub-graph starting
1340 * at @root->class that matches @bit.
1341 *
1342 * Return 0 if such a node exists in the subgraph, and put that node
1343 * into *@target_entry.
1344 *
1345 * Return 1 otherwise and keep *@target_entry unchanged.
1346 * Return <0 on error.
1347 */
1348static int
1349find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1350			struct lock_list **target_entry)
1351{
1352	int result;
1353
1354	debug_atomic_inc(nr_find_usage_backwards_checks);
1355
1356	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1357
1358	return result;
1359}
1360
1361static void print_lock_class_header(struct lock_class *class, int depth)
1362{
1363	int bit;
1364
1365	printk("%*s->", depth, "");
1366	print_lock_name(class);
1367	printk(" ops: %lu", class->ops);
1368	printk(" {\n");
1369
1370	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1371		if (class->usage_mask & (1 << bit)) {
1372			int len = depth;
1373
1374			len += printk("%*s   %s", depth, "", usage_str[bit]);
1375			len += printk(" at:\n");
1376			print_stack_trace(class->usage_traces + bit, len);
1377		}
1378	}
1379	printk("%*s }\n", depth, "");
1380
1381	printk("%*s ... key      at: ",depth,"");
1382	print_ip_sym((unsigned long)class->key);
1383}
1384
1385/*
1386 * printk the shortest lock dependencies from @start to @end in reverse order:
1387 */
1388static void __used
1389print_shortest_lock_dependencies(struct lock_list *leaf,
1390				struct lock_list *root)
1391{
1392	struct lock_list *entry = leaf;
1393	int depth;
1394
1395	/* compute the depth from the tree generated by the BFS */
1396	depth = get_lock_depth(leaf);
1397
1398	do {
1399		print_lock_class_header(entry->class, depth);
1400		printk("%*s ... acquired at:\n", depth, "");
1401		print_stack_trace(&entry->trace, 2);
1402		printk("\n");
1403
1404		if (depth == 0 && (entry != root)) {
1405			printk("lockdep:%s bad path found in chain graph\n", __func__);
1406			break;
1407		}
1408
1409		entry = get_lock_parent(entry);
1410		depth--;
1411	} while (entry && (depth >= 0));
1412
1413	return;
1414}
1415
1416static void
1417print_irq_lock_scenario(struct lock_list *safe_entry,
1418			struct lock_list *unsafe_entry,
1419			struct lock_class *prev_class,
1420			struct lock_class *next_class)
1421{
1422	struct lock_class *safe_class = safe_entry->class;
1423	struct lock_class *unsafe_class = unsafe_entry->class;
1424	struct lock_class *middle_class = prev_class;
1425
1426	if (middle_class == safe_class)
1427		middle_class = next_class;
1428
1429	/*
1430	 * A direct locking problem where unsafe_class lock is taken
1431	 * directly by safe_class lock, then all we need to show
1432	 * is the deadlock scenario, as it is obvious that the
1433	 * unsafe lock is taken under the safe lock.
1434	 *
1435	 * But if there is a chain instead, where the safe lock takes
1436	 * an intermediate lock (middle_class) where this lock is
1437	 * not the same as the safe lock, then the lock chain is
1438	 * used to describe the problem. Otherwise we would need
1439	 * to show a different CPU case for each link in the chain
1440	 * from the safe_class lock to the unsafe_class lock.
1441	 */
1442	if (middle_class != unsafe_class) {
1443		printk("Chain exists of:\n  ");
1444		__print_lock_name(safe_class);
1445		printk(" --> ");
1446		__print_lock_name(middle_class);
1447		printk(" --> ");
1448		__print_lock_name(unsafe_class);
1449		printk("\n\n");
1450	}
1451
1452	printk(" Possible interrupt unsafe locking scenario:\n\n");
1453	printk("       CPU0                    CPU1\n");
1454	printk("       ----                    ----\n");
1455	printk("  lock(");
1456	__print_lock_name(unsafe_class);
1457	printk(");\n");
1458	printk("                               local_irq_disable();\n");
1459	printk("                               lock(");
1460	__print_lock_name(safe_class);
1461	printk(");\n");
1462	printk("                               lock(");
1463	__print_lock_name(middle_class);
1464	printk(");\n");
1465	printk("  <Interrupt>\n");
1466	printk("    lock(");
1467	__print_lock_name(safe_class);
1468	printk(");\n");
1469	printk("\n *** DEADLOCK ***\n\n");
1470}
1471
1472static int
1473print_bad_irq_dependency(struct task_struct *curr,
1474			 struct lock_list *prev_root,
1475			 struct lock_list *next_root,
1476			 struct lock_list *backwards_entry,
1477			 struct lock_list *forwards_entry,
1478			 struct held_lock *prev,
1479			 struct held_lock *next,
1480			 enum lock_usage_bit bit1,
1481			 enum lock_usage_bit bit2,
1482			 const char *irqclass)
1483{
1484	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1485		return 0;
1486
1487	printk("\n");
1488	printk("======================================================\n");
1489	printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1490		irqclass, irqclass);
1491	print_kernel_ident();
1492	printk("------------------------------------------------------\n");
1493	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1494		curr->comm, task_pid_nr(curr),
1495		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1496		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1497		curr->hardirqs_enabled,
1498		curr->softirqs_enabled);
1499	print_lock(next);
1500
1501	printk("\nand this task is already holding:\n");
1502	print_lock(prev);
1503	printk("which would create a new lock dependency:\n");
1504	print_lock_name(hlock_class(prev));
1505	printk(" ->");
1506	print_lock_name(hlock_class(next));
1507	printk("\n");
1508
1509	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1510		irqclass);
1511	print_lock_name(backwards_entry->class);
1512	printk("\n... which became %s-irq-safe at:\n", irqclass);
1513
1514	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1515
1516	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1517	print_lock_name(forwards_entry->class);
1518	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1519	printk("...");
1520
1521	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1522
1523	printk("\nother info that might help us debug this:\n\n");
1524	print_irq_lock_scenario(backwards_entry, forwards_entry,
1525				hlock_class(prev), hlock_class(next));
1526
1527	lockdep_print_held_locks(curr);
1528
1529	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
1530	printk(" and the holding lock:\n");
1531	if (!save_trace(&prev_root->trace))
1532		return 0;
1533	print_shortest_lock_dependencies(backwards_entry, prev_root);
1534
1535	printk("\nthe dependencies between the lock to be acquired");
1536	printk(" and %s-irq-unsafe lock:\n", irqclass);
1537	if (!save_trace(&next_root->trace))
1538		return 0;
1539	print_shortest_lock_dependencies(forwards_entry, next_root);
1540
1541	printk("\nstack backtrace:\n");
1542	dump_stack();
1543
1544	return 0;
1545}
1546
1547static int
1548check_usage(struct task_struct *curr, struct held_lock *prev,
1549	    struct held_lock *next, enum lock_usage_bit bit_backwards,
1550	    enum lock_usage_bit bit_forwards, const char *irqclass)
1551{
1552	int ret;
1553	struct lock_list this, that;
1554	struct lock_list *uninitialized_var(target_entry);
1555	struct lock_list *uninitialized_var(target_entry1);
1556
1557	this.parent = NULL;
1558
1559	this.class = hlock_class(prev);
1560	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1561	if (ret < 0)
1562		return print_bfs_bug(ret);
1563	if (ret == 1)
1564		return ret;
1565
1566	that.parent = NULL;
1567	that.class = hlock_class(next);
1568	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1569	if (ret < 0)
1570		return print_bfs_bug(ret);
1571	if (ret == 1)
1572		return ret;
1573
1574	return print_bad_irq_dependency(curr, &this, &that,
1575			target_entry, target_entry1,
1576			prev, next,
1577			bit_backwards, bit_forwards, irqclass);
1578}
1579
1580static const char *state_names[] = {
1581#define LOCKDEP_STATE(__STATE) \
1582	__stringify(__STATE),
1583#include "lockdep_states.h"
1584#undef LOCKDEP_STATE
1585};
1586
1587static const char *state_rnames[] = {
1588#define LOCKDEP_STATE(__STATE) \
1589	__stringify(__STATE)"-READ",
1590#include "lockdep_states.h"
1591#undef LOCKDEP_STATE
1592};
1593
1594static inline const char *state_name(enum lock_usage_bit bit)
1595{
1596	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1597}
1598
1599static int exclusive_bit(int new_bit)
1600{
1601	/*
1602	 * USED_IN
1603	 * USED_IN_READ
1604	 * ENABLED
1605	 * ENABLED_READ
1606	 *
1607	 * bit 0 - write/read
1608	 * bit 1 - used_in/enabled
1609	 * bit 2+  state
1610	 */
1611
1612	int state = new_bit & ~3;
1613	int dir = new_bit & 2;
1614
1615	/*
1616	 * keep the state, flip the direction bit and strip the read bit.
1617	 */
1618	return state | (dir ^ 2);
1619}
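
/*
 * Worked example (assuming HARDIRQ is the first state in
 * lockdep_states.h, so LOCK_USED_IN_HARDIRQ == 0):
 *
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ)
 *		= (0 & ~3) | ((0 & 2) ^ 2)
 *		= 2 == LOCK_ENABLED_HARDIRQ
 *
 * i.e. a hardirq-safe usage (IN-HARDIRQ-W) excludes the hardirq-unsafe
 * usage (HARDIRQ-ON-W) anywhere in the connected dependency graph.
 */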
1620
1621static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1622			   struct held_lock *next, enum lock_usage_bit bit)
1623{
1624	/*
1625	 * Prove that the new dependency does not connect a hardirq-safe
1626	 * lock with a hardirq-unsafe lock - to achieve this we search
1627	 * the backwards-subgraph starting at <prev>, and the
1628	 * forwards-subgraph starting at <next>:
1629	 */
1630	if (!check_usage(curr, prev, next, bit,
1631			   exclusive_bit(bit), state_name(bit)))
1632		return 0;
1633
1634	bit++; /* _READ */
1635
1636	/*
1637	 * Prove that the new dependency does not connect a hardirq-safe-read
1638	 * lock with a hardirq-unsafe lock - to achieve this we search
1639	 * the backwards-subgraph starting at <prev>, and the
1640	 * forwards-subgraph starting at <next>:
1641	 */
1642	if (!check_usage(curr, prev, next, bit,
1643			   exclusive_bit(bit), state_name(bit)))
1644		return 0;
1645
1646	return 1;
1647}
1648
1649static int
1650check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1651		struct held_lock *next)
1652{
1653#define LOCKDEP_STATE(__STATE)						\
1654	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
1655		return 0;
1656#include "lockdep_states.h"
1657#undef LOCKDEP_STATE
1658
1659	return 1;
1660}
1661
1662static void inc_chains(void)
1663{
1664	if (current->hardirq_context)
1665		nr_hardirq_chains++;
1666	else {
1667		if (current->softirq_context)
1668			nr_softirq_chains++;
1669		else
1670			nr_process_chains++;
1671	}
1672}
1673
1674#else
1675
1676static inline int
1677check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1678		struct held_lock *next)
1679{
1680	return 1;
1681}
1682
1683static inline void inc_chains(void)
1684{
1685	nr_process_chains++;
1686}
1687
1688#endif
1689
1690static void
1691print_deadlock_scenario(struct held_lock *nxt,
1692			     struct held_lock *prv)
1693{
1694	struct lock_class *next = hlock_class(nxt);
1695	struct lock_class *prev = hlock_class(prv);
1696
1697	printk(" Possible unsafe locking scenario:\n\n");
1698	printk("       CPU0\n");
1699	printk("       ----\n");
1700	printk("  lock(");
1701	__print_lock_name(prev);
1702	printk(");\n");
1703	printk("  lock(");
1704	__print_lock_name(next);
1705	printk(");\n");
1706	printk("\n *** DEADLOCK ***\n\n");
1707	printk(" May be due to missing lock nesting notation\n\n");
1708}
1709
1710static int
1711print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1712		   struct held_lock *next)
1713{
1714	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1715		return 0;
1716
1717	printk("\n");
1718	printk("=============================================\n");
1719	printk("[ INFO: possible recursive locking detected ]\n");
1720	print_kernel_ident();
1721	printk("---------------------------------------------\n");
1722	printk("%s/%d is trying to acquire lock:\n",
1723		curr->comm, task_pid_nr(curr));
1724	print_lock(next);
1725	printk("\nbut task is already holding lock:\n");
1726	print_lock(prev);
1727
1728	printk("\nother info that might help us debug this:\n");
1729	print_deadlock_scenario(next, prev);
1730	lockdep_print_held_locks(curr);
1731
1732	printk("\nstack backtrace:\n");
1733	dump_stack();
1734
1735	return 0;
1736}
1737
1738/*
1739 * Check whether we are holding such a class already.
1740 *
1741 * (Note that this has to be done separately, because the graph cannot
1742 * detect such classes of deadlocks.)
1743 *
1744 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1745 */
1746static int
1747check_deadlock(struct task_struct *curr, struct held_lock *next,
1748	       struct lockdep_map *next_instance, int read)
1749{
1750	struct held_lock *prev;
1751	struct held_lock *nest = NULL;
1752	int i;
1753
1754	for (i = 0; i < curr->lockdep_depth; i++) {
1755		prev = curr->held_locks + i;
1756
1757		if (prev->instance == next->nest_lock)
1758			nest = prev;
1759
1760		if (hlock_class(prev) != hlock_class(next))
1761			continue;
1762
1763		/*
1764		 * Allow read-after-read recursion of the same
1765		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1766		 */
1767		if ((read == 2) && prev->read)
1768			return 2;
1769
1770		/*
1771		 * We're holding the nest_lock, which serializes this lock's
1772		 * nesting behaviour.
1773		 */
1774		if (nest)
1775			return 2;
1776
1777		return print_deadlock_bug(curr, prev, next);
1778	}
1779	return 1;
1780}
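
/*
 * Sketch (hypothetical code) of the read-recursion the check above
 * permits: rwlocks are recursive readers, so read_lock() acquires with
 * read == 2 and read-after-read on the same class returns 2 (OK), while
 * taking the write side twice would hit print_deadlock_bug():
 */
static DEFINE_RWLOCK(example_lock);

static void example_recursive_read(void)
{
	read_lock(&example_lock);
	read_lock(&example_lock);	/* allowed: read-after-read */
	read_unlock(&example_lock);
	read_unlock(&example_lock);
}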
1781
1782/*
1783 * There was a chain-cache miss, and we are about to add a new dependency
1784 * to a previous lock. We recursively validate the following rules:
1785 *
1786 *  - would the adding of the <prev> -> <next> dependency create a
1787 *    circular dependency in the graph? [== circular deadlock]
1788 *
1789 *  - does the new prev->next dependency connect any hardirq-safe lock
1790 *    (in the full backwards-subgraph starting at <prev>) with any
1791 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1792 *    <next>)? [== illegal lock inversion with hardirq contexts]
1793 *
1794 *  - does the new prev->next dependency connect any softirq-safe lock
1795 *    (in the full backwards-subgraph starting at <prev>) with any
1796 *    softirq-unsafe lock (in the full forwards-subgraph starting at
1797 *    <next>)? [== illegal lock inversion with softirq contexts]
1798 *
1799 * any of these scenarios could lead to a deadlock.
1800 *
1801 * Then if all the validations pass, we add the forwards and backwards
1802 * dependency.
1803 */
1804static int
1805check_prev_add(struct task_struct *curr, struct held_lock *prev,
1806	       struct held_lock *next, int distance, int trylock_loop)
1807{
1808	struct lock_list *entry;
1809	int ret;
1810	struct lock_list this;
1811	struct lock_list *uninitialized_var(target_entry);
1812	/*
1813	 * Static variable, serialized by the graph_lock().
1814	 *
1815	 * We use this static variable to save the stack trace in case
1816	 * we call into this function multiple times due to encountering
1817	 * trylocks in the held lock stack.
1818	 */
1819	static struct stack_trace trace;
1820
1821	/*
1822	 * Prove that the new <prev> -> <next> dependency would not
1823	 * create a circular dependency in the graph. (We do this by
1824	 * forward-recursing into the graph starting at <next>, and
1825	 * checking whether we can reach <prev>.)
1826	 *
1827	 * We are using global variables to control the recursion, to
1828	 * keep the stackframe size of the recursive functions low:
1829	 */
1830	this.class = hlock_class(next);
1831	this.parent = NULL;
1832	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1833	if (unlikely(!ret))
1834		return print_circular_bug(&this, target_entry, next, prev);
1835	else if (unlikely(ret < 0))
1836		return print_bfs_bug(ret);
1837
1838	if (!check_prev_add_irq(curr, prev, next))
1839		return 0;
1840
1841	/*
1842	 * For recursive read-locks we do all the dependency checks,
1843	 * but we dont store read-triggered dependencies (only
1844	 * write-triggered dependencies). This ensures that only the
1845	 * write-side dependencies matter, and that if for example a
1846	 * write-lock never takes any other locks, then the reads are
1847	 * equivalent to a NOP.
1848	 */
1849	if (next->read == 2 || prev->read == 2)
1850		return 1;
1851	/*
1852	 * Is the <prev> -> <next> dependency already present?
1853	 *
1854	 * (this may occur even though this is a new chain: consider
1855	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1856	 *  chains - the second one will be new, but L1 already has
1857	 *  L2 added to its dependency list, due to the first chain.)
1858	 */
1859	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1860		if (entry->class == hlock_class(next)) {
1861			if (distance == 1)
1862				entry->distance = 1;
1863			return 2;
1864		}
1865	}
1866
1867	if (!trylock_loop && !save_trace(&trace))
1868		return 0;
1869
1870	/*
1871	 * Ok, all validations passed, add the new lock
1872	 * to the previous lock's dependency list:
1873	 */
1874	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1875			       &hlock_class(prev)->locks_after,
1876			       next->acquire_ip, distance, &trace);
1877
1878	if (!ret)
1879		return 0;
1880
1881	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1882			       &hlock_class(next)->locks_before,
1883			       next->acquire_ip, distance, &trace);
1884	if (!ret)
1885		return 0;
1886
1887	/*
1888	 * Debugging printouts:
1889	 */
1890	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1891		graph_unlock();
1892		printk("\n new dependency: ");
1893		print_lock_name(hlock_class(prev));
1894		printk(" => ");
1895		print_lock_name(hlock_class(next));
1896		printk("\n");
1897		dump_stack();
1898		return graph_lock();
1899	}
1900	return 1;
1901}
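
/*
 * A minimal userspace model of the cycle check above, not lockdep
 * itself: before inserting <prev> -> <next>, search the forward
 * subgraph of <next>; if <prev> is reachable, the new edge would
 * close a cycle. A recursive DFS stands in for lockdep's BFS, and
 * all names below are hypothetical.
 */
#if 0	/* illustrative sketch only, never compiled */
#include <stdio.h>

#define MAX_CLASSES 8

struct xclass {
	int after[MAX_CLASSES];		/* model of ->locks_after */
	int nr_after;
};

static struct xclass g[MAX_CLASSES];

static int reaches(int from, int target, unsigned int *visited)
{
	int i;

	if (from == target)
		return 1;
	if (*visited & (1u << from))
		return 0;
	*visited |= 1u << from;
	for (i = 0; i < g[from].nr_after; i++)
		if (reaches(g[from].after[i], target, visited))
			return 1;
	return 0;
}

static int add_dependency(int prev, int next)
{
	unsigned int visited = 0;

	if (reaches(next, prev, &visited))
		return 0;		/* would be a circular deadlock */
	g[prev].after[g[prev].nr_after++] = next;
	return 1;
}

int main(void)
{
	add_dependency(0, 1);			/* A -> B */
	add_dependency(1, 2);			/* B -> C */
	printf("%d\n", add_dependency(2, 0));	/* C -> A: prints 0 */
	return 0;
}
#endif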
1902
1903/*
1904 * Add the dependency to all directly-previous locks that are 'relevant'.
1905 * The ones that are relevant are (in increasing distance from curr):
1906 * all consecutive trylock entries and the final non-trylock entry - or
1907 * the end of this context's lock-chain - whichever comes first.
1908 */
1909static int
1910check_prevs_add(struct task_struct *curr, struct held_lock *next)
1911{
1912	int depth = curr->lockdep_depth;
1913	int trylock_loop = 0;
1914	struct held_lock *hlock;
1915
1916	/*
1917	 * Debugging checks.
1918	 *
1919	 * Depth must not be zero for a non-head lock:
1920	 */
1921	if (!depth)
1922		goto out_bug;
1923	/*
1924	 * At least two relevant locks must exist for this
1925	 * to be a head:
1926	 */
1927	if (curr->held_locks[depth].irq_context !=
1928			curr->held_locks[depth-1].irq_context)
1929		goto out_bug;
1930
1931	for (;;) {
1932		int distance = curr->lockdep_depth - depth + 1;
1933		hlock = curr->held_locks + depth-1;
1934		/*
1935		 * Only non-recursive-read entries get new dependencies
1936		 * added:
1937		 */
1938		if (hlock->read != 2) {
1939			if (!check_prev_add(curr, hlock, next,
1940						distance, trylock_loop))
1941				return 0;
1942			/*
1943			 * Stop after the first non-trylock entry,
1944			 * as non-trylock entries have added their
1945			 * own direct dependencies already, so this
1946			 * lock is connected to them indirectly:
1947			 */
1948			if (!hlock->trylock)
1949				break;
1950		}
1951		depth--;
1952		/*
1953		 * End of lock-stack?
1954		 */
1955		if (!depth)
1956			break;
1957		/*
1958		 * Stop the search if we cross into another context:
1959		 */
1960		if (curr->held_locks[depth].irq_context !=
1961				curr->held_locks[depth-1].irq_context)
1962			break;
1963		trylock_loop = 1;
1964	}
1965	return 1;
1966out_bug:
1967	if (!debug_locks_off_graph_unlock())
1968		return 0;
1969
1970	/*
1971	 * Clearly we all shouldn't be here, but since we made it we
1972	 * can reliably say we messed up our state. See the above two
1973	 * gotos for reasons why we could possibly end up here.
1974	 */
1975	WARN_ON(1);
1976
1977	return 0;
1978}
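
/*
 * A small model (not lockdep itself) of the walk above: the new lock
 * gets a direct edge from every consecutive trylock entry plus the
 * final non-trylock entry, and the walk stops at an irq-context
 * boundary. Recursive-read skipping is omitted for brevity.
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

struct entry {
	int trylock;
	int irq_context;
};

static void relevant_prevs(const struct entry *held, int depth, int next_ctx)
{
	int i;

	for (i = depth - 1; i >= 0; i--) {
		if (held[i].irq_context != next_ctx)
			break;				/* crossed contexts */
		printf("add edge: held[%d] -> next\n", i);
		if (!held[i].trylock)
			break;				/* first non-trylock */
	}
}

int main(void)
{
	const struct entry held[] = {
		{ 0, 0 },	/* non-trylock: never reached	 */
		{ 0, 0 },	/* non-trylock: edge added, stop */
		{ 1, 0 },	/* trylock: edge added		 */
		{ 1, 0 },	/* trylock: edge added		 */
	};

	relevant_prevs(held, 4, 0);
	return 0;
}
#endif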
1979
1980unsigned long nr_lock_chains;
1981struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1982int nr_chain_hlocks;
1983static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1984
1985struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1986{
1987	return lock_classes + chain_hlocks[chain->base + i];
1988}
1989
1990/*
1991 * Look up a dependency chain. If the key is not present yet then
1992 * add it and return 1 - in this case the new dependency chain is
1993 * validated. If the key is already hashed, return 0.
1994 * (On return with 1 graph_lock is held.)
1995 * (On return with 1, graph_lock is held.)
1996static inline int lookup_chain_cache(struct task_struct *curr,
1997				     struct held_lock *hlock,
1998				     u64 chain_key)
1999{
2000	struct lock_class *class = hlock_class(hlock);
2001	struct list_head *hash_head = chainhashentry(chain_key);
2002	struct lock_chain *chain;
2003	struct held_lock *hlock_curr, *hlock_next;
2004	int i, j;
2005
2006	/*
2007	 * We might need to take the graph lock, so ensure we've got IRQs
2008	 * disabled to make that an IRQ-safe lock. For recursion reasons
2009	 * lockdep won't complain about its own locking errors.
2010	 */
2011	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2012		return 0;
2013	/*
2014	 * We can walk it lock-free, because entries only get added
2015	 * to the hash:
2016	 */
2017	list_for_each_entry(chain, hash_head, entry) {
2018		if (chain->chain_key == chain_key) {
2019cache_hit:
2020			debug_atomic_inc(chain_lookup_hits);
2021			if (very_verbose(class))
2022				printk("\nhash chain already cached, key: "
2023					"%016Lx tail class: [%p] %s\n",
2024					(unsigned long long)chain_key,
2025					class->key, class->name);
2026			return 0;
2027		}
2028	}
2029	if (very_verbose(class))
2030		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
2031			(unsigned long long)chain_key, class->key, class->name);
2032	/*
2033	 * Allocate a new chain entry from the static array, and add
2034	 * it to the hash:
2035	 */
2036	if (!graph_lock())
2037		return 0;
2038	/*
2039	 * We have to walk the chain again locked - to avoid duplicates:
2040	 */
2041	list_for_each_entry(chain, hash_head, entry) {
2042		if (chain->chain_key == chain_key) {
2043			graph_unlock();
2044			goto cache_hit;
2045		}
2046	}
2047	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2048		if (!debug_locks_off_graph_unlock())
2049			return 0;
2050
2051		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
2052		printk("turning off the locking correctness validator.\n");
2053		dump_stack();
2054		return 0;
2055	}
2056	chain = lock_chains + nr_lock_chains++;
2057	chain->chain_key = chain_key;
2058	chain->irq_context = hlock->irq_context;
2059	/* Find the first held_lock of current chain */
2060	hlock_next = hlock;
2061	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2062		hlock_curr = curr->held_locks + i;
2063		if (hlock_curr->irq_context != hlock_next->irq_context)
2064			break;
2065		hlock_next = hlock;
2066	}
2067	i++;
2068	chain->depth = curr->lockdep_depth + 1 - i;
2069	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2070		chain->base = nr_chain_hlocks;
2071		nr_chain_hlocks += chain->depth;
2072		for (j = 0; j < chain->depth - 1; j++, i++) {
2073			int lock_id = curr->held_locks[i].class_idx - 1;
2074			chain_hlocks[chain->base + j] = lock_id;
2075		}
2076		chain_hlocks[chain->base + j] = class - lock_classes;
2077	}
2078	list_add_tail_rcu(&chain->entry, hash_head);
2079	debug_atomic_inc(chain_lookup_misses);
2080	inc_chains();
2081
2082	return 1;
2083}
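
/*
 * A minimal model of the chain cache, not the real thing: the 64-bit
 * chain_key identifies an exact sequence of held locks, and a hit
 * means that sequence was validated before, so the expensive checks
 * can be skipped. A linear scan stands in for the hash buckets.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>
#include <stdio.h>

#define NCHAINS 64	/* stand-in for MAX_LOCKDEP_CHAINS */

static uint64_t chains[NCHAINS];
static int nr_chains;

/* returns 1 if the chain is new and the caller must validate it */
static int lookup_chain(uint64_t chain_key)
{
	int i;

	for (i = 0; i < nr_chains; i++)
		if (chains[i] == chain_key)
			return 0;		/* hit: already validated */
	if (nr_chains == NCHAINS)
		return 0;			/* table full: give up */
	chains[nr_chains++] = chain_key;	/* miss: record it */
	return 1;
}

int main(void)
{
	printf("%d\n", lookup_chain(0xdeadbeefULL));	/* 1: new chain */
	printf("%d\n", lookup_chain(0xdeadbeefULL));	/* 0: cache hit */
	return 0;
}
#endif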
2084
2085static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2086		struct held_lock *hlock, int chain_head, u64 chain_key)
2087{
2088	/*
2089	 * Trylock needs to maintain the stack of held locks, but it
2090	 * does not add new dependencies, because trylock can be done
2091	 * in any order.
2092	 *
2093	 * We look up the chain_key and do the O(N^2) check and update of
2094	 * the dependencies only if this is a new dependency chain.
2095	 * (If lookup_chain_cache() returns with 1 it acquires
2096	 * graph_lock for us)
2097	 */
2098	if (!hlock->trylock && (hlock->check == 2) &&
2099	    lookup_chain_cache(curr, hlock, chain_key)) {
2100		/*
2101		 * Check whether last held lock:
2102		 *
2103		 * - is irq-safe, if this lock is irq-unsafe
2104		 * - is softirq-safe, if this lock is hardirq-unsafe
2105		 *
2106		 * And check whether the new lock's dependency graph
2107		 * could lead back to the previous lock.
2108		 *
2109		 * Any of these scenarios could lead to a deadlock. If
2110		 * all validations pass, the new dependencies are added below.
2111		 */
2112		int ret = check_deadlock(curr, hlock, lock, hlock->read);
2113
2114		if (!ret)
2115			return 0;
2116		/*
2117		 * Mark recursive read, as we jump over it when
2118		 * building dependencies (just like we jump over
2119		 * trylock entries):
2120		 */
2121		if (ret == 2)
2122			hlock->read = 2;
2123		/*
2124		 * Add dependency only if this lock is not the head
2125		 * of the chain, and if it's not a secondary read-lock:
2126		 */
2127		if (!chain_head && ret != 2)
2128			if (!check_prevs_add(curr, hlock))
2129				return 0;
2130		graph_unlock();
2131	} else
2132		/* after lookup_chain_cache(): */
2133		if (unlikely(!debug_locks))
2134			return 0;
2135
2136	return 1;
2137}
2138#else
2139static inline int validate_chain(struct task_struct *curr,
2140	       	struct lockdep_map *lock, struct held_lock *hlock,
2141		int chain_head, u64 chain_key)
2142{
2143	return 1;
2144}
2145#endif
2146
2147/*
2148 * We are building curr_chain_key incrementally, so double-check
2149 * it from scratch, to make sure that it's done correctly:
2150 */
2151static void check_chain_key(struct task_struct *curr)
2152{
2153#ifdef CONFIG_DEBUG_LOCKDEP
2154	struct held_lock *hlock, *prev_hlock = NULL;
2155	unsigned int i, id;
2156	u64 chain_key = 0;
2157
2158	for (i = 0; i < curr->lockdep_depth; i++) {
2159		hlock = curr->held_locks + i;
2160		if (chain_key != hlock->prev_chain_key) {
2161			debug_locks_off();
2162			/*
2163			 * We got mighty confused, our chain keys don't match
2164			 * with what we expect, did someone trample on our task state?
2165			 */
2166			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2167				curr->lockdep_depth, i,
2168				(unsigned long long)chain_key,
2169				(unsigned long long)hlock->prev_chain_key);
2170			return;
2171		}
2172		id = hlock->class_idx - 1;
2173		/*
2174		 * Whoops ran out of static storage again?
2175		 */
2176		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2177			return;
2178
2179		if (prev_hlock && (prev_hlock->irq_context !=
2180							hlock->irq_context))
2181			chain_key = 0;
2182		chain_key = iterate_chain_key(chain_key, id);
2183		prev_hlock = hlock;
2184	}
2185	if (chain_key != curr->curr_chain_key) {
2186		debug_locks_off();
2187		/*
2188		 * More smoking hash instead of calculating it, damn see these
2189		 * numbers float.. I bet that a pink elephant stepped on my memory.
2190		 */
2191		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2192			curr->lockdep_depth, i,
2193			(unsigned long long)chain_key,
2194			(unsigned long long)curr->curr_chain_key);
2195	}
2196#endif
2197}
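
/*
 * The consistency check above in model form: fold class ids into a
 * running 64-bit key at acquire time, then re-derive the key from
 * scratch and compare. The rotate-xor mix below is hypothetical;
 * lockdep's iterate_chain_key() is defined elsewhere.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>
#include <stdio.h>

static uint64_t mix(uint64_t key, unsigned int id)
{
	return ((key << 13) | (key >> 51)) ^ id;
}

int main(void)
{
	const unsigned int stack[] = { 3, 7, 42 };	/* class ids */
	uint64_t incremental = 0, scratch = 0;
	unsigned int i;

	/* maintained one step per acquisition */
	for (i = 0; i < 3; i++)
		incremental = mix(incremental, stack[i]);

	/* re-derived in full, as check_chain_key() does */
	for (i = 0; i < 3; i++)
		scratch = mix(scratch, stack[i]);

	printf("%s\n", incremental == scratch ? "ok" : "state corrupted");
	return 0;
}
#endif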
2198
2199static void
2200print_usage_bug_scenario(struct held_lock *lock)
2201{
2202	struct lock_class *class = hlock_class(lock);
2203
2204	printk(" Possible unsafe locking scenario:\n\n");
2205	printk("       CPU0\n");
2206	printk("       ----\n");
2207	printk("  lock(");
2208	__print_lock_name(class);
2209	printk(");\n");
2210	printk("  <Interrupt>\n");
2211	printk("    lock(");
2212	__print_lock_name(class);
2213	printk(");\n");
2214	printk("\n *** DEADLOCK ***\n\n");
2215}
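
/*
 * The concrete pattern behind the scenario printed above, sketched
 * with plausible kernel calls (dev_lock and dev_irq() are made up):
 * an irq-unsafe acquisition in process context combined with the
 * same lock taken from a hardirq handler.
 */
#if 0	/* illustrative sketch only */
static DEFINE_SPINLOCK(dev_lock);

static void process_path(void)
{
	spin_lock(&dev_lock);		/* irqs stay enabled here... */
	/* ... */
	spin_unlock(&dev_lock);
}

static irqreturn_t dev_irq(int irq, void *data)
{
	spin_lock(&dev_lock);		/* ...so this can self-deadlock */
	/* ... */
	spin_unlock(&dev_lock);
	return IRQ_HANDLED;
}

/* the fix: use spin_lock_irqsave(&dev_lock, flags) in process_path() */
#endif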
2216
2217static int
2218print_usage_bug(struct task_struct *curr, struct held_lock *this,
2219		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2220{
2221	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2222		return 0;
2223
2224	printk("\n");
2225	printk("=================================\n");
2226	printk("[ INFO: inconsistent lock state ]\n");
2227	print_kernel_ident();
2228	printk("---------------------------------\n");
2229
2230	printk("inconsistent {%s} -> {%s} usage.\n",
2231		usage_str[prev_bit], usage_str[new_bit]);
2232
2233	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2234		curr->comm, task_pid_nr(curr),
2235		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2236		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2237		trace_hardirqs_enabled(curr),
2238		trace_softirqs_enabled(curr));
2239	print_lock(this);
2240
2241	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
2242	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2243
2244	print_irqtrace_events(curr);
2245	printk("\nother info that might help us debug this:\n");
2246	print_usage_bug_scenario(this);
2247
2248	lockdep_print_held_locks(curr);
2249
2250	printk("\nstack backtrace:\n");
2251	dump_stack();
2252
2253	return 0;
2254}
2255
2256/*
2257 * Print out an error if an invalid bit is set:
2258 */
2259static inline int
2260valid_state(struct task_struct *curr, struct held_lock *this,
2261	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2262{
2263	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2264		return print_usage_bug(curr, this, bad_bit, new_bit);
2265	return 1;
2266}
2267
2268static int mark_lock(struct task_struct *curr, struct held_lock *this,
2269		     enum lock_usage_bit new_bit);
2270
2271#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2272
2273/*
2274 * print irq inversion bug:
2275 */
2276static int
2277print_irq_inversion_bug(struct task_struct *curr,
2278			struct lock_list *root, struct lock_list *other,
2279			struct held_lock *this, int forwards,
2280			const char *irqclass)
2281{
2282	struct lock_list *entry = other;
2283	struct lock_list *middle = NULL;
2284	int depth;
2285
2286	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2287		return 0;
2288
2289	printk("\n");
2290	printk("=========================================================\n");
2291	printk("[ INFO: possible irq lock inversion dependency detected ]\n");
2292	print_kernel_ident();
2293	printk("---------------------------------------------------------\n");
2294	printk("%s/%d just changed the state of lock:\n",
2295		curr->comm, task_pid_nr(curr));
2296	print_lock(this);
2297	if (forwards)
2298		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2299	else
2300		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2301	print_lock_name(other->class);
2302	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2303
2304	printk("\nother info that might help us debug this:\n");
2305
2306	/* Find a middle lock (if one exists) */
2307	depth = get_lock_depth(other);
2308	do {
2309		if (depth == 0 && (entry != root)) {
2310			printk("lockdep:%s bad path found in chain graph\n", __func__);
2311			break;
2312		}
2313		middle = entry;
2314		entry = get_lock_parent(entry);
2315		depth--;
2316	} while (entry && entry != root && (depth >= 0));
2317	if (forwards)
2318		print_irq_lock_scenario(root, other,
2319			middle ? middle->class : root->class, other->class);
2320	else
2321		print_irq_lock_scenario(other, root,
2322			middle ? middle->class : other->class, root->class);
2323
2324	lockdep_print_held_locks(curr);
2325
2326	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2327	if (!save_trace(&root->trace))
2328		return 0;
2329	print_shortest_lock_dependencies(other, root);
2330
2331	printk("\nstack backtrace:\n");
2332	dump_stack();
2333
2334	return 0;
2335}
2336
2337/*
2338 * Prove that in the forwards-direction subgraph starting at <this>
2339 * there is no lock matching <mask>:
2340 */
2341static int
2342check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2343		     enum lock_usage_bit bit, const char *irqclass)
2344{
2345	int ret;
2346	struct lock_list root;
2347	struct lock_list *uninitialized_var(target_entry);
2348
2349	root.parent = NULL;
2350	root.class = hlock_class(this);
2351	ret = find_usage_forwards(&root, bit, &target_entry);
2352	if (ret < 0)
2353		return print_bfs_bug(ret);
2354	if (ret == 1)
2355		return ret;
2356
2357	return print_irq_inversion_bug(curr, &root, target_entry,
2358					this, 1, irqclass);
2359}
2360
2361/*
2362 * Prove that in the backwards-direction subgraph starting at <this>
2363 * there is no lock matching <mask>:
2364 */
2365static int
2366check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2367		      enum lock_usage_bit bit, const char *irqclass)
2368{
2369	int ret;
2370	struct lock_list root;
2371	struct lock_list *uninitialized_var(target_entry);
2372
2373	root.parent = NULL;
2374	root.class = hlock_class(this);
2375	ret = find_usage_backwards(&root, bit, &target_entry);
2376	if (ret < 0)
2377		return print_bfs_bug(ret);
2378	if (ret == 1)
2379		return ret;
2380
2381	return print_irq_inversion_bug(curr, &root, target_entry,
2382					this, 0, irqclass);
2383}
2384
2385void print_irqtrace_events(struct task_struct *curr)
2386{
2387	printk("irq event stamp: %u\n", curr->irq_events);
2388	printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
2389	print_ip_sym(curr->hardirq_enable_ip);
2390	printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
2391	print_ip_sym(curr->hardirq_disable_ip);
2392	printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
2393	print_ip_sym(curr->softirq_enable_ip);
2394	printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
2395	print_ip_sym(curr->softirq_disable_ip);
2396}
2397
2398static int HARDIRQ_verbose(struct lock_class *class)
2399{
2400#if HARDIRQ_VERBOSE
2401	return class_filter(class);
2402#endif
2403	return 0;
2404}
2405
2406static int SOFTIRQ_verbose(struct lock_class *class)
2407{
2408#if SOFTIRQ_VERBOSE
2409	return class_filter(class);
2410#endif
2411	return 0;
2412}
2413
2414static int RECLAIM_FS_verbose(struct lock_class *class)
2415{
2416#if RECLAIM_VERBOSE
2417	return class_filter(class);
2418#endif
2419	return 0;
2420}
2421
2422#define STRICT_READ_CHECKS	1
2423
2424static int (*state_verbose_f[])(struct lock_class *class) = {
2425#define LOCKDEP_STATE(__STATE) \
2426	__STATE##_verbose,
2427#include "lockdep_states.h"
2428#undef LOCKDEP_STATE
2429};
2430
2431static inline int state_verbose(enum lock_usage_bit bit,
2432				struct lock_class *class)
2433{
2434	return state_verbose_f[bit >> 2](class);
2435}
2436
2437typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2438			     enum lock_usage_bit bit, const char *name);
2439
2440static int
2441mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2442		enum lock_usage_bit new_bit)
2443{
2444	int excl_bit = exclusive_bit(new_bit);
2445	int read = new_bit & 1;
2446	int dir = new_bit & 2;
2447
2448	/*
2449	 * mark USED_IN has to look forwards -- to ensure no dependency
2450	 * has ENABLED state, which would allow recursion deadlocks.
2451	 *
2452	 * mark ENABLED has to look backwards -- to ensure no dependee
2453	 * has USED_IN state, which, again, would allow recursion deadlocks.
2454	 */
2455	check_usage_f usage = dir ?
2456		check_usage_backwards : check_usage_forwards;
2457
2458	/*
2459	 * Validate that this particular lock does not have conflicting
2460	 * usage states.
2461	 */
2462	if (!valid_state(curr, this, new_bit, excl_bit))
2463		return 0;
2464
2465	/*
2466	 * Validate that the lock dependencies don't have conflicting usage
2467	 * states.
2468	 */
2469	if ((!read || !dir || STRICT_READ_CHECKS) &&
2470			!usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2471		return 0;
2472
2473	/*
2474	 * Check for read in write conflicts
2475	 */
2476	if (!read) {
2477		if (!valid_state(curr, this, new_bit, excl_bit + 1))
2478			return 0;
2479
2480		if (STRICT_READ_CHECKS &&
2481			!usage(curr, this, excl_bit + 1,
2482				state_name(new_bit + 1)))
2483			return 0;
2484	}
2485
2486	if (state_verbose(new_bit, hlock_class(this)))
2487		return 2;
2488
2489	return 1;
2490}
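
/*
 * The usage-bit layout mark_lock_irq() decodes above (bit 0 = read,
 * bit 1 = direction, bits 2+ = state), replayed standalone. The
 * exclusive_bit() helper below mirrors the idea of the real one:
 * same state, opposite direction, write variant.
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

static const char *states[] = { "HARDIRQ", "SOFTIRQ", "RECLAIM_FS" };

static void decode(int bit)
{
	int read  = bit & 1;	/* _READ variant?		*/
	int dir   = bit & 2;	/* ENABLED (2) or USED_IN (0)	*/
	int state = bit >> 2;	/* index into lockdep_states.h	*/

	printf("LOCK_%s_%s%s\n", dir ? "ENABLED" : "USED_IN",
	       states[state], read ? "_READ" : "");
}

static int exclusive_bit(int bit)
{
	return (bit & ~3) | ((bit & 2) ^ 2);
}

int main(void)
{
	decode(0);			/* LOCK_USED_IN_HARDIRQ	     */
	decode(exclusive_bit(0));	/* LOCK_ENABLED_HARDIRQ	     */
	decode(7);			/* LOCK_ENABLED_SOFTIRQ_READ */
	return 0;
}
#endif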
2491
2492enum mark_type {
2493#define LOCKDEP_STATE(__STATE)	__STATE,
2494#include "lockdep_states.h"
2495#undef LOCKDEP_STATE
2496};
2497
2498/*
2499 * Mark all held locks with a usage bit:
2500 */
2501static int
2502mark_held_locks(struct task_struct *curr, enum mark_type mark)
2503{
2504	enum lock_usage_bit usage_bit;
2505	struct held_lock *hlock;
2506	int i;
2507
2508	for (i = 0; i < curr->lockdep_depth; i++) {
2509		hlock = curr->held_locks + i;
2510
2511		usage_bit = 2 + (mark << 2); /* ENABLED */
2512		if (hlock->read)
2513			usage_bit += 1; /* READ */
2514
2515		BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2516
2517		if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys)
2518			continue;
2519
2520		if (!mark_lock(curr, hlock, usage_bit))
2521			return 0;
2522	}
2523
2524	return 1;
2525}
2526
2527/*
2528 * Hardirqs will be enabled:
2529 */
2530static void __trace_hardirqs_on_caller(unsigned long ip)
2531{
2532	struct task_struct *curr = current;
2533
2534	/* we'll do an OFF -> ON transition: */
2535	curr->hardirqs_enabled = 1;
2536
2537	/*
2538	 * We are going to turn hardirqs on, so set the
2539	 * usage bit for all held locks:
2540	 */
2541	if (!mark_held_locks(curr, HARDIRQ))
2542		return;
2543	/*
2544	 * If we have softirqs enabled, then set the usage
2545	 * bit for all held locks. (disabled hardirqs prevented
2546	 * this bit from being set before)
2547	 */
2548	if (curr->softirqs_enabled)
2549		if (!mark_held_locks(curr, SOFTIRQ))
2550			return;
2551
2552	curr->hardirq_enable_ip = ip;
2553	curr->hardirq_enable_event = ++curr->irq_events;
2554	debug_atomic_inc(hardirqs_on_events);
2555}
2556
2557void trace_hardirqs_on_caller(unsigned long ip)
2558{
2559	time_hardirqs_on(CALLER_ADDR0, ip);
2560
2561	if (unlikely(!debug_locks || current->lockdep_recursion))
2562		return;
2563
2564	if (unlikely(current->hardirqs_enabled)) {
2565		/*
2566		 * Neither irq nor preemption are disabled here
2567		 * so this is racy by nature but losing one hit
2568		 * in a stat is not a big deal.
2569		 */
2570		__debug_atomic_inc(redundant_hardirqs_on);
2571		return;
2572	}
2573
2574	/*
2575	 * We're enabling irqs and according to our state above irqs weren't
2576	 * already enabled, yet we find the hardware thinks they are in fact
2577	 * enabled.. someone messed up their IRQ state tracing.
2578	 */
2579	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2580		return;
2581
2582	/*
2583	 * See the fine text that goes along with this variable definition.
2584	 */
2585	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2586		return;
2587
2588	/*
2589	 * Can't allow enabling interrupts while in an interrupt handler,
2590	 * that's general bad form and such. Recursion, limited stack etc..
2591	 */
2592	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2593		return;
2594
2595	current->lockdep_recursion = 1;
2596	__trace_hardirqs_on_caller(ip);
2597	current->lockdep_recursion = 0;
2598}
2599EXPORT_SYMBOL(trace_hardirqs_on_caller);
2600
2601void trace_hardirqs_on(void)
2602{
2603	trace_hardirqs_on_caller(CALLER_ADDR0);
2604}
2605EXPORT_SYMBOL(trace_hardirqs_on);
2606
2607/*
2608 * Hardirqs were disabled:
2609 */
2610void trace_hardirqs_off_caller(unsigned long ip)
2611{
2612	struct task_struct *curr = current;
2613
2614	time_hardirqs_off(CALLER_ADDR0, ip);
2615
2616	if (unlikely(!debug_locks || current->lockdep_recursion))
2617		return;
2618
2619	/*
2620	 * So we're supposed to get called after you mask local IRQs, but for
2621	 * some reason the hardware doesn't quite think you did a proper job.
2622	 */
2623	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2624		return;
2625
2626	if (curr->hardirqs_enabled) {
2627		/*
2628		 * We have done an ON -> OFF transition:
2629		 */
2630		curr->hardirqs_enabled = 0;
2631		curr->hardirq_disable_ip = ip;
2632		curr->hardirq_disable_event = ++curr->irq_events;
2633		debug_atomic_inc(hardirqs_off_events);
2634	} else
2635		debug_atomic_inc(redundant_hardirqs_off);
2636}
2637EXPORT_SYMBOL(trace_hardirqs_off_caller);
2638
2639void trace_hardirqs_off(void)
2640{
2641	trace_hardirqs_off_caller(CALLER_ADDR0);
2642}
2643EXPORT_SYMBOL(trace_hardirqs_off);
2644
2645/*
2646 * Softirqs will be enabled:
2647 */
2648void trace_softirqs_on(unsigned long ip)
2649{
2650	struct task_struct *curr = current;
2651
2652	if (unlikely(!debug_locks || current->lockdep_recursion))
2653		return;
2654
2655	/*
2656	 * We fancy IRQs being disabled here, see softirq.c, avoids
2657	 * funny state and nesting things.
2658	 */
2659	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2660		return;
2661
2662	if (curr->softirqs_enabled) {
2663		debug_atomic_inc(redundant_softirqs_on);
2664		return;
2665	}
2666
2667	current->lockdep_recursion = 1;
2668	/*
2669	 * We'll do an OFF -> ON transition:
2670	 */
2671	curr->softirqs_enabled = 1;
2672	curr->softirq_enable_ip = ip;
2673	curr->softirq_enable_event = ++curr->irq_events;
2674	debug_atomic_inc(softirqs_on_events);
2675	/*
2676	 * We are going to turn softirqs on, so set the
2677	 * usage bit for all held locks, if hardirqs are
2678	 * enabled too:
2679	 */
2680	if (curr->hardirqs_enabled)
2681		mark_held_locks(curr, SOFTIRQ);
2682	current->lockdep_recursion = 0;
2683}
2684
2685/*
2686 * Softirqs were disabled:
2687 */
2688void trace_softirqs_off(unsigned long ip)
2689{
2690	struct task_struct *curr = current;
2691
2692	if (unlikely(!debug_locks || current->lockdep_recursion))
2693		return;
2694
2695	/*
2696	 * We fancy IRQs being disabled here, see softirq.c
2697	 */
2698	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2699		return;
2700
2701	if (curr->softirqs_enabled) {
2702		/*
2703		 * We have done an ON -> OFF transition:
2704		 */
2705		curr->softirqs_enabled = 0;
2706		curr->softirq_disable_ip = ip;
2707		curr->softirq_disable_event = ++curr->irq_events;
2708		debug_atomic_inc(softirqs_off_events);
2709		/*
2710		 * Whoops, we wanted softirqs off, so why aren't they?
2711		 */
2712		DEBUG_LOCKS_WARN_ON(!softirq_count());
2713	} else
2714		debug_atomic_inc(redundant_softirqs_off);
2715}
2716
2717static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2718{
2719	struct task_struct *curr = current;
2720
2721	if (unlikely(!debug_locks))
2722		return;
2723
2724	/* no reclaim without waiting on it */
2725	if (!(gfp_mask & __GFP_WAIT))
2726		return;
2727
2728	/* this guy won't enter reclaim */
2729	if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2730		return;
2731
2732	/* We're only interested in __GFP_FS allocations for now */
2733	if (!(gfp_mask & __GFP_FS))
2734		return;
2735
2736	/*
2737	 * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
2738	 */
2739	if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2740		return;
2741
2742	mark_held_locks(curr, RECLAIM_FS);
2743}
2744
2745static void check_flags(unsigned long flags);
2746
2747void lockdep_trace_alloc(gfp_t gfp_mask)
2748{
2749	unsigned long flags;
2750
2751	if (unlikely(current->lockdep_recursion))
2752		return;
2753
2754	raw_local_irq_save(flags);
2755	check_flags(flags);
2756	current->lockdep_recursion = 1;
2757	__lockdep_trace_alloc(gfp_mask, flags);
2758	current->lockdep_recursion = 0;
2759	raw_local_irq_restore(flags);
2760}
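
/*
 * The recursion __lockdep_trace_alloc() is guarding against, in
 * sketch form (fs_mutex and fs_shrink() are hypothetical): holding
 * a filesystem lock across a __GFP_FS allocation whose reclaim path
 * may need that very lock.
 */
#if 0	/* illustrative sketch only */
static DEFINE_MUTEX(fs_mutex);

static void fs_write_path(void)
{
	mutex_lock(&fs_mutex);
	/*
	 * GFP_KERNEL includes __GFP_FS: the allocator may recurse
	 * into fs reclaim from here while fs_mutex is held...
	 */
	(void)kmalloc(4096, GFP_KERNEL);
	mutex_unlock(&fs_mutex);
}

static void fs_shrink(void)		/* called from reclaim */
{
	mutex_lock(&fs_mutex);		/* ...and deadlock right here */
	/* ... */
	mutex_unlock(&fs_mutex);
}

/* the fix: allocate with GFP_NOFS in fs_write_path() */
#endif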
2761
2762static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2763{
2764	/*
2765	 * If this is a non-trylock use in a hardirq or softirq context, then
2766	 * mark the lock as used in these contexts:
2767	 */
2768	if (!hlock->trylock) {
2769		if (hlock->read) {
2770			if (curr->hardirq_context)
2771				if (!mark_lock(curr, hlock,
2772						LOCK_USED_IN_HARDIRQ_READ))
2773					return 0;
2774			if (curr->softirq_context)
2775				if (!mark_lock(curr, hlock,
2776						LOCK_USED_IN_SOFTIRQ_READ))
2777					return 0;
2778		} else {
2779			if (curr->hardirq_context)
2780				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2781					return 0;
2782			if (curr->softirq_context)
2783				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2784					return 0;
2785		}
2786	}
2787	if (!hlock->hardirqs_off) {
2788		if (hlock->read) {
2789			if (!mark_lock(curr, hlock,
2790					LOCK_ENABLED_HARDIRQ_READ))
2791				return 0;
2792			if (curr->softirqs_enabled)
2793				if (!mark_lock(curr, hlock,
2794						LOCK_ENABLED_SOFTIRQ_READ))
2795					return 0;
2796		} else {
2797			if (!mark_lock(curr, hlock,
2798					LOCK_ENABLED_HARDIRQ))
2799				return 0;
2800			if (curr->softirqs_enabled)
2801				if (!mark_lock(curr, hlock,
2802						LOCK_ENABLED_SOFTIRQ))
2803					return 0;
2804		}
2805	}
2806
2807	/*
2808	 * We reuse the irq context infrastructure more broadly as general
2809	 * context-checking code. This tests GFP_FS recursion (a lock taken
2810	 * during reclaim for a GFP_FS allocation is held over a GFP_FS
2811	 * allocation).
2812	 */
2813	if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2814		if (hlock->read) {
2815			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2816				return 0;
2817		} else {
2818			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2819				return 0;
2820		}
2821	}
2822
2823	return 1;
2824}
2825
2826static int separate_irq_context(struct task_struct *curr,
2827		struct held_lock *hlock)
2828{
2829	unsigned int depth = curr->lockdep_depth;
2830
2831	/*
2832	 * Keep track of points where we cross into an interrupt context:
2833	 */
2834	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2835				curr->softirq_context;
2836	if (depth) {
2837		struct held_lock *prev_hlock;
2838
2839		prev_hlock = curr->held_locks + depth-1;
2840		/*
2841		 * If we cross into another context, reset the
2842		 * hash key (this also prevents the checking and the
2843		 * adding of the dependency to 'prev'):
2844		 */
2845		if (prev_hlock->irq_context != hlock->irq_context)
2846			return 1;
2847	}
2848	return 0;
2849}
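
/*
 * The context id computed above, as a two-line model: 0 is process
 * context, 1 softirq, 2 or 3 hardirq (possibly over a softirq);
 * softirq nesting is collapsed to one bit here. Whenever the id
 * changes between neighbouring held locks, a fresh chain starts.
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

static unsigned int irq_context(int in_hardirq, int in_softirq)
{
	return 2 * (in_hardirq ? 1 : 0) + (in_softirq ? 1 : 0);
}

int main(void)
{
	unsigned int prev = irq_context(0, 0);	/* process context */
	unsigned int cur  = irq_context(1, 0);	/* hardirq fired   */

	if (cur != prev)
		printf("context crossed: reset chain key, new chain head\n");
	return 0;
}
#endif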
2850
2851#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
2852
2853static inline
2854int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2855		enum lock_usage_bit new_bit)
2856{
2857	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAGS */
2858	return 1;
2859}
2860
2861static inline int mark_irqflags(struct task_struct *curr,
2862		struct held_lock *hlock)
2863{
2864	return 1;
2865}
2866
2867static inline int separate_irq_context(struct task_struct *curr,
2868		struct held_lock *hlock)
2869{
2870	return 0;
2871}
2872
2873void lockdep_trace_alloc(gfp_t gfp_mask)
2874{
2875}
2876
2877#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
2878
2879/*
2880 * Mark a lock with a usage bit, and validate the state transition:
2881 */
2882static int mark_lock(struct task_struct *curr, struct held_lock *this,
2883			     enum lock_usage_bit new_bit)
2884{
2885	unsigned int new_mask = 1 << new_bit, ret = 1;
2886
2887	/*
2888	 * If already set then do not dirty the cacheline,
2889	 * nor do any checks:
2890	 */
2891	if (likely(hlock_class(this)->usage_mask & new_mask))
2892		return 1;
2893
2894	if (!graph_lock())
2895		return 0;
2896	/*
2897	 * Make sure we didn't race:
2898	 */
2899	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2900		graph_unlock();
2901		return 1;
2902	}
2903
2904	hlock_class(this)->usage_mask |= new_mask;
2905
2906	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2907		return 0;
2908
2909	switch (new_bit) {
2910#define LOCKDEP_STATE(__STATE)			\
2911	case LOCK_USED_IN_##__STATE:		\
2912	case LOCK_USED_IN_##__STATE##_READ:	\
2913	case LOCK_ENABLED_##__STATE:		\
2914	case LOCK_ENABLED_##__STATE##_READ:
2915#include "lockdep_states.h"
2916#undef LOCKDEP_STATE
2917		ret = mark_lock_irq(curr, this, new_bit);
2918		if (!ret)
2919			return 0;
2920		break;
2921	case LOCK_USED:
2922		debug_atomic_dec(nr_unused_locks);
2923		break;
2924	default:
2925		if (!debug_locks_off_graph_unlock())
2926			return 0;
2927		WARN_ON(1);
2928		return 0;
2929	}
2930
2931	graph_unlock();
2932
2933	/*
2934	 * We must printk outside of the graph_lock:
2935	 */
2936	if (ret == 2) {
2937		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2938		print_lock(this);
2939		print_irqtrace_events(curr);
2940		dump_stack();
2941	}
2942
2943	return ret;
2944}
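
/*
 * mark_lock() above is a double-checked update: a lockless fast-path
 * test, then a re-test under the lock before writing. The same shape
 * in a generic pthread sketch (in the kernel the racy read is
 * intentional; plain userspace C would want an atomic load here):
 */
#if 0	/* illustrative sketch only */
#include <pthread.h>

static pthread_mutex_t graph = PTHREAD_MUTEX_INITIALIZER;
static unsigned int usage_mask;

static void mark_bit(unsigned int bit)
{
	unsigned int mask = 1u << bit;

	if (usage_mask & mask)		/* fast path: don't take the lock */
		return;

	pthread_mutex_lock(&graph);
	if (usage_mask & mask) {	/* re-check: we may have raced */
		pthread_mutex_unlock(&graph);
		return;
	}
	usage_mask |= mask;		/* slow path: record and validate */
	pthread_mutex_unlock(&graph);
}

int main(void)
{
	mark_bit(3);
	mark_bit(3);	/* second call stays on the lockless fast path */
	return 0;
}
#endif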
2945
2946/*
2947 * Initialize a lock instance's lock-class mapping info:
2948 */
2949void lockdep_init_map(struct lockdep_map *lock, const char *name,
2950		      struct lock_class_key *key, int subclass)
2951{
2952	int i;
2953
2954	kmemcheck_mark_initialized(lock, sizeof(*lock));
2955
2956	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
2957		lock->class_cache[i] = NULL;
2958
2959#ifdef CONFIG_LOCK_STAT
2960	lock->cpu = raw_smp_processor_id();
2961#endif
2962
2963	/*
2964	 * Can't be having no nameless bastards around this place!
2965	 */
2966	if (DEBUG_LOCKS_WARN_ON(!name)) {
2967		lock->name = "NULL";
2968		return;
2969	}
2970
2971	lock->name = name;
2972
2973	/*
2974	 * No key, no joy, we need to hash something.
2975	 */
2976	if (DEBUG_LOCKS_WARN_ON(!key))
2977		return;
2978	/*
2979	 * Sanity check, the lock-class key must be persistent:
2980	 */
2981	if (!static_obj(key)) {
2982		printk("BUG: key %p not in .data!\n", key);
2983		/*
2984		 * What it says above ^^^^^, I suggest you read it.
2985		 */
2986		DEBUG_LOCKS_WARN_ON(1);
2987		return;
2988	}
2989	lock->key = key;
2990
2991	if (unlikely(!debug_locks))
2992		return;
2993
2994	if (subclass)
2995		register_lock_class(lock, subclass, 1);
2996}
2997EXPORT_SYMBOL_GPL(lockdep_init_map);
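
/*
 * Usage sketch for lockdep_init_map(): the key must pass the
 * static_obj() test above, so it lives in static storage. The
 * struct my_dev and its embedded dep_map are hypothetical.
 */
#if 0	/* illustrative sketch only */
static struct lock_class_key my_dev_key;	/* ok: static, persistent */

static void my_dev_init(struct my_dev *dev)
{
	lockdep_init_map(&dev->dep_map, "my_dev_lock", &my_dev_key, 0);
}

static void my_dev_init_broken(struct my_dev *dev)
{
	struct lock_class_key stack_key;	/* bad: dies with the frame */

	/* triggers the "BUG: key %p not in .data!" warning above */
	lockdep_init_map(&dev->dep_map, "my_dev_lock", &stack_key, 0);
}
#endif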
2998
2999struct lock_class_key __lockdep_no_validate__;
3000
3001/*
3002 * This gets called for every mutex_lock*()/spin_lock*() operation.
3003 * We maintain the dependency maps and validate the locking attempt:
3004 */
3005static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3006			  int trylock, int read, int check, int hardirqs_off,
3007			  struct lockdep_map *nest_lock, unsigned long ip,
3008			  int references)
3009{
3010	struct task_struct *curr = current;
3011	struct lock_class *class = NULL;
3012	struct held_lock *hlock;
3013	unsigned int depth, id;
3014	int chain_head = 0;
3015	int class_idx;
3016	u64 chain_key;
3017
3018	if (!prove_locking)
3019		check = 1;
3020
3021	if (unlikely(!debug_locks))
3022		return 0;
3023
3024	/*
3025	 * Lockdep should run with IRQs disabled, otherwise we could
3026	 * get an interrupt which would want to take locks, which would
3027	 * end up in lockdep and have you got a head-ache already?
3028	 */
3029	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3030		return 0;
3031
3032	if (lock->key == &__lockdep_no_validate__)
3033		check = 1;
3034
3035	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3036		class = lock->class_cache[subclass];
3037	/*
3038	 * Not cached?
3039	 */
3040	if (unlikely(!class)) {
3041		class = register_lock_class(lock, subclass, 0);
3042		if (!class)
3043			return 0;
3044	}
3045	atomic_inc((atomic_t *)&class->ops);
3046	if (very_verbose(class)) {
3047		printk("\nacquire class [%p] %s", class->key, class->name);
3048		if (class->name_version > 1)
3049			printk("#%d", class->name_version);
3050		printk("\n");
3051		dump_stack();
3052	}
3053
3054	/*
3055	 * Add the lock to the list of currently held locks.
3056	 * (we dont increase the depth just yet, up until the
3057	 * dependency checks are done)
3058	 */
3059	depth = curr->lockdep_depth;
3060	/*
3061	 * Ran out of static storage for our per-task lock stack again have we?
3062	 */
3063	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3064		return 0;
3065
3066	class_idx = class - lock_classes + 1;
3067
3068	if (depth) {
3069		hlock = curr->held_locks + depth - 1;
3070		if (hlock->class_idx == class_idx && nest_lock) {
3071			if (hlock->references)
3072				hlock->references++;
3073			else
3074				hlock->references = 2;
3075
3076			return 1;
3077		}
3078	}
3079
3080	hlock = curr->held_locks + depth;
3081	/*
3082	 * Plain impossible, we just registered it and checked it weren't no
3083	 * NULL like.. I bet this mushroom I ate was good!
3084	 */
3085	if (DEBUG_LOCKS_WARN_ON(!class))
3086		return 0;
3087	hlock->class_idx = class_idx;
3088	hlock->acquire_ip = ip;
3089	hlock->instance = lock;
3090	hlock->nest_lock = nest_lock;
3091	hlock->trylock = trylock;
3092	hlock->read = read;
3093	hlock->check = check;
3094	hlock->hardirqs_off = !!hardirqs_off;
3095	hlock->references = references;
3096#ifdef CONFIG_LOCK_STAT
3097	hlock->waittime_stamp = 0;
3098	hlock->holdtime_stamp = lockstat_clock();
3099#endif
3100
3101	if (check == 2 && !mark_irqflags(curr, hlock))
3102		return 0;
3103
3104	/* mark it as used: */
3105	if (!mark_lock(curr, hlock, LOCK_USED))
3106		return 0;
3107
3108	/*
3109	 * Calculate the chain hash: it's the combined hash of all the
3110	 * lock keys along the dependency chain. We save the hash value
3111	 * at every step so that we can get the current hash easily
3112	 * after unlock. The chain hash is then used to cache dependency
3113	 * results.
3114	 *
3115	 * The 'key ID' is the most compact key value with which to drive
3116	 * the hash, so we use it instead of class->key.
3117	 */
3118	id = class - lock_classes;
3119	/*
3120	 * Whoops, we did it again.. ran straight out of our static allocation.
3121	 */
3122	if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
3123		return 0;
3124
3125	chain_key = curr->curr_chain_key;
3126	if (!depth) {
3127		/*
3128		 * How can we have a chain hash when we ain't got no keys?!
3129		 */
3130		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3131			return 0;
3132		chain_head = 1;
3133	}
3134
3135	hlock->prev_chain_key = chain_key;
3136	if (separate_irq_context(curr, hlock)) {
3137		chain_key = 0;
3138		chain_head = 1;
3139	}
3140	chain_key = iterate_chain_key(chain_key, id);
3141
3142	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3143		return 0;
3144
3145	curr->curr_chain_key = chain_key;
3146	curr->lockdep_depth++;
3147	check_chain_key(curr);
3148#ifdef CONFIG_DEBUG_LOCKDEP
3149	if (unlikely(!debug_locks))
3150		return 0;
3151#endif
3152	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3153		debug_locks_off();
3154		printk("BUG: MAX_LOCK_DEPTH too low!\n");
3155		printk("turning off the locking correctness validator.\n");
3156		dump_stack();
3157		return 0;
3158	}
3159
3160	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3161		max_lockdep_depth = curr->lockdep_depth;
3162
3163	return 1;
3164}
3165
3166static int
3167print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3168			   unsigned long ip)
3169{
3170	if (!debug_locks_off())
3171		return 0;
3172	if (debug_locks_silent)
3173		return 0;
3174
3175	printk("\n");
3176	printk("=====================================\n");
3177	printk("[ BUG: bad unlock balance detected! ]\n");
3178	print_kernel_ident();
3179	printk("-------------------------------------\n");
3180	printk("%s/%d is trying to release lock (",
3181		curr->comm, task_pid_nr(curr));
3182	print_lockdep_cache(lock);
3183	printk(") at:\n");
3184	print_ip_sym(ip);
3185	printk("but there are no more locks to release!\n");
3186	printk("\nother info that might help us debug this:\n");
3187	lockdep_print_held_locks(curr);
3188
3189	printk("\nstack backtrace:\n");
3190	dump_stack();
3191
3192	return 0;
3193}
3194
3195/*
3196 * Common debugging checks for both nested and non-nested unlock:
3197 */
3198static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
3199			unsigned long ip)
3200{
3201	if (unlikely(!debug_locks))
3202		return 0;
3203	/*
3204	 * Lockdep should run with IRQs disabled, recursion, head-ache, etc..
3205	 */
3206	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3207		return 0;
3208
3209	if (curr->lockdep_depth <= 0)
3210		return print_unlock_inbalance_bug(curr, lock, ip);
3211
3212	return 1;
3213}
3214
3215static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
3216{
3217	if (hlock->instance == lock)
3218		return 1;
3219
3220	if (hlock->references) {
3221		struct lock_class *class = lock->class_cache[0];
3222
3223		if (!class)
3224			class = look_up_lock_class(lock, 0);
3225
3226		/*
3227		 * If look_up_lock_class() failed to find a class, we're trying
3228		 * to test if we hold a lock that has never yet been acquired.
3229		 * Clearly if the lock hasn't been acquired _ever_, we're not
3230		 * holding it either, so report failure.
3231		 */
3232		if (!class)
3233			return 0;
3234
3235		/*
3236		 * References, but not a lock we're actually ref-counting?
3237		 * State got messed up, follow the sites that change ->references
3238		 * and try to make sense of it.
3239		 */
3240		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3241			return 0;
3242
3243		if (hlock->class_idx == class - lock_classes + 1)
3244			return 1;
3245	}
3246
3247	return 0;
3248}
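
/*
 * A model of the reference counting that match_held_lock() consults:
 * re-acquiring the same class under a nest_lock bumps ->references
 * on the existing top entry instead of pushing a new one (see the
 * depth check near the top of __lock_acquire() above).
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

struct hl { int class_idx; int references; };

static struct hl stack[16];
static int depth;

static void acquire(int class_idx, int nested)
{
	if (depth && nested && stack[depth - 1].class_idx == class_idx) {
		struct hl *top = &stack[depth - 1];

		/* 0 means a plain single hold, so the first bump is to 2 */
		top->references = top->references ? top->references + 1 : 2;
		return;
	}
	stack[depth].class_idx = class_idx;
	stack[depth].references = 0;
	depth++;
}

int main(void)
{
	acquire(5, 0);
	acquire(5, 1);		/* same class, nested: no new entry */
	acquire(5, 1);
	printf("depth=%d refs=%d\n", depth, stack[0].references);
	return 0;		/* prints: depth=1 refs=3 */
}
#endif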
3249
3250static int
3251__lock_set_class(struct lockdep_map *lock, const char *name,
3252		 struct lock_class_key *key, unsigned int subclass,
3253		 unsigned long ip)
3254{
3255	struct task_struct *curr = current;
3256	struct held_lock *hlock, *prev_hlock;
3257	struct lock_class *class;
3258	unsigned int depth;
3259	int i;
3260
3261	depth = curr->lockdep_depth;
3262	/*
3263	 * This function is about (re)setting the class of a held lock,
3264	 * yet we're not actually holding any locks. Naughty user!
3265	 */
3266	if (DEBUG_LOCKS_WARN_ON(!depth))
3267		return 0;
3268
3269	prev_hlock = NULL;
3270	for (i = depth-1; i >= 0; i--) {
3271		hlock = curr->held_locks + i;
3272		/*
3273		 * We must not cross into another context:
3274		 */
3275		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3276			break;
3277		if (match_held_lock(hlock, lock))
3278			goto found_it;
3279		prev_hlock = hlock;
3280	}
3281	return print_unlock_inbalance_bug(curr, lock, ip);
3282
3283found_it:
3284	lockdep_init_map(lock, name, key, 0);
3285	class = register_lock_class(lock, subclass, 0);
3286	hlock->class_idx = class - lock_classes + 1;
3287
3288	curr->lockdep_depth = i;
3289	curr->curr_chain_key = hlock->prev_chain_key;
3290
3291	for (; i < depth; i++) {
3292		hlock = curr->held_locks + i;
3293		if (!__lock_acquire(hlock->instance,
3294			hlock_class(hlock)->subclass, hlock->trylock,
3295				hlock->read, hlock->check, hlock->hardirqs_off,
3296				hlock->nest_lock, hlock->acquire_ip,
3297				hlock->references))
3298			return 0;
3299	}
3300
3301	/*
3302	 * I took it apart and put it back together again, except now I have
3303	 * these 'spare' parts.. where shall I put them.
3304	 */
3305	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3306		return 0;
3307	return 1;
3308}
3309
3310/*
3311 * Remove the lock from the list of currently held locks in a
3312 * potentially non-nested (out of order) manner. This is a
3313 * relatively rare operation, as all the unlock APIs default
3314 * to nested mode (which uses lock_release()):
3315 */
3316static int
3317lock_release_non_nested(struct task_struct *curr,
3318			struct lockdep_map *lock, unsigned long ip)
3319{
3320	struct held_lock *hlock, *prev_hlock;
3321	unsigned int depth;
3322	int i;
3323
3324	/*
3325	 * Check whether the lock exists in the current stack
3326	 * of held locks:
3327	 */
3328	depth = curr->lockdep_depth;
3329	/*
3330	 * So we're all set to release this lock.. wait what lock? We don't
3331	 * own any locks, you've been drinking again?
3332	 */
3333	if (DEBUG_LOCKS_WARN_ON(!depth))
3334		return 0;
3335
3336	prev_hlock = NULL;
3337	for (i = depth-1; i >= 0; i--) {
3338		hlock = curr->held_locks + i;
3339		/*
3340		 * We must not cross into another context:
3341		 */
3342		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3343			break;
3344		if (match_held_lock(hlock, lock))
3345			goto found_it;
3346		prev_hlock = hlock;
3347	}
3348	return print_unlock_inbalance_bug(curr, lock, ip);
3349
3350found_it:
3351	if (hlock->instance == lock)
3352		lock_release_holdtime(hlock);
3353
3354	if (hlock->references) {
3355		hlock->references--;
3356		if (hlock->references) {
3357			/*
3358			 * We had, and after removing one, still have
3359			 * references, the current lock stack is still
3360			 * valid. We're done!
3361			 */
3362			return 1;
3363		}
3364	}
3365
3366	/*
3367	 * We have the right lock to unlock, 'hlock' points to it.
3368	 * Now we remove it from the stack, and add back the other
3369	 * entries (if any), recalculating the hash along the way:
3370	 */
3371
3372	curr->lockdep_depth = i;
3373	curr->curr_chain_key = hlock->prev_chain_key;
3374
3375	for (i++; i < depth; i++) {
3376		hlock = curr->held_locks + i;
3377		if (!__lock_acquire(hlock->instance,
3378			hlock_class(hlock)->subclass, hlock->trylock,
3379				hlock->read, hlock->check, hlock->hardirqs_off,
3380				hlock->nest_lock, hlock->acquire_ip,
3381				hlock->references))
3382			return 0;
3383	}
3384
3385	/*
3386	 * We had N bottles of beer on the wall, we drank one, but now
3387	 * there's not N-1 bottles of beer left on the wall...
3388	 */
3389	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3390		return 0;
3391	return 1;
3392}
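
/*
 * The out-of-order release above, as a toy model: truncate the stack
 * at the released entry, then replay everything that sat on top of
 * it (the real code also re-runs the full validation and rebuilds
 * the chain keys while replaying).
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

static int stack[8] = { 1, 2, 3, 4 };	/* class ids, bottom-up */
static int depth = 4;

static void release_middle(int i)
{
	int old_depth = depth;
	int j;

	depth = i;				/* truncate at entry i */
	for (j = i + 1; j < old_depth; j++)
		stack[depth++] = stack[j];	/* "re-acquire" the rest */
}

int main(void)
{
	release_middle(1);	/* drop class 2 from the middle */
	printf("depth=%d: %d %d %d\n", depth, stack[0], stack[1], stack[2]);
	return 0;		/* prints: depth=3: 1 3 4 */
}
#endif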
3393
3394/*
3395 * Remove the lock from the list of currently held locks - this gets
3396 * called on mutex_unlock()/spin_unlock*() (or on a failed
3397 * mutex_lock_interruptible()). This is done for unlocks that nest
3398 * perfectly. (i.e. the current top of the lock-stack is unlocked)
3399 */
3400static int lock_release_nested(struct task_struct *curr,
3401			       struct lockdep_map *lock, unsigned long ip)
3402{
3403	struct held_lock *hlock;
3404	unsigned int depth;
3405
3406	/*
3407	 * Pop off the top of the lock stack:
3408	 */
3409	depth = curr->lockdep_depth - 1;
3410	hlock = curr->held_locks + depth;
3411
3412	/*
3413	 * Is the unlock non-nested:
3414	 */
3415	if (hlock->instance != lock || hlock->references)
3416		return lock_release_non_nested(curr, lock, ip);
3417	curr->lockdep_depth--;
3418
3419	/*
3420	 * No more locks, but somehow we've got hash left over, who left it?
3421	 */
3422	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
3423		return 0;
3424
3425	curr->curr_chain_key = hlock->prev_chain_key;
3426
3427	lock_release_holdtime(hlock);
3428
3429#ifdef CONFIG_DEBUG_LOCKDEP
3430	hlock->prev_chain_key = 0;
3431	hlock->class_idx = 0;
3432	hlock->acquire_ip = 0;
3433	hlock->irq_context = 0;
3434#endif
3435	return 1;
3436}
3437
3438/*
3439 * Remove the lock from the list of currently held locks - this gets
3440 * called on mutex_unlock()/spin_unlock*() (or on a failed
3441 * mutex_lock_interruptible()). It dispatches to the nested fast
3442 * path above or to the non-nested (out of order) slow path.
3443 */
3444static void
3445__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3446{
3447	struct task_struct *curr = current;
3448
3449	if (!check_unlock(curr, lock, ip))
3450		return;
3451
3452	if (nested) {
3453		if (!lock_release_nested(curr, lock, ip))
3454			return;
3455	} else {
3456		if (!lock_release_non_nested(curr, lock, ip))
3457			return;
3458	}
3459
3460	check_chain_key(curr);
3461}
3462
3463static int __lock_is_held(struct lockdep_map *lock)
3464{
3465	struct task_struct *curr = current;
3466	int i;
3467
3468	for (i = 0; i < curr->lockdep_depth; i++) {
3469		struct held_lock *hlock = curr->held_locks + i;
3470
3471		if (match_held_lock(hlock, lock))
3472			return 1;
3473	}
3474
3475	return 0;
3476}
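
/*
 * A typical use of lock_is_held(): asserting a caller's locking
 * contract, in the spirit of lockdep_assert_held(). The my_table
 * structure and its members are hypothetical.
 */
#if 0	/* illustrative sketch only */
static void table_insert(struct my_table *t, struct item *it)
{
	/* the caller must hold t's lock; complain loudly if not */
	WARN_ON(debug_locks && !lock_is_held(&t->dep_map));

	list_add(&it->node, &t->items);
}
#endif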
3477
3478/*
3479 * Check whether we follow the irq-flags state precisely:
3480 */
3481static void check_flags(unsigned long flags)
3482{
3483#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3484    defined(CONFIG_TRACE_IRQFLAGS)
3485	if (!debug_locks)
3486		return;
3487
3488	if (irqs_disabled_flags(flags)) {
3489		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3490			printk("possible reason: unannotated irqs-off.\n");
3491		}
3492	} else {
3493		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3494			printk("possible reason: unannotated irqs-on.\n");
3495		}
3496	}
3497
3498	/*
3499	 * We dont accurately track softirq state in e.g.
3500	 * hardirq contexts (such as on 4KSTACKS), so only
3501	 * check if not in hardirq contexts:
3502	 */
3503	if (!hardirq_count()) {
3504		if (softirq_count()) {
3505			/* like the above, but with softirqs */
3506			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3507		} else {
3508			/* lick the above, does it taste good? */
3509			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3510		}
3511	}
3512
3513	if (!debug_locks)
3514		print_irqtrace_events(current);
3515#endif
3516}
3517
3518void lock_set_class(struct lockdep_map *lock, const char *name,
3519		    struct lock_class_key *key, unsigned int subclass,
3520		    unsigned long ip)
3521{
3522	unsigned long flags;
3523
3524	if (unlikely(current->lockdep_recursion))
3525		return;
3526
3527	raw_local_irq_save(flags);
3528	current->lockdep_recursion = 1;
3529	check_flags(flags);
3530	if (__lock_set_class(lock, name, key, subclass, ip))
3531		check_chain_key(current);
3532	current->lockdep_recursion = 0;
3533	raw_local_irq_restore(flags);
3534}
3535EXPORT_SYMBOL_GPL(lock_set_class);
3536
3537/*
3538 * We are not always called with irqs disabled - do that here,
3539 * and also avoid lockdep recursion:
3540 */
3541void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3542			  int trylock, int read, int check,
3543			  struct lockdep_map *nest_lock, unsigned long ip)
3544{
3545	unsigned long flags;
3546
3547	if (unlikely(current->lockdep_recursion))
3548		return;
3549
3550	raw_local_irq_save(flags);
3551	check_flags(flags);
3552
3553	current->lockdep_recursion = 1;
3554	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3555	__lock_acquire(lock, subclass, trylock, read, check,
3556		       irqs_disabled_flags(flags), nest_lock, ip, 0);
3557	current->lockdep_recursion = 0;
3558	raw_local_irq_restore(flags);
3559}
3560EXPORT_SYMBOL_GPL(lock_acquire);
3561
3562void lock_release(struct lockdep_map *lock, int nested,
3563			  unsigned long ip)
3564{
3565	unsigned long flags;
3566
3567	if (unlikely(current->lockdep_recursion))
3568		return;
3569
3570	raw_local_irq_save(flags);
3571	check_flags(flags);
3572	current->lockdep_recursion = 1;
3573	trace_lock_release(lock, ip);
3574	__lock_release(lock, nested, ip);
3575	current->lockdep_recursion = 0;
3576	raw_local_irq_restore(flags);
3577}
3578EXPORT_SYMBOL_GPL(lock_release);
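
/*
 * How a custom locking primitive would hook into the two entry
 * points above, using the signatures as defined in this file;
 * struct my_lock and its spinning are hypothetical.
 */
#if 0	/* illustrative sketch only */
struct my_lock {
	atomic_t		owned;
	struct lockdep_map	dep_map;
};

#define my_lock_init(l)						\
do {								\
	static struct lock_class_key __key;			\
								\
	atomic_set(&(l)->owned, 0);				\
	lockdep_init_map(&(l)->dep_map, #l, &__key, 0);		\
} while (0)

static void my_lock_lock(struct my_lock *l)
{
	/* subclass=0, trylock=0, read=0, check=2, no nest_lock */
	lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
	while (atomic_cmpxchg(&l->owned, 0, 1) != 0)
		cpu_relax();
}

static void my_lock_unlock(struct my_lock *l)
{
	lock_release(&l->dep_map, 1, _RET_IP_);		/* nested unlock */
	atomic_set(&l->owned, 0);
}
#endif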
3579
3580int lock_is_held(struct lockdep_map *lock)
3581{
3582	unsigned long flags;
3583	int ret = 0;
3584
3585	if (unlikely(current->lockdep_recursion))
3586		return 1; /* avoid false negative lockdep_assert_held() */
3587
3588	raw_local_irq_save(flags);
3589	check_flags(flags);
3590
3591	current->lockdep_recursion = 1;
3592	ret = __lock_is_held(lock);
3593	current->lockdep_recursion = 0;
3594	raw_local_irq_restore(flags);
3595
3596	return ret;
3597}
3598EXPORT_SYMBOL_GPL(lock_is_held);
3599
3600void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
3601{
3602	current->lockdep_reclaim_gfp = gfp_mask;
3603}
3604
3605void lockdep_clear_current_reclaim_state(void)
3606{
3607	current->lockdep_reclaim_gfp = 0;
3608}
3609
3610#ifdef CONFIG_LOCK_STAT
3611static int
3612print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
3613			   unsigned long ip)
3614{
3615	if (!debug_locks_off())
3616		return 0;
3617	if (debug_locks_silent)
3618		return 0;
3619
3620	printk("\n");
3621	printk("=================================\n");
3622	printk("[ BUG: bad contention detected! ]\n");
3623	print_kernel_ident();
3624	printk("---------------------------------\n");
3625	printk("%s/%d is trying to contend lock (",
3626		curr->comm, task_pid_nr(curr));
3627	print_lockdep_cache(lock);
3628	printk(") at:\n");
3629	print_ip_sym(ip);
3630	printk("but there are no locks held!\n");
3631	printk("\nother info that might help us debug this:\n");
3632	lockdep_print_held_locks(curr);
3633
3634	printk("\nstack backtrace:\n");
3635	dump_stack();
3636
3637	return 0;
3638}
3639
3640static void
3641__lock_contended(struct lockdep_map *lock, unsigned long ip)
3642{
3643	struct task_struct *curr = current;
3644	struct held_lock *hlock, *prev_hlock;
3645	struct lock_class_stats *stats;
3646	unsigned int depth;
3647	int i, contention_point, contending_point;
3648
3649	depth = curr->lockdep_depth;
3650	/*
3651	 * Whee, we contended on this lock, except it seems we're not
3652	 * actually trying to acquire anything much at all..
3653	 */
3654	if (DEBUG_LOCKS_WARN_ON(!depth))
3655		return;
3656
3657	prev_hlock = NULL;
3658	for (i = depth-1; i >= 0; i--) {
3659		hlock = curr->held_locks + i;
3660		/*
3661		 * We must not cross into another context:
3662		 */
3663		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3664			break;
3665		if (match_held_lock(hlock, lock))
3666			goto found_it;
3667		prev_hlock = hlock;
3668	}
3669	print_lock_contention_bug(curr, lock, ip);
3670	return;
3671
3672found_it:
3673	if (hlock->instance != lock)
3674		return;
3675
3676	hlock->waittime_stamp = lockstat_clock();
3677
3678	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3679	contending_point = lock_point(hlock_class(hlock)->contending_point,
3680				      lock->ip);
3681
3682	stats = get_lock_stats(hlock_class(hlock));
3683	if (contention_point < LOCKSTAT_POINTS)
3684		stats->contention_point[contention_point]++;
3685	if (contending_point < LOCKSTAT_POINTS)
3686		stats->contending_point[contending_point]++;
3687	if (lock->cpu != smp_processor_id())
3688		stats->bounces[bounce_contended + !!hlock->read]++;
3689	put_lock_stats(stats);
3690}
3691
3692static void
3693__lock_acquired(struct lockdep_map *lock, unsigned long ip)
3694{
3695	struct task_struct *curr = current;
3696	struct held_lock *hlock, *prev_hlock;
3697	struct lock_class_stats *stats;
3698	unsigned int depth;
3699	u64 now, waittime = 0;
3700	int i, cpu;
3701
3702	depth = curr->lockdep_depth;
3703	/*
3704	 * Yay, we acquired ownership of this lock we didn't try to
3705	 * acquire, how the heck did that happen?
3706	 */
3707	if (DEBUG_LOCKS_WARN_ON(!depth))
3708		return;
3709
3710	prev_hlock = NULL;
3711	for (i = depth-1; i >= 0; i--) {
3712		hlock = curr->held_locks + i;
3713		/*
3714		 * We must not cross into another context:
3715		 */
3716		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3717			break;
3718		if (match_held_lock(hlock, lock))
3719			goto found_it;
3720		prev_hlock = hlock;
3721	}
3722	print_lock_contention_bug(curr, lock, _RET_IP_);
3723	return;
3724
3725found_it:
3726	if (hlock->instance != lock)
3727		return;
3728
3729	cpu = smp_processor_id();
3730	if (hlock->waittime_stamp) {
3731		now = lockstat_clock();
3732		waittime = now - hlock->waittime_stamp;
3733		hlock->holdtime_stamp = now;
3734	}
3735
3736	trace_lock_acquired(lock, ip);
3737
3738	stats = get_lock_stats(hlock_class(hlock));
3739	if (waittime) {
3740		if (hlock->read)
3741			lock_time_inc(&stats->read_waittime, waittime);
3742		else
3743			lock_time_inc(&stats->write_waittime, waittime);
3744	}
3745	if (lock->cpu != cpu)
3746		stats->bounces[bounce_acquired + !!hlock->read]++;
3747	put_lock_stats(stats);
3748
3749	lock->cpu = cpu;
3750	lock->ip = ip;
3751}
3752
3753void lock_contended(struct lockdep_map *lock, unsigned long ip)
3754{
3755	unsigned long flags;
3756
3757	if (unlikely(!lock_stat))
3758		return;
3759
3760	if (unlikely(current->lockdep_recursion))
3761		return;
3762
3763	raw_local_irq_save(flags);
3764	check_flags(flags);
3765	current->lockdep_recursion = 1;
3766	trace_lock_contended(lock, ip);
3767	__lock_contended(lock, ip);
3768	current->lockdep_recursion = 0;
3769	raw_local_irq_restore(flags);
3770}
3771EXPORT_SYMBOL_GPL(lock_contended);
3772
3773void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3774{
3775	unsigned long flags;
3776
3777	if (unlikely(!lock_stat))
3778		return;
3779
3780	if (unlikely(current->lockdep_recursion))
3781		return;
3782
3783	raw_local_irq_save(flags);
3784	check_flags(flags);
3785	current->lockdep_recursion = 1;
3786	__lock_acquired(lock, ip);
3787	current->lockdep_recursion = 0;
3788	raw_local_irq_restore(flags);
3789}
3790EXPORT_SYMBOL_GPL(lock_acquired);
3791#endif
3792
3793/*
3794 * Used by the testsuite, sanitize the validator state
3795 * after a simulated failure:
3796 */
3797
3798void lockdep_reset(void)
3799{
3800	unsigned long flags;
3801	int i;
3802
3803	raw_local_irq_save(flags);
3804	current->curr_chain_key = 0;
3805	current->lockdep_depth = 0;
3806	current->lockdep_recursion = 0;
3807	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3808	nr_hardirq_chains = 0;
3809	nr_softirq_chains = 0;
3810	nr_process_chains = 0;
3811	debug_locks = 1;
3812	for (i = 0; i < CHAINHASH_SIZE; i++)
3813		INIT_LIST_HEAD(chainhash_table + i);
3814	raw_local_irq_restore(flags);
3815}
3816
3817static void zap_class(struct lock_class *class)
3818{
3819	int i;
3820
3821	/*
3822	 * Remove all dependencies this lock is
3823	 * involved in:
3824	 */
3825	for (i = 0; i < nr_list_entries; i++) {
3826		if (list_entries[i].class == class)
3827			list_del_rcu(&list_entries[i].entry);
3828	}
3829	/*
3830	 * Unhash the class and remove it from the all_lock_classes list:
3831	 */
3832	list_del_rcu(&class->hash_entry);
3833	list_del_rcu(&class->lock_entry);
3834
3835	class->key = NULL;
3836}
3837
3838static inline int within(const void *addr, void *start, unsigned long size)
3839{
3840	return addr >= start && addr < start + size;
3841}
3842
3843void lockdep_free_key_range(void *start, unsigned long size)
3844{
3845	struct lock_class *class, *next;
3846	struct list_head *head;
3847	unsigned long flags;
3848	int i;
3849	int locked;
3850
3851	raw_local_irq_save(flags);
3852	locked = graph_lock();
3853
3854	/*
3855	 * Unhash all classes that were created by this module:
3856	 */
3857	for (i = 0; i < CLASSHASH_SIZE; i++) {
3858		head = classhash_table + i;
3859		if (list_empty(head))
3860			continue;
3861		list_for_each_entry_safe(class, next, head, hash_entry) {
3862			if (within(class->key, start, size))
3863				zap_class(class);
3864			else if (within(class->name, start, size))
3865				zap_class(class);
3866		}
3867	}
3868
3869	if (locked)
3870		graph_unlock();
3871	raw_local_irq_restore(flags);
3872}
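/*
 * Hedged call-site sketch: the primary user of
 * lockdep_free_key_range() is the module loader, which must drop
 * every class whose key or name points into an image about to be
 * freed - conceptually (simplified, field names as assumed from
 * kernel/module.c of this era):
 *
 *	lockdep_free_key_range(mod->module_core, mod->core_size);
 *	lockdep_free_key_range(mod->module_init, mod->init_size);
 */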
3873
3874void lockdep_reset_lock(struct lockdep_map *lock)
3875{
3876	struct lock_class *class, *next;
3877	struct list_head *head;
3878	unsigned long flags;
3879	int i, j;
3880	int locked;
3881
3882	raw_local_irq_save(flags);
3883
3884	/*
3885	 * Remove all classes this lock might have:
3886	 */
3887	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3888		/*
3889		 * If the class exists we look it up and zap it:
3890		 */
3891		class = look_up_lock_class(lock, j);
3892		if (class)
3893			zap_class(class);
3894	}
3895	/*
3896	 * Debug check: in the end all mapped classes should
3897	 * be gone.
3898	 */
3899	locked = graph_lock();
3900	for (i = 0; i < CLASSHASH_SIZE; i++) {
3901		head = classhash_table + i;
3902		if (list_empty(head))
3903			continue;
3904		list_for_each_entry_safe(class, next, head, hash_entry) {
3905			int match = 0;
3906
3907			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
3908				match |= class == lock->class_cache[j];
3909
3910			if (unlikely(match)) {
3911				if (debug_locks_off_graph_unlock()) {
3912					/*
3913					 * We just zapped every class this lock maps to - how can the cache still match?
3914					 */
3915					WARN_ON(1);
3916				}
3917				goto out_restore;
3918			}
3919		}
3920	}
3921	if (locked)
3922		graph_unlock();
3923
3924out_restore:
3925	raw_local_irq_restore(flags);
3926}
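/*
 * Hedged usage sketch (illustrative, not a real caller): a subsystem
 * that recycles an object embedding a lock can use
 * lockdep_reset_lock() so stale cached classes are forgotten before
 * the lock is re-initialized with a new key:
 */
#if 0	/* illustration only; dep_map exists under CONFIG_DEBUG_LOCK_ALLOC */
static void recycle_object_lock(spinlock_t *lock)
{
	lockdep_reset_lock(&lock->dep_map);	/* forget old classes */
	spin_lock_init(lock);			/* fresh class on next use */
}
#endif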
3927
3928void lockdep_init(void)
3929{
3930	int i;
3931
3932	/*
3933	 * Some architectures have their own start_kernel()
3934	 * code which calls lockdep_init(), while we also
3935	 * call lockdep_init() from start_kernel() itself,
3936	 * and we want to initialize the hashes only once:
3937	 */
3938	if (lockdep_initialized)
3939		return;
3940
3941	for (i = 0; i < CLASSHASH_SIZE; i++)
3942		INIT_LIST_HEAD(classhash_table + i);
3943
3944	for (i = 0; i < CHAINHASH_SIZE; i++)
3945		INIT_LIST_HEAD(chainhash_table + i);
3946
3947	lockdep_initialized = 1;
3948}
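/*
 * Call-site sketch (assumed, per init/main.c of this era):
 * lockdep_init() runs first thing in start_kernel(), before the
 * first lock can be taken, and the lockdep_initialized guard above
 * makes any earlier architecture-specific call harmless:
 *
 *	asmlinkage void __init start_kernel(void)
 *	{
 *		...
 *		lockdep_init();
 *		...
 *	}
 */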
3949
3950void __init lockdep_info(void)
3951{
3952	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3953
3954	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
3955	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
3956	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
3957	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
3958	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
3959	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
3960	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
3961
3962	printk(" memory used by lock dependency info: %lu kB\n",
3963		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3964		sizeof(struct list_head) * CLASSHASH_SIZE +
3965		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3966		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3967		sizeof(struct list_head) * CHAINHASH_SIZE
3968#ifdef CONFIG_PROVE_LOCKING
3969		+ sizeof(struct circular_queue)
3970#endif
3971		) / 1024
3972		);
3973
3974	printk(" per task-struct memory footprint: %lu bytes\n",
3975		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3976
3977#ifdef CONFIG_DEBUG_LOCKDEP
3978	if (lockdep_init_error) {
3979		printk("WARNING: lockdep init error! lock-%s was acquired "
3980			"before lockdep_init\n", lock_init_error);
3981		printk("Call stack leading to lockdep invocation was:\n");
3982		print_stack_trace(&lockdep_init_trace, 0);
3983	}
3984#endif
3985}
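/*
 * Worked instance of the formula above, with purely illustrative
 * (assumed) numbers: if MAX_LOCKDEP_KEYS were 8191 and
 * sizeof(struct lock_class) were 192 bytes, the class table alone
 * would contribute 8191 * 192 / 1024 ~= 1536 kB; each remaining
 * term adds its own table the same way before the sum is printed.
 */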
3986
3987static void
3988print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3989		     const void *mem_to, struct held_lock *hlock)
3990{
3991	if (!debug_locks_off())
3992		return;
3993	if (debug_locks_silent)
3994		return;
3995
3996	printk("\n");
3997	printk("=========================\n");
3998	printk("[ BUG: held lock freed! ]\n");
3999	print_kernel_ident();
4000	printk("-------------------------\n");
4001	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
4002		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
4003	print_lock(hlock);
4004	lockdep_print_held_locks(curr);
4005
4006	printk("\nstack backtrace:\n");
4007	dump_stack();
4008}
4009
4010static inline int not_in_range(const void *mem_from, unsigned long mem_len,
4011				const void *lock_from, unsigned long lock_len)
4012{
4013	return lock_from + lock_len <= mem_from ||
4014		mem_from + mem_len <= lock_from;
4015}
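/*
 * Worked example of the interval test above: freeing 64 bytes at
 * 0x1000 covers [0x1000, 0x1040); a 16-byte lock at 0x1038 covers
 * [0x1038, 0x1048). Neither 0x1048 <= 0x1000 nor 0x1040 <= 0x1038
 * holds, so not_in_range() returns 0: the ranges overlap and the
 * free is reported.
 */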
4016
4017/*
4018 * Called when kernel memory is freed (or unmapped), or if a lock
4019 * is destroyed or reinitialized - this code checks whether there is
4020 * any held lock in the memory range [mem_from, mem_from + mem_len):
4021 */
4022void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4023{
4024	struct task_struct *curr = current;
4025	struct held_lock *hlock;
4026	unsigned long flags;
4027	int i;
4028
4029	if (unlikely(!debug_locks))
4030		return;
4031
4032	local_irq_save(flags);
4033	for (i = 0; i < curr->lockdep_depth; i++) {
4034		hlock = curr->held_locks + i;
4035
4036		if (not_in_range(mem_from, mem_len, hlock->instance,
4037					sizeof(*hlock->instance)))
4038			continue;
4039
4040		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
4041		break;
4042	}
4043	local_irq_restore(flags);
4044}
4045EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
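/*
 * Hedged call-site sketch: the memory allocators hook this into
 * their free paths so a held lock inside freed memory is caught at
 * the moment of the free - conceptually (exact hook points vary by
 * allocator and kernel version):
 *
 *	debug_check_no_locks_freed(objp, object_size);		(slab free)
 *	debug_check_no_locks_freed(page_address(page),
 *				   PAGE_SIZE << order);		(page free)
 */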
4046
4047static void print_held_locks_bug(struct task_struct *curr)
4048{
4049	if (!debug_locks_off())
4050		return;
4051	if (debug_locks_silent)
4052		return;
4053
4054	printk("\n");
4055	printk("=====================================\n");
4056	printk("[ BUG: lock held at task exit time! ]\n");
4057	print_kernel_ident();
4058	printk("-------------------------------------\n");
4059	printk("%s/%d is exiting with locks still held!\n",
4060		curr->comm, task_pid_nr(curr));
4061	lockdep_print_held_locks(curr);
4062
4063	printk("\nstack backtrace:\n");
4064	dump_stack();
4065}
4066
4067void debug_check_no_locks_held(struct task_struct *task)
4068{
4069	if (unlikely(task->lockdep_depth > 0))
4070		print_held_locks_bug(task);
4071}
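/*
 * Hedged call-site sketch (per kernel/exit.c of this era): the exit
 * path checks the dying task, so a leaked lock is reported once, at
 * the point of no return:
 *
 *	void do_exit(long code)
 *	{
 *		...
 *		debug_check_no_locks_held(tsk);
 *		...
 *	}
 */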
4072
4073void debug_show_all_locks(void)
4074{
4075	struct task_struct *g, *p;
4076	int count = 10;
4077	int unlock = 1;
4078
4079	if (unlikely(!debug_locks)) {
4080		printk("INFO: lockdep is turned off.\n");
4081		return;
4082	}
4083	printk("\nShowing all locks held in the system:\n");
4084
4085	/*
4086	 * Try to take the tasklist_lock as hard as possible; if we
4087	 * cannot get it within 2 seconds, proceed without it (but keep
4088	 * retrying opportunistically in the loop below). This enables a
4089	 * debug printout even if a tasklist_lock-holding task deadlocks or crashes.
4090	 */
4091retry:
4092	if (!read_trylock(&tasklist_lock)) {
4093		if (count == 10)
4094			printk("hm, tasklist_lock locked, retrying... ");
4095		if (count) {
4096			count--;
4097			printk(" #%d", 10-count);
4098			mdelay(200);
4099			goto retry;
4100		}
4101		printk(" ignoring it.\n");
4102		unlock = 0;
4103	} else {
4104		if (count != 10)
4105			printk(KERN_CONT " locked it.\n");
4106	}
4107
4108	do_each_thread(g, p) {
4109		/*
4110		 * Printing a task's held locks is only reliable
4111		 * if the task is sleeping or if it is the
4112		 * current task:
4113		 */
4114		if (p->state == TASK_RUNNING && p != current)
4115			continue;
4116		if (p->lockdep_depth)
4117			lockdep_print_held_locks(p);
4118		if (!unlock)
4119			if (read_trylock(&tasklist_lock))
4120				unlock = 1;
4121	} while_each_thread(g, p);
4122
4123	printk("\n");
4124	printk("=============================================\n\n");
4125
4126	if (unlock)
4127		read_unlock(&tasklist_lock);
4128}
4129EXPORT_SYMBOL_GPL(debug_show_all_locks);
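/*
 * Hedged trigger sketch: at runtime this is most easily reached via
 * the magic SysRq 'd' handler (drivers/tty/sysrq.c), roughly:
 *
 *	static void sysrq_handle_showlocks(int key)
 *	{
 *		debug_show_all_locks();
 *	}
 */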
4130
4131/*
4132 * Careful: only use this function if you are sure that
4133 * the task cannot run in parallel!
4134 */
4135void debug_show_held_locks(struct task_struct *task)
4136{
4137	if (unlikely(!debug_locks)) {
4138		printk("INFO: lockdep is turned off.\n");
4139		return;
4140	}
4141	lockdep_print_held_locks(task);
4142}
4143EXPORT_SYMBOL_GPL(debug_show_held_locks);
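/*
 * Hedged usage sketch (illustrative; exact call sites vary by
 * version): a plausible caller is a watchdog like the hung-task
 * detector, which samples a task t it believes is blocked - only
 * probabilistically satisfying the "cannot run in parallel" caveat
 * above - and could print:
 *
 *	sched_show_task(t);
 *	debug_show_held_locks(t);
 */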
4144
4145void lockdep_sys_exit(void)
4146{
4147	struct task_struct *curr = current;
4148
4149	if (unlikely(curr->lockdep_depth)) {
4150		if (!debug_locks_off())
4151			return;
4152		printk("\n");
4153		printk("================================================\n");
4154		printk("[ BUG: lock held when returning to user space! ]\n");
4155		print_kernel_ident();
4156		printk("------------------------------------------------\n");
4157		printk("%s/%d is leaving the kernel with locks still held!\n",
4158				curr->comm, task_pid_nr(curr));
4159		lockdep_print_held_locks(curr);
4160	}
4161}
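/*
 * Hedged call-site sketch: architectures invoke this on the
 * return-to-userspace path; e.g. x86 places a LOCKDEP_SYS_EXIT hook
 * in its entry code that ends up calling lockdep_sys_exit(), so a
 * lock leaked across a system call is reported before userspace
 * resumes - conceptually:
 *
 *	... syscall work done ...
 *	LOCKDEP_SYS_EXIT	(asm macro -> lockdep_sys_exit())
 *	return to user mode
 */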
4162
4163void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4164{
4165	struct task_struct *curr = current;
4166
4167#ifndef CONFIG_PROVE_RCU_REPEATEDLY
4168	if (!debug_locks_off())
4169		return;
4170#endif /* #ifndef CONFIG_PROVE_RCU_REPEATEDLY */
4171	/* Note: the following can be executed concurrently, so be careful. */
4172	printk("\n");
4173	printk("===============================\n");
4174	printk("[ INFO: suspicious RCU usage. ]\n");
4175	print_kernel_ident();
4176	printk("-------------------------------\n");
4177	printk("%s:%d %s!\n", file, line, s);
4178	printk("\nother info that might help us debug this:\n\n");
4179	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
4180	       !rcu_lockdep_current_cpu_online()
4181			? "RCU used illegally from offline CPU!\n"
4182			: rcu_is_cpu_idle()
4183				? "RCU used illegally from idle CPU!\n"
4184				: "",
4185	       rcu_scheduler_active, debug_locks);
4186
4187	/*
4188	 * If a CPU is in the RCU-free window in idle (ie: in the section
4189	 * between rcu_idle_enter() and rcu_idle_exit()), then RCU
4190	 * considers that CPU to be in an "extended quiescent state",
4191	 * which means that RCU will be completely ignoring that CPU.
4192	 * Therefore, rcu_read_lock() and friends have absolutely no
4193	 * effect on a CPU running in that state. In other words, even if
4194	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
4195	 * delete data structures out from under it.  RCU really has no
4196	 * choice here: we need to keep an RCU-free window in idle where
4197	 * the CPU may possibly enter into low power mode. This way, CPUs
4198	 * that started a grace period can notice the extended quiescent
4199	 * state; otherwise any grace period would be delayed for as long
4200	 * as we run in the idle task.
4201	 *
4202	 * So complain bitterly if someone does call rcu_read_lock(),
4203	 * rcu_read_lock_bh() and so on from extended quiescent states.
4204	 */
4205	if (rcu_is_cpu_idle())
4206		printk("RCU used illegally from extended quiescent state!\n");
4207
4208	lockdep_print_held_locks(curr);
4209	printk("\nstack backtrace:\n");
4210	dump_stack();
4211}
4212EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
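/*
 * Hedged usage sketch: this is not called directly but via RCU's
 * debug macros, e.g. (assumed from <linux/rcupdate.h> of this era,
 * simplified):
 *
 *	#define rcu_lockdep_assert(c, s)				\
 *		do {							\
 *			if (debug_lockdep_rcu_enabled() && !(c))	\
 *				lockdep_rcu_suspicious(__FILE__,	\
 *						       __LINE__, s);	\
 *		} while (0)
 *
 * which is what e.g. rcu_dereference_check() boils down to when its
 * protection condition evaluates false.
 */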