/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
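/*
 * Note: lockdep_states.h invokes LOCKDEP_STATE() once per tracked context
 * (e.g. HARDIRQ and SOFTIRQ), so each state contributes four usage bits
 * above: LOCK_USED_IN_<STATE>, LOCK_USED_IN_<STATE>_READ,
 * LOCK_ENABLED_<STATE> and LOCK_ENABLED_<STATE>_READ.  LOCK_USED and the
 * LOCK_USAGE_STATES count follow the generated bits.
 */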

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
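/*
 * Each LOCKF_* constant is the single-bit mask for the corresponding
 * lock_usage_bit, e.g. LOCKF_USED_IN_HARDIRQ == (1 << LOCK_USED_IN_HARDIRQ),
 * so a class's usage can be summarized in one bitmask word.
 */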

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

/*
 * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within its 32MB kernel size limit. With
 * PROVE_LOCKING we could exceed this limit and cause system boot-up problems.
 * So, reduce the static allocations for the lockdep-related structures so
 * that everything still fits within the required size limit.
 */
#ifdef CONFIG_PROVE_LOCKING_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by adding
 * every to-be-taken lock to each currently held lock's own dependency
 * table (if it is not there yet), and we check them for lock-order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
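/*
 * MAX_LOCKDEP_CHAIN_HLOCKS sizes the shared chain_hlocks[] array in which
 * each lock chain stores the class indices of its held locks back to back;
 * the factor of 5 is a rough estimate of the average chain depth.
 */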

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
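/*
 * get_usage_chars() renders a class's usage state as a short string, two
 * characters per tracked state plus a terminating NUL (hence
 * LOCK_USAGE_CHARS), used when printing lock classes, e.g. in /proc/lockdep.
 */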

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);
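/*
 * lock_chain_get_class() is expected to return the i-th lock class recorded
 * for @chain (looked up through the chain_hlocks[] index array); the lockdep
 * /proc code uses it when dumping chains.
 */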

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;
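/*
 * The counters and maxima above are maintained by lockdep.c and are
 * primarily of interest to the statistics code (e.g. /proc/lockdep_stats
 * in lockdep_proc.c).
 */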

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We keep them per-CPU as they are often accessed in the fast path,
 * and we want to avoid too much cache-line bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}
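/*
 * debug_atomic_inc()/debug_atomic_dec() rely on their callers running with
 * interrupts disabled (hence the WARN_ON_ONCE), which is what makes the
 * non-atomic __this_cpu_inc()/__this_cpu_dec() updates safe here.
 */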

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
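/*
 * debug_atomic_read() sums the counter across all possible CPUs; since the
 * per-CPU values can change while the loop runs, the result is only an
 * approximate snapshot, which is fine for statistics output.
 */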
#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif