v3.15
 
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
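
lockdep_states.h simply lists the tracked states as LOCKDEP_STATE() entries (assumed here to be HARDIRQ, SOFTIRQ and RECLAIM_FS in this kernel version, in that order), so the X-macro above expands roughly as follows; this is a sketch of the expansion, not literal preprocessor output:

enum lock_usage_bit {
	LOCK_USED_IN_HARDIRQ,		/* 0 */
	LOCK_USED_IN_HARDIRQ_READ,	/* 1 */
	LOCK_ENABLED_HARDIRQ,		/* 2 */
	LOCK_ENABLED_HARDIRQ_READ,	/* 3 */
	/* ... the same four entries repeat for SOFTIRQ and RECLAIM_FS ... */
	LOCK_USED,			/* 12 */
	LOCK_USAGE_STATES		/* 13 */
};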

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};

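Each __LOCKF() entry turns one usage bit from the enum above into a single-bit mask, so with the same assumed set of states this expands along the lines of:

enum {
	LOCKF_USED_IN_HARDIRQ		= (1 << LOCK_USED_IN_HARDIRQ),
	LOCKF_USED_IN_HARDIRQ_READ	= (1 << LOCK_USED_IN_HARDIRQ_READ),
	LOCKF_ENABLED_HARDIRQ		= (1 << LOCK_ENABLED_HARDIRQ),
	LOCKF_ENABLED_HARDIRQ_READ	= (1 << LOCK_ENABLED_HARDIRQ_READ),
	/* ... likewise for SOFTIRQ and RECLAIM_FS ... */
	LOCKF_USED			= (1 << LOCK_USED),
};
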
#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow it by adding
 * every to-be-taken lock to all currently held lock's own dependency
 * table (if it's not there yet), and we check it for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL

#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
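
For reference, with MAX_LOCKDEP_CHAINS_BITS set to 15 as above, these limits work out to:

/* MAX_LOCKDEP_CHAINS       = 1UL << 15 = 32768 lock chains      */
/* MAX_LOCKDEP_CHAIN_HLOCKS = 32768 * 5 = 163840 held-lock slots */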

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	262144UL

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif
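
A rough usage sketch for the macros above (chain_lookup_hits is one of the lockdep_stats fields): the increment side touches only the current CPU's counter and expects interrupts to already be disabled, while the read side sums the field over all possible CPUs, e.g. when generating the /proc/lockdep_stats output:

	/* on a hot path, with IRQs already disabled */
	debug_atomic_inc(chain_lookup_hits);

	/* when reporting: fold the per-cpu counters into one total */
	unsigned long long hits = debug_atomic_read(chain_lookup_hits);
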
v5.9
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USED_READ,
	LOCK_USAGE_STATES
};

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))

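These three masks let lockdep decompose a lock_usage_bit value: the low bit marks the _READ variant, the next bit distinguishes ENABLED_ from USED_IN_, and the remaining bits select the IRQ state. For example, assuming HARDIRQ is the first state in lockdep_states.h, LOCK_ENABLED_HARDIRQ_READ has the value 3 and splits up as:

	/* bit & LOCK_USAGE_READ_MASK  == 1  ->  a _READ usage          */
	/* bit & LOCK_USAGE_DIR_MASK   == 2  ->  ENABLED_, not USED_IN_ */
	/* bit & LOCK_USAGE_STATE_MASK == 0  ->  the HARDIRQ state group */
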
/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
	__LOCKF(USED_READ)
};

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

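Each of these blocks re-includes lockdep_states.h to OR together the per-state masks, with a trailing 0 terminating the expression. Assuming the states in this version are HARDIRQ and SOFTIRQ, the first block, for instance, unfolds to:

static const unsigned long LOCKF_ENABLED_IRQ =
	LOCKF_ENABLED_HARDIRQ |
	LOCKF_ENABLED_SOFTIRQ |
	0;
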
#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit in required 32MB limit for the kernel. With
 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
 * So, reduce the static allocations for lockdeps related structures so that
 * everything fits in current required size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow it by adding
 * every to-be-taken lock to all currently held lock's own dependency
 * table (if it's not there yet), and we check it for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#define STACK_TRACE_HASH_SIZE	16384
#endif

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	unsigned long  chain_lookup_hits;
	unsigned int   chain_lookup_misses;
	unsigned long  hardirqs_on_events;
	unsigned long  hardirqs_off_events;
	unsigned long  redundant_hardirqs_on;
	unsigned long  redundant_hardirqs_off;
	unsigned long  softirqs_on_events;
	unsigned long  softirqs_off_events;
	unsigned long  redundant_softirqs_on;
	unsigned long  redundant_softirqs_off;
	int            nr_unused_locks;
	unsigned int   nr_redundant_checks;
	unsigned int   nr_redundant;
	unsigned int   nr_cyclic_checks;
	unsigned int   nr_find_usage_forwards_checks;
	unsigned int   nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif
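
A rough sketch of how these per-class helpers are meant to be used: debug_class_ops_inc() bumps the current CPU's counter for a class on the lock-acquisition fast path (per-cpu, so no atomic operations are needed), and debug_class_ops_read() folds the counters from all possible CPUs into one total when statistics are reported, e.g. by the lockdep proc interface:

	/* class is a struct lock_class *, e.g. the one just acquired */
	debug_class_ops_inc(class);

	/* later, when printing per-class statistics */
	unsigned long total_ops = debug_class_ops_read(class);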