/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)          \
        LOCK_USED_IN_##__STATE,         \
        LOCK_USED_IN_##__STATE##_READ,  \
        LOCK_ENABLED_##__STATE,         \
        LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        LOCK_USED,
        LOCK_USED_READ,
        LOCK_USAGE_STATES,
};
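
/*
 * For illustration: with lockdep_states.h listing HARDIRQ and SOFTIRQ
 * (the states at the time of writing), the enum above expands to
 * LOCK_USED_IN_HARDIRQ, LOCK_USED_IN_HARDIRQ_READ, LOCK_ENABLED_HARDIRQ,
 * LOCK_ENABLED_HARDIRQ_READ, the same four for SOFTIRQ, and then
 * LOCK_USED and LOCK_USED_READ.
 */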

/* states after LOCK_USED_READ are not traced and printed */
static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
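
/*
 * For illustration: the low two bits of a usage bit select read vs.
 * write and USED_IN vs. ENABLED, so LOCK_ENABLED_HARDIRQ_READ has both
 * LOCK_USAGE_READ_MASK and LOCK_USAGE_DIR_MASK set, while the remaining
 * bits (LOCK_USAGE_STATE_MASK) identify the HARDIRQ state itself.
 */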

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)                  \
        __LOCKF(USED_IN_##__STATE)              \
        __LOCKF(USED_IN_##__STATE##_READ)       \
        __LOCKF(ENABLED_##__STATE)              \
        __LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        __LOCKF(USED)
        __LOCKF(USED_READ)
};
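
/*
 * Each LOCKF_ constant is simply the bit for its LOCK_ counterpart,
 * e.g. LOCKF_USED_IN_HARDIRQ == (1 << LOCK_USED_IN_HARDIRQ).
 */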

#define LOCKDEP_STATE(__STATE)  LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)  LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)  LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)  LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE
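
/*
 * Each constant above is built by OR-ing the per-state masks, with the
 * trailing 0 closing the expression; e.g. LOCKF_USED_IN_IRQ_READ
 * expands to LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ | 0.
 */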

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within the kernel's required 32MB size limit.
 * With CONFIG_LOCKDEP we could exceed this limit and cause system
 * boot-up problems, so reduce the static allocations for the
 * lockdep-related structures until everything fits.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock-order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES     16384UL
#define MAX_LOCKDEP_CHAINS_BITS 15
#define MAX_STACK_TRACE_ENTRIES 262144UL
#define STACK_TRACE_HASH_SIZE   8192
#else
#define MAX_LOCKDEP_ENTRIES     (1UL << CONFIG_LOCKDEP_BITS)

#define MAX_LOCKDEP_CHAINS_BITS CONFIG_LOCKDEP_CHAINS_BITS

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES (1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)
#define STACK_TRACE_HASH_SIZE   (1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS)
#endif

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT      (1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT      (1 << 1)

#define MAX_LOCKDEP_CHAINS      (1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
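
/*
 * The factor of 5 sizes the shared chain_hlocks[] pool (see lockdep.c)
 * for an average of five held locks per chain.
 */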

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)
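
/*
 * Two usage characters per state plus the terminating NUL: with the
 * HARDIRQ and SOFTIRQ states this is the four-character string
 * (e.g. "-.-.") shown for each class in /proc/lockdep.
 */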

extern void get_usage_chars(struct lock_class *class,
                            char usage[LOCK_USAGE_CHARS]);

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
                                  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
        return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
        return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
        unsigned long  chain_lookup_hits;
        unsigned int   chain_lookup_misses;
        unsigned long  hardirqs_on_events;
        unsigned long  hardirqs_off_events;
        unsigned long  redundant_hardirqs_on;
        unsigned long  redundant_hardirqs_off;
        unsigned long  softirqs_on_events;
        unsigned long  softirqs_off_events;
        unsigned long  redundant_softirqs_on;
        unsigned long  redundant_softirqs_off;
        int            nr_unused_locks;
        unsigned int   nr_redundant_checks;
        unsigned int   nr_redundant;
        unsigned int   nr_cyclic_checks;
        unsigned int   nr_find_usage_forwards_checks;
        unsigned int   nr_find_usage_backwards_checks;

        /*
         * Per lock class locking operation stat counts
         */
        unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)                 \
        this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)   {               \
        WARN_ON_ONCE(!irqs_disabled());         \
        __this_cpu_inc(lockdep_stats.ptr);      \
}

#define debug_atomic_dec(ptr)   {               \
        WARN_ON_ONCE(!irqs_disabled());         \
        __this_cpu_dec(lockdep_stats.ptr);      \
}

#define debug_atomic_read(ptr)  ({                                      \
        struct lockdep_stats *__cpu_lockdep_stats;                      \
        unsigned long long __total = 0;                                 \
        int __cpu;                                                      \
        for_each_possible_cpu(__cpu) {                                  \
                __cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);   \
                __total += __cpu_lockdep_stats->ptr;                    \
        }                                                               \
        __total;                                                        \
})
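
/*
 * Typical usage (see lockdep.c and lockdep_proc.c): hot paths do
 * debug_atomic_inc(chain_lookup_hits) with IRQs disabled, and the
 * /proc code sums the per-cpu counters via
 * debug_atomic_read(chain_lookup_hits).
 */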

static inline void debug_class_ops_inc(struct lock_class *class)
{
        int idx;

        /* Pointer difference from the base of lock_classes[] is the class index. */
        idx = class - lock_classes;
        __debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
        int idx, cpu;
        unsigned long ops = 0;

        idx = class - lock_classes;
        /* Sum the per-cpu operation counts for this class. */
        for_each_possible_cpu(cpu)
                ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
        return ops;
}

#else
# define __debug_atomic_inc(ptr)        do { } while (0)
# define debug_atomic_inc(ptr)          do { } while (0)
# define debug_atomic_dec(ptr)          do { } while (0)
# define debug_atomic_read(ptr)         0
# define debug_class_ops_inc(ptr)       do { } while (0)
#endif