/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USED_READ,
	LOCK_USAGE_STATES
};

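/*
 * Example (assuming lockdep_states.h currently lists HARDIRQ and SOFTIRQ):
 * the enum above expands to
 *
 *	LOCK_USED_IN_HARDIRQ, LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ, LOCK_ENABLED_HARDIRQ_READ,
 *	LOCK_USED_IN_SOFTIRQ, LOCK_USED_IN_SOFTIRQ_READ,
 *	LOCK_ENABLED_SOFTIRQ, LOCK_ENABLED_SOFTIRQ_READ,
 *	LOCK_USED, LOCK_USED_READ, LOCK_USAGE_STATES
 *
 * i.e. four usage bits per IRQ state, followed by the two "ever used"
 * markers and the total count.
 */
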
#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK 2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))

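/*
 * Example: given the enum layout above, a usage bit number decomposes as
 *
 *	bit 0 (LOCK_USAGE_READ_MASK)      - read acquisition
 *	bit 1 (LOCK_USAGE_DIR_MASK)       - USED_IN vs. ENABLED
 *	high bits (LOCK_USAGE_STATE_MASK) - which IRQ state
 *
 * so e.g. LOCK_ENABLED_HARDIRQ_READ has both low bits set and selects the
 * HARDIRQ state.
 */
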
/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)			\
	__LOCKF(USED_IN_##__STATE)		\
	__LOCKF(USED_IN_##__STATE##_READ)	\
	__LOCKF(ENABLED_##__STATE)		\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
	__LOCKF(USED_READ)
};

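/*
 * Example: __LOCKF() turns each usage bit into a same-named bitmask, e.g.
 *
 *	LOCKF_USED_IN_HARDIRQ == (1 << LOCK_USED_IN_HARDIRQ)
 *	LOCKF_USED            == (1 << LOCK_USED)
 */
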
#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

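/*
 * Example (assuming lockdep_states.h lists HARDIRQ and SOFTIRQ): each
 * re-inclusion above builds one mask by OR-ing the per-state bits, e.g.
 *
 *	LOCKF_ENABLED_IRQ == LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ | 0
 *
 * and LOCKF_IRQ / LOCKF_IRQ_READ then combine the ENABLED and USED_IN
 * masks for the write and read cases respectively.
 */
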
/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within the required 32MB limit for the kernel.
 * With CONFIG_LOCKDEP we could go over this limit and cause system
 * boot-up problems. So, reduce the static allocations for the
 * lockdep-related structures so that everything fits within the current
 * size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to every currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock-order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#define STACK_TRACE_HASH_SIZE	16384
#endif

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)

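/*
 * Worked sizes: MAX_LOCKDEP_CHAINS is 1UL << 15 == 32768 with
 * CONFIG_LOCKDEP_SMALL and 1UL << 16 == 65536 otherwise, so
 * MAX_LOCKDEP_CHAIN_HLOCKS provides 163840 resp. 327680 held-lock slots.
 */
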
extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

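/*
 * Example (assuming the two IRQ states above): LOCK_USAGE_STATES is 10, so
 * LOCK_USAGE_CHARS is 1 + 10/2 == 6, the size of the usage[] buffer that
 * get_usage_chars() fills in for a lock class.
 */
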
extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	unsigned long chain_lookup_hits;
	unsigned int  chain_lookup_misses;
	unsigned long hardirqs_on_events;
	unsigned long hardirqs_off_events;
	unsigned long redundant_hardirqs_on;
	unsigned long redundant_hardirqs_off;
	unsigned long softirqs_on_events;
	unsigned long softirqs_off_events;
	unsigned long redundant_softirqs_on;
	unsigned long redundant_softirqs_off;
	int           nr_unused_locks;
	unsigned int  nr_redundant_checks;
	unsigned int  nr_redundant;
	unsigned int  nr_cyclic_checks;
	unsigned int  nr_find_usage_forwards_checks;
	unsigned int  nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)			\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)	{		\
	WARN_ON_ONCE(!irqs_disabled());		\
	__this_cpu_inc(lockdep_stats.ptr);	\
}

#define debug_atomic_dec(ptr)	{		\
	WARN_ON_ONCE(!irqs_disabled());		\
	__this_cpu_dec(lockdep_stats.ptr);	\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})

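/*
 * Example usage: with CONFIG_DEBUG_LOCKDEP enabled, a call site such as
 *
 *	debug_atomic_inc(redundant_hardirqs_on);
 *
 * warns if interrupts are enabled and then bumps the current CPU's
 * lockdep_stats.redundant_hardirqs_on counter, while
 * debug_atomic_read(redundant_hardirqs_on) sums that field over all
 * possible CPUs.
 */
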
static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

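/*
 * Example usage: debug_class_ops_inc(class) uses the pointer difference
 * "class - lock_classes" as the index into the per-cpu lock_class_ops[]
 * counters, and debug_class_ops_read(class) sums that slot over all
 * possible CPUs.
 */
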
#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif