/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
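
/*
 * Illustrative expansion (assuming lockdep_states.h currently lists the
 * HARDIRQ and SOFTIRQ states): the enum above becomes
 *
 *	LOCK_USED_IN_HARDIRQ = 0,	LOCK_USED_IN_HARDIRQ_READ = 1,
 *	LOCK_ENABLED_HARDIRQ = 2,	LOCK_ENABLED_HARDIRQ_READ = 3,
 *	LOCK_USED_IN_SOFTIRQ = 4,	LOCK_USED_IN_SOFTIRQ_READ = 5,
 *	LOCK_ENABLED_SOFTIRQ = 6,	LOCK_ENABLED_SOFTIRQ_READ = 7,
 *	LOCK_USED = 8,			LOCK_USAGE_STATES = 9
 */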

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
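
/*
 * Derived from the enum layout above (illustrative): each state contributes
 * four consecutive usage bits, so within a group
 *
 *	bit 0 (LOCK_USAGE_READ_MASK) selects the _READ variant,
 *	bit 1 (LOCK_USAGE_DIR_MASK)  selects ENABLED vs. USED_IN,
 *
 * and masking with LOCK_USAGE_STATE_MASK recovers the group's base value,
 * e.g. (LOCK_ENABLED_HARDIRQ_READ & LOCK_USAGE_STATE_MASK) == LOCK_USED_IN_HARDIRQ.
 */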

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
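
/*
 * Illustrative values (again assuming the HARDIRQ and SOFTIRQ states):
 *
 *	LOCKF_USED_IN_HARDIRQ      == 0x001
 *	LOCKF_USED_IN_HARDIRQ_READ == 0x002
 *	LOCKF_ENABLED_HARDIRQ      == 0x004
 *	LOCKF_ENABLED_HARDIRQ_READ == 0x008
 *	LOCKF_USED_IN_SOFTIRQ      == 0x010
 *	...
 *	LOCKF_USED                 == 0x100
 */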

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)
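
/*
 * The dangling '|' left by each LOCKDEP_STATE() expansion above is
 * terminated by the trailing 0, so e.g. LOCKF_ENABLED_IRQ is built as
 *
 *	LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ | 0
 *
 * (illustrative, assuming the HARDIRQ and SOFTIRQ states).
 */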

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit in the required 32MB limit for the kernel. With
 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up
 * problems. So, reduce the static allocations for lockdep-related
 * structures so that everything fits within the current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by adding
 * every to-be-taken lock to each currently held lock's own dependency
 * table (if it's not there yet), and we check them for lock-order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#define STACK_TRACE_HASH_SIZE	16384
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
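
/*
 * Worked out: with MAX_LOCKDEP_CHAINS_BITS == 16 this gives 65536 chains
 * and 327680 chain-hlock slots (32768 and 163840 respectively under
 * CONFIG_LOCKDEP_SMALL).
 */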

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
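
/*
 * Sizing note (assuming the two IRQ states): LOCK_USAGE_STATES == 9, so
 * LOCK_USAGE_CHARS == 1 + 9/2 == 5 -- two usage characters per state plus
 * a terminating NUL.
 */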

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	unsigned long  chain_lookup_hits;
	unsigned int   chain_lookup_misses;
	unsigned long  hardirqs_on_events;
	unsigned long  hardirqs_off_events;
	unsigned long  redundant_hardirqs_on;
	unsigned long  redundant_hardirqs_off;
	unsigned long  softirqs_on_events;
	unsigned long  softirqs_off_events;
	unsigned long  redundant_softirqs_on;
	unsigned long  redundant_softirqs_off;
	int            nr_unused_locks;
	unsigned int   nr_redundant_checks;
	unsigned int   nr_redundant;
	unsigned int   nr_cyclic_checks;
	unsigned int   nr_find_usage_forwards_checks;
	unsigned int   nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
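
/*
 * Usage sketch (illustrative, not a definitive call site): a fast path
 * bumps a counter with IRQs already disabled, and the stats side sums
 * the per-cpu copies:
 *
 *	debug_atomic_inc(chain_lookup_hits);
 *	...
 *	seq_printf(m, "chain lookup hits: %llu\n",
 *		   debug_atomic_read(chain_lookup_hits));
 */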

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}
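
/*
 * Illustrative use (hypothetical call site): a hot path that has already
 * resolved its lock_class can count the operation cheaply, e.g.
 *
 *	debug_class_ops_inc(hlock_class(hlock));
 *
 * and the stats side totals it via debug_class_ops_read(class).
 */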

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif