v3.15
 
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
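
For reference, a sketch of what this enum expands to, assuming lockdep_states.h in this version lists the HARDIRQ, SOFTIRQ and RECLAIM_FS states (an assumption about that file, which is not shown here):

/* Hypothetical expansion, assuming three states in lockdep_states.h: */
enum lock_usage_bit {
	LOCK_USED_IN_HARDIRQ,		/*  0 */
	LOCK_USED_IN_HARDIRQ_READ,	/*  1 */
	LOCK_ENABLED_HARDIRQ,		/*  2 */
	LOCK_ENABLED_HARDIRQ_READ,	/*  3 */
	LOCK_USED_IN_SOFTIRQ,		/*  4 */
	LOCK_USED_IN_SOFTIRQ_READ,	/*  5 */
	LOCK_ENABLED_SOFTIRQ,		/*  6 */
	LOCK_ENABLED_SOFTIRQ_READ,	/*  7 */
	LOCK_USED_IN_RECLAIM_FS,	/*  8 */
	LOCK_USED_IN_RECLAIM_FS_READ,	/*  9 */
	LOCK_ENABLED_RECLAIM_FS,	/* 10 */
	LOCK_ENABLED_RECLAIM_FS_READ,	/* 11 */
	LOCK_USED,			/* 12 */
	LOCK_USAGE_STATES		/* 13 */
};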

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
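
These composite masks let the validator test a whole group of usage bits in one AND. A minimal sketch of the kind of check they enable (illustrative only; the real checks live in the lockdep core, which is not shown here, and this helper name is invented):

/* Illustrative: a class that was ever used while servicing an irq
 * (USED_IN) and was also taken with irqs enabled (ENABLED) marks a
 * potential interrupt deadlock. */
static inline int irq_usage_is_suspect(unsigned long usage_mask)
{
	return (usage_mask & LOCKF_USED_IN_IRQ) &&
	       (usage_mask & LOCKF_ENABLED_IRQ);
}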

/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by adding
 * every to-be-taken lock to all currently held locks' own dependency
 * tables (if it is not there yet), and we check them for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL

#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
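
Worked out: MAX_LOCKDEP_CHAINS is 1 << 15 = 32768 lock chains, so MAX_LOCKDEP_CHAIN_HLOCKS provides 32768 * 5 = 163840 held-lock slots, i.e. an average budget of five held locks per recorded chain.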

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	262144UL

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-cpu as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
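
A hedged usage sketch: the increment macros take the struct field name as a token, so a call site in the lockdep core (not shown here) bumps a counter on the fast path with interrupts disabled, for example:

	/* Illustrative call site: record a chain-cache hit. */
	debug_atomic_inc(chain_lookup_hits);

Note that debug_atomic_inc() insists on irqs_disabled(), so the non-atomic __this_cpu_inc() cannot race with an interrupt on the same CPU.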
#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif

v4.17
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit in the required 32MB limit for the kernel. With
 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
 * So, reduce the static allocations for lockdep-related structures so that
 * everything fits in the current required size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by adding
 * every to-be-taken lock to all currently held locks' own dependency
 * tables (if it is not there yet), and we check them for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL

#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
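
Summarizing the two configurations as defined above: CONFIG_LOCKDEP_SMALL gets 16384 dependency entries, 1 << 15 = 32768 chains (163840 chain hlocks) and 262144 stack-trace entries, while the default build doubles each: 32768 entries, 1 << 16 = 65536 chains (327680 chain hlocks) and 524288 stack-trace entries.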

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
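
For context, a minimal sketch of how one usage-bit pair can be rendered as a single character, mirroring the '.'/'+'/'-'/'?' convention lockdep's /proc output uses (an assumption: the real helper lives in the lockdep core, which is not shown here, and this function name is invented):

/* Hedged sketch: bit is a LOCK_USED_IN_* value; bit + 2 is the matching
 * LOCK_ENABLED_* value, per the enum layout above. */
static inline char usage_char_sketch(unsigned long usage_mask, int bit)
{
	char c = '.';				/* neither bit set */

	if (usage_mask & (1UL << (bit + 2)))
		c = '+';			/* taken with the context enabled */
	if (usage_mask & (1UL << bit)) {
		c = '-';			/* used while in the context */
		if (usage_mask & (1UL << (bit + 2)))
			c = '?';		/* both set: the suspicious case */
	}
	return c;
}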

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-cpu as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_redundant_checks;
	int	nr_redundant;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
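
A hedged read-side example: debug_atomic_read() folds the per-CPU copies into one total, so a reporting path (for instance the lockdep /proc statistics, not shown here) could do:

	/* Illustrative only: snapshot a counter summed across all CPUs. */
	unsigned long long hits = debug_atomic_read(chain_lookup_hits);

Because each CPU updates its own copy without locking, the sum is a point-in-time snapshot rather than an exact total.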
#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif