/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>

void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key, short inner)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);

#ifndef CONFIG_PREEMPT_RT
void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);
#endif
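
/*
 * Illustrative sketch (not part of the upstream file): with
 * CONFIG_DEBUG_SPINLOCK enabled, the generic lock initializers are
 * expected to expand into the functions above, roughly:
 *
 *	static raw_spinlock_t my_lock;		(hypothetical example lock)
 *	static rwlock_t my_rwlock;
 *
 *	raw_spin_lock_init(&my_lock);		ends up in __raw_spin_lock_init()
 *	rwlock_init(&my_rwlock);		ends up in __rwlock_init()
 *
 * so that ->magic, ->owner and ->owner_cpu start out in a known state
 * before the debug checks below ever look at them.
 */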

static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = READ_ONCE(lock->owner);

	if (owner == SPINLOCK_OWNER_INIT)
		owner = NULL;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, READ_ONCE(lock->magic),
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		READ_ONCE(lock->owner_cpu));
	dump_stack();
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
	SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}

/*
 * We now rely on the NMI watchdog to detect lockups instead of doing the
 * detection here with an unfair lock, which can cause problems of its own.
 */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
	debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret) {
		mmiowb_spin_lock();
		debug_spin_lock_after(lock);
	}
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	mmiowb_spin_unlock();
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}

#ifndef CONFIG_PREEMPT_RT
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}

static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}

void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}

#endif /* !CONFIG_PREEMPT_RT */
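
/*
 * Illustrative sketch (not part of the upstream file): on a kernel built
 * with CONFIG_DEBUG_SPINLOCK, an ordinary locking sequence such as
 *
 *	static DEFINE_SPINLOCK(example_lock);	(hypothetical example lock)
 *
 *	spin_lock(&example_lock);
 *	...critical section...
 *	spin_unlock(&example_lock);
 *
 * is expected to reach do_raw_spin_lock() and do_raw_spin_unlock() above
 * through the _raw_spin_lock()/_raw_spin_unlock() wrappers, so the magic,
 * owner and owner_cpu checks run on every acquire and release. A failed
 * check prints the "BUG: spinlock ..." report via spin_dump() and turns
 * off further lock debugging via debug_locks_off().
 */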