Linux Audio

Check our new training course

Loading...
Note: File does not exist in v4.10.11.
  1/*
  2 * Copyright 2005, Red Hat, Inc., Ingo Molnar
  3 * Released under the General Public License (GPL).
  4 *
  5 * This file contains the spinlock/rwlock implementations for
  6 * DEBUG_SPINLOCK.
  7 */
  8
  9#include <linux/spinlock.h>
 10#include <linux/nmi.h>
 11#include <linux/interrupt.h>
 12#include <linux/debug_locks.h>
 13#include <linux/delay.h>
 14#include <linux/module.h>
 15
 16void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 17			  struct lock_class_key *key)
 18{
 19#ifdef CONFIG_DEBUG_LOCK_ALLOC
 20	/*
 21	 * Make sure we are not reinitializing a held lock:
 22	 */
 23	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 24	lockdep_init_map(&lock->dep_map, name, key, 0);
 25#endif
 26	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 27	lock->magic = SPINLOCK_MAGIC;
 28	lock->owner = SPINLOCK_OWNER_INIT;
 29	lock->owner_cpu = -1;
 30}
 31
 32EXPORT_SYMBOL(__raw_spin_lock_init);
 33
 34void __rwlock_init(rwlock_t *lock, const char *name,
 35		   struct lock_class_key *key)
 36{
 37#ifdef CONFIG_DEBUG_LOCK_ALLOC
 38	/*
 39	 * Make sure we are not reinitializing a held lock:
 40	 */
 41	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 42	lockdep_init_map(&lock->dep_map, name, key, 0);
 43#endif
 44	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
 45	lock->magic = RWLOCK_MAGIC;
 46	lock->owner = SPINLOCK_OWNER_INIT;
 47	lock->owner_cpu = -1;
 48}
 49
 50EXPORT_SYMBOL(__rwlock_init);
 51
/*
 * Report a spinlock debug violation (@msg names it: "bad magic",
 * "recursion", ...): print the offending CPU/task, the lock's magic
 * word and recorded owner, then dump the stack.
 *
 * debug_locks_off() returns 0 once debug reporting has been turned
 * off, so only the first violation is reported.
 */
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = NULL;

	if (!debug_locks_off())
		return;

	/* SPINLOCK_OWNER_INIT means "no owner recorded" - see init above */
	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
		owner = lock->owner;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, lock->magic,
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		lock->owner_cpu);
	dump_stack();
}
 72
 73#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
 74
/*
 * Sanity checks before attempting to take @lock:
 *  - the magic word must show the lock was properly initialized,
 *  - the current task must not already own it (self-deadlock),
 *  - this CPU must not be recorded as the owner (CPU-level recursion).
 */
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
 83
/* Record the new owner (task and CPU) right after @lock was acquired. */
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
 89
/*
 * Sanity checks before releasing @lock (initialized, actually locked,
 * released by the task/CPU that took it), then clear the recorded
 * owner back to the "unowned" markers set at init time.
 */
static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
100
101static void __spin_lock_debug(raw_spinlock_t *lock)
102{
103	u64 i;
104	u64 loops = loops_per_jiffy * HZ;
105	int print_once = 1;
106
107	for (;;) {
108		for (i = 0; i < loops; i++) {
109			if (arch_spin_trylock(&lock->raw_lock))
110				return;
111			__delay(1);
112		}
113		/* lockup suspected: */
114		if (print_once) {
115			print_once = 0;
116			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
117					"%s/%d, %p\n",
118				raw_smp_processor_id(), current->comm,
119				task_pid_nr(current), lock);
120			dump_stack();
121#ifdef CONFIG_SMP
122			trigger_all_cpu_backtrace();
123#endif
124		}
125	}
126}
127
/*
 * Debug version of the spinlock acquire path: run the pre-acquire
 * sanity checks, take the lock (falling back to the spinning
 * lockup-detection loop if the fast trylock fails), then record the
 * new owner.
 */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}
135
/*
 * Debug version of spin_trylock: attempt the arch trylock and record
 * ownership only on success.  Returns nonzero when the lock was taken.
 */
int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
150
/*
 * Debug version of the spinlock release path: validate and clear the
 * debug state while the lock is still held, then do the arch unlock.
 */
void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}
156
/*
 * Report an rwlock debug violation (@msg names it): print the
 * offending CPU/task and lock address, then dump the stack.  Like
 * spin_bug(), only the first violation is reported - debug_locks_off()
 * returns 0 once reporting has been turned off.
 */
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}
167
168#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
169
#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
/*
 * Spinning read-lock slow path with lockup detection, mirroring
 * __spin_lock_debug(): one report after ~1 second of calibrated
 * delays, then keep spinning.  Compiled out - see the comment above.
 *
 * Uses task_pid_nr(current) rather than raw current->pid so the
 * reported PID is namespace-aware, consistent with every live
 * reporting path in this file.
 */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				task_pid_nr(current), lock);
			dump_stack();
		}
	}
}
#endif
195
/*
 * Debug version of the read-lock acquire path: check the magic word,
 * then take the arch read lock.  No owner is recorded - read locks can
 * be held by multiple tasks at once, so a single owner field would be
 * meaningless.
 */
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}
201
/*
 * Debug version of read_trylock.  Returns nonzero when the read lock
 * was taken.
 */
int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
214
/*
 * Debug version of the read-lock release path: check the magic word,
 * then do the arch read unlock.
 */
void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}
220
/*
 * Sanity checks before attempting to take @lock for writing:
 *  - the magic word must show the lock was properly initialized,
 *  - the current task must not already hold it for writing,
 *  - this CPU must not be recorded as the write owner.
 */
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
228
/* Record the write owner (task and CPU) right after @lock was acquired. */
static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
234
/*
 * Sanity checks before releasing a write lock (initialized, released
 * by the task/CPU that took it), then clear the recorded owner back to
 * the "unowned" markers set at init time.
 */
static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
244
#if 0		/* This can cause lockups */
/*
 * Spinning write-lock slow path with lockup detection, mirroring
 * __spin_lock_debug(): one report after ~1 second of calibrated
 * delays, then keep spinning.  Compiled out - see the comment above.
 *
 * Uses task_pid_nr(current) rather than raw current->pid so the
 * reported PID is namespace-aware, consistent with every live
 * reporting path in this file.
 */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				task_pid_nr(current), lock);
			dump_stack();
		}
	}
}
#endif
270
/*
 * Debug version of the write-lock acquire path: run the pre-acquire
 * sanity checks, take the arch write lock (no lockup-detection loop
 * here - see the #if 0'd __write_lock_debug() above), then record the
 * new owner.
 */
void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}
277
/*
 * Debug version of write_trylock: attempt the arch trylock and record
 * ownership only on success.  Returns nonzero when the lock was taken.
 */
int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
292
/*
 * Debug version of the write-lock release path: validate and clear the
 * debug state while the lock is still held, then do the arch unlock.
 */
void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}