arch/s390/include/asm/spinlock.h (v6.2)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 *  S390 version
  4 *    Copyright IBM Corp. 1999
  5 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  6 *
  7 *  Derived from "include/asm-i386/spinlock.h"
  8 */
  9
 10#ifndef __ASM_SPINLOCK_H
 11#define __ASM_SPINLOCK_H
 12
 13#include <linux/smp.h>
 14#include <asm/atomic_ops.h>
 15#include <asm/barrier.h>
 16#include <asm/processor.h>
 17#include <asm/alternative.h>
 18
 19#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
 20
 21extern int spin_retry;
 22
 23bool arch_vcpu_is_preempted(int cpu);
 24
 25#define vcpu_is_preempted arch_vcpu_is_preempted
 26
 27/*
 28 * Simple spin lock operations.  There are two variants, one clears IRQ's
 29 * on the local processor, one does not.
 30 *
 31 * We make no fairness assumptions. They have a cost.
 32 *
 33 * (the type definitions are in asm/spinlock_types.h)
 34 */
 35
 36void arch_spin_relax(arch_spinlock_t *lock);
 37#define arch_spin_relax	arch_spin_relax
 38
 39void arch_spin_lock_wait(arch_spinlock_t *);
 40int arch_spin_trylock_retry(arch_spinlock_t *);
 41void arch_spin_lock_setup(int cpu);
 42
 43static inline u32 arch_spin_lockval(int cpu)
 44{
 45	return cpu + 1;
 46}
 47
 48static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 49{
 50	return lock.lock == 0;
 51}
 52
 53static inline int arch_spin_is_locked(arch_spinlock_t *lp)
 54{
 55	return READ_ONCE(lp->lock) != 0;
 56}
 57
 58static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 59{
 60	barrier();
 61	return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
 62}
 63
 64static inline void arch_spin_lock(arch_spinlock_t *lp)
 65{
 66	if (!arch_spin_trylock_once(lp))
 67		arch_spin_lock_wait(lp);
 68}
 69
 70static inline int arch_spin_trylock(arch_spinlock_t *lp)
 71{
 72	if (!arch_spin_trylock_once(lp))
 73		return arch_spin_trylock_retry(lp);
 74	return 1;
 75}
 76
 77static inline void arch_spin_unlock(arch_spinlock_t *lp)
 78{
 79	typecheck(int, lp->lock);
 80	kcsan_release();
 81	asm_inline volatile(
 82		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */
 83		"	sth	%1,%0\n"
 84		: "=R" (((unsigned short *) &lp->lock)[1])
 85		: "d" (0) : "cc", "memory");
 86}
 87
 88/*
 89 * Read-write spinlocks, allowing multiple readers
 90 * but only one writer.
 91 *
 92 * NOTE! it is quite common to have readers in interrupts
 93 * but no interrupt writers. For those circumstances we
 94 * can "mix" irq-safe locks - any writer needs to get a
 95 * irq-safe write-lock, but readers can get non-irqsafe
 96 * read-locks.
 97 */
 98
 99#define arch_read_relax(rw) barrier()
100#define arch_write_relax(rw) barrier()
101
102void arch_read_lock_wait(arch_rwlock_t *lp);
103void arch_write_lock_wait(arch_rwlock_t *lp);
104
105static inline void arch_read_lock(arch_rwlock_t *rw)
106{
107	int old;
108
109	old = __atomic_add(1, &rw->cnts);
110	if (old & 0xffff0000)
111		arch_read_lock_wait(rw);
112}
113
114static inline void arch_read_unlock(arch_rwlock_t *rw)
115{
116	__atomic_add_const_barrier(-1, &rw->cnts);
117}
118
119static inline void arch_write_lock(arch_rwlock_t *rw)
120{
121	if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
122		arch_write_lock_wait(rw);
123}
124
125static inline void arch_write_unlock(arch_rwlock_t *rw)
126{
127	__atomic_add_barrier(-0x30000, &rw->cnts);
128}
129
130
131static inline int arch_read_trylock(arch_rwlock_t *rw)
132{
133	int old;
134
135	old = READ_ONCE(rw->cnts);
136	return (!(old & 0xffff0000) &&
137		__atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
138}
139
140static inline int arch_write_trylock(arch_rwlock_t *rw)
141{
142	int old;
143
144	old = READ_ONCE(rw->cnts);
145	return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
146}
147
148#endif /* __ASM_SPINLOCK_H */
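
The v6.2 lock word encodes the holder as its CPU number plus one (arch_spin_lockval), taken with a single compare-and-swap and released by storing zero back. The following is a minimal user-space sketch of that protocol built on GCC's __atomic builtins; the names (sim_spinlock_t, sim_spin_trylock, and so on) are hypothetical stand-ins, since the real primitives above rely on s390 instructions, lowcore data and the kernel slow path (arch_spin_lock_wait) and cannot run outside the kernel.

/*
 * Minimal user-space sketch of the v6.2 fast paths shown above:
 * acquire = compare-and-swap 0 -> (cpu + 1), release = store 0 with
 * release ordering.  Hypothetical names; not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned int lock; } sim_spinlock_t;

/* Mirrors arch_spin_lockval(): encode "CPU n holds the lock" as n + 1. */
static unsigned int sim_spin_lockval(int cpu)
{
	return cpu + 1;
}

/* Mirrors arch_spin_trylock_once(): one compare-and-swap attempt. */
static bool sim_spin_trylock(sim_spinlock_t *lp, int cpu)
{
	unsigned int expected = 0;

	return __atomic_compare_exchange_n(&lp->lock, &expected,
					   sim_spin_lockval(cpu),
					   false, __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}

/* Mirrors arch_spin_unlock(): the owner simply stores 0 back. */
static void sim_spin_unlock(sim_spinlock_t *lp)
{
	__atomic_store_n(&lp->lock, 0, __ATOMIC_RELEASE);
}

int main(void)
{
	sim_spinlock_t lock = { 0 };

	printf("cpu 3 trylock: %d\n", sim_spin_trylock(&lock, 3)); /* 1 */
	printf("cpu 5 trylock: %d\n", sim_spin_trylock(&lock, 5)); /* 0: held by cpu 3 */
	sim_spin_unlock(&lock);
	printf("cpu 5 trylock: %d\n", sim_spin_trylock(&lock, 5)); /* 1 */
	return 0;
}

Compiled with gcc and run, the sketch prints 1, 0, 1: the second CPU's trylock fails while the lock word still holds the first CPU's lockval and succeeds once it has been cleared.
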
arch/s390/include/asm/spinlock.h (v4.10.11)
 
  1/*
  2 *  S390 version
  3 *    Copyright IBM Corp. 1999
  4 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  5 *
  6 *  Derived from "include/asm-i386/spinlock.h"
  7 */
  8
  9#ifndef __ASM_SPINLOCK_H
 10#define __ASM_SPINLOCK_H
 11
 12#include <linux/smp.h>
 13#include <asm/barrier.h>
 14#include <asm/processor.h>
 15
 16#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
 17
 18extern int spin_retry;
 19
 20static inline int
 21_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 22{
 23	return __sync_bool_compare_and_swap(lock, old, new);
 24}
 25
 26#ifndef CONFIG_SMP
 27static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
 28#else
 29bool arch_vcpu_is_preempted(int cpu);
 30#endif
 31
 32#define vcpu_is_preempted arch_vcpu_is_preempted
 33
 34/*
 35 * Simple spin lock operations.  There are two variants, one clears IRQ's
 36 * on the local processor, one does not.
 37 *
 38 * We make no fairness assumptions. They have a cost.
 39 *
 40 * (the type definitions are in asm/spinlock_types.h)
 41 */
 42
 43void arch_lock_relax(unsigned int cpu);
 44
 45void arch_spin_lock_wait(arch_spinlock_t *);
 46int arch_spin_trylock_retry(arch_spinlock_t *);
 47void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 48
 49static inline void arch_spin_relax(arch_spinlock_t *lock)
 50{
 51	arch_lock_relax(lock->lock);
 52}
 53
 54static inline u32 arch_spin_lockval(int cpu)
 55{
 56	return ~cpu;
 57}
 58
 59static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 60{
 61	return lock.lock == 0;
 62}
 63
 64static inline int arch_spin_is_locked(arch_spinlock_t *lp)
 65{
 66	return ACCESS_ONCE(lp->lock) != 0;
 67}
 68
 69static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 70{
 71	barrier();
 72	return likely(arch_spin_value_unlocked(*lp) &&
 73		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
 74}
 75
 76static inline void arch_spin_lock(arch_spinlock_t *lp)
 77{
 78	if (!arch_spin_trylock_once(lp))
 79		arch_spin_lock_wait(lp);
 80}
 81
 82static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
 83					unsigned long flags)
 84{
 85	if (!arch_spin_trylock_once(lp))
 86		arch_spin_lock_wait_flags(lp, flags);
 87}
 88
 89static inline int arch_spin_trylock(arch_spinlock_t *lp)
 90{
 91	if (!arch_spin_trylock_once(lp))
 92		return arch_spin_trylock_retry(lp);
 93	return 1;
 94}
 95
 96static inline void arch_spin_unlock(arch_spinlock_t *lp)
 97{
 98	typecheck(unsigned int, lp->lock);
 99	asm volatile(
100		"st	%1,%0\n"
101		: "+Q" (lp->lock)
102		: "d" (0)
103		: "cc", "memory");
104}
105
106static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
107{
108	while (arch_spin_is_locked(lock))
109		arch_spin_relax(lock);
110	smp_acquire__after_ctrl_dep();
111}
112
113/*
114 * Read-write spinlocks, allowing multiple readers
115 * but only one writer.
116 *
117 * NOTE! it is quite common to have readers in interrupts
118 * but no interrupt writers. For those circumstances we
119 * can "mix" irq-safe locks - any writer needs to get a
120 * irq-safe write-lock, but readers can get non-irqsafe
121 * read-locks.
122 */
123
124/**
125 * read_can_lock - would read_trylock() succeed?
126 * @lock: the rwlock in question.
127 */
128#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
129
130/**
131 * write_can_lock - would write_trylock() succeed?
132 * @lock: the rwlock in question.
133 */
134#define arch_write_can_lock(x) ((x)->lock == 0)
135
136extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
137extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
138
139#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
140#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
141
142static inline int arch_read_trylock_once(arch_rwlock_t *rw)
143{
144	unsigned int old = ACCESS_ONCE(rw->lock);
145	return likely((int) old >= 0 &&
146		      _raw_compare_and_swap(&rw->lock, old, old + 1));
147}
148
149static inline int arch_write_trylock_once(arch_rwlock_t *rw)
150{
151	unsigned int old = ACCESS_ONCE(rw->lock);
152	return likely(old == 0 &&
153		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
154}
155
156#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
157
158#define __RAW_OP_OR	"lao"
159#define __RAW_OP_AND	"lan"
160#define __RAW_OP_ADD	"laa"
161
162#define __RAW_LOCK(ptr, op_val, op_string)		\
163({							\
164	unsigned int old_val;				\
165							\
166	typecheck(unsigned int *, ptr);			\
167	asm volatile(					\
168		op_string "	%0,%2,%1\n"		\
169		"bcr	14,0\n"				\
170		: "=d" (old_val), "+Q" (*ptr)		\
171		: "d" (op_val)				\
172		: "cc", "memory");			\
173	old_val;					\
174})
175
176#define __RAW_UNLOCK(ptr, op_val, op_string)		\
177({							\
178	unsigned int old_val;				\
179							\
180	typecheck(unsigned int *, ptr);			\
181	asm volatile(					\
182		op_string "	%0,%2,%1\n"		\
183		: "=d" (old_val), "+Q" (*ptr)		\
184		: "d" (op_val)				\
185		: "cc", "memory");			\
186	old_val;					\
187})
188
189extern void _raw_read_lock_wait(arch_rwlock_t *lp);
190extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
191
192static inline void arch_read_lock(arch_rwlock_t *rw)
193{
194	unsigned int old;
195
196	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
197	if ((int) old < 0)
198		_raw_read_lock_wait(rw);
199}
200
201static inline void arch_read_unlock(arch_rwlock_t *rw)
202{
203	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
204}
205
206static inline void arch_write_lock(arch_rwlock_t *rw)
207{
208	unsigned int old;
209
210	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
211	if (old != 0)
212		_raw_write_lock_wait(rw, old);
213	rw->owner = SPINLOCK_LOCKVAL;
214}
215
216static inline void arch_write_unlock(arch_rwlock_t *rw)
217{
218	rw->owner = 0;
219	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
220}
221
222#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
223
224extern void _raw_read_lock_wait(arch_rwlock_t *lp);
225extern void _raw_write_lock_wait(arch_rwlock_t *lp);
226
227static inline void arch_read_lock(arch_rwlock_t *rw)
228{
229	if (!arch_read_trylock_once(rw))
230		_raw_read_lock_wait(rw);
231}
232
233static inline void arch_read_unlock(arch_rwlock_t *rw)
234{
235	unsigned int old;
236
237	do {
238		old = ACCESS_ONCE(rw->lock);
239	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
240}
241
242static inline void arch_write_lock(arch_rwlock_t *rw)
243{
244	if (!arch_write_trylock_once(rw))
245		_raw_write_lock_wait(rw);
246	rw->owner = SPINLOCK_LOCKVAL;
247}
248
249static inline void arch_write_unlock(arch_rwlock_t *rw)
250{
251	typecheck(unsigned int, rw->lock);
252
253	rw->owner = 0;
254	asm volatile(
255		"st	%1,%0\n"
256		: "+Q" (rw->lock)
257		: "d" (0)
258		: "cc", "memory");
259}
260
261#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
262
263static inline int arch_read_trylock(arch_rwlock_t *rw)
264{
265	if (!arch_read_trylock_once(rw))
266		return _raw_read_trylock_retry(rw);
267	return 1;
268}
269
270static inline int arch_write_trylock(arch_rwlock_t *rw)
271{
272	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
273		return 0;
274	rw->owner = SPINLOCK_LOCKVAL;
275	return 1;
276}
277
278static inline void arch_read_relax(arch_rwlock_t *rw)
279{
280	arch_lock_relax(rw->owner);
281}
282
283static inline void arch_write_relax(arch_rwlock_t *rw)
284{
285	arch_lock_relax(rw->owner);
286}
287
288#endif /* __ASM_SPINLOCK_H */
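
The main difference between the two rwlock implementations is the reader/writer encoding: the v4.10.11 code keeps the reader count in the low 31 bits of rw->lock and uses the sign bit (0x80000000) as the writer flag, while the v6.2 code counts readers in the low halfword of rw->cnts and treats any bit in 0xffff0000 as a writer being present or pending (a writer claims the lock by swapping in 0x30000). The sketch below is a user-space approximation of the v6.2-style fast paths with hypothetical names (sim_rwlock_t, sim_read_trylock, sim_write_trylock); the contended slow paths (arch_read_lock_wait, arch_write_lock_wait) are deliberately left out.

/*
 * User-space approximation of the v6.2 rwlock fast paths shown above:
 * readers add 1 to the low halfword, a single writer claims the lock by
 * swapping 0 -> 0x30000.  Hypothetical names; slow paths omitted.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { int cnts; } sim_rwlock_t;

/* Mirrors arch_read_trylock(): readers fail if any writer bit is set. */
static bool sim_read_trylock(sim_rwlock_t *rw)
{
	int old = __atomic_load_n(&rw->cnts, __ATOMIC_RELAXED);

	return !(old & 0xffff0000) &&
	       __atomic_compare_exchange_n(&rw->cnts, &old, old + 1,
					   false, __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}

static void sim_read_unlock(sim_rwlock_t *rw)
{
	__atomic_sub_fetch(&rw->cnts, 1, __ATOMIC_RELEASE);
}

/* Mirrors arch_write_trylock(): a writer needs the counter to be exactly 0. */
static bool sim_write_trylock(sim_rwlock_t *rw)
{
	int expected = 0;

	return __atomic_compare_exchange_n(&rw->cnts, &expected, 0x30000,
					   false, __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}

static void sim_write_unlock(sim_rwlock_t *rw)
{
	__atomic_sub_fetch(&rw->cnts, 0x30000, __ATOMIC_RELEASE);
}

int main(void)
{
	sim_rwlock_t rw = { 0 };

	printf("reader 1: %d\n", sim_read_trylock(&rw));  /* 1 */
	printf("reader 2: %d\n", sim_read_trylock(&rw));  /* 1: readers share */
	printf("writer:   %d\n", sim_write_trylock(&rw)); /* 0: readers active */
	sim_read_unlock(&rw);
	sim_read_unlock(&rw);
	printf("writer:   %d\n", sim_write_trylock(&rw)); /* 1 */
	printf("reader 3: %d\n", sim_read_trylock(&rw));  /* 0: writer active */
	sim_write_unlock(&rw);
	return 0;
}

Run under gcc, the example shows two readers sharing the lock, a writer failing while readers are active, the writer succeeding once both readers drop out, and a late reader failing while the writer value 0x30000 is set.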