v6.2
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

static inline int arch_atomic_read(const atomic_t *v)
{
	return __atomic_read(v);
}
#define arch_atomic_read arch_atomic_read

static inline void arch_atomic_set(atomic_t *v, int i)
{
	__atomic_set(v, i);
}
#define arch_atomic_set arch_atomic_set

static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}
#define arch_atomic_add_return arch_atomic_add_return

static inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

static inline void arch_atomic_add(int i, atomic_t *v)
{
	__atomic_add(i, &v->counter);
}
#define arch_atomic_add arch_atomic_add

#define arch_atomic_sub(_i, _v)		arch_atomic_add(-(int)(_i), _v)
#define arch_atomic_sub_return(_i, _v)	arch_atomic_add_return(-(int)(_i), _v)
#define arch_atomic_fetch_sub(_i, _v)	arch_atomic_fetch_add(-(int)(_i), _v)

#define ATOMIC_OPS(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	return __atomic_##op##_barrier(i, &v->counter);		\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

#define arch_atomic_and			arch_atomic_and
#define arch_atomic_or			arch_atomic_or
#define arch_atomic_xor			arch_atomic_xor
#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), new))

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

#define ATOMIC64_INIT(i)  { (i) }

static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return __atomic64_read(v);
}
#define arch_atomic64_read arch_atomic64_read

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__atomic64_set(v, i);
}
#define arch_atomic64_set arch_atomic64_set

static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}
#define arch_atomic64_add_return arch_atomic64_add_return

static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

static inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	__atomic64_add(i, (long *)&v->counter);
}
#define arch_atomic64_add arch_atomic64_add

#define arch_atomic64_xchg(v, new)	(arch_xchg(&((v)->counter), new))

static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return __atomic64_cmpxchg((long *)&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

#define ATOMIC64_OPS(op)						\
static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	__atomic64_##op(i, (long *)&v->counter);			\
}									\
static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

#define arch_atomic64_and		arch_atomic64_and
#define arch_atomic64_or		arch_atomic64_or
#define arch_atomic64_xor		arch_atomic64_xor
#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor

#define arch_atomic64_sub_return(_i, _v) arch_atomic64_add_return(-(s64)(_i), _v)
#define arch_atomic64_fetch_sub(_i, _v)  arch_atomic64_fetch_add(-(s64)(_i), _v)
#define arch_atomic64_sub(_i, _v)	 arch_atomic64_add(-(s64)(_i), _v)

#endif /* __ARCH_S390_ATOMIC__  */
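
The ATOMIC_OPS()/ATOMIC64_OPS() macros above stamp out one void variant and one value-returning fetch variant per bitwise operation. As a reading aid, expanding ATOMIC_OPS(or) by hand gives roughly the following; this is a sketch of the preprocessor output, not extra code present in the header:

/* Approximate expansion of ATOMIC_OPS(or) from the v6.2 listing above. */
static inline void arch_atomic_or(int i, atomic_t *v)
{
	__atomic_or(i, &v->counter);
}
static inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	return __atomic_or_barrier(i, &v->counter);
}

The void variant calls the plain __atomic_or() helper from asm/atomic_ops.h, while the fetch variant goes through __atomic_or_barrier() and returns the value the counter held before the operation.
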
v5.9
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}

static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/*
	 * Order of conditions is important to circumvent gcc 10 bug:
	 * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
	 */
	if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
		__atomic_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic_add(i, &v->counter);
}

#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)

#define ATOMIC_OPS(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	return __atomic_##op##_barrier(i, &v->counter);		\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}

#define ATOMIC64_INIT(i)  { (i) }

static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}

static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter);
}

static inline void atomic64_add(s64 i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/*
	 * Order of conditions is important to circumvent gcc 10 bug:
	 * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
	 */
	if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
		__atomic64_add_const(i, (long *)&v->counter);
		return;
	}
#endif
	__atomic64_add(i, (long *)&v->counter);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return __atomic64_cmpxchg((long *)&v->counter, old, new);
}

#define ATOMIC64_OPS(op)						\
static inline void atomic64_##op(s64 i, atomic64_t *v)			\
{									\
	__atomic64_##op(i, (long *)&v->counter);			\
}									\
static inline long atomic64_fetch_##op(s64 i, atomic64_t *v)		\
{									\
	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)

#endif /* __ARCH_S390_ATOMIC__  */
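
Both listings implement the same operations: v5.9 defines them directly under the atomic_*/atomic64_* names, while v6.2 provides arch_atomic_* implementations that the generic atomic wrappers expose to the rest of the kernel. Below is a minimal usage sketch, not taken from the kernel sources; the counter and function names (pending, submit_one, complete_one, get_unless_zero) are hypothetical, and real callers include <linux/atomic.h> rather than this architecture header.

/*
 * Illustrative sketch only: a pending-work counter built on the atomic_t
 * operations defined above.  All identifiers here are hypothetical.
 */
#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t pending = ATOMIC_INIT(0);

static void submit_one(void)
{
	atomic_add(1, &pending);	/* void add, no value returned */
}

static bool complete_one(void)
{
	/* atomic_fetch_sub() returns the value before the subtraction */
	return atomic_fetch_sub(1, &pending) == 1;	/* true when count reaches zero */
}

/* Increment @v unless it is zero, using the usual cmpxchg retry loop. */
static bool get_unless_zero(atomic_t *v)
{
	int old = atomic_read(v);

	while (old != 0) {
		int prev = atomic_cmpxchg(v, old, old + 1);

		if (prev == old)
			return true;	/* our old + 1 was installed */
		old = prev;		/* lost a race; retry with the fresh value */
	}
	return false;
}
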