v5.4
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic C implementation of atomic counter operations. Usable on
 * UP systems only. Do not include in machine independent code.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * atomic_$op() - $op integer to atomic variable
 * @i: integer value to $op
 * @v: pointer to the atomic variable
 *
 * Atomically $ops @i to @v. Does not strictly guarantee a memory barrier;
 * use smp_mb__{before,after}_atomic() where ordering is required.
 */

/*
 * atomic_$op_return() - $op integer to atomic variable and return the result
 * @i: integer value to $op
 * @v: pointer to the atomic variable
 *
 * Atomically $ops @i to @v. Does imply a full memory barrier.
 */
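
/*
 * Illustrative sketch (not part of this header): a caller that needs
 * ordering around one of the non-returning ops brackets it with the
 * barriers mentioned above, e.g. (obj->refs is a hypothetical field):
 *
 *	smp_mb__before_atomic();	// order earlier stores before the op
 *	atomic_add(1, &obj->refs);
 *	smp_mb__after_atomic();		// order the op before later accesses
 */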

#ifdef CONFIG_SMP

/* we can build all atomic primitives from cmpxchg */

#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
		c = old;						\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
		c = old;						\
									\
	return c c_op i;						\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
		c = old;						\
									\
	return c;							\
}
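
/*
 * Illustrative expansion (not part of this header): with the macro above,
 * the ATOMIC_OP_RETURN(add, +) instantiation further down expands to roughly
 * the following. The loop retries the cmpxchg() until no other CPU has
 * modified the counter in between; the ATOMIC_FETCH_OP variant is identical
 * except that it returns the old value 'c' rather than 'c + i'.
 *
 *	static inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		int c, old;
 *
 *		c = v->counter;
 *		while ((old = cmpxchg(&v->counter, c, c + i)) != c)
 *			c = old;
 *
 *		return c + i;
 *	}
 */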

#else

#include <linux/irqflags.h>

#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter = v->counter c_op i;					\
	raw_local_irq_restore(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	raw_local_irq_save(flags);					\
	ret = (v->counter = v->counter c_op i);				\
	raw_local_irq_restore(flags);					\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	raw_local_irq_save(flags);					\
	ret = v->counter;						\
	v->counter = v->counter c_op i;					\
	raw_local_irq_restore(flags);					\
									\
	return ret;							\
}

#endif /* CONFIG_SMP */

#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +)
#endif

#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -)
#endif

#ifndef atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
#endif

#ifndef atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
#endif

#ifndef atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
#endif

#ifndef atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
#endif

#ifndef atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)
#endif

#ifndef atomic_and
ATOMIC_OP(and, &)
#endif

#ifndef atomic_or
ATOMIC_OP(or, |)
#endif

#ifndef atomic_xor
ATOMIC_OP(xor, ^)
#endif

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
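
/*
 * Illustrative sketch (not part of this header): atomic_fetch_or() as a
 * simple test-and-set on a flag word. EXAMPLE_FLAG_BUSY and
 * example_test_and_set_busy() are hypothetical names.
 */
#define EXAMPLE_FLAG_BUSY	0x01

static inline int example_test_and_set_busy(atomic_t *flags)
{
	/* non-zero if the flag was already set before this call */
	return atomic_fetch_or(EXAMPLE_FLAG_BUSY, flags) & EXAMPLE_FLAG_BUSY;
}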

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#ifndef atomic_read
#define atomic_read(v)	READ_ONCE((v)->counter)
#endif

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#include <linux/irqflags.h>

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
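
/*
 * Illustrative sketch (not part of this header): a minimal reference count
 * built from the primitives above. A counter would typically be declared as
 * 'static atomic_t refs = ATOMIC_INIT(1);'. example_get() and example_put()
 * are hypothetical names.
 */
static inline void example_get(atomic_t *refs)
{
	atomic_add(1, refs);
}

static inline int example_put(atomic_t *refs)
{
	/* non-zero when the last reference has just been dropped */
	return atomic_sub_return(1, refs) == 0;
}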

#endif /* __ASM_GENERIC_ATOMIC_H */
v3.5.6
 
/*
 * Generic C implementation of atomic counter operations. Usable on
 * UP systems only. Do not include in machine independent code.
 *
 * Originally implemented for MN10300.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H

#include <asm/cmpxchg.h>

#ifdef CONFIG_SMP
/* Force people to define core atomics */
# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
#  error "SMP requires a little arch-specific magic"
# endif
#endif

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#ifndef atomic_read
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#endif

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) (((v)->counter) = (i))

#include <linux/irqflags.h>

/**
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the result
 */
#ifndef atomic_add_return
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	temp = v->counter;
	temp += i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}
#endif

/**
 * atomic_sub_return - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the result
 */
#ifndef atomic_sub_return
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}
#endif

static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))

#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c;
}
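
/*
 * Illustrative sketch (not part of this header): the common "take a
 * reference only if the object is still live" pattern built from
 * __atomic_add_unless(). example_tryget() is a hypothetical name.
 */
static inline int example_tryget(atomic_t *refs)
{
	/* non-zero if a reference was taken, 0 if refs had already hit 0 */
	return __atomic_add_unless(refs, 1, 0) != 0;
}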

/**
 * atomic_clear_mask - Atomically clear bits in atomic variable
 * @mask: Mask of the bits to be cleared
 * @v: pointer of type atomic_t
 *
 * Atomically clears the bits set in @mask from @v
 */
#ifndef atomic_clear_mask
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	unsigned long flags;

	mask = ~mask;
	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	v->counter &= mask;
	raw_local_irq_restore(flags);
}
#endif

/**
 * atomic_set_mask - Atomically set bits in atomic variable
 * @mask: Mask of the bits to be set
 * @v: pointer of type atomic_t
 *
 * Atomically sets the bits set in @mask in @v
 */
#ifndef atomic_set_mask
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	v->counter |= mask;
	raw_local_irq_restore(flags);
}
#endif
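
/*
 * Illustrative sketch (not part of this header): flag manipulation with the
 * mask helpers above. EXAMPLE_STATE_BUSY, example_mark_busy() and
 * example_clear_busy() are hypothetical names.
 */
#define EXAMPLE_STATE_BUSY	0x01

static inline void example_mark_busy(atomic_t *state)
{
	atomic_set_mask(EXAMPLE_STATE_BUSY, state);
}

static inline void example_clear_busy(atomic_t *state)
{
	atomic_clear_mask(EXAMPLE_STATE_BUSY, state);
}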

/* Assume that atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */