include/asm-generic/atomic.h (v6.13.7)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic C implementation of atomic counter operations. Do not include in
 * machine independent code.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#ifdef CONFIG_SMP

/* we can build all atomic primitives from cmpxchg */

#define ATOMIC_OP(op, c_op)						\
static inline void generic_atomic_##op(int i, atomic_t *v)		\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c)	\
		c = old;						\
}
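
/*
 * Note added for clarity (not part of the original header): as a sketch,
 * ATOMIC_OP(add, +) expands to roughly the function below. The loop re-reads
 * the counter and retries arch_cmpxchg() until no other CPU has changed the
 * value between the read and the exchange, so the update is atomic:
 *
 *	static inline void generic_atomic_add(int i, atomic_t *v)
 *	{
 *		int c, old;
 *
 *		c = v->counter;
 *		while ((old = arch_cmpxchg(&v->counter, c, c + i)) != c)
 *			c = old;
 *	}
 */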

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int generic_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c)	\
		c = old;						\
									\
	return c c_op i;						\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int generic_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c)	\
		c = old;						\
									\
	return c;							\
}
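
/*
 * Note added for clarity: of the three macro families above, ATOMIC_OP()
 * returns nothing, ATOMIC_OP_RETURN() returns the new value of the counter
 * (c c_op i), and ATOMIC_FETCH_OP() returns the value the counter held
 * before the operation (c).
 */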

#else

#include <linux/irqflags.h>

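/*
 * Note added for clarity: in this !CONFIG_SMP fallback there is only one
 * CPU, so the operations below only need to be atomic with respect to local
 * interrupt handlers. Disabling interrupts around a plain read-modify-write
 * is sufficient; no cmpxchg retry loop is needed.
 */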
#define ATOMIC_OP(op, c_op)						\
static inline void generic_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter = v->counter c_op i;					\
	raw_local_irq_restore(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int generic_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	raw_local_irq_save(flags);					\
	ret = (v->counter = v->counter c_op i);				\
	raw_local_irq_restore(flags);					\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int generic_atomic_fetch_##op(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	raw_local_irq_save(flags);					\
	ret = v->counter;						\
	v->counter = v->counter c_op i;					\
	raw_local_irq_restore(flags);					\
									\
	return ret;							\
}

#endif /* CONFIG_SMP */

ATOMIC_OP_RETURN(add, +)
ATOMIC_OP_RETURN(sub, -)

ATOMIC_FETCH_OP(add, +)
ATOMIC_FETCH_OP(sub, -)
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

ATOMIC_OP(add, +)
ATOMIC_OP(sub, -)
ATOMIC_OP(and, &)
ATOMIC_OP(or, |)
ATOMIC_OP(xor, ^)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic_add_return			generic_atomic_add_return
#define arch_atomic_sub_return			generic_atomic_sub_return

#define arch_atomic_fetch_add			generic_atomic_fetch_add
#define arch_atomic_fetch_sub			generic_atomic_fetch_sub
#define arch_atomic_fetch_and			generic_atomic_fetch_and
#define arch_atomic_fetch_or			generic_atomic_fetch_or
#define arch_atomic_fetch_xor			generic_atomic_fetch_xor

#define arch_atomic_add				generic_atomic_add
#define arch_atomic_sub				generic_atomic_sub
#define arch_atomic_and				generic_atomic_and
#define arch_atomic_or				generic_atomic_or
#define arch_atomic_xor				generic_atomic_xor

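/*
 * Note added for clarity: arch_atomic_read() and arch_atomic_set() below are
 * plain loads and stores; READ_ONCE()/WRITE_ONCE() only ensure the compiler
 * emits a single, untorn access to v->counter instead of caching, refetching
 * or splitting it.
 */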
#define arch_atomic_read(v)			READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)			WRITE_ONCE(((v)->counter), (i))

#endif /* __ASM_GENERIC_ATOMIC_H */
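
A brief usage sketch (not part of either header): kernel code does not call
the generic_atomic_* or arch_atomic_* helpers directly; it includes
<linux/atomic.h>, which wraps the arch_atomic_* operations defined above with
ordering variants and instrumentation. The names example_users, example_get()
and example_put() below are hypothetical and purely illustrative.

#include <linux/types.h>
#include <linux/atomic.h>

static atomic_t example_users = ATOMIC_INIT(0);	/* hypothetical counter */

static void example_get(void)
{
	/* ends up in arch_atomic_add()/arch_cmpxchg() via <linux/atomic.h> */
	atomic_inc(&example_users);
}

static bool example_put(void)
{
	/* atomic_dec_return() yields the post-decrement value atomically */
	return atomic_dec_return(&example_users) == 0;
}
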
include/asm-generic/atomic.h (v3.1)
 
/*
 * Generic C implementation of atomic counter operations. Usable on
 * UP systems only. Do not include in machine independent code.
 *
 * Originally implemented for MN10300.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H

#ifdef CONFIG_SMP
/* Force people to define core atomics */
# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
#  error "SMP requires a little arch-specific magic"
# endif
#endif

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#ifndef atomic_read
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#endif

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) (((v)->counter) = (i))

#include <linux/irqflags.h>
#include <asm/system.h>

/**
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the result
 */
#ifndef atomic_add_return
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	temp = v->counter;
	temp += i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}
#endif

/**
 * atomic_sub_return - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the result
 */
#ifndef atomic_sub_return
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}
#endif

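/**
 * atomic_add_negative - add and test if the result is negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Comment added for clarity: atomically adds @i to @v and returns true if
 * the result is negative, or false when the result is greater than or equal
 * to zero.
 */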
static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))

#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

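/**
 * __atomic_add_unless - add to the counter unless it has a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v
 * @u: the value @v must not hold for the add to happen
 *
 * Comment added for clarity: atomically adds @a to @v, so long as @v was not
 * already @u, and returns the old value of @v.
 */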
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;

	return c;
}

/**
 * atomic_clear_mask - Atomically clear bits in atomic variable
 * @mask: Mask of the bits to be cleared
 * @v: pointer of type atomic_t
 *
 * Atomically clears the bits set in @mask from @v
 */
#ifndef atomic_clear_mask
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	unsigned long flags;

	mask = ~mask;
	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	v->counter &= mask;
	raw_local_irq_restore(flags);
}
#endif

/**
 * atomic_set_mask - Atomically set bits in atomic variable
 * @mask: Mask of the bits to be set
 * @v: pointer of type atomic_t
 *
 * Atomically sets the bits set in @mask in @v
 */
#ifndef atomic_set_mask
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	v->counter |= mask;
	raw_local_irq_restore(flags);
}
#endif

/* Assume that atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */