/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(&v->counter, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(&v->counter, i) + i;
}
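
/*
 * Minimal usage sketch (not from the original file): bumping a shared
 * counter.  ATOMIC_INIT() is assumed from the generic atomic headers;
 * example_hits and example_record_hit() are hypothetical names.
 */
#if 0
static atomic_t example_hits = ATOMIC_INIT(0);

static int example_record_hit(void)
{
	atomic_add(1, &example_hits);	/* no barrier, no return value */
	/* implies a full barrier and yields the post-add value */
	return atomic_add_return(1, &example_hits);
}
#endif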

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(&v->counter, a, u);
}
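
/*
 * Minimal sketch (hypothetical helper): take a reference only if the
 * count has not already dropped to zero.  The generic
 * atomic_add_unless() wrapper in <linux/atomic.h> is built the same
 * way, returning true iff the add actually happened.
 */
#if 0
static inline int example_try_get(atomic_t *refcount)
{
	/* __atomic_add_unless() returns the old value; 0 means "dead" */
	return __atomic_add_unless(refcount, 1, 0) != 0;
}
#endif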

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(&v->counter, n);
}
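
/*
 * A sketch of the lost-update race the comment above guards against,
 * given that these atomics are lock-protected read/modify/write
 * sequences:
 *
 *	CPU 0 (atomic_add)		CPU 1 (plain store)
 *	load v->counter (sees 0)
 *					v->counter = 100
 *	store 0 + i	<-- the store of 100 is lost
 *
 * Routing the set through _atomic_xchg() takes the same lock, so it
 * serializes correctly with the other operations.
 */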

/* A 64-bit atomic type */

typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((long long *)&v->counter, 0);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long long i, atomic64_t *v)
{
	_atomic64_xchg_add(&v->counter, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(&v->counter, i) + i;
}
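
/*
 * Minimal sketch (hypothetical names): why the 64-bit type matters on
 * this 32-bit platform.  A plain 64-bit load or store is two 32-bit
 * accesses, so without the atomic routines a reader could see half of
 * an old value and half of a new one.
 */
#if 0
static atomic64_t example_bytes = ATOMIC64_INIT(0);

static long long example_account(long long n)
{
	atomic64_add(n, &example_bytes);
	return atomic64_read(&example_bytes);	/* consistent snapshot */
}
#endif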

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
					long long u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}
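
/*
 * Minimal sketch: a "get a reference unless it is already dead"
 * helper.  This is exactly the pattern the atomic64_inc_not_zero()
 * macro below captures; example_get() is a hypothetical name.
 */
#if 0
static inline int example_get(atomic64_t *refs)
{
	/* non-zero iff *refs was not 0 and has now been incremented */
	return atomic64_add_unless(refs, 1LL, 0LL) != 0;
}
#endif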

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, long long n)
{
	_atomic64_xchg(&v->counter, n);
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

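/*
 * Usage sketch for the derived helpers above (hypothetical names):
 * a reference count guarding teardown of some object.
 */
#if 0
static atomic64_t example_refs = ATOMIC64_INIT(1);

static void example_put(void)
{
	if (atomic64_dec_and_test(&example_refs))
		;	/* last reference gone; safe to tear down */
}
#endif
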
/*
 * We need to barrier before modifying the word, since the _atomic_xxx()
 * routines just tns the lock and then do a read/modify/write of the word.
 * But after the word is updated, the routine issues an "mf" before
 * returning, and since it's a function call, we don't even need a
 * compiler barrier.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_dec()	do { } while (0)
#define smp_mb__after_atomic_inc()	do { } while (0)

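/*
 * Sketch of the intended pairing (hypothetical helper): order prior
 * stores before the atomic op with the "before" barrier; the "after"
 * barriers cost nothing here.  atomic_dec() is assumed from the
 * common atomic headers.
 */
#if 0
static void example_publish_and_put(atomic_t *refs, int *ready)
{
	*ready = 1;			/* must be visible before the dec */
	smp_mb__before_atomic_dec();
	atomic_dec(refs);
	smp_mb__after_atomic_dec();	/* expands to a no-op here */
}
#endif
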
#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

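/*
 * Illustrative only: one plausible shape for the pointer-to-lock hash
 * implied by the sizing comment above.  The real mapping is done by
 * __atomic_hashed_lock(), declared below and defined elsewhere;
 * example_hashed_lock() is a hypothetical stand-in.
 */
#if 0
static inline int *example_hashed_lock(volatile void *v)
{
	/* drop the low 2 bits (int alignment), keep the next HASH bits */
	unsigned long idx = ((unsigned long)v >> 2) & (ATOMIC_HASH_SIZE - 1);

	return &atomic_locks[idx];
}
#endif
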
/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to
 * discourage assembly coders from using this register by mistake, so if
 * it is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);

/* Private helper routines in lib/atomic_asm_32.S */
struct __get_user {
	unsigned long val;
	int err;
};
extern struct __get_user __atomic_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
					long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
					long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
					int *lock, long long o, long long n);

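/*
 * Sketch of how a caller might consume these helpers: the asm routines
 * hand back both the value and an error code in one struct, so a fault
 * on a user address turns into a plain error return.  All names in
 * this example are hypothetical.
 */
#if 0
static inline int example_user_xchg_add(int __user *uaddr, int n, int *oldval)
{
	struct __get_user ret;

	ret = __atomic_xchg_add((volatile int *)uaddr,
				__atomic_hashed_lock((void __force *)uaddr), n);
	if (ret.err)
		return ret.err;		/* faulted on the user address */
	*oldval = ret.val;
	return 0;
}
#endif
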
/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */