arch/tile/include/asm/atomic_64.h (v3.5.6)
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) ((v)->counter = (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */

/*
 * The cmpexch4 instruction compares memory against the CMPEXCH_VALUE
 * special-purpose register, so the expected value "o" is staged there
 * before the exchange is issued.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	int val;
	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
	smp_mb();  /* barrier for proper semantics */
	val = __insn_cmpexch4((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

static inline int atomic_xchg(atomic_t *v, int n)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_exch4((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

/* Returns the value observed before any add; callers test it against "u". */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = atomic_cmpxchg(v, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)		((v)->counter)
#define atomic64_set(v, i) ((v)->counter = (i))

static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
	val = __insn_cmpexch((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

static inline long atomic64_xchg(atomic64_t *v, long n)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_exch((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

/* Returns non-zero iff the add was actually performed. */
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = atomic64_cmpxchg(v, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

/* Atomic dec and inc don't implement barrier, so provide them if needed. */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */
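
The add-unless routines above are the canonical compare-and-exchange retry loop. The following standalone C program is a rough userspace sketch of the same pattern, not kernel code: the my_atomic_t and my_atomic_add_unless names are invented here, and GCC's __atomic builtins stand in for the Tile __insn_cmpexch4 intrinsic and its surrounding barriers.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's atomic_t. */
typedef struct { int counter; } my_atomic_t;

/*
 * Same retry loop as __atomic_add_unless above: add "a" to the counter
 * unless its current value is "u".  Returns the value observed before
 * any add, so the caller can compare it with "u" to learn whether the
 * add actually happened.
 */
static int my_atomic_add_unless(my_atomic_t *v, int a, int u)
{
	int guess = 0;
	int oldval = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
	do {
		if (oldval == u)
			break;
		guess = oldval;
		/*
		 * On failure this updates "oldval" with the value it
		 * actually found, just as atomic_cmpxchg() returns it
		 * in the kernel version.
		 */
		__atomic_compare_exchange_n(&v->counter, &oldval, guess + a,
					    false, __ATOMIC_SEQ_CST,
					    __ATOMIC_SEQ_CST);
	} while (guess != oldval);
	return oldval;
}

int main(void)
{
	my_atomic_t v = { .counter = 5 };
	int ret = my_atomic_add_unless(&v, 1, 5);
	printf("%d %d\n", ret, v.counter);	/* "5 5": add was blocked */
	v.counter = 3;
	ret = my_atomic_add_unless(&v, 1, 5);
	printf("%d %d\n", ret, v.counter);	/* "3 4": add happened */
	return 0;
}

The exit test works because a successful compare-exchange leaves oldval equal to guess, while a failed one reloads oldval with the fresh value for the next attempt.
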
arch/tile/include/asm/atomic_64.h (v3.15)
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) ((v)->counter = (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */

static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

/* Returns the value observed before any add; callers test it against "u". */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)		((v)->counter)
#define atomic64_set(v, i) ((v)->counter = (i))

static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

/* Returns non-zero iff the add was actually performed. */
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

/* Atomic dec and inc don't implement barrier, so provide them if needed. */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */
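
Relative to v3.5.6 above, the open-coded atomic_cmpxchg(), atomic_xchg(), atomic64_cmpxchg(), and atomic64_xchg() routines are gone from this header, and the add-unless loops now call the kernel's generic cmpxchg() macro on &v->counter instead; the fetchadd-based paths are unchanged. One convention worth spelling out in those paths: a fetch-and-add primitive returns the counter value from before the addition, which is why add_return adds i back in before returning. Below is a minimal userspace sketch of that convention, with invented my_atomic64_t and my_atomic64_add_return names and GCC's __atomic_fetch_add builtin standing in for __insn_fetchadd.

#include <stdio.h>

/* Illustrative stand-in for the kernel's atomic64_t. */
typedef struct { long counter; } my_atomic64_t;

/*
 * Mirrors atomic64_add_return above: __atomic_fetch_add, like
 * __insn_fetchadd, hands back the counter value from *before* the
 * addition, so "+ i" computes the new value that add_return must
 * return.
 */
static long my_atomic64_add_return(long i, my_atomic64_t *v)
{
	return __atomic_fetch_add(&v->counter, i, __ATOMIC_SEQ_CST) + i;
}

int main(void)
{
	my_atomic64_t v = { .counter = 40 };
	long ret = my_atomic64_add_return(2, &v);
	printf("%ld %ld\n", ret, v.counter);	/* prints "42 42" */
	return 0;
}
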