/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>

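/*
 * All atomic_t and locked-bitop helpers in this file are serialized
 * by a small hash of spinlocks rather than by hardware atomics: on
 * SMP, bits 8-9 of the operand address select one of four locks; on
 * UP, a single dummy lock is enough, since taking it just disables
 * interrupts.
 */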
#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)		(&dummy)

#endif /* SMP */

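/*
 * Templates for the locked read-modify-write operations: take the
 * hash lock for v, apply c_op to v->counter, and (in the _return
 * variant) hand back the resulting value.
 */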
#define ATOMIC_OP_RETURN(op, c_op)				\
int atomic_##op##_return(int i, atomic_t *v)			\
{								\
	int ret;						\
	unsigned long flags;					\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);		\
								\
	ret = (v->counter c_op i);				\
								\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);		\
	return ret;						\
}								\
EXPORT_SYMBOL(atomic_##op##_return);

#define ATOMIC_OP(op, c_op)					\
void atomic_##op(int i, atomic_t *v)				\
{								\
	unsigned long flags;					\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);		\
								\
	v->counter c_op i;					\
								\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);		\
}								\
EXPORT_SYMBOL(atomic_##op);

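/* Instantiate atomic_add_return(), atomic_and(), atomic_or() and atomic_xor(). */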
ATOMIC_OP_RETURN(add, +=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)

#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

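/* Unconditionally replace the counter and return the value it held. */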
int atomic_xchg(atomic_t *v, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	v->counter = new;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_xchg);

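/*
 * Store new only if the counter still equals old.  The value actually
 * read is always returned, so the caller can tell whether the swap
 * took place.
 */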
int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_cmpxchg);

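/* Add a to the counter unless it currently equals u; return the value seen before any add. */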
int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(__atomic_add_unless);

/*
 * atomic_set() cannot be a bare store here: it must take the hash
 * lock so that it serializes against a concurrent locked
 * read-modify-write on the same counter.
 */
void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);

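/*
 * Locked bitop helpers: each one sets, clears or flips the bits in
 * mask under the address's hash lock and returns the previous value
 * of the masked bits, which is what the test_and_*_bit() primitives
 * need.
 */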
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___set_bit);

unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___clear_bit);

unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___change_bit);

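/*
 * cmpxchg() on an arbitrary 32-bit word.  It goes through the same
 * hash locks as the atomic_t helpers, so the two stay mutually
 * atomic when they hit the same address.
 */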
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);

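/* Unconditional exchange on an arbitrary 32-bit word. */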
unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	prev = *ptr;
	*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);