/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/isa-rev.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#define __bit_op(mem, insn, inputs...) do {			\
	unsigned long temp;					\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __LL "%0, %1				\n"	\
	"	" insn "				\n"	\
	"	" __SC "%0, %1				\n"	\
	"	" __SC_BEQZ "%0, 1b			\n"	\
	"	.set		pop			\n"	\
	: "=&r"(temp), "+" GCC_OFF_SMALL_ASM()(mem)		\
	: inputs						\
	: __LLSC_CLOBBER);					\
} while (0)

#define __test_bit_op(mem, ll_dst, insn, inputs...) ({		\
	unsigned long orig, temp;				\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __LL ll_dst ", %2			\n"	\
	"	" insn "				\n"	\
	"	" __SC "%1, %2				\n"	\
	"	" __SC_BEQZ "%1, 1b			\n"	\
	"	.set		pop			\n"	\
	: "=&r"(orig), "=&r"(temp),				\
	  "+" GCC_OFF_SMALL_ASM()(mem)				\
	: inputs						\
	: __LLSC_CLOBBER);					\
								\
	orig;							\
})
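
/*
 * Illustrative note (editor's sketch, not part of the original header):
 * __bit_op() expands to a load-linked/store-conditional retry loop.
 * For example, __bit_op(*m, "or\t%0, %2", "ir"(BIT(bit))) behaves
 * roughly like the C below, except that the LL/SC pair makes the whole
 * read-modify-write atomic against other CPUs (store_conditional is a
 * hypothetical stand-in for the SC instruction):
 *
 *	unsigned long temp;
 *	do {
 *		temp = *m;				// ll
 *		temp |= BIT(bit);			// insn ("or")
 *	} while (!store_conditional(m, temp));		// sc + beqz 1b
 *
 * __test_bit_op() is the same loop, but it additionally returns the
 * value observed by the ll so callers can test the bit's old state.
 */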

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);


/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_set_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
		__bit_op(*m, __INS "%0, %3, %2, 1", "i"(bit), "r"(~0));
		return;
	}

	__bit_op(*m, "or\t%0, %2", "ir"(BIT(bit)));
}
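
/*
 * Usage sketch (editor's illustration; the flag word and bit index
 * below are hypothetical):
 *
 *	static unsigned long dev_flags;
 *	#define DEV_RESET_PENDING	0
 *
 *	set_bit(DEV_RESET_PENDING, &dev_flags);
 *
 * The containing word is updated with a single atomic read-modify-write,
 * so concurrent set_bit()/clear_bit() calls on other bits of the same
 * word cannot lose updates.
 */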

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_clear_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
		__bit_op(*m, __INS "%0, $0, %2, 1", "i"(bit));
		return;
	}

	__bit_op(*m, "and\t%0, %2", "ir"(~BIT(bit)));
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_change_bit(nr, addr);
		return;
	}

	__bit_op(*m, "xor\t%0, %2", "ir"(BIT(bit)));
}

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	if (!kernel_uses_llsc) {
		res = __mips_test_and_set_bit_lock(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
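
/*
 * Typical bit-lock pattern built on the primitives above (editor's
 * sketch; the lock word and bit are hypothetical):
 *
 *	static unsigned long lock_word;
 *	#define MY_LOCK_BIT	0
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &lock_word))
 *		cpu_relax();		// spin until the bit was clear
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &lock_word);
 *
 * The acquire semantics of test_and_set_bit_lock() and the release
 * semantics of clear_bit_unlock() keep the critical section ordered
 * with respect to other CPUs.
 */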

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	return test_and_set_bit_lock(nr, addr);
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_clear_bit(nr, addr);
	} else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
		res = __test_bit_op(*m, "%1",
				    __EXT "%0, %1, %3, 1;"
				    __INS "%1, $0, %3, 1",
				    "i"(bit));
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3;"
				     "xor\t%1, %1, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
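
/*
 * Sketch of a consume-pending-work idiom (editor's illustration with
 * hypothetical names):
 *
 *	if (test_and_clear_bit(WORK_PENDING_BIT, &pending))
 *		process_work();		// runs at most once per set_bit()
 *
 * Because the test and the clear are one atomic operation, two CPUs
 * racing on the same bit cannot both observe it set.
 */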

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_change_bit(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "xor\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}

#undef __bit_op
#undef __test_bit_op

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..BITS_PER_LONG-1) of the most significant
 * 1 bit in a word. Undefined if no 1 bit exists.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
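
/*
 * Worked examples (editor's illustration): __fls(1) == 0 and
 * __fls(0x80000000) == 31; on 64-bit, __fls(1UL << 63) == 63.
 * The fallback path is a binary search: each test halves the window
 * in which the most significant 1 bit can lie, shifting the word up
 * and reducing num accordingly until one bit position remains.
 */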

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..BITS_PER_LONG-1.
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
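
/*
 * Worked examples (editor's illustration, matching the note above):
 * fls(0) == 0, fls(1) == 1, fls(0x80000000u) == 32. On CPUs with clz
 * the result is simply 32 minus the number of leading zero bits.
 */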

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from ffz() (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
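
/*
 * The identity used above (editor's note): for nonzero word,
 * word & -word isolates the lowest set bit, so e.g.
 * ffs(0b1010) == fls(0b0010) == 2. ffs() is 1-based and returns 0
 * for 0, while __ffs() is 0-based and undefined for 0.
 */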

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */