/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>	/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
		volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
		volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
		volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
		volatile unsigned long *addr);


/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)
		: __LLSC_CLOBBER);
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# set_bit	\n"
			"	" __INS "%0, %3, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push			\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# set_bit	\n"
			"	or	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	pop			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
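
/*
 * Illustrative usage sketch (not part of this header; all names below are
 * hypothetical): set_bit() and clear_bit() operate on an array of unsigned
 * longs, so callers usually declare a bitmap with DECLARE_BITMAP() and may
 * pass bit numbers larger than BITS_PER_LONG. clear_bit() has no implicit
 * barrier (see its comment below), so locking users pair it with the
 * smp_mb__before_atomic()/smp_mb__after_atomic() helpers.
 *
 *	DECLARE_BITMAP(pending, 128);
 *
 *	static void mark_pending(unsigned int nr)
 *	{
 *		set_bit(nr, pending);
 *	}
 *
 *	static void ack_pending(unsigned int nr)
 *	{
 *		clear_bit(nr, pending);
 *	}
 */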

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit))
		: __LLSC_CLOBBER);
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push			\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# clear_bit	\n"
			"	and	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	pop			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit))
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push			\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# change_bit	\n"
			"	xor	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	pop			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push			\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3		\n"
			"	" __SC "%2, %1			\n"
			"	.set	pop			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
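
/*
 * Illustrative usage sketch (not part of this header; names are hypothetical):
 * because test_and_set_bit() returns the old value and implies a full memory
 * barrier, it can be used to make sure one-time work is claimed by exactly
 * one caller.
 *
 *	static unsigned long init_done;
 *
 *	static void maybe_init(void)
 *	{
 *		if (test_and_set_bit(0, &init_done))
 *			return;
 *		do_one_time_setup();
 *	}
 */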

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push			\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3		\n"
			"	" __SC "%2, %1			\n"
			"	.set	pop			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
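
/*
 * Illustrative usage sketch (not part of this header; names are hypothetical):
 * test_and_set_bit_lock() (acquire) and clear_bit_unlock() (release) pair up
 * as a simple bit spinlock on bit 0 of a word.
 *
 *	static unsigned long lock_word;
 *
 *	static void my_bit_lock(void)
 *	{
 *		while (test_and_set_bit_lock(0, &lock_word))
 *			cpu_relax();
 *	}
 *
 *	static void my_bit_unlock(void)
 *	{
 *		clear_bit_unlock(0, &lock_word);
 *	}
 */
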
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1		\n"
			"	" __INS "%0, $0, %3, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push			\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3		\n"
			"	xor	%2, %3			\n"
			"	" __SC "%2, %1			\n"
			"	.set	pop			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push			\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3		\n"
			"	" __SC "\t%2, %1		\n"
			"	.set	pop			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"	clz	%0, %1				\n"
		"	.set	pop				\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"	dclz	%0, %1				\n"
		"	.set	pop				\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
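
/*
 * Worked example (illustrative): for word == 0x90 (binary 1001 0000),
 * __fls(0x90) == 7 and __ffs(0x90) == 4; __ffs() isolates the lowest set bit
 * with (word & -word), here 0x10, and lets __fls() report its position.
 * Both results are undefined for word == 0.
 */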

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"	clz	%0, %1				\n"
		"	.set	pop				\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
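
/*
 * Worked example (illustrative): fls(0) == 0, fls(0x90) == 8, ffs(0) == 0 and
 * ffs(0x90) == 5. Unlike __fls()/__ffs() above, these are 1-based and safe to
 * call with 0.
 */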

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */