/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);


/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)
		: __LLSC_CLOBBER);
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# set_bit	\n"
			"	" __INS "%0, %3, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push			\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# set_bit	\n"
			"	or	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	pop			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
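
/*
 * A minimal usage sketch (the bitmap is hypothetical; assumes a 64-bit
 * kernel, so SZLONG_LOG == 6 and SZLONG_MASK == 63): set_bit(100, pending)
 * operates on word 100 >> 6 == 1 of the array and on bit 100 & 63 == 36
 * within that word, which is why @nr is not limited to the first long.
 *
 *	static DECLARE_BITMAP(pending, 256);	// 256 bits, 4 longs on 64-bit
 *
 *	set_bit(100, pending);			// atomically mark slot 100
 */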

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit))
		: __LLSC_CLOBBER);
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push			\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# clear_bit	\n"
			"	and	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	pop			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit))
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push			\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# change_bit	\n"
			"	xor	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	pop			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC "%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
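
/*
 * A minimal sketch of the usual "claim once" idiom (the flag word, bit
 * number and helper are hypothetical): the old value is returned as part
 * of the same atomic operation, so exactly one of several racing CPUs
 * observes 0 and wins.
 *
 *	static unsigned long init_state;
 *	#define INIT_DONE	0	// hypothetical bit number
 *
 *	if (!test_and_set_bit(INIT_DONE, &init_state))
 *		do_one_time_setup();	// only the first caller gets here
 */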

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit_lock	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit_lock	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC "%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
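
/*
 * A minimal bit-spinlock sketch built from test_and_set_bit_lock() and
 * clear_bit_unlock() (the lock word and bit number are hypothetical):
 * acquire ordering on the successful set and release ordering on the
 * clear keep the critical section from leaking past either end.
 *
 *	static unsigned long lock_word;
 *
 *	while (test_and_set_bit_lock(0, &lock_word))
 *		cpu_relax();		// spin until the bit was observed clear
 *	// ... critical section ...
 *	clear_bit_unlock(0, &lock_word);
 */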

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC "%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC "%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation.  It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}
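
/*
 * A sketch of the intended pattern (the lock word and bit are
 * hypothetical): by protocol no other CPU may modify the remaining bits
 * while bit 0 is held, so the plain read-modify-write done by
 * __clear_bit() cannot lose a concurrent update.
 *
 *	static unsigned long word;
 *
 *	while (test_and_set_bit_lock(0, &word))
 *		cpu_relax();
 *	word |= 0x2;			// other bits change only under the lock
 *	__clear_bit_unlock(0, &word);	// cheaper than clear_bit_unlock()
 */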

/*
 * Return the bit position (0..BITS_PER_LONG-1) of the most significant 1
 * bit in a word.  The result is undefined if no 1 bit exists, so callers
 * should check for a non-zero word first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
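
/*
 * Worked example of the binary-search fallback, for word == 0x00f0 on a
 * 32-bit kernel: num starts at 31; the top 16 bits are clear, so num
 * becomes 15 and word becomes 0x00f00000; the next test finds the top 8
 * bits clear, so num becomes 7 and word becomes 0xf0000000; every
 * remaining test now sees a set bit, leaving __fls(0x00f0) == 7.
 */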

/*
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Returns 0..BITS_PER_LONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
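
/*
 * The word & -word step is the usual two's-complement trick: negation
 * flips every bit above the lowest set bit, so the AND isolates that bit.
 * E.g. word == 0x28 (bits 3 and 5 set) gives word & -word == 0x08, and
 * __fls(0x08) == 3, so __ffs(0x28) == 3.
 */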

/*
 * fls - find last bit set.
 * @x: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
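
/*
 * With CLZ the result is just 32 minus the leading-zero count: fls(1) ==
 * 32 - 31 == 1 and fls(0x80000000) == 32 - 0 == 32.  MIPS defines clz of
 * zero as 32, so fls(0) == 0 falls out of the same expression with no
 * special case in the asm path.
 */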

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from ffz() (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */