v5.4: MIPS <asm/bitops.h>
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)
		: __LLSC_CLOBBER);
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
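
A brief usage sketch (editor's addition, not part of the original header; the bitmap and function names are hypothetical). It illustrates why @nr may exceed one word: set_bit() selects the word with nr >> SZLONG_LOG and the bit within it with nr & SZLONG_MASK.

/* Hypothetical per-unit flags; DECLARE_BITMAP() comes from <linux/types.h>. */
#define MY_NR_UNITS	128
static DECLARE_BITMAP(my_ready, MY_NR_UNITS);

static void my_mark_ready(unsigned int unit)
{
	/* Atomic read-modify-write of one bit in a 128-bit map; note
	 * that set_bit() implies no memory barrier on its own. */
	set_bit(unit, my_ready);
}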

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit))
		: __LLSC_CLOBBER);
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit))
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
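
One common pattern the returned old value enables (editor's sketch; my_state, MY_INITED and the helper are hypothetical): one-time initialization raced by several CPUs.

static unsigned long my_state;
#define MY_INITED	0	/* hypothetical state bit */

static void my_do_expensive_init(void) { /* hypothetical one-time setup */ }

static void my_init_once(void)
{
	/* Exactly one CPU observes the bit transition 0 -> 1 and runs
	 * the setup; the implied barriers order the setup against
	 * anything the callers do afterwards. */
	if (!test_and_set_bit(MY_INITED, &my_state))
		my_do_expensive_init();
}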

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
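
The acquire/release pair is the point of the _lock/_unlock variants. A minimal sketch of the intended pairing with clear_bit_unlock() above (editor's addition; the lock word and bit are hypothetical, cpu_relax() is the usual busy-wait hint):

#define MY_LOCK_BIT	0		/* hypothetical lock bit */
static unsigned long my_lock_word;

static void my_bit_lock(void)
{
	/* Old value 1 means another CPU holds the bit; spin until we
	 * make the 0 -> 1 transition ourselves. The acquire barrier is
	 * implied by test_and_set_bit_lock(). */
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_lock_word))
		cpu_relax();
}

static void my_bit_unlock(void)
{
	/* Release barrier first, then the bit is cleared atomically. */
	clear_bit_unlock(MY_LOCK_BIT, &my_lock_word);
}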
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
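
A typical consumer pattern (editor's sketch; the pending word, event bit and handler are hypothetical): because the old value is returned atomically, a set bit is consumed exactly once even when several CPUs poll concurrently.

static unsigned long my_pending;
#define MY_EVENT	3	/* hypothetical event bit */

static void my_handle_event(void) { /* hypothetical handler */ }

static void my_poll(void)
{
	/* Nonzero return means this caller cleared the bit and owns
	 * the event; the implied barriers order the handler after the
	 * producer's stores. */
	if (test_and_clear_bit(MY_EVENT, &my_pending))
		my_handle_event();
}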

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
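
The software fallback is a binary search that halves the candidate range at each step. A worked trace (editor's addition) for word = 0x00f0 on a 32-bit kernel:

/*
 * Editor's worked trace, word = 0x00f0, BITS_PER_LONG = 32:
 *	num = 31
 *	top 16 bits clear -> num = 15, word = 0x00f00000
 *	top  8 bits clear -> num =  7, word = 0xf0000000
 *	the 4-, 2- and 1-bit tests all see a set bit, so num stays 7
 * hence __fls(0x00f0) == 7, the index of the highest set bit.
 */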

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
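
Why this works (editor's note): in two's complement, word & -word isolates the lowest set bit, so the highest bit of the masked value is also its lowest.

/*
 * Editor's worked example:
 *	word         = 0b101000 (40)
 *	-word        = ...11011000
 *	word & -word = 0b001000 (only bit 3 survives)
 * hence __ffs(40) == __fls(8) == 3.
 */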

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
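
fls() is commonly used to round a size up to a power of two. A minimal sketch (editor's addition; the helper name is hypothetical, and it assumes 1 <= size <= 1U << 31):

/* Smallest power of two >= size (illustrative helper, not in this header). */
static inline unsigned int my_roundup_pow2(unsigned int size)
{
	/* fls(size - 1) counts the significant bits of size - 1;
	 * e.g. size = 0x1400: fls(0x13ff) = 13, 1U << 13 = 0x2000. */
	return 1U << fls(size - 1);
}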

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
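
For nonzero input the two families line up with an off-by-one (editor's note): ffs() numbers bits from 1, __ffs() from 0.

/*
 * Editor's illustration: for x != 0, ffs(x) == __ffs(x) + 1.
 *	x = 0x40: x & -x = 0x40, fls(0x40) = 7 -> ffs(0x40) = 7
 *	          __fls(0x40) = 6              -> __ffs(0x40) = 6
 */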

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */
v3.1: MIPS <asm/bitops.h>
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define __INS		"ins    "
#define __EXT		"ext    "
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define __INS		"dins    "
#define __EXT		"dext    "
#endif
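
These constants drive the word/bit decomposition used throughout this file. A worked example (editor's addition):

/*
 * Editor's worked example: with _MIPS_SZLONG == 32, bit nr = 75 of a
 * bitmap decomposes as
 *	word index = 75 >> SZLONG_LOG = 75 >> 5 = 2
 *	bit offset = 75 & SZLONG_MASK = 75 & 31 = 11
 * i.e. the operations below touch bit 11 of addr[2].
 */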

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
#define smp_mb__after_clear_bit()	smp_llsc_mb()

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a |= mask;
		raw_local_irq_restore(flags);
	}
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_clear_bit();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS	"%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL	"%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips64					\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__("clz %0, %1" : "=r" (x) : "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */