v3.1 (arch/mips/include/asm/bitops.h)
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
  7 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
  8 */
  9#ifndef _ASM_BITOPS_H
 10#define _ASM_BITOPS_H
 11
 12#ifndef _LINUX_BITOPS_H
 13#error only <linux/bitops.h> can be included directly
 14#endif
 15
 16#include <linux/compiler.h>
 17#include <linux/irqflags.h>
 18#include <linux/types.h>
 19#include <asm/barrier.h>
 20#include <asm/bug.h>
 21#include <asm/byteorder.h>		/* sigh ... */
 22#include <asm/cpu-features.h>
 23#include <asm/sgidefs.h>
 24#include <asm/war.h>
 25
 26#if _MIPS_SZLONG == 32
 27#define SZLONG_LOG 5
 28#define SZLONG_MASK 31UL
 29#define __LL		"ll	"
 30#define __SC		"sc	"
 31#define __INS		"ins    "
 32#define __EXT		"ext    "
 33#elif _MIPS_SZLONG == 64
 34#define SZLONG_LOG 6
 35#define SZLONG_MASK 63UL
 36#define __LL		"lld	"
 37#define __SC		"scd	"
 38#define __INS		"dins    "
 39#define __EXT		"dext    "
 40#endif
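SZLONG_LOG and SZLONG_MASK let every helper below split a flat bit number into a word index and a within-word offset, which is why @nr may exceed BITS_PER_LONG. A minimal sketch of that decomposition (the helper names here are hypothetical, not part of this header):

	/* Hypothetical helpers showing the nr -> (word, mask) split. */
	static inline unsigned long *bit_word(unsigned long *p, unsigned long nr)
	{
		return p + (nr >> SZLONG_LOG);		/* which long holds the bit */
	}

	static inline unsigned long bit_mask(unsigned long nr)
	{
		return 1UL << (nr & SZLONG_MASK);	/* which bit inside that long */
	}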
 41
 42/*
 43 * clear_bit() doesn't provide any barrier for the compiler.
 44 */
 45#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
 46#define smp_mb__after_clear_bit()	smp_llsc_mb()
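These two macros pair with clear_bit() when it is used as a release operation; a minimal sketch of an unlock path, assuming a hypothetical LOCK_BIT flag and surrounding fields:

	/* Hypothetical unlock built from clear_bit() plus the barrier macros. */
	shared->result = value;			/* must be visible before ...  */
	smp_mb__before_clear_bit();
	clear_bit(LOCK_BIT, &shared->flags);	/* ... the lock bit is cleared */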
 47
 48/*
 49 * set_bit - Atomically set a bit in memory
 50 * @nr: the bit to set
 51 * @addr: the address to start counting from
 52 *
 53 * This function is atomic and may not be reordered.  See __set_bit()
 54 * if you do not require the atomic guarantees.
 55 * Note that @nr may be almost arbitrarily large; this function is not
 56 * restricted to acting on a single-word quantity.
 57 */
 58static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 59{
 60	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 61	unsigned short bit = nr & SZLONG_MASK;
 62	unsigned long temp;
 63
 64	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 65		__asm__ __volatile__(
 66		"	.set	mips3					\n"
 67		"1:	" __LL "%0, %1			# set_bit	\n"
 68		"	or	%0, %2					\n"
 69		"	" __SC	"%0, %1					\n"
 70		"	beqzl	%0, 1b					\n"
 71		"	.set	mips0					\n"
 72		: "=&r" (temp), "=m" (*m)
 73		: "ir" (1UL << bit), "m" (*m));
 74#ifdef CONFIG_CPU_MIPSR2
 75	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
 76		do {
 77			__asm__ __volatile__(
 78			"	" __LL "%0, %1		# set_bit	\n"
 79			"	" __INS "%0, %3, %2, 1			\n"
 80			"	" __SC "%0, %1				\n"
 81			: "=&r" (temp), "+m" (*m)
 82			: "ir" (bit), "r" (~0));
 83		} while (unlikely(!temp));
 84#endif /* CONFIG_CPU_MIPSR2 */
 85	} else if (kernel_uses_llsc) {
 86		do {
 87			__asm__ __volatile__(
 88			"	.set	mips3				\n"
 89			"	" __LL "%0, %1		# set_bit	\n"
 90			"	or	%0, %2				\n"
 91			"	" __SC	"%0, %1				\n"
 92			"	.set	mips0				\n"
 93			: "=&r" (temp), "+m" (*m)
 94			: "ir" (1UL << bit));
 95		} while (unlikely(!temp));
 96	} else {
 97		volatile unsigned long *a = addr;
 98		unsigned long mask;
 99		unsigned long flags;
100
101		a += nr >> SZLONG_LOG;
102		mask = 1UL << bit;
103		raw_local_irq_save(flags);
104		*a |= mask;
105		raw_local_irq_restore(flags);
106	}
107}
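A short usage sketch, assuming a hypothetical flags array shared between task and interrupt context; no extra locking is needed around the call:

	/* Hypothetical caller: flag a unit as ready from any context. */
	static unsigned long dev_flags[BITS_TO_LONGS(64)];

	static void mark_ready(int unit)
	{
		set_bit(unit, dev_flags);	/* atomic read-modify-write */
	}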
108
109/*
110 * clear_bit - Clears a bit in memory
111 * @nr: Bit to clear
112 * @addr: Address to start counting from
113 *
114 * clear_bit() is atomic and may not be reordered.  However, it does
115 * not contain a memory barrier, so if it is used for locking purposes,
116 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
117 * in order to ensure changes are visible on other processors.
118 */
119static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
120{
121	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
122	unsigned short bit = nr & SZLONG_MASK;
123	unsigned long temp;
124
125	if (kernel_uses_llsc && R10000_LLSC_WAR) {
126		__asm__ __volatile__(
127		"	.set	mips3					\n"
128		"1:	" __LL "%0, %1			# clear_bit	\n"
129		"	and	%0, %2					\n"
130		"	" __SC "%0, %1					\n"
131		"	beqzl	%0, 1b					\n"
132		"	.set	mips0					\n"
133		: "=&r" (temp), "+m" (*m)
134		: "ir" (~(1UL << bit)));
135#ifdef CONFIG_CPU_MIPSR2
136	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
137		do {
138			__asm__ __volatile__(
139			"	" __LL "%0, %1		# clear_bit	\n"
140			"	" __INS "%0, $0, %2, 1			\n"
141			"	" __SC "%0, %1				\n"
142			: "=&r" (temp), "+m" (*m)
143			: "ir" (bit));
144		} while (unlikely(!temp));
145#endif /* CONFIG_CPU_MIPSR2 */
146	} else if (kernel_uses_llsc) {
147		do {
148			__asm__ __volatile__(
149			"	.set	mips3				\n"
150			"	" __LL "%0, %1		# clear_bit	\n"
151			"	and	%0, %2				\n"
152			"	" __SC "%0, %1				\n"
153			"	.set	mips0				\n"
154			: "=&r" (temp), "+m" (*m)
155			: "ir" (~(1UL << bit)));
156		} while (unlikely(!temp));
157	} else {
158		volatile unsigned long *a = addr;
159		unsigned long mask;
160		unsigned long flags;
161
162		a += nr >> SZLONG_LOG;
163		mask = 1UL << bit;
164		raw_local_irq_save(flags);
165		*a &= ~mask;
166		raw_local_irq_restore(flags);
167	}
168}
169
170/*
171 * clear_bit_unlock - Clears a bit in memory
172 * @nr: Bit to clear
173 * @addr: Address to start counting from
174 *
175 * clear_bit_unlock() is atomic and implies release semantics before the
176 * memory operation. It can be used for an unlock.
177 */
178static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
179{
180	smp_mb__before_clear_bit();
181	clear_bit(nr, addr);
182}
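Paired with test_and_set_bit_lock() further down, this gives a simple bit spinlock; a minimal sketch, with BUSY_BIT and word hypothetical:

	/* Hypothetical bit lock built on the acquire/release bit ops. */
	while (test_and_set_bit_lock(BUSY_BIT, &word))
		cpu_relax();			/* spin while the old bit was 1 */
	/* ... critical section ... */
	clear_bit_unlock(BUSY_BIT, &word);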
183
184/*
185 * change_bit - Toggle a bit in memory
186 * @nr: Bit to change
187 * @addr: Address to start counting from
188 *
189 * change_bit() is atomic and may not be reordered.
190 * Note that @nr may be almost arbitrarily large; this function is not
191 * restricted to acting on a single-word quantity.
192 */
193static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
194{
195	unsigned short bit = nr & SZLONG_MASK;
196
197	if (kernel_uses_llsc && R10000_LLSC_WAR) {
198		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
199		unsigned long temp;
200
201		__asm__ __volatile__(
202		"	.set	mips3				\n"
203		"1:	" __LL "%0, %1		# change_bit	\n"
204		"	xor	%0, %2				\n"
205		"	" __SC	"%0, %1				\n"
206		"	beqzl	%0, 1b				\n"
207		"	.set	mips0				\n"
208		: "=&r" (temp), "+m" (*m)
209		: "ir" (1UL << bit));
210	} else if (kernel_uses_llsc) {
211		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
212		unsigned long temp;
213
214		do {
215			__asm__ __volatile__(
216			"	.set	mips3				\n"
217			"	" __LL "%0, %1		# change_bit	\n"
218			"	xor	%0, %2				\n"
219			"	" __SC	"%0, %1				\n"
220			"	.set	mips0				\n"
221			: "=&r" (temp), "+m" (*m)
222			: "ir" (1UL << bit));
223		} while (unlikely(!temp));
224	} else {
225		volatile unsigned long *a = addr;
226		unsigned long mask;
227		unsigned long flags;
228
229		a += nr >> SZLONG_LOG;
230		mask = 1UL << bit;
231		raw_local_irq_save(flags);
232		*a ^= mask;
233		raw_local_irq_restore(flags);
234	}
235}
236
237/*
238 * test_and_set_bit - Set a bit and return its old value
239 * @nr: Bit to set
240 * @addr: Address to count from
241 *
242 * This operation is atomic and cannot be reordered.
243 * It also implies a memory barrier.
244 */
245static inline int test_and_set_bit(unsigned long nr,
246	volatile unsigned long *addr)
247{
248	unsigned short bit = nr & SZLONG_MASK;
249	unsigned long res;
250
251	smp_mb__before_llsc();
252
253	if (kernel_uses_llsc && R10000_LLSC_WAR) {
254		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
255		unsigned long temp;
256
257		__asm__ __volatile__(
258		"	.set	mips3					\n"
259		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
260		"	or	%2, %0, %3				\n"
261		"	" __SC	"%2, %1					\n"
262		"	beqzl	%2, 1b					\n"
263		"	and	%2, %0, %3				\n"
264		"	.set	mips0					\n"
265		: "=&r" (temp), "+m" (*m), "=&r" (res)
266		: "r" (1UL << bit)
267		: "memory");
268	} else if (kernel_uses_llsc) {
269		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
270		unsigned long temp;
271
272		do {
273			__asm__ __volatile__(
274			"	.set	mips3				\n"
275			"	" __LL "%0, %1	# test_and_set_bit	\n"
276			"	or	%2, %0, %3			\n"
277			"	" __SC	"%2, %1				\n"
278			"	.set	mips0				\n"
279			: "=&r" (temp), "+m" (*m), "=&r" (res)
280			: "r" (1UL << bit)
281			: "memory");
282		} while (unlikely(!res));
283
284		res = temp & (1UL << bit);
285	} else {
286		volatile unsigned long *a = addr;
287		unsigned long mask;
288		unsigned long flags;
289
290		a += nr >> SZLONG_LOG;
291		mask = 1UL << bit;
292		raw_local_irq_save(flags);
293		res = (mask & *a);
294		*a |= mask;
295		raw_local_irq_restore(flags);
296	}
297
298	smp_llsc_mb();
299
300	return res != 0;
301}
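Because the old value comes back atomically, one-shot patterns need no separate lock; a minimal sketch, with INIT_DONE, state and do_init() hypothetical:

	/* Exactly one racing caller sees the old bit as 0 and runs the init. */
	if (!test_and_set_bit(INIT_DONE, &state))
		do_init();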
302
303/*
304 * test_and_set_bit_lock - Set a bit and return its old value
305 * @nr: Bit to set
306 * @addr: Address to count from
307 *
308 * This operation is atomic and implies acquire ordering semantics
309 * after the memory operation.
310 */
311static inline int test_and_set_bit_lock(unsigned long nr,
312	volatile unsigned long *addr)
313{
314	unsigned short bit = nr & SZLONG_MASK;
315	unsigned long res;
316
317	if (kernel_uses_llsc && R10000_LLSC_WAR) {
318		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
319		unsigned long temp;
320
321		__asm__ __volatile__(
322		"	.set	mips3					\n"
323		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
324		"	or	%2, %0, %3				\n"
325		"	" __SC	"%2, %1					\n"
326		"	beqzl	%2, 1b					\n"
327		"	and	%2, %0, %3				\n"
328		"	.set	mips0					\n"
329		: "=&r" (temp), "+m" (*m), "=&r" (res)
330		: "r" (1UL << bit)
331		: "memory");
332	} else if (kernel_uses_llsc) {
333		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
334		unsigned long temp;
335
336		do {
337			__asm__ __volatile__(
338			"	.set	mips3				\n"
339			"	" __LL "%0, %1	# test_and_set_bit	\n"
340			"	or	%2, %0, %3			\n"
341			"	" __SC	"%2, %1				\n"
342			"	.set	mips0				\n"
343			: "=&r" (temp), "+m" (*m), "=&r" (res)
344			: "r" (1UL << bit)
345			: "memory");
346		} while (unlikely(!res));
347
348		res = temp & (1UL << bit);
349	} else {
350		volatile unsigned long *a = addr;
351		unsigned long mask;
352		unsigned long flags;
353
354		a += nr >> SZLONG_LOG;
355		mask = 1UL << bit;
356		raw_local_irq_save(flags);
357		res = (mask & *a);
358		*a |= mask;
359		raw_local_irq_restore(flags);
360	}
361
362	smp_llsc_mb();
363
364	return res != 0;
365}
366/*
367 * test_and_clear_bit - Clear a bit and return its old value
368 * @nr: Bit to clear
369 * @addr: Address to count from
370 *
371 * This operation is atomic and cannot be reordered.
372 * It also implies a memory barrier.
373 */
374static inline int test_and_clear_bit(unsigned long nr,
375	volatile unsigned long *addr)
376{
377	unsigned short bit = nr & SZLONG_MASK;
378	unsigned long res;
379
380	smp_mb__before_llsc();
381
382	if (kernel_uses_llsc && R10000_LLSC_WAR) {
383		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
384		unsigned long temp;
385
386		__asm__ __volatile__(
387		"	.set	mips3					\n"
388		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
389		"	or	%2, %0, %3				\n"
390		"	xor	%2, %3					\n"
391		"	" __SC 	"%2, %1					\n"
392		"	beqzl	%2, 1b					\n"
393		"	and	%2, %0, %3				\n"
394		"	.set	mips0					\n"
395		: "=&r" (temp), "+m" (*m), "=&r" (res)
396		: "r" (1UL << bit)
397		: "memory");
398#ifdef CONFIG_CPU_MIPSR2
399	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
400		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
401		unsigned long temp;
402
403		do {
404			__asm__ __volatile__(
405			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
406			"	" __EXT "%2, %0, %3, 1			\n"
407			"	" __INS	"%0, $0, %3, 1			\n"
408			"	" __SC 	"%0, %1				\n"
409			: "=&r" (temp), "+m" (*m), "=&r" (res)
410			: "ir" (bit)
411			: "memory");
412		} while (unlikely(!temp));
413#endif
414	} else if (kernel_uses_llsc) {
415		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
416		unsigned long temp;
417
418		do {
419			__asm__ __volatile__(
420			"	.set	mips3				\n"
421			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
422			"	or	%2, %0, %3			\n"
423			"	xor	%2, %3				\n"
424			"	" __SC 	"%2, %1				\n"
425			"	.set	mips0				\n"
426			: "=&r" (temp), "+m" (*m), "=&r" (res)
427			: "r" (1UL << bit)
428			: "memory");
429		} while (unlikely(!res));
430
431		res = temp & (1UL << bit);
432	} else {
433		volatile unsigned long *a = addr;
434		unsigned long mask;
435		unsigned long flags;
436
437		a += nr >> SZLONG_LOG;
438		mask = 1UL << bit;
439		raw_local_irq_save(flags);
440		res = (mask & *a);
441		*a &= ~mask;
442		raw_local_irq_restore(flags);
443	}
444
445	smp_llsc_mb();
446
447	return res != 0;
448}
449
450/*
451 * test_and_change_bit - Change a bit and return its old value
452 * @nr: Bit to change
453 * @addr: Address to count from
454 *
455 * This operation is atomic and cannot be reordered.
456 * It also implies a memory barrier.
457 */
458static inline int test_and_change_bit(unsigned long nr,
459	volatile unsigned long *addr)
460{
461	unsigned short bit = nr & SZLONG_MASK;
462	unsigned long res;
463
464	smp_mb__before_llsc();
465
466	if (kernel_uses_llsc && R10000_LLSC_WAR) {
467		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
468		unsigned long temp;
469
470		__asm__ __volatile__(
471		"	.set	mips3					\n"
472		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
473		"	xor	%2, %0, %3				\n"
474		"	" __SC	"%2, %1					\n"
475		"	beqzl	%2, 1b					\n"
476		"	and	%2, %0, %3				\n"
477		"	.set	mips0					\n"
478		: "=&r" (temp), "+m" (*m), "=&r" (res)
479		: "r" (1UL << bit)
480		: "memory");
481	} else if (kernel_uses_llsc) {
482		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
483		unsigned long temp;
484
485		do {
486			__asm__ __volatile__(
487			"	.set	mips3				\n"
488			"	" __LL	"%0, %1	# test_and_change_bit	\n"
489			"	xor	%2, %0, %3			\n"
490			"	" __SC	"\t%2, %1			\n"
491			"	.set	mips0				\n"
492			: "=&r" (temp), "+m" (*m), "=&r" (res)
493			: "r" (1UL << bit)
494			: "memory");
495		} while (unlikely(!res));
496
497		res = temp & (1UL << bit);
498	} else {
499		volatile unsigned long *a = addr;
500		unsigned long mask;
501		unsigned long flags;
502
503		a += nr >> SZLONG_LOG;
504		mask = 1UL << bit;
505		raw_local_irq_save(flags);
506		res = (mask & *a);
507		*a ^= mask;
508		raw_local_irq_restore(flags);
509	}
510
511	smp_llsc_mb();
512
513	return res != 0;
514}
515
516#include <asm-generic/bitops/non-atomic.h>
517
518/*
519 * __clear_bit_unlock - Clears a bit in memory
520 * @nr: Bit to clear
521 * @addr: Address to start counting from
522 *
523 * __clear_bit_unlock() is non-atomic and implies release semantics before
524 * the memory operation. It can be used for an unlock if no other CPUs can
525 * concurrently modify other bits in the word.
526 */
527static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
528{
529	smp_mb();
530	__clear_bit(nr, addr);
531}
532
533/*
534 * Return the bit position (0..BITS_PER_LONG-1) of the most significant
535 * 1 bit in a word. Undefined if no 1 bit exists; check for zero first.
536 */
537static inline unsigned long __fls(unsigned long word)
538{
539	int num;
540
541	if (BITS_PER_LONG == 32 &&
542	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
543		__asm__(
544		"	.set	push					\n"
545		"	.set	mips32					\n"
546		"	clz	%0, %1					\n"
547		"	.set	pop					\n"
548		: "=r" (num)
549		: "r" (word));
550
551		return 31 - num;
552	}
553
554	if (BITS_PER_LONG == 64 &&
555	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
556		__asm__(
557		"	.set	push					\n"
558		"	.set	mips64					\n"
559		"	dclz	%0, %1					\n"
560		"	.set	pop					\n"
561		: "=r" (num)
562		: "r" (word));
563
564		return 63 - num;
565	}
566
567	num = BITS_PER_LONG - 1;
568
569#if BITS_PER_LONG == 64
570	if (!(word & (~0ul << 32))) {
571		num -= 32;
572		word <<= 32;
573	}
574#endif
575	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
576		num -= 16;
577		word <<= 16;
578	}
579	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
580		num -= 8;
581		word <<= 8;
582	}
583	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
584		num -= 4;
585		word <<= 4;
586	}
587	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
588		num -= 2;
589		word <<= 2;
590	}
591	if (!(word & (~0ul << (BITS_PER_LONG-1))))
592		num -= 1;
593	return num;
594}
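One worked pass through the software fallback, assuming BITS_PER_LONG == 32 and a word whose highest set bit is bit 10:

	/*
	 * __fls(0x00000450):                        start with num = 31
	 *   top 16 bits all clear    -> num = 15, word = 0x04500000
	 *   some of top  8 bits set  -> unchanged
	 *   top  4 bits all clear    -> num = 11, word = 0x45000000
	 *   some of top  2 bits set  -> unchanged
	 *   top  1 bit      clear    -> num = 10
	 * returns 10, the position of the highest 1 bit in 0x450.
	 */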
595
596/*
597 * __ffs - find first set bit in word.
598 * @word: The word to search
599 *
600 * Returns 0..BITS_PER_LONG-1
601 * Undefined if no bit is set, so code should check against 0 first.
602 */
603static inline unsigned long __ffs(unsigned long word)
604{
605	return __fls(word & -word);
606}
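This works because, in two's complement, word & -word isolates the lowest set bit, and __fls() of a single-bit value is that bit's position. A worked example:

	/*
	 * word        = 0b101000  (0x28)
	 * -word       = ...11011000, so word & -word = 0b001000 (0x08)
	 * __fls(0x08) = 3, hence __ffs(0x28) == 3
	 */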
607
608/*
609 * fls - find last bit set.
610 * @x: The word to search
611 *
612 * This is defined the same way as ffs.
613 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
614 */
615static inline int fls(int x)
616{
617	int r;
618
619	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
620		__asm__("clz %0, %1" : "=r" (x) : "r" (x));
621
622		return 32 - x;
623	}
624
625	r = 32;
626	if (!x)
627		return 0;
628	if (!(x & 0xffff0000u)) {
629		x <<= 16;
630		r -= 16;
631	}
632	if (!(x & 0xff000000u)) {
633		x <<= 8;
634		r -= 8;
635	}
636	if (!(x & 0xf0000000u)) {
637		x <<= 4;
638		r -= 4;
639	}
640	if (!(x & 0xc0000000u)) {
641		x <<= 2;
642		r -= 2;
643	}
644	if (!(x & 0x80000000u)) {
645		x <<= 1;
646		r -= 1;
647	}
648	return r;
649}
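On processors with clz the result is simply 32 - clz(x); a few sanity values implied by the definition above:

	/*
	 *   fls(0)          == 0
	 *   fls(1)          == 1	(1-based count)
	 *   fls(0x450)      == 11
	 *   fls(0x80000000) == 32
	 */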
650
651#include <asm-generic/bitops/fls64.h>
652
653/*
654 * ffs - find first bit set.
655 * @word: The word to search
656 *
657 * This is defined the same way as
658 * the libc and compiler builtin ffs routines, therefore
659 * differs in spirit from the above ffz (man ffs).
660 */
661static inline int ffs(int word)
662{
663	if (!word)
664		return 0;
665
666	return fls(word & -word);
667}
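ffs() is therefore the 1-based counterpart of __ffs(): for nonzero x, ffs(x) == __ffs(x) + 1, while ffs(0) is defined as 0. For example:

	/*
	 *   ffs(0x28) == 4	whereas	__ffs(0x28) == 3
	 *   ffs(0)    == 0	whereas	__ffs(0) is undefined
	 */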
668
669#include <asm-generic/bitops/ffz.h>
670#include <asm-generic/bitops/find.h>
671
672#ifdef __KERNEL__
673
674#include <asm-generic/bitops/sched.h>
675
676#include <asm/arch_hweight.h>
677#include <asm-generic/bitops/const_hweight.h>
678
679#include <asm-generic/bitops/le.h>
680#include <asm-generic/bitops/ext2-atomic.h>
681
682#endif /* __KERNEL__ */
683
684#endif /* _ASM_BITOPS_H */
v4.6 (arch/mips/include/asm/bitops.h)
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
  7 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
  8 */
  9#ifndef _ASM_BITOPS_H
 10#define _ASM_BITOPS_H
 11
 12#ifndef _LINUX_BITOPS_H
 13#error only <linux/bitops.h> can be included directly
 14#endif
 15
 16#include <linux/compiler.h>
 17#include <linux/types.h>
 18#include <asm/barrier.h>
 19#include <asm/byteorder.h>		/* sigh ... */
 20#include <asm/compiler.h>
 21#include <asm/cpu-features.h>
 22#include <asm/sgidefs.h>
 23#include <asm/war.h>
 24
 25#if _MIPS_SZLONG == 32
 26#define SZLONG_LOG 5
 27#define SZLONG_MASK 31UL
 28#define __LL		"ll	"
 29#define __SC		"sc	"
 30#define __INS		"ins	"
 31#define __EXT		"ext	"
 32#elif _MIPS_SZLONG == 64
 33#define SZLONG_LOG 6
 34#define SZLONG_MASK 63UL
 35#define __LL		"lld	"
 36#define __SC		"scd	"
 37#define __INS		"dins	 "
 38#define __EXT		"dext	 "
 39#endif
 40
 41/*
 42 * These are the "slower" versions of the functions and are in bitops.c.
 43 * These functions call raw_local_irq_{save,restore}().
 44 */
 45void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
 46void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
 47void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
 48int __mips_test_and_set_bit(unsigned long nr,
 49			    volatile unsigned long *addr);
 50int __mips_test_and_set_bit_lock(unsigned long nr,
 51				 volatile unsigned long *addr);
 52int __mips_test_and_clear_bit(unsigned long nr,
 53			      volatile unsigned long *addr);
 54int __mips_test_and_change_bit(unsigned long nr,
 55			       volatile unsigned long *addr);
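Compared with v3.1, the raw_local_irq_{save,restore}() fallbacks are no longer open-coded in each inline; they live out of line in arch/mips/lib/bitops.c. A sketch of one of them, modelled on the v3.1 inline fallback (the real bitops.c body may differ in detail):

	/* Sketch of the out-of-line slow path for kernels without ll/sc. */
	void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
	{
		volatile unsigned long *a = &addr[nr >> SZLONG_LOG];
		unsigned long mask = 1UL << (nr & SZLONG_MASK);
		unsigned long flags;

		raw_local_irq_save(flags);	/* atomicity via masked IRQs */
		*a |= mask;
		raw_local_irq_restore(flags);
	}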
 56
 57
 58/*
 59 * set_bit - Atomically set a bit in memory
 60 * @nr: the bit to set
 61 * @addr: the address to start counting from
 62 *
 63 * This function is atomic and may not be reordered.  See __set_bit()
 64 * if you do not require the atomic guarantees.
 65 * Note that @nr may be almost arbitrarily large; this function is not
 66 * restricted to acting on a single-word quantity.
 67 */
 68static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 69{
 70	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 71	int bit = nr & SZLONG_MASK;
 72	unsigned long temp;
 73
 74	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 75		__asm__ __volatile__(
 76		"	.set	arch=r4000				\n"
 77		"1:	" __LL "%0, %1			# set_bit	\n"
 78		"	or	%0, %2					\n"
 79		"	" __SC	"%0, %1					\n"
 80		"	beqzl	%0, 1b					\n"
 81		"	.set	mips0					\n"
 82		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
 83		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
 84#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 85	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
 86		do {
 87			__asm__ __volatile__(
 88			"	" __LL "%0, %1		# set_bit	\n"
 89			"	" __INS "%0, %3, %2, 1			\n"
 90			"	" __SC "%0, %1				\n"
 91			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 92			: "ir" (bit), "r" (~0));
 93		} while (unlikely(!temp));
 94#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 95	} else if (kernel_uses_llsc) {
 96		do {
 97			__asm__ __volatile__(
 98			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 99			"	" __LL "%0, %1		# set_bit	\n"
100			"	or	%0, %2				\n"
101			"	" __SC	"%0, %1				\n"
102			"	.set	mips0				\n"
103			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
104			: "ir" (1UL << bit));
105		} while (unlikely(!temp));
106	} else
107		__mips_set_bit(nr, addr);
108}
109
110/*
111 * clear_bit - Clears a bit in memory
112 * @nr: Bit to clear
113 * @addr: Address to start counting from
114 *
115 * clear_bit() is atomic and may not be reordered.  However, it does
116 * not contain a memory barrier, so if it is used for locking purposes,
117 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
118 * in order to ensure changes are visible on other processors.
119 */
120static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
121{
122	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
123	int bit = nr & SZLONG_MASK;
124	unsigned long temp;
125
126	if (kernel_uses_llsc && R10000_LLSC_WAR) {
127		__asm__ __volatile__(
128		"	.set	arch=r4000				\n"
129		"1:	" __LL "%0, %1			# clear_bit	\n"
130		"	and	%0, %2					\n"
131		"	" __SC "%0, %1					\n"
132		"	beqzl	%0, 1b					\n"
133		"	.set	mips0					\n"
134		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
135		: "ir" (~(1UL << bit)));
136#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
137	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
138		do {
139			__asm__ __volatile__(
140			"	" __LL "%0, %1		# clear_bit	\n"
141			"	" __INS "%0, $0, %2, 1			\n"
142			"	" __SC "%0, %1				\n"
143			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
144			: "ir" (bit));
145		} while (unlikely(!temp));
146#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
147	} else if (kernel_uses_llsc) {
148		do {
149			__asm__ __volatile__(
150			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
151			"	" __LL "%0, %1		# clear_bit	\n"
152			"	and	%0, %2				\n"
153			"	" __SC "%0, %1				\n"
154			"	.set	mips0				\n"
155			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
156			: "ir" (~(1UL << bit)));
157		} while (unlikely(!temp));
158	} else
159		__mips_clear_bit(nr, addr);
160}
161
162/*
163 * clear_bit_unlock - Clears a bit in memory
164 * @nr: Bit to clear
165 * @addr: Address to start counting from
166 *
167 * clear_bit_unlock() is atomic and implies release semantics before the
168 * memory operation. It can be used for an unlock.
169 */
170static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
171{
172	smp_mb__before_atomic();
173	clear_bit(nr, addr);
174}
175
176/*
177 * change_bit - Toggle a bit in memory
178 * @nr: Bit to change
179 * @addr: Address to start counting from
180 *
181 * change_bit() is atomic and may not be reordered.
182 * Note that @nr may be almost arbitrarily large; this function is not
183 * restricted to acting on a single-word quantity.
184 */
185static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
186{
187	int bit = nr & SZLONG_MASK;
188
189	if (kernel_uses_llsc && R10000_LLSC_WAR) {
190		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
191		unsigned long temp;
192
193		__asm__ __volatile__(
194		"	.set	arch=r4000			\n"
195		"1:	" __LL "%0, %1		# change_bit	\n"
196		"	xor	%0, %2				\n"
197		"	" __SC	"%0, %1				\n"
198		"	beqzl	%0, 1b				\n"
199		"	.set	mips0				\n"
200		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
201		: "ir" (1UL << bit));
202	} else if (kernel_uses_llsc) {
203		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
204		unsigned long temp;
205
206		do {
207			__asm__ __volatile__(
208			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
209			"	" __LL "%0, %1		# change_bit	\n"
210			"	xor	%0, %2				\n"
211			"	" __SC	"%0, %1				\n"
212			"	.set	mips0				\n"
213			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
214			: "ir" (1UL << bit));
215		} while (unlikely(!temp));
216	} else
217		__mips_change_bit(nr, addr);
218}
219
220/*
221 * test_and_set_bit - Set a bit and return its old value
222 * @nr: Bit to set
223 * @addr: Address to count from
224 *
225 * This operation is atomic and cannot be reordered.
226 * It also implies a memory barrier.
227 */
228static inline int test_and_set_bit(unsigned long nr,
229	volatile unsigned long *addr)
230{
231	int bit = nr & SZLONG_MASK;
232	unsigned long res;
233
234	smp_mb__before_llsc();
235
236	if (kernel_uses_llsc && R10000_LLSC_WAR) {
237		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
238		unsigned long temp;
239
240		__asm__ __volatile__(
241		"	.set	arch=r4000				\n"
242		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
243		"	or	%2, %0, %3				\n"
244		"	" __SC	"%2, %1					\n"
245		"	beqzl	%2, 1b					\n"
246		"	and	%2, %0, %3				\n"
247		"	.set	mips0					\n"
248		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
249		: "r" (1UL << bit)
250		: "memory");
251	} else if (kernel_uses_llsc) {
252		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
253		unsigned long temp;
254
255		do {
256			__asm__ __volatile__(
257			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
258			"	" __LL "%0, %1	# test_and_set_bit	\n"
259			"	or	%2, %0, %3			\n"
260			"	" __SC	"%2, %1				\n"
261			"	.set	mips0				\n"
262			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
263			: "r" (1UL << bit)
264			: "memory");
265		} while (unlikely(!res));
266
267		res = temp & (1UL << bit);
268	} else
269		res = __mips_test_and_set_bit(nr, addr);
270
271	smp_llsc_mb();
272
273	return res != 0;
274}
275
276/*
277 * test_and_set_bit_lock - Set a bit and return its old value
278 * @nr: Bit to set
279 * @addr: Address to count from
280 *
281 * This operation is atomic and implies acquire ordering semantics
282 * after the memory operation.
283 */
284static inline int test_and_set_bit_lock(unsigned long nr,
285	volatile unsigned long *addr)
286{
287	int bit = nr & SZLONG_MASK;
288	unsigned long res;
289
290	if (kernel_uses_llsc && R10000_LLSC_WAR) {
291		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
292		unsigned long temp;
293
294		__asm__ __volatile__(
295		"	.set	arch=r4000				\n"
296		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
297		"	or	%2, %0, %3				\n"
298		"	" __SC	"%2, %1					\n"
299		"	beqzl	%2, 1b					\n"
300		"	and	%2, %0, %3				\n"
301		"	.set	mips0					\n"
302		: "=&r" (temp), "+m" (*m), "=&r" (res)
303		: "r" (1UL << bit)
304		: "memory");
305	} else if (kernel_uses_llsc) {
306		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
307		unsigned long temp;
308
309		do {
310			__asm__ __volatile__(
311			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
312			"	" __LL "%0, %1	# test_and_set_bit	\n"
313			"	or	%2, %0, %3			\n"
314			"	" __SC	"%2, %1				\n"
315			"	.set	mips0				\n"
316			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
317			: "r" (1UL << bit)
318			: "memory");
319		} while (unlikely(!res));
320
321		res = temp & (1UL << bit);
322	} else
323		res = __mips_test_and_set_bit_lock(nr, addr);
324
325	smp_llsc_mb();
326
327	return res != 0;
328}
329/*
330 * test_and_clear_bit - Clear a bit and return its old value
331 * @nr: Bit to clear
332 * @addr: Address to count from
333 *
334 * This operation is atomic and cannot be reordered.
335 * It also implies a memory barrier.
336 */
337static inline int test_and_clear_bit(unsigned long nr,
338	volatile unsigned long *addr)
339{
340	int bit = nr & SZLONG_MASK;
341	unsigned long res;
342
343	smp_mb__before_llsc();
344
345	if (kernel_uses_llsc && R10000_LLSC_WAR) {
346		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
347		unsigned long temp;
348
349		__asm__ __volatile__(
350		"	.set	arch=r4000				\n"
351		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
352		"	or	%2, %0, %3				\n"
353		"	xor	%2, %3					\n"
354		"	" __SC	"%2, %1					\n"
355		"	beqzl	%2, 1b					\n"
356		"	and	%2, %0, %3				\n"
357		"	.set	mips0					\n"
358		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
359		: "r" (1UL << bit)
360		: "memory");
361#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
362	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
363		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
364		unsigned long temp;
365
366		do {
367			__asm__ __volatile__(
368			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
369			"	" __EXT "%2, %0, %3, 1			\n"
370			"	" __INS "%0, $0, %3, 1			\n"
371			"	" __SC	"%0, %1				\n"
372			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
373			: "ir" (bit)
374			: "memory");
375		} while (unlikely(!temp));
376#endif
377	} else if (kernel_uses_llsc) {
378		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
379		unsigned long temp;
380
381		do {
382			__asm__ __volatile__(
383			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
384			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
385			"	or	%2, %0, %3			\n"
386			"	xor	%2, %3				\n"
387			"	" __SC	"%2, %1				\n"
388			"	.set	mips0				\n"
389			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
390			: "r" (1UL << bit)
391			: "memory");
392		} while (unlikely(!res));
393
394		res = temp & (1UL << bit);
395	} else
396		res = __mips_test_and_clear_bit(nr, addr);
397
398	smp_llsc_mb();
399
400	return res != 0;
401}
402
403/*
404 * test_and_change_bit - Change a bit and return its old value
405 * @nr: Bit to change
406 * @addr: Address to count from
407 *
408 * This operation is atomic and cannot be reordered.
409 * It also implies a memory barrier.
410 */
411static inline int test_and_change_bit(unsigned long nr,
412	volatile unsigned long *addr)
413{
414	int bit = nr & SZLONG_MASK;
415	unsigned long res;
416
417	smp_mb__before_llsc();
418
419	if (kernel_uses_llsc && R10000_LLSC_WAR) {
420		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
421		unsigned long temp;
422
423		__asm__ __volatile__(
424		"	.set	arch=r4000				\n"
425		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
426		"	xor	%2, %0, %3				\n"
427		"	" __SC	"%2, %1					\n"
428		"	beqzl	%2, 1b					\n"
429		"	and	%2, %0, %3				\n"
430		"	.set	mips0					\n"
431		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
432		: "r" (1UL << bit)
433		: "memory");
434	} else if (kernel_uses_llsc) {
435		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
436		unsigned long temp;
437
438		do {
439			__asm__ __volatile__(
440			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
441			"	" __LL	"%0, %1 # test_and_change_bit	\n"
442			"	xor	%2, %0, %3			\n"
443			"	" __SC	"\t%2, %1			\n"
444			"	.set	mips0				\n"
445			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
446			: "r" (1UL << bit)
447			: "memory");
448		} while (unlikely(!res));
449
450		res = temp & (1UL << bit);
451	} else
452		res = __mips_test_and_change_bit(nr, addr);
453
454	smp_llsc_mb();
455
456	return res != 0;
457}
458
459#include <asm-generic/bitops/non-atomic.h>
460
461/*
462 * __clear_bit_unlock - Clears a bit in memory
463 * @nr: Bit to clear
464 * @addr: Address to start counting from
465 *
466 * __clear_bit_unlock() is non-atomic and implies release semantics before
467 * the memory operation. It can be used for an unlock if no other CPUs can
468 * concurrently modify other bits in the word.
469 */
470static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
471{
472	smp_mb__before_llsc();
473	__clear_bit(nr, addr);
474}
475
476/*
477 * Return the bit position (0..BITS_PER_LONG-1) of the most significant
478 * 1 bit in a word. Undefined if no 1 bit exists; check for zero first.
479 */
480static inline unsigned long __fls(unsigned long word)
481{
482	int num;
483
484	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
485	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
486		__asm__(
487		"	.set	push					\n"
488		"	.set	"MIPS_ISA_LEVEL"			\n"
489		"	clz	%0, %1					\n"
490		"	.set	pop					\n"
491		: "=r" (num)
492		: "r" (word));
493
494		return 31 - num;
495	}
496
497	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
498	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
499		__asm__(
500		"	.set	push					\n"
501		"	.set	"MIPS_ISA_LEVEL"			\n"
502		"	dclz	%0, %1					\n"
503		"	.set	pop					\n"
504		: "=r" (num)
505		: "r" (word));
506
507		return 63 - num;
508	}
509
510	num = BITS_PER_LONG - 1;
511
512#if BITS_PER_LONG == 64
513	if (!(word & (~0ul << 32))) {
514		num -= 32;
515		word <<= 32;
516	}
517#endif
518	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
519		num -= 16;
520		word <<= 16;
521	}
522	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
523		num -= 8;
524		word <<= 8;
525	}
526	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
527		num -= 4;
528		word <<= 4;
529	}
530	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
531		num -= 2;
532		word <<= 2;
533	}
534	if (!(word & (~0ul << (BITS_PER_LONG-1))))
535		num -= 1;
536	return num;
537}
538
539/*
540 * __ffs - find first set bit in word.
541 * @word: The word to search
542 *
543 * Returns 0..BITS_PER_LONG-1
544 * Undefined if no bit is set, so code should check against 0 first.
545 */
546static inline unsigned long __ffs(unsigned long word)
547{
548	return __fls(word & -word);
549}
550
551/*
552 * fls - find last bit set.
553 * @x: The word to search
554 *
555 * This is defined the same way as ffs.
556 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
557 */
558static inline int fls(int x)
559{
560	int r;
561
562	if (!__builtin_constant_p(x) &&
563	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
564		__asm__(
565		"	.set	push					\n"
566		"	.set	"MIPS_ISA_LEVEL"			\n"
567		"	clz	%0, %1					\n"
568		"	.set	pop					\n"
569		: "=r" (x)
570		: "r" (x));
571
572		return 32 - x;
573	}
574
575	r = 32;
576	if (!x)
577		return 0;
578	if (!(x & 0xffff0000u)) {
579		x <<= 16;
580		r -= 16;
581	}
582	if (!(x & 0xff000000u)) {
583		x <<= 8;
584		r -= 8;
585	}
586	if (!(x & 0xf0000000u)) {
587		x <<= 4;
588		r -= 4;
589	}
590	if (!(x & 0xc0000000u)) {
591		x <<= 2;
592		r -= 2;
593	}
594	if (!(x & 0x80000000u)) {
595		x <<= 1;
596		r -= 1;
597	}
598	return r;
599}
600
601#include <asm-generic/bitops/fls64.h>
602
603/*
604 * ffs - find first bit set.
605 * @word: The word to search
606 *
607 * This is defined the same way as
608 * the libc and compiler builtin ffs routines, therefore
609 * differs in spirit from the above ffz (man ffs).
610 */
611static inline int ffs(int word)
612{
613	if (!word)
614		return 0;
615
616	return fls(word & -word);
617}
618
619#include <asm-generic/bitops/ffz.h>
620#include <asm-generic/bitops/find.h>
621
622#ifdef __KERNEL__
623
624#include <asm-generic/bitops/sched.h>
625
626#include <asm/arch_hweight.h>
627#include <asm-generic/bitops/const_hweight.h>
628
629#include <asm-generic/bitops/le.h>
630#include <asm-generic/bitops/ext2-atomic.h>
631
632#endif /* __KERNEL__ */
633
634#endif /* _ASM_BITOPS_H */