/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);


/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}

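/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): marking a unit as allocated in a shared bitmap.  The bitmap
 * and the example_ helper below are hypothetical names.
 */
static inline void example_mark_allocated(unsigned long *bitmap,
					  unsigned int unit)
{
	/* One atomic read-modify-write; safe against concurrent callers. */
	set_bit(unit, bitmap);
}
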
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}

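/*
 * Illustrative usage sketch (editor's addition): as the comment above
 * notes, clear_bit() carries no barrier of its own, so ordering against
 * surrounding accesses must be added explicitly.  Names are hypothetical.
 */
static inline void example_ack_pending(unsigned long *pending, unsigned int bit)
{
	clear_bit(bit, pending);
	/* Make the clear visible before any later reads of shared state. */
	smp_mb__after_atomic();
}
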
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

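/*
 * Illustrative usage sketch (editor's addition): the returned old value
 * lets exactly one caller win a one-time claim.  `init_done` is a
 * hypothetical one-word bitmap.
 */
static inline int example_claim_init(unsigned long *init_done)
{
	/* Nonzero means another CPU already set bit 0 and owns the init. */
	return test_and_set_bit(0, init_done);
}
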
/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1	# test_and_set_bit_lock	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit_lock	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

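/*
 * Illustrative usage sketch (editor's addition): a minimal bit lock built
 * from test_and_set_bit_lock() and clear_bit_unlock(), relying on their
 * acquire and release semantics.  `lock_word` is a hypothetical word.
 */
static inline void example_bit_lock(unsigned long *lock_word)
{
	while (test_and_set_bit_lock(0, lock_word))
		;	/* spin until the current owner clears bit 0 */
}

static inline void example_bit_unlock(unsigned long *lock_word)
{
	clear_bit_unlock(0, lock_word);
}
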
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

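/*
 * Illustrative usage sketch (editor's addition): consuming a pending-event
 * flag so each event is handled at most once.  Names are hypothetical.
 */
static inline int example_take_event(unsigned long *events, unsigned int ev)
{
	/* The old value says whether there was an event for us to consume. */
	return test_and_clear_bit(ev, events);
}
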
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..BITS_PER_LONG - 1) of the most significant
 * 1 bit in a word.  The result is undefined if no 1 bit exists, so code
 * should check for a nonzero word first.
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}

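/*
 * Worked examples for __fls() (editor's note): __fls(1) == 0,
 * __fls(0x90) == 7 and __fls(~0ul) == BITS_PER_LONG - 1.  The software
 * fallback above halves the search window at each step, so it needs at
 * most six comparisons on 64-bit and five on 32-bit kernels.
 */
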
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

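/*
 * Worked examples for fls() (editor's note): fls(0) == 0, fls(1) == 1 and
 * fls(0x80000000) == 32.  When clz is available, the result is simply 32
 * minus the number of leading zeroes.
 */
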
#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}

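/*
 * Worked example for ffs() (editor's note): ffs(0x18) == 4, because
 * word & -word isolates the lowest set bit (0x8) and fls(0x8) == 4.
 */
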
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */