v4.17
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
  7 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
  8 */
  9#ifndef _ASM_BITOPS_H
 10#define _ASM_BITOPS_H
 11
 12#ifndef _LINUX_BITOPS_H
 13#error only <linux/bitops.h> can be included directly
 14#endif
 15
 16#include <linux/compiler.h>
 17#include <linux/types.h>
 18#include <asm/barrier.h>
 19#include <asm/byteorder.h>		/* sigh ... */
 20#include <asm/compiler.h>
 21#include <asm/cpu-features.h>
 22#include <asm/llsc.h>
 23#include <asm/sgidefs.h>
 24#include <asm/war.h>
 25
 26/*
 27 * These are the "slower" versions of the functions and are in bitops.c.
 28 * These functions call raw_local_irq_{save,restore}().
 29 */
 30void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
 31void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
 32void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
 33int __mips_test_and_set_bit(unsigned long nr,
 34			    volatile unsigned long *addr);
 35int __mips_test_and_set_bit_lock(unsigned long nr,
 36				 volatile unsigned long *addr);
 37int __mips_test_and_clear_bit(unsigned long nr,
 38			      volatile unsigned long *addr);
 39int __mips_test_and_change_bit(unsigned long nr,
 40			       volatile unsigned long *addr);
 41
 42
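On configurations without LL/SC these helpers fall back to a plain read-modify-write bracketed by raw_local_irq_{save,restore}(). A hedged sketch of the shape of one of them; the real bodies live in arch/mips/lib/bitops.c, so this rendering is an assumption, not the file itself:

	void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
	{
		volatile unsigned long *a = addr + (nr >> SZLONG_LOG);	/* pick word */
		unsigned long mask = 1UL << (nr & SZLONG_MASK);		/* bit in word */
		unsigned long flags;

		raw_local_irq_save(flags);	/* no local races while masked */
		*a |= mask;			/* ordinary, non-atomic RMW */
		raw_local_irq_restore(flags);
	}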
 43/*
 44 * set_bit - Atomically set a bit in memory
 45 * @nr: the bit to set
 46 * @addr: the address to start counting from
 47 *
 48 * This function is atomic and may not be reordered.  See __set_bit()
 49 * if you do not require the atomic guarantees.
 50 * Note that @nr may be almost arbitrarily large; this function is not
 51 * restricted to acting on a single-word quantity.
 52 */
 53static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 54{
 55	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 56	int bit = nr & SZLONG_MASK;
 57	unsigned long temp;
 58
 59	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 60		__asm__ __volatile__(
 61		"	.set	arch=r4000				\n"
 62		"1:	" __LL "%0, %1			# set_bit	\n"
 63		"	or	%0, %2					\n"
 64		"	" __SC	"%0, %1					\n"
 65		"	beqzl	%0, 1b					\n"
 66		"	.set	mips0					\n"
 67		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
 68		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
 69#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 70	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
 71		do {
 72			__asm__ __volatile__(
 73			"	" __LL "%0, %1		# set_bit	\n"
 74			"	" __INS "%0, %3, %2, 1			\n"
 75			"	" __SC "%0, %1				\n"
 76			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 77			: "ir" (bit), "r" (~0));
 78		} while (unlikely(!temp));
 79#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 80	} else if (kernel_uses_llsc) {
 81		do {
 82			__asm__ __volatile__(
 83			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 84			"	" __LL "%0, %1		# set_bit	\n"
 85			"	or	%0, %2				\n"
 86			"	" __SC	"%0, %1				\n"
 87			"	.set	mips0				\n"
 88			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 89			: "ir" (1UL << bit));
 90		} while (unlikely(!temp));
 91	} else
 92		__mips_set_bit(nr, addr);
 93}
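All three paths above implement the same operation: nr >> SZLONG_LOG selects the word, nr & SZLONG_MASK the bit within it, and the LL/SC loop retries until the store-conditional succeeds. In portable terms the whole loop is an atomic fetch-or with no ordering guarantee; a hedged user-space C11 analogue, for illustration only:

	#include <limits.h>
	#include <stdatomic.h>

	#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

	/* What the LL/SC retry loop computes: an atomic OR of one bit,
	 * with relaxed (barrier-free) ordering, matching set_bit(). */
	static inline void set_bit_c11(unsigned long nr, _Atomic unsigned long *map)
	{
		atomic_fetch_or_explicit(&map[nr / BITS_PER_WORD],
					 1UL << (nr % BITS_PER_WORD),
					 memory_order_relaxed);
	}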
 94
 95/*
 96 * clear_bit - Clears a bit in memory
 97 * @nr: Bit to clear
 98 * @addr: Address to start counting from
 99 *
100 * clear_bit() is atomic and may not be reordered.  However, it does
101 * not contain a memory barrier, so if it is used for locking purposes,
102 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
103 * in order to ensure changes are visible on other processors.
104 */
105static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
106{
107	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
108	int bit = nr & SZLONG_MASK;
109	unsigned long temp;
110
111	if (kernel_uses_llsc && R10000_LLSC_WAR) {
112		__asm__ __volatile__(
113		"	.set	arch=r4000				\n"
114		"1:	" __LL "%0, %1			# clear_bit	\n"
115		"	and	%0, %2					\n"
116		"	" __SC "%0, %1					\n"
117		"	beqzl	%0, 1b					\n"
118		"	.set	mips0					\n"
119		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
120		: "ir" (~(1UL << bit)));
121#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
122	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
123		do {
124			__asm__ __volatile__(
125			"	" __LL "%0, %1		# clear_bit	\n"
126			"	" __INS "%0, $0, %2, 1			\n"
127			"	" __SC "%0, %1				\n"
128			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
129			: "ir" (bit));
130		} while (unlikely(!temp));
131#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
132	} else if (kernel_uses_llsc) {
133		do {
134			__asm__ __volatile__(
135			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
136			"	" __LL "%0, %1		# clear_bit	\n"
137			"	and	%0, %2				\n"
138			"	" __SC "%0, %1				\n"
139			"	.set	mips0				\n"
140			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
141			: "ir" (~(1UL << bit)));
142		} while (unlikely(!temp));
143	} else
144		__mips_clear_bit(nr, addr);
145}
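As the comment above warns, clear_bit() carries no barrier of its own, so publishing data by clearing a flag needs an explicit smp_mb__before_atomic(). A hedged usage sketch; the struct, field, and bit names are hypothetical:

	struct request_state {
		unsigned long	flags;
		int		result;
	};
	#define REQ_IN_PROGRESS	0	/* hypothetical bit index */

	static void finish_request(struct request_state *rs, int result)
	{
		rs->result = result;		/* payload store first */
		smp_mb__before_atomic();	/* order payload before the clear */
		clear_bit(REQ_IN_PROGRESS, &rs->flags);
	}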
146
147/*
148 * clear_bit_unlock - Clears a bit in memory
149 * @nr: Bit to clear
150 * @addr: Address to start counting from
151 *
152 * clear_bit_unlock() is atomic and implies release semantics before the
153 * memory operation. It can be used for an unlock.
154 */
155static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
156{
157	smp_mb__before_atomic();
158	clear_bit(nr, addr);
159}
160
161/*
162 * change_bit - Toggle a bit in memory
163 * @nr: Bit to change
164 * @addr: Address to start counting from
165 *
166 * change_bit() is atomic and may not be reordered.
167 * Note that @nr may be almost arbitrarily large; this function is not
168 * restricted to acting on a single-word quantity.
169 */
170static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
171{
172	int bit = nr & SZLONG_MASK;
173
174	if (kernel_uses_llsc && R10000_LLSC_WAR) {
175		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
176		unsigned long temp;
177
178		__asm__ __volatile__(
179		"	.set	arch=r4000			\n"
180		"1:	" __LL "%0, %1		# change_bit	\n"
181		"	xor	%0, %2				\n"
182		"	" __SC	"%0, %1				\n"
183		"	beqzl	%0, 1b				\n"
184		"	.set	mips0				\n"
185		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
186		: "ir" (1UL << bit));
187	} else if (kernel_uses_llsc) {
188		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
189		unsigned long temp;
190
191		do {
192			__asm__ __volatile__(
193			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
194			"	" __LL "%0, %1		# change_bit	\n"
195			"	xor	%0, %2				\n"
196			"	" __SC	"%0, %1				\n"
197			"	.set	mips0				\n"
198			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
199			: "ir" (1UL << bit));
200		} while (unlikely(!temp));
201	} else
202		__mips_change_bit(nr, addr);
203}
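change_bit() atomically flips the bit regardless of its current value, so no toggle is lost even with several concurrent togglers. A minimal hedged example; LED_ON and led_state are hypothetical names:

	#define LED_ON	0			/* hypothetical bit index */
	static unsigned long led_state;		/* hypothetical driver bitmap */

	static void led_toggle(void)
	{
		change_bit(LED_ON, &led_state);	/* no lost toggles under contention */
	}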
204
205/*
206 * test_and_set_bit - Set a bit and return its old value
207 * @nr: Bit to set
208 * @addr: Address to count from
209 *
210 * This operation is atomic and cannot be reordered.
211 * It also implies a memory barrier.
212 */
213static inline int test_and_set_bit(unsigned long nr,
214	volatile unsigned long *addr)
215{
216	int bit = nr & SZLONG_MASK;
217	unsigned long res;
218
219	smp_mb__before_llsc();
220
221	if (kernel_uses_llsc && R10000_LLSC_WAR) {
222		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
223		unsigned long temp;
224
225		__asm__ __volatile__(
226		"	.set	arch=r4000				\n"
227		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
228		"	or	%2, %0, %3				\n"
229		"	" __SC	"%2, %1					\n"
230		"	beqzl	%2, 1b					\n"
231		"	and	%2, %0, %3				\n"
232		"	.set	mips0					\n"
233		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
234		: "r" (1UL << bit)
235		: "memory");
236	} else if (kernel_uses_llsc) {
237		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
238		unsigned long temp;
239
240		do {
241			__asm__ __volatile__(
242			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
243			"	" __LL "%0, %1	# test_and_set_bit	\n"
244			"	or	%2, %0, %3			\n"
245			"	" __SC	"%2, %1				\n"
246			"	.set	mips0				\n"
247			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
248			: "r" (1UL << bit)
249			: "memory");
250		} while (unlikely(!res));
251
252		res = temp & (1UL << bit);
253	} else
254		res = __mips_test_and_set_bit(nr, addr);
255
256	smp_llsc_mb();
257
258	return res != 0;
259}
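Because the old value comes back atomically with the set, test_and_set_bit() is the usual way to claim a flag exactly once; whichever caller sees 0 wins. A hedged sketch with hypothetical names:

	#define RESOURCE_BUSY	0		/* hypothetical bit index */
	static unsigned long resource_flags;	/* hypothetical state word */

	static int try_claim_resource(void)
	{
		if (test_and_set_bit(RESOURCE_BUSY, &resource_flags))
			return -EBUSY;		/* already claimed by someone else */
		/* ... caller now has exclusive ownership ... */
		return 0;
	}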
260
261/*
262 * test_and_set_bit_lock - Set a bit and return its old value
263 * @nr: Bit to set
264 * @addr: Address to count from
265 *
266 * This operation is atomic and implies acquire ordering semantics
267 * after the memory operation.
268 */
269static inline int test_and_set_bit_lock(unsigned long nr,
270	volatile unsigned long *addr)
271{
272	int bit = nr & SZLONG_MASK;
273	unsigned long res;
274
275	if (kernel_uses_llsc && R10000_LLSC_WAR) {
276		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
277		unsigned long temp;
278
279		__asm__ __volatile__(
280		"	.set	arch=r4000				\n"
281		"1:	" __LL "%0, %1		# test_and_set_bit_lock	\n"
282		"	or	%2, %0, %3				\n"
283		"	" __SC	"%2, %1					\n"
284		"	beqzl	%2, 1b					\n"
285		"	and	%2, %0, %3				\n"
286		"	.set	mips0					\n"
287		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
288		: "r" (1UL << bit)
289		: "memory");
290	} else if (kernel_uses_llsc) {
291		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
292		unsigned long temp;
293
294		do {
295			__asm__ __volatile__(
296			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
297			"	" __LL "%0, %1	# test_and_set_bit_lock	\n"
298			"	or	%2, %0, %3			\n"
299			"	" __SC	"%2, %1				\n"
300			"	.set	mips0				\n"
301			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
302			: "r" (1UL << bit)
303			: "memory");
304		} while (unlikely(!res));
305
306		res = temp & (1UL << bit);
307	} else
308		res = __mips_test_and_set_bit_lock(nr, addr);
309
310	smp_llsc_mb();
311
312	return res != 0;
313}
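The acquire flavour above pairs with clear_bit_unlock() to form a one-bit lock; the kernel's ready-made wrappers for this pattern live in <linux/bit_spinlock.h>. A hedged sketch, with MY_LOCK_BIT as a hypothetical bit index:

	#define MY_LOCK_BIT	0		/* hypothetical bit index */

	static void my_bit_lock(unsigned long *word)
	{
		while (test_and_set_bit_lock(MY_LOCK_BIT, word))
			cpu_relax();		/* spin until the old value was 0 */
	}

	static void my_bit_unlock(unsigned long *word)
	{
		clear_bit_unlock(MY_LOCK_BIT, word);	/* release semantics */
	}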
314/*
315 * test_and_clear_bit - Clear a bit and return its old value
316 * @nr: Bit to clear
317 * @addr: Address to count from
318 *
319 * This operation is atomic and cannot be reordered.
320 * It also implies a memory barrier.
321 */
322static inline int test_and_clear_bit(unsigned long nr,
323	volatile unsigned long *addr)
324{
325	int bit = nr & SZLONG_MASK;
326	unsigned long res;
327
328	smp_mb__before_llsc();
329
330	if (kernel_uses_llsc && R10000_LLSC_WAR) {
331		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
332		unsigned long temp;
333
334		__asm__ __volatile__(
335		"	.set	arch=r4000				\n"
336		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
337		"	or	%2, %0, %3				\n"
338		"	xor	%2, %3					\n"
339		"	" __SC	"%2, %1					\n"
340		"	beqzl	%2, 1b					\n"
341		"	and	%2, %0, %3				\n"
342		"	.set	mips0					\n"
343		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
344		: "r" (1UL << bit)
345		: "memory");
346#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
347	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
348		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
349		unsigned long temp;
350
351		do {
352			__asm__ __volatile__(
353			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
354			"	" __EXT "%2, %0, %3, 1			\n"
355			"	" __INS "%0, $0, %3, 1			\n"
356			"	" __SC	"%0, %1				\n"
357			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
358			: "ir" (bit)
359			: "memory");
360		} while (unlikely(!temp));
361#endif
362	} else if (kernel_uses_llsc) {
363		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
364		unsigned long temp;
365
366		do {
367			__asm__ __volatile__(
368			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
369			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
370			"	or	%2, %0, %3			\n"
371			"	xor	%2, %3				\n"
372			"	" __SC	"%2, %1				\n"
373			"	.set	mips0				\n"
374			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
375			: "r" (1UL << bit)
376			: "memory");
377		} while (unlikely(!res));
378
379		res = temp & (1UL << bit);
380	} else
381		res = __mips_test_and_clear_bit(nr, addr);
382
383	smp_llsc_mb();
384
385	return res != 0;
386}
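Exactly one caller can observe the 1->0 transition, which makes test_and_clear_bit() suitable for consuming a pending-work flag without double execution. A hedged sketch; the bit name and helper are hypothetical:

	#define WORK_PENDING	0		/* hypothetical bit index */

	static void run_if_pending(unsigned long *flags)
	{
		if (test_and_clear_bit(WORK_PENDING, flags))
			do_pending_work();	/* hypothetical worker, runs at most once per posting */
	}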
387
388/*
389 * test_and_change_bit - Change a bit and return its old value
390 * @nr: Bit to change
391 * @addr: Address to count from
392 *
393 * This operation is atomic and cannot be reordered.
394 * It also implies a memory barrier.
395 */
396static inline int test_and_change_bit(unsigned long nr,
397	volatile unsigned long *addr)
398{
399	int bit = nr & SZLONG_MASK;
400	unsigned long res;
401
402	smp_mb__before_llsc();
403
404	if (kernel_uses_llsc && R10000_LLSC_WAR) {
405		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
406		unsigned long temp;
407
408		__asm__ __volatile__(
409		"	.set	arch=r4000				\n"
410		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
411		"	xor	%2, %0, %3				\n"
412		"	" __SC	"%2, %1					\n"
413		"	beqzl	%2, 1b					\n"
414		"	and	%2, %0, %3				\n"
415		"	.set	mips0					\n"
416		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
417		: "r" (1UL << bit)
418		: "memory");
419	} else if (kernel_uses_llsc) {
420		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
421		unsigned long temp;
422
423		do {
424			__asm__ __volatile__(
425			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
426			"	" __LL	"%0, %1 # test_and_change_bit	\n"
427			"	xor	%2, %0, %3			\n"
428			"	" __SC	"\t%2, %1			\n"
429			"	.set	mips0				\n"
430			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
431			: "r" (1UL << bit)
432			: "memory");
433		} while (unlikely(!res));
434
435		res = temp & (1UL << bit);
436	} else
437		res = __mips_test_and_change_bit(nr, addr);
438
439	smp_llsc_mb();
440
441	return res != 0;
442}
443
444#include <asm-generic/bitops/non-atomic.h>
445
446/*
447 * __clear_bit_unlock - Clears a bit in memory
448 * @nr: Bit to clear
449 * @addr: Address to start counting from
450 *
451 * __clear_bit_unlock() is non-atomic and implies release semantics before
452 * the memory operation. It can be used for an unlock if no other CPUs can
453 * concurrently modify other bits in the word.
454 */
455static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
456{
457	smp_mb__before_llsc();
458	__clear_bit(nr, addr);
459	nudge_writes();
460}
461
462/*
463 * Return the bit position (0..63) of the most significant 1 bit in a word.
464 * Undefined if no 1 bit exists (the CLZ paths return -1, the generic tail 0).
465 */
466static inline unsigned long __fls(unsigned long word)
467{
468	int num;
469
470	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
471	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
472		__asm__(
473		"	.set	push					\n"
474		"	.set	"MIPS_ISA_LEVEL"			\n"
475		"	clz	%0, %1					\n"
476		"	.set	pop					\n"
477		: "=r" (num)
478		: "r" (word));
479
480		return 31 - num;
481	}
482
483	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
484	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
485		__asm__(
486		"	.set	push					\n"
487		"	.set	"MIPS_ISA_LEVEL"			\n"
488		"	dclz	%0, %1					\n"
489		"	.set	pop					\n"
490		: "=r" (num)
491		: "r" (word));
492
493		return 63 - num;
494	}
495
496	num = BITS_PER_LONG - 1;
497
498#if BITS_PER_LONG == 64
499	if (!(word & (~0ul << 32))) {
500		num -= 32;
501		word <<= 32;
502	}
503#endif
504	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
505		num -= 16;
506		word <<= 16;
507	}
508	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
509		num -= 8;
510		word <<= 8;
511	}
512	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
513		num -= 4;
514		word <<= 4;
515	}
516	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
517		num -= 2;
518		word <<= 2;
519	}
520	if (!(word & (~0ul << (BITS_PER_LONG-1))))
521		num -= 1;
522	return num;
523}
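The generic tail is a binary search: each step tests the upper half of the remaining window and, if it is empty, shifts the word up and lowers num by the window size. A stand-alone 32-bit copy, hedged as an illustration, with a worked trace for 0x00008000 (bit 15):

	/* Trace for word = 0x00008000: the top-16 test fails, so num drops
	 * 31 -> 15 and word becomes 0x80000000; every later test then sees
	 * its high bits set, and num stays 15, the index of the MSB. */
	static unsigned int fls_fallback32(unsigned int word)
	{
		unsigned int num = 31;

		if (!(word & 0xffff0000u)) { num -= 16; word <<= 16; }
		if (!(word & 0xff000000u)) { num -=  8; word <<=  8; }
		if (!(word & 0xf0000000u)) { num -=  4; word <<=  4; }
		if (!(word & 0xc0000000u)) { num -=  2; word <<=  2; }
		if (!(word & 0x80000000u)) { num -=  1; }
		return num;
	}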
524
525/*
526 * __ffs - find first bit in word.
527 * @word: The word to search
528 *
529 * Returns 0..SZLONG-1
530 * Undefined if no bit exists, so code should check against 0 first.
531 */
532static inline unsigned long __ffs(unsigned long word)
533{
534	return __fls(word & -word);
535}
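The word & -word idiom relies on two's complement: negation flips every bit above the lowest set bit, so the AND isolates exactly that bit, and __fls() of the result is its index. A hedged user-space check of the trick:

	#include <assert.h>

	static unsigned long lowest_set_bit(unsigned long w)
	{
		return w & -w;		/* two's-complement isolation trick */
	}

	int main(void)
	{
		assert(lowest_set_bit(6) == 2);		/* 0b0110 -> 0b0010, index 1 */
		assert(lowest_set_bit(0x80) == 0x80);	/* already a single bit */
		return 0;
	}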
536
537/*
538 * fls - find last bit set.
539 * @word: The word to search
540 *
541 * This is defined the same way as ffs.
542 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
543 */
544static inline int fls(int x)
545{
546	int r;
547
548	if (!__builtin_constant_p(x) &&
549	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
550		__asm__(
551		"	.set	push					\n"
552		"	.set	"MIPS_ISA_LEVEL"			\n"
553		"	clz	%0, %1					\n"
554		"	.set	pop					\n"
555		: "=r" (x)
556		: "r" (x));
557
558		return 32 - x;
559	}
560
561	r = 32;
562	if (!x)
563		return 0;
564	if (!(x & 0xffff0000u)) {
565		x <<= 16;
566		r -= 16;
567	}
568	if (!(x & 0xff000000u)) {
569		x <<= 8;
570		r -= 8;
571	}
572	if (!(x & 0xf0000000u)) {
573		x <<= 4;
574		r -= 4;
575	}
576	if (!(x & 0xc0000000u)) {
577		x <<= 2;
578		r -= 2;
579	}
580	if (!(x & 0x80000000u)) {
581		x <<= 1;
582		r -= 1;
583	}
584	return r;
585}
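On CLZ-capable CPUs the fast path is simply 32 - clz(x), and MIPS clz of zero is architecturally 32, which yields the documented fls(0) == 0 for free. A hedged user-space analogue with the GCC builtin, where zero must be special-cased because __builtin_clz(0) is undefined:

	/* fls(x) = 32 - clz(x); the builtin, unlike the MIPS instruction,
	 * is undefined for 0, hence the explicit check. */
	static int fls_clz(unsigned int x)
	{
		return x ? 32 - (int)__builtin_clz(x) : 0;
	}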
586
587#include <asm-generic/bitops/fls64.h>
588
589/*
590 * ffs - find first bit set.
591 * @word: The word to search
592 *
593 * This is defined the same way as
594 * the libc and compiler builtin ffs routines, therefore
595 * differs in spirit from the above ffz (man ffs).
596 */
597static inline int ffs(int word)
598{
599	if (!word)
600		return 0;
601
602	return fls(word & -word);
603}
604
605#include <asm-generic/bitops/ffz.h>
606#include <asm-generic/bitops/find.h>
607
608#ifdef __KERNEL__
609
610#include <asm-generic/bitops/sched.h>
611
612#include <asm/arch_hweight.h>
613#include <asm-generic/bitops/const_hweight.h>
614
615#include <asm-generic/bitops/le.h>
616#include <asm-generic/bitops/ext2-atomic.h>
617
618#endif /* __KERNEL__ */
619
620#endif /* _ASM_BITOPS_H */
v5.4
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
  7 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
  8 */
  9#ifndef _ASM_BITOPS_H
 10#define _ASM_BITOPS_H
 11
 12#ifndef _LINUX_BITOPS_H
 13#error only <linux/bitops.h> can be included directly
 14#endif
 15
 16#include <linux/compiler.h>
 17#include <linux/types.h>
 18#include <asm/barrier.h>
 19#include <asm/byteorder.h>		/* sigh ... */
 20#include <asm/compiler.h>
 21#include <asm/cpu-features.h>
 22#include <asm/llsc.h>
 23#include <asm/sgidefs.h>
 24#include <asm/war.h>
 25
 26/*
 27 * These are the "slower" versions of the functions and are in bitops.c.
 28 * These functions call raw_local_irq_{save,restore}().
 29 */
 30void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
 31void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
 32void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
 33int __mips_test_and_set_bit(unsigned long nr,
 34			    volatile unsigned long *addr);
 35int __mips_test_and_set_bit_lock(unsigned long nr,
 36				 volatile unsigned long *addr);
 37int __mips_test_and_clear_bit(unsigned long nr,
 38			      volatile unsigned long *addr);
 39int __mips_test_and_change_bit(unsigned long nr,
 40			       volatile unsigned long *addr);
 41
 42
 43/*
 44 * set_bit - Atomically set a bit in memory
 45 * @nr: the bit to set
 46 * @addr: the address to start counting from
 47 *
 48 * This function is atomic and may not be reordered.  See __set_bit()
 49 * if you do not require the atomic guarantees.
 50 * Note that @nr may be almost arbitrarily large; this function is not
 51 * restricted to acting on a single-word quantity.
 52 */
 53static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 54{
 55	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 56	int bit = nr & SZLONG_MASK;
 57	unsigned long temp;
 58
 59	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 60		__asm__ __volatile__(
 61		"	.set	push					\n"
 62		"	.set	arch=r4000				\n"
 63		"1:	" __LL "%0, %1			# set_bit	\n"
 64		"	or	%0, %2					\n"
 65		"	" __SC	"%0, %1					\n"
 66		"	beqzl	%0, 1b					\n"
 67		"	.set	pop					\n"
 68		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
 69		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)
 70		: __LLSC_CLOBBER);
 71#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 72	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
 73		loongson_llsc_mb();
 74		do {
 75			__asm__ __volatile__(
 76			"	" __LL "%0, %1		# set_bit	\n"
 77			"	" __INS "%0, %3, %2, 1			\n"
 78			"	" __SC "%0, %1				\n"
 79			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 80			: "ir" (bit), "r" (~0)
 81			: __LLSC_CLOBBER);
 82		} while (unlikely(!temp));
 83#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 84	} else if (kernel_uses_llsc) {
 85		loongson_llsc_mb();
 86		do {
 87			__asm__ __volatile__(
 88			"	.set	push				\n"
 89			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 90			"	" __LL "%0, %1		# set_bit	\n"
 91			"	or	%0, %2				\n"
 92			"	" __SC	"%0, %1				\n"
 93			"	.set	pop				\n"
 94			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 95			: "ir" (1UL << bit)
 96			: __LLSC_CLOBBER);
 97		} while (unlikely(!temp));
 98	} else
 99		__mips_set_bit(nr, addr);
100}
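Relative to the v4.17 text above, this version brackets every ISA override in .set push/.set pop instead of resetting to mips0, attaches an explicit __LLSC_CLOBBER list to each LL/SC sequence, and issues loongson_llsc_mb() before the retry loops to work around a Loongson-3 LL/SC erratum. A hedged stand-alone illustration of the push/pop idiom (hypothetical helper, not from this file):

	/* .set push saves the assembler option state and .set pop restores it,
	 * so the noreorder override below cannot leak into code the compiler
	 * emits after this asm statement. */
	static inline unsigned long identity_move(unsigned long x)
	{
		unsigned long out;

		__asm__(
		"	.set	push			\n"
		"	.set	noreorder		\n"
		"	move	%0, %1			\n"
		"	.set	pop			\n"
		: "=r" (out)
		: "r" (x));
		return out;
	}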
101
102/*
103 * clear_bit - Clears a bit in memory
104 * @nr: Bit to clear
105 * @addr: Address to start counting from
106 *
107 * clear_bit() is atomic and may not be reordered.  However, it does
108 * not contain a memory barrier, so if it is used for locking purposes,
109 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
110 * in order to ensure changes are visible on other processors.
111 */
112static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
113{
114	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
115	int bit = nr & SZLONG_MASK;
116	unsigned long temp;
117
118	if (kernel_uses_llsc && R10000_LLSC_WAR) {
119		__asm__ __volatile__(
120		"	.set	push					\n"
121		"	.set	arch=r4000				\n"
122		"1:	" __LL "%0, %1			# clear_bit	\n"
123		"	and	%0, %2					\n"
124		"	" __SC "%0, %1					\n"
125		"	beqzl	%0, 1b					\n"
126		"	.set	pop					\n"
127		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
128		: "ir" (~(1UL << bit))
129		: __LLSC_CLOBBER);
130#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
131	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
132		loongson_llsc_mb();
133		do {
134			__asm__ __volatile__(
135			"	" __LL "%0, %1		# clear_bit	\n"
136			"	" __INS "%0, $0, %2, 1			\n"
137			"	" __SC "%0, %1				\n"
138			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
139			: "ir" (bit)
140			: __LLSC_CLOBBER);
141		} while (unlikely(!temp));
142#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
143	} else if (kernel_uses_llsc) {
144		loongson_llsc_mb();
145		do {
146			__asm__ __volatile__(
147			"	.set	push				\n"
148			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
149			"	" __LL "%0, %1		# clear_bit	\n"
150			"	and	%0, %2				\n"
151			"	" __SC "%0, %1				\n"
152			"	.set	pop				\n"
153			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
154			: "ir" (~(1UL << bit))
155			: __LLSC_CLOBBER);
156		} while (unlikely(!temp));
157	} else
158		__mips_clear_bit(nr, addr);
159}
160
161/*
162 * clear_bit_unlock - Clears a bit in memory
163 * @nr: Bit to clear
164 * @addr: Address to start counting from
165 *
166 * clear_bit_unlock() is atomic and implies release semantics before the
167 * memory operation. It can be used for an unlock.
168 */
169static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
170{
171	smp_mb__before_atomic();
172	clear_bit(nr, addr);
173}
174
175/*
176 * change_bit - Toggle a bit in memory
177 * @nr: Bit to change
178 * @addr: Address to start counting from
179 *
180 * change_bit() is atomic and may not be reordered.
181 * Note that @nr may be almost arbitrarily large; this function is not
182 * restricted to acting on a single-word quantity.
183 */
184static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
185{
186	int bit = nr & SZLONG_MASK;
187
188	if (kernel_uses_llsc && R10000_LLSC_WAR) {
189		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
190		unsigned long temp;
191
192		__asm__ __volatile__(
193		"	.set	push				\n"
194		"	.set	arch=r4000			\n"
195		"1:	" __LL "%0, %1		# change_bit	\n"
196		"	xor	%0, %2				\n"
197		"	" __SC	"%0, %1				\n"
198		"	beqzl	%0, 1b				\n"
199		"	.set	pop				\n"
200		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
201		: "ir" (1UL << bit)
202		: __LLSC_CLOBBER);
203	} else if (kernel_uses_llsc) {
204		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
205		unsigned long temp;
206
207		loongson_llsc_mb();
208		do {
209			__asm__ __volatile__(
210			"	.set	push				\n"
211			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
212			"	" __LL "%0, %1		# change_bit	\n"
213			"	xor	%0, %2				\n"
214			"	" __SC	"%0, %1				\n"
215			"	.set	pop				\n"
216			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
217			: "ir" (1UL << bit)
218			: __LLSC_CLOBBER);
219		} while (unlikely(!temp));
220	} else
221		__mips_change_bit(nr, addr);
222}
223
224/*
225 * test_and_set_bit - Set a bit and return its old value
226 * @nr: Bit to set
227 * @addr: Address to count from
228 *
229 * This operation is atomic and cannot be reordered.
230 * It also implies a memory barrier.
231 */
232static inline int test_and_set_bit(unsigned long nr,
233	volatile unsigned long *addr)
234{
235	int bit = nr & SZLONG_MASK;
236	unsigned long res;
237
238	smp_mb__before_llsc();
239
240	if (kernel_uses_llsc && R10000_LLSC_WAR) {
241		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
242		unsigned long temp;
243
244		__asm__ __volatile__(
245		"	.set	push					\n"
246		"	.set	arch=r4000				\n"
247		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
248		"	or	%2, %0, %3				\n"
249		"	" __SC	"%2, %1					\n"
250		"	beqzl	%2, 1b					\n"
251		"	and	%2, %0, %3				\n"
252		"	.set	pop					\n"
253		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
254		: "r" (1UL << bit)
255		: __LLSC_CLOBBER);
256	} else if (kernel_uses_llsc) {
257		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
258		unsigned long temp;
259
260		loongson_llsc_mb();
261		do {
262			__asm__ __volatile__(
263			"	.set	push				\n"
264			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
265			"	" __LL "%0, %1	# test_and_set_bit	\n"
266			"	or	%2, %0, %3			\n"
267			"	" __SC	"%2, %1				\n"
268			"	.set	pop				\n"
269			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
270			: "r" (1UL << bit)
271			: __LLSC_CLOBBER);
272		} while (unlikely(!res));
273
274		res = temp & (1UL << bit);
275	} else
276		res = __mips_test_and_set_bit(nr, addr);
277
278	smp_llsc_mb();
279
280	return res != 0;
281}
282
283/*
284 * test_and_set_bit_lock - Set a bit and return its old value
285 * @nr: Bit to set
286 * @addr: Address to count from
287 *
288 * This operation is atomic and implies acquire ordering semantics
289 * after the memory operation.
290 */
291static inline int test_and_set_bit_lock(unsigned long nr,
292	volatile unsigned long *addr)
293{
294	int bit = nr & SZLONG_MASK;
295	unsigned long res;
296
297	if (kernel_uses_llsc && R10000_LLSC_WAR) {
298		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
299		unsigned long temp;
300
301		__asm__ __volatile__(
302		"	.set	push					\n"
303		"	.set	arch=r4000				\n"
304		"1:	" __LL "%0, %1		# test_and_set_bit_lock	\n"
305		"	or	%2, %0, %3				\n"
306		"	" __SC	"%2, %1					\n"
307		"	beqzl	%2, 1b					\n"
308		"	and	%2, %0, %3				\n"
309		"	.set	pop					\n"
310		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
311		: "r" (1UL << bit)
312		: __LLSC_CLOBBER);
313	} else if (kernel_uses_llsc) {
314		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
315		unsigned long temp;
316
317		loongson_llsc_mb();
318		do {
319			__asm__ __volatile__(
320			"	.set	push				\n"
321			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
322			"	" __LL "%0, %1	# test_and_set_bit_lock	\n"
323			"	or	%2, %0, %3			\n"
324			"	" __SC	"%2, %1				\n"
325			"	.set	pop				\n"
326			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
327			: "r" (1UL << bit)
328			: __LLSC_CLOBBER);
329		} while (unlikely(!res));
330
331		res = temp & (1UL << bit);
332	} else
333		res = __mips_test_and_set_bit_lock(nr, addr);
334
335	smp_llsc_mb();
336
337	return res != 0;
338}
339/*
340 * test_and_clear_bit - Clear a bit and return its old value
341 * @nr: Bit to clear
342 * @addr: Address to count from
343 *
344 * This operation is atomic and cannot be reordered.
345 * It also implies a memory barrier.
346 */
347static inline int test_and_clear_bit(unsigned long nr,
348	volatile unsigned long *addr)
349{
350	int bit = nr & SZLONG_MASK;
351	unsigned long res;
352
353	smp_mb__before_llsc();
354
355	if (kernel_uses_llsc && R10000_LLSC_WAR) {
356		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
357		unsigned long temp;
358
359		__asm__ __volatile__(
360		"	.set	push					\n"
361		"	.set	arch=r4000				\n"
362		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
363		"	or	%2, %0, %3				\n"
364		"	xor	%2, %3					\n"
365		"	" __SC	"%2, %1					\n"
366		"	beqzl	%2, 1b					\n"
367		"	and	%2, %0, %3				\n"
368		"	.set	pop					\n"
369		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
370		: "r" (1UL << bit)
371		: __LLSC_CLOBBER);
372#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
373	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
374		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
375		unsigned long temp;
376
377		loongson_llsc_mb();
378		do {
379			__asm__ __volatile__(
380			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
381			"	" __EXT "%2, %0, %3, 1			\n"
382			"	" __INS "%0, $0, %3, 1			\n"
383			"	" __SC	"%0, %1				\n"
384			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
385			: "ir" (bit)
386			: __LLSC_CLOBBER);
387		} while (unlikely(!temp));
388#endif
389	} else if (kernel_uses_llsc) {
390		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
391		unsigned long temp;
392
393		loongson_llsc_mb();
394		do {
395			__asm__ __volatile__(
396			"	.set	push				\n"
397			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
398			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
399			"	or	%2, %0, %3			\n"
400			"	xor	%2, %3				\n"
401			"	" __SC	"%2, %1				\n"
402			"	.set	pop				\n"
403			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
404			: "r" (1UL << bit)
405			: __LLSC_CLOBBER);
406		} while (unlikely(!res));
407
408		res = temp & (1UL << bit);
409	} else
410		res = __mips_test_and_clear_bit(nr, addr);
411
412	smp_llsc_mb();
413
414	return res != 0;
415}
416
417/*
418 * test_and_change_bit - Change a bit and return its old value
419 * @nr: Bit to change
420 * @addr: Address to count from
421 *
422 * This operation is atomic and cannot be reordered.
423 * It also implies a memory barrier.
424 */
425static inline int test_and_change_bit(unsigned long nr,
426	volatile unsigned long *addr)
427{
428	int bit = nr & SZLONG_MASK;
429	unsigned long res;
430
431	smp_mb__before_llsc();
432
433	if (kernel_uses_llsc && R10000_LLSC_WAR) {
434		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
435		unsigned long temp;
436
437		__asm__ __volatile__(
438		"	.set	push					\n"
439		"	.set	arch=r4000				\n"
440		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
441		"	xor	%2, %0, %3				\n"
442		"	" __SC	"%2, %1					\n"
443		"	beqzl	%2, 1b					\n"
444		"	and	%2, %0, %3				\n"
445		"	.set	pop					\n"
446		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
447		: "r" (1UL << bit)
448		: __LLSC_CLOBBER);
449	} else if (kernel_uses_llsc) {
450		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
451		unsigned long temp;
452
453		loongson_llsc_mb();
454		do {
455			__asm__ __volatile__(
456			"	.set	push				\n"
457			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
458			"	" __LL	"%0, %1 # test_and_change_bit	\n"
459			"	xor	%2, %0, %3			\n"
460			"	" __SC	"\t%2, %1			\n"
461			"	.set	pop				\n"
462			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
463			: "r" (1UL << bit)
464			: __LLSC_CLOBBER);
465		} while (unlikely(!res));
466
467		res = temp & (1UL << bit);
468	} else
469		res = __mips_test_and_change_bit(nr, addr);
470
471	smp_llsc_mb();
472
473	return res != 0;
474}
475
476#include <asm-generic/bitops/non-atomic.h>
477
478/*
479 * __clear_bit_unlock - Clears a bit in memory
480 * @nr: Bit to clear
481 * @addr: Address to start counting from
482 *
483 * __clear_bit_unlock() is non-atomic and implies release semantics before
484 * the memory operation. It can be used for an unlock if no other CPUs can
485 * concurrently modify other bits in the word.
486 */
487static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
488{
489	smp_mb__before_llsc();
490	__clear_bit(nr, addr);
491	nudge_writes();
492}
493
494/*
495 * Return the bit position (0..63) of the most significant 1 bit in a word.
496 * Undefined if no 1 bit exists (the CLZ paths return -1, the generic tail 0).
497 */
498static __always_inline unsigned long __fls(unsigned long word)
499{
500	int num;
501
502	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
503	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
504		__asm__(
505		"	.set	push					\n"
506		"	.set	"MIPS_ISA_LEVEL"			\n"
507		"	clz	%0, %1					\n"
508		"	.set	pop					\n"
509		: "=r" (num)
510		: "r" (word));
511
512		return 31 - num;
513	}
514
515	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
516	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
517		__asm__(
518		"	.set	push					\n"
519		"	.set	"MIPS_ISA_LEVEL"			\n"
520		"	dclz	%0, %1					\n"
521		"	.set	pop					\n"
522		: "=r" (num)
523		: "r" (word));
524
525		return 63 - num;
526	}
527
528	num = BITS_PER_LONG - 1;
529
530#if BITS_PER_LONG == 64
531	if (!(word & (~0ul << 32))) {
532		num -= 32;
533		word <<= 32;
534	}
535#endif
536	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
537		num -= 16;
538		word <<= 16;
539	}
540	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
541		num -= 8;
542		word <<= 8;
543	}
544	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
545		num -= 4;
546		word <<= 4;
547	}
548	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
549		num -= 2;
550		word <<= 2;
551	}
552	if (!(word & (~0ul << (BITS_PER_LONG-1))))
553		num -= 1;
554	return num;
555}
556
557/*
558 * __ffs - find first bit in word.
559 * @word: The word to search
560 *
561 * Returns 0..SZLONG-1
562 * Undefined if no bit exists, so code should check against 0 first.
563 */
564static __always_inline unsigned long __ffs(unsigned long word)
565{
566	return __fls(word & -word);
567}
568
569/*
570 * fls - find last bit set.
571 * @word: The word to search
572 *
573 * This is defined the same way as ffs.
574 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
575 */
576static inline int fls(unsigned int x)
577{
578	int r;
579
580	if (!__builtin_constant_p(x) &&
581	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
582		__asm__(
583		"	.set	push					\n"
584		"	.set	"MIPS_ISA_LEVEL"			\n"
585		"	clz	%0, %1					\n"
586		"	.set	pop					\n"
587		: "=r" (x)
588		: "r" (x));
589
590		return 32 - x;
591	}
592
593	r = 32;
594	if (!x)
595		return 0;
596	if (!(x & 0xffff0000u)) {
597		x <<= 16;
598		r -= 16;
599	}
600	if (!(x & 0xff000000u)) {
601		x <<= 8;
602		r -= 8;
603	}
604	if (!(x & 0xf0000000u)) {
605		x <<= 4;
606		r -= 4;
607	}
608	if (!(x & 0xc0000000u)) {
609		x <<= 2;
610		r -= 2;
611	}
612	if (!(x & 0x80000000u)) {
613		x <<= 1;
614		r -= 1;
615	}
616	return r;
617}
618
619#include <asm-generic/bitops/fls64.h>
620
621/*
622 * ffs - find first bit set.
623 * @word: The word to search
624 *
625 * This is defined the same way as
626 * the libc and compiler builtin ffs routines, therefore
627 * differs in spirit from the above ffz (man ffs).
628 */
629static inline int ffs(int word)
630{
631	if (!word)
632		return 0;
633
634	return fls(word & -word);
635}
636
637#include <asm-generic/bitops/ffz.h>
638#include <asm-generic/bitops/find.h>
639
640#ifdef __KERNEL__
641
642#include <asm-generic/bitops/sched.h>
643
644#include <asm/arch_hweight.h>
645#include <asm-generic/bitops/const_hweight.h>
646
647#include <asm-generic/bitops/le.h>
648#include <asm-generic/bitops/ext2-atomic.h>
649
650#endif /* __KERNEL__ */
651
652#endif /* _ASM_BITOPS_H */