/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

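/*
 * SZLONG_LOG, SZLONG_MASK and the __LL/__SC/__INS/__EXT assembler macros
 * used throughout this file come from <asm/llsc.h>, which selects the
 * 32-bit (ll/sc/ins/ext) or 64-bit (lld/scd/dins/dext) forms based on
 * _MIPS_SZLONG.
 */
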
/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);


/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
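
/*
 * Example (illustrative, 64-bit kernel): set_bit(70, bitmap) addresses
 * bitmap[70 >> 6] == bitmap[1] and atomically performs
 * bitmap[1] |= 1UL << (70 & 63), i.e. it sets bit 6 of the second word.
 */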

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}
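
/*
 * Typically paired with test_and_set_bit_lock() further below; see the
 * usage sketch after that function.
 */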

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}
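
/*
 * Example (illustrative): change_bit(0, &word) atomically flips the
 * least significant bit of *word; calling it twice restores the
 * original value.
 */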

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
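
/*
 * Example (illustrative; 'init_done' and do_one_time_init() are
 * hypothetical): run one-time initialisation exactly once even when
 * several CPUs race to perform it.
 *
 *	static unsigned long init_done;
 *
 *	if (!test_and_set_bit(0, &init_done))
 *		do_one_time_init();	// only the first caller enters
 */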

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1	# test_and_set_bit_lock	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit_lock	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
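
/*
 * Example (illustrative sketch): a minimal lock bit built from
 * test_and_set_bit_lock()/clear_bit_unlock(); 'word' is hypothetical.
 *
 *	while (test_and_set_bit_lock(0, &word))
 *		;			// spin: the bit was already set
 *	// ... critical section, ordered after the acquire ...
 *	clear_bit_unlock(0, &word);	// release semantics
 */
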
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
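
/*
 * Example (illustrative; PENDING and 'state' are hypothetical): consume
 * a "work pending" flag set by another CPU; if several consumers race,
 * exactly one of them observes the 1 -> 0 transition.
 *
 *	if (test_and_clear_bit(PENDING, &state))
 *		process_pending_work();
 */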

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
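
/*
 * Example (illustrative): if *word == 1, test_and_change_bit(0, word)
 * returns 1 (the old value was set) and leaves bit 0 cleared; a second
 * call then returns 0 and sets it again.
 */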

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic but implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..BITS_PER_LONG-1) of the most significant
 * 1 bit in a word. The result is undefined if no 1 bit exists.
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
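
/*
 * Example: __fls(0x90) == 7. 0x90 is binary 10010000, so the most
 * significant 1 bit is bit 7; the software fallback halves the search
 * window (16/8/4/2/1 bits on 32-bit) until only that position remains.
 */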

/*
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Returns 0..BITS_PER_LONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
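
/*
 * Example: __ffs(0x90) == 4. word & -word isolates the lowest 1 bit
 * (0x90 & -0x90 == 0x10), and __fls(0x10) reports its position, 4.
 */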

/*
 * fls - find last bit set.
 * @x: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
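
/*
 * Example: fls(0x90) == 8. Bit 7 is the highest 1 bit and fls() numbers
 * bits from 1; on the clz path this is 32 - clz(0x90) == 32 - 24 == 8.
 * fls(0) == 0 by definition.
 */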

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
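
/*
 * Example: ffs(0x90) == 5. The lowest 1 bit is bit 4 and ffs() numbers
 * bits from 1, so ffs() == __ffs() + 1 for any non-zero argument.
 */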

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */