arch/mips/include/asm/bitops.h (v5.9)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/isa-rev.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#define __bit_op(mem, insn, inputs...) do {			\
	unsigned long temp;					\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __LL		"%0, %1			\n"	\
	"	" insn		"			\n"	\
	"	" __SC		"%0, %1			\n"	\
	"	" __SC_BEQZ	"%0, 1b			\n"	\
	"	.set		pop			\n"	\
	: "=&r"(temp), "+" GCC_OFF_SMALL_ASM()(mem)		\
	: inputs						\
	: __LLSC_CLOBBER);					\
} while (0)

#define __test_bit_op(mem, ll_dst, insn, inputs...) ({		\
	unsigned long orig, temp;				\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __LL		ll_dst ", %2		\n"	\
	"	" insn		"			\n"	\
	"	" __SC		"%1, %2			\n"	\
	"	" __SC_BEQZ	"%1, 1b			\n"	\
	"	.set		pop			\n"	\
	: "=&r"(orig), "=&r"(temp),				\
	  "+" GCC_OFF_SMALL_ASM()(mem)				\
	: inputs						\
	: __LLSC_CLOBBER);					\
								\
	orig;							\
})
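
/*
 * Both macros above expand to a load-linked/store-conditional (LL/SC)
 * retry loop: __LL loads the word, "insn" modifies it in a register,
 * __SC attempts the store, and __SC_BEQZ loops back to label 1 if the
 * store-conditional failed because another CPU wrote the word in the
 * meantime.  As an illustrative C sketch (modify() and sc() are
 * placeholders, not real kernel helpers):
 *
 *	do {
 *		old = *mem;			// __LL
 *		new = modify(old);		// "insn"
 *	} while (!sc(mem, new));		// __SC + __SC_BEQZ retry
 *
 * __test_bit_op additionally preserves the __LL result in "orig" so the
 * caller can inspect the bit's previous value.
 */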

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_set_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
		__bit_op(*m, __INS "%0, %3, %2, 1", "i"(bit), "r"(~0));
		return;
	}

	__bit_op(*m, "or\t%0, %2", "ir"(BIT(bit)));
}
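
/*
 * Example (illustrative, not part of the original header): callers
 * operate on unsigned long bitmaps, typically declared with
 * DECLARE_BITMAP() from <linux/types.h>:
 *
 *	DECLARE_BITMAP(pending, 128);	// array of unsigned long
 *
 *	set_bit(70, pending);		// atomically sets bit 70, which
 *					// lives in pending[1] on 64-bit
 */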

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_clear_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
		__bit_op(*m, __INS "%0, $0, %2, 1", "i"(bit));
		return;
	}

	__bit_op(*m, "and\t%0, %2", "ir"(~BIT(bit)));
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_change_bit(nr, addr);
		return;
	}

	__bit_op(*m, "xor\t%0, %2", "ir"(BIT(bit)));
}

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	if (!kernel_uses_llsc) {
		res = __mips_test_and_set_bit_lock(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
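
/*
 * Example (illustrative, not part of the original header): together
 * with clear_bit_unlock(), this gives a simple bit spinlock; the bit
 * name below is hypothetical:
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &flags))
 *		cpu_relax();		// bit already held, spin
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &flags);
 */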

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	return test_and_set_bit_lock(nr, addr);
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_clear_bit(nr, addr);
	} else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
		res = __test_bit_op(*m, "%1",
				    __EXT "%0, %1, %3, 1;"
				    __INS "%1, $0, %3, 1",
				    "i"(bit));
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3;"
				     "xor\t%1, %1, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
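
/*
 * Example (illustrative, not part of the original header): a common
 * idiom is consuming a pending-work flag exactly once; the names below
 * are hypothetical:
 *
 *	if (test_and_clear_bit(WORK_PENDING_BIT, &flags))
 *		process_work();	// only one CPU sees the bit as set
 */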

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_change_bit(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "xor\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}

#undef __bit_op
#undef __test_bit_op

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
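
/*
 * Worked example (illustrative, not part of the original header): on a
 * 32-bit build without clz, __fls(0x90) starts at num = 31, then each
 * test asks whether any bit is set in the current top half:
 *
 *	top 16 bits of 0x00000090 clear -> num = 15, word = 0x00900000
 *	top  8 bits clear               -> num =  7, word = 0x90000000
 *	top 4/2/1 tests see set bits    -> num stays 7
 *
 * so __fls(0x90) = 7, matching 31 - clz(0x90).
 */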

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..BITS_PER_LONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
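
/*
 * Note (illustrative, not part of the original header): word & -word
 * isolates the lowest set bit, because two's-complement negation flips
 * every bit above it.  E.g. 0b101100 & -0b101100 = 0b000100, and
 * __fls() of that single bit is its index, here 2.
 */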

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
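
/*
 * Note (illustrative, not part of the original header): ffs() is
 * 1-based while __ffs() is 0-based, so ffs(0x08) = 4 but __ffs(0x08) = 3,
 * and only ffs() is defined for an all-zero argument.
 */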

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */

arch/mips/include/asm/bitops.h (v3.15)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define __INS		"ins	"
#define __EXT		"ext	"
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define __INS		"dins	"
#define __EXT		"dext	"
#endif
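
/*
 * Note (illustrative, not part of the original header): _MIPS_SZLONG
 * selects the native word width, so a 64-bit kernel uses the doubleword
 * forms; set_bit() then expands around a sequence roughly like
 *
 *	lld	$t, (m)		# __LL
 *	or	$t, mask
 *	scd	$t, (m)		# __SC
 *
 * while a 32-bit kernel uses ll/sc on 32-bit words, with SZLONG_LOG and
 * SZLONG_MASK (5/31 or 6/63) doing the bit-to-word arithmetic.
 */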

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
#define smp_mb__after_clear_bit()	smp_llsc_mb()

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
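
/*
 * Note (illustrative, not part of the original header): the
 * R10000_LLSC_WAR paths in this file use "beqzl", the MIPS
 * branch-likely form, to keep the retry branch inside the LL/SC
 * sequence as a workaround for ll/sc errata on early R10000 parts;
 * the non-workaround paths instead wrap the asm in a C do/while loop
 * that retries whenever the store-conditional leaves 0 in temp.
 */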

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_clear_bit();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL	"%0, %1 # test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips64					\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__("clz %0, %1" : "=r" (x) : "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */