/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>

#define __bit_op(mem, insn, inputs...) do {			\
	unsigned long __temp;					\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __stringify(LONG_LL) "	%0, %1	\n"	\
	"	" insn "				\n"	\
	"	" __stringify(LONG_SC) "	%0, %1	\n"	\
	"	" __stringify(SC_BEQZ) "	%0, 1b	\n"	\
	"	.set		pop			\n"	\
	: "=&r"(__temp), "+" GCC_OFF_SMALL_ASM()(mem)		\
	: inputs						\
	: __LLSC_CLOBBER);					\
} while (0)

#define __test_bit_op(mem, ll_dst, insn, inputs...) ({		\
	unsigned long __orig, __temp;				\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __stringify(LONG_LL) " " ll_dst ", %2	\n"	\
	"	" insn "				\n"	\
	"	" __stringify(LONG_SC) "	%1, %2	\n"	\
	"	" __stringify(SC_BEQZ) "	%1, 1b	\n"	\
	"	.set		pop			\n"	\
	: "=&r"(__orig), "=&r"(__temp),				\
	  "+" GCC_OFF_SMALL_ASM()(mem)				\
	: inputs						\
	: __LLSC_CLOBBER);					\
								\
	__orig;							\
})
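
/*
 * Both helpers expand to the classic LL/SC retry loop. As a rough C-level
 * sketch of the idea (load_linked()/store_conditional() are illustrative
 * names, not real kernel APIs):
 *
 *	do {
 *		old = load_linked(mem);		// LL: load, open a link
 *		new = insn(old, inputs);	// e.g. "or", "and", "ins"
 *	} while (!store_conditional(mem, new));	// SC fails on contention
 *
 * __test_bit_op() additionally yields the value observed by the LL, so
 * callers can derive the bit's previous state.
 */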

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);
bool __mips_xor_is_negative_byte(unsigned long mask,
				 volatile unsigned long *addr);

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_set_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
		__bit_op(*m, __stringify(LONG_INS) " %0, %3, %2, 1", "i"(bit), "r"(~0));
		return;
	}

	__bit_op(*m, "or\t%0, %2", "ir"(BIT(bit)));
}
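
/*
 * Typical use, purely illustrative (the bitmap and bit index below are
 * assumptions, not part of this file):
 *
 *	DECLARE_BITMAP(busy_slots, 64);
 *	set_bit(5, busy_slots);		// atomically mark slot 5 busy
 */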

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_clear_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
		__bit_op(*m, __stringify(LONG_INS) " %0, $0, %2, 1", "i"(bit));
		return;
	}

	__bit_op(*m, "and\t%0, %2", "ir"(~BIT(bit)));
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_change_bit(nr, addr);
		return;
	}

	__bit_op(*m, "xor\t%0, %2", "ir"(BIT(bit)));
}

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	if (!kernel_uses_llsc) {
		res = __mips_test_and_set_bit_lock(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
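
/*
 * With its acquire semantics, test_and_set_bit_lock() can serve as a
 * simple bit spinlock; a minimal sketch (LOCK_BIT and flags are assumed
 * for illustration):
 *
 *	while (test_and_set_bit_lock(LOCK_BIT, &flags))
 *		cpu_relax();			// spin until we win the bit
 *	...critical section...
 *	clear_bit_unlock(LOCK_BIT, &flags);	// release
 */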

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	return test_and_set_bit_lock(nr, addr);
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_clear_bit(nr, addr);
	} else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
		res = __test_bit_op(*m, "%1",
				    __stringify(LONG_EXT) " %0, %1, %3, 1;"
				    __stringify(LONG_INS) " %1, $0, %3, 1",
				    "i"(bit));
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3;"
				     "xor\t%1, %1, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
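
/*
 * Illustrative pattern: several CPUs may race to consume a "work
 * pending" flag, but only one will see it set (PENDING, pending_mask
 * and process_work() are assumed for the example):
 *
 *	if (test_and_clear_bit(PENDING, &pending_mask))
 *		process_work();		// runs on the winning CPU only
 */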

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_change_bit(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "xor\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}

static inline bool xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
	unsigned long orig;
	bool res;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_xor_is_negative_byte(mask, p);
	} else {
		orig = __test_bit_op(*p, "%0",
				     "xor\t%1, %0, %3",
				     "ir"(mask));
		res = (orig & BIT(7)) != 0;
	}

	smp_llsc_mb();

	return res;
}
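
/*
 * xor_unlock_is_negative_byte() atomically XORs @mask into the word and
 * reports whether bit 7, the sign bit of the least-significant byte, was
 * set beforehand. A hedged sketch of the intended use, with LOCK_BIT,
 * word and wake_waiters() assumed for illustration:
 *
 *	if (xor_unlock_is_negative_byte(BIT(LOCK_BIT), &word))
 *		wake_waiters();		// a "waiters" flag lives in bit 7
 */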

#undef __bit_op
#undef __test_bit_op

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..BITS_PER_LONG-1) of the most significant 1
 * bit in a word.
 * Returns -1 if no 1 bit exists.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
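
/*
 * Examples: __fls(1) == 0, __fls(0x80000000UL) == 31 and, on a 64-bit
 * kernel, __fls(1UL << 63) == 63. When clz/dclz are unavailable, the
 * fallback above narrows the position by a binary search over the word.
 */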

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..BITS_PER_LONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}

/*
 * fls - find last bit set.
 * @x: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the below ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
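
/*
 * Examples: ffs(0) == 0, ffs(1) == 1, ffs(0x10) == 5, matching the
 * 1-based bit numbering of the libc ffs(3) convention.
 */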

#include <asm-generic/bitops/ffz.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */