/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n) (U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7))
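
/*
 * Illustrative note (not in the original header): for a constant nr,
 * the byte-mask path below lets e.g. set_bit(11, addr) compile to a
 * single "lock orb $0x8,1(addr)": CONST_MASK_ADDR(11, addr) selects
 * byte (11 >> 3) == 1 and CONST_MASK(11) is 1 << (11 & 7) == 0x08.
 */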

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}
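
/*
 * Usage sketch (illustrative only; "flags" and FLAG_DIRTY are made-up
 * names, not kernel API):
 *
 *	static unsigned long flags;
 *	#define FLAG_DIRTY 0
 *
 *	set_bit(FLAG_DIRTY, &flags);	- atomic, safe against other CPUs
 *	__set_bit(FLAG_DIRTY, &flags);	- cheaper, caller must serialize
 */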

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
}

static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "andb %2,%1"
		CC_SET(s)
		: CC_OUT(s) (negative), ADDR
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}
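
/*
 * Illustrative note (an assumption about intended use, not stated in
 * this header): the helper above clears bit @nr and reports the sign
 * bit of the resulting byte in one locked "andb", so a caller can
 * release a lock bit and test a high "waiters" bit living in the same
 * byte without a second atomic access.
 */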

// Let everybody know we have it
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts),
			 *addr, "Ir", nr, "%0", c);
}
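
/*
 * Illustrative note (a rough sketch of <asm/rmwcc.h> internals, which
 * this header does not spell out): GEN_BINARY_RMWcc() expands to
 * something like
 *
 *	asm volatile(LOCK_PREFIX "bts %2,%0" CC_SET(c)
 *		     : "+m" (*addr), CC_OUT(c) (result) : "Ir" (nr));
 *	return result;
 *
 * i.e. the old bit value lands in the carry flag and is read back via
 * setc, or via asm goto on compilers that support it.
 */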

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline bool
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
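
/*
 * Usage sketch (illustrative only; "busy" and BUSY_BIT are made-up
 * names): a bit can serve as a simple lock by pairing the acquire and
 * release helpers above:
 *
 *	while (test_and_set_bit_lock(BUSY_BIT, &busy))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(BUSY_BIT, &busy);
 */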

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm(__ASM_SIZE(bts) " %2,%1"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}
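
/*
 * Illustrative note (not in the original header): the race above means
 * two CPUs doing __test_and_set_bit() on the same word can both read
 * the bit as 0 and both be told they "set" it; the atomic
 * test_and_set_bit() guarantees exactly one caller sees the old 0.
 */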

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr),
			 *addr, "Ir", nr, "%0", c);
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btr) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btc) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc),
			 *addr, "Ir", nr, "%0", c);
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
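
/*
 * Worked example (illustrative): on a 64-bit kernel
 * (_BITOPS_LONG_SHIFT == 6), constant_test_bit(68, addr) tests
 * bit (68 & 63) == 4 of word addr[68 >> 6] == addr[1], matching the
 * "bit 0 is the LSB of addr" layout described at the top of this file.
 */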

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(bt) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static bool test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
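
/*
 * Usage sketch (illustrative; "flags" is a made-up variable): the macro
 * picks the plain-C version for compile-time-constant bit numbers and
 * the "bt"-based version otherwise:
 *
 *	if (test_bit(3, &flags))	- constant_test_bit(3, &flags)
 *		...;
 *	if (test_bit(n, &flags))	- variable_test_bit(n, &flags)
 *		...;
 */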

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}
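
/*
 * Illustrative notes (not in the original header): __ffs(0x18) == 3,
 * since bit 3 is the lowest set bit. The "rep" prefix makes the opcode
 * decode as TZCNT on CPUs that support it (where it is faster) while
 * remaining a plain BSF on older parts; for nonzero input the results
 * agree.
 */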

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}
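
/*
 * Worked example (illustrative): ffz(0x7) == 3, because ~0x7 has its
 * lowest set bit at position 3; ffz() is simply __ffs() applied to the
 * complemented word.
 */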

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
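
/*
 * Worked example (illustrative): __fls(0x18) == 4, the zero-based index
 * of the highest set bit; contrast fls() below, which returns a 1-based
 * position and defines a result for 0.
 */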

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says the
	 * register is rewritten with its previous value, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
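
/*
 * Worked example (illustrative): ffs(0x18) == 4 (bit 3 is the lowest
 * set bit, reported 1-based) and ffs(0) == 0. The "0" (-1) constraint
 * preloads the output register with -1, so x == 0 yields
 * -1 + 1 == 0 without a branch.
 */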

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says the
	 * register is rewritten with its previous value, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
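
/*
 * Worked example (illustrative): fls(0x18) == 5 (bit 4 is the highest
 * set bit, reported 1-based) and fls(0) == 0, by the same -1 preload
 * trick used in ffs() above.
 */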

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says the
	 * register is rewritten with its previous value.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
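
/*
 * Worked example (illustrative): fls64(1ULL << 63) == 64 and
 * fls64(0) == 0; here the -1 preload is expressed through the "+r"
 * read-write constraint on bitpos instead of a matching "0" constraint.
 */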

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */