Linux v4.17: arch/x86/include/asm/bitops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)		(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1) when longs are
 * 32 bits wide (with 64-bit longs, bit 64 is).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR			BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))

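/*
 * Worked example (editor's illustration, not part of the original header):
 * for a compile-time constant nr = 12, CONST_MASK_ADDR(12, addr) names the
 * byte at (char *)addr + 1 and CONST_MASK(12) is 1 << (12 & 7) == 0x10, so
 * the constant paths below can compile to a single byte-wide
 * "lock orb $0x10,1(%[addr])" instead of a bts on the whole long.
 */
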
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so portable code must not rely on its
 * ordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}

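/*
 * Usage sketch (editor's illustration; dev_flags and DEV_FLAG_READY are
 * hypothetical). The atomic form is for bitmaps shared between CPUs; the
 * non-atomic __set_bit() below suffices on single-threaded paths such as
 * init code.
 */
#if 0
static unsigned long dev_flags;
#define DEV_FLAG_READY	0

static void mark_ready(void)
{
	set_bit(DEV_FLAG_READY, &dev_flags);	/* LOCK-prefixed, SMP-safe */
}

static void mark_ready_early(void)
{
	__set_bit(DEV_FLAG_READY, &dev_flags);	/* non-atomic, cheaper */
}
#endif
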
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it is called on the same region of memory concurrently, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

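/*
 * Barrier-pairing sketch (editor's illustration; WAITING_BIT and the flags
 * word are hypothetical). This is the pattern the comment above asks for
 * when clear_bit() participates in a locking or wakeup protocol:
 */
#if 0
	clear_bit(WAITING_BIT, &flags);
	smp_mb__after_atomic();		/* order the clear before the wakeup */
	wake_up_bit(&flags, WAITING_BIT);
#endif
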
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
}

static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "andb %2,%1"
		CC_SET(s)
		: CC_OUT(s) (negative), ADDR
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}

// Let everybody know we have it
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte

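/*
 * Usage sketch (editor's illustration; the names are hypothetical, though
 * this mirrors how the page cache pairs PG_locked with a waiters bit in the
 * same byte). One atomic op both releases LOCK_BIT and reports whether
 * bit 7 of that byte, the sign bit, is set:
 */
#if 0
	if (clear_bit_unlock_is_negative_byte(LOCK_BIT, &word))
		wake_up_waiters(&word);		/* hypothetical follow-up */
#endif
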
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it is called on the same region of memory concurrently, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts),
	                 *addr, "Ir", nr, "%0", c);
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline bool
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

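/*
 * Bit-lock sketch (editor's illustration; my_lock_word and MY_LOCK_BIT are
 * hypothetical, and real code would normally use <linux/bit_spinlock.h>).
 * Acquire spins until the returned old value is 0; release pairs with
 * clear_bit_unlock() above.
 */
#if 0
static unsigned long my_lock_word;
#define MY_LOCK_BIT	0

static void my_lock(void)
{
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_lock_word))
		cpu_relax();
}

static void my_unlock(void)
{
	clear_bit_unlock(MY_LOCK_BIT, &my_lock_word);
}
#endif
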
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm(__ASM_SIZE(bts) " %2,%1"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr),
	                 *addr, "Ir", nr, "%0", c);
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c.
 */
static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btr) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and can be reordered! */
static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btc) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc),
	                 *addr, "Ir", nr, "%0", c);
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(bt) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static bool test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))

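/*
 * Usage sketch (editor's illustration, reusing the hypothetical dev_flags
 * from above). With a constant nr the compiler folds this to a plain load
 * and mask via constant_test_bit(); a variable nr emits a bt instruction.
 */
#if 0
	if (test_bit(DEV_FLAG_READY, &dev_flags))
		start_io();			/* hypothetical */
#endif
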
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit is set, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero bit exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}

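/*
 * Worked examples (editor's illustration). Both results are 0-based bit
 * positions, and both functions are undefined when no qualifying bit exists:
 *
 *   __ffs(0x0000b800UL) == 11	lowest set bit is bit 11 (0x0800)
 *   ffz(0x000000ffUL)   == 8	bits 0-7 are set, so bit 8 is the lowest zero
 */
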
/**
 * __fls - find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines and therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says
	 * the dest reg is undefined if x==0, but an Intel CPU architect has
	 * stated that in practice it is rewritten with its previous value,
	 * except that the top 32 bits are cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

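/*
 * Worked examples (editor's illustration) contrasting the two families:
 *
 *   ffs(0)        == 0	special-cased: no bit set
 *   ffs(0x10)     == 5	1-based position of bit 4
 *   __ffs(0x10UL) == 4	0-based, and undefined for a zero word
 */
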
/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way to the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says
	 * the dest reg is undefined if x==0, but an Intel CPU architect has
	 * stated that in practice it is rewritten with its previous value,
	 * except that the top 32 bits are cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

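/*
 * Sketch (editor's illustration; the helper name is hypothetical, and the
 * kernel's own roundup_pow_of_two() already covers this). fls() makes
 * "smallest power of two >= n" a one-liner:
 */
#if 0
static inline unsigned int round_up_pow2(unsigned int n)
{
	/* valid for 1 <= n <= 1U << 31; e.g. n = 5: fls(4) == 3, 1 << 3 == 8 */
	return 1U << fls(n - 1);
}
#endif
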
/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way to the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says
	 * the dest reg is undefined if x==0, but an Intel CPU architect has
	 * stated that in practice it is rewritten with its previous value.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

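/*
 * Worked examples (editor's illustration): bitpos starts at -1, so
 *
 *   fls64(0)          == 0	BSRQ leaves bitpos untouched
 *   fls64(1)          == 1	bit 0 -> position 1
 *   fls64(1ULL << 63) == 64	bit 63 -> position 64
 */
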
#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */