v4.6

#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR				BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}
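
/*
 * Editor's illustration (not part of the kernel header): a minimal usage
 * sketch contrasting the atomic and non-atomic variants on a bitmap the
 * caller owns; DECLARE_BITMAP() is assumed from <linux/types.h>.
 *
 *	static DECLARE_BITMAP(flags, 64);
 *
 *	set_bit(0, flags);	// LOCK orb/bts: safe against other CPUs
 *	__set_bit(1, flags);	// plain bts: only when no writer can race
 */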

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}
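
/*
 * Editor's illustration: the barrier pairing described above, for the
 * case where clear_bit() must not be reordered before earlier stores;
 * "ready" and IN_USE_BIT are hypothetical names for the sketch.
 *
 *	data->ready = 1;			// publish result first
 *	smp_mb__before_atomic();		// order the store above...
 *	clear_bit(IN_USE_BIT, &data->flags);	// ...before releasing the flag
 */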

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline int
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
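
/*
 * Editor's illustration: a minimal bit-spinlock shape built from the
 * lock/unlock pair above; BUSY_BIT, "word" and the spin loop are
 * assumptions of the sketch, not kernel API (the kernel's real one
 * lives in <linux/bit_spinlock.h>).
 *
 *	while (test_and_set_bit_lock(BUSY_BIT, &word))
 *		cpu_relax();			// old value was 1: keep spinning
 *	// ...critical section...
 *	clear_bit_unlock(BUSY_BIT, &word);	// release semantics
 */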

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
}

static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
{
	int oldbit;

	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
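
/*
 * Editor's illustration: test_bit() picks its implementation at compile
 * time, so both forms below are valid on the same bitmap.
 *
 *	test_bit(5, flags);	// constant nr: C shift-and-mask, foldable
 *	test_bit(n, flags);	// variable nr: single "bt" instruction
 */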

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}
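
/*
 * Editor's illustration: both helpers are 0-based and undefined on their
 * degenerate input, hence the guard checks the comments ask for.
 *
 *	__ffs(0x18UL) == 3	// lowest set bit
 *	ffz(0x07UL)   == 3	// lowest clear bit
 *	if (word)		// guard before calling __ffs(word)
 *		bit = __ffs(word);
 */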

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
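
/*
 * Editor's illustration: ffs() is 1-based and defined at 0, unlike
 * __ffs().
 *
 *	ffs(0)    == 0
 *	ffs(0x18) == 4		// __ffs(0x18) would be 3
 */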

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
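
/*
 * Editor's illustration:
 *
 *	fls(0)          == 0
 *	fls(1)          == 1
 *	fls(0x80000000) == 32
 */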

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */

v6.8
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define RLONG_ADDR(x)			 "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x)			"+m" (*(volatile char *) (x))

#define ADDR				RLONG_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define CONST_MASK_ADDR(nr, addr)	WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))

static __always_inline void
arch_set_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "orb %b1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" (CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_clear_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "andb %b1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" (~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

static __always_inline void
arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	arch_clear_bit(nr, addr);
}

static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "xorb %2,%1"
		CC_SET(s)
		: CC_OUT(s) (negative), WBYTE_ADDR(addr)
		: "iq" ((char)mask) : "memory");
	return negative;
}
#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
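
/*
 * Editor's note (illustration, not part of the header): the helper above
 * atomically XORs a byte and reports whether the result's sign bit
 * (bit 7 of that byte) is set. The page-cache unlock path uses this
 * shape of operation to drop a lock bit and test a waiters bit in one
 * locked instruction; a sketch with hypothetical bit names (LOCK_BIT in
 * bits 0..6, WAITERS_BIT == 7):
 *
 *	if (xor_unlock_is_negative_byte(1UL << LOCK_BIT, &word))
 *		wake_up_waiters();	// hypothetical callback
 */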

static __always_inline void
arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	arch___clear_bit(nr, addr);
}

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_change_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "xorb %b1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" (CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

static __always_inline bool
arch_test_and_set_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}

static __always_inline bool
arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return arch_test_and_set_bit(nr, addr);
}

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm(__ASM_SIZE(bts) " %2,%1"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit)
	    : ADDR, "Ir" (nr) : "memory");
	return oldbit;
}

static __always_inline bool
arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}

/*
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btr) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : ADDR, "Ir" (nr) : "memory");
	return oldbit;
}

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btc) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : ADDR, "Ir" (nr) : "memory");

	return oldbit;
}

static __always_inline bool
arch_test_and_change_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile("testb %2,%1"
		     CC_SET(nz)
		     : CC_OUT(nz) (oldbit)
		     : "m" (((unsigned char *)addr)[nr >> 3]),
		       "i" (1 << (nr & 7))
		     :"memory");

	return oldbit;
}

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(bt) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");

	return oldbit;
}

static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) :
					  variable_test_bit(nr, addr);
}

static __always_inline bool
arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
{
	return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
					  variable_test_bit(nr, addr);
}
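
/*
 * Editor's note (illustration): the _acquire variant guarantees the bit
 * load is ordered before later memory accesses, which matters when a set
 * bit publishes other data; sketch with hypothetical names:
 *
 *	if (test_bit_acquire(DATA_READY, &flags))	// public wrapper
 *		consume(data);	// loads below are not reordered above the test
 */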

static __always_inline unsigned long variable__ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
#define __ffs(word)				\
	(__builtin_constant_p(word) ?		\
	 (unsigned long)__builtin_ctzl(word) :	\
	 variable__ffs(word))
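
/*
 * Editor's note (illustration): unlike the v4.6 inline, the macro form
 * lets constant arguments fold at compile time.
 *
 *	__ffs(0x10UL)	// becomes 4 via __builtin_ctzl(), no code emitted
 *	__ffs(n)	// variable: "rep; bsf" (executes as tzcnt on BMI CPUs)
 */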

static __always_inline unsigned long variable_ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
#define ffz(word)				\
	(__builtin_constant_p(word) ?		\
	 (unsigned long)__builtin_ctzl(~word) :	\
	 variable_ffz(word))

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	if (__builtin_constant_p(word))
		return BITS_PER_LONG - 1 - __builtin_clzl(word);

	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

#undef ADDR

#ifdef __KERNEL__
static __always_inline int variable_ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x))
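
/*
 * Editor's note (illustration): same constant-folding pattern as
 * __ffs()/ffz() above.
 *
 *	ffs(0)		// 0, computed at compile time
 *	ffs(0x18)	// 4, computed at compile time
 *	ffs(n)		// variable: the bsfl sequence in variable_ffs()
 */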

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(unsigned int x)
{
	int r;

	if (__builtin_constant_p(x))
		return x ? 32 - __builtin_clz(x) : 0;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;

	if (__builtin_constant_p(x))
		return x ? 64 - __builtin_clzll(x) : 0;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>
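
/*
 * Editor's note (illustration): since the arch_* rename, the public names
 * (set_bit(), test_and_set_bit(), ...) are generated by the instrumented-*
 * headers above, which add KASAN/KCSAN annotations before delegating to
 * the arch_* primitives defined in this file, roughly:
 *
 *	static __always_inline void set_bit(long nr, volatile unsigned long *addr)
 *	{
 *		instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 *		arch_set_bit(nr, addr);
 *	}
 */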

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */