v5.9
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 * O(1) scheduler patch
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations
 * to operate on hw-defined data-structures, so we can't easily change
 * these operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}
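
/*
 * Editor's note: illustrative sketch, not part of the original header.
 * The loop above is the classic compare-and-swap retry pattern: reread
 * the word and retry whenever another CPU changed it between the load
 * and the cmpxchg.  A portable rendering using GCC's
 * __sync_val_compare_and_swap() builtin (a full barrier, stronger than
 * cmpxchg_acq()'s acquire ordering) as a stand-in might look like this;
 * example_set_bit is a hypothetical name:
 */
#if 0
static void example_set_bit(int nr, volatile unsigned int *addr)
{
	volatile unsigned int *m = addr + (nr >> 5);
	unsigned int bit = 1u << (nr & 31), old;

	do {
		old = *m;	/* snapshot the word */
		/* retry if *m changed between the load and the CAS */
	} while (__sync_val_compare_and_swap(m, old, old | bit) != old);
}
#endif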

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}
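
/*
 * Editor's note: illustrative sketch, not part of the original header.
 * When clear_bit() stands in for an unlock, the barrier the comment
 * above asks for must come first so the critical section cannot leak
 * past the clearing store.  The flag word and helper name below are
 * hypothetical:
 */
#if 0
static void example_unlock_flag(volatile unsigned long *flags)
{
	/* ... critical section ... */
	smp_mb__before_atomic();	/* order prior accesses before the clear */
	clear_bit(0, flags);		/* atomic, but carries no barrier itself */
}
#endif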

/**
 * clear_bit_unlock - Clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and may not be reordered.  It does
 * contain a memory barrier suitable for unlock type operations.
 */
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_rel(m, old, new) != old);
}

/**
 * __clear_bit_unlock - Non-atomically clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Similarly to clear_bit_unlock, the implementation uses a store
 * with release semantics. See also arch_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
	__u32 * const m = (__u32 *) addr + (nr >> 5);
	__u32 const new = *m & ~(1 << (nr & 31));

	ia64_st4_rel_nta(m, new);
}
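
/*
 * Editor's note: illustrative sketch, not part of the original header.
 * ia64_st4_rel_nta() is a single 4-byte store with release semantics:
 * everything before it becomes visible before the bit clears, with no
 * full barrier and no cmpxchg loop.  A C11 analogue (hypothetical
 * helper name; kernel code would not use C11 atomics) would be:
 */
#if 0
#include <stdatomic.h>	/* illustrative only */

static void example_release_clear(_Atomic unsigned int *word, int nr)
{
	unsigned int v = atomic_load_explicit(word, memory_order_relaxed);

	/* one plain store, release-ordered, mirroring st4.rel */
	atomic_store_explicit(word, v & ~(1u << (nr & 31)),
			      memory_order_release);
}
#endif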

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit() on ia64.
 */
#define test_and_set_bit_lock test_and_set_bit
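
/*
 * Editor's note: illustrative sketch, not part of the original header.
 * test_and_set_bit_lock() (acquire) and clear_bit_unlock() (release)
 * pair up as a minimal bit spinlock.  The flag word and helper name
 * are hypothetical; the spin-with-cpu_relax() shape is the
 * conventional kernel pattern:
 */
#if 0
static void example_bit_lock_region(volatile unsigned long *flags)
{
	while (test_and_set_bit_lock(0, flags))	/* acquire on success */
		cpu_relax();
	/* ... critical section ... */
	clear_bit_unlock(0, flags);		/* release */
}
#endif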

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}
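
/*
 * Editor's note: illustrative sketch, not part of the original header.
 * The non-atomic variants are the right choice when the caller already
 * serializes access, e.g. under a spinlock, since they skip the
 * cmpxchg retry loop entirely.  The lock, bitmap and helper name are
 * hypothetical:
 */
#if 0
static int example_claim_id(spinlock_t *lock, unsigned long *bitmap, int id)
{
	int was_set;

	spin_lock(lock);
	was_set = __test_and_set_bit(id, bitmap);	/* plain load + store */
	spin_unlock(lock);
	return was_set;
}
#endif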

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p &= ~m;
	return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/**
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}
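
/*
 * Editor's note: illustrative worked example, not part of the original
 * header.  Every operation above addresses bits the same way: nr >> 5
 * selects a 32-bit word, nr & 31 selects the bit within it.
 */
#if 0
/* e.g. bit 70 lives in word 2, bit position 6 */
_Static_assert((70 >> 5) == 2 && (70 & 31) == 6, "bit addressing");
#endif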

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt(x & (~x - 1));
	return result;
}
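
/*
 * Editor's note: illustrative worked example, not part of the original
 * header.  x & (~x - 1) isolates the trailing ones of x, so their
 * population count is the index of the first zero.  For x = 0b0111:
 *
 *	~x         = ...11111000
 *	~x - 1     = ...11110111
 *	x & (~x-1) = 0b0111	(the three trailing ones)
 *	popcnt     = 3		-> first zero bit of x is bit 3
 */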

/**
 * __ffs - find first set bit in word
 * @x: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt((x-1) & ~x);
	return result;
}
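
/*
 * Editor's note: illustrative worked example, not part of the original
 * header.  Dual of the ffz() identity: (x-1) & ~x isolates the zeros
 * below the first set bit.  For x = 0b1000:
 *
 *	x - 1      = 0b0111
 *	(x-1) & ~x = 0b0111	(the three zeros below the set bit)
 *	popcnt     = 3		-> first set bit of x is bit 3
 */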

#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

	exp = ia64_getf_exp(d);
	return exp - 0xffff;
}
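
/*
 * Editor's note: illustrative worked example, not part of the original
 * header.  The conversion to long double normalizes x, so the ia64
 * floating-point register-format exponent field (biased by 0xffff, as
 * the subtraction above implies) ends up holding 0xffff + floor(log2(x)).
 * For x = 9 = 1.001b * 2^3, getf.exp returns 0xffff + 3 and the
 * function yields 3.
 */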

/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int fls(unsigned int t)
{
	unsigned long x = t & 0xffffffffu;

	if (!x)
		return 0;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return ia64_popcnt(x);
}
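
/*
 * Editor's note: illustrative worked example, not part of the original
 * header.  The shift-or cascade smears the highest set bit into every
 * lower position, leaving a mask of fls(t) ones whose population count
 * is the answer.  For t = 9:
 *
 *	x          = 0b1001
 *	x |= x>>1  -> 0b1101
 *	x |= x>>2  -> 0b1111	(further shifts change nothing)
 *	popcnt     = 4		-> fls(9) == 4
 */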

/*
 * Find the last (most significant) bit set.  Undefined for x==0.
 * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
 */
static inline unsigned long
__fls (unsigned long x)
{
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	return ia64_popcnt(x) - 1;
}

#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/builtin-ffs.h>

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);
	return result;
}

#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
#define __arch_hweight8(x)  ((unsigned int) __arch_hweight64((x) & 0xfful))
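
/*
 * Editor's note: illustrative worked example, not part of the original
 * header.  All widths funnel into the single 64-bit popcnt; the masks
 * merely discard bits above the requested width, so e.g.
 * __arch_hweight8(0x1ff) masks to 0xff and reports 8, not 9.
 */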

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */