arch/ia64/include/asm/bitops.h (Linux v5.9)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 * O(1) scheduler patch
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations to
 * operate on hw-defined data-structures, so we can't easily change these
 * operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}
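
/*
 * Usage sketch (illustrative): the indexing above puts bit @nr in 32-bit
 * word (nr >> 5) at bit position (nr & 31), so bit 37 lands in word 1,
 * bit 5, and the cmpxchg_acq() retry loop guarantees a racing update of
 * another bit in the same word is never lost.  DECLARE_BITMAP() is the
 * usual way to get a suitably aligned bitmap:
 *
 *	static DECLARE_BITMAP(map, 64);
 *	set_bit(37, map);	// word 1 |= (1 << 5), atomically
 */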

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * clear_bit_unlock - Clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and may not be reordered.  It does
 * contain a memory barrier suitable for unlock type operations.
 */
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_rel(m, old, new) != old);
}
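
/*
 * Pairing sketch (illustrative; "lock_word" is a placeholder): a minimal
 * bit lock built from these primitives.  test_and_set_bit_lock() (defined
 * below) gives acquire semantics on the way in, clear_bit_unlock() release
 * semantics on the way out, which is what orders the critical section:
 *
 *	while (test_and_set_bit_lock(0, &lock_word))
 *		cpu_relax();
 *	...critical section...
 *	clear_bit_unlock(0, &lock_word);
 */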

/**
 * __clear_bit_unlock - Non-atomically clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Similarly to clear_bit_unlock, the implementation uses a store
 * with release semantics. See also arch_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
	__u32 * const m = (__u32 *) addr + (nr >> 5);
	__u32 const new = *m & ~(1 << (nr & 31));

	ia64_st4_rel_nta(m, new);
}

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}
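
/*
 * Usage sketch (illustrative; BUSY_BIT, "state", do_work() and backoff()
 * are placeholders): because the old value comes back from the same
 * cmpxchg that set the bit, test_and_set_bit() supports a race-free
 * "try to claim" idiom with no separate read:
 *
 *	if (!test_and_set_bit(BUSY_BIT, &state))
 *		do_work();	// we flipped 0 -> 1 and own the resource
 *	else
 *		backoff();	// some other CPU got there first
 */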

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit() on ia64.
 */
#define test_and_set_bit_lock test_and_set_bit

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p &= ~m;
	return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/**
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}

static __inline__ int
test_bit (int nr, const volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}
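
/*
 * Worked example (illustrative; "words" is a placeholder array):
 *
 *	__u32 words[2] = { 0, 1 << 5 };
 *	test_bit(37, words);	// == 1: 37 >> 5 == 1, 37 & 31 == 5
 *	test_bit(36, words);	// == 0
 *
 * Note test_bit() is a plain read; it makes no atomicity or ordering
 * promises.
 */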

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt(x & (~x - 1));
	return result;
}
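
/*
 * Worked example (illustrative): x & (~x - 1) keeps exactly the trailing
 * one bits of x, so counting them with popcnt yields the index of the
 * first zero.  For x == 0x0f: ~x - 1 ends in ...11101111, the AND gives
 * 0b1111, and ffz(0x0f) == ia64_popcnt(0b1111) == 4.
 */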

/**
 * __ffs - find first bit in word.
 * @x: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt((x-1) & ~x);
	return result;
}
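
/*
 * Worked example (illustrative): (x - 1) & ~x keeps exactly the zero bits
 * below the lowest set bit.  For x == 0x18 (0b11000): x - 1 == 0b10111,
 * ~x ends in ...00111, the AND gives 0b111, so
 * __ffs(0x18) == ia64_popcnt(0b111) == 3.
 */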

#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

	exp = ia64_getf_exp(d);
	return exp - 0xffff;
}
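
/*
 * Worked example (illustrative): converting to long double normalizes x
 * to 1.f * 2^e, and getf.exp hands back the biased exponent (bias 0xffff,
 * hence the subtraction above).  For x == 9 == 1.001b * 2^3 the exponent
 * field holds 0xffff + 3, so ia64_fls(9) == 3 with no loop or branch.
 */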

/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int fls(unsigned int t)
{
	unsigned long x = t & 0xffffffffu;

	if (!x)
		return 0;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return ia64_popcnt(x);
}
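
/*
 * Worked example (illustrative): the shift-or cascade smears the highest
 * set bit into every bit below it.  For t == 9 (0b1001) the ors yield
 * 0b1111, and ia64_popcnt(0b1111) == 4, matching the documented
 * fls(9) == 4.
 */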

/*
 * Find the last (most significant) bit set.  Undefined for x==0.
 * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
 */
static inline unsigned long
__fls (unsigned long x)
{
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	return ia64_popcnt(x) - 1;
}
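
/*
 * Worked example (illustrative): the same smear over all 64 bits, with
 * "- 1" converting the population count to 0-based numbering: for x == 9
 * the ors yield 0b1111 and __fls(9) == 4 - 1 == 3.
 */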

#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/builtin-ffs.h>

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);
	return result;
}
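
/*
 * Worked examples (illustrative): the popcnt instruction does all the
 * work; the narrower variants below just mask first:
 *
 *	__arch_hweight64(0xf0f0)	// == 8
 *	__arch_hweight8(0x1ff)		// == 8: masked to 0xff first
 */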

#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
#define __arch_hweight8(x)  ((unsigned int) __arch_hweight64((x) & 0xfful))

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */