#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 *	    O(1) scheduler patch
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations
 * to operate on hw-defined data-structures, so we can't easily change
 * these operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}
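
/*
 * Usage sketch (editor's example; the bitmap name is hypothetical, not
 * part of this header).  With the layout described above, bit 42 lands
 * in the second 32-bit word, and the cmpxchg_acq loop retries until no
 * other CPU has raced with the update:
 *
 *	static unsigned long my_flags[2];	(128 bits, long aligned)
 *	set_bit(42, my_flags);			(ORs 1 << 10 into word 1)
 */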

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it is called concurrently on the same region of memory, only one
 * of the racing updates may take effect.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}
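
/*
 * Usage sketch (editor's example; MY_LOCK_BIT and my_lock_word are
 * hypothetical).  Since clear_bit() is not a release barrier by itself,
 * an unlock built from it needs the explicit barrier named above:
 *
 *	smp_mb__before_atomic();	(order the critical section first)
 *	clear_bit(MY_LOCK_BIT, &my_lock_word);
 *
 * clear_bit_unlock() below folds the release ordering into the cmpxchg
 * itself, which is the preferred form for unlock paths.
 */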

/**
 * clear_bit_unlock - Clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and may not be reordered.  It does
 * contain a memory barrier suitable for unlock type operations.
 */
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_rel(m, old, new) != old);
}

/**
 * __clear_bit_unlock - Non-atomically clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Similarly to clear_bit_unlock, the implementation uses a store
 * with release semantics.  See also arch_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
	__u32 * const m = (__u32 *) addr + (nr >> 5);
	__u32 const new = *m & ~(1 << (nr & 31));

	ia64_st4_rel_nta(m, new);
}
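
/*
 * Usage sketch (editor's example; the lock word is hypothetical).  The
 * lock/unlock bitops pair up as a simple bit spinlock:
 *
 *	while (test_and_set_bit_lock(0, &word))	(acquire semantics)
 *		cpu_relax();
 *	...critical section...
 *	clear_bit_unlock(0, &word);		(release semantics)
 *
 * __clear_bit_unlock() gets the same release ordering from a single
 * st4.rel store instead of a cmpxchg loop, which is safe only when no
 * other bits in the word can change concurrently.
 */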

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it is called concurrently on the same region of memory, only one
 * of the racing updates may take effect.
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it is called concurrently on the same region of memory, only one
 * of the racing updates may take effect.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}
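
/*
 * Usage sketch (editor's example; the names are hypothetical).  The
 * returned old value makes this a natural "try to claim" operation:
 *
 *	if (!test_and_set_bit(MY_BUSY_BIT, &dev_flags))
 *		...we claimed it; the acquire cmpxchg orders the claim
 *		   before our subsequent accesses...
 *	else
 *		...already owned by someone else...
 */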

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit() on ia64.
 */
#define test_and_set_bit_lock test_and_set_bit

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p &= ~m;
	return oldbitset;
}
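
/*
 * Usage sketch (editor's example; map and map_lock are hypothetical).
 * The non-atomic variants suit bitmaps already serialized by a lock,
 * avoiding the cmpxchg retry loop:
 *
 *	spin_lock(&map_lock);
 *	if (__test_and_clear_bit(nr, map))
 *		...bit was set and we consumed it...
 *	spin_unlock(&map_lock);
 */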

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/**
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}
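
/*
 * Worked example (editor's addition): test_bit(37, addr) indexes 32-bit
 * word 37 >> 5 == 1 and extracts bit 37 & 31 == 5, i.e. it evaluates
 *
 *	(((const volatile __u32 *) addr)[1] >> 5) & 1
 *
 * consistent with the "bit 32 is the LSB of (addr+1)" layout documented
 * at set_bit() above.
 */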

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt(x & (~x - 1));
	return result;
}
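
/*
 * Worked example (editor's addition): for x == 0xb (binary 1011) the
 * first zero is bit 2.  ~x ends in ...0100, so ~x - 1 ends in ...0011,
 * and x & (~x - 1) == 0x3.  The AND keeps exactly the set bits below
 * the first zero, so popcnt returns its index: ffz(0xb) == 2.
 */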

/**
 * __ffs - find the first set bit in a word
 * @x: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt((x-1) & ~x);
	return result;
}
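
/*
 * Worked example (editor's addition): for x == 0x18 (binary 11000) the
 * first set bit is bit 3.  x - 1 == 10111 and ~x ends in ...00111, so
 * (x-1) & ~x == 0x7: the zeros below the first set bit become ones and
 * everything else drops out, giving __ffs(0x18) == popcnt(0x7) == 3.
 */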

#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

	exp = ia64_getf_exp(d);
	return exp - 0xffff;
}
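
/*
 * Worked example (editor's addition): converting to long double places
 * the most significant set bit of x in the implicit-one position, so the
 * biased exponent that getf.exp reads back encodes that bit's index.
 * For x == 9 (binary 1001), d == 1.001 * 2^3, getf.exp returns
 * 0xffff + 3, and ia64_fls(9) == 3.
 */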

/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int
fls (int t)
{
	unsigned long x = t & 0xffffffffu;

	if (!x)
		return 0;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return ia64_popcnt(x);
}
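
/*
 * Worked example (editor's addition): for t == 9 (binary 1001) the
 * shift-and-OR cascade smears the highest set bit downward, leaving
 * x == binary 1111; popcnt then counts the 4 bits at and below the
 * highest set bit, so fls(9) == 4.
 */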

/*
 * Find the last (most significant) bit set.  Undefined for x==0.
 * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
 */
static inline unsigned long
__fls (unsigned long x)
{
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	return ia64_popcnt(x) - 1;
}

#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/builtin-ffs.h>

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);
	return result;
}

#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
#define __arch_hweight8(x)  ((unsigned int) __arch_hweight64((x) & 0xfful))
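
/*
 * Usage sketch (editor's example): the narrower variants just mask the
 * argument before the 64-bit popcnt, so each still compiles down to a
 * single ia64_popcnt():
 *
 *	__arch_hweight32(0xf0f0f0f0)	== 16
 *	__arch_hweight8(0xff)		== 8
 */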

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */