#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */
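
/*
 * Indexing convention, worked through for nr = 40: the functions below
 * use the longword m = (int *) addr + (40 >> 5) = addr + 1 and the
 * mask 1UL << (40 & 31) = 1UL << 8, i.e. bit 40 of the bitmap is bit 8
 * of the second 32-bit word.
 */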
static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
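
/*
 * The ldl_l/stl_c pair above is Alpha's load-locked/store-conditional
 * primitive.  Roughly, the asm performs the following retry loop
 * (illustrative C sketch only; store_conditional() is a hypothetical
 * stand-in for stl_c -- the atomicity comes from the hardware lock
 * flag, which plain C cannot express):
 *
 *	do {
 *		temp = *m;			(ldl_l: load, set lock flag)
 *		temp |= mask;			(bis: set the requested bit)
 *	} while (!store_conditional(m, temp));	(stl_c: fails if flag lost)
 *
 * stl_c writes 0 into its source register on failure, and the beq then
 * branches to the out-of-line retry stub in .subsection 2.
 */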

/*
 * WARNING: non-atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}

#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}

/*
 * WARNING: non-atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}

static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non-atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
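
/*
 * Because the old value of the bit is returned, test_and_set_bit()
 * can arbitrate ownership: of any number of racing callers, exactly
 * one sees 0.  A usage sketch (bitmap and slot are the caller's):
 *
 *	if (!test_and_set_bit(slot, bitmap))
 *		... the bit was clear and we set it; the slot is ours ...
 *	else
 *		... already claimed by someone else ...
 */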

static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
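
/*
 * test_and_set_bit_lock() (acquire) pairs with clear_bit_unlock()
 * (release) above to build a simple bit lock.  A minimal sketch,
 * assuming cpu_relax() from <asm/processor.h>, which is not part of
 * this header:
 *
 *	while (test_and_set_bit_lock(0, &word))
 *		cpu_relax();			(spin until we see 0 -> 1)
 *	... critical section ...
 *	clear_bit_unlock(0, &word);		(smp_mb(), then atomic clear)
 *
 * This is why the lock variant omits the leading mb: an acquire only
 * needs to order the critical section after the bit flip, so the
 * trailing mb alone suffices.
 */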

/*
 * WARNING: non-atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non-atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non-atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

static inline int
test_bit(int nr, const volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}
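
/*
 * Worked example: ffz_b(0xF7).  "~x & -~x" isolates the lowest clear
 * bit, so x becomes 0x08; then x1 = 0x08 & 0xAA = 0x08,
 * x2 = 0x08 & 0xCC = 0x08, x4 = 0x08 & 0xF0 = 0, and
 * sum = 2 + 0 + 1 = 3: bit 3 is the first zero.  Each mask supplies
 * one bit of the 3-bit answer (0xAA the odd positions, 0xCC positions
 * 2-3 and 6-7, 0xF0 the high nibble).
 */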

static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee. EV67 can calculate it directly. */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}
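
/*
 * Worked example for the non-EV67 path: ffz(0xFFFFFFFFFFFF00FFUL).
 * __kernel_cmpbge(word, ~0UL) sets mask bit i iff byte i of word is
 * 0xff, giving 0xFD here; ffz_b(0xFD) = 1 picks byte 1 as the first
 * byte containing a zero.  __kernel_extbl() extracts that byte (0x00)
 * and ffz_b(0x00) = 0 locates the zero within it: 1*8 + 0 = 8.
 */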

/*
 * __ffs = Find First set bit in word. Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee. EV67 can calculate it directly. */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}
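
/*
 * Note how ffz_b() is reused: __kernel_cmpbge(0, word) sets mask bit i
 * iff byte i of word is zero, so ffz_b() of the mask yields the first
 * non-zero byte, and ffz_b(~bits) the first set bit within it.  E.g.
 * __ffs(0x300000UL): the zero-byte mask is 0xFB, ffz_b(0xFB) = 2, the
 * extracted byte is 0x30, ffz_b(~0x30) = 4, giving 2*8 + 4 = 20.
 */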

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}
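
/*
 * E.g. ffs(0) = 0, ffs(1) = 1, ffs(0x10) = 5.  Note that __ffs(word)
 * is computed unconditionally; for word == 0 its result is undefined
 * but harmless, since the conditional discards it.
 */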

/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif
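
/*
 * In the table version, __kernel_cmpbge(x, 0x0101...01UL) sets mask
 * bit i iff byte i of x is non-zero, so the first __flsm1_tab lookup
 * ("fls minus 1") names the highest non-zero byte; the second lookup
 * resolves the bit within that byte, and the "+ (x != 0)" turns the
 * fls-1 result into fls for non-zero x.  E.g. fls64(0x10000UL): the
 * byte mask is 0x04, a = 2, the extracted byte is 0x01, and
 * r = 2*8 + 0 + 1 = 17.
 */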

static inline unsigned long __fls(unsigned long x)
{
	return fls64(x) - 1;
}

static inline int fls(int x)
{
	return fls64((unsigned int) x);
}
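
/*
 * E.g. fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32; the cast through
 * unsigned int keeps a negative int from sign-extending into the high
 * 32 bits.  __fls() returns a bit index instead of a 1-based count
 * (__fls(1) = 0) and, like __ffs(), is undefined for 0.
 */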

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee. EV67 can calculate it directly. */
static inline unsigned long __arch_hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
	return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
	return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
	return __arch_hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif
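
/*
 * E.g. __arch_hweight8(0xA5) = 4.  On EV67 all four widths reduce to
 * the single ctpop (population count) instruction; the masking in the
 * 16- and 8-bit variants just strips any bits a caller may have left
 * above the operand width.
 */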

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap. It's guaranteed that at least
 * one of the 100 bits is set.
 */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
	unsigned long b0, b1, ofs, tmp;

	b0 = b[0];
	b1 = b[1];
	ofs = (b0 ? 0 : 64);
	tmp = (b0 ? b0 : b1);

	return __ffs(tmp) + ofs;
}
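
/*
 * E.g. with b[0] == 0 and b[1] == 0x40, ofs = 64 and tmp = b[1], so
 * the result is __ffs(0x40) + 64 = 70.  Selecting the word with the
 * two conditionals lets the compiler use cmov instead of a
 * hard-to-predict branch.
 */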

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */