Linux v3.1: arch/alpha/include/asm/bitops.h
#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */

static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
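/*
 * The sequence above is effectively the following read-modify-write
 * loop (a C-level sketch; load_locked/store_conditional are
 * illustrative names for ldl_l/stl_c, not real kernel helpers):
 *
 *	do {
 *		old = load_locked(m);
 *		new = old | (1UL << (nr & 31));
 *	} while (!store_conditional(m, new));
 *
 * stl_c leaves 0 in its source register when the store fails, so
 * "beq %0,2f" branches to the retry stub, which is placed out of
 * line via .subsection 2 so the common path falls straight through.
 */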

/*
 * WARNING: non atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}

#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}
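/*
 * clear_bit() by itself has no ordering guarantees; the smp_mb()
 * executed first is what gives clear_bit_unlock() its release
 * semantics: every access made while the bit was held as a lock is
 * ordered before the bit becomes visible as clear.
 */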

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}

static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
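/*
 * Two details worth noting: the "bne %2,2f" exit means the xor only
 * runs when the bit is known clear, where it behaves exactly like a
 * set; and on SMP the mb before and after the ll/sc loop gives this
 * value-returning atomic the full-barrier semantics the kernel's
 * memory model requires of it.
 */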

static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

static inline int
test_bit(int nr, const volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 *
 * Do a binary search on the bits.  Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}
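/*
 * Worked example (illustrative): ffz_b(0xf7). ~x & -~x isolates the
 * lowest clear bit, leaving x = 0x08 (bit 3). Bit 3 falls under the
 * 0xAA and 0xCC masks but not 0xF0, so sum = 1 + 2 + 0 = 3, the
 * position of the first zero bit.
 */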

static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}
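/*
 * Worked example (illustrative): ffz(0xffffffffffffefff). cmpbge
 * compares every byte of word with 0xff at once, so "bits" is clear
 * exactly in position 1, the first byte that is not all-ones. extbl
 * then extracts that byte (0xef), whose first zero is bit 4, giving
 * 1*8 + 4 = 12.
 */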

/*
 * __ffs = Find First set bit in word.  Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}
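/*
 * For word == 0 the __ffs() result is undefined, but it is computed
 * unconditionally and then thrown away by the conditional, so ffs(0)
 * still returns 0 as the libc convention requires.
 */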

/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif
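/*
 * Worked example for the table-driven variant (illustrative):
 * fls64(0x00f0000000000000). cmpbge against 0x0101...01 flags the
 * nonzero bytes, so t = 0x40 and a = __flsm1_tab[0x40] = 6, the index
 * of the highest nonzero byte. extbl extracts that byte (0xf0),
 * __flsm1_tab[0xf0] = 7, and r = 6*8 + 7 + 1 = 56: bit 55 is the
 * highest set bit. When x == 0 every term is zero, so fls64(0) == 0.
 */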

static inline unsigned long __fls(unsigned long x)
{
	return fls64(x) - 1;
}

static inline int fls(int x)
{
	return fls64((unsigned int) x);
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long __arch_hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
	return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
	return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
	return __arch_hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap.  It's guaranteed that at least
 * one of the 100 bits is set.
 */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
	unsigned long b0, b1, ofs, tmp;

	b0 = b[0];
	b1 = b[1];
	ofs = (b0 ? 0 : 64);
	tmp = (b0 ? b0 : b1);

	return __ffs(tmp) + ofs;
}
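/*
 * Example (illustrative): with b[0] == 0 and b[1] == 0x8, ofs is 64
 * and tmp is b[1], so the result is __ffs(0x8) + 64 == 67. Both
 * selections are simple enough for the compiler to turn into
 * conditional moves instead of hard-to-predict branches.
 */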

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */
Linux v6.9.4: arch/alpha/include/asm/bitops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */

static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}
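/*
 * From here on the non-atomic helpers carry the arch___ prefix; the
 * __set_bit()/__test_and_set_bit()/... names used by callers are
 * mapped onto them by <asm-generic/bitops/non-instrumented-non-atomic.h>,
 * included near the end of this file.
 */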
static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}

static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	arch___clear_bit(nr, addr);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

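/*
 * Atomically xor 'mask' into the word at p and report whether bit 7
 * of the old value was set, letting a caller clear one bit and test
 * another in a single atomic step. The old value is snapshotted with
 * the mov inside the ll/sc loop, before the xor overwrites it.
 */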
static inline bool xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
	unsigned long temp, old;

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	mov %0,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*p), "=&r" (old)
	:"Ir" (mask), "m" (*p));

	return (old & BIT(7)) != 0;
}

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 *
 * Do a binary search on the bits.  Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}

static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}

/*
 * __ffs = Find First set bit in word.  Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}

/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif

static inline unsigned long __fls(unsigned long x)
{
	return fls64(x) - 1;
}

static inline int fls(unsigned int x)
{
	return fls64(x);
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long __arch_hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
	return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
	return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
	return __arch_hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap.  It's guaranteed that at least
 * one of the 100 bits is set.
 */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
	unsigned long b0, b1, ofs, tmp;

	b0 = b[0];
	b1 = b[1];
	ofs = (b0 ? 0 : 64);
	tmp = (b0 ? b0 : b1);

	return __ffs(tmp) + ofs;
}

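/*
 * Maps the generic non-atomic bitop names onto the uninstrumented
 * arch___* helpers defined above.
 */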
#include <asm-generic/bitops/non-instrumented-non-atomic.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */