Linux v3.5.6: arch/s390/include/asm/bitops.h

  1#ifndef _S390_BITOPS_H
  2#define _S390_BITOPS_H
  3
  4/*
  5 *  include/asm-s390/bitops.h
  6 *
  7 *  S390 version
  8 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
  9 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 10 *
 11 *  Derived from "include/asm-i386/bitops.h"
 12 *    Copyright (C) 1992, Linus Torvalds
 13 *
 14 */
 15
 16#ifndef _LINUX_BITOPS_H
 17#error only <linux/bitops.h> can be included directly
 18#endif
 19
 20#include <linux/compiler.h>
 21
 22/*
 23 * 32 bit bitops format:
 24 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 25 * bit 32 is the LSB of *(addr+4). That combined with the
 26 * big endian byte order on S390 gives the following bit
 27 * order in memory:
 28 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 29 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 30 * after that follows the next long with bit numbers
 31 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 32 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 33 * The reason for this bit ordering is the fact that
 34 * in the architecture-independent code bit operations
 35 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 36 * with operations of the form "set_bit(bitnr, flags)".
 37 *
 38 * 64 bit bitops format:
 39 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 40 * bit 64 is the LSB of *(addr+8). That combined with the
 41 * big endian byte order on S390 gives the following bit
 42 * order in memory:
 43 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 44 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 45 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 46 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 47 * after that follows the next long with bit numbers
 48 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 49 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 50 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 51 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 52 * The reason for this bit ordering is the fact that
 53 * in the architecture-independent code bit operations
 54 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 55 * with operations of the form "set_bit(bitnr, flags)".
 56 */
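A minimal sketch of the numbering described above (hypothetical helper, not
part of this header): bit nr of a bitmap lives in the long at index
nr / __BITOPS_WORDSIZE under the mask 1UL << (nr % __BITOPS_WORDSIZE), which
is exactly why generic code can mix "flags |= (1 << bitnr)" with
"set_bit(bitnr, flags)".

/* Illustration only; __BITOPS_WORDSIZE is defined below. */
static inline int example_test_bit(unsigned long nr,
				   const unsigned long *bitmap)
{
	unsigned long word = bitmap[nr / __BITOPS_WORDSIZE];
	unsigned long mask = 1UL << (nr % __BITOPS_WORDSIZE);

	return (word & mask) != 0;
}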
 57
 58/* bitmap tables from arch/s390/kernel/bitmap.c */
 59extern const char _oi_bitmap[];
 60extern const char _ni_bitmap[];
 61extern const char _zb_findmap[];
 62extern const char _sb_findmap[];
 63
 64#ifndef CONFIG_64BIT
 65
 66#define __BITOPS_ALIGN		3
 67#define __BITOPS_WORDSIZE	32
 68#define __BITOPS_OR		"or"
 69#define __BITOPS_AND		"nr"
 70#define __BITOPS_XOR		"xr"
 71
 72#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
 73	asm volatile(						\
 74		"	l	%0,%2\n"			\
 75		"0:	lr	%1,%0\n"			\
 76		__op_string "	%1,%3\n"			\
 77		"	cs	%0,%1,%2\n"			\
 78		"	jl	0b"				\
 79		: "=&d" (__old), "=&d" (__new),			\
 80		  "=Q" (*(unsigned long *) __addr)		\
 81		: "d" (__val), "Q" (*(unsigned long *) __addr)	\
 82		: "cc");
 83
 84#else /* CONFIG_64BIT */
 85
 86#define __BITOPS_ALIGN		7
 87#define __BITOPS_WORDSIZE	64
 88#define __BITOPS_OR		"ogr"
 89#define __BITOPS_AND		"ngr"
 90#define __BITOPS_XOR		"xgr"
 91
 92#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
 93	asm volatile(						\
 94		"	lg	%0,%2\n"			\
 95		"0:	lgr	%1,%0\n"			\
 96		__op_string "	%1,%3\n"			\
 97		"	csg	%0,%1,%2\n"			\
 98		"	jl	0b"				\
 99		: "=&d" (__old), "=&d" (__new),			\
100		  "=Q" (*(unsigned long *) __addr)		\
101		: "d" (__val), "Q" (*(unsigned long *) __addr)	\
102		: "cc");
103
104#endif /* CONFIG_64BIT */
105
106#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
107#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
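__BITOPS_LOOP above is a classic compare-and-swap retry loop: load the old
value, compute old OP val into a new value, and repeat CS/CSG until no other
CPU modified the word in between; the pre-update value is left in __old. A
portable sketch of the same idea using the GCC __atomic built-ins (an
assumption for illustration; the kernel itself relies on the inline assembly
above):

/* Hypothetical portable equivalent of __BITOPS_LOOP with __BITOPS_OR. */
static inline unsigned long example_atomic_or(unsigned long *addr,
					      unsigned long val)
{
	unsigned long old, new;

	old = __atomic_load_n(addr, __ATOMIC_RELAXED);
	do {
		new = old | val;	/* the "__op_string" step */
	} while (!__atomic_compare_exchange_n(addr, &old, new, 0,
					      __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));
	return old;	/* pre-update value, like __old after the loop */
}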
108
109#ifdef CONFIG_SMP
110/*
111 * SMP safe set_bit routine based on compare and swap (CS)
112 */
113static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
114{
115        unsigned long addr, old, new, mask;
116
117	addr = (unsigned long) ptr;
118	/* calculate address for CS */
119	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
120	/* make OR mask */
121	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
122	/* Do the atomic update. */
123	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
124}
125
126/*
127 * SMP safe clear_bit routine based on compare and swap (CS)
128 */
129static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
130{
131        unsigned long addr, old, new, mask;
132
133	addr = (unsigned long) ptr;
134	/* calculate address for CS */
135	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
136	/* make AND mask */
137	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
138	/* Do the atomic update. */
139	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
140}
141
142/*
143 * SMP safe change_bit routine based on compare and swap (CS)
144 */
145static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
146{
147        unsigned long addr, old, new, mask;
148
149	addr = (unsigned long) ptr;
150	/* calculate address for CS */
151	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
152	/* make XOR mask */
153	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
154	/* Do the atomic update. */
155	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
156}
157
158/*
159 * SMP safe test_and_set_bit routine based on compare and swap (CS)
160 */
161static inline int
162test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
163{
164        unsigned long addr, old, new, mask;
165
166	addr = (unsigned long) ptr;
167	/* calculate address for CS */
168	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
169	/* make OR/test mask */
170	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
171	/* Do the atomic update. */
172	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
173	__BITOPS_BARRIER();
174	return (old & mask) != 0;
175}
176
177/*
178 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
179 */
180static inline int
181test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
182{
183        unsigned long addr, old, new, mask;
184
185	addr = (unsigned long) ptr;
186	/* calculate address for CS */
187	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
188	/* make AND/test mask */
189	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
190	/* Do the atomic update. */
191	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
192	__BITOPS_BARRIER();
193	return (old ^ new) != 0;
194}
195
196/*
197 * SMP safe test_and_change_bit routine based on compare and swap (CS) 
198 */
199static inline int
200test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
201{
202        unsigned long addr, old, new, mask;
203
204	addr = (unsigned long) ptr;
205	/* calculate address for CS */
206	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
207	/* make XOR/test mask */
208	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
209	/* Do the atomic update. */
210	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
211	__BITOPS_BARRIER();
212	return (old & mask) != 0;
213}
214#endif /* CONFIG_SMP */
215
216/*
217 * fast, non-SMP set_bit routine
218 */
219static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
220{
221	unsigned long addr;
222
223	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
224	asm volatile(
225		"	oc	%O0(1,%R0),%1"
226		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
227}
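The byte address above relies on the big-endian layout: XOR-ing nr with
__BITOPS_WORDSIZE - 8 before the shift flips only the byte-within-long part
of nr >> 3, while the long-index part passes through unchanged. A
hypothetical check of the arithmetic for the 64-bit case:

/* Illustration only:
 *   nr =  0: ( 0 ^ 56) >> 3 =  7  -> last byte of long 0 (holds bits 0-7)
 *   nr = 56: (56 ^ 56) >> 3 =  0  -> first byte of long 0 (bits 56-63)
 *   nr = 64: (64 ^ 56) >> 3 = 15  -> last byte of long 1 (bits 64-71)
 */
static inline unsigned long example_byte_offset(unsigned long nr)
{
	return (nr ^ (__BITOPS_WORDSIZE - 8)) >> 3;
}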
228
229static inline void 
230__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
231{
232	unsigned long addr;
233
234	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
235	*(unsigned char *) addr |= 1 << (nr & 7);
236}
237
238#define set_bit_simple(nr,addr) \
239(__builtin_constant_p((nr)) ? \
240 __constant_set_bit((nr),(addr)) : \
241 __set_bit((nr),(addr)) )
242
243/*
244 * fast, non-SMP clear_bit routine
245 */
246static inline void 
247__clear_bit(unsigned long nr, volatile unsigned long *ptr)
248{
249	unsigned long addr;
250
251	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
252	asm volatile(
253		"	nc	%O0(1,%R0),%1"
254		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
255}
256
257static inline void 
258__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
259{
260	unsigned long addr;
261
262	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
263	*(unsigned char *) addr &= ~(1 << (nr & 7));
264}
265
266#define clear_bit_simple(nr,addr) \
267(__builtin_constant_p((nr)) ? \
268 __constant_clear_bit((nr),(addr)) : \
269 __clear_bit((nr),(addr)) )
270
271/* 
272 * fast, non-SMP change_bit routine 
273 */
274static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
275{
276	unsigned long addr;
277
278	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
279	asm volatile(
280		"	xc	%O0(1,%R0),%1"
281		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
282}
283
284static inline void 
285__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr) 
286{
287	unsigned long addr;
288
289	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
290	*(unsigned char *) addr ^= 1 << (nr & 7);
291}
292
293#define change_bit_simple(nr,addr) \
294(__builtin_constant_p((nr)) ? \
295 __constant_change_bit((nr),(addr)) : \
296 __change_bit((nr),(addr)) )
297
298/*
299 * fast, non-SMP test_and_set_bit routine
300 */
301static inline int
302test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
303{
304	unsigned long addr;
305	unsigned char ch;
306
307	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
308	ch = *(unsigned char *) addr;
309	asm volatile(
310		"	oc	%O0(1,%R0),%1"
311		: "=Q" (*(char *) addr)	: "Q" (_oi_bitmap[nr & 7])
312		: "cc", "memory");
313	return (ch >> (nr & 7)) & 1;
314}
315#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)
316
317/*
318 * fast, non-SMP test_and_clear_bit routine
319 */
320static inline int
321test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
322{
323	unsigned long addr;
324	unsigned char ch;
325
326	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
327	ch = *(unsigned char *) addr;
328	asm volatile(
329		"	nc	%O0(1,%R0),%1"
330		: "=Q" (*(char *) addr)	: "Q" (_ni_bitmap[nr & 7])
331		: "cc", "memory");
332	return (ch >> (nr & 7)) & 1;
333}
334#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)
335
336/*
337 * fast, non-SMP test_and_change_bit routine
338 */
339static inline int
340test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
341{
342	unsigned long addr;
343	unsigned char ch;
344
345	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
346	ch = *(unsigned char *) addr;
347	asm volatile(
348		"	xc	%O0(1,%R0),%1"
349		: "=Q" (*(char *) addr)	: "Q" (_oi_bitmap[nr & 7])
350		: "cc", "memory");
351	return (ch >> (nr & 7)) & 1;
352}
353#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)
354
355#ifdef CONFIG_SMP
356#define set_bit             set_bit_cs
357#define clear_bit           clear_bit_cs
358#define change_bit          change_bit_cs
359#define test_and_set_bit    test_and_set_bit_cs
360#define test_and_clear_bit  test_and_clear_bit_cs
361#define test_and_change_bit test_and_change_bit_cs
362#else
363#define set_bit             set_bit_simple
364#define clear_bit           clear_bit_simple
365#define change_bit          change_bit_simple
366#define test_and_set_bit    test_and_set_bit_simple
367#define test_and_clear_bit  test_and_clear_bit_simple
368#define test_and_change_bit test_and_change_bit_simple
369#endif
370
371
372/*
373 * This routine doesn't need to be atomic.
374 */
375
376static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
377{
378	unsigned long addr;
379	unsigned char ch;
380
381	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
382	ch = *(volatile unsigned char *) addr;
383	return (ch >> (nr & 7)) & 1;
384}
385
386static inline int 
387__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
388    return (((volatile char *) addr)
389	    [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
390}
391
392#define test_bit(nr,addr) \
393(__builtin_constant_p((nr)) ? \
394 __constant_test_bit((nr),(addr)) : \
395 __test_bit((nr),(addr)) )
396
397/*
398 * Optimized find bit helper functions.
399 */
400
401/**
402 * __ffz_word_loop - find byte offset of first long != -1UL
403 * @addr: pointer to array of unsigned long
404 * @size: size of the array in bits
405 */
406static inline unsigned long __ffz_word_loop(const unsigned long *addr,
407					    unsigned long size)
408{
409	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
410	unsigned long bytes = 0;
411
412	asm volatile(
413#ifndef CONFIG_64BIT
414		"	ahi	%1,-1\n"
415		"	sra	%1,5\n"
416		"	jz	1f\n"
417		"0:	c	%2,0(%0,%3)\n"
418		"	jne	1f\n"
419		"	la	%0,4(%0)\n"
420		"	brct	%1,0b\n"
421		"1:\n"
422#else
423		"	aghi	%1,-1\n"
424		"	srag	%1,%1,6\n"
425		"	jz	1f\n"
426		"0:	cg	%2,0(%0,%3)\n"
427		"	jne	1f\n"
428		"	la	%0,8(%0)\n"
429		"	brct	%1,0b\n"
430		"1:\n"
431#endif
432		: "+&a" (bytes), "+&d" (size)
433		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
434		: "cc" );
435	return bytes;
436}
437
438/**
439 * __ffs_word_loop - find byte offset of first long != 0UL
440 * @addr: pointer to array of unsigned long
441 * @size: size of the array in bits
442 */
443static inline unsigned long __ffs_word_loop(const unsigned long *addr,
444					    unsigned long size)
445{
446	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
447	unsigned long bytes = 0;
448
449	asm volatile(
450#ifndef CONFIG_64BIT
451		"	ahi	%1,-1\n"
452		"	sra	%1,5\n"
453		"	jz	1f\n"
454		"0:	c	%2,0(%0,%3)\n"
455		"	jne	1f\n"
456		"	la	%0,4(%0)\n"
457		"	brct	%1,0b\n"
458		"1:\n"
459#else
460		"	aghi	%1,-1\n"
461		"	srag	%1,%1,6\n"
462		"	jz	1f\n"
463		"0:	cg	%2,0(%0,%3)\n"
464		"	jne	1f\n"
465		"	la	%0,8(%0)\n"
466		"	brct	%1,0b\n"
467		"1:\n"
468#endif
469		: "+&a" (bytes), "+&a" (size)
470		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
471		: "cc" );
472	return bytes;
473}
474
475/**
476 * __ffz_word - add number of the first unset bit
477 * @nr: base value the bit number is added to
478 * @word: the word that is searched for unset bits
479 */
480static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
481{
482#ifdef CONFIG_64BIT
483	if ((word & 0xffffffff) == 0xffffffff) {
484		word >>= 32;
485		nr += 32;
486	}
487#endif
488	if ((word & 0xffff) == 0xffff) {
489		word >>= 16;
490		nr += 16;
491	}
492	if ((word & 0xff) == 0xff) {
493		word >>= 8;
494		nr += 8;
495	}
496	return nr + _zb_findmap[(unsigned char) word];
497}
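A worked example, assuming the usual semantics of _zb_findmap (entry i holds
the first zero bit of byte value i): for word = 0xffffffffffff00ffUL the low
byte is all ones, so the word is shifted right by 8 and nr becomes 8; the new
low byte is 0x00 and _zb_findmap[0x00] == 0, giving a result of 8, the first
zero bit. For word == ~0UL every test shifts, nr reaches 56, and
_zb_findmap[0xff] == 8 yields 64, i.e. __BITOPS_WORDSIZE when no zero bit
exists.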
498
499/**
500 * __ffs_word - add number of the first set bit
501 * @nr: base value the bit number is added to
502 * @word: the word that is searched for set bits
503 */
504static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
505{
506#ifdef CONFIG_64BIT
507	if ((word & 0xffffffff) == 0) {
508		word >>= 32;
509		nr += 32;
510	}
511#endif
512	if ((word & 0xffff) == 0) {
513		word >>= 16;
514		nr += 16;
515	}
516	if ((word & 0xff) == 0) {
517		word >>= 8;
518		nr += 8;
519	}
520	return nr + _sb_findmap[(unsigned char) word];
521}
522
523
524/**
525 * __load_ulong_be - load big endian unsigned long
526 * @p: pointer to array of unsigned long
527 * @offset: byte offset of source value in the array
528 */
529static inline unsigned long __load_ulong_be(const unsigned long *p,
530					    unsigned long offset)
531{
532	p = (unsigned long *)((unsigned long) p + offset);
533	return *p;
534}
535
536/**
537 * __load_ulong_le - load little endian unsigned long
538 * @p: pointer to array of unsigned long
539 * @offset: byte offset of source value in the array
540 */
541static inline unsigned long __load_ulong_le(const unsigned long *p,
542					    unsigned long offset)
543{
544	unsigned long word;
545
546	p = (unsigned long *)((unsigned long) p + offset);
547#ifndef CONFIG_64BIT
548	asm volatile(
549		"	ic	%0,%O1(%R1)\n"
550		"	icm	%0,2,%O1+1(%R1)\n"
551		"	icm	%0,4,%O1+2(%R1)\n"
552		"	icm	%0,8,%O1+3(%R1)"
553		: "=&d" (word) : "Q" (*p) : "cc");
554#else
555	asm volatile(
556		"	lrvg	%0,%1"
557		: "=d" (word) : "m" (*p) );
558#endif
559	return word;
560}
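What the assembly computes, as a portable sketch (illustration only): the
long at the given byte offset is assembled least-significant byte first, so
little-endian bitmaps can then be searched with the same big-endian helpers
used above.

/* Hypothetical portable equivalent of __load_ulong_le. */
static inline unsigned long example_load_ulong_le(const void *p,
						  unsigned long offset)
{
	const unsigned char *b = (const unsigned char *) p + offset;
	unsigned long word = 0;
	int i;

	for (i = sizeof(word) - 1; i >= 0; i--)
		word = (word << 8) | b[i];	/* b[0] ends up as the LSB */
	return word;
}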
561
562/*
563 * The various find bit functions.
564 */
565
566/*
567 * ffz - find first zero in word.
568 * @word: The word to search
569 *
570 * Undefined if no zero exists, so code should check against ~0UL first.
571 */
572static inline unsigned long ffz(unsigned long word)
573{
574	return __ffz_word(0, word);
575}
576
577/**
578 * __ffs - find first bit in word.
579 * @word: The word to search
580 *
581 * Undefined if no bit exists, so code should check against 0 first.
582 */
583static inline unsigned long __ffs (unsigned long word)
584{
585	return __ffs_word(0, word);
586}
587
588/**
589 * ffs - find first bit set
590 * @x: the word to search
591 *
592 * This is defined the same way as
593 * the libc and compiler builtin ffs routines, therefore
594 * differs in spirit from the above ffz (man ffs).
595 */
596static inline int ffs(int x)
597{
598	if (!x)
599		return 0;
600	return __ffs_word(1, x);
601}
602
603/**
604 * find_first_zero_bit - find the first zero bit in a memory region
605 * @addr: The address to start the search at
606 * @size: The maximum size to search
607 *
608 * Returns the bit-number of the first zero bit, not the number of the byte
609 * containing a bit.
610 */
611static inline unsigned long find_first_zero_bit(const unsigned long *addr,
612						unsigned long size)
613{
614	unsigned long bytes, bits;
615
616        if (!size)
617                return 0;
618	bytes = __ffz_word_loop(addr, size);
619	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
620	return (bits < size) ? bits : size;
621}
622#define find_first_zero_bit find_first_zero_bit
623
624/**
625 * find_first_bit - find the first set bit in a memory region
626 * @addr: The address to start the search at
627 * @size: The maximum size to search
628 *
629 * Returns the bit-number of the first set bit, not the number of the byte
630 * containing a bit.
631 */
632static inline unsigned long find_first_bit(const unsigned long * addr,
633					   unsigned long size)
634{
635	unsigned long bytes, bits;
636
637        if (!size)
638                return 0;
639	bytes = __ffs_word_loop(addr, size);
640	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
641	return (bits < size) ? bits : size;
642}
643#define find_first_bit find_first_bit
644
645/**
646 * find_next_zero_bit - find the next zero bit in a memory region
647 * @addr: The address to base the search on
648 * @offset: The bitnumber to start searching at
649 * @size: The maximum size to search
650 */
651static inline int find_next_zero_bit (const unsigned long * addr,
652				      unsigned long size,
653				      unsigned long offset)
654{
655        const unsigned long *p;
656	unsigned long bit, set;
657
658	if (offset >= size)
659		return size;
660	bit = offset & (__BITOPS_WORDSIZE - 1);
661	offset -= bit;
662	size -= offset;
663	p = addr + offset / __BITOPS_WORDSIZE;
664	if (bit) {
665		/*
666		 * __ffz_word returns __BITOPS_WORDSIZE
667		 * if no zero bit is present in the word.
668		 */
669		set = __ffz_word(bit, *p >> bit);
670		if (set >= size)
671			return size + offset;
672		if (set < __BITOPS_WORDSIZE)
673			return set + offset;
674		offset += __BITOPS_WORDSIZE;
675		size -= __BITOPS_WORDSIZE;
676		p++;
677	}
678	return offset + find_first_zero_bit(p, size);
679}
680#define find_next_zero_bit find_next_zero_bit
681
682/**
683 * find_next_bit - find the next set bit in a memory region
684 * @addr: The address to base the search on
685 * @offset: The bitnumber to start searching at
686 * @size: The maximum size to search
687 */
688static inline int find_next_bit (const unsigned long * addr,
689				 unsigned long size,
690				 unsigned long offset)
691{
692        const unsigned long *p;
693	unsigned long bit, set;
694
695	if (offset >= size)
696		return size;
697	bit = offset & (__BITOPS_WORDSIZE - 1);
698	offset -= bit;
699	size -= offset;
700	p = addr + offset / __BITOPS_WORDSIZE;
701	if (bit) {
702		/*
703		 * __ffs_word returns __BITOPS_WORDSIZE
704		 * if no one bit is present in the word.
705		 */
706		set = __ffs_word(0, *p & (~0UL << bit));
707		if (set >= size)
708			return size + offset;
709		if (set < __BITOPS_WORDSIZE)
710			return set + offset;
711		offset += __BITOPS_WORDSIZE;
712		size -= __BITOPS_WORDSIZE;
713		p++;
714	}
715	return offset + find_first_bit(p, size);
716}
717#define find_next_bit find_next_bit
718
719/*
720 * Every architecture must define this function. It's the fastest
721 * way of searching a 140-bit bitmap where the first 100 bits are
722 * unlikely to be set. It's guaranteed that at least one of the 140
723 * bits is cleared.
724 */
725static inline int sched_find_first_bit(unsigned long *b)
726{
727	return find_first_bit(b, 140);
728}
729
730#include <asm-generic/bitops/fls.h>
731#include <asm-generic/bitops/__fls.h>
732#include <asm-generic/bitops/fls64.h>
733
734#include <asm-generic/bitops/hweight.h>
735#include <asm-generic/bitops/lock.h>
736
737/*
738 * ATTENTION: intel byte ordering convention for ext2 and minix !!
739 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
740 * bit 32 is the LSB of (addr+4).
741 * That combined with the little endian byte order of Intel gives the
742 * following bit order in memory:
743 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
744 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
745 */
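A hypothetical sketch of this byte-oriented numbering (illustration only,
not part of the header): bit nr of a little-endian bitmap lives in byte
nr / 8 at mask 1 << (nr % 8), independent of the word size.

/* Illustration only. */
static inline int example_test_bit_le(unsigned long nr, const void *vaddr)
{
	const unsigned char *p = vaddr;

	return (p[nr >> 3] >> (nr & 7)) & 1;
}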
746
747static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
748{
749	unsigned long bytes, bits;
750
751        if (!size)
752                return 0;
753	bytes = __ffz_word_loop(vaddr, size);
754	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
755	return (bits < size) ? bits : size;
756}
757#define find_first_zero_bit_le find_first_zero_bit_le
758
759static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
760					  unsigned long offset)
761{
762        unsigned long *addr = vaddr, *p;
763	unsigned long bit, set;
764
765        if (offset >= size)
766                return size;
767	bit = offset & (__BITOPS_WORDSIZE - 1);
768	offset -= bit;
769	size -= offset;
770	p = addr + offset / __BITOPS_WORDSIZE;
771        if (bit) {
772		/*
773		 * s390 version of ffz returns __BITOPS_WORDSIZE
774		 * if no zero bit is present in the word.
775		 */
776		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
777		if (set >= size)
778			return size + offset;
779		if (set < __BITOPS_WORDSIZE)
780			return set + offset;
781		offset += __BITOPS_WORDSIZE;
782		size -= __BITOPS_WORDSIZE;
783		p++;
784        }
785	return offset + find_first_zero_bit_le(p, size);
786}
787#define find_next_zero_bit_le find_next_zero_bit_le
788
789static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
790{
791	unsigned long bytes, bits;
792
793	if (!size)
794		return 0;
795	bytes = __ffs_word_loop(vaddr, size);
796	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
797	return (bits < size) ? bits : size;
798}
799#define find_first_bit_le find_first_bit_le
800
801static inline int find_next_bit_le(void *vaddr, unsigned long size,
802				     unsigned long offset)
803{
804	unsigned long *addr = vaddr, *p;
805	unsigned long bit, set;
806
807	if (offset >= size)
808		return size;
809	bit = offset & (__BITOPS_WORDSIZE - 1);
810	offset -= bit;
811	size -= offset;
812	p = addr + offset / __BITOPS_WORDSIZE;
813	if (bit) {
814		/*
815		 * s390 version of ffz returns __BITOPS_WORDSIZE
816		 * if no zero bit is present in the word.
817		 */
818		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
819		if (set >= size)
820			return size + offset;
821		if (set < __BITOPS_WORDSIZE)
822			return set + offset;
823		offset += __BITOPS_WORDSIZE;
824		size -= __BITOPS_WORDSIZE;
825		p++;
826	}
827	return offset + find_first_bit_le(p, size);
828}
829#define find_next_bit_le find_next_bit_le
830
831#include <asm-generic/bitops/le.h>
832
833#include <asm-generic/bitops/ext2-atomic-setbit.h>
834
835#endif /* _S390_BITOPS_H */
Linux v3.15: arch/s390/include/asm/bitops.h

  1/*
  2 *    Copyright IBM Corp. 1999,2013
  3 *
  4 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
  5 *
  6 * The description below was taken in large parts from the powerpc
  7 * bitops header file:
  8 * Within a word, bits are numbered LSB first.  Lots of places make
  9 * this assumption by directly testing bits with (val & (1<<nr)).
 10 * This can cause confusion for large (> 1 word) bitmaps on a
 11 * big-endian system because, unlike little endian, the number of each
 12 * bit depends on the word size.
 13 *
 14 * The bitop functions are defined to work on unsigned longs, so for an
 15 * s390x system the bits end up numbered:
 16 *   |63..............0|127............64|191...........128|255...........192|
 17 * and on s390:
 18 *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
 19 *
 20 * There are a few little-endian macros used mostly for filesystem
 21 * bitmaps; these work on similar bit array layouts, but
 22 * byte-oriented:
 23 *   |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
 24 *
 25 * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
 26 * number field need to be reversed compared to the big-endian bit
 27 * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
 28 *
 29 * We also have special functions which work with an MSB0 encoding:
 30 * on an s390x system the bits are numbered:
 31 *   |0..............63|64............127|128...........191|192...........255|
 32 * and on s390:
 33 *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
 34 *
 35 * The main difference is that bits 0-5 (64b) or 0-4 (32b) in the bit
 36 * number field need to be reversed compared to the LSB0 encoded bit
 37 * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
 38 *
 39 */
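Both conversions described above reduce to an XOR on the bit number. A
hypothetical sketch (illustration only; the header applies these
transformations inline, and the hex constants shown are the 64-bit values):

/* Illustration only. */
static inline unsigned long example_le_bit_nr(unsigned long nr)
{
	return nr ^ (BITS_PER_LONG - 8);	/* 0x38: flip bits 3-5 of nr */
}

static inline unsigned long example_msb0_bit_nr(unsigned long nr)
{
	return nr ^ (BITS_PER_LONG - 1);	/* 0x3f: flip bits 0-5 of nr */
}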
 40
 41#ifndef _S390_BITOPS_H
 42#define _S390_BITOPS_H
 43
 44#ifndef _LINUX_BITOPS_H
 45#error only <linux/bitops.h> can be included directly
 46#endif
 47
 48#include <linux/typecheck.h>
 49#include <linux/compiler.h>
 50#include <asm/barrier.h>
 51
 52#define __BITOPS_NO_BARRIER	"\n"
 53
 54#ifndef CONFIG_64BIT
 55
 56#define __BITOPS_OR		"or"
 57#define __BITOPS_AND		"nr"
 58#define __BITOPS_XOR		"xr"
 59#define __BITOPS_BARRIER	"\n"
 60
 61#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
 62({								\
 63	unsigned long __old, __new;				\
 64								\
 65	typecheck(unsigned long *, (__addr));			\
 66	asm volatile(						\
 67		"	l	%0,%2\n"			\
 68		"0:	lr	%1,%0\n"			\
 69		__op_string "	%1,%3\n"			\
 70		"	cs	%0,%1,%2\n"			\
 71		"	jl	0b"				\
 72		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
 73		: "d" (__val)					\
 74		: "cc", "memory");				\
 75	__old;							\
 76})
 77
 78#else /* CONFIG_64BIT */
 79
 80#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 81
 82#define __BITOPS_OR		"laog"
 83#define __BITOPS_AND		"lang"
 84#define __BITOPS_XOR		"laxg"
 85#define __BITOPS_BARRIER	"bcr	14,0\n"
 86
 87#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
 88({								\
 89	unsigned long __old;					\
 90								\
 91	typecheck(unsigned long *, (__addr));			\
 92	asm volatile(						\
 93		__barrier					\
 94		__op_string "	%0,%2,%1\n"			\
 95		__barrier					\
 96		: "=d" (__old),	"+Q" (*(__addr))		\
 97		: "d" (__val)					\
 98		: "cc", "memory");				\
 99	__old;							\
100})
101
102#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
103
104#define __BITOPS_OR		"ogr"
105#define __BITOPS_AND		"ngr"
106#define __BITOPS_XOR		"xgr"
107#define __BITOPS_BARRIER	"\n"
108
109#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
110({								\
111	unsigned long __old, __new;				\
112								\
113	typecheck(unsigned long *, (__addr));			\
114	asm volatile(						\
115		"	lg	%0,%2\n"			\
116		"0:	lgr	%1,%0\n"			\
117		__op_string "	%1,%3\n"			\
118		"	csg	%0,%1,%2\n"			\
119		"	jl	0b"				\
120		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
121		: "d" (__val)					\
122		: "cc", "memory");				\
123	__old;							\
124})
125
126#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
127
128#endif /* CONFIG_64BIT */
129
130#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
131
132static inline unsigned long *
133__bitops_word(unsigned long nr, volatile unsigned long *ptr)
134{
135	unsigned long addr;
136
137	addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
138	return (unsigned long *)addr;
139}
140
141static inline unsigned char *
142__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
143{
144	return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
145}
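The two helpers address the same bit at different granularities: the byte
returned by __bitops_byte holds bit nr & 7, and the long returned by
__bitops_word holds it at position nr & (BITS_PER_LONG - 1). A hypothetical
consistency check (illustration only):

/* Illustration only; always returns 1 on this layout. */
static inline int example_bit_consistency(unsigned long nr,
					  volatile unsigned long *ptr)
{
	unsigned long word = *__bitops_word(nr, ptr);
	unsigned char byte = *__bitops_byte(nr, ptr);
	int via_word = (word >> (nr & (BITS_PER_LONG - 1))) & 1;
	int via_byte = (byte >> (nr & 7)) & 1;

	return via_word == via_byte;
}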
146
147static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
148{
149	unsigned long *addr = __bitops_word(nr, ptr);
150	unsigned long mask;
151
152#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
153	if (__builtin_constant_p(nr)) {
154		unsigned char *caddr = __bitops_byte(nr, ptr);
155
156		asm volatile(
157			"oi	%0,%b1\n"
158			: "+Q" (*caddr)
159			: "i" (1 << (nr & 7))
160			: "cc", "memory");
161		return;
162	}
163#endif
164	mask = 1UL << (nr & (BITS_PER_LONG - 1));
165	__BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
166}
167
168static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
169{
170	unsigned long *addr = __bitops_word(nr, ptr);
171	unsigned long mask;
172
173#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
174	if (__builtin_constant_p(nr)) {
175		unsigned char *caddr = __bitops_byte(nr, ptr);
176
177		asm volatile(
178			"ni	%0,%b1\n"
179			: "+Q" (*caddr)
180			: "i" (~(1 << (nr & 7)))
181			: "cc", "memory");
182		return;
183	}
184#endif
185	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
186	__BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
187}
188
189static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
190{
191	unsigned long *addr = __bitops_word(nr, ptr);
192	unsigned long mask;
193
194#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
195	if (__builtin_constant_p(nr)) {
196		unsigned char *caddr = __bitops_byte(nr, ptr);
197
198		asm volatile(
199			"xi	%0,%b1\n"
200			: "+Q" (*caddr)
201			: "i" (1 << (nr & 7))
202			: "cc", "memory");
203		return;
204	}
205#endif
206	mask = 1UL << (nr & (BITS_PER_LONG - 1));
207	__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
208}
209
210static inline int
211test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
212{
213	unsigned long *addr = __bitops_word(nr, ptr);
214	unsigned long old, mask;
215
216	mask = 1UL << (nr & (BITS_PER_LONG - 1));
217	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
218	return (old & mask) != 0;
219}
220
221static inline int
222test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
223{
224	unsigned long *addr = __bitops_word(nr, ptr);
225	unsigned long old, mask;
226
227	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
228	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
229	return (old & ~mask) != 0;
230}
231
232static inline int
233test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
234{
235	unsigned long *addr = __bitops_word(nr, ptr);
236	unsigned long old, mask;
237
238	mask = 1UL << (nr & (BITS_PER_LONG - 1));
239	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
240	return (old & mask) != 0;
241}
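A typical use of the atomic test-and-modify primitives (a hypothetical
example, not taken from this header) is claiming a shared flag exactly once
across CPUs:

/* Illustration only. */
static unsigned long example_flags;

static inline int example_try_claim(void)
{
	/* old bit value 0 means this CPU won the claim */
	return test_and_set_bit(0, &example_flags) == 0;
}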
242
243static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
244{
245	unsigned char *addr = __bitops_byte(nr, ptr);
246
247	*addr |= 1 << (nr & 7);
248}
249
250static inline void 
251__clear_bit(unsigned long nr, volatile unsigned long *ptr)
252{
253	unsigned char *addr = __bitops_byte(nr, ptr);
254
255	*addr &= ~(1 << (nr & 7));
256}
257
258static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
259{
260	unsigned char *addr = __bitops_byte(nr, ptr);
261
262	*addr ^= 1 << (nr & 7);
263}
264
265static inline int
266__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
267{
268	unsigned char *addr = __bitops_byte(nr, ptr);
269	unsigned char ch;
270
271	ch = *addr;
272	*addr |= 1 << (nr & 7);
273	return (ch >> (nr & 7)) & 1;
274}
275
276static inline int
277__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
278{
279	unsigned char *addr = __bitops_byte(nr, ptr);
280	unsigned char ch;
281
282	ch = *addr;
283	*addr &= ~(1 << (nr & 7));
284	return (ch >> (nr & 7)) & 1;
285}
286
287static inline int
288__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
289{
290	unsigned char *addr = __bitops_byte(nr, ptr);
291	unsigned char ch;
292
293	ch = *addr;
294	*addr ^= 1 << (nr & 7);
295	return (ch >> (nr & 7)) & 1;
296}
297
298static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
299{
300	const volatile unsigned char *addr;
301
302	addr = ((const volatile unsigned char *)ptr);
303	addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
304	return (*addr >> (nr & 7)) & 1;
305}
306
307/*
308 * Functions which use MSB0 bit numbering.
309 * On an s390x system the bits are numbered:
310 *   |0..............63|64............127|128...........191|192...........255|
311 * and on s390:
312 *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
313 */
314unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
315unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
316				unsigned long offset);
317
318static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
319{
320	return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
321}
322
323static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
324{
325	return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
326}
327
328static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
329{
330	return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
331}
332
333static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
334{
335	return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
336}
337
338static inline int test_bit_inv(unsigned long nr,
339			       const volatile unsigned long *ptr)
340{
341	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
342}
343
344#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
345
346/**
347 * __flogr - find leftmost one
 348 * @word: The word to search
349 *
350 * Returns the bit number of the most significant bit set,
351 * where the most significant bit has bit number 0.
352 * If no bit is set this function returns 64.
353 */
354static inline unsigned char __flogr(unsigned long word)
355{
356	if (__builtin_constant_p(word)) {
357		unsigned long bit = 0;
358
359		if (!word)
360			return 64;
361		if (!(word & 0xffffffff00000000UL)) {
362			word <<= 32;
363			bit += 32;
364		}
365		if (!(word & 0xffff000000000000UL)) {
366			word <<= 16;
367			bit += 16;
368		}
369		if (!(word & 0xff00000000000000UL)) {
370			word <<= 8;
371			bit += 8;
372		}
373		if (!(word & 0xf000000000000000UL)) {
374			word <<= 4;
375			bit += 4;
376		}
377		if (!(word & 0xc000000000000000UL)) {
378			word <<= 2;
379			bit += 2;
380		}
381		if (!(word & 0x8000000000000000UL)) {
382			word <<= 1;
383			bit += 1;
384		}
385		return bit;
386	} else {
387		register unsigned long bit asm("4") = word;
388		register unsigned long out asm("5");
389
390		asm volatile(
391			"       flogr   %[bit],%[bit]\n"
392			: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
393		return bit;
394	}
395}
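Worked examples of the semantics above: __flogr(0) == 64 (no bit set),
__flogr(1UL << 63) == 0 (MSB set) and __flogr(1) == 63, so
__flogr(word) ^ (BITS_PER_LONG - 1) recovers the conventional LSB0 bit
number of the most significant set bit, which is exactly how __ffs and
__fls below use it.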
396
397/**
398 * __ffs - find first bit in word.
399 * @word: The word to search
400 *
401 * Undefined if no bit exists, so code should check against 0 first.
402 */
403static inline unsigned long __ffs(unsigned long word)
404{
405	return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
406}
407
408/**
409 * ffs - find first bit set
410 * @word: the word to search
411 *
412 * This is defined the same way as the libc and
413 * compiler builtin ffs routines (man ffs).
414 */
415static inline int ffs(int word)
416{
417	unsigned long mask = 2 * BITS_PER_LONG - 1;
418	unsigned int val = (unsigned int)word;
419
420	return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
421}
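The masking trick also covers zero input without a branch: __flogr(0) is 64,
so 1 + (64 ^ 63) == 128 and 128 & 127 == 0, i.e. ffs(0) == 0; for word == 1,
-val & val == 1, __flogr(1) == 63 and (1 + 0) & 127 == 1, matching the libc
convention.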
422
423/**
424 * __fls - find last (most-significant) set bit in a long word
425 * @word: the word to search
426 *
427 * Undefined if no set bit exists, so code should check against 0 first.
428 */
429static inline unsigned long __fls(unsigned long word)
430{
431	return __flogr(word) ^ (BITS_PER_LONG - 1);
432}
433
434/**
435 * fls64 - find last set bit in a 64-bit word
436 * @word: the word to search
437 *
438 * This is defined in a similar way as the libc and compiler builtin
439 * ffsll, but returns the position of the most significant set bit.
440 *
441 * fls64(value) returns 0 if value is 0 or the position of the last
442 * set bit if value is nonzero. The last (most significant) bit is
443 * at position 64.
444 */
445static inline int fls64(unsigned long word)
446{
447	unsigned long mask = 2 * BITS_PER_LONG - 1;
448
449	return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
450}
451
452/**
453 * fls - find last (most-significant) bit set
454 * @word: the word to search
455 *
456 * This is defined the same way as ffs.
457 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
458 */
459static inline int fls(int word)
460{
461	return fls64((unsigned int)word);
462}
463
464#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
465
466#include <asm-generic/bitops/__ffs.h>
467#include <asm-generic/bitops/ffs.h>
468#include <asm-generic/bitops/__fls.h>
469#include <asm-generic/bitops/fls.h>
470#include <asm-generic/bitops/fls64.h>
471
472#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
473
474#include <asm-generic/bitops/ffz.h>
475#include <asm-generic/bitops/find.h>
476#include <asm-generic/bitops/hweight.h>
477#include <asm-generic/bitops/lock.h>
478#include <asm-generic/bitops/sched.h>
479#include <asm-generic/bitops/le.h>
480#include <asm-generic/bitops/ext2-atomic-setbit.h>
481
482#endif /* _S390_BITOPS_H */