#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
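
/*
 * Illustrative only (not part of the build): under this numbering the
 * two styles named above address the same memory bit, e.g.
 *
 *        unsigned long flags[1] = { 0 };
 *
 *        flags[0] |= 1UL << 5;   // architecture independent style
 *        set_bit(5, flags);      // s390 bitops style, same bit in memory
 *
 * The byte containing bit nr is reached by complementing the byte index
 * within the word: byte = (nr ^ (__BITOPS_WORDSIZE - 8)) >> 3, which is
 * exactly the address arithmetic used by the non-atomic routines below.
 */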

/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef CONFIG_64BIT

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(							\
		" l %0,%2\n"						\
		"0: lr %1,%0\n"						\
		__op_string " %1,%3\n"					\
		" cs %0,%1,%2\n"					\
		" jl 0b"						\
		: "=&d" (__old), "=&d" (__new),				\
		  "=Q" (*(unsigned long *) __addr)			\
		: "d" (__val), "Q" (*(unsigned long *) __addr)		\
		: "cc");

#else /* CONFIG_64BIT */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(							\
		" lg %0,%2\n"						\
		"0: lgr %1,%0\n"					\
		__op_string " %1,%3\n"					\
		" csg %0,%1,%2\n"					\
		" jl 0b"						\
		: "=&d" (__old), "=&d" (__new),				\
		  "=Q" (*(unsigned long *) __addr)			\
		: "d" (__val), "Q" (*(unsigned long *) __addr)		\
		: "cc");

#endif /* CONFIG_64BIT */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
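
/*
 * For reference only (not compiled here): __BITOPS_LOOP above is the
 * assembler form of a plain compare-and-swap retry loop. A portable C
 * sketch of the same idea, assuming GCC's C11-style atomic builtins:
 *
 *	old = *addr;
 *	do {
 *		new = old <op> val;	// OR, AND or XOR
 *	} while (!__atomic_compare_exchange_n(addr, &old, new, 0,
 *					      __ATOMIC_RELAXED,
 *					      __ATOMIC_RELAXED));
 *
 * CS/CSG reloads the old value and retries until no other CPU has
 * modified the word in between.
 */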

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		" oc %O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr |= 1 << (nr & 7);
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		" nc %O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr &= ~(1 << (nr & 7));
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		" xc %O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr ^= 1 << (nr & 7);
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		" oc %O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		" nc %O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		" xc %O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif

/*
 * This routine doesn't need to be atomic.
 */

static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return (((volatile char *) addr)
		[(nr ^ (__BITOPS_WORDSIZE - 8)) >> 3] & (1 << (nr & 7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )

/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		" ahi %1,-1\n"
		" sra %1,5\n"
		" jz 1f\n"
		"0: c %2,0(%0,%3)\n"
		" jne 1f\n"
		" la %0,4(%0)\n"
		" brct %1,0b\n"
		"1:\n"
#else
		" aghi %1,-1\n"
		" srag %1,%1,6\n"
		" jz 1f\n"
		"0: cg %2,0(%0,%3)\n"
		" jne 1f\n"
		" la %0,8(%0)\n"
		" brct %1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&d" (size)
		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc");
	return bytes;
}

/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		" ahi %1,-1\n"
		" sra %1,5\n"
		" jz 1f\n"
		"0: c %2,0(%0,%3)\n"
		" jne 1f\n"
		" la %0,4(%0)\n"
		" brct %1,0b\n"
		"1:\n"
#else
		" aghi %1,-1\n"
		" srag %1,%1,6\n"
		" jz 1f\n"
		"0: cg %2,0(%0,%3)\n"
		" jne 1f\n"
		" la %0,8(%0)\n"
		" brct %1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&a" (size)
		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc");
	return bytes;
}

/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0xffffffff) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	return nr + _zb_findmap[(unsigned char) word];
}

/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0) {
		word >>= 8;
		nr += 8;
	}
	return nr + _sb_findmap[(unsigned char) word];
}
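
/*
 * Worked example (illustrative only): __ffs_word(0, 0x300000UL) finds
 * the low 32 bits nonzero, the low 16 bits zero (shift right by 16,
 * nr = 16), the remaining low byte 0x30 nonzero, and finishes with the
 * _sb_findmap[] lookup: the first set bit of 0x30 is bit 4, so the
 * result is 16 + 4 = 20, the number of the lowest set bit of the word.
 */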

/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
					    unsigned long offset)
{
	p = (unsigned long *)((unsigned long) p + offset);
	return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
					    unsigned long offset)
{
	unsigned long word;

	p = (unsigned long *)((unsigned long) p + offset);
#ifndef CONFIG_64BIT
	asm volatile(
		" ic %0,%O1(%R1)\n"
		" icm %0,2,%O1+1(%R1)\n"
		" icm %0,4,%O1+2(%R1)\n"
		" icm %0,8,%O1+3(%R1)"
		: "=&d" (word) : "Q" (*p) : "cc");
#else
	asm volatile(
		" lrvg %0,%1"
		: "=d" (word) : "m" (*p));
#endif
	return word;
}

/*
 * The various find bit functions.
 */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffz_word(0, word);
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, and therefore differs in spirit from the above
 * ffz (man ffs).
 */
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs_word(1, x);
}

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(addr, size);
	bits = __ffz_word(bytes * 8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit find_first_zero_bit

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_bit(const unsigned long *addr,
					   unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __ffs_word(bytes * 8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit find_first_bit

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit(const unsigned long *addr,
				     unsigned long size,
				     unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffz_word returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, *p >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}
#define find_next_zero_bit find_next_zero_bit

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_bit(const unsigned long *addr,
				unsigned long size,
				unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffs_word returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, *p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}
#define find_next_bit find_next_bit

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
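
/*
 * Example (illustrative only): in this convention bit 8 of an ext2
 * bitmap is the LSB of the second byte in memory, regardless of CPU
 * endianness. The _le helpers below therefore byte-swap each word
 * with __load_ulong_le() before searching it on big endian s390.
 */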

static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(vaddr, size);
	bits = __ffz_word(bytes * 8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
					unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit_le(p, size);
}
#define find_next_zero_bit_le find_next_zero_bit_le

static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(vaddr, size);
	bits = __ffs_word(bytes * 8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit_le find_first_bit_le

static inline int find_next_bit_le(void *vaddr, unsigned long size,
				   unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffs returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit_le(p, size);
}
#define find_next_bit_le find_next_bit_le

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999,2013
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 * The description below was taken in large parts from the powerpc
 * bitops header file:
 * Within a word, bits are numbered LSB first.  Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so the bits
 * end up numbered:
 * |63..............0|127............64|191...........128|255...........192|
 *
 * We also have special functions which work with an MSB0 encoding.
 * The bits are numbered:
 * |0..............63|64............127|128...........191|192...........255|
 *
 * The main difference is that bits 0-63 in the bit number field need to be
 * reversed compared to the LSB0 encoded bit fields. This can be achieved by
 * XOR with 0x3f.
 *
 */
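
/*
 * Example (illustrative only): in the LSB0 scheme bit 5 of a 64-bit
 * word is the bit selected by 1UL << 5; in the MSB0 scheme bit numbers
 * count from the other end, so set_bit_inv(5, ptr) below resolves to
 * set_bit(5 ^ 63, ptr), i.e. set_bit(58, ptr).
 */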

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/typecheck.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>

#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static inline unsigned long *
__bitops_word(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
	return (unsigned long *)addr;
}

static inline unsigned long __bitops_mask(unsigned long nr)
{
	return 1UL << (nr & (BITS_PER_LONG - 1));
}

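/*
 * Worked example (illustrative only): for nr = 70 on a 64-bit kernel,
 * nr ^ (nr & 63) == 64, so __bitops_word() advances ptr by 64 / 8 == 8
 * bytes to the second unsigned long, and __bitops_mask() selects bit
 * 1UL << (70 & 63) == 1UL << 6 within it.
 */
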
static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_or(mask, (long *)addr);
}

static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_and(~mask, (long *)addr);
}

static __always_inline void arch_change_bit(unsigned long nr,
					    volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_xor(mask, (long *)addr);
}

static inline bool arch_test_and_set_bit(unsigned long nr,
					 volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_or_barrier(mask, (long *)addr);
	return old & mask;
}

static inline bool arch_test_and_clear_bit(unsigned long nr,
					   volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_and_barrier(~mask, (long *)addr);
	return old & mask;
}

static inline bool arch_test_and_change_bit(unsigned long nr,
					    volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_xor_barrier(mask, (long *)addr);
	return old & mask;
}

static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	*addr |= mask;
}

static inline void arch___clear_bit(unsigned long nr,
				    volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	*addr &= ~mask;
}

static inline void arch___change_bit(unsigned long nr,
				     volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	*addr ^= mask;
}

static inline bool arch___test_and_set_bit(unsigned long nr,
					   volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *addr;
	*addr |= mask;
	return old & mask;
}

static inline bool arch___test_and_clear_bit(unsigned long nr,
					     volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *addr;
	*addr &= ~mask;
	return old & mask;
}

static inline bool arch___test_and_change_bit(unsigned long nr,
					      volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *addr;
	*addr ^= mask;
	return old & mask;
}

static inline bool arch_test_bit(unsigned long nr,
				 const volatile unsigned long *ptr)
{
	const volatile unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	return *addr & mask;
}

static inline bool arch_test_and_set_bit_lock(unsigned long nr,
					      volatile unsigned long *ptr)
{
	if (arch_test_bit(nr, ptr))
		return true;
	return arch_test_and_set_bit(nr, ptr);
}

static inline void arch_clear_bit_unlock(unsigned long nr,
					 volatile unsigned long *ptr)
{
	smp_mb__before_atomic();
	arch_clear_bit(nr, ptr);
}

static inline void arch___clear_bit_unlock(unsigned long nr,
					   volatile unsigned long *ptr)
{
	smp_mb();
	arch___clear_bit(nr, ptr);
}

#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>

/*
 * Functions which use MSB0 bit numbering.
 * The bits are numbered:
 * |0..............63|64............127|128...........191|192...........255|
 */
unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
				unsigned long offset);

#define for_each_set_bit_inv(bit, addr, size)				\
	for ((bit) = find_first_bit_inv((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_inv((addr), (size), (bit) + 1))
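
/*
 * Usage sketch (illustrative only; handle_bit() is a made-up callback,
 * and bits[] is assumed to be filled in elsewhere):
 *
 *	unsigned long bits[2];
 *	unsigned int bit;
 *
 *	for_each_set_bit_inv(bit, bits, 128)
 *		handle_bit(bit);
 */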

static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline bool test_and_clear_bit_inv(unsigned long nr,
					  volatile unsigned long *ptr)
{
	return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline bool test_bit_inv(unsigned long nr,
				const volatile unsigned long *ptr)
{
	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES

/**
 * __flogr - find leftmost one
 * @word: The word to search
 *
 * Returns the bit number of the most significant bit set,
 * where the most significant bit has bit number 0.
 * If no bit is set this function returns 64.
 */
static inline unsigned char __flogr(unsigned long word)
{
	if (__builtin_constant_p(word)) {
		unsigned long bit = 0;

		if (!word)
			return 64;
		if (!(word & 0xffffffff00000000UL)) {
			word <<= 32;
			bit += 32;
		}
		if (!(word & 0xffff000000000000UL)) {
			word <<= 16;
			bit += 16;
		}
		if (!(word & 0xff00000000000000UL)) {
			word <<= 8;
			bit += 8;
		}
		if (!(word & 0xf000000000000000UL)) {
			word <<= 4;
			bit += 4;
		}
		if (!(word & 0xc000000000000000UL)) {
			word <<= 2;
			bit += 2;
		}
		if (!(word & 0x8000000000000000UL)) {
			word <<= 1;
			bit += 1;
		}
		return bit;
	} else {
		union register_pair rp;

		rp.even = word;
		asm volatile(
			" flogr %[rp],%[rp]\n"
			: [rp] "+d" (rp.pair) : : "cc");
		return rp.even;
	}
}
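
/*
 * Worked values (illustrative only): __flogr(1UL << 63) == 0,
 * __flogr(1) == 63 and __flogr(0) == 64, matching the semantics of
 * the FLOGR (find leftmost one) instruction.
 */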

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
}

/**
 * ffs - find first bit set
 * @word: the word to search
 *
 * This is defined the same way as the libc and
 * compiler builtin ffs routines (man ffs).
 */
static inline int ffs(int word)
{
	unsigned long mask = 2 * BITS_PER_LONG - 1;
	unsigned int val = (unsigned int)word;

	return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
}
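
/*
 * Note on the mask trick above (illustrative): for word == 0,
 * __flogr() returns 64, so 1 + (64 ^ 63) == 128; masking with
 * 2 * BITS_PER_LONG - 1 == 127 folds this to 0, yielding ffs(0) == 0
 * without a branch.
 */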

/**
 * __fls - find last (most-significant) set bit in a long word
 * @word: the word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	return __flogr(word) ^ (BITS_PER_LONG - 1);
}

/**
 * fls64 - find last set bit in a 64-bit word
 * @word: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
static inline int fls64(unsigned long word)
{
	unsigned long mask = 2 * BITS_PER_LONG - 1;

	return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
}
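
/*
 * Worked values (illustrative only): fls64(0) == 0, fls64(1) == 1 and
 * fls64(0x8000000000000000UL) == 64, using the same zero-folding mask
 * trick as ffs() above.
 */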

/**
 * fls - find last (most-significant) bit set
 * @word: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int word)
{
	return fls64(word);
}

#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */