/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    Copyright IBM Corp. 1999,2013
 *
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 * The description below was taken in large parts from the powerpc
 * bitops header file:
 * Within a word, bits are numbered LSB first. Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so the bits
 * end up numbered:
 *   |63..............0|127............64|191...........128|255...........192|
 *
 * We also have special functions which work with an MSB0 encoding.
 * The bits are numbered:
 *   |0..............63|64............127|128...........191|192...........255|
 *
 * The main difference is that bits 0-63 in the bit number field need to be
 * reversed compared to the LSB0 encoded bit fields. This can be achieved by
 * XOR with 0x3f.
 *
 */
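/*
 * For illustration: within each 64-bit word the MSB0 bit number is the
 * LSB0 bit number XORed with 0x3f, so MSB0 bit 0 is LSB0 bit 63, MSB0
 * bit 5 is LSB0 bit 58, and MSB0 bit 64 is LSB0 bit 127 of the bitmap.
 */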

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/typecheck.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>

#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static inline unsigned long *
__bitops_word(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
	return (unsigned long *)addr;
}

static inline unsigned long __bitops_mask(unsigned long nr)
{
	return 1UL << (nr & (BITS_PER_LONG - 1));
}
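/*
 * For example, with nr = 70 on a 64-bit system, __bitops_word() advances
 * ptr by ((70 ^ (70 & 63)) >> 3) = 8 bytes, i.e. to the second unsigned
 * long of the bitmap, and __bitops_mask() returns 1UL << (70 & 63), i.e.
 * 1UL << 6.
 *
 * The arch_*_bit() functions below are the atomic bitops; they are built
 * on the interlocked-update helpers from asm/atomic_ops.h.
 */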

static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_or(mask, (long *)addr);
}

static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_and(~mask, (long *)addr);
}

static __always_inline void arch_change_bit(unsigned long nr,
					    volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_xor(mask, (long *)addr);
}

static inline bool arch_test_and_set_bit(unsigned long nr,
					 volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_or_barrier(mask, (long *)addr);
	return old & mask;
}

static inline bool arch_test_and_clear_bit(unsigned long nr,
					   volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_and_barrier(~mask, (long *)addr);
	return old & mask;
}

static inline bool arch_test_and_change_bit(unsigned long nr,
					    volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_xor_barrier(mask, (long *)addr);
	return old & mask;
}
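/*
 * The test-and-modify operations above use the *_barrier() helpers since
 * they must act as full memory barriers; the plain arch_set_bit(),
 * arch_clear_bit() and arch_change_bit() do not imply any ordering.
 *
 * The arch___*_bit() variants below are the non-atomic versions; callers
 * must serialize concurrent updates to the same word themselves.
 */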

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);

	*p |= mask;
}

static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);

	*p &= ~mask;
}

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);

	*p ^= mask;
}

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *p;
	*p |= mask;
	return old & mask;
}

static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *p;
	*p &= ~mask;
	return old & mask;
}

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *p;
	*p ^= mask;
	return old & mask;
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

static inline bool arch_test_and_set_bit_lock(unsigned long nr,
					      volatile unsigned long *ptr)
{
	if (arch_test_bit(nr, ptr))
		return true;
	return arch_test_and_set_bit(nr, ptr);
}
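/*
 * arch_test_and_set_bit_lock() does a plain test first so that the
 * interlocked update is skipped when the bit (i.e. the lock) is already
 * set.
 */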

static inline void arch_clear_bit_unlock(unsigned long nr,
					 volatile unsigned long *ptr)
{
	smp_mb__before_atomic();
	arch_clear_bit(nr, ptr);
}

static inline void arch___clear_bit_unlock(unsigned long nr,
					    volatile unsigned long *ptr)
{
	smp_mb();
	arch___clear_bit(nr, ptr);
}

#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>
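/*
 * The instrumented-*.h wrappers above define the generic set_bit(),
 * clear_bit(), test_bit(), ... entry points on top of the arch_*
 * implementations and add KASAN/KCSAN instrumentation of the accessed
 * memory.
 */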

/*
 * Functions which use MSB0 bit numbering.
 * The bits are numbered:
 *   |0..............63|64............127|128...........191|192...........255|
 */
unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
				unsigned long offset);

#define for_each_set_bit_inv(bit, addr, size)				\
	for ((bit) = find_first_bit_inv((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_inv((addr), (size), (bit) + 1))

static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline bool test_and_clear_bit_inv(unsigned long nr,
					  volatile unsigned long *ptr)
{
	return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline bool test_bit_inv(unsigned long nr,
				const volatile unsigned long *ptr)
{
	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
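/*
 * Minimal usage sketch (hypothetical bitmap, not part of this header):
 *
 *	DECLARE_BITMAP(map, 256);
 *	unsigned long bit;
 *
 *	set_bit_inv(0, map);	(sets LSB0 bit 63, the MSB of map[0])
 *	for_each_set_bit_inv(bit, map, 256)
 *		pr_debug("MSB0 bit %lu set\n", bit);
 */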

/**
 * __flogr - find leftmost one
 * @word: The word to search
 *
 * Returns the bit number of the most significant bit set,
 * where the most significant bit has bit number 0.
 * If no bit is set this function returns 64.
 */
static inline unsigned char __flogr(unsigned long word)
{
	if (__builtin_constant_p(word)) {
		unsigned long bit = 0;

		if (!word)
			return 64;
		if (!(word & 0xffffffff00000000UL)) {
			word <<= 32;
			bit += 32;
		}
		if (!(word & 0xffff000000000000UL)) {
			word <<= 16;
			bit += 16;
		}
		if (!(word & 0xff00000000000000UL)) {
			word <<= 8;
			bit += 8;
		}
		if (!(word & 0xf000000000000000UL)) {
			word <<= 4;
			bit += 4;
		}
		if (!(word & 0xc000000000000000UL)) {
			word <<= 2;
			bit += 2;
		}
		if (!(word & 0x8000000000000000UL)) {
			word <<= 1;
			bit += 1;
		}
		return bit;
	} else {
		union register_pair rp;

		rp.even = word;
		asm volatile(
			"	flogr	%[rp],%[rp]\n"
			: [rp] "+d" (rp.pair) : : "cc");
		return rp.even;
	}
}
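/*
 * For example: __flogr(0) = 64, __flogr(1UL << 63) = 0, __flogr(1) = 63.
 */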

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
}
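/*
 * For example: __ffs(0x50) isolates the lowest set bit (0x10) and returns
 * its LSB0 position, 4.
 */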

/**
 * ffs - find first bit set
 * @word: the word to search
 *
 * This is defined the same way as the libc and
 * compiler builtin ffs routines (man ffs).
 */
static inline int ffs(int word)
{
	unsigned long mask = 2 * BITS_PER_LONG - 1;
	unsigned int val = (unsigned int)word;

	return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
}
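/*
 * For example: ffs(0) = 0, ffs(1) = 1, ffs(0x80000000) = 32; bit
 * positions are 1-based, matching the libc convention.
 */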

/**
 * __fls - find last (most-significant) set bit in a long word
 * @word: the word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	return __flogr(word) ^ (BITS_PER_LONG - 1);
}
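/*
 * For example: __fls(0x80) = 7, the LSB0 position of the highest set bit.
 */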

/**
 * fls64 - find last set bit in a 64-bit word
 * @word: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
static inline int fls64(unsigned long word)
{
	unsigned long mask = 2 * BITS_PER_LONG - 1;

	return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
}
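/*
 * For example: fls64(0) = 0, fls64(1) = 1, fls64(1UL << 63) = 64.
 */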

/**
 * fls - find last (most-significant) bit set
 * @word: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int word)
{
	return fls64(word);
}

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */