v5.4 (arch/arc/include/asm/bitops.h)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>
#ifndef CONFIG_ARC_HAS_LLSC
#include <asm/smp.h>
#endif

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * Hardware assisted Atomic-R-M-W
 */

#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned int temp;						\
									\
	m += nr >> 5;							\
									\
	nr &= 0x1f;							\
									\
	__asm__ __volatile__(						\
	"1:	llock       %0, [%1]		\n"			\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond       %0, [%1]		\n"			\
	"	bnz         1b			\n"			\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(m),	/* Not "m": llock only supports reg direct addr mode */	\
	  "ir"(nr)							\
	: "cc");							\
}
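
/*
 * The loop above is the classic load-locked/store-conditional retry
 * pattern: LLOCK loads the word and arms a reservation on its address,
 * SCOND stores the updated value only if the reservation is still
 * intact (no intervening writer), and BNZ loops back to retry until
 * the store succeeds, making the read-modify-write atomic. Roughly,
 * in C (with a hypothetical store_conditional() standing in for scond):
 *
 *	do {
 *		old = *m;			// llock
 *		new = old | (1UL << nr);	// bset, for the set_bit() case
 *	} while (!store_conditional(m, new));	// scond + bnz
 */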

/*
 * Semantically:
 *    Test the bit
 *    if clear
 *        set it and return 0 (old value)
 *    else
 *        return 1 (old value).
 *
 * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
 * and the old value of the bit is returned
 */
#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old, temp;					\
									\
	m += nr >> 5;							\
									\
	nr &= 0x1f;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock       %0, [%2]	\n"				\
	"	" #asm_op " %1, %0, %3	\n"				\
	"	scond       %1, [%2]	\n"				\
	"	bnz         1b		\n"				\
	: "=&r"(old), "=&r"(temp)					\
	: "r"(m), "ir"(nr)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return (old & (1 << nr)) != 0;					\
}
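
/*
 * Usage sketch (illustrative only, not part of this header): the
 * test-and-set form enables "claim once" idioms. The names below
 * (driver_state, ST_INIT_DONE) are hypothetical.
 *
 *	static unsigned long driver_state;
 *	#define ST_INIT_DONE	0
 *
 *	if (!test_and_set_bit(ST_INIT_DONE, &driver_state))
 *		do_one_time_init();	// only the first caller runs this
 */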

#elif !defined(CONFIG_ARC_PLAT_EZNPS)

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 *
 * There's "significant" micro-optimization in writing our own variants of
 * bitops (over generic variants)
 *
 * (1) The generic APIs have "signed" @nr while we have it "unsigned"
 *     This avoids extra code being generated for pointer arithmetic, since
 *     the compiler is "not sure" that the index is NOT -ve
 * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL etc.)
 *     only consider the bottom 5 bits of @nr, so NO need to mask them off.
 *     (GCC Quirk: however for constant @nr we still need to do the masking
 *             at compile time)
 */

#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long temp, flags;					\
	m += nr >> 5;							\
									\
	/*								\
	 * spin lock/unlock provide the needed smp_mb() before/after	\
	 */								\
	bitops_lock(flags);						\
									\
	temp = *m;							\
	*m = temp c_op (1UL << (nr & 0x1f));				\
									\
	bitops_unlock(flags);						\
}

#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old, flags;					\
	m += nr >> 5;							\
									\
	bitops_lock(flags);						\
									\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
									\
	bitops_unlock(flags);						\
									\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}

#else /* CONFIG_ARC_PLAT_EZNPS */

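/*
 * NPS400 (EZNPS) provides custom atomic instructions, referenced via the
 * CTOP_INST_* encodings below. These are emitted verbatim with ".word",
 * presumably because the assembler does not know the encodings, and the
 * operands are staged in r2/r3 as the instructions expect (hence the
 * r2/r3 clobbers).
 */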
#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	m += nr >> 5;							\
									\
	nr = (1UL << (nr & 0x1f));					\
	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
		nr = ~nr;						\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(nr), "r"(m), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
}

#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old;						\
									\
	m += nr >> 5;							\
									\
	nr = old = (1UL << (nr & 0x1f));				\
	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
		old = ~old;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(old)							\
	: "r"(m), "i"(asm_op)						\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return (old & nr) != 0;						\
}

#endif /* CONFIG_ARC_PLAT_EZNPS */

/***************************************
 * Non atomic variants
 **************************************/

#define __BIT_OP(op, c_op, asm_op)					\
static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)	\
{									\
	unsigned long temp;						\
	m += nr >> 5;							\
									\
	temp = *m;							\
	*m = temp c_op (1UL << (nr & 0x1f));				\
}

#define __TEST_N_BIT_OP(op, c_op, asm_op)				\
static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old;						\
	m += nr >> 5;							\
									\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
									\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}

#define BIT_OPS(op, c_op, asm_op)					\
									\
	/* set_bit(), clear_bit(), change_bit() */			\
	BIT_OP(op, c_op, asm_op)					\
									\
	/* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
	TEST_N_BIT_OP(op, c_op, asm_op)					\
									\
	/* __set_bit(), __clear_bit(), __change_bit() */		\
	__BIT_OP(op, c_op, asm_op)					\
									\
	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
	__TEST_N_BIT_OP(op, c_op, asm_op)

#ifndef CONFIG_ARC_PLAT_EZNPS
BIT_OPS(set, |, bset)
BIT_OPS(clear, & ~, bclr)
BIT_OPS(change, ^, bxor)
#else
BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3)
BIT_OPS(clear, & ~, CTOP_INST_AAND_DI_R2_R2_R3)
BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3)
#endif
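
/*
 * For reference, each BIT_OPS() line above stamps out four functions;
 * e.g. BIT_OPS(set, |, bset) generates:
 *
 *	set_bit(nr, m)			atomic
 *	test_and_set_bit(nr, m)		atomic, returns old bit
 *	__set_bit(nr, m)		non-atomic
 *	__test_and_set_bit(nr, m)	non-atomic, returns old bit
 *
 * with "|" as the C operator and "bset" as the asm op.
 */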

/*
 * This routine doesn't need to be atomic.
 */
static inline int
test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;

	mask = 1UL << (nr & 0x1f);

	return ((mask & *addr) != 0);
}

#ifdef CONFIG_ISA_ARCOMPACT

/*
 * Count the number of zeros, starting from MSB
 * Helper for fls( ) friends
 * This is a pure count, so (1-32) or (0-31) doesn't apply
 * It could be 0 to 32, based on num of 0's in there
 * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
 */
static inline __attribute__ ((const)) int clz(unsigned int x)
{
	unsigned int res;

	__asm__ __volatile__(
	"	norm.f  %0, %1		\n"
	"	mov.n   %0, 0		\n"
	"	add.p   %0, %0, 1	\n"
	: "=r"(res)
	: "r"(x)
	: "cc");

	return res;
}

static inline int constant_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned int x)
{
	if (__builtin_constant_p(x))
		return constant_fls(x);

	return 32 - clz(x);
}

/*
 * __fls: Similar to fls, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	if (!x)
		return 0;
	else
		return fls(x) - 1;
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })
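
/*
 * The (__t & -__t) trick isolates the lowest set bit: in two's
 * complement, negation flips all bits above the lowest set bit, so the
 * AND keeps only that bit. E.g. __t = 0b10100 gives __t & -__t =
 * 0b00100, and fls() of that lone bit is its 1-based index, here 3.
 */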

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
	if (!word)
		return word;

	return ffs(word) - 1;
}

#else	/* CONFIG_ISA_ARCV2 */

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	int n;

	asm volatile(
	"	fls.f	%0, %1		\n"  /* 0:31; 0(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}

/*
 * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	/* FLS insn has exactly same semantics as the API */
	return	__builtin_arc_fls(x);
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
static inline __attribute__ ((const)) int ffs(unsigned long x)
{
	int n;

	asm volatile(
	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
{
	unsigned long n;

	asm volatile(
	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
	: "=r"(n)
	: "r"(x)
	: "cc");

	return n;
}

#endif	/* CONFIG_ISA_ARCOMPACT */

/*
 * ffz = Find First Zero in word.
 * @return: [0-31], 32 if all 1's
 */
#define ffz(x)	__ffs(~(x))
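
/*
 * Worked example across the family, for x = 0x000000A0 (bits 5 and 7 set):
 *	fls(x)   = 8	(1-based index of highest set bit)
 *	__fls(x) = 7	(0-based)
 *	ffs(x)   = 6	(1-based index of lowest set bit)
 *	__ffs(x) = 5	(0-based)
 *	ffz(x)   = 0	(bit 0 is the first zero)
 */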

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* !__ASSEMBLY__ */

#endif
v3.15 (arch/arc/include/asm/bitops.h)
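This older revision predates the SPDX license tag and the explicit smp_mb() barriers that v5.4 places around the LLSC test_and_*() operations; it also spells each bitop out by hand rather than generating them with the BIT_OP()/BIT_OPS() macros.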
 
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>

/*
 * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
 * The Kconfig glue ensures that in SMP, this is only set if the container
 * SoC/platform has cross-core coherent LLOCK/SCOND
 */
#if defined(CONFIG_ARC_HAS_LLSC)

static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bset    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bclr    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

static inline void change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bxor    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

/*
 * Semantically:
 *    Test the bit
 *    if clear
 *        set it and return 0 (old value)
 *    else
 *        return 1 (old value).
 *
 * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
 * and the old value of the bit is returned
 */
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bset    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b		\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bclr    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b		\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bxor    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b		\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#include <asm/smp.h>

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 *
 * There's "significant" micro-optimization in writing our own variants of
 * bitops (over generic variants)
 *
 * (1) The generic APIs have "signed" @nr while we have it "unsigned"
 *     This avoids extra code being generated for pointer arithmetic, since
 *     the compiler is "not sure" that the index is NOT -ve
 * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL etc.)
 *     only consider the bottom 5 bits of @nr, so NO need to mask them off.
 *     (GCC Quirk: however for constant @nr we still need to do the masking
 *             at compile time)
 */

static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp | (1UL << nr);

	bitops_unlock(flags);
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp & ~(1UL << nr);

	bitops_unlock(flags);
}

static inline void change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp ^ (1UL << nr);

	bitops_unlock(flags);
}

static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old | (1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old & ~(1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old ^ (1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

#endif /* CONFIG_ARC_HAS_LLSC */

/***************************************
 * Non atomic variants
 **************************************/

static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp | (1UL << nr);
}

static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp & ~(1UL << nr);
}

static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp ^ (1UL << nr);
}

static inline int
__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old | (1 << nr);

	return (old & (1 << nr)) != 0;
}

static inline int
__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old & ~(1 << nr);

	return (old & (1 << nr)) != 0;
}

static inline int
__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old ^ (1 << nr);

	return (old & (1 << nr)) != 0;
}

/*
 * This routine doesn't need to be atomic.
 */
static inline int
__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) &
		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static inline int
__test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;

	/* ARC700 only considers 5 bits in bit-fiddling insn */
	mask = 1 << nr;

	return ((mask & *addr) != 0);
}

#define test_bit(nr, addr)	(__builtin_constant_p(nr) ? \
					__constant_test_bit((nr), (addr)) : \
					__test_bit((nr), (addr)))
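
/*
 * Why the constant/non-constant split above: __test_bit() relies on the
 * hardware truncating the shift count to 5 bits (see the "GCC Quirk"
 * note earlier), which the compiler cannot assume when it folds a
 * constant nr at compile time, so __constant_test_bit() masks nr
 * explicitly with (nr & 31).
 */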

/*
 * Count the number of zeros, starting from MSB
 * Helper for fls( ) friends
 * This is a pure count, so (1-32) or (0-31) doesn't apply
 * It could be 0 to 32, based on num of 0's in there
 * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
 */
static inline __attribute__ ((const)) int clz(unsigned int x)
{
	unsigned int res;

	__asm__ __volatile__(
	"	norm.f  %0, %1		\n"
	"	mov.n   %0, 0		\n"
	"	add.p   %0, %0, 1	\n"
	: "=r"(res)
	: "r"(x)
	: "cc");

	return res;
}

static inline int constant_fls(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	if (__builtin_constant_p(x))
		return constant_fls(x);

	return 32 - clz(x);
}

/*
 * __fls: Similar to fls, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	if (!x)
		return 0;
	else
		return fls(x) - 1;
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __ffs(unsigned long word)
{
	if (!word)
		return word;

	return ffs(word) - 1;
}

/*
 * ffz = Find First Zero in word.
 * @return: [0-31], 32 if all 1's
 */
#define ffz(x)	__ffs(~(x))

/* TODO does this affect uni-processor code */
#define smp_mb__before_clear_bit()  barrier()
#define smp_mb__after_clear_bit()   barrier()

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif