v4.17
 
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory"); /* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}

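/*
 * Editorial sketch (not part of this header): the retry loop that callers
 * typically build on a cmpxchg-style primitive like the one above. GCC's
 * __sync_val_compare_and_swap() builtin stands in for the kernel's
 * cmpxchg() so the fragment compiles as plain userspace C.
 */
static void add_via_cmpxchg(unsigned long *p, unsigned long inc)
{
	unsigned long old, seen;

	do {
		old = *p;			/* snapshot current value */
		/* returns the old value; stores only if *p still == old */
		seen = __sync_val_compare_and_swap(p, old, old + inc);
	} while (seen != old);			/* lost a race: retry */
}
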
#elif !defined(CONFIG_ARC_PLAT_EZNPS)

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	int prev;
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

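/*
 * Editorial sketch: the same lock-based emulation pattern as above, in
 * portable userspace C, with a pthread mutex standing in for
 * atomic_ops_lock()/atomic_ops_unlock(). The single shared lock is what
 * makes the read-compare-write sequence atomic with respect to every
 * other user of the same lock.
 */
#include <pthread.h>

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned long emu_cmpxchg(volatile unsigned long *p,
				 unsigned long expected, unsigned long new)
{
	unsigned long prev;

	pthread_mutex_lock(&ops_lock);
	prev = *p;			/* read ... */
	if (prev == expected)		/* ... compare ... */
		*p = new;		/* ... conditionally write */
	pthread_mutex_unlock(&ops_lock);
	return prev;
}
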
#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	/*
	 * Explicit full memory barrier needed before/after
	 */
	smp_mb();

	write_aux_reg(CTOP_AUX_GPA1, expected);

	__asm__ __volatile__(
	"	mov r2, %0\n"
	"	mov r3, %1\n"
	"	.word %2\n"
	"	mov %0, r2"
	: "+r"(new)
	: "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
	: "r2", "r3", "memory");

	smp_mb();

	return new;
}

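/*
 * Editorial note: the ".word %2" above emits the raw 32-bit encoding of
 * a custom EZNPS exchange-and-compare instruction that the stock
 * assembler doesn't know. A minimal sketch of the same emission
 * technique follows; the opcode value here is a made-up placeholder,
 * not a real instruction, so actually executing it would fault.
 */
#define RAW_INSN_PLACEHOLDER	0x0	/* hypothetical encoding */

static inline void emit_raw_insn(void)
{
	__asm__ __volatile__(".word %0" : : "i"(RAW_INSN_PLACEHOLDER));
}
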
#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
				(unsigned long)(o), (unsigned long)(n)))

/*
 * atomic_cmpxchg is the same as cmpxchg
 *   LLSC: differs only in data type; the semantics are exactly the same
 *  !LLSC: cmpxchg() has to use the external lock atomic_ops_lock to guarantee
 *         the semantics, and this lock also happens to be used by atomic_*()
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))


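/*
 * Editorial sketch: the kind of caller atomic_cmpxchg() exists for,
 * modelled on the kernel's atomic_add_unless(): add @a to @v unless it
 * currently holds @u. Illustrative only; not this header's code.
 */
static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	while (c != u) {
		int old = atomic_cmpxchg(v, c, c + a);
		if (old == c)
			return 1;	/* we installed c + a */
		c = old;		/* raced with someone: retry */
	}
	return 0;			/* value was u, nothing done */
}
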
#ifndef CONFIG_ARC_PLAT_EZNPS

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))

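/*
 * Editorial sketch: a test-and-set lock is the canonical use of an
 * atomic exchange like the EX-based _xchg() above. This is not how the
 * kernel's spinlocks are implemented; names are illustrative.
 */
static inline void tas_lock(volatile unsigned long *l)
{
	while (_xchg(l, 1UL) != 0UL)	/* spin until we saw "was free" */
		;
}

static inline void tas_unlock(volatile unsigned long *l)
{
	_xchg(l, 0UL);			/* mark free again */
}
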
/*
 * xchg() maps directly to ARC EX instruction which guarantees atomicity.
 * However in !LLSC config, it also needs to use the @atomic_ops_lock spinlock
 * due to a subtle reason:
 *  - For !LLSC, cmpxchg() needs to use that lock (see above) and there is a
 *    lot of kernel code which calls xchg()/cmpxchg() on the same data (see
 *    llist.h, sketched below). Hence xchg() needs to follow the same locking
 *    rules.
 *
 * Technically the lock is also needed for UP (boils down to irq save/restore)
 * but we can cheat a bit since cmpxchg()'s atomic_ops_lock() would cause irqs
 * to be disabled, so it can't possibly be interrupted/preempted/clobbered by
 * xchg(). The other way around, xchg() is a single instruction anyway, so it
 * can't be interrupted as such.
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with)  _xchg(ptr, with)

#endif

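/*
 * Editorial sketch of the llist.h pattern referenced above: cmpxchg()
 * and xchg() operating on the same word, which is why both must take
 * the same lock in the !LLSC configuration. Types are simplified.
 */
struct sketch_node { struct sketch_node *next; };

static void sketch_push(struct sketch_node **headp, struct sketch_node *n)
{
	struct sketch_node *first;

	do {				/* cf. llist_add() */
		first = *headp;
		n->next = first;
	} while (cmpxchg(headp, first, n) != first);
}

static struct sketch_node *sketch_pop_all(struct sketch_node **headp)
{
	return xchg(headp, NULL);	/* cf. llist_del_all() */
}
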
#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		/*
		 * Explicit full memory barrier needed before/after
		 */
		smp_mb();

		__asm__ __volatile__(
		"	mov r2, %0\n"
		"	mov r3, %1\n"
		"	.word %2\n"
		"	mov %0, r2\n"
		: "+r"(val)
		: "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
		: "r2", "r3", "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))

#endif /* CONFIG_ARC_PLAT_EZNPS */

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that the following
 * definition is incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
 *         is natively "SMP safe", no serialization required).
 *   UP  : other atomics disable IRQ, so there is no way an atomic_xchg()
 *         from a different context could clobber them. atomic_xchg() itself
 *         is a single insn, so it can't be clobbered by others. Thus no
 *         serialization is required when atomic_xchg is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

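/*
 * Editorial sketch: typical atomic_xchg() use, claiming a one-shot
 * flag; exactly one caller observes the 0 -> 1 transition.
 */
static inline int try_claim(atomic_t *claimed)
{
	return atomic_xchg(claimed, 1) == 0;
}
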
#endif
v5.9
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory"); /* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}

#elif !defined(CONFIG_ARC_PLAT_EZNPS)

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	int prev;
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	/*
	 * Explicit full memory barrier needed before/after
	 */
	smp_mb();

	write_aux_reg(CTOP_AUX_GPA1, expected);

	__asm__ __volatile__(
	"	mov r2, %0\n"
	"	mov r3, %1\n"
	"	.word %2\n"
	"	mov %0, r2"
	: "+r"(new)
	: "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
	: "r2", "r3", "memory");

	smp_mb();

	return new;
}

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ({				\
	(typeof(*(ptr)))__cmpxchg((ptr),		\
				  (unsigned long)(o),	\
				  (unsigned long)(n));	\
})

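/*
 * Editorial note on the change from the v4.17 version above: cmpxchg()
 * is now wrapped in a GCC statement expression, ({ ... }), which
 * evaluates to the value of its last statement, letting a multi-line
 * macro still behave as a single expression. A minimal illustration:
 */
#define double_it(x) ({			\
	typeof(x) __v = (x);		\
	__v + __v;			\
})
/* double_it(3) evaluates to 6 and can be used wherever an expression can */
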
/*
 * atomic_cmpxchg is the same as cmpxchg
 *   LLSC: differs only in data type; the semantics are exactly the same
 *  !LLSC: cmpxchg() has to use the external lock atomic_ops_lock to guarantee
 *         the semantics, and this lock also happens to be used by atomic_*()
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))


#ifndef CONFIG_ARC_PLAT_EZNPS

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))

/*
 * xchg() maps directly to ARC EX instruction which guarantees atomicity.
 * However in !LLSC config, it also needs to use the @atomic_ops_lock spinlock
 * due to a subtle reason:
 *  - For !LLSC, cmpxchg() needs to use that lock (see above) and there is a
 *    lot of kernel code which calls xchg()/cmpxchg() on the same data (see
 *    llist.h). Hence xchg() needs to follow the same locking rules.
 *
 * Technically the lock is also needed for UP (boils down to irq save/restore)
 * but we can cheat a bit since cmpxchg()'s atomic_ops_lock() would cause irqs
 * to be disabled, so it can't possibly be interrupted/preempted/clobbered by
 * xchg(). The other way around, xchg() is a single instruction anyway, so it
 * can't be interrupted as such.
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with)  _xchg(ptr, with)

#endif

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		/*
		 * Explicit full memory barrier needed before/after
		 */
		smp_mb();

		__asm__ __volatile__(
		"	mov r2, %0\n"
		"	mov r3, %1\n"
		"	.word %2\n"
		"	mov %0, r2\n"
		: "+r"(val)
		: "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
		: "r2", "r3", "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define xchg(ptr, with) ({				\
	(typeof(*(ptr)))__xchg((unsigned long)(with),	\
			       (ptr),			\
			       sizeof(*(ptr)));		\
})

#endif /* CONFIG_ARC_PLAT_EZNPS */

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that the following
 * definition is incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
 *         is natively "SMP safe", no serialization required).
 *   UP  : other atomics disable IRQ, so there is no way an atomic_xchg()
 *         from a different context could clobber them. atomic_xchg() itself
 *         is a single insn, so it can't be clobbered by others. Thus no
 *         serialization is required when atomic_xchg is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#endif