/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory");	/* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}

#else /* !CONFIG_ARC_HAS_LLSC */

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	int prev;
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

#endif

#define arch_cmpxchg(ptr, o, n) ({			\
	(typeof(*(ptr)))__cmpxchg((ptr),		\
				  (unsigned long)(o),	\
				  (unsigned long)(n));	\
})

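/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the usual "claim exactly once" pattern built on the cmpxchg() contract.
 * The helper name and @flag are hypothetical; kernel code would normally go
 * through the generic cmpxchg() wrapper rather than call arch_cmpxchg()
 * directly.
 */
static inline bool __arc_cmpxchg_example_claim(unsigned long *flag)
{
	/* only the caller that observes 0 and installs 1 "wins" */
	return arch_cmpxchg(flag, 0UL, 1UL) == 0UL;
}
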
/*
 * atomic_cmpxchg() is the same as cmpxchg():
 *   LLSC: only differs in data type, the semantics are exactly the same
 *  !LLSC: cmpxchg() has to use the external lock atomic_ops_lock to guarantee
 *         the semantics, and that lock also happens to be used by atomic_*()
 */
#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))


/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))

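/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * EX swaps a register with memory in one atomic step, so _xchg() hands back
 * whatever the location previously held. This is the raw primitive; the
 * arch_xchg() wrapper below layers the !LLSC locking rules on top of it.
 * The helper name is hypothetical.
 */
static inline unsigned long __arc_ex_example_swap(unsigned long *p,
						  unsigned long newval)
{
	/* old contents of *p are returned; *p now holds newval */
	return _xchg(p, newval);
}
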
/*
 * xchg() maps directly to the ARC EX instruction, which guarantees atomicity.
 * However, in the !LLSC config it also needs to take the @atomic_ops_lock
 * spinlock, for a subtle reason:
 *  - For !LLSC, cmpxchg() needs to use that lock (see above), and there is a
 *    lot of kernel code which calls xchg()/cmpxchg() on the same data (see
 *    llist.h). Hence xchg() needs to follow the same locking rules (an
 *    illustrative sketch of this shared-data pattern follows the arch_xchg()
 *    definitions below).
 *
 * Technically the lock is also needed for UP (it boils down to irq
 * save/restore), but we can cheat a bit: since cmpxchg()'s atomic_ops_lock()
 * disables irqs, it can't possibly be interrupted/preempted/clobbered by
 * xchg(). The other way around, xchg() is a single instruction anyway, so it
 * can't be interrupted as such.
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define arch_xchg(ptr, with)		\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define arch_xchg(ptr, with) _xchg(ptr, with)

#endif

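/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the llist.h-style pattern referred to above, where cmpxchg() (push) and
 * xchg() (delete-all) race on the very same pointer, which is exactly why
 * both must serialize via atomic_ops_lock in the !LLSC case. The type and
 * helper names below are hypothetical stand-ins, not the real llist API.
 */
struct __arc_example_node {
	struct __arc_example_node *next;
};

static inline void __arc_example_push(struct __arc_example_node **headp,
				      struct __arc_example_node *node)
{
	struct __arc_example_node *expected, *old = *headp;

	/* retry until our cmpxchg() ran against an unchanged head */
	do {
		expected = old;
		node->next = expected;
		old = arch_cmpxchg(headp, expected, node);
	} while (old != expected);
}

static inline struct __arc_example_node *
__arc_example_del_all(struct __arc_example_node **headp)
{
	/* atomically take the whole list; concurrent pushes use cmpxchg() */
	return arch_xchg(headp, NULL);
}
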
/*
 * "atomic" variant of xchg()
 * REQ: it needs to follow the same serialization rules as other atomic_xxx().
 * Since xchg() doesn't always do that, it might seem that the definition below
 * is incorrect. But here's the rationale:
 *  SMP : even xchg() takes the atomic_ops_lock, so OK.
 *  LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
 *        is natively "SMP safe", no serialization is required).
 *  UP  : other atomics disable IRQs, so there is no way an atomic_xchg() from
 *        a different context could clobber them. atomic_xchg() itself is a
 *        single insn, so it can't be clobbered by others. Thus no
 *        serialization is required when atomic_xchg() is involved.
 */
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
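
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a typical atomic_xchg() use, atomically fetching and clearing a pending
 * word held in an atomic_t. The function name is hypothetical.
 */
static inline int __arc_example_fetch_and_clear(atomic_t *v)
{
	/* previous counter value comes back; the counter is now 0 */
	return arch_atomic_xchg(v, 0);
}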

#endif