/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	/*
	 * ACQUIRE barrier to ensure load/store after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barrier
	 * thus need the full all-all barrier
	 */
	smp_mb();
}
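
/*
 * For reference only: the LLOCK/SCOND loop above is roughly equivalent to
 * the sketch below, where store_conditional() is a made-up stand-in for
 * SCOND (it fails if another core wrote the lock word since the LLOCK):
 *
 *	do {
 *		val = lock->slock;				(LLOCK)
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__ ||		(spin if LOCKED)
 *		 !store_conditional(&lock->slock,
 *				    __ARCH_SPIN_LOCK_LOCKED__));
 *
 * Among simultaneous contenders exactly one SCOND succeeds, so only one
 * CPU sees the lock transition from UNLOCKED to LOCKED; the rest retry.
 */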

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
}
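
/*
 * Note on usage (these are normally reached only via the generic
 * spin_lock()/spin_unlock() wrappers): the smp_mb() issued after taking
 * the lock and the one issued before releasing it provide the ACQUIRE and
 * RELEASE ordering around the critical section, since ARC lacks
 * lighter-weight acquire/release barriers:
 *
 *	arch_spin_lock(&x);	smp_mb() keeps the CS from bleeding up
 *	... critical section ...
 *	arch_spin_unlock(&x);	smp_mb() keeps the CS from bleeding down
 */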

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */
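
/*
 * The whole rwlock state is a single counter (see asm/spinlock_types.h).
 * Roughly:
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__		lock is free
 *	0 < counter < __ARCH_RW_LOCK_UNLOCKED__		held by readers, each
 *							of whom decremented it
 *	counter == 0					held by a single writer
 *
 * hence the 0 passed as [WR_LOCKED] in the asm blocks below.
 */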

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
}
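
/*
 * Illustrative usage (in practice these are reached only through the
 * generic read_lock()/read_unlock() and write_lock()/write_unlock()
 * wrappers):
 *
 *	arch_read_lock(&rw);	any number of readers may hold this
 *	arch_read_unlock(&rw);
 *
 *	arch_write_lock(&rw);	exclusive; spins until all readers are gone
 *	arch_write_unlock(&rw);
 */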

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * Per lkmm, smp_mb() is only required after _lock (and before _unlock)
	 * for ACQ and REL semantics respectively. However EX based spinlocks
	 * need the extra smp_mb to workaround a hardware quirk.
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	smp_mb();
}
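
/*
 * EX atomically swaps a register with memory, so the loop above is a
 * plain test-and-set: keep exchanging LOCKED into the lock word until the
 * value swapped out is no longer LOCKED. A rough sketch, with xchg()
 * standing in for the EX instruction:
 *
 *	do {
 *		val = xchg(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);
 */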

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}
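
/*
 * Note that trylock needs no loop: a single EX unconditionally writes
 * LOCKED (harmless if the lock already was LOCKED) and the value swapped
 * out tells us whether we actually acquired it (old value == UNLOCKED).
 */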

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
	 * is the only option
	 */
	smp_mb();

	/*
	 * EX is not really required here, a simple STore of 0 suffices.
	 * However this causes tasklist livelocks in SystemC based SMP virtual
	 * platforms where the systemc core scheduler uses EX as a cue for
	 * moving to next core. Do a git log of this file for details
	 */
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * see pairing version/comment in arch_spin_lock above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
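
/*
 * In this !LLSC variant arch_rwlock_t (see asm/spinlock_types.h) pairs the
 * counter with a helper spinlock, roughly:
 *
 *	typedef struct {
 *		volatile unsigned int	counter;
 *		arch_spinlock_t		lock_mutex;
 *	} arch_rwlock_t;
 *
 * Each operation below takes lock_mutex with IRQs disabled and then updates
 * counter in plain C, so no LLOCK/SCOND is needed.
 */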

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif

#endif /* __ASM_SPINLOCK_H */