/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
#ifndef queued_spin_unlock_wait
extern void queued_spin_unlock_wait(struct qspinlock *lock);
#endif
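
/*
 * Illustrative sketch only -- the real out-of-line implementation lives with
 * the slowpath code, and this simplified version ignores the pending bit. It
 * shows where the live-lock caveat above comes from: the waiter just spins
 * until it happens to observe the locked byte clear, so back-to-back lockers
 * can keep it waiting indefinitely.
 */
#if 0
void queued_spin_unlock_wait(struct qspinlock *lock)
{
	/* Wait until the current holder's _Q_LOCKED_VAL is observed clear. */
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();

	/* Order the spin loop above against later accesses by the caller. */
	smp_acquire__after_ctrl_dep();
}
#endif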

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * See queued_spin_unlock_wait().
	 *
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to avoid the lockref code stealing the
 * lock and changing things underneath it. This also allows some
 * optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	    (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	(void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
}
#endif

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * A 'generic' spinlock implementation that is based on MCS locks. For an
 * architecture that's looking for a 'generic' spinlock, please first consider
 * ticket-lock.h and only come looking here when you've considered all the
 * constraints below and can show your hardware does actually perform better
 * with qspinlock.
 *
 * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
 * weaker than RCtso if you're Power), where regular code only expects atomic_t
 * to be RCpc.
 *
 * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
 * of atomic operations to behave well together; please audit them carefully to
 * ensure they all have forward progress. Many atomic operations may default to
 * cmpxchg() loops, which will not have good forward progress properties on
 * LL/SC architectures.
 *
 * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
 * do. Carefully read the patches that introduced
 * queued_fetch_set_pending_acquire().
 *
 * qspinlock also heavily relies on mixed size atomic operations; specifically,
 * it requires architectures to have xchg16, something which many LL/SC
 * architectures need to implement as a 32-bit and+or in order to satisfy the
 * forward progress guarantees mentioned above.
 *
 * Further reading on mixed size atomics that might be relevant:
 *
 *   http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>
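
/*
 * Illustrative sketch, not part of this header: one way an LL/SC architecture
 * without a native 16-bit xchg could emulate the xchg16 mentioned in the
 * header comment, using a 32-bit cmpxchg loop on the containing word. The
 * loop is exactly the kind of construct whose forward progress must be
 * audited before selecting qspinlock. All names are hypothetical and a
 * little-endian layout is assumed for the shift arithmetic.
 */
#if 0
static inline u16 hypothetical_xchg16(u16 *ptr, u16 newval)
{
	u32 *word = (u32 *)((unsigned long)ptr & ~3UL);
	int shift = ((unsigned long)ptr & 2) * 8;
	u32 mask = 0xffffU << shift;
	u32 old, new;

	old = READ_ONCE(*word);
	do {
		/* Replace only the halfword that @ptr points at. */
		new = (old & ~mask) | ((u32)newval << shift);
	} while (!try_cmpxchg(word, &old, new));

	return (u16)(old >> shift);
}
#endif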

#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to avoid the lockref code stealing the
 * lock and changing things underneath it. This also allows some
 * optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !lock.val.counter;
}

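/*
 * Illustrative sketch, hypothetical and heavily simplified from the lockref
 * idea, of why the by-value check above must report "locked" whenever waiters
 * are queued: the fast path operates on a snapshot of the combined lock+count
 * word and must not bump the count while another CPU holds or waits for the
 * lock.
 */
#if 0
struct hypothetical_lockref {
	union {
		u64 lock_count;
		struct {
			struct qspinlock lock;
			int count;
		};
	};
};

static bool hypothetical_lockref_get_fast(struct hypothetical_lockref *lr)
{
	struct hypothetical_lockref old, new;

	old.lock_count = READ_ONCE(lr->lock_count);

	/* Holder, pending or tail bits set?  Fall back to taking the lock. */
	if (!queued_spin_value_unlocked(old.lock))
		return false;

	new.lock_count = old.lock_count;
	new.count++;

	/* Succeeds only if neither the lock word nor the count has changed. */
	return cmpxchg64(&lr->lock_count, old.lock_count, new.lock_count) ==
	       old.lock_count;
}
#endif
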

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	/* Don't bother with the atomic op if the lock is visibly taken. */
	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#endif

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
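
/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * queued_spin_lock() has ACQUIRE semantics and queued_spin_unlock() has
 * RELEASE semantics, so the accesses between them stay inside the critical
 * section. Normal kernel code should use the spinlock_t wrappers rather than
 * calling these directly.
 */
#if 0
static struct qspinlock demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int demo_counter;

static void demo_increment(void)
{
	queued_spin_lock(&demo_lock);		/* ACQUIRE */
	demo_counter++;				/* protected by demo_lock */
	queued_spin_unlock(&demo_lock);		/* RELEASE */
}
#endif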

#ifndef virt_spin_lock
/*
 * Native stub: returning false tells queued_spin_lock_slowpath() that the
 * lock was not handled here, so it continues with the normal queued code.
 * Architectures can override this to cope with running under a hypervisor.
 */
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
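
/*
 * Illustrative sketch of what an architecture override can do (all names
 * hypothetical): when running as a guest, fair queueing suffers from lock
 * holder/waiter preemption, so an architecture may degrade to a simple
 * test-and-set lock and return true to signal that the lock was handled
 * here rather than by the queued slowpath.
 */
#if 0
static __always_inline bool hypothetical_virt_spin_lock(struct qspinlock *lock)
{
	if (!hypothetical_running_as_guest())
		return false;

	/* Unfair test-and-set lock: wait until free, then try to grab it. */
	do {
		while (atomic_read(&lock->val))
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
#endif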

#ifndef __no_arch_spinlock_redefine
/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
#endif

#endif /* __ASM_GENERIC_QSPINLOCK_H */