/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to prevent the lockref code from stealing
 * the lock and changing things underneath it. This also allows some
 * optimizations to be applied without conflicting with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
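	/*
	 * Masking out the locked byte leaves the pending bit and the MCS
	 * tail, which are non-zero only while other CPUs are waiting.
	 */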
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
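	/*
	 * Peek at the lock word first so that the cmpxchg is only
	 * attempted when the lock looks free; a contended trylock then
	 * fails without dirtying the lock cache line.
	 */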
	u32 val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

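/*
 * The slow path lives in kernel/locking/qspinlock.c: the caller either
 * becomes the "pending" waiter spinning on the lock word or queues the
 * CPU on a per-CPU MCS node until the lock can be taken.
 */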
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

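	/*
	 * Fast path: try the 0 -> _Q_LOCKED_VAL transition. On failure,
	 * val is updated with the value observed in the lock word and is
	 * handed to the slow path.
	 */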
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}

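/*
 * An architecture can supply its own queued_spin_unlock() (e.g. to hook
 * in paravirtualized unlock handling) by defining it before including
 * this header; the generic version below is a plain release store.
 */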
#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif

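/*
 * virt_spin_lock() is a hook that an architecture may override when
 * running as a guest (e.g. to fall back to a simple test-and-set lock
 * under a hypervisor); returning false keeps the regular queued path.
 */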
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remap the architecture-specific spinlock functions to the
 * corresponding queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */