/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queue read/write lock
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */
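/*
 * Purely illustrative (not text from this file): one plausible way an
 * architecture satisfies the include-order rule above. The arch name "foo"
 * is hypothetical. arch_spin_is_locked() must already be visible because
 * queued_rwlock_is_contended() below relies on it.
 *
 *	arch/foo/include/asm/spinlock.h:
 *		#include <asm/qspinlock.h>	(provides arch_spin_is_locked)
 *		#include <asm/qrwlock.h>	(eventually pulls in this file)
 */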

/*
 * Writer states & reader shift and bias.
 */
#define _QW_WAITING	0x100	/* A writer is waiting */
#define _QW_LOCKED	0x0ff	/* A writer holds the lock */
#define _QW_WMASK	0x1ff	/* Writer mask */
#define _QR_SHIFT	9	/* Reader count shift */
#define _QR_BIAS	(1U << _QR_SHIFT)
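
/*
 * A sketch (derived from the constants above, not text from the original
 * file) of how the 32-bit lock word lock->cnts is interpreted; the byte
 * position of the writer byte depends on endianness, see qrwlock_types.h:
 *
 *	bits 31..9	reader count, adjusted in steps of _QR_BIAS
 *	bit  8		_QW_WAITING: a writer is queued and waiting
 *	bits 7..0	_QW_LOCKED: writer byte, 0xff while a writer holds
 *			the lock (cleared through lock->wlocked on unlock)
 *
 * So "cnts & _QW_WMASK" is non-zero whenever a writer holds or waits for
 * the lock, and "cnts >> _QR_SHIFT" is the number of active readers.
 */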

/*
 * External function declarations
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}

/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
				_QW_LOCKED));
}
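
/*
 * Illustrative trylock pattern (a sketch, not from this file; kernel code
 * normally reaches these helpers through the generic read_trylock() and
 * write_trylock() wrappers rather than calling them directly):
 *
 *	if (queued_write_trylock(lock)) {
 *		// exclusive access: no readers, no other writer
 *		queued_write_unlock(lock);
 *	} else {
 *		// lock busy; back off and retry later
 *	}
 */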

/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}

/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	int cnts = 0;
	/* Optimize for the unfair lock case where the fair flag is 0. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	queued_write_lock_slowpath(lock);
}

/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	smp_store_release(&lock->wlocked, 0);
}
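
/*
 * Illustrative fast-path pairing (a sketch, not from this file). Real users
 * declare an rwlock_t and go through read_lock()/write_lock() and friends
 * from <linux/rwlock.h> rather than calling these helpers directly:
 *
 *	queued_read_lock(lock);
 *	// read-side section: may run concurrently with other readers
 *	queued_read_unlock(lock);
 *
 *	queued_write_lock(lock);
 *	// write-side section: excludes all readers and other writers
 *	queued_write_unlock(lock);
 */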

/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
	return arch_spin_is_locked(&lock->wait_lock);
}

/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions.
 */
#define arch_read_lock(l)		queued_read_lock(l)
#define arch_write_lock(l)		queued_write_lock(l)
#define arch_read_trylock(l)		queued_read_trylock(l)
#define arch_write_trylock(l)		queued_write_trylock(l)
#define arch_read_unlock(l)		queued_read_unlock(l)
#define arch_write_unlock(l)		queued_write_unlock(l)
#define arch_rwlock_is_contended(l)	queued_rwlock_is_contended(l)
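
/*
 * Rough call chain when an architecture selects this implementation (a
 * sketch; the lockdep and irq-handling layers in kernel/locking are omitted):
 *
 *	read_lock(&l)  -> ... -> arch_read_lock(&l.raw_lock)  -> queued_read_lock()
 *	write_lock(&l) -> ... -> arch_write_lock(&l.raw_lock) -> queued_write_lock()
 */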

#endif	/* __ASM_GENERIC_QRWLOCK_H */