// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <trace/events/lock.h>

/**
 * queued_read_lock_slowpath - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
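	/*
	 * The fastpath speculatively added _QR_BIAS to the lock word;
	 * back that out before this reader joins the wait queue.
	 */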
	atomic_sub(_QR_BIAS, &lock->cnts);

	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);

	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
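
/*
 * For context: a sketch of the reader fastpath that falls back into the
 * slowpath above. This mirrors queued_read_lock() from
 * include/asm-generic/qrwlock.h and is reproduced here for illustration
 * only (guarded out so it is never compiled).
 */
#if 0
static inline void queued_read_lock(struct qrwlock *lock)
{
	int cnts;

	/* Speculatively bump the reader count. */
	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;		/* no writer: the lock is ours */

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}
#endif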

/**
 * queued_write_lock_slowpath - acquire write lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void __lockfunc queued_write_lock_slowpath(struct qrwlock *lock)
{
	int cnts;

	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_WRITE);

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!(cnts = atomic_read(&lock->cnts)) &&
	    atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_or(_QW_WAITING, &lock->cnts);

	/* When no more readers or writers, set the locked flag */
	do {
		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
	arch_spin_unlock(&lock->wait_lock);

	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
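
/*
 * For context: a sketch of the writer fastpath, mirroring
 * queued_write_lock() from include/asm-generic/qrwlock.h (illustration
 * only, guarded out).
 */
#if 0
static inline void queued_write_lock(struct qrwlock *lock)
{
	int cnts = 0;

	/* Grab the lock directly only when it is completely free. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	queued_write_lock_slowpath(lock);
}
#endif

/*
 * ---------------------------------------------------------------------
 * What follows is an earlier revision of this file, kept for
 * comparison. It predates the conversion to atomic_cond_read_acquire()
 * and still uses an internal struct __qrwlock overlay plus the
 * rspin_until_writer_unlock() helper.
 * ---------------------------------------------------------------------
 */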
/*
 * Queued read/write locks
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/qrwlock.h>

/*
 * This internal data structure is used for optimizing access to some of
 * the subfields within the atomic_t cnts.
 */
struct __qrwlock {
	union {
		atomic_t cnts;
		struct {
#ifdef __LITTLE_ENDIAN
			u8 wmode;	/* Writer mode   */
			u8 rcnts[3];	/* Reader counts */
#else
			u8 rcnts[3];	/* Reader counts */
			u8 wmode;	/* Writer mode   */
#endif
		};
	};
	arch_spinlock_t	lock;
};
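
/*
 * Layout sketch, assuming the qrwlock.h constants of this revision
 * (_QW_WMASK == 0xff, _QR_SHIFT == 8, _QR_BIAS == 1 << 8):
 *
 *   cnts == 0x00000300	- three readers, no writer
 *   cnts == 0x000000ff	- a writer holds the lock (wmode == _QW_LOCKED)
 *   cnts == 0x00000001	- a writer is waiting (wmode == _QW_WAITING)
 *
 * Overlaying wmode as its own byte lets a writer update it with a
 * byte-sized cmpxchg() without disturbing the reader count.
 */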

/**
 * rspin_until_writer_unlock - spin until the writer releases the lock
 * @lock: Pointer to queue rwlock structure
 * @cnts: Current queue rwlock lock count value
 *
 * In interrupt context or at the head of the queue, the reader (which
 * has already incremented the reader count) just spins here until the
 * writer releases the lock.
 */
static __always_inline void
rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
{
	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
		cpu_relax_lowlatency();
		cnts = atomic_read_acquire(&lock->cnts);
	}
}

/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 * @cnts: Current qrwlock lock value
 */
void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet).
		 * The rspin_until_writer_unlock() function returns immediately
		 * in this case. Otherwise, they will spin (with ACQUIRE
		 * semantics) until the lock is available without waiting in
		 * the queue.
		 */
		rspin_until_writer_unlock(lock, cnts);
		return;
	}
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts) - _QR_BIAS;
	rspin_until_writer_unlock(lock, cnts);

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
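
/*
 * For context: in this revision the fastpath passes its post-increment
 * snapshot of the lock word into the slowpath. A sketch mirroring the
 * queued_read_lock() of the same era (illustration only, guarded out):
 */
#if 0
static inline void queued_read_lock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock, cnts);
}
#endif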

/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	u32 cnts;

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/*
	 * Set the waiting flag to notify readers that a writer is pending,
	 * or wait for a previous writer to go away.
	 */
	for (;;) {
		struct __qrwlock *l = (struct __qrwlock *)lock;

		if (!READ_ONCE(l->wmode) &&
		    (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
			break;

		cpu_relax_lowlatency();
	}

	/* When no more readers, set the locked flag */
	for (;;) {
		cnts = atomic_read(&lock->cnts);
		if ((cnts == _QW_WAITING) &&
		    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
					    _QW_LOCKED) == _QW_WAITING))
			break;

		cpu_relax_lowlatency();
	}
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
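
/*
 * Usage note: kernel code does not call these slowpaths directly. On
 * architectures that select ARCH_USE_QUEUED_RWLOCKS, the generic rwlock
 * API maps onto qrwlock, so a typical (hypothetical) caller looks like:
 */
#if 0
static DEFINE_RWLOCK(example_lock);	/* hypothetical example lock */

static void example_reader(void)
{
	read_lock(&example_lock);
	/* read-side critical section */
	read_unlock(&example_lock);
}

static void example_writer(void)
{
	write_lock(&example_lock);
	/* write-side critical section */
	write_unlock(&example_lock);
}
#endif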