// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required. The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us. If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required. The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us. If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
}
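
/*
 * Editor's illustrative sketch, not part of the original file: a typical
 * caller pairs the two helpers above around a section that may sleep,
 * converting the spinning lock to blocking mode and back. The function
 * below is hypothetical.
 */
static void __maybe_unused demo_blocking_write_section(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);		/* spinning write lock */
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
	/* ... work that may sleep, e.g. memory allocation or IO ... */
	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
	btrfs_tree_unlock(eb);
}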

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
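
/*
 * Editor's illustrative sketch, not part of the original file: the nested
 * case above lets a thread that already holds the write lock in blocking
 * mode take a read lock on the same eb without deadlocking, as
 * btrfs_find_all_roots() may do on a partly write-locked tree. The
 * function below is hypothetical.
 */
static void __maybe_unused demo_nested_read_lock(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
	btrfs_tree_read_lock(eb);	/* sets eb->lock_nested and returns */
	btrfs_tree_read_unlock(eb);	/* only clears eb->lock_nested */
	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
	btrfs_tree_unlock(eb);
}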

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}
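
/*
 * Editor's illustrative sketch, not part of the original file: the try-lock
 * variants let a caller test for contention without sleeping and fall back
 * to the blocking versions only when necessary. The helper below is
 * hypothetical.
 */
static int __maybe_unused demo_write_lock_eb(struct extent_buffer *eb, bool nowait)
{
	if (btrfs_try_tree_write_lock(eb))
		return 0;
	if (nowait)
		return -EAGAIN;
	btrfs_tree_lock(eb);	/* waits for blocking readers and writers */
	return 0;
}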

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/*
	 * atomic_dec_and_test implies a barrier for waitqueue_active
	 */
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/*
		 * Make sure counter is updated before we wake up waiters.
		 */
		smp_mb__after_atomic();
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

/*
 * Extent buffer locking
 * =====================
 *
 * We use a rw_semaphore for tree locking, and the semantics are exactly the
 * same:
 *
 * - reader/writer exclusion
 * - writer/writer exclusion
 * - reader/reader sharing
 * - try-lock semantics for readers and writers
 *
 * The rwsem implementation does opportunistic spinning, which reduces the
 * number of times the locking task needs to sleep.
 */
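
/*
 * Editor's illustrative sketch, not part of the original file: with the
 * rwsem backing the lock, callers use plain reader/writer pairs and there
 * is no spinning/blocking conversion step anymore. The function below is
 * hypothetical.
 */
static void __maybe_unused demo_rwsem_usage(struct extent_buffer *eb)
{
	btrfs_tree_read_lock(eb);	/* shared: other readers may enter */
	btrfs_tree_read_unlock(eb);

	btrfs_tree_lock(eb);		/* exclusive: excludes readers and writers */
	btrfs_tree_unlock(eb);
}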

/*
 * __btrfs_tree_read_lock - lock extent buffer for read
 * @eb:   the eb to be locked
 * @nest: the nesting level to be used for lockdep
 *
 * This takes the read lock on the extent buffer, using the specified nesting
 * level for lockdep purposes.
 */
void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();

	down_read_nested(&eb->lock, nest);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_read_lock(eb, start_ns);
}

void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	__btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL);
}

/*
 * Try-lock for read.
 *
 * Return 1 if the lock was taken, 0 otherwise.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (down_read_trylock(&eb->lock)) {
		eb->lock_owner = current->pid;
		trace_btrfs_try_tree_read_lock(eb);
		return 1;
	}
	return 0;
}

/*
 * Try-lock for write.
 *
 * Return 1 if the lock was taken, 0 otherwise.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (down_write_trylock(&eb->lock)) {
		eb->lock_owner = current->pid;
		trace_btrfs_try_tree_write_lock(eb);
		return 1;
	}
	return 0;
}
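
/*
 * Editor's illustrative sketch, not part of the original file: a common
 * pattern is to try the lock first and only block when the fast path
 * fails. The helper below is hypothetical.
 */
static bool __maybe_unused demo_read_lock_eb(struct extent_buffer *eb, bool nowait)
{
	if (btrfs_try_tree_read_lock(eb))
		return true;
	if (nowait)
		return false;
	btrfs_tree_read_lock(eb);	/* may sleep until the writer releases */
	return true;
}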

/*
 * Release read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	eb->lock_owner = 0;
	up_read(&eb->lock);
}

/*
 * __btrfs_tree_lock - lock eb for write
 * @eb:   the eb to lock
 * @nest: the nesting to use for the lock
 *
 * Returns with the eb->lock write locked.
 */
void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
	__acquires(&eb->lock)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	down_write_nested(&eb->lock, nest);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

void btrfs_tree_lock(struct extent_buffer *eb)
{
	__btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
}

/*
 * Release the write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	up_write(&eb->lock);
}

/*
 * This releases any locks held in the path starting at level and going all the
 * way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
 * cases, such as COW of the block at slot zero in the node. This ignores
 * those rules, and it should only be called when there are no more updates to
 * be done higher up in the tree.
 */
void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}
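
/*
 * Editor's illustrative sketch, not part of the original file: once a leaf
 * has been changed and no ancestors need further updates, the locks held
 * above it can be dropped early to reduce contention. The function below
 * is hypothetical.
 */
static void __maybe_unused demo_release_ancestors(struct btrfs_path *path)
{
	/* keep the leaf (level 0) locked, drop locks on levels 1 and up */
	btrfs_unlock_up_safe(path, 1);
}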

/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with write lock held
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with read lock held
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * DREW locks
 * ==========
 *
 * DREW stands for double-reader-writer-exclusion lock. It's used in
 * situations where you want to provide A-B exclusion but not AA or BB.
 *
 * The current implementation gives more priority to readers. If a reader
 * and a writer both race to acquire their respective sides of the lock, the
 * writer yields its lock as soon as it detects a concurrent reader.
 * Additionally, if there are pending readers, no new writers are allowed to
 * come in and acquire the lock.
 */
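
/*
 * Editor's illustrative sketch, not part of the original file: the two
 * classes of operations each take their own side, so A excludes B while
 * A/A and B/B can run concurrently. The function below is hypothetical.
 */
static void __maybe_unused demo_drew_usage(struct btrfs_drew_lock *lock)
{
	/* class A, the "writer" side; other writers are not excluded */
	btrfs_drew_write_lock(lock);
	btrfs_drew_write_unlock(lock);

	/* class B, the "reader" side; excluded from A but not from other Bs */
	btrfs_drew_read_lock(lock);
	btrfs_drew_read_unlock(lock);
}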

int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
	int ret;

	ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
	if (ret)
		return ret;

	atomic_set(&lock->readers, 0);
	init_waitqueue_head(&lock->pending_readers);
	init_waitqueue_head(&lock->pending_writers);

	return 0;
}

void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
{
	percpu_counter_destroy(&lock->writers);
}

/* Return true if acquisition is successful, false otherwise */
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
{
	if (atomic_read(&lock->readers))
		return false;

	percpu_counter_inc(&lock->writers);

	/* Ensure writers count is updated before we check for pending readers */
	smp_mb();
	if (atomic_read(&lock->readers)) {
		btrfs_drew_write_unlock(lock);
		return false;
	}

	return true;
}

void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
{
	while (true) {
		if (btrfs_drew_try_write_lock(lock))
			return;
		wait_event(lock->pending_writers, !atomic_read(&lock->readers));
	}
}

void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
	percpu_counter_dec(&lock->writers);
	cond_wake_up(&lock->pending_readers);
}

void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
{
	atomic_inc(&lock->readers);

	/*
	 * Ensure the pending reader count is perceived BEFORE this reader
	 * goes to sleep in case of active writers. This guarantees new writers
	 * won't be allowed and that the current reader will be woken up when
	 * the last active writer finishes its work. Pairs with the smp_mb()
	 * in btrfs_drew_try_write_lock().
	 */
	smp_mb__after_atomic();

	wait_event(lock->pending_readers,
		   percpu_counter_sum(&lock->writers) == 0);
}

void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
{
	/*
	 * atomic_dec_and_test implies a full barrier, so woken up writers
	 * are guaranteed to see the decrement.
	 */
	if (atomic_dec_and_test(&lock->readers))
		wake_up(&lock->pending_writers);
}