/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * If we currently have a spinning reader or writer lock (indicated by
 * the rw flag), this will bump the count of blocking holders and drop
 * the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
	return;
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
	return;
}

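/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * caller holding a spinning write lock converts it to a blocking lock
 * before doing work that may sleep, then converts it back before the
 * final unlock.  The helper do_work_that_may_sleep() is a hypothetical
 * placeholder.
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	do_work_that_may_sleep(eb);
 *	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *	btrfs_tree_unlock(eb);
 */
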
/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

/*
 * returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't.
 * This won't wait for blocking writers or readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		smp_mb();
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

/*
 * Extent buffer locking
 * =====================
 *
 * We use a rw_semaphore for tree locking, and the semantics are exactly the
 * same:
 *
 * - reader/writer exclusion
 * - writer/writer exclusion
 * - reader/reader sharing
 * - try-lock semantics for readers and writers
 *
 * The rwsem implementation does opportunistic spinning, which reduces the
 * number of times the locking task needs to sleep.
 */

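/*
 * A minimal usage sketch (illustrative only, not part of this file),
 * showing the read and write sides of the rwsem-backed extent buffer
 * lock.  inspect_eb() and modify_eb() are hypothetical placeholders.
 *
 *	btrfs_tree_read_lock(eb);	(shared with other readers)
 *	inspect_eb(eb);
 *	btrfs_tree_read_unlock(eb);
 *
 *	btrfs_tree_lock(eb);		(excludes readers and writers)
 *	modify_eb(eb);
 *	btrfs_tree_unlock(eb);
 */
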
/*
 * __btrfs_tree_read_lock - lock extent buffer for read
 * @eb: the eb to be locked
 * @nest: the nesting level to be used for lockdep
 *
 * This takes the read lock on the extent buffer, using the specified nesting
 * level for lockdep purposes.
 */
void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();

	down_read_nested(&eb->lock, nest);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_read_lock(eb, start_ns);
}

void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	__btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL);
}

/*
 * Try-lock for read.
 *
 * Return 1 if the rwlock has been taken, 0 otherwise
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (down_read_trylock(&eb->lock)) {
		eb->lock_owner = current->pid;
		trace_btrfs_try_tree_read_lock(eb);
		return 1;
	}
	return 0;
}

/*
 * Try-lock for write.
 *
 * Return 1 if the rwlock has been taken, 0 otherwise
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (down_write_trylock(&eb->lock)) {
		eb->lock_owner = current->pid;
		trace_btrfs_try_tree_write_lock(eb);
		return 1;
	}
	return 0;
}

/*
 * Release read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	eb->lock_owner = 0;
	up_read(&eb->lock);
}

/*
 * __btrfs_tree_lock - lock eb for write
 * @eb: the eb to lock
 * @nest: the nesting to use for the lock
 *
 * Returns with the eb->lock write locked.
 */
void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
	__acquires(&eb->lock)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	down_write_nested(&eb->lock, nest);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

void btrfs_tree_lock(struct extent_buffer *eb)
{
	__btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
}

/*
 * Release the write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	up_write(&eb->lock);
}

/*
 * This releases any locks held in the path starting at level and going all the
 * way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
 * cases, such as COW of the block at slot zero in the node. This ignores
 * those rules, and it should only be called when there are no more updates to
 * be done higher up in the tree.
 */
void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}

/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with write lock held
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with read lock held
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * DREW locks
 * ==========
 *
 * DREW stands for double-reader-writer-exclusion lock. It's used in
 * situations where you want to provide A-B exclusion but not AA or BB.
 *
 * The current implementation gives more priority to readers. If a reader and
 * a writer race to acquire their respective sides of the lock, the writer
 * yields its lock as soon as it detects a concurrent reader. Additionally, if
 * there are pending readers, no new writers are allowed to come in and
 * acquire the lock.
 */

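/*
 * A minimal usage sketch (illustrative only, not part of this file): two
 * operation types, A and B, that must exclude each other but may each run
 * concurrently with themselves.  do_type_a() and do_type_b() are
 * hypothetical placeholders, and "lock" is a btrfs_drew_lock set up with
 * btrfs_drew_lock_init().
 *
 *	Side A (the "writer" side):
 *		btrfs_drew_write_lock(&lock);
 *		do_type_a();
 *		btrfs_drew_write_unlock(&lock);
 *
 *	Side B (the "reader" side):
 *		btrfs_drew_read_lock(&lock);
 *		do_type_b();
 *		btrfs_drew_read_unlock(&lock);
 */
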
int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
	int ret;

	ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
	if (ret)
		return ret;

	atomic_set(&lock->readers, 0);
	init_waitqueue_head(&lock->pending_readers);
	init_waitqueue_head(&lock->pending_writers);

	return 0;
}

void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
{
	percpu_counter_destroy(&lock->writers);
}

/* Return true if acquisition is successful, false otherwise */
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
{
	if (atomic_read(&lock->readers))
		return false;

	percpu_counter_inc(&lock->writers);

	/* Ensure writers count is updated before we check for pending readers */
	smp_mb();
	if (atomic_read(&lock->readers)) {
		btrfs_drew_write_unlock(lock);
		return false;
	}

	return true;
}

void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
{
	while (true) {
		if (btrfs_drew_try_write_lock(lock))
			return;
		wait_event(lock->pending_writers, !atomic_read(&lock->readers));
	}
}

void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
	percpu_counter_dec(&lock->writers);
	cond_wake_up(&lock->pending_readers);
}

void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
{
	atomic_inc(&lock->readers);

	/*
	 * Ensure the pending reader count is perceived BEFORE this reader
	 * goes to sleep in case of active writers. This guarantees new writers
	 * won't be allowed and that the current reader will be woken up when
	 * the last active writer finishes its job.
	 */
	smp_mb__after_atomic();

	wait_event(lock->pending_readers,
		   percpu_counter_sum(&lock->writers) == 0);
}

void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
{
	/*
	 * atomic_dec_and_test implies a full barrier, so woken up writers
	 * are guaranteed to see the decrement
	 */
	if (atomic_dec_and_test(&lock->readers))
		wake_up(&lock->pending_writers);
}