// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

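/*
 * Extent buffer lock state is mirrored by four debug counters
 * (spinning_writers, spinning_readers, read_locks and write_locks).
 * The helpers below only assert on those counters; with
 * CONFIG_BTRFS_DEBUG disabled they compile to empty functions.
 */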
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
	eb->spinning_writers++;
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers != 1);
	eb->spinning_writers--;
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
	eb->write_locks++;
}

static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
	eb->write_locks--;
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!eb->write_locks);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif

/*
 * Convert a spinning read lock into a blocking read lock: bump the
 * count of blocking readers and drop the rwlock so other threads can
 * make progress while this one may sleep.
 */
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_read(eb);
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}

/*
 * Convert a spinning write lock into a blocking write lock: record the
 * blocking writer and drop the rwlock. If the lock is already blocking,
 * this is a no-op.
 */
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_write(eb);
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (eb->blocking_writers == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		eb->blocking_writers++;
		write_unlock(&eb->lock);
	}
}

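/*
 * Typical conversion pattern (a sketch for illustration, not code taken
 * from a caller in this file): take the spinning lock, convert it to a
 * blocking lock before doing work that may sleep, then drop the
 * blocking lock:
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_read(eb);
 *	... work that may sleep, e.g. reading another tree block ...
 *	btrfs_tree_read_unlock_blocking(eb);
 */
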
/*
 * Take a spinning read lock. This will wait for any blocking
 * writers.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();
again:
	read_lock(&eb->lock);
	BUG_ON(eb->blocking_writers == 0 &&
	       current->pid == eb->lock_owner);
	if (eb->blocking_writers && current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = true;
		read_unlock(&eb->lock);
		trace_btrfs_tree_read_lock(eb, start_ns);
		return;
	}
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   eb->blocking_writers == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock(eb, start_ns);
}

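/*
 * Sketch of the nested case handled above (for illustration): a thread
 * that already write-locked eb and converted the lock to blocking may
 * take an additional read lock on the same buffer, e.g. when
 * btrfs_find_all_roots() is called on a partly write-locked tree. The
 * second acquisition only sets lock_nested; the matching read unlock
 * clears it without touching the rwlock.
 */
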
/*
 * Take a spinning read lock. Returns 1 if we get the read lock and 0
 * if we don't. This won't wait for blocking writers.
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	read_lock(&eb->lock);
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock_atomic(eb);
	return 1;
}

/*
 * Try to take a spinning read lock without blocking on the rwlock.
 * Returns 1 if we get the read lock and 0 if we don't. This won't
 * wait for blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_try_tree_read_lock(eb);
	return 1;
}

/*
 * Try to take a spinning write lock. Returns 1 if we get the write
 * lock and 0 if we don't. This won't wait for blocking writers or
 * readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_write_locks_get(eb);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_try_tree_write_lock(eb);
	return 1;
}

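/*
 * Trylock usage sketch (for illustration only; the caller and fallback
 * policy shown are assumptions, not code from this file): an optimistic
 * caller can attempt the lock and fall back to the sleeping variant on
 * failure:
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		btrfs_tree_lock(eb);
 */
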
/*
 * Drop a spinning read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	/*
	 * If we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}

/*
 * Drop a blocking read lock.
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock_blocking(eb);
	/*
	 * If we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}

/*
 * Take a spinning write lock. This will wait for both blocking
 * readers and writers.
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	btrfs_assert_tree_write_locks_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

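/*
 * Write lock lifecycle sketch (for illustration; the caller shown is an
 * assumption, not code from this file): btrfs_tree_unlock() below
 * handles both the spinning and the blocking case, so the same unlock
 * is used whether or not the lock was converted:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	... work that may sleep ...
 *	btrfs_tree_unlock(eb);
 */
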
/*
 * Drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = eb->blocking_writers;

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	btrfs_assert_tree_write_locks_put(eb);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		eb->blocking_writers--;
		/*
		 * We need to order modifying blocking_writers above with
		 * actually waking up the sleepers to ensure they see the
		 * updated value of blocking_writers.
		 */
		cond_wake_up(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}