Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 */
5
6#include <linux/sched.h>
7#include <linux/pagemap.h>
8#include <linux/spinlock.h>
9#include <linux/page-flags.h>
10#include <asm/bug.h>
11#include "misc.h"
12#include "ctree.h"
13#include "extent_io.h"
14#include "locking.h"
15#include "accessors.h"
16
17/*
18 * Lockdep class keys for extent_buffer->lock's in this root. For a given
19 * eb, the lockdep key is determined by the btrfs_root it belongs to and
20 * the level the eb occupies in the tree.
21 *
22 * Different roots are used for different purposes and may nest inside each
23 * other and they require separate keysets. As lockdep keys should be
24 * static, assign keysets according to the purpose of the root as indicated
25 * by btrfs_root->root_key.objectid. This ensures that all special purpose
26 * roots have separate keysets.
27 *
28 * Lock-nesting across peer nodes is always done with the immediate parent
29 * node locked thus preventing deadlock. As lockdep doesn't know this, use
30 * subclass to avoid triggering lockdep warning in such cases.
31 *
32 * The key is set by the readpage_end_io_hook after the buffer has passed
33 * csum validation but before the pages are unlocked. It is also set by
34 * btrfs_init_new_buffer on freshly allocated blocks.
35 *
36 * We also add a check to make sure the highest level of the tree is the
37 * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code
38 * needs update as well.
39 */
40#ifdef CONFIG_DEBUG_LOCK_ALLOC
41#if BTRFS_MAX_LEVEL != 8
42#error
43#endif
44
45#define DEFINE_LEVEL(stem, level) \
46 .names[level] = "btrfs-" stem "-0" #level,
47
48#define DEFINE_NAME(stem) \
49 DEFINE_LEVEL(stem, 0) \
50 DEFINE_LEVEL(stem, 1) \
51 DEFINE_LEVEL(stem, 2) \
52 DEFINE_LEVEL(stem, 3) \
53 DEFINE_LEVEL(stem, 4) \
54 DEFINE_LEVEL(stem, 5) \
55 DEFINE_LEVEL(stem, 6) \
56 DEFINE_LEVEL(stem, 7)
57
58static struct btrfs_lockdep_keyset {
59 u64 id; /* root objectid */
60 /* Longest entry: btrfs-free-space-00 */
61 char names[BTRFS_MAX_LEVEL][20];
62 struct lock_class_key keys[BTRFS_MAX_LEVEL];
63} btrfs_lockdep_keysets[] = {
64 { .id = BTRFS_ROOT_TREE_OBJECTID, DEFINE_NAME("root") },
65 { .id = BTRFS_EXTENT_TREE_OBJECTID, DEFINE_NAME("extent") },
66 { .id = BTRFS_CHUNK_TREE_OBJECTID, DEFINE_NAME("chunk") },
67 { .id = BTRFS_DEV_TREE_OBJECTID, DEFINE_NAME("dev") },
68 { .id = BTRFS_CSUM_TREE_OBJECTID, DEFINE_NAME("csum") },
69 { .id = BTRFS_QUOTA_TREE_OBJECTID, DEFINE_NAME("quota") },
70 { .id = BTRFS_TREE_LOG_OBJECTID, DEFINE_NAME("log") },
71 { .id = BTRFS_TREE_RELOC_OBJECTID, DEFINE_NAME("treloc") },
72 { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, DEFINE_NAME("dreloc") },
73 { .id = BTRFS_UUID_TREE_OBJECTID, DEFINE_NAME("uuid") },
74 { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") },
75 { .id = 0, DEFINE_NAME("tree") },
76};
77
78#undef DEFINE_LEVEL
79#undef DEFINE_NAME
80
81void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level)
82{
83 struct btrfs_lockdep_keyset *ks;
84
85 BUG_ON(level >= ARRAY_SIZE(ks->keys));
86
87 /* Find the matching keyset, id 0 is the default entry */
88 for (ks = btrfs_lockdep_keysets; ks->id; ks++)
89 if (ks->id == objectid)
90 break;
91
92 lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]);
93}
94
95void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb)
96{
97 if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
98 btrfs_set_buffer_lockdep_class(root->root_key.objectid,
99 eb, btrfs_header_level(eb));
100}
101
102#endif
103
104/*
105 * Extent buffer locking
106 * =====================
107 *
108 * We use a rw_semaphore for tree locking, and the semantics are exactly the
109 * same:
110 *
111 * - reader/writer exclusion
112 * - writer/writer exclusion
113 * - reader/reader sharing
114 * - try-lock semantics for readers and writers
115 *
116 * The rwsem implementation does opportunistic spinning which reduces number of
117 * times the locking task needs to sleep.
118 */
119
120/*
121 * __btrfs_tree_read_lock - lock extent buffer for read
122 * @eb: the eb to be locked
123 * @nest: the nesting level to be used for lockdep
124 *
125 * This takes the read lock on the extent buffer, using the specified nesting
126 * level for lockdep purposes.
127 */
128void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
129{
130 u64 start_ns = 0;
131
132 if (trace_btrfs_tree_read_lock_enabled())
133 start_ns = ktime_get_ns();
134
135 down_read_nested(&eb->lock, nest);
136 trace_btrfs_tree_read_lock(eb, start_ns);
137}
138
139void btrfs_tree_read_lock(struct extent_buffer *eb)
140{
141 __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL);
142}
143
144/*
145 * Try-lock for read.
146 *
147 * Return 1 if the rwlock has been taken, 0 otherwise
148 */
149int btrfs_try_tree_read_lock(struct extent_buffer *eb)
150{
151 if (down_read_trylock(&eb->lock)) {
152 trace_btrfs_try_tree_read_lock(eb);
153 return 1;
154 }
155 return 0;
156}
157
158/*
159 * Try-lock for write.
160 *
161 * Return 1 if the rwlock has been taken, 0 otherwise
162 */
163int btrfs_try_tree_write_lock(struct extent_buffer *eb)
164{
165 if (down_write_trylock(&eb->lock)) {
166 eb->lock_owner = current->pid;
167 trace_btrfs_try_tree_write_lock(eb);
168 return 1;
169 }
170 return 0;
171}
172
173/*
174 * Release read lock.
175 */
176void btrfs_tree_read_unlock(struct extent_buffer *eb)
177{
178 trace_btrfs_tree_read_unlock(eb);
179 up_read(&eb->lock);
180}
181
182/*
183 * __btrfs_tree_lock - lock eb for write
184 * @eb: the eb to lock
185 * @nest: the nesting to use for the lock
186 *
187 * Returns with the eb->lock write locked.
188 */
189void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
190 __acquires(&eb->lock)
191{
192 u64 start_ns = 0;
193
194 if (trace_btrfs_tree_lock_enabled())
195 start_ns = ktime_get_ns();
196
197 down_write_nested(&eb->lock, nest);
198 eb->lock_owner = current->pid;
199 trace_btrfs_tree_lock(eb, start_ns);
200}
201
202void btrfs_tree_lock(struct extent_buffer *eb)
203{
204 __btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
205}
206
207/*
208 * Release the write lock.
209 */
210void btrfs_tree_unlock(struct extent_buffer *eb)
211{
212 trace_btrfs_tree_unlock(eb);
213 eb->lock_owner = 0;
214 up_write(&eb->lock);
215}
216
217/*
218 * This releases any locks held in the path starting at level and going all the
219 * way up to the root.
220 *
221 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
222 * cases, such as COW of the block at slot zero in the node. This ignores
223 * those rules, and it should only be called when there are no more updates to
224 * be done higher up in the tree.
225 */
226void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
227{
228 int i;
229
230 if (path->keep_locks)
231 return;
232
233 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
234 if (!path->nodes[i])
235 continue;
236 if (!path->locks[i])
237 continue;
238 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
239 path->locks[i] = 0;
240 }
241}
242
243/*
244 * Loop around taking references on and locking the root node of the tree until
245 * we end up with a lock on the root node.
246 *
247 * Return: root extent buffer with write lock held
248 */
249struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
250{
251 struct extent_buffer *eb;
252
253 while (1) {
254 eb = btrfs_root_node(root);
255
256 btrfs_maybe_reset_lockdep_class(root, eb);
257 btrfs_tree_lock(eb);
258 if (eb == root->node)
259 break;
260 btrfs_tree_unlock(eb);
261 free_extent_buffer(eb);
262 }
263 return eb;
264}
265
266/*
267 * Loop around taking references on and locking the root node of the tree until
268 * we end up with a lock on the root node.
269 *
270 * Return: root extent buffer with read lock held
271 */
272struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
273{
274 struct extent_buffer *eb;
275
276 while (1) {
277 eb = btrfs_root_node(root);
278
279 btrfs_maybe_reset_lockdep_class(root, eb);
280 btrfs_tree_read_lock(eb);
281 if (eb == root->node)
282 break;
283 btrfs_tree_read_unlock(eb);
284 free_extent_buffer(eb);
285 }
286 return eb;
287}
288
289/*
290 * Loop around taking references on and locking the root node of the tree in
291 * nowait mode until we end up with a lock on the root node or returning to
292 * avoid blocking.
293 *
294 * Return: root extent buffer with read lock held or -EAGAIN.
295 */
296struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root)
297{
298 struct extent_buffer *eb;
299
300 while (1) {
301 eb = btrfs_root_node(root);
302 if (!btrfs_try_tree_read_lock(eb)) {
303 free_extent_buffer(eb);
304 return ERR_PTR(-EAGAIN);
305 }
306 if (eb == root->node)
307 break;
308 btrfs_tree_read_unlock(eb);
309 free_extent_buffer(eb);
310 }
311 return eb;
312}
313
314/*
315 * DREW locks
316 * ==========
317 *
318 * DREW stands for double-reader-writer-exclusion lock. It's used in situation
319 * where you want to provide A-B exclusion but not AA or BB.
320 *
321 * Currently implementation gives more priority to reader. If a reader and a
322 * writer both race to acquire their respective sides of the lock the writer
323 * would yield its lock as soon as it detects a concurrent reader. Additionally
324 * if there are pending readers no new writers would be allowed to come in and
325 * acquire the lock.
326 */
327
328int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
329{
330 int ret;
331
332 ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
333 if (ret)
334 return ret;
335
336 atomic_set(&lock->readers, 0);
337 init_waitqueue_head(&lock->pending_readers);
338 init_waitqueue_head(&lock->pending_writers);
339
340 return 0;
341}
342
343void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
344{
345 percpu_counter_destroy(&lock->writers);
346}
347
348/* Return true if acquisition is successful, false otherwise */
349bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
350{
351 if (atomic_read(&lock->readers))
352 return false;
353
354 percpu_counter_inc(&lock->writers);
355
356 /* Ensure writers count is updated before we check for pending readers */
357 smp_mb();
358 if (atomic_read(&lock->readers)) {
359 btrfs_drew_write_unlock(lock);
360 return false;
361 }
362
363 return true;
364}
365
366void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
367{
368 while (true) {
369 if (btrfs_drew_try_write_lock(lock))
370 return;
371 wait_event(lock->pending_writers, !atomic_read(&lock->readers));
372 }
373}
374
375void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
376{
377 percpu_counter_dec(&lock->writers);
378 cond_wake_up(&lock->pending_readers);
379}
380
381void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
382{
383 atomic_inc(&lock->readers);
384
385 /*
386 * Ensure the pending reader count is perceieved BEFORE this reader
387 * goes to sleep in case of active writers. This guarantees new writers
388 * won't be allowed and that the current reader will be woken up when
389 * the last active writer finishes its jobs.
390 */
391 smp_mb__after_atomic();
392
393 wait_event(lock->pending_readers,
394 percpu_counter_sum(&lock->writers) == 0);
395}
396
397void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
398{
399 /*
400 * atomic_dec_and_test implies a full barrier, so woken up writers
401 * are guaranteed to see the decrement
402 */
403 if (atomic_dec_and_test(&lock->readers))
404 wake_up(&lock->pending_writers);
405}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 */
5
6#include <linux/sched.h>
7#include <linux/pagemap.h>
8#include <linux/spinlock.h>
9#include <linux/page-flags.h>
10#include <asm/bug.h>
11#include "misc.h"
12#include "ctree.h"
13#include "extent_io.h"
14#include "locking.h"
15
16#ifdef CONFIG_BTRFS_DEBUG
17static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
18{
19 WARN_ON(eb->spinning_writers);
20 eb->spinning_writers++;
21}
22
23static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
24{
25 WARN_ON(eb->spinning_writers != 1);
26 eb->spinning_writers--;
27}
28
29static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
30{
31 WARN_ON(eb->spinning_writers);
32}
33
34static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
35{
36 atomic_inc(&eb->spinning_readers);
37}
38
39static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
40{
41 WARN_ON(atomic_read(&eb->spinning_readers) == 0);
42 atomic_dec(&eb->spinning_readers);
43}
44
45static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
46{
47 atomic_inc(&eb->read_locks);
48}
49
50static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
51{
52 atomic_dec(&eb->read_locks);
53}
54
55static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
56{
57 BUG_ON(!atomic_read(&eb->read_locks));
58}
59
60static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
61{
62 eb->write_locks++;
63}
64
65static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
66{
67 eb->write_locks--;
68}
69
70void btrfs_assert_tree_locked(struct extent_buffer *eb)
71{
72 BUG_ON(!eb->write_locks);
73}
74
75#else
76static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
77static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
78static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
79static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
80static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
81static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
82static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
83static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
84void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
85static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
86static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
87#endif
88
89void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
90{
91 trace_btrfs_set_lock_blocking_read(eb);
92 /*
93 * No lock is required. The lock owner may change if we have a read
94 * lock, but it won't change to or away from us. If we have the write
95 * lock, we are the owner and it'll never change.
96 */
97 if (eb->lock_nested && current->pid == eb->lock_owner)
98 return;
99 btrfs_assert_tree_read_locked(eb);
100 atomic_inc(&eb->blocking_readers);
101 btrfs_assert_spinning_readers_put(eb);
102 read_unlock(&eb->lock);
103}
104
105void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
106{
107 trace_btrfs_set_lock_blocking_write(eb);
108 /*
109 * No lock is required. The lock owner may change if we have a read
110 * lock, but it won't change to or away from us. If we have the write
111 * lock, we are the owner and it'll never change.
112 */
113 if (eb->lock_nested && current->pid == eb->lock_owner)
114 return;
115 if (eb->blocking_writers == 0) {
116 btrfs_assert_spinning_writers_put(eb);
117 btrfs_assert_tree_locked(eb);
118 eb->blocking_writers++;
119 write_unlock(&eb->lock);
120 }
121}
122
123/*
124 * take a spinning read lock. This will wait for any blocking
125 * writers
126 */
127void btrfs_tree_read_lock(struct extent_buffer *eb)
128{
129 u64 start_ns = 0;
130
131 if (trace_btrfs_tree_read_lock_enabled())
132 start_ns = ktime_get_ns();
133again:
134 read_lock(&eb->lock);
135 BUG_ON(eb->blocking_writers == 0 &&
136 current->pid == eb->lock_owner);
137 if (eb->blocking_writers && current->pid == eb->lock_owner) {
138 /*
139 * This extent is already write-locked by our thread. We allow
140 * an additional read lock to be added because it's for the same
141 * thread. btrfs_find_all_roots() depends on this as it may be
142 * called on a partly (write-)locked tree.
143 */
144 BUG_ON(eb->lock_nested);
145 eb->lock_nested = true;
146 read_unlock(&eb->lock);
147 trace_btrfs_tree_read_lock(eb, start_ns);
148 return;
149 }
150 if (eb->blocking_writers) {
151 read_unlock(&eb->lock);
152 wait_event(eb->write_lock_wq,
153 eb->blocking_writers == 0);
154 goto again;
155 }
156 btrfs_assert_tree_read_locks_get(eb);
157 btrfs_assert_spinning_readers_get(eb);
158 trace_btrfs_tree_read_lock(eb, start_ns);
159}
160
161/*
162 * take a spinning read lock.
163 * returns 1 if we get the read lock and 0 if we don't
164 * this won't wait for blocking writers
165 */
166int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
167{
168 if (eb->blocking_writers)
169 return 0;
170
171 read_lock(&eb->lock);
172 if (eb->blocking_writers) {
173 read_unlock(&eb->lock);
174 return 0;
175 }
176 btrfs_assert_tree_read_locks_get(eb);
177 btrfs_assert_spinning_readers_get(eb);
178 trace_btrfs_tree_read_lock_atomic(eb);
179 return 1;
180}
181
182/*
183 * returns 1 if we get the read lock and 0 if we don't
184 * this won't wait for blocking writers
185 */
186int btrfs_try_tree_read_lock(struct extent_buffer *eb)
187{
188 if (eb->blocking_writers)
189 return 0;
190
191 if (!read_trylock(&eb->lock))
192 return 0;
193
194 if (eb->blocking_writers) {
195 read_unlock(&eb->lock);
196 return 0;
197 }
198 btrfs_assert_tree_read_locks_get(eb);
199 btrfs_assert_spinning_readers_get(eb);
200 trace_btrfs_try_tree_read_lock(eb);
201 return 1;
202}
203
204/*
205 * returns 1 if we get the read lock and 0 if we don't
206 * this won't wait for blocking writers or readers
207 */
208int btrfs_try_tree_write_lock(struct extent_buffer *eb)
209{
210 if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
211 return 0;
212
213 write_lock(&eb->lock);
214 if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
215 write_unlock(&eb->lock);
216 return 0;
217 }
218 btrfs_assert_tree_write_locks_get(eb);
219 btrfs_assert_spinning_writers_get(eb);
220 eb->lock_owner = current->pid;
221 trace_btrfs_try_tree_write_lock(eb);
222 return 1;
223}
224
225/*
226 * drop a spinning read lock
227 */
228void btrfs_tree_read_unlock(struct extent_buffer *eb)
229{
230 trace_btrfs_tree_read_unlock(eb);
231 /*
232 * if we're nested, we have the write lock. No new locking
233 * is needed as long as we are the lock owner.
234 * The write unlock will do a barrier for us, and the lock_nested
235 * field only matters to the lock owner.
236 */
237 if (eb->lock_nested && current->pid == eb->lock_owner) {
238 eb->lock_nested = false;
239 return;
240 }
241 btrfs_assert_tree_read_locked(eb);
242 btrfs_assert_spinning_readers_put(eb);
243 btrfs_assert_tree_read_locks_put(eb);
244 read_unlock(&eb->lock);
245}
246
247/*
248 * drop a blocking read lock
249 */
250void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
251{
252 trace_btrfs_tree_read_unlock_blocking(eb);
253 /*
254 * if we're nested, we have the write lock. No new locking
255 * is needed as long as we are the lock owner.
256 * The write unlock will do a barrier for us, and the lock_nested
257 * field only matters to the lock owner.
258 */
259 if (eb->lock_nested && current->pid == eb->lock_owner) {
260 eb->lock_nested = false;
261 return;
262 }
263 btrfs_assert_tree_read_locked(eb);
264 WARN_ON(atomic_read(&eb->blocking_readers) == 0);
265 /* atomic_dec_and_test implies a barrier */
266 if (atomic_dec_and_test(&eb->blocking_readers))
267 cond_wake_up_nomb(&eb->read_lock_wq);
268 btrfs_assert_tree_read_locks_put(eb);
269}
270
271/*
272 * take a spinning write lock. This will wait for both
273 * blocking readers or writers
274 */
275void btrfs_tree_lock(struct extent_buffer *eb)
276{
277 u64 start_ns = 0;
278
279 if (trace_btrfs_tree_lock_enabled())
280 start_ns = ktime_get_ns();
281
282 WARN_ON(eb->lock_owner == current->pid);
283again:
284 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
285 wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
286 write_lock(&eb->lock);
287 if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
288 write_unlock(&eb->lock);
289 goto again;
290 }
291 btrfs_assert_spinning_writers_get(eb);
292 btrfs_assert_tree_write_locks_get(eb);
293 eb->lock_owner = current->pid;
294 trace_btrfs_tree_lock(eb, start_ns);
295}
296
297/*
298 * drop a spinning or a blocking write lock.
299 */
300void btrfs_tree_unlock(struct extent_buffer *eb)
301{
302 int blockers = eb->blocking_writers;
303
304 BUG_ON(blockers > 1);
305
306 btrfs_assert_tree_locked(eb);
307 trace_btrfs_tree_unlock(eb);
308 eb->lock_owner = 0;
309 btrfs_assert_tree_write_locks_put(eb);
310
311 if (blockers) {
312 btrfs_assert_no_spinning_writers(eb);
313 eb->blocking_writers--;
314 /*
315 * We need to order modifying blocking_writers above with
316 * actually waking up the sleepers to ensure they see the
317 * updated value of blocking_writers
318 */
319 cond_wake_up(&eb->write_lock_wq);
320 } else {
321 btrfs_assert_spinning_writers_put(eb);
322 write_unlock(&eb->lock);
323 }
324}