v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2008 Oracle.  All rights reserved.
  4 */
  5
  6#include <linux/sched.h>
  7#include <linux/pagemap.h>
  8#include <linux/spinlock.h>
  9#include <linux/page-flags.h>
 10#include <asm/bug.h>
 11#include "ctree.h"
 12#include "extent_io.h"
 13#include "locking.h"
 14
 15static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
 16
 17/*
 18 * if we currently have a spinning reader or writer lock
 19 * (indicated by the rw flag) this will bump the count
 20 * of blocking holders and drop the spinlock.
 21 */
 22void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 23{
 24	/*
 25	 * no lock is required.  The lock owner may change if
 26	 * we have a read lock, but it won't change to or away
 27	 * from us.  If we have the write lock, we are the owner
 28	 * and it'll never change.
 29	 */
 30	if (eb->lock_nested && current->pid == eb->lock_owner)
 31		return;
 32	if (rw == BTRFS_WRITE_LOCK) {
 33		if (atomic_read(&eb->blocking_writers) == 0) {
 34			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
 35			atomic_dec(&eb->spinning_writers);
 36			btrfs_assert_tree_locked(eb);
 37			atomic_inc(&eb->blocking_writers);
 38			write_unlock(&eb->lock);
 39		}
 40	} else if (rw == BTRFS_READ_LOCK) {
 41		btrfs_assert_tree_read_locked(eb);
 42		atomic_inc(&eb->blocking_readers);
 43		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
 44		atomic_dec(&eb->spinning_readers);
 45		read_unlock(&eb->lock);
 46	}
 47}
 48
 49/*
 50 * if we currently have a blocking lock, take the spinlock
 51 * and drop our blocking count
 52 */
 53void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 54{
 55	/*
 56	 * no lock is required.  The lock owner may change if
 57	 * we have a read lock, but it won't change to or away
 58	 * from us.  If we have the write lock, we are the owner
 59	 * and it'll never change.
 60	 */
 61	if (eb->lock_nested && current->pid == eb->lock_owner)
 62		return;
 63
 64	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
 65		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
 66		write_lock(&eb->lock);
 67		WARN_ON(atomic_read(&eb->spinning_writers));
 68		atomic_inc(&eb->spinning_writers);
 69		/*
 70		 * atomic_dec_and_test implies a barrier for waitqueue_active
 71		 */
 72		if (atomic_dec_and_test(&eb->blocking_writers) &&
 73		    waitqueue_active(&eb->write_lock_wq))
 74			wake_up(&eb->write_lock_wq);
 75	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
 76		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
 77		read_lock(&eb->lock);
 78		atomic_inc(&eb->spinning_readers);
 79		/*
 80		 * atomic_dec_and_test implies a barrier for waitqueue_active
 81		 */
 82		if (atomic_dec_and_test(&eb->blocking_readers) &&
 83		    waitqueue_active(&eb->read_lock_wq))
 84			wake_up(&eb->read_lock_wq);
 85	}
 86}
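(A rough usage sketch, not part of locking.c: how the two helpers above are meant to pair up
around a long operation. "eb" stands for any extent buffer the caller has locked; context and
error handling are omitted.)

	btrfs_tree_lock(eb);                               /* spinning write lock          */
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);  /* rwlock dropped, waiters sleep */
	/* ... long work: IO, memory allocation, etc. ... */
	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
	btrfs_tree_unlock(eb);                             /* back to spinning, then drop  */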
 87
 88/*
 89 * take a spinning read lock.  This will wait for any blocking
 90 * writers
 91 */
 92void btrfs_tree_read_lock(struct extent_buffer *eb)
 93{
 94again:
 95	BUG_ON(!atomic_read(&eb->blocking_writers) &&
 96	       current->pid == eb->lock_owner);
 97
 98	read_lock(&eb->lock);
 99	if (atomic_read(&eb->blocking_writers) &&
100	    current->pid == eb->lock_owner) {
101		/*
102		 * This extent is already write-locked by our thread. We allow
103		 * an additional read lock to be added because it's for the same
104		 * thread. btrfs_find_all_roots() depends on this as it may be
105		 * called on a partly (write-)locked tree.
106		 */
107		BUG_ON(eb->lock_nested);
108		eb->lock_nested = 1;
109		read_unlock(&eb->lock);
110		return;
111	}
112	if (atomic_read(&eb->blocking_writers)) {
113		read_unlock(&eb->lock);
114		wait_event(eb->write_lock_wq,
115			   atomic_read(&eb->blocking_writers) == 0);
116		goto again;
117	}
118	atomic_inc(&eb->read_locks);
119	atomic_inc(&eb->spinning_readers);
120}
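(Illustrative sketch, not part of the file: the nesting case described in the comment inside the
function above, assuming the calling thread already holds the write lock in blocking mode, e.g.
deep inside btrfs_find_all_roots.)

	/* same thread that owns the blocking write lock on eb */
	btrfs_tree_read_lock(eb);    /* only sets eb->lock_nested and returns     */
	/* ... read-only access to the buffer ... */
	btrfs_tree_read_unlock(eb);  /* clears eb->lock_nested, rwlock untouched  */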
121
122/*
123 * take a spinning read lock.
124 * returns 1 if we get the read lock and 0 if we don't
125 * this won't wait for blocking writers
126 */
127int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
128{
129	if (atomic_read(&eb->blocking_writers))
130		return 0;
131
132	read_lock(&eb->lock);
133	if (atomic_read(&eb->blocking_writers)) {
134		read_unlock(&eb->lock);
135		return 0;
136	}
137	atomic_inc(&eb->read_locks);
138	atomic_inc(&eb->spinning_readers);
139	return 1;
140}
141
142/*
143 * returns 1 if we get the read lock and 0 if we don't
144 * this won't wait for blocking writers
145 */
146int btrfs_try_tree_read_lock(struct extent_buffer *eb)
147{
148	if (atomic_read(&eb->blocking_writers))
149		return 0;
150
151	if (!read_trylock(&eb->lock))
152		return 0;
153
154	if (atomic_read(&eb->blocking_writers)) {
155		read_unlock(&eb->lock);
156		return 0;
157	}
158	atomic_inc(&eb->read_locks);
159	atomic_inc(&eb->spinning_readers);
160	return 1;
161}
162
163/*
164 * returns 1 if we get the read lock and 0 if we don't
165 * this won't wait for blocking writers or readers
166 */
167int btrfs_try_tree_write_lock(struct extent_buffer *eb)
168{
169	if (atomic_read(&eb->blocking_writers) ||
170	    atomic_read(&eb->blocking_readers))
171		return 0;
172
173	write_lock(&eb->lock);
174	if (atomic_read(&eb->blocking_writers) ||
175	    atomic_read(&eb->blocking_readers)) {
176		write_unlock(&eb->lock);
177		return 0;
178	}
179	atomic_inc(&eb->write_locks);
180	atomic_inc(&eb->spinning_writers);
181	eb->lock_owner = current->pid;
182	return 1;
183}
184
185/*
186 * drop a spinning read lock
187 */
188void btrfs_tree_read_unlock(struct extent_buffer *eb)
189{
190	/*
191	 * if we're nested, we have the write lock.  No new locking
192	 * is needed as long as we are the lock owner.
193	 * The write unlock will do a barrier for us, and the lock_nested
194	 * field only matters to the lock owner.
195	 */
196	if (eb->lock_nested && current->pid == eb->lock_owner) {
197		eb->lock_nested = 0;
198		return;
199	}
200	btrfs_assert_tree_read_locked(eb);
201	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
202	atomic_dec(&eb->spinning_readers);
203	atomic_dec(&eb->read_locks);
204	read_unlock(&eb->lock);
205}
206
207/*
208 * drop a blocking read lock
209 */
210void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
211{
212	/*
213	 * if we're nested, we have the write lock.  No new locking
214	 * is needed as long as we are the lock owner.
215	 * The write unlock will do a barrier for us, and the lock_nested
216	 * field only matters to the lock owner.
217	 */
218	if (eb->lock_nested && current->pid == eb->lock_owner) {
219		eb->lock_nested = 0;
220		return;
221	}
222	btrfs_assert_tree_read_locked(eb);
223	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
224	/*
225	 * atomic_dec_and_test implies a barrier for waitqueue_active
226	 */
227	if (atomic_dec_and_test(&eb->blocking_readers) &&
228	    waitqueue_active(&eb->read_lock_wq))
229		wake_up(&eb->read_lock_wq);
230	atomic_dec(&eb->read_locks);
231}
232
233/*
 234 * take a spinning write lock.  This will wait for both
 235 * blocking readers and writers
236 */
237void btrfs_tree_lock(struct extent_buffer *eb)
238{
239	WARN_ON(eb->lock_owner == current->pid);
240again:
241	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
242	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
243	write_lock(&eb->lock);
244	if (atomic_read(&eb->blocking_readers)) {
245		write_unlock(&eb->lock);
246		wait_event(eb->read_lock_wq,
247			   atomic_read(&eb->blocking_readers) == 0);
248		goto again;
249	}
250	if (atomic_read(&eb->blocking_writers)) {
251		write_unlock(&eb->lock);
252		wait_event(eb->write_lock_wq,
253			   atomic_read(&eb->blocking_writers) == 0);
254		goto again;
255	}
256	WARN_ON(atomic_read(&eb->spinning_writers));
257	atomic_inc(&eb->spinning_writers);
258	atomic_inc(&eb->write_locks);
259	eb->lock_owner = current->pid;
260}
261
262/*
263 * drop a spinning or a blocking write lock.
264 */
265void btrfs_tree_unlock(struct extent_buffer *eb)
266{
267	int blockers = atomic_read(&eb->blocking_writers);
268
269	BUG_ON(blockers > 1);
270
271	btrfs_assert_tree_locked(eb);
272	eb->lock_owner = 0;
273	atomic_dec(&eb->write_locks);
274
275	if (blockers) {
276		WARN_ON(atomic_read(&eb->spinning_writers));
277		atomic_dec(&eb->blocking_writers);
278		/*
279		 * Make sure counter is updated before we wake up waiters.
280		 */
281		smp_mb__after_atomic();
282		if (waitqueue_active(&eb->write_lock_wq))
283			wake_up(&eb->write_lock_wq);
284	} else {
285		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
286		atomic_dec(&eb->spinning_writers);
287		write_unlock(&eb->lock);
288	}
289}
290
291void btrfs_assert_tree_locked(struct extent_buffer *eb)
292{
293	BUG_ON(!atomic_read(&eb->write_locks));
294}
295
296static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
297{
298	BUG_ON(!atomic_read(&eb->read_locks));
299}
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2008 Oracle.  All rights reserved.
  4 */
  5
  6#include <linux/sched.h>
  7#include <linux/pagemap.h>
  8#include <linux/spinlock.h>
  9#include <linux/page-flags.h>
 10#include <asm/bug.h>
 11#include "misc.h"
 12#include "ctree.h"
 13#include "extent_io.h"
 14#include "locking.h"
 15
 16/*
 17 * Extent buffer locking
 18 * =====================
 19 *
 20 * The locks use a custom scheme that allows doing more operations than are
 21 * available from the current locking primitives. The building blocks are still
 22 * rwlock and wait queues.
 23 *
 24 * Required semantics:
 25 *
 26 * - reader/writer exclusion
 27 * - writer/writer exclusion
 28 * - reader/reader sharing
 29 * - spinning lock semantics
 30 * - blocking lock semantics
 31 * - try-lock semantics for readers and writers
 32 * - one level nesting, allowing read lock to be taken by the same thread that
 33 *   already has write lock
 34 *
 35 * The extent buffer locks (also called tree locks) manage access to eb data
 36 * related to the storage in the b-tree (keys, items, but not the individual
 37 * members of eb).
 38 * We want concurrency of many readers and safe updates. The underlying locking
 39 * is done by read-write spinlock and the blocking part is implemented using
 40 * counters and wait queues.
 41 *
 42 * spinning semantics - the low-level rwlock is held so all other threads that
 43 *                      want to take it are spinning on it.
 44 *
 45 * blocking semantics - the low-level rwlock is not held but the counter
 46 *                      denotes how many times the blocking lock was held;
 47 *                      sleeping is possible
 48 *
 49 * Write lock always allows only one thread to access the data.
 50 *
 51 *
 52 * Debugging
 53 * ---------
 54 *
 55 * There are additional state counters that are asserted in various contexts,
 56 * removed from non-debug builds to reduce extent_buffer size and for
 57 * performance reasons.
 58 *
 59 *
 60 * Lock nesting
 61 * ------------
 62 *
 63 * A write operation on a tree might indirectly start a lookup on the same
 64 * tree.  This can happen when btrfs_cow_block locks the tree and needs to
 65 * lookup free extents.
 66 *
 67 * btrfs_cow_block
 68 *   ..
 69 *   alloc_tree_block_no_bg_flush
 70 *     btrfs_alloc_tree_block
 71 *       btrfs_reserve_extent
 72 *         ..
 73 *         load_free_space_cache
 74 *           ..
 75 *           btrfs_lookup_file_extent
 76 *             btrfs_search_slot
 77 *
 78 *
 79 * Locking pattern - spinning
 80 * --------------------------
 81 *
 82 * The simple locking scenario, the +--+ denotes the spinning section.
 83 *
 84 * +- btrfs_tree_lock
 85 * | - extent_buffer::rwlock is held
 86 * | - no heavy operations should happen, eg. IO, memory allocations, large
 87 * |   structure traversals
 88 * +- btrfs_tree_unlock
 89 *
 90 *
 91 * Locking pattern - blocking
 92 * --------------------------
 93 *
 94 * The blocking write uses the following scheme.  The +--+ denotes the spinning
 95 * section.
 96 *
 97 * +- btrfs_tree_lock
 98 * |
 99 * +- btrfs_set_lock_blocking_write
100 *
101 *   - allowed: IO, memory allocations, etc.
102 *
103 * -- btrfs_tree_unlock - note, no explicit unblocking necessary
104 *
105 *
106 * Blocking read is similar.
107 *
108 * +- btrfs_tree_read_lock
109 * |
110 * +- btrfs_set_lock_blocking_read
111 *
112 *  - heavy operations allowed
113 *
114 * +- btrfs_tree_read_unlock_blocking
115 * |
116 * +- btrfs_tree_read_unlock
117 *
118 */
119
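(Condensed sketch of the blocking patterns documented above; illustrative only and not part of
locking.c. "eb" is any extent buffer; "heavy work" stands for IO, memory allocation or large
structure traversals.)

	/* blocking write pattern */
	btrfs_tree_lock(eb);                /* rwlock held, spinning section      */
	btrfs_set_lock_blocking_write(eb);  /* rwlock dropped, waiters may sleep  */
	/* ... heavy work ... */
	btrfs_tree_unlock(eb);              /* no explicit unblocking needed      */

	/* blocking read pattern */
	btrfs_tree_read_lock(eb);
	btrfs_set_lock_blocking_read(eb);
	/* ... heavy work ... */
	btrfs_tree_read_unlock_blocking(eb);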
120#ifdef CONFIG_BTRFS_DEBUG
121static inline void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
122{
123	WARN_ON(eb->spinning_writers);
124	eb->spinning_writers++;
125}
126
127static inline void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
128{
129	WARN_ON(eb->spinning_writers != 1);
130	eb->spinning_writers--;
131}
132
133static inline void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
134{
135	WARN_ON(eb->spinning_writers);
136}
137
138static inline void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
139{
140	atomic_inc(&eb->spinning_readers);
141}
142
143static inline void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
144{
145	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
146	atomic_dec(&eb->spinning_readers);
147}
148
149static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
150{
151	atomic_inc(&eb->read_locks);
152}
153
154static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
155{
156	atomic_dec(&eb->read_locks);
157}
158
159static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
160{
161	BUG_ON(!atomic_read(&eb->read_locks));
162}
163
164static inline void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
165{
166	eb->write_locks++;
167}
168
169static inline void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
170{
171	eb->write_locks--;
172}
173
174#else
175static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
176static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
177static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
178static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
179static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
180static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
181static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
182static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
183static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
184static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
185#endif
186
187/*
188 * Mark already held read lock as blocking. Can be nested in write lock by the
189 * same thread.
190 *
 191 * Use when there are potentially long operations ahead so other threads waiting
192 * on the lock will not actively spin but sleep instead.
193 *
194 * The rwlock is released and blocking reader counter is increased.
195 */
196void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
197{
198	trace_btrfs_set_lock_blocking_read(eb);
199	/*
200	 * No lock is required.  The lock owner may change if we have a read
201	 * lock, but it won't change to or away from us.  If we have the write
202	 * lock, we are the owner and it'll never change.
203	 */
204	if (eb->lock_nested && current->pid == eb->lock_owner)
205		return;
206	btrfs_assert_tree_read_locked(eb);
207	atomic_inc(&eb->blocking_readers);
208	btrfs_assert_spinning_readers_put(eb);
209	read_unlock(&eb->lock);
210}
211
212/*
213 * Mark already held write lock as blocking.
214 *
215 * Use when there are potentially long operations ahead so other threads
216 * waiting on the lock will not actively spin but sleep instead.
217 *
218 * The rwlock is released and blocking writers is set.
219 */
220void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
221{
222	trace_btrfs_set_lock_blocking_write(eb);
223	/*
224	 * No lock is required.  The lock owner may change if we have a read
225	 * lock, but it won't change to or away from us.  If we have the write
226	 * lock, we are the owner and it'll never change.
227	 */
228	if (eb->lock_nested && current->pid == eb->lock_owner)
229		return;
230	if (eb->blocking_writers == 0) {
231		btrfs_assert_spinning_writers_put(eb);
232		btrfs_assert_tree_locked(eb);
233		WRITE_ONCE(eb->blocking_writers, 1);
234		write_unlock(&eb->lock);
235	}
236}
237
238/*
239 * Lock the extent buffer for read. Wait for any writers (spinning or blocking).
240 * Can be nested in write lock by the same thread.
241 *
242 * Use when the locked section does only lightweight actions and busy waiting
243 * would be cheaper than making other threads do the wait/wake loop.
244 *
245 * The rwlock is held upon exit.
246 */
247void btrfs_tree_read_lock(struct extent_buffer *eb)
248{
249	u64 start_ns = 0;
250
251	if (trace_btrfs_tree_read_lock_enabled())
252		start_ns = ktime_get_ns();
253again:
254	read_lock(&eb->lock);
255	BUG_ON(eb->blocking_writers == 0 &&
256	       current->pid == eb->lock_owner);
257	if (eb->blocking_writers) {
258		if (current->pid == eb->lock_owner) {
259			/*
260			 * This extent is already write-locked by our thread.
261			 * We allow an additional read lock to be added because
262			 * it's for the same thread. btrfs_find_all_roots()
263			 * depends on this as it may be called on a partly
264			 * (write-)locked tree.
265			 */
266			BUG_ON(eb->lock_nested);
267			eb->lock_nested = true;
268			read_unlock(&eb->lock);
269			trace_btrfs_tree_read_lock(eb, start_ns);
270			return;
271		}
272		read_unlock(&eb->lock);
273		wait_event(eb->write_lock_wq,
274			   READ_ONCE(eb->blocking_writers) == 0);
275		goto again;
276	}
277	btrfs_assert_tree_read_locks_get(eb);
278	btrfs_assert_spinning_readers_get(eb);
279	trace_btrfs_tree_read_lock(eb, start_ns);
280}
281
282/*
283 * Lock extent buffer for read, optimistically expecting that there are no
284 * contending blocking writers. If there are, don't wait.
285 *
286 * Return 1 if the rwlock has been taken, 0 otherwise
287 */
288int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
289{
290	if (READ_ONCE(eb->blocking_writers))
291		return 0;
292
293	read_lock(&eb->lock);
294	/* Refetch value after lock */
295	if (READ_ONCE(eb->blocking_writers)) {
296		read_unlock(&eb->lock);
297		return 0;
298	}
299	btrfs_assert_tree_read_locks_get(eb);
300	btrfs_assert_spinning_readers_get(eb);
301	trace_btrfs_tree_read_lock_atomic(eb);
302	return 1;
303}
304
305/*
306 * Try-lock for read. Don't block or wait for contending writers.
307 *
 308 * Return 1 if the rwlock has been taken, 0 otherwise
309 */
310int btrfs_try_tree_read_lock(struct extent_buffer *eb)
311{
312	if (READ_ONCE(eb->blocking_writers))
313		return 0;
314
315	if (!read_trylock(&eb->lock))
316		return 0;
317
318	/* Refetch value after lock */
319	if (READ_ONCE(eb->blocking_writers)) {
320		read_unlock(&eb->lock);
321		return 0;
322	}
323	btrfs_assert_tree_read_locks_get(eb);
324	btrfs_assert_spinning_readers_get(eb);
325	trace_btrfs_try_tree_read_lock(eb);
326	return 1;
327}
328
329/*
330 * Try-lock for write. May block until the lock is uncontended, but does not
331 * wait until it is free.
332 *
 333 * Return 1 if the rwlock has been taken, 0 otherwise
334 */
335int btrfs_try_tree_write_lock(struct extent_buffer *eb)
336{
337	if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers))
338		return 0;
339
340	write_lock(&eb->lock);
341	/* Refetch value after lock */
342	if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) {
343		write_unlock(&eb->lock);
344		return 0;
345	}
346	btrfs_assert_tree_write_locks_get(eb);
347	btrfs_assert_spinning_writers_get(eb);
348	eb->lock_owner = current->pid;
349	trace_btrfs_try_tree_write_lock(eb);
350	return 1;
351}
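(One plausible caller pattern for the try-lock above, sketched from the description; it is an
assumption, not copied from the kernel. "path" stands for the btrfs_path being walked and is not
defined here.)

	if (!btrfs_try_tree_write_lock(eb)) {
		/* contended: stop spinning on already held path locks, then wait */
		btrfs_set_path_blocking(path);
		btrfs_tree_lock(eb);
	}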
352
353/*
354 * Release read lock. Must be used only if the lock is in spinning mode.  If
355 * the read lock is nested, must pair with read lock before the write unlock.
356 *
357 * The rwlock is not held upon exit.
358 */
359void btrfs_tree_read_unlock(struct extent_buffer *eb)
360{
361	trace_btrfs_tree_read_unlock(eb);
362	/*
363	 * if we're nested, we have the write lock.  No new locking
364	 * is needed as long as we are the lock owner.
365	 * The write unlock will do a barrier for us, and the lock_nested
366	 * field only matters to the lock owner.
367	 */
368	if (eb->lock_nested && current->pid == eb->lock_owner) {
369		eb->lock_nested = false;
370		return;
371	}
372	btrfs_assert_tree_read_locked(eb);
373	btrfs_assert_spinning_readers_put(eb);
374	btrfs_assert_tree_read_locks_put(eb);
375	read_unlock(&eb->lock);
376}
377
378/*
379 * Release read lock, previously set to blocking by a pairing call to
380 * btrfs_set_lock_blocking_read(). Can be nested in write lock by the same
381 * thread.
382 *
383 * State of rwlock is unchanged, last reader wakes waiting threads.
384 */
385void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
386{
387	trace_btrfs_tree_read_unlock_blocking(eb);
388	/*
389	 * if we're nested, we have the write lock.  No new locking
390	 * is needed as long as we are the lock owner.
391	 * The write unlock will do a barrier for us, and the lock_nested
392	 * field only matters to the lock owner.
393	 */
394	if (eb->lock_nested && current->pid == eb->lock_owner) {
395		eb->lock_nested = false;
396		return;
397	}
398	btrfs_assert_tree_read_locked(eb);
399	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
400	/* atomic_dec_and_test implies a barrier */
401	if (atomic_dec_and_test(&eb->blocking_readers))
402		cond_wake_up_nomb(&eb->read_lock_wq);
403	btrfs_assert_tree_read_locks_put(eb);
404}
405
406/*
407 * Lock for write. Wait for all blocking and spinning readers and writers. This
408 * starts context where reader lock could be nested by the same thread.
409 *
410 * The rwlock is held for write upon exit.
411 */
412void btrfs_tree_lock(struct extent_buffer *eb)
413	__acquires(&eb->lock)
414{
415	u64 start_ns = 0;
416
417	if (trace_btrfs_tree_lock_enabled())
418		start_ns = ktime_get_ns();
419
420	WARN_ON(eb->lock_owner == current->pid);
421again:
422	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
423	wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0);
424	write_lock(&eb->lock);
425	/* Refetch value after lock */
426	if (atomic_read(&eb->blocking_readers) ||
427	    READ_ONCE(eb->blocking_writers)) {
428		write_unlock(&eb->lock);
429		goto again;
430	}
431	btrfs_assert_spinning_writers_get(eb);
432	btrfs_assert_tree_write_locks_get(eb);
433	eb->lock_owner = current->pid;
434	trace_btrfs_tree_lock(eb, start_ns);
435}
436
437/*
438 * Release the write lock, either blocking or spinning (ie. there's no need
439 * for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking).
440 * This also ends the context for nesting, the read lock must have been
441 * released already.
442 *
443 * Tasks blocked and waiting are woken, rwlock is not held upon exit.
444 */
445void btrfs_tree_unlock(struct extent_buffer *eb)
446{
447	/*
448	 * This is read both locked and unlocked but always by the same thread
449	 * that already owns the lock so we don't need to use READ_ONCE
450	 */
451	int blockers = eb->blocking_writers;
452
453	BUG_ON(blockers > 1);
454
455	btrfs_assert_tree_locked(eb);
456	trace_btrfs_tree_unlock(eb);
457	eb->lock_owner = 0;
458	btrfs_assert_tree_write_locks_put(eb);
459
460	if (blockers) {
461		btrfs_assert_no_spinning_writers(eb);
462		/* Unlocked write */
463		WRITE_ONCE(eb->blocking_writers, 0);
464		/*
465		 * We need to order modifying blocking_writers above with
466		 * actually waking up the sleepers to ensure they see the
467		 * updated value of blocking_writers
468		 */
469		cond_wake_up(&eb->write_lock_wq);
470	} else {
471		btrfs_assert_spinning_writers_put(eb);
472		write_unlock(&eb->lock);
473	}
474}
475
476/*
477 * Set all locked nodes in the path to blocking locks.  This should be done
478 * before scheduling
479 */
480void btrfs_set_path_blocking(struct btrfs_path *p)
481{
482	int i;
483
484	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
485		if (!p->nodes[i] || !p->locks[i])
486			continue;
487		/*
488		 * If we currently have a spinning reader or writer lock this
489		 * will bump the count of blocking holders and drop the
490		 * spinlock.
491		 */
492		if (p->locks[i] == BTRFS_READ_LOCK) {
493			btrfs_set_lock_blocking_read(p->nodes[i]);
494			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
495		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
496			btrfs_set_lock_blocking_write(p->nodes[i]);
497			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
498		}
499	}
500}
501
502/*
503 * This releases any locks held in the path starting at level and going all the
504 * way up to the root.
505 *
506 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
507 * cases, such as COW of the block at slot zero in the node.  This ignores
508 * those rules, and it should only be called when there are no more updates to
509 * be done higher up in the tree.
510 */
511void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
512{
513	int i;
514
515	if (path->keep_locks)
516		return;
517
518	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
519		if (!path->nodes[i])
520			continue;
521		if (!path->locks[i])
522			continue;
523		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
524		path->locks[i] = 0;
525	}
526}
527
528/*
529 * Loop around taking references on and locking the root node of the tree until
530 * we end up with a lock on the root node.
531 *
532 * Return: root extent buffer with write lock held
533 */
534struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
535{
536	struct extent_buffer *eb;
537
538	while (1) {
539		eb = btrfs_root_node(root);
540		btrfs_tree_lock(eb);
541		if (eb == root->node)
542			break;
543		btrfs_tree_unlock(eb);
544		free_extent_buffer(eb);
545	}
546	return eb;
547}
548
549/*
550 * Loop around taking references on and locking the root node of the tree until
551 * we end up with a lock on the root node.
552 *
553 * Return: root extent buffer with read lock held
554 */
555struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
556{
557	struct extent_buffer *eb;
558
559	while (1) {
560		eb = btrfs_root_node(root);
561		btrfs_tree_read_lock(eb);
562		if (eb == root->node)
563			break;
564		btrfs_tree_read_unlock(eb);
565		free_extent_buffer(eb);
566	}
567	return eb;
568}
569
570/*
571 * DREW locks
572 * ==========
573 *
 574 * DREW stands for double-reader-writer-exclusion lock. It's used in situations
575 * where you want to provide A-B exclusion but not AA or BB.
576 *
 577 * The current implementation gives priority to readers. If a reader and a
578 * writer both race to acquire their respective sides of the lock the writer
579 * would yield its lock as soon as it detects a concurrent reader. Additionally
580 * if there are pending readers no new writers would be allowed to come in and
581 * acquire the lock.
582 */
583
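(A minimal sketch of the A-B exclusion described above; illustrative only, the lock variable and
the two "sides" are hypothetical, and error handling of btrfs_drew_lock_init is omitted.)

	struct btrfs_drew_lock dlock;

	btrfs_drew_lock_init(&dlock);

	/* "A" side (writer API): excluded from B, but many A holders may coexist */
	btrfs_drew_write_lock(&dlock);
	/* ... A-type work ... */
	btrfs_drew_write_unlock(&dlock);

	/* "B" side (reader API): excluded from A, but many B holders may coexist */
	btrfs_drew_read_lock(&dlock);
	/* ... B-type work ... */
	btrfs_drew_read_unlock(&dlock);

	btrfs_drew_lock_destroy(&dlock);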
584int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
585{
586	int ret;
587
588	ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
589	if (ret)
590		return ret;
591
592	atomic_set(&lock->readers, 0);
593	init_waitqueue_head(&lock->pending_readers);
594	init_waitqueue_head(&lock->pending_writers);
595
596	return 0;
597}
598
599void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
600{
601	percpu_counter_destroy(&lock->writers);
602}
603
604/* Return true if acquisition is successful, false otherwise */
605bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
606{
607	if (atomic_read(&lock->readers))
608		return false;
609
610	percpu_counter_inc(&lock->writers);
611
612	/* Ensure writers count is updated before we check for pending readers */
613	smp_mb();
614	if (atomic_read(&lock->readers)) {
615		btrfs_drew_write_unlock(lock);
616		return false;
617	}
618
619	return true;
620}
621
622void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
623{
624	while (true) {
625		if (btrfs_drew_try_write_lock(lock))
626			return;
627		wait_event(lock->pending_writers, !atomic_read(&lock->readers));
628	}
629}
630
631void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
632{
633	percpu_counter_dec(&lock->writers);
634	cond_wake_up(&lock->pending_readers);
635}
636
637void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
638{
639	atomic_inc(&lock->readers);
640
641	/*
 642	 * Ensure the pending reader count is perceived BEFORE this reader
643	 * goes to sleep in case of active writers. This guarantees new writers
644	 * won't be allowed and that the current reader will be woken up when
645	 * the last active writer finishes its jobs.
646	 */
647	smp_mb__after_atomic();
648
649	wait_event(lock->pending_readers,
650		   percpu_counter_sum(&lock->writers) == 0);
651}
652
653void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
654{
655	/*
656	 * atomic_dec_and_test implies a full barrier, so woken up writers
657	 * are guaranteed to see the decrement
658	 */
659	if (atomic_dec_and_test(&lock->readers))
660		wake_up(&lock->pending_writers);
661}