v5.4 (fs/btrfs/locking.c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
	eb->spinning_writers++;
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers != 1);
	eb->spinning_writers--;
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
	eb->write_locks++;
}

static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
	eb->write_locks--;
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!eb->write_locks);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif

void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_read(eb);
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}

void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_write(eb);
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (eb->blocking_writers == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		eb->blocking_writers++;
		write_unlock(&eb->lock);
	}
}

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();
again:
	read_lock(&eb->lock);
	BUG_ON(eb->blocking_writers == 0 &&
	       current->pid == eb->lock_owner);
	if (eb->blocking_writers && current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = true;
		read_unlock(&eb->lock);
		trace_btrfs_tree_read_lock(eb, start_ns);
		return;
	}
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   eb->blocking_writers == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock(eb, start_ns);
}

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	read_lock(&eb->lock);
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock_atomic(eb);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_try_tree_read_lock(eb);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_write_locks_get(eb);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_try_tree_write_lock(eb);
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock_blocking(eb);
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	btrfs_assert_tree_write_locks_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = eb->blocking_writers;

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	btrfs_assert_tree_write_locks_put(eb);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		eb->blocking_writers--;
		/*
		 * We need to order modifying blocking_writers above with
		 * actually waking up the sleepers to ensure they see the
		 * updated value of blocking_writers
		 */
		cond_wake_up(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}
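
For context, a minimal caller-side sketch of the v5.4 API above: take the spinning write lock, convert it to its blocking form before any step that may sleep, and unlock at the end. The function process_eb() and its body are hypothetical illustrations; only the btrfs_* calls come from the file above.

	/* Hypothetical caller (not part of the file above). */
	static void process_eb(struct extent_buffer *eb)
	{
		/* Spin until the write lock is ours; waits out blocking holders. */
		btrfs_tree_lock(eb);

		/*
		 * Convert the spinning write lock to a blocking one so other
		 * tasks wait on the waitqueues instead of spinning on eb->lock
		 * while we do work that may sleep.
		 */
		btrfs_set_lock_blocking_write(eb);

		/* ... work that may sleep would go here (hypothetical) ... */

		/* Drops either form; for a blocking lock it wakes the waiters. */
		btrfs_tree_unlock(eb);
	}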
v4.10.11 (fs/btrfs/locking.c)
 
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
}

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/*
	 * atomic_dec_and_test implies a barrier for waitqueue_active
	 */
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/*
		 * Make sure counter is updated before we wake up waiters.
		 */
		smp_mb();
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}
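
For comparison, a minimal caller-side sketch of the same pattern against the older v4.10.11 API, where the spinning-to-blocking transitions go through the rw-flag helpers shown above. As before, process_eb() is a hypothetical caller; the BTRFS_WRITE_LOCK and BTRFS_WRITE_LOCK_BLOCKING flags are declared in locking.h for this version.

	/* Hypothetical caller (not part of the file above). */
	static void process_eb(struct extent_buffer *eb)
	{
		btrfs_tree_lock(eb);

		/* Bump blocking_writers and drop the rwlock before sleeping. */
		btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);

		/* ... work that may sleep would go here (hypothetical) ... */

		/* Retake the spinlock form and drop the blocking count. */
		btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);

		btrfs_tree_unlock(eb);
	}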