v3.5.6
 
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
 
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
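	/*
	 * If this thread already holds the write lock and took a nested
	 * read lock (lock_nested), there is no spinning counter to
	 * convert; lock_nested and lock_owner are sampled under the
	 * read lock.
	 */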
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
	return;
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
 
	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		if (atomic_dec_and_test(&eb->blocking_writers))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		if (atomic_dec_and_test(&eb->blocking_readers))
			wake_up(&eb->read_lock_wq);
	}
	return;
}

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	read_unlock(&eb->lock);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	if (atomic_dec_and_test(&eb->blocking_readers))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
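		/* make sure the decrement is visible before we wake waiters */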
		smp_wmb();
		wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}
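
For context, a minimal usage sketch (not part of locking.c) of how a caller pairs these primitives in the v3.5.6 API: take the spinning write lock, switch it to the blocking form around work that may sleep, then switch back before unlocking. The helper do_blocking_work() is hypothetical.

static void example_write_path(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);			/* spinning write lock */

	/* about to do something that can sleep: convert to blocking */
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
	do_blocking_work(eb);			/* hypothetical sleeping work */

	/* reacquire the spinning form, then drop the lock */
	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
	btrfs_tree_unlock(eb);
}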
v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
}

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
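
/*
 * Illustrative sketch, not part of this file: modeled on
 * btrfs_read_lock_root_node() in ctree.c.  Because btrfs_tree_read_lock()
 * can sleep waiting for blocking writers, the tree root may change before
 * the lock is finally taken, so callers retry until the buffer they
 * locked is still the current root.
 */
static struct extent_buffer *example_read_lock_root(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);	/* referenced root node */
		btrfs_tree_read_lock(eb);
		if (eb == root->node)		/* still the root? done */
			return eb;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
}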

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/*
	 * atomic_dec_and_test implies a barrier for waitqueue_active
	 */
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/*
		 * Make sure counter is updated before we wake up waiters.
		 */
		smp_mb__after_atomic();
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}
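
As a closing illustration, a minimal sketch (not from the kernel tree) of how the try-lock variant composes with the sleeping path; example_modify() and its body are hypothetical:

static void example_modify(struct extent_buffer *eb)
{
	if (!btrfs_try_tree_write_lock(eb)) {
		/* contended: fall back to the sleeping lock */
		btrfs_tree_lock(eb);
	}

	/* ... modify the buffer under the spinning write lock ... */

	btrfs_tree_unlock(eb);
}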