fs/xfs/xfs_extent_busy.c (Linux v4.10.11)

/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"

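/*
 * Insert a busy extent record for [bno, bno + len) in AG agno into the
 * per-AG rbtree, keyed by block number, and onto the transaction's busy
 * list.  The ASSERTs below rely on the new extent not overlapping any
 * extent already tracked in the tree.
 */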
void
xfs_extent_busy_insert(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	struct xfs_extent_busy	*new;
	struct xfs_extent_busy	*busyp;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_SLEEP);
	new->agno = agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);

	pag = xfs_perag_get(tp->t_mountp, new->agno);
	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}
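
/*
 * Hypothetical call site (a sketch, not from this file): a transaction
 * freeing an extent would mark it busy so the space cannot be reallocated
 * and overwritten before the free commits to the log:
 *
 *	xfs_extent_busy_insert(tp, agno, bno, len, 0);
 */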

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  You need to be holding the busy extent tree lock when calling
 * xfs_extent_busy_search(). This function returns 0 for no overlapping busy
 * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
 * match. This is done so that a non-zero return indicates an overlap that
 * will require a synchronous transaction, but it can still be
 * used to distinguish between a partial and an exact match.
 */
int
xfs_extent_busy_search(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;
	struct xfs_extent_busy	*busyp;
	int			match = 0;

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);

	rbp = pag->pagb_tree.rb_node;

	/* find closest start bno overlap */
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
	return match;
}

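/*
 * Illustrative wrapper (a sketch with a made-up name, not part of the
 * original file): a caller that only cares whether any overlap exists can
 * collapse the tri-state result into a boolean.
 */
static inline bool
xfs_extent_busy_overlaps(	/* hypothetical helper */
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	/* 0: no overlap, -1: partial overlap, 1: exact match */
	return xfs_extent_busy_search(mp, agno, bno, len) != 0;
}
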
/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata) __releases(&pag->pagb_lock)
					  __acquires(&pag->pagb_lock)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded.  Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user allocation, we have
	 * no choice but to force the log and retry the search.
	 *
	 * Fortunately this does not happen during normal operation, but
	 * only if the filesystem is very low on space and has to dip into
	 * the AGFL for normal allocations.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * Case 1:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +---------+
		 *        fbno   fend
		 */

		/*
		 * We would have to split the busy extent to be able to track
		 * it correctly, which we cannot do because we would have to
		 * modify the list of busy extents attached to the transaction
		 * or CIL context, which is immutable.
		 *
		 * Force out the log to clear the busy extent and retry the
		 * search.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * Case 2:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +-----------------+
		 *    fbno           fend
		 *
		 * Case 3:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 4:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 5:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +-----------------------------------+
		 *    fbno                             fend
		 *
		 */

		/*
		 * The busy extent is fully covered by the extent we are
		 * allocating, and can simply be removed from the rbtree.
		 * However we cannot remove it from the immutable list
		 * tracking busy extents in the transaction or CIL context,
		 * so set the length to zero to mark it invalid.
		 *
		 * We also need to restart the busy extent search from the
		 * tree root, because erasing the node can rearrange the
		 * tree topology.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * Case 6:
		 *              bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *             +---------+
		 *             fbno   fend
		 *
		 * Case 7:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +------------------+
		 *    fbno            fend
		 *
		 */
		busyp->bno = fend;
	} else if (bbno < fbno) {
		/*
		 * Case 8:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +-------------+
		 *        fbno       fend
		 *
		 * Case 9:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +----------------------+
		 *        fbno                fend
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}


/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */
void
xfs_extent_busy_reuse(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;

	ASSERT(flen > 0);

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
						  userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If *rlen is smaller than
 * args->minlen no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 */
void
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	xfs_agblock_t		*rbno,
	xfs_extlen_t		*rlen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;

	ASSERT(len > 0);

	spin_lock(&args->pag->pagb_lock);
restart:
	fbno = bno;
	flen = len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		/*
		 * If this is a metadata allocation, try to reuse the busy
		 * extent instead of trimming the allocation.
		 */
		if (!xfs_alloc_is_userdata(args->datatype) &&
		    !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
			if (!xfs_extent_busy_update_extent(args->mp, args->pag,
							  busyp, fbno, flen,
							  false))
				goto restart;
			continue;
		}

		if (bbno <= fbno) {
			/* start overlap */

			/*
			 * Case 1:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +---------+
			 *        fbno   fend
			 *
			 * Case 2:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-------------+
			 *    fbno       fend
			 *
			 * Case 3:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +-------------+
			 *        fbno       fend
			 *
			 * Case 4:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-----------------+
			 *    fbno           fend
			 *
			 * No unbusy region in extent, return failure.
			 */
			if (fend <= bend)
				goto fail;

			/*
			 * Case 5:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +----------------------+
			 *        fbno                fend
			 *
			 * Case 6:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *                       +-------+
			 *                       fbno fend
			 */
			fbno = bend;
		} else if (bend >= fend) {
			/* end overlap */

			/*
			 * Case 7:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +------------------+
			 *    fbno            fend
			 *
			 * Case 8:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *    +-------+
			 *    fbno fend
			 */
			fend = bbno;
		} else {
			/* middle overlap */

			/*
			 * Case 9:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +-----------------------------------+
			 *    fbno                             fend
			 *
			 * Can be trimmed to:
			 *    +-------+        OR         +-------+
			 *    fbno fend                   fbno fend
			 *
			 * Backward allocation leads to significant
			 * fragmentation of directories, which degrades
			 * directory performance, therefore we always want to
			 * choose the option that produces forward allocation
			 * patterns.
			 * Preferring the lower bno extent will make the next
			 * request use "fend" as the start of the next
			 * allocation;  if the segment is no longer busy at
			 * that point, we'll get a contiguous allocation, but
			 * even if it is still busy, we will get a forward
			 * allocation.
			 * We try to avoid choosing the segment at "bend",
			 * because that can lead to the next allocation
			 * taking the segment at "fbno", which would be a
			 * backward allocation.  We only use the segment at
			 * "fbno" if it is much larger than the current
			 * requested size, because in that case there's a
			 * good chance subsequent allocations will be
			 * contiguous.
			 */
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfectly */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}
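
		/*
		 * Worked example with illustrative numbers (not from the
		 * code): free extent [0, 40) against busy extent [10, 20)
		 * is a middle overlap.  With minlen = 4 and maxlen = 8 the
		 * left candidate has bbno - fbno = 10 >= maxlen, so we trim
		 * to [0, 10).  With minlen = 16 and maxlen = 32 neither side
		 * qualifies (10 < 32, 20 < 128, 10 < 16) and the trim fails.
		 */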

		flen = fend - fbno;
	}
	spin_unlock(&args->pag->pagb_lock);

	if (fbno != bno || flen != len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len,
					  fbno, flen);
	}
	*rbno = fbno;
	*rlen = flen;
	return;
fail:
	/*
	 * Return a zero extent length as the failure indication.  All callers
	 * re-check if the trimmed extent satisfies the minlen requirement.
	 */
	spin_unlock(&args->pag->pagb_lock);
	trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
	*rbno = fbno;
	*rlen = 0;
}

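/*
 * Hypothetical caller pattern (a sketch, not from this file), following the
 * comment above xfs_extent_busy_trim():
 *
 *	xfs_extent_busy_trim(args, bno, len, &tbno, &tlen);
 *	if (tlen < args->minlen) {
 *		xfs_log_force(args->mp, XFS_LOG_SYNC);
 *		... retry the allocation ...
 *	}
 */

/*
 * Tear down a single busy extent record: remove it from the per-AG rbtree
 * if it is still tracked there (non-zero length), unlink it from the
 * transaction/CIL busy list, and free it.
 */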
STATIC void
xfs_extent_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp)
{
	if (busyp->length) {
		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
						busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kmem_free(busyp);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set skip extents that need to be discarded, and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_extent_busy	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag) {
				spin_unlock(&pag->pagb_lock);
				xfs_perag_put(pag);
			}
			pag = xfs_perag_get(mp, busyp->agno);
			spin_lock(&pag->pagb_lock);
			agno = busyp->agno;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD))
			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
		else
			xfs_extent_busy_clear_one(mp, pag, busyp);
	}

	if (pag) {
		spin_unlock(&pag->pagb_lock);
		xfs_perag_put(pag);
	}
}

/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */
int
xfs_extent_busy_ag_cmp(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	return container_of(a, struct xfs_extent_busy, list)->agno -
		container_of(b, struct xfs_extent_busy, list)->agno;
}
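
/*
 * Hypothetical usage (a sketch, not from this file): a list of busy extents
 * can be sorted with the kernel's list merge sort before being processed
 * AG by AG:
 *
 *	list_sort(NULL, &busy_list, xfs_extent_busy_ag_cmp);
 */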

fs/xfs/xfs_extent_busy.c (Linux v5.4)

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"

void
xfs_extent_busy_insert(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	struct xfs_extent_busy	*new;
	struct xfs_extent_busy	*busyp;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
	new->agno = agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);

	pag = xfs_perag_get(tp->t_mountp, new->agno);
	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  You need to be holding the busy extent tree lock when calling
 * xfs_extent_busy_search(). This function returns 0 for no overlapping busy
 * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
 * match. This is done so that a non-zero return indicates an overlap that
 * will require a synchronous transaction, but it can still be
 * used to distinguish between a partial and an exact match.
 */
int
xfs_extent_busy_search(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;
	struct xfs_extent_busy	*busyp;
	int			match = 0;

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);

	rbp = pag->pagb_tree.rb_node;

	/* find closest start bno overlap */
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
	return match;
}

/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata) __releases(&pag->pagb_lock)
					  __acquires(&pag->pagb_lock)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded.  Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user allocation, we have
	 * no choice but to force the log and retry the search.
	 *
	 * Fortunately this does not happen during normal operation, but
	 * only if the filesystem is very low on space and has to dip into
	 * the AGFL for normal allocations.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * Case 1:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +---------+
		 *        fbno   fend
		 */

		/*
		 * We would have to split the busy extent to be able to track
		 * it correctly, which we cannot do because we would have to
		 * modify the list of busy extents attached to the transaction
		 * or CIL context, which is immutable.
		 *
		 * Force out the log to clear the busy extent and retry the
		 * search.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * Case 2:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +-----------------+
		 *    fbno           fend
		 *
		 * Case 3:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 4:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 5:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +-----------------------------------+
		 *    fbno                             fend
		 *
		 */

		/*
		 * The busy extent is fully covered by the extent we are
		 * allocating, and can simply be removed from the rbtree.
		 * However we cannot remove it from the immutable list
		 * tracking busy extents in the transaction or CIL context,
		 * so set the length to zero to mark it invalid.
		 *
		 * We also need to restart the busy extent search from the
		 * tree root, because erasing the node can rearrange the
		 * tree topology.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * Case 6:
		 *              bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *             +---------+
		 *             fbno   fend
		 *
		 * Case 7:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +------------------+
		 *    fbno            fend
		 *
		 */
		busyp->bno = fend;
	} else if (bbno < fbno) {
		/*
		 * Case 8:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +-------------+
		 *        fbno       fend
		 *
		 * Case 9:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +----------------------+
		 *        fbno                fend
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}


/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */
void
xfs_extent_busy_reuse(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;

	ASSERT(flen > 0);

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
						  userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If *rlen is smaller than
 * args->minlen no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 *
 * Return the current busy generation for the AG if the extent is busy. This
 * value can be used to wait for at least one of the currently busy extents
 * to be cleared. Note that the busy list is not guaranteed to be empty after
 * the gen is woken. The state of a specific extent must always be confirmed
 * with another call to xfs_extent_busy_trim() before it can be used.
 */
bool
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	unsigned		*busy_gen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;
	bool			ret = false;

	ASSERT(*len > 0);

	spin_lock(&args->pag->pagb_lock);
restart:
	fbno = *bno;
	flen = *len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		/*
		 * If this is a metadata allocation, try to reuse the busy
		 * extent instead of trimming the allocation.
		 */
		if (!xfs_alloc_is_userdata(args->datatype) &&
		    !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
			if (!xfs_extent_busy_update_extent(args->mp, args->pag,
							  busyp, fbno, flen,
							  false))
				goto restart;
			continue;
		}

		if (bbno <= fbno) {
			/* start overlap */

			/*
			 * Case 1:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +---------+
			 *        fbno   fend
			 *
			 * Case 2:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-------------+
			 *    fbno       fend
			 *
			 * Case 3:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +-------------+
			 *        fbno       fend
			 *
			 * Case 4:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-----------------+
			 *    fbno           fend
			 *
			 * No unbusy region in extent, return failure.
			 */
			if (fend <= bend)
				goto fail;

			/*
			 * Case 5:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +----------------------+
			 *        fbno                fend
			 *
			 * Case 6:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *                       +-------+
			 *                       fbno fend
			 */
			fbno = bend;
		} else if (bend >= fend) {
			/* end overlap */

			/*
			 * Case 7:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +------------------+
			 *    fbno            fend
			 *
			 * Case 8:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *    +-------+
			 *    fbno fend
			 */
			fend = bbno;
		} else {
			/* middle overlap */

			/*
			 * Case 9:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +-----------------------------------+
			 *    fbno                             fend
			 *
			 * Can be trimmed to:
			 *    +-------+        OR         +-------+
			 *    fbno fend                   fbno fend
			 *
			 * Backward allocation leads to significant
			 * fragmentation of directories, which degrades
			 * directory performance, therefore we always want to
			 * choose the option that produces forward allocation
			 * patterns.
			 * Preferring the lower bno extent will make the next
			 * request use "fend" as the start of the next
			 * allocation;  if the segment is no longer busy at
			 * that point, we'll get a contiguous allocation, but
			 * even if it is still busy, we will get a forward
			 * allocation.
			 * We try to avoid choosing the segment at "bend",
			 * because that can lead to the next allocation
			 * taking the segment at "fbno", which would be a
			 * backward allocation.  We only use the segment at
			 * "fbno" if it is much larger than the current
			 * requested size, because in that case there's a
			 * good chance subsequent allocations will be
			 * contiguous.
			 */
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfectly */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
out:

	if (fbno != *bno || flen != *len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
					  fbno, flen);
		*bno = fbno;
		*len = flen;
		*busy_gen = args->pag->pagb_gen;
		ret = true;
	}
	spin_unlock(&args->pag->pagb_lock);
	return ret;
fail:
	/*
	 * Return a zero extent length as the failure indication.  All callers
	 * re-check if the trimmed extent satisfies the minlen requirement.
	 */
	flen = 0;
	goto out;
}

STATIC void
xfs_extent_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp)
{
	if (busyp->length) {
		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
						busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kmem_free(busyp);
}

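/*
 * Drop the pagb_lock and the perag reference taken while clearing busy
 * extents; if any extents were actually removed, bump the busy generation
 * and wake all waiters.
 */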
static void
xfs_extent_busy_put_pag(
	struct xfs_perag	*pag,
	bool			wakeup)
		__releases(pag->pagb_lock)
{
	if (wakeup) {
		pag->pagb_gen++;
		wake_up_all(&pag->pagb_wait);
	}

	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set skip extents that need to be discarded, and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_extent_busy	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;
	bool			wakeup = false;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag)
				xfs_extent_busy_put_pag(pag, wakeup);
			agno = busyp->agno;
			pag = xfs_perag_get(mp, agno);
			spin_lock(&pag->pagb_lock);
			wakeup = false;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
		} else {
			xfs_extent_busy_clear_one(mp, pag, busyp);
			wakeup = true;
		}
	}

	if (pag)
		xfs_extent_busy_put_pag(pag, wakeup);
}

/*
 * Flush out all busy extents for this AG.
 */
void
xfs_extent_busy_flush(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	unsigned		busy_gen)
{
	DEFINE_WAIT		(wait);
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return;

	do {
		prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
		if (busy_gen != READ_ONCE(pag->pagb_gen))
			break;
		schedule();
	} while (1);

	finish_wait(&pag->pagb_wait, &wait);
}

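/*
 * Illustrative retry loop (a sketch with a made-up name, not from this
 * file): combine xfs_extent_busy_trim() with xfs_extent_busy_flush() so an
 * allocation that finds only busy space waits for at least one busy extent
 * to clear before trying again.
 */
static inline void
xfs_example_trim_or_wait(	/* hypothetical helper */
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len)
{
	unsigned		busy_gen;

	while (xfs_extent_busy_trim(args, bno, len, &busy_gen) &&
	       *len < args->minlen) {
		/*
		 * Everything usable was busy: wait for the AG's busy
		 * generation to advance, then re-derive bno/len from the
		 * free space btrees (elided here) and try again.
		 */
		xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
	}
}

/*
 * Wait until the busy extent tree of every AG in the filesystem is empty.
 */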
void
xfs_extent_busy_wait_all(
	struct xfs_mount	*mp)
{
	DEFINE_WAIT		(wait);
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		struct xfs_perag *pag = xfs_perag_get(mp, agno);

		do {
			prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
			if (RB_EMPTY_ROOT(&pag->pagb_tree))
				break;
			schedule();
		} while (1);
		finish_wait(&pag->pagb_wait, &wait);

		xfs_perag_put(pag);
	}
}

/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */
int
xfs_extent_busy_ag_cmp(
	void			*priv,
	struct list_head	*l1,
	struct list_head	*l2)
{
	struct xfs_extent_busy	*b1 =
		container_of(l1, struct xfs_extent_busy, list);
	struct xfs_extent_busy	*b2 =
		container_of(l2, struct xfs_extent_busy, list);
	s32 diff;

	diff = b1->agno - b2->agno;
	if (!diff)
		diff = b1->bno - b2->bno;
	return diff;
}