1/*
2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 * Copyright (c) 2010 David Chinner.
4 * Copyright (c) 2011 Christoph Hellwig.
5 * All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it would be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#include "xfs.h"
21#include "xfs_fs.h"
22#include "xfs_format.h"
23#include "xfs_log_format.h"
24#include "xfs_shared.h"
25#include "xfs_trans_resv.h"
26#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h"
29#include "xfs_alloc.h"
30#include "xfs_extent_busy.h"
31#include "xfs_trace.h"
32#include "xfs_trans.h"
33#include "xfs_log.h"
34
35void
36xfs_extent_busy_insert(
37 struct xfs_trans *tp,
38 xfs_agnumber_t agno,
39 xfs_agblock_t bno,
40 xfs_extlen_t len,
41 unsigned int flags)
42{
43 struct xfs_extent_busy *new;
44 struct xfs_extent_busy *busyp;
45 struct xfs_perag *pag;
46 struct rb_node **rbp;
47 struct rb_node *parent = NULL;
48
49 new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_MAYFAIL);
50 if (!new) {
51 /*
52 * No Memory! Since it is now not possible to track the free
53 * block, make this a synchronous transaction to ensure that
54 * the block is not reused before this transaction commits.
55 */
56 trace_xfs_extent_busy_enomem(tp->t_mountp, agno, bno, len);
57 xfs_trans_set_sync(tp);
58 return;
59 }
60
61 new->agno = agno;
62 new->bno = bno;
63 new->length = len;
64 INIT_LIST_HEAD(&new->list);
65 new->flags = flags;
66
67 /* trace before insert to be able to see failed inserts */
68 trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);
69
70 pag = xfs_perag_get(tp->t_mountp, new->agno);
71 spin_lock(&pag->pagb_lock);
72 rbp = &pag->pagb_tree.rb_node;
73 while (*rbp) {
74 parent = *rbp;
75 busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);
76
77 if (new->bno < busyp->bno) {
78 rbp = &(*rbp)->rb_left;
79 ASSERT(new->bno + new->length <= busyp->bno);
80 } else if (new->bno > busyp->bno) {
81 rbp = &(*rbp)->rb_right;
82 ASSERT(bno >= busyp->bno + busyp->length);
83 } else {
84 ASSERT(0);
85 }
86 }
87
88 rb_link_node(&new->rb_node, parent, rbp);
89 rb_insert_color(&new->rb_node, &pag->pagb_tree);
90
91 list_add(&new->list, &tp->t_busy);
92 spin_unlock(&pag->pagb_lock);
93 xfs_perag_put(pag);
94}
95
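/*
 * Illustrative usage sketch (editorial addition, not upstream code): the
 * allocator marks a just-freed range busy right after it has updated the
 * free space btrees, so the blocks cannot be handed back out and overwritten
 * before the free is committed to the log.  Roughly:
 *
 *	xfs_extent_busy_insert(tp, agno, bno, len, 0);
 *	...
 *	error = xfs_trans_commit(tp);
 *
 * The entries ride on tp->t_busy into the CIL checkpoint context at commit
 * time and are removed again by xfs_extent_busy_clear() once the checkpoint
 * has reached the log.
 */
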
96/*
97 * Search for a busy extent within the range of the extent we are about to
98 * allocate. xfs_extent_busy_search() takes the busy extent tree lock
99 * internally. It returns 0 for no overlapping busy
100 * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
101 * match. This is done so that a non-zero return indicates an overlap that
102 * will require a synchronous transaction, but it can still be
103 * used to distinguish between a partial and an exact match.
104 */
105int
106xfs_extent_busy_search(
107 struct xfs_mount *mp,
108 xfs_agnumber_t agno,
109 xfs_agblock_t bno,
110 xfs_extlen_t len)
111{
112 struct xfs_perag *pag;
113 struct rb_node *rbp;
114 struct xfs_extent_busy *busyp;
115 int match = 0;
116
117 pag = xfs_perag_get(mp, agno);
118 spin_lock(&pag->pagb_lock);
119
120 rbp = pag->pagb_tree.rb_node;
121
122 /* find closest start bno overlap */
123 while (rbp) {
124 busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
125 if (bno < busyp->bno) {
126 /* may overlap, but exact start block is lower */
127 if (bno + len > busyp->bno)
128 match = -1;
129 rbp = rbp->rb_left;
130 } else if (bno > busyp->bno) {
131 /* may overlap, but exact start block is higher */
132 if (bno < busyp->bno + busyp->length)
133 match = -1;
134 rbp = rbp->rb_right;
135 } else {
136 /* bno matches busyp, length determines exact match */
137 match = (busyp->length == len) ? 1 : -1;
138 break;
139 }
140 }
141 spin_unlock(&pag->pagb_lock);
142 xfs_perag_put(pag);
143 return match;
144}
145
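/*
 * Worked example (editorial, derived from the code above): with a busy
 * extent at bno 100, length 10, a search for (100, 10) returns 1 (exact
 * match), searches for (105, 20) or (90, 15) return -1 (partial overlap),
 * and a search for (120, 4) returns 0 (no overlap).  Callers that only need
 * to know whether the range is safe to hand out can simply test for a
 * non-zero return.
 */
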
146/*
147 * The found free extent [fbno, fend] overlaps part or all of the given busy
148 * extent. If the overlap covers the beginning, the end, or all of the busy
149 * extent, the overlapping portion can be made unbusy and used for the
150 * allocation. We can't split a busy extent because we can't modify a
151 * transaction/CIL context busy list, but we can update an entry's block
152 * number or length.
153 *
154 * Returns true if the extent can safely be reused, or false if the search
155 * needs to be restarted.
156 */
157STATIC bool
158xfs_extent_busy_update_extent(
159 struct xfs_mount *mp,
160 struct xfs_perag *pag,
161 struct xfs_extent_busy *busyp,
162 xfs_agblock_t fbno,
163 xfs_extlen_t flen,
164 bool userdata) __releases(&pag->pagb_lock)
165 __acquires(&pag->pagb_lock)
166{
167 xfs_agblock_t fend = fbno + flen;
168 xfs_agblock_t bbno = busyp->bno;
169 xfs_agblock_t bend = bbno + busyp->length;
170
171 /*
172 * This extent is currently being discarded. Give the thread
173 * performing the discard a chance to mark the extent unbusy
174 * and retry.
175 */
176 if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
177 spin_unlock(&pag->pagb_lock);
178 delay(1);
179 spin_lock(&pag->pagb_lock);
180 return false;
181 }
182
183 /*
184 * If there is a busy extent overlapping a user allocation, we have
185 * no choice but to force the log and retry the search.
186 *
187 * Fortunately this does not happen during normal operation, but
188 * only if the filesystem is very low on space and has to dip into
189 * the AGFL for normal allocations.
190 */
191 if (userdata)
192 goto out_force_log;
193
194 if (bbno < fbno && bend > fend) {
195 /*
196 * Case 1:
197 * bbno bend
198 * +BBBBBBBBBBBBBBBBB+
199 * +---------+
200 * fbno fend
201 */
202
203 /*
204 * We would have to split the busy extent to be able to track
205 * it correctly, which we cannot do because we would have to
206 * modify the list of busy extents attached to the transaction
207 * or CIL context, which is immutable.
208 *
209 * Force out the log to clear the busy extent and retry the
210 * search.
211 */
212 goto out_force_log;
213 } else if (bbno >= fbno && bend <= fend) {
214 /*
215 * Case 2:
216 * bbno bend
217 * +BBBBBBBBBBBBBBBBB+
218 * +-----------------+
219 * fbno fend
220 *
221 * Case 3:
222 * bbno bend
223 * +BBBBBBBBBBBBBBBBB+
224 * +--------------------------+
225 * fbno fend
226 *
227 * Case 4:
228 * bbno bend
229 * +BBBBBBBBBBBBBBBBB+
230 * +--------------------------+
231 * fbno fend
232 *
233 * Case 5:
234 * bbno bend
235 * +BBBBBBBBBBBBBBBBB+
236 * +-----------------------------------+
237 * fbno fend
238 *
239 */
240
241 /*
242 * The busy extent is fully covered by the extent we are
243 * allocating, and can simply be removed from the rbtree.
244 * However we cannot remove it from the immutable list
245 * tracking busy extents in the transaction or CIL context,
246 * so set the length to zero to mark it invalid.
247 *
248 * We also need to restart the busy extent search from the
249 * tree root, because erasing the node can rearrange the
250 * tree topology.
251 */
252 rb_erase(&busyp->rb_node, &pag->pagb_tree);
253 busyp->length = 0;
254 return false;
255 } else if (fend < bend) {
256 /*
257 * Case 6:
258 * bbno bend
259 * +BBBBBBBBBBBBBBBBB+
260 * +---------+
261 * fbno fend
262 *
263 * Case 7:
264 * bbno bend
265 * +BBBBBBBBBBBBBBBBB+
266 * +------------------+
267 * fbno fend
268 *
269 */
270 busyp->bno = fend;
busyp->length = bend - fend;
271 } else if (bbno < fbno) {
272 /*
273 * Case 8:
274 * bbno bend
275 * +BBBBBBBBBBBBBBBBB+
276 * +-------------+
277 * fbno fend
278 *
279 * Case 9:
280 * bbno bend
281 * +BBBBBBBBBBBBBBBBB+
282 * +----------------------+
283 * fbno fend
284 */
285 busyp->length = fbno - busyp->bno;
286 } else {
287 ASSERT(0);
288 }
289
290 trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
291 return true;
292
293out_force_log:
294 spin_unlock(&pag->pagb_lock);
295 xfs_log_force(mp, XFS_LOG_SYNC);
296 trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
297 spin_lock(&pag->pagb_lock);
298 return false;
299}
300
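/*
 * Worked example for the cases above (editorial): take a busy extent with
 * bno 100 and length 20, i.e. bbno = 100 and bend = 120.  Reusing
 * (fbno 100, flen 20) removes the whole entry from the rbtree (cases 2-5);
 * reusing (fbno 90, flen 20) moves the entry to start at block 110
 * (cases 6-7); reusing (fbno 110, flen 20) shortens it to 10 blocks
 * (cases 8-9); and reusing (fbno 105, flen 10) would need a split, so the
 * log is forced instead (case 1).
 */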
301
302/*
303 * For a given extent [fbno, flen], make sure we can reuse it safely.
304 */
305void
306xfs_extent_busy_reuse(
307 struct xfs_mount *mp,
308 xfs_agnumber_t agno,
309 xfs_agblock_t fbno,
310 xfs_extlen_t flen,
311 bool userdata)
312{
313 struct xfs_perag *pag;
314 struct rb_node *rbp;
315
316 ASSERT(flen > 0);
317
318 pag = xfs_perag_get(mp, agno);
319 spin_lock(&pag->pagb_lock);
320restart:
321 rbp = pag->pagb_tree.rb_node;
322 while (rbp) {
323 struct xfs_extent_busy *busyp =
324 rb_entry(rbp, struct xfs_extent_busy, rb_node);
325 xfs_agblock_t bbno = busyp->bno;
326 xfs_agblock_t bend = bbno + busyp->length;
327
328 if (fbno + flen <= bbno) {
329 rbp = rbp->rb_left;
330 continue;
331 } else if (fbno >= bend) {
332 rbp = rbp->rb_right;
333 continue;
334 }
335
336 if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
337 userdata))
338 goto restart;
339 }
340 spin_unlock(&pag->pagb_lock);
341 xfs_perag_put(pag);
342}
343
344/*
345 * For a given extent [fbno, flen], search the busy extent list to find a
346 * subset of the extent that is not busy. If *rlen is smaller than
347 * args->minlen no suitable extent could be found, and the higher level
348 * code needs to force out the log and retry the allocation.
349 */
350void
351xfs_extent_busy_trim(
352 struct xfs_alloc_arg *args,
353 xfs_agblock_t bno,
354 xfs_extlen_t len,
355 xfs_agblock_t *rbno,
356 xfs_extlen_t *rlen)
357{
358 xfs_agblock_t fbno;
359 xfs_extlen_t flen;
360 struct rb_node *rbp;
361
362 ASSERT(len > 0);
363
364 spin_lock(&args->pag->pagb_lock);
365restart:
366 fbno = bno;
367 flen = len;
368 rbp = args->pag->pagb_tree.rb_node;
369 while (rbp && flen >= args->minlen) {
370 struct xfs_extent_busy *busyp =
371 rb_entry(rbp, struct xfs_extent_busy, rb_node);
372 xfs_agblock_t fend = fbno + flen;
373 xfs_agblock_t bbno = busyp->bno;
374 xfs_agblock_t bend = bbno + busyp->length;
375
376 if (fend <= bbno) {
377 rbp = rbp->rb_left;
378 continue;
379 } else if (fbno >= bend) {
380 rbp = rbp->rb_right;
381 continue;
382 }
383
384 /*
385 * If this is a metadata allocation, try to reuse the busy
386 * extent instead of trimming the allocation.
387 */
388 if (!args->userdata &&
389 !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
390 if (!xfs_extent_busy_update_extent(args->mp, args->pag,
391 busyp, fbno, flen,
392 false))
393 goto restart;
394 continue;
395 }
396
397 if (bbno <= fbno) {
398 /* start overlap */
399
400 /*
401 * Case 1:
402 * bbno bend
403 * +BBBBBBBBBBBBBBBBB+
404 * +---------+
405 * fbno fend
406 *
407 * Case 2:
408 * bbno bend
409 * +BBBBBBBBBBBBBBBBB+
410 * +-------------+
411 * fbno fend
412 *
413 * Case 3:
414 * bbno bend
415 * +BBBBBBBBBBBBBBBBB+
416 * +-------------+
417 * fbno fend
418 *
419 * Case 4:
420 * bbno bend
421 * +BBBBBBBBBBBBBBBBB+
422 * +-----------------+
423 * fbno fend
424 *
425 * No unbusy region in extent, return failure.
426 */
427 if (fend <= bend)
428 goto fail;
429
430 /*
431 * Case 5:
432 * bbno bend
433 * +BBBBBBBBBBBBBBBBB+
434 * +----------------------+
435 * fbno fend
436 *
437 * Case 6:
438 * bbno bend
439 * +BBBBBBBBBBBBBBBBB+
440 * +--------------------------+
441 * fbno fend
442 *
443 * Needs to be trimmed to:
444 * +-------+
445 * fbno fend
446 */
447 fbno = bend;
448 } else if (bend >= fend) {
449 /* end overlap */
450
451 /*
452 * Case 7:
453 * bbno bend
454 * +BBBBBBBBBBBBBBBBB+
455 * +------------------+
456 * fbno fend
457 *
458 * Case 8:
459 * bbno bend
460 * +BBBBBBBBBBBBBBBBB+
461 * +--------------------------+
462 * fbno fend
463 *
464 * Needs to be trimmed to:
465 * +-------+
466 * fbno fend
467 */
468 fend = bbno;
469 } else {
470 /* middle overlap */
471
472 /*
473 * Case 9:
474 * bbno bend
475 * +BBBBBBBBBBBBBBBBB+
476 * +-----------------------------------+
477 * fbno fend
478 *
479 * Can be trimmed to:
480 * +-------+ OR +-------+
481 * fbno fend fbno fend
482 *
483 * Backward allocation leads to significant
484 * fragmentation of directories, which degrades
485 * directory performance; therefore we always want to
486 * choose the option that produces forward allocation
487 * patterns.
488 * Preferring the lower bno extent will make the next
489 * request use "fend" as the start of the next
490 * allocation; if the segment is no longer busy at
491 * that point, we'll get a contiguous allocation, but
492 * even if it is still busy, we will get a forward
493 * allocation.
494 * We try to avoid choosing the segment at "bend",
495 * because that can lead to the next allocation
496 * taking the segment at "fbno", which would be a
497 * backward allocation. We only use the segment at
498 * "bend" if it is much larger than the current
499 * requested size, because in that case there's a
500 * good chance subsequent allocations will be
501 * contiguous.
502 */
503 if (bbno - fbno >= args->maxlen) {
504 /* left candidate fits perfectly */
505 fend = bbno;
506 } else if (fend - bend >= args->maxlen * 4) {
507 /* right candidate has enough free space */
508 fbno = bend;
509 } else if (bbno - fbno >= args->minlen) {
510 /* left candidate fits minimum requirement */
511 fend = bbno;
512 } else {
513 goto fail;
514 }
515 }
516
517 flen = fend - fbno;
518 }
519 spin_unlock(&args->pag->pagb_lock);
520
521 if (fbno != bno || flen != len) {
522 trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len,
523 fbno, flen);
524 }
525 *rbno = fbno;
526 *rlen = flen;
527 return;
528fail:
529 /*
530 * Return a zero extent length as a failure indication. All callers
531 * re-check if the trimmed extent satisfies the minlen requirement.
532 */
533 spin_unlock(&args->pag->pagb_lock);
534 trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
535 *rbno = fbno;
536 *rlen = 0;
537}
538
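/*
 * Worked example for the middle overlap heuristic above (editorial):
 * trimming the free extent [100, 200) against a busy extent [140, 150)
 * leaves a 40 block candidate on the left and a 50 block candidate on the
 * right.  With args->maxlen = 32 the left candidate already satisfies
 * maxlen, so [100, 140) is returned.  With args->maxlen = 45 the left
 * candidate is too small and the right candidate is well short of
 * 4 * maxlen, so the left candidate is used only if it still covers
 * args->minlen; otherwise the trim fails and the caller must force the log.
 */
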
539STATIC void
540xfs_extent_busy_clear_one(
541 struct xfs_mount *mp,
542 struct xfs_perag *pag,
543 struct xfs_extent_busy *busyp)
544{
545 if (busyp->length) {
546 trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
547 busyp->length);
548 rb_erase(&busyp->rb_node, &pag->pagb_tree);
549 }
550
551 list_del_init(&busyp->list);
552 kmem_free(busyp);
553}
554
555/*
556 * Remove all extents on the passed in list from the busy extents tree.
557 * If do_discard is set, skip extents that need to be discarded, and mark
558 * these as undergoing a discard operation instead.
559 */
560void
561xfs_extent_busy_clear(
562 struct xfs_mount *mp,
563 struct list_head *list,
564 bool do_discard)
565{
566 struct xfs_extent_busy *busyp, *n;
567 struct xfs_perag *pag = NULL;
568 xfs_agnumber_t agno = NULLAGNUMBER;
569
570 list_for_each_entry_safe(busyp, n, list, list) {
571 if (busyp->agno != agno) {
572 if (pag) {
573 spin_unlock(&pag->pagb_lock);
574 xfs_perag_put(pag);
575 }
576 pag = xfs_perag_get(mp, busyp->agno);
577 spin_lock(&pag->pagb_lock);
578 agno = busyp->agno;
579 }
580
581 if (do_discard && busyp->length &&
582 !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD))
583 busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
584 else
585 xfs_extent_busy_clear_one(mp, pag, busyp);
586 }
587
588 if (pag) {
589 spin_unlock(&pag->pagb_lock);
590 xfs_perag_put(pag);
591 }
592}
593
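/*
 * Editorial note on the discard handling above: with do_discard set, an
 * extent that still has a length and is not marked
 * XFS_EXTENT_BUSY_SKIP_DISCARD stays in the rbtree and is only flagged
 * XFS_EXTENT_BUSY_DISCARDED here; the caller is then expected to issue the
 * discards and call xfs_extent_busy_clear() again with do_discard false to
 * finally remove and free those entries.
 */
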
594/*
595 * Callback for list_sort to sort busy extents by the AG they reside in.
596 */
597int
598xfs_extent_busy_ag_cmp(
599 void *priv,
600 struct list_head *a,
601 struct list_head *b)
602{
603 return container_of(a, struct xfs_extent_busy, list)->agno -
604 container_of(b, struct xfs_extent_busy, list)->agno;
605}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
4 * Copyright (c) 2010 David Chinner.
5 * Copyright (c) 2011 Christoph Hellwig.
6 * All Rights Reserved.
7 */
8#include "xfs.h"
9#include "xfs_fs.h"
10#include "xfs_format.h"
11#include "xfs_log_format.h"
12#include "xfs_shared.h"
13#include "xfs_trans_resv.h"
14#include "xfs_mount.h"
15#include "xfs_alloc.h"
16#include "xfs_extent_busy.h"
17#include "xfs_trace.h"
18#include "xfs_trans.h"
19#include "xfs_log.h"
20#include "xfs_ag.h"
21#include "xfs_rtgroup.h"
22
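/*
 * Editorial note: in this revision the busy extent state that used to live
 * directly in struct xfs_perag (pagb_lock, pagb_tree, and friends in the
 * older code above) hangs off a per-group structure instead.  eb_gen is
 * incremented and eb_wait is woken whenever busy extents are cleared, so
 * allocators can sleep until "some busy extent went away" rather than
 * polling the tree.
 */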
23struct xfs_extent_busy_tree {
24 spinlock_t eb_lock;
25 struct rb_root eb_tree;
26 unsigned int eb_gen;
27 wait_queue_head_t eb_wait;
28};
29
30static void
31xfs_extent_busy_insert_list(
32 struct xfs_group *xg,
33 xfs_agblock_t bno,
34 xfs_extlen_t len,
35 unsigned int flags,
36 struct list_head *busy_list)
37{
38 struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
39 struct xfs_extent_busy *new;
40 struct xfs_extent_busy *busyp;
41 struct rb_node **rbp;
42 struct rb_node *parent = NULL;
43
44 new = kzalloc(sizeof(struct xfs_extent_busy),
45 GFP_KERNEL | __GFP_NOFAIL);
46 new->group = xfs_group_hold(xg);
47 new->bno = bno;
48 new->length = len;
49 INIT_LIST_HEAD(&new->list);
50 new->flags = flags;
51
52 /* trace before insert to be able to see failed inserts */
53 trace_xfs_extent_busy(xg, bno, len);
54
55 spin_lock(&eb->eb_lock);
56 rbp = &eb->eb_tree.rb_node;
57 while (*rbp) {
58 parent = *rbp;
59 busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);
60
61 if (new->bno < busyp->bno) {
62 rbp = &(*rbp)->rb_left;
63 ASSERT(new->bno + new->length <= busyp->bno);
64 } else if (new->bno > busyp->bno) {
65 rbp = &(*rbp)->rb_right;
66 ASSERT(bno >= busyp->bno + busyp->length);
67 } else {
68 ASSERT(0);
69 }
70 }
71
72 rb_link_node(&new->rb_node, parent, rbp);
73 rb_insert_color(&new->rb_node, &eb->eb_tree);
74
75 /* always process discard lists in fifo order */
76 list_add_tail(&new->list, busy_list);
77 spin_unlock(&eb->eb_lock);
78}
79
80void
81xfs_extent_busy_insert(
82 struct xfs_trans *tp,
83 struct xfs_group *xg,
84 xfs_agblock_t bno,
85 xfs_extlen_t len,
86 unsigned int flags)
87{
88 xfs_extent_busy_insert_list(xg, bno, len, flags, &tp->t_busy);
89}
90
91void
92xfs_extent_busy_insert_discard(
93 struct xfs_group *xg,
94 xfs_agblock_t bno,
95 xfs_extlen_t len,
96 struct list_head *busy_list)
97{
98 xfs_extent_busy_insert_list(xg, bno, len, XFS_EXTENT_BUSY_DISCARDED,
99 busy_list);
100}
101
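/*
 * Editorial note: xfs_extent_busy_insert_discard() above is the variant used
 * by the discard paths (for example FITRIM style trimming), where the extent
 * is inserted already marked XFS_EXTENT_BUSY_DISCARDED and queued on a
 * caller supplied list; as the comment in xfs_extent_busy_insert_list()
 * notes, that list is kept in FIFO order so discards go out in the order the
 * extents were gathered.
 */
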
102/*
103 * Search for a busy extent within the range of the extent we are about to
104 * allocate. xfs_extent_busy_search() takes the busy extent tree lock
105 * internally. It returns 0 for no overlapping busy
106 * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
107 * match. This is done so that a non-zero return indicates an overlap that
108 * will require a synchronous transaction, but it can still be
109 * used to distinguish between a partial and an exact match.
110 */
111int
112xfs_extent_busy_search(
113 struct xfs_group *xg,
114 xfs_agblock_t bno,
115 xfs_extlen_t len)
116{
117 struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
118 struct rb_node *rbp;
119 struct xfs_extent_busy *busyp;
120 int match = 0;
121
122 /* find closest start bno overlap */
123 spin_lock(&eb->eb_lock);
124 rbp = eb->eb_tree.rb_node;
125 while (rbp) {
126 busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
127 if (bno < busyp->bno) {
128 /* may overlap, but exact start block is lower */
129 if (bno + len > busyp->bno)
130 match = -1;
131 rbp = rbp->rb_left;
132 } else if (bno > busyp->bno) {
133 /* may overlap, but exact start block is higher */
134 if (bno < busyp->bno + busyp->length)
135 match = -1;
136 rbp = rbp->rb_right;
137 } else {
138 /* bno matches busyp, length determines exact match */
139 match = (busyp->length == len) ? 1 : -1;
140 break;
141 }
142 }
143 spin_unlock(&eb->eb_lock);
144 return match;
145}
146
147/*
148 * The found free extent [fbno, fend] overlaps part or all of the given busy
149 * extent. If the overlap covers the beginning, the end, or all of the busy
150 * extent, the overlapping portion can be made unbusy and used for the
151 * allocation. We can't split a busy extent because we can't modify a
152 * transaction/CIL context busy list, but we can update an entry's block
153 * number or length.
154 *
155 * Returns true if the extent can safely be reused, or false if the search
156 * needs to be restarted.
157 */
158STATIC bool
159xfs_extent_busy_update_extent(
160 struct xfs_group *xg,
161 struct xfs_extent_busy *busyp,
162 xfs_agblock_t fbno,
163 xfs_extlen_t flen,
164 bool userdata)
165 __releases(&eb->eb_lock)
166 __acquires(&eb->eb_lock)
167{
168 struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
169 xfs_agblock_t fend = fbno + flen;
170 xfs_agblock_t bbno = busyp->bno;
171 xfs_agblock_t bend = bbno + busyp->length;
172
173 /*
174 * This extent is currently being discarded. Give the thread
175 * performing the discard a chance to mark the extent unbusy
176 * and retry.
177 */
178 if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
179 spin_unlock(&eb->eb_lock);
180 delay(1);
181 spin_lock(&eb->eb_lock);
182 return false;
183 }
184
185 /*
186 * If there is a busy extent overlapping a user allocation, we have
187 * no choice but to force the log and retry the search.
188 *
189 * Fortunately this does not happen during normal operation, but
190 * only if the filesystem is very low on space and has to dip into
191 * the AGFL for normal allocations.
192 */
193 if (userdata)
194 goto out_force_log;
195
196 if (bbno < fbno && bend > fend) {
197 /*
198 * Case 1:
199 * bbno bend
200 * +BBBBBBBBBBBBBBBBB+
201 * +---------+
202 * fbno fend
203 */
204
205 /*
206 * We would have to split the busy extent to be able to track
207 * it correctly, which we cannot do because we would have to
208 * modify the list of busy extents attached to the transaction
209 * or CIL context, which is immutable.
210 *
211 * Force out the log to clear the busy extent and retry the
212 * search.
213 */
214 goto out_force_log;
215 } else if (bbno >= fbno && bend <= fend) {
216 /*
217 * Case 2:
218 * bbno bend
219 * +BBBBBBBBBBBBBBBBB+
220 * +-----------------+
221 * fbno fend
222 *
223 * Case 3:
224 * bbno bend
225 * +BBBBBBBBBBBBBBBBB+
226 * +--------------------------+
227 * fbno fend
228 *
229 * Case 4:
230 * bbno bend
231 * +BBBBBBBBBBBBBBBBB+
232 * +--------------------------+
233 * fbno fend
234 *
235 * Case 5:
236 * bbno bend
237 * +BBBBBBBBBBBBBBBBB+
238 * +-----------------------------------+
239 * fbno fend
240 *
241 */
242
243 /*
244 * The busy extent is fully covered by the extent we are
245 * allocating, and can simply be removed from the rbtree.
246 * However we cannot remove it from the immutable list
247 * tracking busy extents in the transaction or CIL context,
248 * so set the length to zero to mark it invalid.
249 *
250 * We also need to restart the busy extent search from the
251 * tree root, because erasing the node can rearrange the
252 * tree topology.
253 */
254 rb_erase(&busyp->rb_node, &eb->eb_tree);
255 busyp->length = 0;
256 return false;
257 } else if (fend < bend) {
258 /*
259 * Case 6:
260 * bbno bend
261 * +BBBBBBBBBBBBBBBBB+
262 * +---------+
263 * fbno fend
264 *
265 * Case 7:
266 * bbno bend
267 * +BBBBBBBBBBBBBBBBB+
268 * +------------------+
269 * fbno fend
270 *
271 */
272 busyp->bno = fend;
273 busyp->length = bend - fend;
274 } else if (bbno < fbno) {
275 /*
276 * Case 8:
277 * bbno bend
278 * +BBBBBBBBBBBBBBBBB+
279 * +-------------+
280 * fbno fend
281 *
282 * Case 9:
283 * bbno bend
284 * +BBBBBBBBBBBBBBBBB+
285 * +----------------------+
286 * fbno fend
287 */
288 busyp->length = fbno - busyp->bno;
289 } else {
290 ASSERT(0);
291 }
292
293 trace_xfs_extent_busy_reuse(xg, fbno, flen);
294 return true;
295
296out_force_log:
297 spin_unlock(&eb->eb_lock);
298 xfs_log_force(xg->xg_mount, XFS_LOG_SYNC);
299 trace_xfs_extent_busy_force(xg, fbno, flen);
300 spin_lock(&eb->eb_lock);
301 return false;
302}
303
304/*
305 * For a given extent [fbno, flen], make sure we can reuse it safely.
306 */
307void
308xfs_extent_busy_reuse(
309 struct xfs_group *xg,
310 xfs_agblock_t fbno,
311 xfs_extlen_t flen,
312 bool userdata)
313{
314 struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
315 struct rb_node *rbp;
316
317 ASSERT(flen > 0);
318 spin_lock(&eb->eb_lock);
319restart:
320 rbp = eb->eb_tree.rb_node;
321 while (rbp) {
322 struct xfs_extent_busy *busyp =
323 rb_entry(rbp, struct xfs_extent_busy, rb_node);
324 xfs_agblock_t bbno = busyp->bno;
325 xfs_agblock_t bend = bbno + busyp->length;
326
327 if (fbno + flen <= bbno) {
328 rbp = rbp->rb_left;
329 continue;
330 } else if (fbno >= bend) {
331 rbp = rbp->rb_right;
332 continue;
333 }
334
335 if (!xfs_extent_busy_update_extent(xg, busyp, fbno, flen,
336 userdata))
337 goto restart;
338 }
339 spin_unlock(&eb->eb_lock);
340}
341
342/*
343 * For a given extent [fbno, flen], search the busy extent list to find a
344 * subset of the extent that is not busy. If the trimmed *len is smaller
345 * than minlen, no suitable extent could be found, and the higher level
346 * code needs to force out the log and retry the allocation.
347 *
348 * Return the current busy generation for the group if the extent is busy. This
349 * value can be used to wait for at least one of the currently busy extents
350 * to be cleared. Note that the busy list is not guaranteed to be empty after
351 * a wakeup on this generation. The state of a specific extent must always be
352 * confirmed with another call to xfs_extent_busy_trim() before it can be used.
353 */
354bool
355xfs_extent_busy_trim(
356 struct xfs_group *xg,
357 xfs_extlen_t minlen,
358 xfs_extlen_t maxlen,
359 xfs_agblock_t *bno,
360 xfs_extlen_t *len,
361 unsigned *busy_gen)
362{
363 struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
364 xfs_agblock_t fbno;
365 xfs_extlen_t flen;
366 struct rb_node *rbp;
367 bool ret = false;
368
369 ASSERT(*len > 0);
370
371 spin_lock(&eb->eb_lock);
372 fbno = *bno;
373 flen = *len;
374 rbp = eb->eb_tree.rb_node;
375 while (rbp && flen >= minlen) {
376 struct xfs_extent_busy *busyp =
377 rb_entry(rbp, struct xfs_extent_busy, rb_node);
378 xfs_agblock_t fend = fbno + flen;
379 xfs_agblock_t bbno = busyp->bno;
380 xfs_agblock_t bend = bbno + busyp->length;
381
382 if (fend <= bbno) {
383 rbp = rbp->rb_left;
384 continue;
385 } else if (fbno >= bend) {
386 rbp = rbp->rb_right;
387 continue;
388 }
389
390 if (bbno <= fbno) {
391 /* start overlap */
392
393 /*
394 * Case 1:
395 * bbno bend
396 * +BBBBBBBBBBBBBBBBB+
397 * +---------+
398 * fbno fend
399 *
400 * Case 2:
401 * bbno bend
402 * +BBBBBBBBBBBBBBBBB+
403 * +-------------+
404 * fbno fend
405 *
406 * Case 3:
407 * bbno bend
408 * +BBBBBBBBBBBBBBBBB+
409 * +-------------+
410 * fbno fend
411 *
412 * Case 4:
413 * bbno bend
414 * +BBBBBBBBBBBBBBBBB+
415 * +-----------------+
416 * fbno fend
417 *
418 * No unbusy region in extent, return failure.
419 */
420 if (fend <= bend)
421 goto fail;
422
423 /*
424 * Case 5:
425 * bbno bend
426 * +BBBBBBBBBBBBBBBBB+
427 * +----------------------+
428 * fbno fend
429 *
430 * Case 6:
431 * bbno bend
432 * +BBBBBBBBBBBBBBBBB+
433 * +--------------------------+
434 * fbno fend
435 *
436 * Needs to be trimmed to:
437 * +-------+
438 * fbno fend
439 */
440 fbno = bend;
441 } else if (bend >= fend) {
442 /* end overlap */
443
444 /*
445 * Case 7:
446 * bbno bend
447 * +BBBBBBBBBBBBBBBBB+
448 * +------------------+
449 * fbno fend
450 *
451 * Case 8:
452 * bbno bend
453 * +BBBBBBBBBBBBBBBBB+
454 * +--------------------------+
455 * fbno fend
456 *
457 * Needs to be trimmed to:
458 * +-------+
459 * fbno fend
460 */
461 fend = bbno;
462 } else {
463 /* middle overlap */
464
465 /*
466 * Case 9:
467 * bbno bend
468 * +BBBBBBBBBBBBBBBBB+
469 * +-----------------------------------+
470 * fbno fend
471 *
472 * Can be trimmed to:
473 * +-------+ OR +-------+
474 * fbno fend fbno fend
475 *
476 * Backward allocation leads to significant
477 * fragmentation of directories, which degrades
478 * directory performance, therefore we always want to
479 * choose the option that produces forward allocation
480 * patterns.
481 * Preferring the lower bno extent will make the next
482 * request use "fend" as the start of the next
483 * allocation; if the segment is no longer busy at
484 * that point, we'll get a contiguous allocation, but
485 * even if it is still busy, we will get a forward
486 * allocation.
487 * We try to avoid choosing the segment at "bend",
488 * because that can lead to the next allocation
489 * taking the segment at "fbno", which would be a
490 * backward allocation. We only use the segment at
491 * "bend" if it is much larger than the current
492 * requested size, because in that case there's a
493 * good chance subsequent allocations will be
494 * contiguous.
495 */
496 if (bbno - fbno >= maxlen) {
497 /* left candidate fits perfectly */
498 fend = bbno;
499 } else if (fend - bend >= maxlen * 4) {
500 /* right candidate has enough free space */
501 fbno = bend;
502 } else if (bbno - fbno >= minlen) {
503 /* left candidate fits minimum requirement */
504 fend = bbno;
505 } else {
506 goto fail;
507 }
508 }
509
510 flen = fend - fbno;
511 }
512out:
513
514 if (fbno != *bno || flen != *len) {
515 trace_xfs_extent_busy_trim(xg, *bno, *len, fbno, flen);
516 *bno = fbno;
517 *len = flen;
518 *busy_gen = eb->eb_gen;
519 ret = true;
520 }
521 spin_unlock(&eb->eb_lock);
522 return ret;
523fail:
524 /*
525 * Return a zero extent length as a failure indication. All callers
526 * re-check if the trimmed extent satisfies the minlen requirement.
527 */
528 flen = 0;
529 goto out;
530}
531
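/*
 * Illustrative caller pattern (editorial sketch, simplified and not taken
 * verbatim from xfs_alloc.c): an allocator can combine the boolean return
 * and the busy generation to decide between giving up and waiting:
 *
 *	busy = xfs_extent_busy_trim(xg, minlen, maxlen, &bno, &len, &busy_gen);
 *	if (len < minlen) {
 *		if (!busy)
 *			return -ENOSPC;
 *		error = xfs_extent_busy_flush(tp, xg, busy_gen, alloc_flags);
 *		if (error)
 *			return error;
 *		goto restart;
 *	}
 */
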
532static bool
533xfs_extent_busy_clear_one(
534 struct xfs_extent_busy *busyp,
535 bool do_discard)
536{
537 struct xfs_extent_busy_tree *eb = busyp->group->xg_busy_extents;
538
539 if (busyp->length) {
540 if (do_discard &&
541 !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
542 busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
543 return false;
544 }
545 trace_xfs_extent_busy_clear(busyp->group, busyp->bno,
546 busyp->length);
547 rb_erase(&busyp->rb_node, &eb->eb_tree);
548 }
549
550 list_del_init(&busyp->list);
551 xfs_group_put(busyp->group);
552 kfree(busyp);
553 return true;
554}
555
556/*
557 * Remove all extents on the passed in list from the busy extents tree.
558 * If do_discard is set, skip extents that need to be discarded, and mark
559 * these as undergoing a discard operation instead.
560 */
561void
562xfs_extent_busy_clear(
563 struct list_head *list,
564 bool do_discard)
565{
566 struct xfs_extent_busy *busyp, *next;
567
568 busyp = list_first_entry_or_null(list, typeof(*busyp), list);
569 if (!busyp)
570 return;
571
572 do {
573 struct xfs_group *xg = xfs_group_hold(busyp->group);
574 struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
575 bool wakeup = false;
576
577 spin_lock(&eb->eb_lock);
578 do {
579 next = list_next_entry(busyp, list);
580 if (xfs_extent_busy_clear_one(busyp, do_discard))
581 wakeup = true;
582 busyp = next;
583 } while (!list_entry_is_head(busyp, list, list) &&
584 busyp->group == xg);
585
586 if (wakeup) {
587 eb->eb_gen++;
588 wake_up_all(&eb->eb_wait);
589 }
590 spin_unlock(&eb->eb_lock);
591 xfs_group_put(xg);
592 } while (!list_entry_is_head(busyp, list, list));
593}
594
595/*
596 * Flush out all busy extents for this group.
597 *
598 * If the current transaction is holding busy extents, the caller may not want
599 * to wait for committed busy extents to resolve. If we are being told just to
600 * try a flush or progress has been made since we last skipped a busy extent,
601 * return immediately to allow the caller to try again.
602 *
603 * If we are freeing extents, we might actually be holding the only free extents
604 * in the transaction busy list and the log force won't resolve that situation.
605 * In this case, we must return -EAGAIN to avoid a deadlock by informing the
606 * caller it needs to commit the busy extents it holds before retrying the
607 * extent free operation.
608 */
609int
610xfs_extent_busy_flush(
611 struct xfs_trans *tp,
612 struct xfs_group *xg,
613 unsigned busy_gen,
614 uint32_t alloc_flags)
615{
616 struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
617 DEFINE_WAIT (wait);
618 int error;
619
620 error = xfs_log_force(tp->t_mountp, XFS_LOG_SYNC);
621 if (error)
622 return error;
623
624 /* Avoid deadlocks on uncommitted busy extents. */
625 if (!list_empty(&tp->t_busy)) {
626 if (alloc_flags & XFS_ALLOC_FLAG_TRYFLUSH)
627 return 0;
628
629 if (busy_gen != READ_ONCE(eb->eb_gen))
630 return 0;
631
632 if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
633 return -EAGAIN;
634 }
635
636 /* Wait for committed busy extents to resolve. */
637 do {
638 prepare_to_wait(&eb->eb_wait, &wait, TASK_KILLABLE);
639 if (busy_gen != READ_ONCE(eb->eb_gen))
640 break;
641 schedule();
642 } while (1);
643
644 finish_wait(&eb->eb_wait, &wait);
645 return 0;
646}
647
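/*
 * Editorial summary of the early returns in xfs_extent_busy_flush() above:
 * TRYFLUSH callers only want the log force side effect; a changed eb_gen
 * means some busy extent has been cleared since the caller sampled it, so a
 * retry is worthwhile without sleeping; and FREEING callers must not sleep
 * here because the extents they themselves hold busy may be the only free
 * space left, which no log force can resolve.
 */
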
648static void
649xfs_extent_busy_wait_group(
650 struct xfs_group *xg)
651{
652 DEFINE_WAIT (wait);
653 struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
654
655 do {
656 prepare_to_wait(&eb->eb_wait, &wait, TASK_KILLABLE);
657 if (RB_EMPTY_ROOT(&eb->eb_tree))
658 break;
659 schedule();
660 } while (1);
661 finish_wait(&eb->eb_wait, &wait);
662}
663
664void
665xfs_extent_busy_wait_all(
666 struct xfs_mount *mp)
667{
668 struct xfs_perag *pag = NULL;
669 struct xfs_rtgroup *rtg = NULL;
670
671 while ((pag = xfs_perag_next(mp, pag)))
672 xfs_extent_busy_wait_group(pag_group(pag));
673
674 if (xfs_has_rtgroups(mp))
675 while ((rtg = xfs_rtgroup_next(mp, rtg)))
676 xfs_extent_busy_wait_group(rtg_group(rtg));
677}
678
679/*
680 * Callback for list_sort to sort busy extents by the group they reside in.
681 */
682int
683xfs_extent_busy_ag_cmp(
684 void *priv,
685 const struct list_head *l1,
686 const struct list_head *l2)
687{
688 struct xfs_extent_busy *b1 =
689 container_of(l1, struct xfs_extent_busy, list);
690 struct xfs_extent_busy *b2 =
691 container_of(l2, struct xfs_extent_busy, list);
692 s32 diff;
693
694 diff = b1->group->xg_gno - b2->group->xg_gno;
695 if (!diff)
696 diff = b1->bno - b2->bno;
697 return diff;
698}
699
700/* Are there any busy extents in this group? */
701bool
702xfs_extent_busy_list_empty(
703 struct xfs_group *xg,
704 unsigned *busy_gen)
705{
706 struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
707 bool res;
708
709 spin_lock(&eb->eb_lock);
710 res = RB_EMPTY_ROOT(&eb->eb_tree);
711 *busy_gen = READ_ONCE(eb->eb_gen);
712 spin_unlock(&eb->eb_lock);
713 return res;
714}
715
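/*
 * Editorial note: returning the generation together with the emptiness check
 * lets a caller snapshot eb_gen while it decides whether to scan the group;
 * if busy extents later get in the way, that snapshot can be handed to
 * xfs_extent_busy_flush() to wait until at least one extent that existed at
 * snapshot time has been cleared.
 */
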
716struct xfs_extent_busy_tree *
717xfs_extent_busy_alloc(void)
718{
719 struct xfs_extent_busy_tree *eb;
720
721 eb = kzalloc(sizeof(*eb), GFP_KERNEL);
722 if (!eb)
723 return NULL;
724 spin_lock_init(&eb->eb_lock);
725 init_waitqueue_head(&eb->eb_wait);
726 eb->eb_tree = RB_ROOT;
727 return eb;
728}
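
/*
 * Editorial note (assumption about callers outside this file): the group
 * setup code is expected to attach the tree returned by
 * xfs_extent_busy_alloc() to xg->xg_busy_extents and to free it with a plain
 * kfree() at group teardown, presumably after the tree has been drained via
 * xfs_extent_busy_wait_all() or equivalent.
 */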