// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_ag.h"

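/*
 * Track a freed extent as busy until the free is permanently committed to
 * the log.  The extent is inserted into both the per-AG rbtree, which is
 * keyed by start block and used for overlap lookups, and the transaction's
 * t_busy list, which is used to clear the busy extents once the commit
 * completes.  A range must never be inserted twice while it is still busy,
 * which is what the ASSERTs in the tree walk below check.
 */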
void
xfs_extent_busy_insert(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	struct xfs_extent_busy	*new;
	struct xfs_extent_busy	*busyp;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
	new->agno = pag->pag_agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_extent_busy(tp->t_mountp, pag->pag_agno, bno, len);

	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
}

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  The busy extent tree lock is taken internally, so callers must
 * not already hold it.  This function returns 0 for no overlapping busy
 * extent, -1 for an overlapping but not exact busy extent, and 1 for an
 * exact match.  This is done so that a non-zero return indicates an overlap
 * that will require a synchronous transaction, but it can still be used to
 * distinguish between a partial and an exact match.
 */
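/*
 * For example, with a single busy record covering blocks [10, 15) in the
 * tree: a search for [8, 12) notes the partial overlap while walking left
 * and returns -1; a search for [10, 15) matches both start block and length
 * and returns 1; a search for [20, 22) touches nothing and returns 0.
 */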
int
xfs_extent_busy_search(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct rb_node		*rbp;
	struct xfs_extent_busy	*busyp;
	int			match = 0;

	/* find closest start bno overlap */
	spin_lock(&pag->pagb_lock);
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	return match;
}

/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata) __releases(&pag->pagb_lock)
					  __acquires(&pag->pagb_lock)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded.  Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user allocation, we have
	 * no choice but to force the log and retry the search.
	 *
	 * Fortunately this does not happen during normal operation, but
	 * only if the filesystem is very low on space and has to dip into
	 * the AGFL for normal allocations.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * Case 1:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +---------+
		 *        fbno   fend
		 */

		/*
		 * We would have to split the busy extent to be able to track
		 * it correctly, which we cannot do because we would have to
		 * modify the list of busy extents attached to the transaction
		 * or CIL context, which is immutable.
		 *
		 * Force out the log to clear the busy extent and retry the
		 * search.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * Case 2:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +-----------------+
		 *    fbno           fend
		 *
		 * Case 3:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 4:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 5:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +-----------------------------------+
		 *    fbno                             fend
		 *
		 */

		/*
		 * The busy extent is fully covered by the extent we are
		 * allocating, and can simply be removed from the rbtree.
		 * However we cannot remove it from the immutable list
		 * tracking busy extents in the transaction or CIL context,
		 * so set the length to zero to mark it invalid.
		 *
		 * We also need to restart the busy extent search from the
		 * tree root, because erasing the node can rearrange the
		 * tree topology.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * Case 6:
		 *              bbno           bend
		 *              +BBBBBBBBBBBBBBBBB+
		 *              +---------+
		 *              fbno   fend
		 *
		 * Case 7:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +------------------+
		 *    fbno            fend
		 *
		 */
		busyp->bno = fend;
		busyp->length = bend - fend;
	} else if (bbno < fbno) {
		/*
		 * Case 8:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +-------------+
		 *        fbno       fend
		 *
		 * Case 9:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +----------------------+
		 *        fbno                fend
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}


/*
 * For a given extent [fbno, flen], make sure we can reuse it safely by
 * trimming or removing any overlapping busy extents, forcing the log and
 * restarting the search whenever an overlap cannot be resolved in place.
 */
void
xfs_extent_busy_reuse(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct rb_node		*rbp;

	ASSERT(flen > 0);
	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
				userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
}

/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If *len is smaller than
 * args->minlen no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 *
 * Return the current busy generation for the AG if the extent is busy. This
 * value can be used to wait for at least one of the currently busy extents
 * to be cleared. Note that the busy list is not guaranteed to be empty
 * after a waiter on that generation is woken. The state of a specific
 * extent must always be confirmed with another call to
 * xfs_extent_busy_trim() before it can be used.
 */
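/*
 * A sketch of how the two halves fit together (the real retry loop lives in
 * the allocator in xfs_alloc.c; this is illustrative, not a verbatim
 * caller): trim the candidate extent, and if the busy overlap left too
 * little room, wait on the reported generation before retrying:
 *
 *	unsigned	busy_gen;
 *	bool		busy;
 *
 *	busy = xfs_extent_busy_trim(args, &bno, &len, &busy_gen);
 *	if (busy && len < args->minlen)
 *		xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
 */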
bool
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	unsigned		*busy_gen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;
	bool			ret = false;

	ASSERT(*len > 0);

	spin_lock(&args->pag->pagb_lock);
	fbno = *bno;
	flen = *len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (bbno <= fbno) {
			/* start overlap */

			/*
			 * Case 1:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +---------+
			 *        fbno   fend
			 *
			 * Case 2:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +-------------+
			 *        fbno       fend
			 *
			 * Case 3:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-------------+
			 *    fbno       fend
			 *
			 * Case 4:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-----------------+
			 *    fbno           fend
			 *
			 * No unbusy region in extent, return failure.
			 */
			if (fend <= bend)
				goto fail;

			/*
			 * Case 5:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +----------------------+
			 *        fbno                fend
			 *
			 * Case 6:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *                       +-------+
			 *                       fbno  fend
			 */
			fbno = bend;
		} else if (bend >= fend) {
			/* end overlap */

			/*
			 * Case 7:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +------------------+
			 *    fbno            fend
			 *
			 * Case 8:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *    +-------+
			 *    fbno  fend
			 */
			fend = bbno;
		} else {
			/* middle overlap */

			/*
			 * Case 9:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +-----------------------------------+
			 *    fbno                             fend
			 *
			 * Can be trimmed to:
			 *    +-------+        OR         +-------+
			 *    fbno  fend                 fbno  fend
			 *
			 * Backward allocation leads to significant
			 * fragmentation of directories, which degrades
			 * directory performance, therefore we always want to
			 * choose the option that produces forward allocation
			 * patterns.
			 * Preferring the lower bno extent will make the next
			 * request use "fend" as the start of the next
			 * allocation; if the segment is no longer busy at
			 * that point, we'll get a contiguous allocation, but
			 * even if it is still busy, we will get a forward
			 * allocation.
			 * We try to avoid choosing the segment at "bend",
			 * because that can lead to the next allocation
			 * taking the segment at "fbno", which would be a
			 * backward allocation.  We only use the segment at
			 * "fbno" if it is much larger than the current
			 * requested size, because in that case there's a
			 * good chance subsequent allocations will be
			 * contiguous.
			 */
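			/*
			 * For instance, with args->minlen = 1 and
			 * args->maxlen = 8: a free range [0, 20) split by a
			 * busy range [4, 10) leaves a left candidate of 4
			 * blocks and a right candidate of 10.  Neither
			 * reaches maxlen (or 4 * maxlen for the right side),
			 * but the left candidate satisfies minlen, so we
			 * trim to [0, 4) and keep allocations moving
			 * forward.
			 */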
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfect */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
out:

	if (fbno != *bno || flen != *len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
					   fbno, flen);
		*bno = fbno;
		*len = flen;
		*busy_gen = args->pag->pagb_gen;
		ret = true;
	}
	spin_unlock(&args->pag->pagb_lock);
	return ret;
fail:
	/*
	 * Return a zero extent length as a failure indication.  All callers
	 * re-check if the trimmed extent satisfies the minlen requirement.
	 */
	flen = 0;
	goto out;
}

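/*
 * Remove a single busy extent from the rbtree and free it.  A zero length
 * means xfs_extent_busy_update_extent() already erased the rbtree node when
 * the blocks were reused, so only the list entry is left to tear down.
 */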
STATIC void
xfs_extent_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp)
{
	if (busyp->length) {
		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
					busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kmem_free(busyp);
}

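/*
 * Drop the busy tree lock and the perag reference.  If any busy extents
 * were cleared, bump the busy generation and wake everyone waiting in
 * xfs_extent_busy_flush() or xfs_extent_busy_wait_all().
 */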
static void
xfs_extent_busy_put_pag(
	struct xfs_perag	*pag,
	bool			wakeup)
		__releases(pag->pagb_lock)
{
	if (wakeup) {
		pag->pagb_gen++;
		wake_up_all(&pag->pagb_wait);
	}

	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set skip extents that need to be discarded, and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_extent_busy	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;
	bool			wakeup = false;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag)
				xfs_extent_busy_put_pag(pag, wakeup);
			agno = busyp->agno;
			pag = xfs_perag_get(mp, agno);
			spin_lock(&pag->pagb_lock);
			wakeup = false;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
		} else {
			xfs_extent_busy_clear_one(mp, pag, busyp);
			wakeup = true;
		}
	}

	if (pag)
		xfs_extent_busy_put_pag(pag, wakeup);
}

/*
 * Flush out all busy extents for this AG: force the log to disk, then wait
 * for the busy generation to advance past @busy_gen, i.e. for at least one
 * of the extents that were busy at that generation to be cleared.
 */
void
xfs_extent_busy_flush(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	unsigned		busy_gen)
{
	DEFINE_WAIT		(wait);
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return;

	do {
		prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
		if (busy_gen != READ_ONCE(pag->pagb_gen))
			break;
		schedule();
	} while (1);

	finish_wait(&pag->pagb_wait, &wait);
}

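/*
 * Wait for the busy extent tree of every AG to drain completely.  Unlike
 * xfs_extent_busy_flush() this does not force the log itself; it relies on
 * the wakeups issued as each AG's busy extents are cleared.
 */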
void
xfs_extent_busy_wait_all(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	DEFINE_WAIT		(wait);
	xfs_agnumber_t		agno;

	for_each_perag(mp, agno, pag) {
		do {
			prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
			if (RB_EMPTY_ROOT(&pag->pagb_tree))
				break;
			schedule();
		} while (1);
		finish_wait(&pag->pagb_wait, &wait);
	}
}

/*
 * Callback for list_sort to sort busy extents by the AG they reside in,
 * and then by start block within each AG.
 */
int
xfs_extent_busy_ag_cmp(
	void			*priv,
	const struct list_head	*l1,
	const struct list_head	*l2)
{
	struct xfs_extent_busy	*b1 =
		container_of(l1, struct xfs_extent_busy, list);
	struct xfs_extent_busy	*b2 =
		container_of(l2, struct xfs_extent_busy, list);
	s32 diff;

	diff = b1->agno - b2->agno;
	if (!diff)
		diff = b1->bno - b2->bno;
	return diff;
}