// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_bmap.h"

struct kmem_cache	*xfs_extfree_item_cache;

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

/*
 * Size of the AGFL. For CRC-enabled filesystems we steal a couple of slots in
 * the beginning of the block for a proper header with the location information
 * and CRC.
 */
unsigned int
xfs_agfl_size(
	struct xfs_mount	*mp)
{
	unsigned int		size = mp->m_sb.sb_sectsize;

	if (xfs_has_crc(mp))
		size -= sizeof(struct xfs_agfl);

	return size / sizeof(xfs_agblock_t);
}
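
/*
 * Worked example of the computation above, for illustration only: with a
 * 512-byte sector and assuming the CRC-era struct xfs_agfl header (magic,
 * seqno, uuid, lsn, crc) is 36 bytes, a CRC filesystem gets
 * (512 - 36) / sizeof(xfs_agblock_t) = 119 AGFL slots, while a non-CRC
 * filesystem gets the full 512 / 4 = 128 slots. The header size is an
 * assumption here, not something this file defines.
 */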

unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{
	if (xfs_has_rmapbt(mp))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_has_finobt(mp))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount	*mp)
{
	if (xfs_has_reflink(mp))
		return xfs_refc_block(mp) + 1;
	if (xfs_has_rmapbt(mp))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_has_finobt(mp))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}
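
/*
 * Note on the two helpers above: each feature's btree root occupies the
 * block immediately after the previous root, so both functions simply
 * return "last static root + 1". xfs_prealloc_blocks() is therefore the
 * first AG block past all of the fixed per-AG metadata blocks.
 */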

/*
 * The number of blocks per AG that we withhold from xfs_mod_fdblocks to
 * guarantee that we can refill the AGFL prior to allocating space in a nearly
 * full AG.  Although the space described by the free space btrees, the
 * blocks used by the freesp btrees themselves, and the blocks owned by the
 * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk
 * free space in the AG drop so low that the free space btrees cannot refill an
 * empty AGFL up to the minimum level.  Rather than grind through empty AGs
 * until the fs goes down, we subtract this many AG blocks from the incore
 * fdblocks to ensure user allocation does not overcommit the space the
 * filesystem needs for the AGFLs.  The rmap btree uses a per-AG reservation to
 * withhold space from xfs_mod_fdblocks, so we do not account for that here.
 */
#define XFS_ALLOCBT_AGFL_RESERVE	4

/*
 * Compute the number of blocks that we set aside to guarantee the ability to
 * refill the AGFL and handle a full bmap btree split.
 *
 * In order to avoid an ENOSPC-related deadlock caused by out-of-order locking
 * of AGF buffers (PV 947395), we place constraints on the relationship among
 * actual allocations for data blocks, freelist blocks, and potential file data
 * bmap btree blocks.  However, these restrictions can result in no actual
 * space being allocated for a delayed extent: for example, a data block is
 * allocated in an AG, but no additional block is available for the bmap btree
 * block required by a split of the file's bmap btree.  This can lead to an
 * infinite loop when the file is flushed to disk and all delayed extents must
 * actually be allocated.  To get around this, we explicitly set aside a few
 * blocks which will not be reserved in delayed allocation.
 *
 * For each AG, we need to reserve enough blocks to replenish a totally empty
 * AGFL and 4 more to handle a potential split of the file's bmap btree.
 */
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
}
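
/*
 * Worked example, purely illustrative: on a filesystem with sb_agcount = 4,
 * the incore set-aside is 4 * (XFS_ALLOCBT_AGFL_RESERVE + 4) = 4 * 8 = 32
 * blocks, i.e. per AG we withhold 4 blocks for AGFL refills and 4 for a
 * bmap btree split.
 */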

/*
 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG.  However, we cannot use all
 * the blocks in the AG - some are permanently used by metadata.  These
 * blocks are generally:
 *	- the AG superblock, AGF, AGI and AGFL
 *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
 *	  the AGI free inode and rmap btree root blocks.
 *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
 *	- the rmapbt root block
 *
 * The AG headers are sector sized, so the amount of space they take up is
 * dependent on filesystem geometry.  The others are all single blocks.
 */
unsigned int
xfs_alloc_ag_max_usable(
	struct xfs_mount	*mp)
{
	unsigned int		blocks;

	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
	blocks += XFS_ALLOCBT_AGFL_RESERVE;
	blocks += 3;			/* AGF, AGI btree root blocks */
	if (xfs_has_finobt(mp))
		blocks++;		/* finobt root block */
	if (xfs_has_rmapbt(mp))
		blocks++;		/* rmap root block */
	if (xfs_has_reflink(mp))
		blocks++;		/* refcount root block */

	return mp->m_sb.sb_agblocks - blocks;
}
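
/*
 * Worked example, assuming 512-byte sectors and 4096-byte blocks purely for
 * illustration: the four sector-sized AG headers round up to one filesystem
 * block, so with finobt, rmapbt and reflink all enabled the deduction is
 * 1 + XFS_ALLOCBT_AGFL_RESERVE + 3 + 1 + 1 + 1 = 11 blocks per AG.
 */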

/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	int			error;

	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
	cur->bc_ag.abt.active = (*stat == 1);
	return error;
}

/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	int			error;

	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
	cur->bc_ag.abt.active = (*stat == 1);
	return error;
}

/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	int			error;

	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
	cur->bc_ag.abt.active = (*stat == 1);
	return error;
}

static inline bool
xfs_alloc_cur_active(
	struct xfs_btree_cur	*cur)
{
	return cur && cur->bc_ag.abt.active;
}

/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

/* Convert the ondisk btree record to its incore representation. */
void
xfs_alloc_btrec_to_irec(
	const union xfs_btree_rec	*rec,
	struct xfs_alloc_rec_incore	*irec)
{
	irec->ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
	irec->ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
}

/* Simple checks for free space records. */
xfs_failaddr_t
xfs_alloc_check_irec(
	struct xfs_perag			*pag,
	const struct xfs_alloc_rec_incore	*irec)
{
	if (irec->ar_blockcount == 0)
		return __this_address;

	/* check for valid extent range, including overflow */
	if (!xfs_verify_agbext(pag, irec->ar_startblock, irec->ar_blockcount))
		return __this_address;

	return NULL;
}

static inline int
xfs_alloc_complain_bad_rec(
	struct xfs_btree_cur		*cur,
	xfs_failaddr_t			fa,
	const struct xfs_alloc_rec_incore *irec)
{
	struct xfs_mount		*mp = cur->bc_mp;

	xfs_warn(mp,
		"%s Freespace BTree record corruption in AG %d detected at %pS!",
		cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size",
		cur->bc_ag.pag->pag_agno, fa);
	xfs_warn(mp,
		"start block 0x%x block count 0x%x", irec->ar_startblock,
		irec->ar_blockcount);
	return -EFSCORRUPTED;
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	struct xfs_alloc_rec_incore irec;
	union xfs_btree_rec	*rec;
	xfs_failaddr_t		fa;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || !(*stat))
		return error;

	xfs_alloc_btrec_to_irec(rec, &irec);
	fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec);
	if (fa)
		return xfs_alloc_complain_bad_rec(cur, fa, &irec);

	*bno = irec.ar_startblock;
	*len = irec.ar_blockcount;
	return 0;
}

/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC bool
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen,	/* result length */
	unsigned	*busy_gen)
{
	xfs_agblock_t	bno = foundbno;
	xfs_extlen_t	len = foundlen;
	xfs_extlen_t	diff;
	bool		busy;

	/* Trim busy sections out of found extent */
	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);

	/*
	 * If we have a largish extent that happens to start before min_agbno,
	 * see if we can shift it into range...
	 */
	if (bno < args->min_agbno && bno + len > args->min_agbno) {
		diff = args->min_agbno - bno;
		if (len > diff) {
			bno += diff;
			len -= diff;
		}
	}

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);

		diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}

	return busy;
}
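
/*
 * Alignment example for the helper above, purely illustrative: with
 * args->alignment = 4 and a (busy-trimmed) extent [bno = 6, len = 10], the
 * start rounds up to aligned_bno = 8, diff = 2, and the result is
 * [8, len = 8]. Had diff consumed the whole extent, *reslen would be 0 and
 * the caller would reject it against minlen.
 */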

/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	int		datatype,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1 = 0;	/* length with newbno1 */
	xfs_extlen_t	newlen2 = 0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */
	bool		userdata = datatype & XFS_ALLOC_USERDATA;

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before the desired block. The second case is there to
	 * allow for contiguous allocation from the remaining free space if
	 * the file grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
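
/*
 * Locality example for the helper above, purely illustrative: with
 * wantbno = 100, wantlen = 8, alignment = 1 and a free extent
 * [freebno = 90, freelen = 32], we have freebno < wantbno and
 * freeend = 122 >= wantend = 108, so the third branch returns
 * newbno1 = wantbno = 100 with diff = XFS_ABSDIFF(100, 100) = 0, a perfect
 * locality match carved out of the middle of the free extent.
 */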

/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod)
		rlen = rlen - (k - args->mod);
	else
		rlen = rlen - args->prod + (args->mod - k);
	/* casts to (int) catch length underflows */
	if ((int)rlen < (int)args->minlen)
		return;
	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
	ASSERT(rlen % args->prod == args->mod);
	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
		rlen + args->minleft);
	args->len = rlen;
}
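
/*
 * Worked example for the mod/prod fixup above, purely illustrative: with
 * prod = 4, mod = 1 and len = 11, k = 11 % 4 = 3 > mod, so the length is
 * trimmed to 11 - (3 - 1) = 9, which satisfies 9 = 2 * 4 + 1. With
 * len = 6, k = 2 yields 6 - 1 = 5 = 1 * 4 + 1.
 */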

/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	struct xfs_btree_cur *cnt_cur,	/* cursor for by-size btree */
	struct xfs_btree_cur *bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1 = 0;	/* first new free length */
	xfs_extlen_t	nflen2 = 0;	/* second new free length */
	struct xfs_mount *mp;

	mp = cnt_cur->bc_mp;

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp,
				   i != 1 ||
				   nfbno1 != fbno ||
				   nflen1 != flen))
			return -EFSCORRUPTED;
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp,
				   i != 1 ||
				   nfbno1 != fbno ||
				   nflen1 != flen))
			return -EFSCORRUPTED;
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_levels[0].bp);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_levels[0].bp);

		if (XFS_IS_CORRUPT(mp,
				   bnoblock->bb_numrecs !=
				   cntblock->bb_numrecs))
			return -EFSCORRUPTED;
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0))
			return -EFSCORRUPTED;
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0))
			return -EFSCORRUPTED;
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0))
			return -EFSCORRUPTED;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	return 0;
}
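
/*
 * Illustration of the fourth (middle) case above: allocating
 * [rbno = 20, rlen = 4] out of free extent [fbno = 16, flen = 16] leaves
 * [nfbno1 = 16, nflen1 = 4] on the left and [nfbno2 = 24, nflen2 = 8] on
 * the right. The by-size tree deletes the old record and inserts both new
 * ones; the by-block tree updates the old record in place and inserts the
 * second.
 */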

/*
 * We do not verify the AGFL contents against AGF-based index counters here,
 * even though we may have access to the perag that contains shadow copies. We
 * don't know if the AGF based counters have been checked, and if they have
 * they still may be inconsistent because they haven't yet been reset on the
 * first allocation after the AGF has been read in.
 *
 * This means we can only check that all agfl entries contain valid or null
 * values because we can't reliably determine the active range to exclude
 * NULLAGBNO as a valid value.
 *
 * However, we can't even do that for v4 format filesystems because there are
 * old versions of mkfs out there that do not initialise the AGFL to known,
 * verifiable values. Hence we can't tell the difference between an AGFL block
 * allocated by mkfs and a corrupted AGFL block here on v4 filesystems.
 *
 * As a result, we can only fully validate AGFL block numbers when we pull
 * them from the freelist in xfs_alloc_get_freelist().
 */
static xfs_failaddr_t
xfs_agfl_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno = xfs_buf_to_agfl_bno(bp);
	int			i;

	if (!xfs_has_crc(mp))
		return NULL;

	if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
		return __this_address;
	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	/*
	 * during growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < xfs_agfl_size(mp); i++) {
		if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return __this_address;
	}

	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
		return __this_address;
	return NULL;
}

static void
xfs_agfl_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_failaddr_t	fa;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of
	 * the AGFL is what the AGF says is active. We can't get to the AGF,
	 * so we can't verify just those entries are valid.
	 */
	if (!xfs_has_crc(mp))
		return;

	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agfl_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}

static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	xfs_failaddr_t		fa;

	/* no verification of non-crc AGFLs */
	if (!xfs_has_crc(mp))
		return;

	fa = xfs_agfl_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}

const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.name = "xfs_agfl",
	.magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
	.verify_struct = xfs_agfl_verify,
};

/*
 * Read in the allocation group free block array.
 */
int
xfs_alloc_read_agfl(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_buf		*bp;
	int			error;

	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (error)
		return error;
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}

STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = agbp->b_addr;

	agbp->b_pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length))) {
		xfs_buf_mark_corrupt(agbp);
		return -EFSCORRUPTED;
	}

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}

/*
 * Block allocation algorithm and data structures.
 */
struct xfs_alloc_cur {
	struct xfs_btree_cur	*cnt;		/* btree cursors */
	struct xfs_btree_cur	*bnolt;
	struct xfs_btree_cur	*bnogt;
	xfs_extlen_t		cur_len;	/* current search length */
	xfs_agblock_t		rec_bno;	/* extent startblock */
	xfs_extlen_t		rec_len;	/* extent length */
	xfs_agblock_t		bno;		/* alloc bno */
	xfs_extlen_t		len;		/* alloc len */
	xfs_extlen_t		diff;		/* diff from search bno */
	unsigned int		busy_gen;	/* busy state */
	bool			busy;
};
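
/*
 * The three cursors above implement the NEAR-mode search in
 * xfs_alloc_ag_vextent_locality(): bnolt walks the by-bno tree left from
 * the locality hint, bnogt walks it right, and cnt jumps through the
 * by-size tree using cur_len as the size key, while the rec_bno/rec_len
 * and bno/len/diff fields track the best candidate seen so far.
 */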

/*
 * Set up cursors, etc. in the extent allocation cursor. This function can be
 * called multiple times to reset an initialized structure without having to
 * reallocate cursors.
 */
static int
xfs_alloc_cur_setup(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur)
{
	int			error;
	int			i;

	acur->cur_len = args->maxlen;
	acur->rec_bno = 0;
	acur->rec_len = 0;
	acur->bno = 0;
	acur->len = 0;
	acur->diff = -1;
	acur->busy = false;
	acur->busy_gen = 0;

	/*
	 * Perform an initial cntbt lookup to check for availability of maxlen
	 * extents. If this fails, we'll return -ENOSPC to signal the caller to
	 * attempt a small allocation.
	 */
	if (!acur->cnt)
		acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
					args->agbp, args->pag, XFS_BTNUM_CNT);
	error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
	if (error)
		return error;

	/*
	 * Allocate the bnobt left and right search cursors.
	 */
	if (!acur->bnolt)
		acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
					args->agbp, args->pag, XFS_BTNUM_BNO);
	if (!acur->bnogt)
		acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
					args->agbp, args->pag, XFS_BTNUM_BNO);
	return i == 1 ? 0 : -ENOSPC;
}

static void
xfs_alloc_cur_close(
	struct xfs_alloc_cur	*acur,
	bool			error)
{
	int			cur_error = XFS_BTREE_NOERROR;

	if (error)
		cur_error = XFS_BTREE_ERROR;

	if (acur->cnt)
		xfs_btree_del_cursor(acur->cnt, cur_error);
	if (acur->bnolt)
		xfs_btree_del_cursor(acur->bnolt, cur_error);
	if (acur->bnogt)
		xfs_btree_del_cursor(acur->bnogt, cur_error);
	acur->cnt = acur->bnolt = acur->bnogt = NULL;
}

/*
 * Check an extent for allocation and track the best available candidate in
 * the allocation structure. The cursor is deactivated if it has entered an
 * out of range state based on allocation arguments. Optionally return the
 * extent geometry and allocation status if requested by the caller.
 */
static int
xfs_alloc_cur_check(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	struct xfs_btree_cur	*cur,
	int			*new)
{
	int			error, i;
	xfs_agblock_t		bno, bnoa, bnew;
	xfs_extlen_t		len, lena, diff = -1;
	bool			busy;
	unsigned		busy_gen = 0;
	bool			deactivate = false;
	bool			isbnobt = cur->bc_btnum == XFS_BTNUM_BNO;

	*new = 0;

	error = xfs_alloc_get_rec(cur, &bno, &len, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(args->mp, i != 1))
		return -EFSCORRUPTED;

	/*
	 * Check minlen and deactivate a cntbt cursor if out of acceptable size
	 * range (i.e., walking backwards looking for a minlen extent).
	 */
	if (len < args->minlen) {
		deactivate = !isbnobt;
		goto out;
	}

	busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
					 &busy_gen);
	acur->busy |= busy;
	if (busy)
		acur->busy_gen = busy_gen;
	/* deactivate a bnobt cursor outside of locality range */
	if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
		deactivate = isbnobt;
		goto out;
	}
	if (lena < args->minlen)
		goto out;

	args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
	xfs_alloc_fix_len(args);
	ASSERT(args->len >= args->minlen);
	if (args->len < acur->len)
		goto out;

	/*
	 * We have an aligned record that satisfies minlen and beats or matches
	 * the candidate extent size. Compare locality for near allocation
	 * mode.
	 */
	diff = xfs_alloc_compute_diff(args->agbno, args->len,
				      args->alignment, args->datatype,
				      bnoa, lena, &bnew);
	if (bnew == NULLAGBLOCK)
		goto out;

	/*
	 * Deactivate a bnobt cursor with worse locality than the current best.
	 */
	if (diff > acur->diff) {
		deactivate = isbnobt;
		goto out;
	}

	ASSERT(args->len > acur->len ||
	       (args->len == acur->len && diff <= acur->diff));
	acur->rec_bno = bno;
	acur->rec_len = len;
	acur->bno = bnew;
	acur->len = args->len;
	acur->diff = diff;
	*new = 1;

	/*
	 * We're done if we found a perfect allocation. This only deactivates
	 * the current cursor, but this is just an optimization to terminate a
	 * cntbt search that otherwise runs to the edge of the tree.
	 */
	if (acur->diff == 0 && acur->len == args->maxlen)
		deactivate = true;
out:
	if (deactivate)
		cur->bc_ag.abt.active = false;
	trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
				  *new);
	return 0;
}

/*
 * Complete an allocation of a candidate extent. Remove the extent from both
 * trees and update the args structure.
 */
STATIC int
xfs_alloc_cur_finish(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur)
{
	struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
	int			error;

	ASSERT(acur->cnt && acur->bnolt);
	ASSERT(acur->bno >= acur->rec_bno);
	ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
	ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));

	error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
				      acur->rec_len, acur->bno, acur->len, 0);
	if (error)
		return error;

	args->agbno = acur->bno;
	args->len = acur->len;
	args->wasfromfl = 0;

	trace_xfs_alloc_cur(args);
	return 0;
}

/*
 * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
 * bno optimized lookup to search for extents with ideal size and locality.
 */
STATIC int
xfs_alloc_cntbt_iter(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur)
{
	struct xfs_btree_cur	*cur = acur->cnt;
	xfs_agblock_t		bno;
	xfs_extlen_t		len, cur_len;
	int			error;
	int			i;

	if (!xfs_alloc_cur_active(cur))
		return 0;

	/* locality optimized lookup */
	cur_len = acur->cur_len;
	error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
	if (error)
		return error;
	if (i == 0)
		return 0;
	error = xfs_alloc_get_rec(cur, &bno, &len, &i);
	if (error)
		return error;

	/* check the current record and update search length from it */
	error = xfs_alloc_cur_check(args, acur, cur, &i);
	if (error)
		return error;
	ASSERT(len >= acur->cur_len);
	acur->cur_len = len;

	/*
	 * We looked up the first record >= [agbno, len] above. The agbno is a
	 * secondary key and so the current record may lie just before or after
	 * agbno. If it is past agbno, check the previous record too so long as
	 * the length matches as it may be closer. Don't check a smaller record
	 * because that could deactivate our cursor.
	 */
	if (bno > args->agbno) {
		error = xfs_btree_decrement(cur, 0, &i);
		if (!error && i) {
			error = xfs_alloc_get_rec(cur, &bno, &len, &i);
			if (!error && i && len == acur->cur_len)
				error = xfs_alloc_cur_check(args, acur, cur,
							    &i);
		}
		if (error)
			return error;
	}

	/*
	 * Bump the search key by a single block if we have no allocation
	 * candidate yet, or if the extent we just found is already at least
	 * as large as the doubled key (so doubling would not advance the
	 * search). Otherwise, double the search key to optimize the search.
	 * Efficiency is more important here than absolute best locality.
	 */
	cur_len <<= 1;
	if (!acur->len || acur->cur_len >= cur_len)
		acur->cur_len++;
	else
		acur->cur_len = cur_len;

	return error;
}
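
/*
 * Example of the search key progression above, purely illustrative: the key
 * starts at args->maxlen (set in xfs_alloc_cur_setup()), so with maxlen = 8
 * and a candidate recorded on each pass it grows 8, 16, 32, ..., needing
 * only logarithmically many cntbt lookups; once a found record outruns the
 * doubled key (or nothing has been found yet), the key advances one block
 * at a time instead.
 */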

/*
 * Deal with the case where only small freespaces remain. Either return the
 * contents of the last freespace record, or allocate space from the freelist
 * if there is nothing in the tree.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	*ccur,	/* optional by-size cursor */
	xfs_agblock_t		*fbnop,	/* result block number */
	xfs_extlen_t		*flenp,	/* result length */
	int			*stat)	/* status: 0-freelist, 1-normal/none */
{
	struct xfs_agf		*agf = args->agbp->b_addr;
	int			error = 0;
	xfs_agblock_t		fbno = NULLAGBLOCK;
	xfs_extlen_t		flen = 0;
	int			i = 0;

	/*
	 * If a cntbt cursor is provided, try to allocate the largest record in
	 * the tree. Try the AGFL if the cntbt is empty, otherwise fail the
	 * allocation. Make sure to respect minleft even when pulling from the
	 * freelist.
	 */
	if (ccur)
		error = xfs_btree_decrement(ccur, 0, &i);
	if (error)
		goto error;
	if (i) {
		error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
		if (error)
			goto error;
		if (XFS_IS_CORRUPT(args->mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error;
		}
		goto out;
	}

	if (args->minlen != 1 || args->alignment != 1 ||
	    args->resv == XFS_AG_RESV_AGFL ||
	    be32_to_cpu(agf->agf_flcount) <= args->minleft)
		goto out;

	error = xfs_alloc_get_freelist(args->pag, args->tp, args->agbp,
			&fbno, 0);
	if (error)
		goto error;
	if (fbno == NULLAGBLOCK)
		goto out;

	xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
			      (args->datatype & XFS_ALLOC_NOBUSY));

	if (args->datatype & XFS_ALLOC_USERDATA) {
		struct xfs_buf	*bp;

		error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
				XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
				args->mp->m_bsize, 0, &bp);
		if (error)
			goto error;
		xfs_trans_binval(args->tp, bp);
	}
	*fbnop = args->agbno = fbno;
	*flenp = args->len = 1;
	if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
		error = -EFSCORRUPTED;
		goto error;
	}
	args->wasfromfl = 1;
	trace_xfs_alloc_small_freelist(args);

	/*
	 * If we're feeding an AGFL block to something that doesn't live in the
	 * free space, we need to clear out the OWN_AG rmap.
	 */
	error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
			      &XFS_RMAP_OINFO_AG);
	if (error)
		goto error;

	*stat = 0;
	return 0;

out:
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error:
	trace_xfs_alloc_small_error(args);
	return error;
}
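
/*
 * Summary of the *stat convention above, as used by the callers: *stat == 0
 * means a single block was handed out from the AGFL (args->wasfromfl = 1),
 * so there are no freespace btree records to fix up; *stat == 1 means
 * either the contents of the last btree record were returned or
 * fbno == NULLAGBLOCK because nothing usable was found.
 */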

/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
	struct xfs_btree_cur *bno_cur;	/* by block-number btree cursor */
	struct xfs_btree_cur *cnt_cur;	/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of busy extent */
	xfs_extlen_t	tlen;	/* length of busy extent */
	xfs_agblock_t	tend;	/* end block of busy extent */
	int		i;	/* success/failure of operation */
	unsigned	busy_gen;

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->pag, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	if (XFS_IS_CORRUPT(args->mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	tbno = fbno;
	tlen = flen;
	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->pag, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}

/*
 * Search a given number of btree records in a given direction. Check each
 * record against the good extent we've already found.
 */
STATIC int
xfs_alloc_walk_iter(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	struct xfs_btree_cur	*cur,
	bool			increment,
	bool			find_one, /* quit on first candidate */
	int			count,    /* rec count (-1 for infinite) */
	int			*stat)
{
	int			error;
	int			i;

	*stat = 0;

	/*
	 * Search so long as the cursor is active or we find a better extent.
	 * The cursor is deactivated if it extends beyond the range of the
	 * current allocation candidate.
	 */
	while (xfs_alloc_cur_active(cur) && count) {
		error = xfs_alloc_cur_check(args, acur, cur, &i);
		if (error)
			return error;
		if (i == 1) {
			*stat = 1;
			if (find_one)
				break;
		}
		if (!xfs_alloc_cur_active(cur))
			break;

		if (increment)
			error = xfs_btree_increment(cur, 0, &i);
		else
			error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			return error;
		if (i == 0)
			cur->bc_ag.abt.active = false;

		if (count > 0)
			count--;
	}

	return 0;
}
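
/*
 * Usage note for the iterator above, based on the callers below: the
 * locality search steps each bnobt cursor one record at a time
 * (count = 1, find_one = true), while the final sweeps run unbounded
 * (count = -1) until the cursor deactivates or a candidate is found.
 */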

/*
 * Search the by-bno and by-size btrees in parallel in search of an extent
 * with ideal locality based on the NEAR mode ->agbno locality hint.
 */
STATIC int
xfs_alloc_ag_vextent_locality(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	int			*stat)
{
	struct xfs_btree_cur	*fbcur = NULL;
	int			error;
	int			i;
	bool			fbinc;

	ASSERT(acur->len == 0);

	*stat = 0;

	error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
	if (error)
		return error;
	error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
	if (error)
		return error;
	error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
	if (error)
		return error;

	/*
	 * Search the bnobt and cntbt in parallel. Search the bnobt left and
	 * right and lookup the closest extent to the locality hint for each
	 * extent size key in the cntbt. The entire search terminates
	 * immediately on a bnobt hit because that means we've found best case
	 * locality. Otherwise the search continues until the cntbt cursor runs
	 * off the end of the tree. If no allocation candidate is found at this
	 * point, give up on locality, walk backwards from the end of the cntbt
	 * and take the first available extent.
	 *
	 * The parallel tree searches balance each other out to provide fairly
	 * consistent performance for various situations. The bnobt search can
	 * have pathological behavior in the worst case scenario of larger
	 * allocation requests and fragmented free space. On the other hand,
	 * the bnobt is able to satisfy most smaller allocation requests much
	 * more quickly than the cntbt. The cntbt search can sift through
	 * fragmented free space and sets of free extents for larger allocation
	 * requests more quickly than the bnobt. Since the locality hint is
	 * just a hint and we don't want to scan the entire bnobt for perfect
	 * locality, the cntbt search essentially bounds the bnobt search such
	 * that we can find good enough locality at reasonable performance in
	 * most cases.
	 */
	while (xfs_alloc_cur_active(acur->bnolt) ||
	       xfs_alloc_cur_active(acur->bnogt) ||
	       xfs_alloc_cur_active(acur->cnt)) {

		trace_xfs_alloc_cur_lookup(args);

		/*
		 * Search the bnobt left and right. In the case of a hit, finish
		 * the search in the opposite direction and we're done.
		 */
		error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
					    true, 1, &i);
		if (error)
			return error;
		if (i == 1) {
			trace_xfs_alloc_cur_left(args);
			fbcur = acur->bnogt;
			fbinc = true;
			break;
		}
		error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true,
					    true, 1, &i);
		if (error)
			return error;
		if (i == 1) {
			trace_xfs_alloc_cur_right(args);
			fbcur = acur->bnolt;
			fbinc = false;
			break;
		}

		/*
		 * Check the extent with best locality based on the current
		 * extent size search key and keep track of the best candidate.
		 */
		error = xfs_alloc_cntbt_iter(args, acur);
		if (error)
			return error;
		if (!xfs_alloc_cur_active(acur->cnt)) {
			trace_xfs_alloc_cur_lookup_done(args);
			break;
		}
	}

	/*
	 * If we failed to find anything due to busy extents, return empty
	 * handed so the caller can flush and retry. If no busy extents were
	 * found, walk backwards from the end of the cntbt as a last resort.
	 */
	if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
		error = xfs_btree_decrement(acur->cnt, 0, &i);
		if (error)
			return error;
		if (i) {
			acur->cnt->bc_ag.abt.active = true;
			fbcur = acur->cnt;
			fbinc = false;
		}
	}

	/*
	 * Search in the opposite direction for a better entry in the case of
	 * a bnobt hit or walk backwards from the end of the cntbt.
	 */
	if (fbcur) {
		error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
					    &i);
		if (error)
			return error;
	}

	if (acur->len)
		*stat = 1;

	return 0;
}

/* Check the last block of the cnt btree for allocations. */
static int
xfs_alloc_ag_vextent_lastblock(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	bool			*allocated)
{
	int			error;
	int			i;

#ifdef DEBUG
	/* Randomly don't execute the first algorithm. */
	if (get_random_u32_below(2))
		return 0;
#endif

	/*
	 * Start from the entry that lookup found, sequence through all larger
	 * free blocks. If we're actually pointing at a record smaller than
	 * maxlen, go to the start of this block, and skip all those smaller
	 * than minlen.
	 */
	if (*len || args->alignment > 1) {
		acur->cnt->bc_levels[0].ptr = 1;
		do {
			error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
			if (error)
				return error;
			if (XFS_IS_CORRUPT(args->mp, i != 1))
				return -EFSCORRUPTED;
			if (*len >= args->minlen)
				break;
			error = xfs_btree_increment(acur->cnt, 0, &i);
			if (error)
				return error;
		} while (i);
		ASSERT(*len >= args->minlen);
		if (!i)
			return 0;
	}

	error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
	if (error)
		return error;

	/*
	 * It didn't work. We COULD be in a case where there's a good record
	 * somewhere, so try again.
	 */
	if (acur->len == 0)
		return 0;

	trace_xfs_alloc_near_first(args);
	*allocated = true;
	return 0;
}

/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int
xfs_alloc_ag_vextent_near(
	struct xfs_alloc_arg	*args,
	uint32_t		alloc_flags)
{
	struct xfs_alloc_cur	acur = {};
	int			error;	/* error code */
	int			i;	/* result code, temporary */
	xfs_agblock_t		bno;
	xfs_extlen_t		len;

	/* handle uninitialized agbno range so caller doesn't have to */
	if (!args->min_agbno && !args->max_agbno)
		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
	ASSERT(args->min_agbno <= args->max_agbno);

	/* clamp agbno to the range if it's outside */
	if (args->agbno < args->min_agbno)
		args->agbno = args->min_agbno;
	if (args->agbno > args->max_agbno)
		args->agbno = args->max_agbno;

	/* Retry once quickly if we find busy extents before blocking. */
	alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
restart:
	len = 0;

	/*
	 * Set up cursors and see if there are any free extents as big as
	 * maxlen. If not, pick the last entry in the tree unless the tree is
	 * empty.
	 */
	error = xfs_alloc_cur_setup(args, &acur);
	if (error == -ENOSPC) {
		error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
				&len, &i);
		if (error)
			goto out;
		if (i == 0 || len == 0) {
			trace_xfs_alloc_near_noentry(args);
			goto out;
		}
		ASSERT(i == 1);
	} else if (error) {
		goto out;
	}

	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree. If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 */
	if (xfs_btree_islastblock(acur.cnt, 0)) {
		bool		allocated = false;

		error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
						       &allocated);
		if (error)
			goto out;
		if (allocated)
			goto alloc_finish;
	}

	/*
	 * Second algorithm. Combined cntbt and bnobt search to find ideal
	 * locality.
	 */
	error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
	if (error)
		goto out;

	/*
	 * If we couldn't get anything, give up.
	 */
	if (!acur.len) {
		if (acur.busy) {
			/*
			 * Our only valid extents must have been busy. Flush
			 * and retry the allocation again. If we get an -EAGAIN
			 * error, we're being told that a deadlock was avoided
			 * and the current transaction needs committing before
			 * the allocation can be retried.
			 */
			trace_xfs_alloc_near_busy(args);
			error = xfs_extent_busy_flush(args->tp, args->pag,
					acur.busy_gen, alloc_flags);
			if (error)
				goto out;

			alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
			goto restart;
		}
		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		goto out;
	}

alloc_finish:
	/* fix up btrees on a successful allocation */
	error = xfs_alloc_cur_finish(args, &acur);

out:
	xfs_alloc_cur_close(&acur, error);
	return error;
}

/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
static int
xfs_alloc_ag_vextent_size(
	struct xfs_alloc_arg	*args,
	uint32_t		alloc_flags)
{
	struct xfs_agf		*agf = args->agbp->b_addr;
	struct xfs_btree_cur	*bno_cur;
	struct xfs_btree_cur	*cnt_cur;
	xfs_agblock_t		fbno;		/* start of found freespace */
	xfs_extlen_t		flen;		/* length of found freespace */
	xfs_agblock_t		rbno;		/* returned block number */
	xfs_extlen_t		rlen;		/* length of returned extent */
	bool			busy;
	unsigned		busy_gen;
	int			error;
	int			i;

	/* Retry once quickly if we find busy extents before blocking. */
	alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
restart:
	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					args->pag, XFS_BTNUM_CNT);
	bno_cur = NULL;

	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	/*
	 * If none then we have to settle for a smaller extent. In the case
	 * that there are no large extents, this will return the last entry in
	 * the tree unless the tree is empty. In the case that there are only
	 * busy large extents, this will return the largest small extent unless
	 * there are no smaller extents available.
	 */
	if (!i) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
				&rlen, &busy_gen);
	} else {
		/*
		 * Search for a non-busy extent that is large enough.
		 */
		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			if (XFS_IS_CORRUPT(args->mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto error0;
			}

			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i)
				continue;

			/*
			 * Our only valid extents must have been busy. Flush
			 * and retry the allocation again. If we get an -EAGAIN
			 * error, we're being told that a deadlock was avoided
			 * and the current transaction needs committing before
			 * the allocation can be retried.
			 */
			trace_xfs_alloc_size_busy(args);
			error = xfs_extent_busy_flush(args->tp, args->pag,
					busy_gen, alloc_flags);
			if (error)
				goto error0;

			alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			goto restart;
		}
	}

	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree. Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	if (XFS_IS_CORRUPT(args->mp,
			   rlen != 0 &&
			   (rlen > flen ||
			    rbno + rlen > fbno + flen))) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;
		xfs_extlen_t	bestflen;
		xfs_agblock_t	bestrbno;
		xfs_extlen_t	bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			if (XFS_IS_CORRUPT(args->mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto error0;
			}
			if (flen < bestrlen)
				break;
			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			if (XFS_IS_CORRUPT(args->mp,
					   rlen != 0 &&
					   (rlen > flen ||
					    rbno + rlen > fbno + flen))) {
				error = -EFSCORRUPTED;
				goto error0;
			}
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		if (XFS_IS_CORRUPT(args->mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	if (rlen < args->minlen) {
		if (busy) {
			/*
			 * Our only valid extents must have been busy. Flush
			 * and retry the allocation again. If we get an -EAGAIN
			 * error, we're being told that a deadlock was avoided
			 * and the current transaction needs committing before
			 * the allocation can be retried.
			 */
			trace_xfs_alloc_size_busy(args);
			error = xfs_extent_busy_flush(args->tp, args->pag,
					busy_gen, alloc_flags);
			if (error)
				goto error0;

			alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	rlen = args->len;
	if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					args->pag, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	if (XFS_IS_CORRUPT(args->mp,
			   args->agbno + args->len >
			   be32_to_cpu(agf->agf_length))) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}
1886
1887/*
1888 * Free the extent starting at agno/bno for length.
1889 */
1890STATIC int
1891xfs_free_ag_extent(
1892 struct xfs_trans *tp,
1893 struct xfs_buf *agbp,
1894 xfs_agnumber_t agno,
1895 xfs_agblock_t bno,
1896 xfs_extlen_t len,
1897 const struct xfs_owner_info *oinfo,
1898 enum xfs_ag_resv_type type)
1899{
1900 struct xfs_mount *mp;
1901 struct xfs_btree_cur *bno_cur;
1902 struct xfs_btree_cur *cnt_cur;
1903 xfs_agblock_t gtbno; /* start of right neighbor */
1904 xfs_extlen_t gtlen; /* length of right neighbor */
1905 xfs_agblock_t ltbno; /* start of left neighbor */
1906 xfs_extlen_t ltlen; /* length of left neighbor */
1907 xfs_agblock_t nbno; /* new starting block of freesp */
1908 xfs_extlen_t nlen; /* new length of freespace */
1909 int haveleft; /* have a left neighbor */
1910 int haveright; /* have a right neighbor */
1911 int i;
1912 int error;
1913 struct xfs_perag *pag = agbp->b_pag;
1914
1915 bno_cur = cnt_cur = NULL;
1916 mp = tp->t_mountp;
1917
1918 if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1919 error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo);
1920 if (error)
1921 goto error0;
1922 }
1923
1924 /*
1925 * Allocate and initialize a cursor for the by-block btree.
1926 */
1927 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
1928 /*
1929 * Look for a neighboring block on the left (lower block numbers)
1930 * that is contiguous with this space.
1931 */
1932 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1933 goto error0;
1934 if (haveleft) {
1935 /*
1936 * There is a block to our left.
1937 */
1938 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1939 goto error0;
1940 if (XFS_IS_CORRUPT(mp, i != 1)) {
1941 error = -EFSCORRUPTED;
1942 goto error0;
1943 }
1944 /*
1945 * It's not contiguous, though.
1946 */
1947 if (ltbno + ltlen < bno)
1948 haveleft = 0;
1949 else {
1950 /*
1951 * If this failure happens the request to free this
1952 * space was invalid, it's (partly) already free.
1953 * Very bad.
1954 */
1955 if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
1956 error = -EFSCORRUPTED;
1957 goto error0;
1958 }
1959 }
1960 }
1961 /*
1962 * Look for a neighboring block on the right (higher block numbers)
1963 * that is contiguous with this space.
1964 */
1965 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1966 goto error0;
1967 if (haveright) {
1968 /*
1969 * There is a block to our right.
1970 */
1971 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1972 goto error0;
1973 if (XFS_IS_CORRUPT(mp, i != 1)) {
1974 error = -EFSCORRUPTED;
1975 goto error0;
1976 }
1977 /*
1978 * It's not contiguous, though.
1979 */
1980 if (bno + len < gtbno)
1981 haveright = 0;
1982 else {
1983 /*
1984 * If this failure happens the request to free this
1985 * space was invalid, it's (partly) already free.
1986 * Very bad.
1987 */
1988 if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
1989 error = -EFSCORRUPTED;
1990 goto error0;
1991 }
1992 }
1993 }
1994 /*
1995 * Now allocate and initialize a cursor for the by-size tree.
1996 */
1997 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_CNT);
1998 /*
1999 * Have both left and right contiguous neighbors.
2000 * Merge all three into a single free block.
2001 */
2002 if (haveleft && haveright) {
2003 /*
2004 * Delete the old by-size entry on the left.
2005 */
2006 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2007 goto error0;
2008 if (XFS_IS_CORRUPT(mp, i != 1)) {
2009 error = -EFSCORRUPTED;
2010 goto error0;
2011 }
2012 if ((error = xfs_btree_delete(cnt_cur, &i)))
2013 goto error0;
2014 if (XFS_IS_CORRUPT(mp, i != 1)) {
2015 error = -EFSCORRUPTED;
2016 goto error0;
2017 }
2018 /*
2019 * Delete the old by-size entry on the right.
2020 */
2021 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2022 goto error0;
2023 if (XFS_IS_CORRUPT(mp, i != 1)) {
2024 error = -EFSCORRUPTED;
2025 goto error0;
2026 }
2027 if ((error = xfs_btree_delete(cnt_cur, &i)))
2028 goto error0;
2029 if (XFS_IS_CORRUPT(mp, i != 1)) {
2030 error = -EFSCORRUPTED;
2031 goto error0;
2032 }
2033 /*
2034 * Delete the old by-block entry for the right block.
2035 */
2036 if ((error = xfs_btree_delete(bno_cur, &i)))
2037 goto error0;
2038 if (XFS_IS_CORRUPT(mp, i != 1)) {
2039 error = -EFSCORRUPTED;
2040 goto error0;
2041 }
2042 /*
2043 * Move the by-block cursor back to the left neighbor.
2044 */
2045 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2046 goto error0;
2047 if (XFS_IS_CORRUPT(mp, i != 1)) {
2048 error = -EFSCORRUPTED;
2049 goto error0;
2050 }
2051#ifdef DEBUG
2052 /*
2053 * Check that this is the right record: delete didn't
2054 * mangle the cursor.
2055 */
2056 {
2057 xfs_agblock_t xxbno;
2058 xfs_extlen_t xxlen;
2059
2060 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
2061 &i)))
2062 goto error0;
2063 if (XFS_IS_CORRUPT(mp,
2064 i != 1 ||
2065 xxbno != ltbno ||
2066 xxlen != ltlen)) {
2067 error = -EFSCORRUPTED;
2068 goto error0;
2069 }
2070 }
2071#endif
2072 /*
2073 * Update remaining by-block entry to the new, joined block.
2074 */
2075 nbno = ltbno;
2076 nlen = len + ltlen + gtlen;
2077 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2078 goto error0;
2079 }
2080 /*
2081 * Have only a left contiguous neighbor.
2082 * Merge it together with the new freespace.
2083 */
2084 else if (haveleft) {
2085 /*
2086 * Delete the old by-size entry on the left.
2087 */
2088 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2089 goto error0;
2090 if (XFS_IS_CORRUPT(mp, i != 1)) {
2091 error = -EFSCORRUPTED;
2092 goto error0;
2093 }
2094 if ((error = xfs_btree_delete(cnt_cur, &i)))
2095 goto error0;
2096 if (XFS_IS_CORRUPT(mp, i != 1)) {
2097 error = -EFSCORRUPTED;
2098 goto error0;
2099 }
2100 /*
2101 * Back up the by-block cursor to the left neighbor, and
2102 * update its length.
2103 */
2104 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2105 goto error0;
2106 if (XFS_IS_CORRUPT(mp, i != 1)) {
2107 error = -EFSCORRUPTED;
2108 goto error0;
2109 }
2110 nbno = ltbno;
2111 nlen = len + ltlen;
2112 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2113 goto error0;
2114 }
2115 /*
2116 * Have only a right contiguous neighbor.
2117 * Merge it together with the new freespace.
2118 */
2119 else if (haveright) {
2120 /*
2121 * Delete the old by-size entry on the right.
2122 */
2123 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2124 goto error0;
2125 if (XFS_IS_CORRUPT(mp, i != 1)) {
2126 error = -EFSCORRUPTED;
2127 goto error0;
2128 }
2129 if ((error = xfs_btree_delete(cnt_cur, &i)))
2130 goto error0;
2131 if (XFS_IS_CORRUPT(mp, i != 1)) {
2132 error = -EFSCORRUPTED;
2133 goto error0;
2134 }
2135 /*
2136 * Update the starting block and length of the right
2137 * neighbor in the by-block tree.
2138 */
2139 nbno = bno;
2140 nlen = len + gtlen;
2141 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2142 goto error0;
2143 }
2144 /*
2145 * No contiguous neighbors.
2146 * Insert the new freespace into the by-block tree.
2147 */
2148 else {
2149 nbno = bno;
2150 nlen = len;
2151 if ((error = xfs_btree_insert(bno_cur, &i)))
2152 goto error0;
2153 if (XFS_IS_CORRUPT(mp, i != 1)) {
2154 error = -EFSCORRUPTED;
2155 goto error0;
2156 }
2157 }
2158 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
2159 bno_cur = NULL;
2160 /*
2161 * In all cases we need to insert the new freespace in the by-size tree.
2162 */
2163 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
2164 goto error0;
2165 if (XFS_IS_CORRUPT(mp, i != 0)) {
2166 error = -EFSCORRUPTED;
2167 goto error0;
2168 }
2169 if ((error = xfs_btree_insert(cnt_cur, &i)))
2170 goto error0;
2171 if (XFS_IS_CORRUPT(mp, i != 1)) {
2172 error = -EFSCORRUPTED;
2173 goto error0;
2174 }
2175 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2176 cnt_cur = NULL;
2177
2178 /*
2179 * Update the freespace totals in the ag and superblock.
2180 */
2181 error = xfs_alloc_update_counters(tp, agbp, len);
2182 xfs_ag_resv_free_extent(agbp->b_pag, type, tp, len);
2183 if (error)
2184 goto error0;
2185
2186 XFS_STATS_INC(mp, xs_freex);
2187 XFS_STATS_ADD(mp, xs_freeb, len);
2188
2189 trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
2190
2191 return 0;
2192
2193 error0:
2194 trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
2195 if (bno_cur)
2196 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
2197 if (cnt_cur)
2198 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
2199 return error;
2200}
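
/*
 * Merge example for xfs_free_ag_extent() above (illustrative block numbers):
 * freeing bno = 50, len = 10 with a left neighbor record (40, 10) and a
 * right neighbor record (60, 5) takes the haveleft && haveright path; both
 * old by-size records and the right by-block record are deleted, and the
 * left by-block record is rewritten as the single joined extent (40, 25).
 */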
2201
2202/*
2203 * Visible (exported) allocation/free functions.
2204 * Some of these are used just by xfs_alloc_btree.c and this file.
2205 */
2206
2207/*
2208 * Compute and fill in value of m_alloc_maxlevels.
2209 */
2210void
2211xfs_alloc_compute_maxlevels(
2212 xfs_mount_t *mp) /* file system mount structure */
2213{
2214 mp->m_alloc_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
2215 (mp->m_sb.sb_agblocks + 1) / 2);
2216 ASSERT(mp->m_alloc_maxlevels <= xfs_allocbt_maxlevels_ondisk());
2217}
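
/*
 * The record count passed to xfs_btree_compute_maxlevels() above is
 * (agblocks + 1) / 2 because the most fragmented free space possible is
 * alternating single-block free/used extents, i.e. one bnobt/cntbt record
 * for every other block in the AG.
 *
 * Illustrative sizing (the minrecs values below are assumptions for a
 * 4096-byte block V5 filesystem, not taken from this file): with ~250
 * records per half-full leaf and ~160 keys per half-full node, a
 * 1-million-block AG holds at most ~500,000 records, needing ~2,100
 * leaves, ~14 interior blocks and one root: m_alloc_maxlevels = 3.
 */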
2218
2219/*
2220 * Find the length of the longest extent in an AG. The 'need' parameter
2221 * specifies how much space we're going to need for the AGFL and the
2222 * 'reserved' parameter tells us how many blocks in this AG are reserved for
2223 * other callers.
2224 */
2225xfs_extlen_t
2226xfs_alloc_longest_free_extent(
2227 struct xfs_perag *pag,
2228 xfs_extlen_t need,
2229 xfs_extlen_t reserved)
2230{
2231 xfs_extlen_t delta = 0;
2232
2233 /*
2234 * If the AGFL needs a recharge, we'll have to subtract that from the
2235 * longest extent.
2236 */
2237 if (need > pag->pagf_flcount)
2238 delta = need - pag->pagf_flcount;
2239
2240 /*
2241 * If we cannot maintain others' reservations with space from the
2242 * not-longest freesp extents, we'll have to subtract /that/ from
2243 * the longest extent too.
2244 */
2245 if (pag->pagf_freeblks - pag->pagf_longest < reserved)
2246 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
2247
2248 /*
2249 * If the longest extent is long enough to satisfy all the
2250 * reservations and AGFL rules in place, we can return this extent.
2251 */
2252 if (pag->pagf_longest > delta)
2253 return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
2254 pag->pagf_longest - delta);
2255
2256 /* Otherwise, let the caller try for 1 block if there's space. */
2257 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2258}
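
/*
 * Worked example for the function above (numbers are illustrative only):
 * with pagf_longest = 100, pagf_freeblks = 150, need = 10 and
 * pagf_flcount = 4, refilling the AGFL costs delta = 6 blocks. If
 * reserved = 60, the non-longest free space (150 - 100 = 50) falls 10
 * blocks short, so delta rises to 16 and the usable longest extent is
 * 100 - 16 = 84 blocks (further capped by m_ag_max_usable).
 */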
2259
2260/*
2261 * Compute the minimum length of the AGFL in the given AG. If @pag is NULL,
2262 * return the largest possible minimum length.
2263 */
2264unsigned int
2265xfs_alloc_min_freelist(
2266 struct xfs_mount *mp,
2267 struct xfs_perag *pag)
2268{
2269 /* AG btrees have at least 1 level. */
2270 static const uint8_t fake_levels[XFS_BTNUM_AGF] = {1, 1, 1};
2271 const uint8_t *levels = pag ? pag->pagf_levels : fake_levels;
2272 unsigned int min_free;
2273
2274 ASSERT(mp->m_alloc_maxlevels > 0);
2275
2276 /*
2277 * For a btree shorter than the maximum height, the worst case is that
2278 * every level gets split and a new level is added, then while inserting
2279 * another entry to refill the AGFL, every level under the old root gets
2280 * split again. This is:
2281 *
2282 * (full height split reservation) + (AGFL refill split height)
2283 * = (current height + 1) + (current height - 1)
2284 * = (new height) + (new height - 2)
2285 * = 2 * new height - 2
2286 *
2287 * For a btree of maximum height, the worst case is that every level
2288 * under the root gets split, then while inserting another entry to
2289 * refill the AGFL, every level under the root gets split again. This is
2290 * also:
2291 *
2292 * 2 * (current height - 1)
2293 * = 2 * (new height - 1)
2294 * = 2 * new height - 2
2295 */
2296
2297 /* space needed by-bno freespace btree */
2298 min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
2299 mp->m_alloc_maxlevels) * 2 - 2;
2300 /* space needed by-size freespace btree */
2301 min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
2302 mp->m_alloc_maxlevels) * 2 - 2;
2303 /* space needed reverse mapping used space btree */
2304 if (xfs_has_rmapbt(mp))
2305 min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
2306 mp->m_rmap_maxlevels) * 2 - 2;
2307
2308 return min_free;
2309}
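
/*
 * Applying the "2 * new height - 2" rule above (illustrative numbers):
 * with pagf_levels[BNOi] = pagf_levels[CNTi] = 3 and m_alloc_maxlevels = 5,
 * each free space btree needs min(3 + 1, 5) * 2 - 2 = 6 blocks, so
 * min_free is 12 before any rmapbt contribution.
 */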
2310
2311/*
2312 * Check if the operation we are fixing up the freelist for should go ahead or
2313 * not. If we are freeing blocks, we always allow it, otherwise the allocation
2314 * is dependent on whether the size and shape of free space available will
2315 * permit the requested allocation to take place.
2316 */
2317static bool
2318xfs_alloc_space_available(
2319 struct xfs_alloc_arg *args,
2320 xfs_extlen_t min_free,
2321 int flags)
2322{
2323 struct xfs_perag *pag = args->pag;
2324 xfs_extlen_t alloc_len, longest;
2325 xfs_extlen_t reservation; /* blocks that are still reserved */
2326 int available;
2327 xfs_extlen_t agflcount;
2328
2329 if (flags & XFS_ALLOC_FLAG_FREEING)
2330 return true;
2331
2332 reservation = xfs_ag_resv_needed(pag, args->resv);
2333
2334 /* do we have enough contiguous free space for the allocation? */
2335 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2336 longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2337 if (longest < alloc_len)
2338 return false;
2339
2340 /*
2341 * Do we have enough free space remaining for the allocation? Don't
2342 * account extra agfl blocks because we are about to defer freeing them,
2343 * making them unavailable until the current transaction commits.
2344 */
2345 agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
2346 available = (int)(pag->pagf_freeblks + agflcount -
2347 reservation - min_free - args->minleft);
2348 if (available < (int)max(args->total, alloc_len))
2349 return false;
2350
2351 /*
2352 * Clamp maxlen to the amount of free space available for the actual
2353 * extent allocation.
2354 */
2355 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2356 args->maxlen = available;
2357 ASSERT(args->maxlen > 0);
2358 ASSERT(args->maxlen >= args->minlen);
2359 }
2360
2361 return true;
2362}
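
/*
 * Worked example for the check above (illustrative numbers): with
 * pagf_freeblks = 100, pagf_flcount = 6, min_free = 12, reservation = 20
 * and args->minleft = 5, only min(6, 12) = 6 AGFL blocks count towards the
 * total, so available = 100 + 6 - 20 - 12 - 5 = 69 blocks, which must
 * cover max(args->total, alloc_len) for the allocation to proceed.
 */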
2363
2364int
2365xfs_free_agfl_block(
2366 struct xfs_trans *tp,
2367 xfs_agnumber_t agno,
2368 xfs_agblock_t agbno,
2369 struct xfs_buf *agbp,
2370 struct xfs_owner_info *oinfo)
2371{
2372 int error;
2373 struct xfs_buf *bp;
2374
2375 error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
2376 XFS_AG_RESV_AGFL);
2377 if (error)
2378 return error;
2379
2380 error = xfs_trans_get_buf(tp, tp->t_mountp->m_ddev_targp,
2381 XFS_AGB_TO_DADDR(tp->t_mountp, agno, agbno),
2382 tp->t_mountp->m_bsize, 0, &bp);
2383 if (error)
2384 return error;
2385 xfs_trans_binval(tp, bp);
2386
2387 return 0;
2388}
2389
2390/*
2391 * Check the agfl fields of the agf for inconsistency or corruption.
2392 *
2393 * The original purpose was to detect an agfl header padding mismatch between
2394 * current and early v5 kernels. This problem manifests as a 1-slot size
2395 * difference between the on-disk flcount and the active [first, last] range of
2396 * a wrapped agfl.
2397 *
2398 * However, we need to use these same checks to catch agfl count corruptions
2399 * unrelated to padding. This could occur on any v4 or v5 filesystem, so either
2400 * way, we need to reset the agfl and warn the user.
2401 *
2402 * Return true if a reset is required before the agfl can be used, false
2403 * otherwise.
2404 */
2405static bool
2406xfs_agfl_needs_reset(
2407 struct xfs_mount *mp,
2408 struct xfs_agf *agf)
2409{
2410 uint32_t f = be32_to_cpu(agf->agf_flfirst);
2411 uint32_t l = be32_to_cpu(agf->agf_fllast);
2412 uint32_t c = be32_to_cpu(agf->agf_flcount);
2413 int agfl_size = xfs_agfl_size(mp);
2414 int active;
2415
2416 /*
2417 * The agf read verifier catches severe corruption of these fields.
2418 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2419 * the verifier allows it.
2420 */
2421 if (f >= agfl_size || l >= agfl_size)
2422 return true;
2423 if (c > agfl_size)
2424 return true;
2425
2426 /*
2427 * Check consistency between the on-disk count and the active range. An
2428 * agfl padding mismatch manifests as an inconsistent flcount.
2429 */
2430 if (c && l >= f)
2431 active = l - f + 1;
2432 else if (c)
2433 active = agfl_size - f + l + 1;
2434 else
2435 active = 0;
2436
2437 return active != c;
2438}
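
/*
 * Example of the wrapped-AGFL arithmetic above: for agfl_size = 118 with
 * agf_flfirst = 116 and agf_fllast = 1, the active range wraps and spans
 * (118 - 116) + 1 + 1 = 4 slots, so any on-disk agf_flcount other than 4
 * indicates a padding mismatch (or other corruption) and forces a reset.
 */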
2439
2440/*
2441 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2442 * agfl content cannot be trusted. Warn the user that a repair is required to
2443 * recover leaked blocks.
2444 *
2445 * The purpose of this mechanism is to handle filesystems affected by the agfl
2446 * header padding mismatch problem. A reset keeps the filesystem online with a
2447 * relatively minor free space accounting inconsistency rather than suffer the
2448 * inevitable crash from use of an invalid agfl block.
2449 */
2450static void
2451xfs_agfl_reset(
2452 struct xfs_trans *tp,
2453 struct xfs_buf *agbp,
2454 struct xfs_perag *pag)
2455{
2456 struct xfs_mount *mp = tp->t_mountp;
2457 struct xfs_agf *agf = agbp->b_addr;
2458
2459 ASSERT(xfs_perag_agfl_needs_reset(pag));
2460 trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2461
2462 xfs_warn(mp,
2463 "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2464 "Please unmount and run xfs_repair.",
2465 pag->pag_agno, pag->pagf_flcount);
2466
2467 agf->agf_flfirst = 0;
2468 agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2469 agf->agf_flcount = 0;
2470 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2471 XFS_AGF_FLCOUNT);
2472
2473 pag->pagf_flcount = 0;
2474 clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
2475}
2476
2477/*
2478 * Defer an AGFL block free. This is effectively equivalent to
2479 * xfs_free_extent_later() with some special handling particular to AGFL blocks.
2480 *
2481 * Deferring AGFL frees helps prevent log reservation overruns due to too many
2482 * allocation operations in a transaction. AGFL frees are prone to this problem
2483 * because, for one, they are always freed one at a time. Further, an immediate
2484 * AGFL block free can cause a btree join and require another block free before
2485 * the real allocation can proceed. Deferring the free disconnects freeing up
2486 * the AGFL slot from freeing the block.
2487 */
2488static int
2489xfs_defer_agfl_block(
2490 struct xfs_trans *tp,
2491 xfs_agnumber_t agno,
2492 xfs_agblock_t agbno,
2493 struct xfs_owner_info *oinfo)
2494{
2495 struct xfs_mount *mp = tp->t_mountp;
2496 struct xfs_extent_free_item *xefi;
2497 xfs_fsblock_t fsbno = XFS_AGB_TO_FSB(mp, agno, agbno);
2498
2499 ASSERT(xfs_extfree_item_cache != NULL);
2500 ASSERT(oinfo != NULL);
2501
2502 if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, fsbno)))
2503 return -EFSCORRUPTED;
2504
2505 xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
2506 GFP_KERNEL | __GFP_NOFAIL);
2507 xefi->xefi_startblock = fsbno;
2508 xefi->xefi_blockcount = 1;
2509 xefi->xefi_owner = oinfo->oi_owner;
2510 xefi->xefi_agresv = XFS_AG_RESV_AGFL;
2511
2512 trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
2513
2514 xfs_extent_free_get_group(mp, xefi);
2515 xfs_defer_add(tp, &xefi->xefi_list, &xfs_agfl_free_defer_type);
2516 return 0;
2517}
2518
2519/*
2520 * Add the extent to the list of extents to be freed at transaction end.
2521 * The list is maintained sorted (by block number).
2522 */
2523static int
2524xfs_defer_extent_free(
2525 struct xfs_trans *tp,
2526 xfs_fsblock_t bno,
2527 xfs_filblks_t len,
2528 const struct xfs_owner_info *oinfo,
2529 enum xfs_ag_resv_type type,
2530 bool skip_discard,
2531 struct xfs_defer_pending **dfpp)
2532{
2533 struct xfs_extent_free_item *xefi;
2534 struct xfs_mount *mp = tp->t_mountp;
2535#ifdef DEBUG
2536 xfs_agnumber_t agno;
2537 xfs_agblock_t agbno;
2538
2539 ASSERT(bno != NULLFSBLOCK);
2540 ASSERT(len > 0);
2541 ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
2542 ASSERT(!isnullstartblock(bno));
2543 agno = XFS_FSB_TO_AGNO(mp, bno);
2544 agbno = XFS_FSB_TO_AGBNO(mp, bno);
2545 ASSERT(agno < mp->m_sb.sb_agcount);
2546 ASSERT(agbno < mp->m_sb.sb_agblocks);
2547 ASSERT(len < mp->m_sb.sb_agblocks);
2548 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
2549#endif
2550 ASSERT(xfs_extfree_item_cache != NULL);
2551 ASSERT(type != XFS_AG_RESV_AGFL);
2552
2553 if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len)))
2554 return -EFSCORRUPTED;
2555
2556 xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
2557 GFP_KERNEL | __GFP_NOFAIL);
2558 xefi->xefi_startblock = bno;
2559 xefi->xefi_blockcount = (xfs_extlen_t)len;
2560 xefi->xefi_agresv = type;
2561 if (skip_discard)
2562 xefi->xefi_flags |= XFS_EFI_SKIP_DISCARD;
2563 if (oinfo) {
2564 ASSERT(oinfo->oi_offset == 0);
2565
2566 if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
2567 xefi->xefi_flags |= XFS_EFI_ATTR_FORK;
2568 if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
2569 xefi->xefi_flags |= XFS_EFI_BMBT_BLOCK;
2570 xefi->xefi_owner = oinfo->oi_owner;
2571 } else {
2572 xefi->xefi_owner = XFS_RMAP_OWN_NULL;
2573 }
2574 trace_xfs_bmap_free_defer(mp,
2575 XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
2576 XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
2577
2578 xfs_extent_free_get_group(mp, xefi);
2579 *dfpp = xfs_defer_add(tp, &xefi->xefi_list, &xfs_extent_free_defer_type);
2580 return 0;
2581}
2582
2583int
2584xfs_free_extent_later(
2585 struct xfs_trans *tp,
2586 xfs_fsblock_t bno,
2587 xfs_filblks_t len,
2588 const struct xfs_owner_info *oinfo,
2589 enum xfs_ag_resv_type type,
2590 bool skip_discard)
2591{
2592 struct xfs_defer_pending *dontcare = NULL;
2593
2594 return xfs_defer_extent_free(tp, bno, len, oinfo, type, skip_discard,
2595 &dontcare);
2596}
2597
2598/*
2599 * Set up automatic freeing of unwritten space in the filesystem.
2600 *
2601 * This function attaches a paused deferred extent free item to the
2602 * transaction. Pausing means that the EFI will be logged in the next
2603 * transaction commit, but the pending EFI will not be finished until the
2604 * pending item is unpaused.
2605 *
2606 * If the system goes down after the EFI has been persisted to the log but
2607 * before the pending item is unpaused, log recovery will find the EFI, fail to
2608 * find the EFD, and free the space.
2609 *
2610 * If the pending item is unpaused, the next transaction commit will log an EFD
2611 * without freeing the space.
2612 *
2613 * Caller must ensure that the tp, fsbno, len, oinfo, and resv flags of the
2614 * @args structure are set to the relevant values.
2615 */
2616int
2617xfs_alloc_schedule_autoreap(
2618 const struct xfs_alloc_arg *args,
2619 bool skip_discard,
2620 struct xfs_alloc_autoreap *aarp)
2621{
2622 int error;
2623
2624 error = xfs_defer_extent_free(args->tp, args->fsbno, args->len,
2625 &args->oinfo, args->resv, skip_discard, &aarp->dfp);
2626 if (error)
2627 return error;
2628
2629 xfs_defer_item_pause(args->tp, aarp->dfp);
2630 return 0;
2631}
2632
2633/*
2634 * Cancel automatic freeing of unwritten space in the filesystem.
2635 *
2636 * Earlier, we created a paused deferred extent free item and attached it to
2637 * this transaction so that we could automatically roll back a new space
2638 * allocation if the system went down. Now we want to cancel the paused work
2639 * item by marking the EFI stale so we don't actually free the space, unpausing
2640 * the pending item and logging an EFD.
2641 *
2642 * The caller generally should have already mapped the space into the ondisk
2643 * filesystem. If the reserved space was partially used, the caller must call
2644 * xfs_free_extent_later to create a new EFI to free the unused space.
2645 */
2646void
2647xfs_alloc_cancel_autoreap(
2648 struct xfs_trans *tp,
2649 struct xfs_alloc_autoreap *aarp)
2650{
2651 struct xfs_defer_pending *dfp = aarp->dfp;
2652 struct xfs_extent_free_item *xefi;
2653
2654 if (!dfp)
2655 return;
2656
2657 list_for_each_entry(xefi, &dfp->dfp_work, xefi_list)
2658 xefi->xefi_flags |= XFS_EFI_CANCELLED;
2659
2660 xfs_defer_item_unpause(tp, dfp);
2661}
2662
2663/*
2664 * Commit automatic freeing of unwritten space in the filesystem.
2665 *
2666 * This unpauses an earlier _schedule_autoreap and commits to freeing the
2667 * allocated space. Call this if none of the reserved space was used.
2668 */
2669void
2670xfs_alloc_commit_autoreap(
2671 struct xfs_trans *tp,
2672 struct xfs_alloc_autoreap *aarp)
2673{
2674 if (aarp->dfp)
2675 xfs_defer_item_unpause(tp, aarp->dfp);
2676}
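
/*
 * Sketch of the intended autoreap call sequence, per the comments above
 * (the caller shown is hypothetical, not from this file):
 *
 *	struct xfs_alloc_autoreap aarp = { };
 *
 *	error = xfs_alloc_schedule_autoreap(&args, false, &aarp);
 *	...map args.fsbno into the ondisk filesystem...
 *	if (space_was_used)
 *		xfs_alloc_cancel_autoreap(args.tp, &aarp);
 *	else
 *		xfs_alloc_commit_autoreap(args.tp, &aarp);
 *
 * Cancelling logs a stale EFI/EFD pair so the now-mapped space is kept;
 * committing unpauses the pending free so the unused space is reaped.
 */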
2677
2678#ifdef DEBUG
2679/*
2680 * Check if an AGF has a free extent record whose length is equal to
2681 * args->minlen.
2682 */
2683STATIC int
2684xfs_exact_minlen_extent_available(
2685 struct xfs_alloc_arg *args,
2686 struct xfs_buf *agbp,
2687 int *stat)
2688{
2689 struct xfs_btree_cur *cnt_cur;
2690 xfs_agblock_t fbno;
2691 xfs_extlen_t flen;
2692 int error = 0;
2693
2694 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
2695 args->pag, XFS_BTNUM_CNT);
2696 error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
2697 if (error)
2698 goto out;
2699
2700 if (*stat == 0) {
2701 error = -EFSCORRUPTED;
2702 goto out;
2703 }
2704
2705 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat);
2706 if (error)
2707 goto out;
2708
2709 if (*stat == 1 && flen != args->minlen)
2710 *stat = 0;
2711
2712out:
2713 xfs_btree_del_cursor(cnt_cur, error);
2714
2715 return error;
2716}
2717#endif
2718
2719/*
2720 * Decide whether to use this allocation group for this allocation.
2721 * If so, fix up the btree freelist's size.
2722 */
2723int /* error */
2724xfs_alloc_fix_freelist(
2725 struct xfs_alloc_arg *args, /* allocation argument structure */
2726 uint32_t alloc_flags)
2727{
2728 struct xfs_mount *mp = args->mp;
2729 struct xfs_perag *pag = args->pag;
2730 struct xfs_trans *tp = args->tp;
2731 struct xfs_buf *agbp = NULL;
2732 struct xfs_buf *agflbp = NULL;
2733 struct xfs_alloc_arg targs; /* local allocation arguments */
2734 xfs_agblock_t bno; /* freelist block */
2735 xfs_extlen_t need; /* total blocks needed in freelist */
2736 int error = 0;
2737
2738 /* deferred ops (AGFL block frees) require permanent transactions */
2739 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
2740
2741 if (!xfs_perag_initialised_agf(pag)) {
2742 error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2743 if (error) {
2744 /* Couldn't lock the AGF so skip this AG. */
2745 if (error == -EAGAIN)
2746 error = 0;
2747 goto out_no_agbp;
2748 }
2749 }
2750
2751 /*
2752 * If this is a metadata-preferred pag and we are allocating user data,
2753 * try somewhere else unless we are being asked to try harder at this
2754 * point.
2755 */
2756 if (xfs_perag_prefers_metadata(pag) &&
2757 (args->datatype & XFS_ALLOC_USERDATA) &&
2758 (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2759 ASSERT(!(alloc_flags & XFS_ALLOC_FLAG_FREEING));
2760 goto out_agbp_relse;
2761 }
2762
2763 need = xfs_alloc_min_freelist(mp, pag);
2764 if (!xfs_alloc_space_available(args, need, alloc_flags |
2765 XFS_ALLOC_FLAG_CHECK))
2766 goto out_agbp_relse;
2767
2768 /*
2769 * Get the a.g. freespace buffer.
2770 * Can fail if we're not blocking on locks, and it's held.
2771 */
2772 if (!agbp) {
2773 error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2774 if (error) {
2775 /* Couldn't lock the AGF so skip this AG. */
2776 if (error == -EAGAIN)
2777 error = 0;
2778 goto out_no_agbp;
2779 }
2780 }
2781
2782 /* reset a padding mismatched agfl before final free space check */
2783 if (xfs_perag_agfl_needs_reset(pag))
2784 xfs_agfl_reset(tp, agbp, pag);
2785
2786 /* If there isn't enough total space or a long enough single extent, reject it. */
2787 need = xfs_alloc_min_freelist(mp, pag);
2788 if (!xfs_alloc_space_available(args, need, alloc_flags))
2789 goto out_agbp_relse;
2790
2791#ifdef DEBUG
2792 if (args->alloc_minlen_only) {
2793 int stat;
2794
2795 error = xfs_exact_minlen_extent_available(args, agbp, &stat);
2796 if (error || !stat)
2797 goto out_agbp_relse;
2798 }
2799#endif
2800 /*
2801 * Make the freelist shorter if it's too long.
2802 *
2803 * Note that from this point onwards, we will always release the agf and
2804 * agfl buffers on error. This handles the case where we error out and
2805 * the buffers are clean or may not have been joined to the transaction
2806 * and hence need to be released manually. If they have been joined to
2807 * the transaction, then xfs_trans_brelse() will handle them
2808 * appropriately based on the recursion count and dirty state of the
2809 * buffer.
2810 *
2811 * XXX (dgc): When we have lots of free space, does this buy us
2812 * anything other than extra overhead when we need to put more blocks
2813 * back on the free list? Maybe we should only do this when space is
2814 * getting low or the AGFL is more than half full?
2815 *
2816 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2817 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2818 * updating the rmapbt. Both flags are used in xfs_repair while we're
2819 * rebuilding the rmapbt, and neither are used by the kernel. They're
2820 * both required to ensure that rmaps are correctly recorded for the
2821 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2822 * repair/rmap.c in xfsprogs for details.
2823 */
2824 memset(&targs, 0, sizeof(targs));
2825 /* struct copy below */
2826 if (alloc_flags & XFS_ALLOC_FLAG_NORMAP)
2827 targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2828 else
2829 targs.oinfo = XFS_RMAP_OINFO_AG;
2830 while (!(alloc_flags & XFS_ALLOC_FLAG_NOSHRINK) &&
2831 pag->pagf_flcount > need) {
2832 error = xfs_alloc_get_freelist(pag, tp, agbp, &bno, 0);
2833 if (error)
2834 goto out_agbp_relse;
2835
2836 /* defer agfl frees */
2837 error = xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
2838 if (error)
2839 goto out_agbp_relse;
2840 }
2841
2842 targs.tp = tp;
2843 targs.mp = mp;
2844 targs.agbp = agbp;
2845 targs.agno = args->agno;
2846 targs.alignment = targs.minlen = targs.prod = 1;
2847 targs.pag = pag;
2848 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
2849 if (error)
2850 goto out_agbp_relse;
2851
2852 /* Make the freelist longer if it's too short. */
2853 while (pag->pagf_flcount < need) {
2854 targs.agbno = 0;
2855 targs.maxlen = need - pag->pagf_flcount;
2856 targs.resv = XFS_AG_RESV_AGFL;
2857
2858 /* Allocate as many blocks as possible at once. */
2859 error = xfs_alloc_ag_vextent_size(&targs, alloc_flags);
2860 if (error)
2861 goto out_agflbp_relse;
2862
2863 /*
2864 * Stop if we run out. Won't happen if callers are obeying
2865 * the restrictions correctly. Can happen for free calls
2866 * on a completely full ag.
2867 */
2868 if (targs.agbno == NULLAGBLOCK) {
2869 if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
2870 break;
2871 goto out_agflbp_relse;
2872 }
2873
2874 if (!xfs_rmap_should_skip_owner_update(&targs.oinfo)) {
2875 error = xfs_rmap_alloc(tp, agbp, pag,
2876 targs.agbno, targs.len, &targs.oinfo);
2877 if (error)
2878 goto out_agflbp_relse;
2879 }
2880 error = xfs_alloc_update_counters(tp, agbp,
2881 -((long)(targs.len)));
2882 if (error)
2883 goto out_agflbp_relse;
2884
2885 /*
2886 * Put each allocated block on the list.
2887 */
2888 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2889 error = xfs_alloc_put_freelist(pag, tp, agbp,
2890 agflbp, bno, 0);
2891 if (error)
2892 goto out_agflbp_relse;
2893 }
2894 }
2895 xfs_trans_brelse(tp, agflbp);
2896 args->agbp = agbp;
2897 return 0;
2898
2899out_agflbp_relse:
2900 xfs_trans_brelse(tp, agflbp);
2901out_agbp_relse:
2902 if (agbp)
2903 xfs_trans_brelse(tp, agbp);
2904out_no_agbp:
2905 args->agbp = NULL;
2906 return error;
2907}
2908
2909/*
2910 * Get a block from the freelist.
2911 * Returns with the buffer for the block gotten.
2912 */
2913int
2914xfs_alloc_get_freelist(
2915 struct xfs_perag *pag,
2916 struct xfs_trans *tp,
2917 struct xfs_buf *agbp,
2918 xfs_agblock_t *bnop,
2919 int btreeblk)
2920{
2921 struct xfs_agf *agf = agbp->b_addr;
2922 struct xfs_buf *agflbp;
2923 xfs_agblock_t bno;
2924 __be32 *agfl_bno;
2925 int error;
2926 uint32_t logflags;
2927 struct xfs_mount *mp = tp->t_mountp;
2928
2929 /*
2930 * Freelist is empty, give up.
2931 */
2932 if (!agf->agf_flcount) {
2933 *bnop = NULLAGBLOCK;
2934 return 0;
2935 }
2936 /*
2937 * Read the array of free blocks.
2938 */
2939 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
2940 if (error)
2941 return error;
2942
2944 /*
2945 * Get the block number and update the data structures.
2946 */
2947 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
2948 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2949 if (XFS_IS_CORRUPT(tp->t_mountp, !xfs_verify_agbno(pag, bno)))
2950 return -EFSCORRUPTED;
2951
2952 be32_add_cpu(&agf->agf_flfirst, 1);
2953 xfs_trans_brelse(tp, agflbp);
2954 if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
2955 agf->agf_flfirst = 0;
2956
2957 ASSERT(!xfs_perag_agfl_needs_reset(pag));
2958 be32_add_cpu(&agf->agf_flcount, -1);
2959 pag->pagf_flcount--;
2960
2961 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2962 if (btreeblk) {
2963 be32_add_cpu(&agf->agf_btreeblks, 1);
2964 pag->pagf_btreeblks++;
2965 logflags |= XFS_AGF_BTREEBLKS;
2966 }
2967
2968 xfs_alloc_log_agf(tp, agbp, logflags);
2969 *bnop = bno;
2970
2971 return 0;
2972}
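
/*
 * The AGFL is consumed as a ring buffer: agf_flfirst chases agf_fllast.
 * For example (illustrative), with xfs_agfl_size() = 118 and
 * agf_flfirst = 117, taking one block advances agf_flfirst to 118, which
 * equals the AGFL size and therefore wraps back to slot 0.
 */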
2973
2974/*
2975 * Log the given fields from the agf structure.
2976 */
2977void
2978xfs_alloc_log_agf(
2979 struct xfs_trans *tp,
2980 struct xfs_buf *bp,
2981 uint32_t fields)
2982{
2983 int first; /* first byte offset */
2984 int last; /* last byte offset */
2985 static const short offsets[] = {
2986 offsetof(xfs_agf_t, agf_magicnum),
2987 offsetof(xfs_agf_t, agf_versionnum),
2988 offsetof(xfs_agf_t, agf_seqno),
2989 offsetof(xfs_agf_t, agf_length),
2990 offsetof(xfs_agf_t, agf_roots[0]),
2991 offsetof(xfs_agf_t, agf_levels[0]),
2992 offsetof(xfs_agf_t, agf_flfirst),
2993 offsetof(xfs_agf_t, agf_fllast),
2994 offsetof(xfs_agf_t, agf_flcount),
2995 offsetof(xfs_agf_t, agf_freeblks),
2996 offsetof(xfs_agf_t, agf_longest),
2997 offsetof(xfs_agf_t, agf_btreeblks),
2998 offsetof(xfs_agf_t, agf_uuid),
2999 offsetof(xfs_agf_t, agf_rmap_blocks),
3000 offsetof(xfs_agf_t, agf_refcount_blocks),
3001 offsetof(xfs_agf_t, agf_refcount_root),
3002 offsetof(xfs_agf_t, agf_refcount_level),
3003 /* needed so that we don't log the whole rest of the structure: */
3004 offsetof(xfs_agf_t, agf_spare64),
3005 sizeof(xfs_agf_t)
3006 };
3007
3008 trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);
3009
3010 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
3011
3012 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
3013 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
3014}
3015
3016/*
3017 * Put the block on the freelist for the allocation group.
3018 */
3019int
3020xfs_alloc_put_freelist(
3021 struct xfs_perag *pag,
3022 struct xfs_trans *tp,
3023 struct xfs_buf *agbp,
3024 struct xfs_buf *agflbp,
3025 xfs_agblock_t bno,
3026 int btreeblk)
3027{
3028 struct xfs_mount *mp = tp->t_mountp;
3029 struct xfs_agf *agf = agbp->b_addr;
3030 __be32 *blockp;
3031 int error;
3032 uint32_t logflags;
3033 __be32 *agfl_bno;
3034 int startoff;
3035
3036 if (!agflbp) {
3037 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
3038 if (error)
3039 return error;
3040 }
3041
3042 be32_add_cpu(&agf->agf_fllast, 1);
3043 if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
3044 agf->agf_fllast = 0;
3045
3046 ASSERT(!xfs_perag_agfl_needs_reset(pag));
3047 be32_add_cpu(&agf->agf_flcount, 1);
3048 pag->pagf_flcount++;
3049
3050 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
3051 if (btreeblk) {
3052 be32_add_cpu(&agf->agf_btreeblks, -1);
3053 pag->pagf_btreeblks--;
3054 logflags |= XFS_AGF_BTREEBLKS;
3055 }
3056
3057 xfs_alloc_log_agf(tp, agbp, logflags);
3058
3059 ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
3060
3061 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3062 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
3063 *blockp = cpu_to_be32(bno);
3064 startoff = (char *)blockp - (char *)agflbp->b_addr;
3065
3066 xfs_alloc_log_agf(tp, agbp, logflags);
3067
3068 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
3069 xfs_trans_log_buf(tp, agflbp, startoff,
3070 startoff + sizeof(xfs_agblock_t) - 1);
3071 return 0;
3072}
3073
3074/*
3075 * Check that this AGF/AGI header's sequence number and length match the AG
3076 * number and size in fsblocks.
3077 */
3078xfs_failaddr_t
3079xfs_validate_ag_length(
3080 struct xfs_buf *bp,
3081 uint32_t seqno,
3082 uint32_t length)
3083{
3084 struct xfs_mount *mp = bp->b_mount;
3085 /*
3086 * During growfs operations, the perag is not fully initialised,
3087 * so we can't use it for any useful checking. growfs ensures we can't
3088 * use it by using uncached buffers that don't have the perag attached
3089 * so we can detect and avoid this problem.
3090 */
3091 if (bp->b_pag && seqno != bp->b_pag->pag_agno)
3092 return __this_address;
3093
3094 /*
3095 * Only the last AG in the filesystem is allowed to be shorter
3096 * than the AG size recorded in the superblock.
3097 */
3098 if (length != mp->m_sb.sb_agblocks) {
3099 /*
3100 * During growfs, the new last AG can get here before we
3101 * have updated the superblock. Give it a pass on the seqno
3102 * check.
3103 */
3104 if (bp->b_pag && seqno != mp->m_sb.sb_agcount - 1)
3105 return __this_address;
3106 if (length < XFS_MIN_AG_BLOCKS)
3107 return __this_address;
3108 if (length > mp->m_sb.sb_agblocks)
3109 return __this_address;
3110 }
3111
3112 return NULL;
3113}
3114
3115/*
3116 * Verify the AGF is consistent.
3117 *
3118 * We do not verify the AGFL indexes in the AGF are fully consistent here
3119 * because of issues with variable on-disk structure sizes. Instead, we check
3120 * the agfl indexes for consistency when we initialise the perag from the AGF
3121 * information after a read completes.
3122 *
3123 * If the index is inconsistent, then we mark the perag as needing an AGFL
3124 * reset. The first AGFL update performed then resets the AGFL indexes and
3125 * refills the AGFL with known good free blocks, allowing the filesystem to
3126 * continue operating normally at the cost of a few leaked free space blocks.
3127 */
3128static xfs_failaddr_t
3129xfs_agf_verify(
3130 struct xfs_buf *bp)
3131{
3132 struct xfs_mount *mp = bp->b_mount;
3133 struct xfs_agf *agf = bp->b_addr;
3134 xfs_failaddr_t fa;
3135 uint32_t agf_seqno = be32_to_cpu(agf->agf_seqno);
3136 uint32_t agf_length = be32_to_cpu(agf->agf_length);
3137
3138 if (xfs_has_crc(mp)) {
3139 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
3140 return __this_address;
3141 if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
3142 return __this_address;
3143 }
3144
3145 if (!xfs_verify_magic(bp, agf->agf_magicnum))
3146 return __this_address;
3147
3148 if (!XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)))
3149 return __this_address;
3150
3151 /*
3152 * Both agf_seqno and agf_length need to be validated before anything else
3153 * block number related in the AGF or AGFL can be checked.
3154 */
3155 fa = xfs_validate_ag_length(bp, agf_seqno, agf_length);
3156 if (fa)
3157 return fa;
3158
3159 if (be32_to_cpu(agf->agf_flfirst) >= xfs_agfl_size(mp))
3160 return __this_address;
3161 if (be32_to_cpu(agf->agf_fllast) >= xfs_agfl_size(mp))
3162 return __this_address;
3163 if (be32_to_cpu(agf->agf_flcount) > xfs_agfl_size(mp))
3164 return __this_address;
3165
3166 if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
3167 be32_to_cpu(agf->agf_freeblks) > agf_length)
3168 return __this_address;
3169
3170 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
3171 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
3172 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) >
3173 mp->m_alloc_maxlevels ||
3174 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) >
3175 mp->m_alloc_maxlevels)
3176 return __this_address;
3177
3178 if (xfs_has_lazysbcount(mp) &&
3179 be32_to_cpu(agf->agf_btreeblks) > agf_length)
3180 return __this_address;
3181
3182 if (xfs_has_rmapbt(mp)) {
3183 if (be32_to_cpu(agf->agf_rmap_blocks) > agf_length)
3184 return __this_address;
3185
3186 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
3187 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) >
3188 mp->m_rmap_maxlevels)
3189 return __this_address;
3190 }
3191
3192 if (xfs_has_reflink(mp)) {
3193 if (be32_to_cpu(agf->agf_refcount_blocks) > agf_length)
3194 return __this_address;
3195
3196 if (be32_to_cpu(agf->agf_refcount_level) < 1 ||
3197 be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels)
3198 return __this_address;
3199 }
3200
3201 return NULL;
3202}
3203
3204static void
3205xfs_agf_read_verify(
3206 struct xfs_buf *bp)
3207{
3208 struct xfs_mount *mp = bp->b_mount;
3209 xfs_failaddr_t fa;
3210
3211 if (xfs_has_crc(mp) &&
3212 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
3213 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
3214 else {
3215 fa = xfs_agf_verify(bp);
3216 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
3217 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3218 }
3219}
3220
3221static void
3222xfs_agf_write_verify(
3223 struct xfs_buf *bp)
3224{
3225 struct xfs_mount *mp = bp->b_mount;
3226 struct xfs_buf_log_item *bip = bp->b_log_item;
3227 struct xfs_agf *agf = bp->b_addr;
3228 xfs_failaddr_t fa;
3229
3230 fa = xfs_agf_verify(bp);
3231 if (fa) {
3232 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3233 return;
3234 }
3235
3236 if (!xfs_has_crc(mp))
3237 return;
3238
3239 if (bip)
3240 agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
3241
3242 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
3243}
3244
3245const struct xfs_buf_ops xfs_agf_buf_ops = {
3246 .name = "xfs_agf",
3247 .magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
3248 .verify_read = xfs_agf_read_verify,
3249 .verify_write = xfs_agf_write_verify,
3250 .verify_struct = xfs_agf_verify,
3251};
3252
3253/*
3254 * Read in the allocation group header (free/alloc section).
3255 */
3256int
3257xfs_read_agf(
3258 struct xfs_perag *pag,
3259 struct xfs_trans *tp,
3260 int flags,
3261 struct xfs_buf **agfbpp)
3262{
3263 struct xfs_mount *mp = pag->pag_mount;
3264 int error;
3265
3266 trace_xfs_read_agf(pag->pag_mount, pag->pag_agno);
3267
3268 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3269 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGF_DADDR(mp)),
3270 XFS_FSS_TO_BB(mp, 1), flags, agfbpp, &xfs_agf_buf_ops);
3271 if (error)
3272 return error;
3273
3274 xfs_buf_set_ref(*agfbpp, XFS_AGF_REF);
3275 return 0;
3276}
3277
3278/*
3279 * Read in the allocation group header (free/alloc section) and initialise the
3280 * perag structure if necessary. If the caller provides @agfbpp, then return the
3281 * locked buffer to the caller, otherwise free it.
3282 */
3283int
3284xfs_alloc_read_agf(
3285 struct xfs_perag *pag,
3286 struct xfs_trans *tp,
3287 int flags,
3288 struct xfs_buf **agfbpp)
3289{
3290 struct xfs_buf *agfbp;
3291 struct xfs_agf *agf;
3292 int error;
3293 int allocbt_blks;
3294
3295 trace_xfs_alloc_read_agf(pag->pag_mount, pag->pag_agno);
3296
3297 /* We don't support trylock when freeing. */
3298 ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
3299 (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
3300 error = xfs_read_agf(pag, tp,
3301 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
3302 &agfbp);
3303 if (error)
3304 return error;
3305
3306 agf = agfbp->b_addr;
3307 if (!xfs_perag_initialised_agf(pag)) {
3308 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
3309 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
3310 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
3311 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
3312 pag->pagf_levels[XFS_BTNUM_BNOi] =
3313 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
3314 pag->pagf_levels[XFS_BTNUM_CNTi] =
3315 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
3316 pag->pagf_levels[XFS_BTNUM_RMAPi] =
3317 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
3318 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
3319 if (xfs_agfl_needs_reset(pag->pag_mount, agf))
3320 set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3321 else
3322 clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3323
3324 /*
3325 * Update the in-core allocbt counter. Filter out the rmapbt
3326 * subset of the btreeblks counter because the rmapbt is managed
3327 * by perag reservation. Subtract one for the rmapbt root block
3328 * because the rmap counter includes it while the btreeblks
3329 * counter only tracks non-root blocks.
3330 */
3331 allocbt_blks = pag->pagf_btreeblks;
3332 if (xfs_has_rmapbt(pag->pag_mount))
3333 allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
3334 if (allocbt_blks > 0)
3335 atomic64_add(allocbt_blks,
3336 &pag->pag_mount->m_allocbt_blks);
3337
3338 set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
3339 }
3340#ifdef DEBUG
3341 else if (!xfs_is_shutdown(pag->pag_mount)) {
3342 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
3343 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
3344 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
3345 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
3346 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
3347 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
3348 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
3349 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
3350 }
3351#endif
3352 if (agfbpp)
3353 *agfbpp = agfbp;
3354 else
3355 xfs_trans_brelse(tp, agfbp);
3356 return 0;
3357}
3358
3359/*
3360 * Pre-process allocation arguments to set initial state that we don't require
3361 * callers to set up correctly, as well as bounds check the allocation args
3362 * that are set up.
3363 */
3364static int
3365xfs_alloc_vextent_check_args(
3366 struct xfs_alloc_arg *args,
3367 xfs_fsblock_t target,
3368 xfs_agnumber_t *minimum_agno)
3369{
3370 struct xfs_mount *mp = args->mp;
3371 xfs_agblock_t agsize;
3372
3373 args->fsbno = NULLFSBLOCK;
3374
3375 *minimum_agno = 0;
3376 if (args->tp->t_highest_agno != NULLAGNUMBER)
3377 *minimum_agno = args->tp->t_highest_agno;
3378
3379 /*
3380 * Just fix this up, for the case where the last a.g. is shorter
3381 * (or there's only one a.g.) and the caller couldn't easily figure
3382 * that out (xfs_bmap_alloc).
3383 */
3384 agsize = mp->m_sb.sb_agblocks;
3385 if (args->maxlen > agsize)
3386 args->maxlen = agsize;
3387 if (args->alignment == 0)
3388 args->alignment = 1;
3389
3390 ASSERT(args->minlen > 0);
3391 ASSERT(args->maxlen > 0);
3392 ASSERT(args->alignment > 0);
3393 ASSERT(args->resv != XFS_AG_RESV_AGFL);
3394
3395 ASSERT(XFS_FSB_TO_AGNO(mp, target) < mp->m_sb.sb_agcount);
3396 ASSERT(XFS_FSB_TO_AGBNO(mp, target) < agsize);
3397 ASSERT(args->minlen <= args->maxlen);
3398 ASSERT(args->minlen <= agsize);
3399 ASSERT(args->mod < args->prod);
3400
3401 if (XFS_FSB_TO_AGNO(mp, target) >= mp->m_sb.sb_agcount ||
3402 XFS_FSB_TO_AGBNO(mp, target) >= agsize ||
3403 args->minlen > args->maxlen || args->minlen > agsize ||
3404 args->mod >= args->prod) {
3405 trace_xfs_alloc_vextent_badargs(args);
3406 return -ENOSPC;
3407 }
3408
3409 if (args->agno != NULLAGNUMBER && *minimum_agno > args->agno) {
3410 trace_xfs_alloc_vextent_skip_deadlock(args);
3411 return -ENOSPC;
3412 }
3413 return 0;
3415}
3416
3417/*
3418 * Prepare an AG for allocation. If the AG is not prepared to accept the
3419 * allocation, return failure.
3420 *
3421 * XXX(dgc): The complexity of "need_pag" will go away as all caller paths are
3422 * modified to hold their own perag references.
3423 */
3424static int
3425xfs_alloc_vextent_prepare_ag(
3426 struct xfs_alloc_arg *args,
3427 uint32_t alloc_flags)
3428{
3429 bool need_pag = !args->pag;
3430 int error;
3431
3432 if (need_pag)
3433 args->pag = xfs_perag_get(args->mp, args->agno);
3434
3435 args->agbp = NULL;
3436 error = xfs_alloc_fix_freelist(args, alloc_flags);
3437 if (error) {
3438 trace_xfs_alloc_vextent_nofix(args);
3439 if (need_pag)
3440 xfs_perag_put(args->pag);
3441 args->agbno = NULLAGBLOCK;
3442 return error;
3443 }
3444 if (!args->agbp) {
3445 /* cannot allocate in this AG at all */
3446 trace_xfs_alloc_vextent_noagbp(args);
3447 args->agbno = NULLAGBLOCK;
3448 return 0;
3449 }
3450 args->wasfromfl = 0;
3451 return 0;
3452}
3453
3454/*
3455 * Post-process allocation results to account for the allocation if it succeeded
3456 * and set the allocated block number correctly for the caller.
3457 *
3458 * XXX: we should really be returning ENOSPC for ENOSPC, not
3459 * hiding it behind a "successful" NULLFSBLOCK allocation.
3460 */
3461static int
3462xfs_alloc_vextent_finish(
3463 struct xfs_alloc_arg *args,
3464 xfs_agnumber_t minimum_agno,
3465 int alloc_error,
3466 bool drop_perag)
3467{
3468 struct xfs_mount *mp = args->mp;
3469 int error = 0;
3470
3471 /*
3472 * We can end up here with a locked AGF. If we failed, the caller is
3473 * likely going to try to allocate again with different parameters, and
3474 * that can widen the AGs that are searched for free space. If we have
3475 * to do BMBT block allocation, we have to do a new allocation.
3476 *
3477 * Hence leaving this function with the AGF locked opens up potential
3478 * ABBA AGF deadlocks because a future allocation attempt in this
3479 * transaction may attempt to lock a lower number AGF.
3480 *
3481 * We can't release the AGF until the transaction is committed, so at
3482 * this point we must update the "first allocation" tracker to point at
3483 * this AG if the tracker is empty or points to a lower AG. This allows
3484 * the next allocation attempt to be modified appropriately to avoid
3485 * deadlocks.
3486 */
3487 if (args->agbp &&
3488 (args->tp->t_highest_agno == NULLAGNUMBER ||
3489 args->agno > minimum_agno))
3490 args->tp->t_highest_agno = args->agno;
3491
3492 /*
3493 * If the allocation failed with an error or we had an ENOSPC result,
3494 * preserve the returned error whilst also marking the allocation result
3495 * as "no extent allocated". This ensures that callers that fail to
3496 * capture the error will still treat it as a failed allocation.
3497 */
3498 if (alloc_error || args->agbno == NULLAGBLOCK) {
3499 args->fsbno = NULLFSBLOCK;
3500 error = alloc_error;
3501 goto out_drop_perag;
3502 }
3503
3504 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
3505
3506 ASSERT(args->len >= args->minlen);
3507 ASSERT(args->len <= args->maxlen);
3508 ASSERT(args->agbno % args->alignment == 0);
3509 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno), args->len);
3510
3511 /* if not file data, insert new block into the reverse map btree */
3512 if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
3513 error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
3514 args->agbno, args->len, &args->oinfo);
3515 if (error)
3516 goto out_drop_perag;
3517 }
3518
3519 if (!args->wasfromfl) {
3520 error = xfs_alloc_update_counters(args->tp, args->agbp,
3521 -((long)(args->len)));
3522 if (error)
3523 goto out_drop_perag;
3524
3525 ASSERT(!xfs_extent_busy_search(mp, args->pag, args->agbno,
3526 args->len));
3527 }
3528
3529 xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
3530
3531 XFS_STATS_INC(mp, xs_allocx);
3532 XFS_STATS_ADD(mp, xs_allocb, args->len);
3533
3534 trace_xfs_alloc_vextent_finish(args);
3535
3536out_drop_perag:
3537 if (drop_perag && args->pag) {
3538 xfs_perag_rele(args->pag);
3539 args->pag = NULL;
3540 }
3541 return error;
3542}
3543
3544/*
3545 * Allocate within a single AG only. This uses a best-fit length algorithm so if
3546 * you need an exact sized allocation without locality constraints, this is the
3547 * fastest way to do it.
3548 *
3549 * Caller is expected to hold a perag reference in args->pag.
3550 */
3551int
3552xfs_alloc_vextent_this_ag(
3553 struct xfs_alloc_arg *args,
3554 xfs_agnumber_t agno)
3555{
3556 struct xfs_mount *mp = args->mp;
3557 xfs_agnumber_t minimum_agno;
3558 uint32_t alloc_flags = 0;
3559 int error;
3560
3561 ASSERT(args->pag != NULL);
3562 ASSERT(args->pag->pag_agno == agno);
3563
3564 args->agno = agno;
3565 args->agbno = 0;
3566
3567 trace_xfs_alloc_vextent_this_ag(args);
3568
3569 error = xfs_alloc_vextent_check_args(args, XFS_AGB_TO_FSB(mp, agno, 0),
3570 &minimum_agno);
3571 if (error) {
3572 if (error == -ENOSPC)
3573 return 0;
3574 return error;
3575 }
3576
3577 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3578 if (!error && args->agbp)
3579 error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3580
3581 return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3582}
3583
3584/*
3585 * Iterate all AGs trying to allocate an extent starting from @start_ag.
3586 *
3587 * If the incoming allocation type is XFS_ALLOCTYPE_NEAR_BNO, it means the
3588 * allocation attempts in @start_agno have locality information. If we fail to
3589 * allocate in that AG, then we revert to anywhere-in-AG for all the other AGs
3590 * we attempt to allocate in, as there is no locality optimisation possible for
3591 * those allocations.
3592 *
3593 * On return, args->pag may be left referenced if we finish before the "all
3594 * failed" return point. The allocation finish still needs the perag, and
3595 * so the caller will release it once they've finished the allocation.
3596 *
3597 * When we wrap the AG iteration at the end of the filesystem, we have to be
3598 * careful not to wrap into AGs below ones we already have locked in the
3599 * transaction if we are doing a blocking iteration. This will result in an
3600 * out-of-order locking of AGFs and hence can cause deadlocks.
3601 */
3602static int
3603xfs_alloc_vextent_iterate_ags(
3604 struct xfs_alloc_arg *args,
3605 xfs_agnumber_t minimum_agno,
3606 xfs_agnumber_t start_agno,
3607 xfs_agblock_t target_agbno,
3608 uint32_t alloc_flags)
3609{
3610 struct xfs_mount *mp = args->mp;
3611 xfs_agnumber_t restart_agno = minimum_agno;
3612 xfs_agnumber_t agno;
3613 int error = 0;
3614
3615 if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)
3616 restart_agno = 0;
3617restart:
3618 for_each_perag_wrap_range(mp, start_agno, restart_agno,
3619 mp->m_sb.sb_agcount, agno, args->pag) {
3620 args->agno = agno;
3621 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3622 if (error)
3623 break;
3624 if (!args->agbp) {
3625 trace_xfs_alloc_vextent_loopfailed(args);
3626 continue;
3627 }
3628
3629 /*
3630 * Allocation is supposed to succeed now, so break out of the
3631 * loop regardless of whether we succeed or not.
3632 */
3633 if (args->agno == start_agno && target_agbno) {
3634 args->agbno = target_agbno;
3635 error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3636 } else {
3637 args->agbno = 0;
3638 error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3639 }
3640 break;
3641 }
3642 if (error) {
3643 xfs_perag_rele(args->pag);
3644 args->pag = NULL;
3645 return error;
3646 }
3647 if (args->agbp)
3648 return 0;
3649
3650 /*
3651 * We didn't find an AG we can allocate from. If we were given
3652 * constraining flags by the caller, drop them and retry the allocation
3653 * without any constraints being set.
3654 */
3655 if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK) {
3656 alloc_flags &= ~XFS_ALLOC_FLAG_TRYLOCK;
3657 restart_agno = minimum_agno;
3658 goto restart;
3659 }
3660
3661 ASSERT(args->pag == NULL);
3662 trace_xfs_alloc_vextent_allfailed(args);
3663 return 0;
3664}
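
/*
 * Example of the wrap behaviour above (illustrative): with sb_agcount = 8,
 * start_agno = 5 and minimum_agno = 3, a TRYLOCK pass may visit AGs
 * 5, 6, 7, 0, 1, 2, 3, 4, but once the trylock flag is dropped the
 * blocking retry only visits 5, 6, 7, 3, 4, so we never lock an AGF below
 * one this transaction may already hold.
 */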
3665
3666/*
3667 * Iterate the AGs from the start AG to the end of the filesystem, trying
3668 * to allocate blocks. It starts with a near allocation attempt in the initial
3669 * AG, then falls back to anywhere-in-ag after the first AG fails. It will wrap
3670 * back to zero if allowed by previous allocations in this transaction,
3671 * otherwise will wrap back to the start AG and run a second blocking pass to
3672 * the end of the filesystem.
3673 */
3674int
3675xfs_alloc_vextent_start_ag(
3676 struct xfs_alloc_arg *args,
3677 xfs_fsblock_t target)
3678{
3679 struct xfs_mount *mp = args->mp;
3680 xfs_agnumber_t minimum_agno;
3681 xfs_agnumber_t start_agno;
3682 xfs_agnumber_t rotorstep = xfs_rotorstep;
3683 bool bump_rotor = false;
3684 uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
3685 int error;
3686
3687 ASSERT(args->pag == NULL);
3688
3689 args->agno = NULLAGNUMBER;
3690 args->agbno = NULLAGBLOCK;
3691
3692 trace_xfs_alloc_vextent_start_ag(args);
3693
3694 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3695 if (error) {
3696 if (error == -ENOSPC)
3697 return 0;
3698 return error;
3699 }
3700
3701 if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
3702 xfs_is_inode32(mp)) {
3703 target = XFS_AGB_TO_FSB(mp,
3704 ((mp->m_agfrotor / rotorstep) %
3705 mp->m_sb.sb_agcount), 0);
3706 bump_rotor = true;
3707 }
3708
3709 start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
3710 error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3711 XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
3712
3713 if (bump_rotor) {
3714 if (args->agno == start_agno)
3715 mp->m_agfrotor = (mp->m_agfrotor + 1) %
3716 (mp->m_sb.sb_agcount * rotorstep);
3717 else
3718 mp->m_agfrotor = (args->agno * rotorstep + 1) %
3719 (mp->m_sb.sb_agcount * rotorstep);
3720 }
3721
3722 return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3723}
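
/*
 * Rotor example (illustrative): with xfs_rotorstep = 3 and sb_agcount = 4,
 * m_agfrotor cycles through [0, 12) and the initial user data target AG is
 * (m_agfrotor / 3) % 4, so each AG serves as the starting point for three
 * consecutive initial-data allocations before the rotor moves on.
 */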
3724
3725/*
3726 * Iterate from the agno indicated via @target through to the end of the
3727 * filesystem attempting blocking allocation. This does not wrap or try a
3728 * second pass, so it will never consider AGs lower than the target agno.
3729 */
3730int
3731xfs_alloc_vextent_first_ag(
3732 struct xfs_alloc_arg *args,
3733 xfs_fsblock_t target)
3734{
3735 struct xfs_mount *mp = args->mp;
3736 xfs_agnumber_t minimum_agno;
3737 xfs_agnumber_t start_agno;
3738 uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
3739 int error;
3740
3741 ASSERT(args->pag == NULL);
3742
3743 args->agno = NULLAGNUMBER;
3744 args->agbno = NULLAGBLOCK;
3745
3746 trace_xfs_alloc_vextent_first_ag(args);
3747
3748 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3749 if (error) {
3750 if (error == -ENOSPC)
3751 return 0;
3752 return error;
3753 }
3754
3755 start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
3756 error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3757 XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
3758 return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3759}
3760
3761/*
3762 * Allocate at the exact block target or fail. Caller is expected to hold a
3763 * perag reference in args->pag.
3764 */
3765int
3766xfs_alloc_vextent_exact_bno(
3767 struct xfs_alloc_arg *args,
3768 xfs_fsblock_t target)
3769{
3770 struct xfs_mount *mp = args->mp;
3771 xfs_agnumber_t minimum_agno;
3772 int error;
3773
3774 ASSERT(args->pag != NULL);
3775 ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3776
3777 args->agno = XFS_FSB_TO_AGNO(mp, target);
3778 args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3779
3780 trace_xfs_alloc_vextent_exact_bno(args);
3781
3782 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3783 if (error) {
3784 if (error == -ENOSPC)
3785 return 0;
3786 return error;
3787 }
3788
3789 error = xfs_alloc_vextent_prepare_ag(args, 0);
3790 if (!error && args->agbp)
3791 error = xfs_alloc_ag_vextent_exact(args);
3792
3793 return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3794}
3795
3796/*
3797 * Allocate an extent as close to the target as possible. If there are not
3798 * viable candidates in the AG, then fail the allocation.
3799 *
3800 * Caller may or may not have a per-ag reference in args->pag.
3801 */
3802int
3803xfs_alloc_vextent_near_bno(
3804 struct xfs_alloc_arg *args,
3805 xfs_fsblock_t target)
3806{
3807 struct xfs_mount *mp = args->mp;
3808 xfs_agnumber_t minimum_agno;
3809 bool needs_perag = args->pag == NULL;
3810 uint32_t alloc_flags = 0;
3811 int error;
3812
3813 if (!needs_perag)
3814 ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3815
3816 args->agno = XFS_FSB_TO_AGNO(mp, target);
3817 args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3818
3819 trace_xfs_alloc_vextent_near_bno(args);
3820
3821 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3822 if (error) {
3823 if (error == -ENOSPC)
3824 return 0;
3825 return error;
3826 }
3827
3828 if (needs_perag)
3829 args->pag = xfs_perag_grab(mp, args->agno);
3830
3831 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3832 if (!error && args->agbp)
3833 error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3834
3835 return xfs_alloc_vextent_finish(args, minimum_agno, error, needs_perag);
3836}
3837
3838/* Ensure that the freelist is at full capacity. */
3839int
3840xfs_free_extent_fix_freelist(
3841 struct xfs_trans *tp,
3842 struct xfs_perag *pag,
3843 struct xfs_buf **agbp)
3844{
3845 struct xfs_alloc_arg args;
3846 int error;
3847
3848 memset(&args, 0, sizeof(struct xfs_alloc_arg));
3849 args.tp = tp;
3850 args.mp = tp->t_mountp;
3851 args.agno = pag->pag_agno;
3852 args.pag = pag;
3853
3854 /*
3855 * Validate that the AG number is legal - this enables us to detect and
3856 * handle silent filesystem corruption rather than crashing.
3857 */
3858 if (args.agno >= args.mp->m_sb.sb_agcount)
3859 return -EFSCORRUPTED;
3860
3861 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3862 if (error)
3863 return error;
3864
3865 *agbp = args.agbp;
3866 return 0;
3867}
3868
3869/*
3870 * Free an extent.
3871 * Just break up the extent address and hand off to xfs_free_ag_extent
3872 * after fixing up the freelist.
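 *
 * Callers normally reach this through the xfs_free_extent() wrapper in
 * xfs_alloc.h; a sketch of a typical call (the arguments are assumed for
 * illustration):
 *
 *	error = xfs_free_extent(tp, pag, agbno, len,
 *			&XFS_RMAP_OINFO_ANY_OWNER, XFS_AG_RESV_NONE);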
3873 */
3874int
3875__xfs_free_extent(
3876 struct xfs_trans *tp,
3877 struct xfs_perag *pag,
3878 xfs_agblock_t agbno,
3879 xfs_extlen_t len,
3880 const struct xfs_owner_info *oinfo,
3881 enum xfs_ag_resv_type type,
3882 bool skip_discard)
3883{
3884 struct xfs_mount *mp = tp->t_mountp;
3885 struct xfs_buf *agbp;
3886 struct xfs_agf *agf;
3887 int error;
3888 unsigned int busy_flags = 0;
3889
3890 ASSERT(len != 0);
3891 ASSERT(type != XFS_AG_RESV_AGFL);
3892
3893 if (XFS_TEST_ERROR(false, mp,
3894 XFS_ERRTAG_FREE_EXTENT))
3895 return -EIO;
3896
3897 error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
3898 if (error)
3899 return error;
3900 agf = agbp->b_addr;
3901
3902 if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
3903 error = -EFSCORRUPTED;
3904 goto err_release;
3905 }
3906
3907 /* validate the extent size is legal now that we have the AGF locked */
3908 if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
3909 error = -EFSCORRUPTED;
3910 goto err_release;
3911 }
3912
3913 error = xfs_free_ag_extent(tp, agbp, pag->pag_agno, agbno, len, oinfo,
3914 type);
3915 if (error)
3916 goto err_release;
3917
3918 if (skip_discard)
3919 busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
3920 xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags);
3921 return 0;
3922
3923err_release:
3924 xfs_trans_brelse(tp, agbp);
3925 return error;
3926}
3927
3928struct xfs_alloc_query_range_info {
3929 xfs_alloc_query_range_fn fn;
3930 void *priv;
3931};
3932
3933/* Format btree record and pass to our callback. */
3934STATIC int
3935xfs_alloc_query_range_helper(
3936 struct xfs_btree_cur *cur,
3937 const union xfs_btree_rec *rec,
3938 void *priv)
3939{
3940 struct xfs_alloc_query_range_info *query = priv;
3941 struct xfs_alloc_rec_incore irec;
3942 xfs_failaddr_t fa;
3943
3944 xfs_alloc_btrec_to_irec(rec, &irec);
3945 fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec);
3946 if (fa)
3947 return xfs_alloc_complain_bad_rec(cur, fa, &irec);
3948
3949 return query->fn(cur, &irec, query->priv);
3950}
3951
3952/* Find all free space within a given range of blocks. */
3953int
3954xfs_alloc_query_range(
3955 struct xfs_btree_cur *cur,
3956 const struct xfs_alloc_rec_incore *low_rec,
3957 const struct xfs_alloc_rec_incore *high_rec,
3958 xfs_alloc_query_range_fn fn,
3959 void *priv)
3960{
3961 union xfs_btree_irec low_brec = { .a = *low_rec };
3962 union xfs_btree_irec high_brec = { .a = *high_rec };
3963 struct xfs_alloc_query_range_info query = { .priv = priv, .fn = fn };
3964
3965 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3966 return xfs_btree_query_range(cur, &low_brec, &high_brec,
3967 xfs_alloc_query_range_helper, &query);
3968}
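
/*
 * A sketch of a typical caller (the helper below is hypothetical and for
 * illustration only): count the free extents in a range by threading a
 * counter through the opaque @priv pointer.
 *
 *	static int
 *	count_free_helper(struct xfs_btree_cur *cur,
 *			const struct xfs_alloc_rec_incore *rec, void *priv)
 *	{
 *		(*(uint64_t *)priv)++;
 *		return 0;
 *	}
 *
 *	uint64_t nr = 0;
 *	error = xfs_alloc_query_range(cur, &low, &high, count_free_helper,
 *			&nr);
 */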
3969
3970/* Find all free space records. */
3971int
3972xfs_alloc_query_all(
3973 struct xfs_btree_cur *cur,
3974 xfs_alloc_query_range_fn fn,
3975 void *priv)
3976{
3977 struct xfs_alloc_query_range_info query;
3978
3979 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3980 query.priv = priv;
3981 query.fn = fn;
3982 return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
3983}
3984
3985/*
3986 * Scan part of the keyspace of the free space and tell us if the area has no
3987 * records, is fully mapped by records, or is partially filled.
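 * (These states are reported through @outcome as XBTREE_RECPACKING_EMPTY,
 * XBTREE_RECPACKING_FULL and XBTREE_RECPACKING_SPARSE, respectively.)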
3988 */
3989int
3990xfs_alloc_has_records(
3991 struct xfs_btree_cur *cur,
3992 xfs_agblock_t bno,
3993 xfs_extlen_t len,
3994 enum xbtree_recpacking *outcome)
3995{
3996 union xfs_btree_irec low;
3997 union xfs_btree_irec high;
3998
3999 memset(&low, 0, sizeof(low));
4000 low.a.ar_startblock = bno;
4001 memset(&high, 0xFF, sizeof(high));
4002 high.a.ar_startblock = bno + len - 1;
4003
4004 return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
4005}
4006
4007/*
4008 * Walk all the blocks in the AGFL. The @walk_fn can return any negative
4009 * error code or XFS_ITER_*.
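 * (For instance, a walk function may return XFS_ITER_ABORT to stop the
 * walk early; any nonzero return value terminates the walk.)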
4010 */
4011int
4012xfs_agfl_walk(
4013 struct xfs_mount *mp,
4014 struct xfs_agf *agf,
4015 struct xfs_buf *agflbp,
4016 xfs_agfl_walk_fn walk_fn,
4017 void *priv)
4018{
4019 __be32 *agfl_bno;
4020 unsigned int i;
4021 int error;
4022
4023 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
4024 i = be32_to_cpu(agf->agf_flfirst);
4025
4026 /* Nothing to walk in an empty AGFL. */
4027 if (agf->agf_flcount == cpu_to_be32(0))
4028 return 0;
4029
4030 /* Otherwise, walk from first to last, wrapping as needed. */
4031 for (;;) {
4032 error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
4033 if (error)
4034 return error;
4035 if (i == be32_to_cpu(agf->agf_fllast))
4036 break;
4037 if (++i == xfs_agfl_size(mp))
4038 i = 0;
4039 }
4040
4041 return 0;
4042}
4043
4044int __init
4045xfs_extfree_intent_init_cache(void)
4046{
4047 xfs_extfree_item_cache = kmem_cache_create("xfs_extfree_intent",
4048 sizeof(struct xfs_extent_free_item),
4049 0, 0, NULL);
4050
4051 return xfs_extfree_item_cache != NULL ? 0 : -ENOMEM;
4052}
4053
4054void
4055xfs_extfree_intent_destroy_cache(void)
4056{
4057 kmem_cache_destroy(xfs_extfree_item_cache);
4058 xfs_extfree_item_cache = NULL;
4059}
96
97/*
98 * Compute the number of blocks that we set aside to guarantee the ability to
99 * refill the AGFL and handle a full bmap btree split.
100 *
101 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
102 * AGF buffer (PV 947395), we place constraints on the relationship among
103 * actual allocations for data blocks, freelist blocks, and potential file data
104 * bmap btree blocks. However, these restrictions may result in no actual space
105 * allocated for a delayed extent, for example, a data block in a certain AG is
106 * allocated but there is no additional block for the additional bmap btree
107 * block due to a split of the bmap btree of the file. The result of this may
108 * lead to an infinite loop when the file gets flushed to disk and all delayed
109 * extents need to be actually allocated. To get around this, we explicitly set
110 * aside a few blocks which will not be reserved in delayed allocation.
111 *
112 * For each AG, we need to reserve enough blocks to replenish a totally empty
113 * AGFL and 4 more to handle a potential split of the file's bmap btree.
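 *
 * For example, a filesystem with 16 AGs sets aside 16 * (4 + 4) = 128
 * blocks.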
114 */
115unsigned int
116xfs_alloc_set_aside(
117 struct xfs_mount *mp)
118{
119 return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
120}
121
122/*
123 * When deciding how much space to allocate out of an AG, we limit the
124 * allocation maximum size to the size of the AG. However, we cannot use all the
125 * blocks in the AG - some are permanently used by metadata. These
126 * blocks are generally:
127 * - the AG superblock, AGF, AGI and AGFL
128 * - the AGF (bno and cnt) and AGI btree root blocks, and optionally
129 * the AGI free inode and rmap btree root blocks.
130 * - blocks on the AGFL according to xfs_alloc_set_aside() limits
131 * - the rmapbt root block
132 *
133 * The AG headers are sector sized, so the amount of space they take up is
134 * dependent on filesystem geometry. The others are all single blocks.
135 */
136unsigned int
137xfs_alloc_ag_max_usable(
138 struct xfs_mount *mp)
139{
140 unsigned int blocks;
141
142 blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
143 blocks += XFS_ALLOCBT_AGFL_RESERVE;
144 blocks += 3; /* AGF, AGI btree root blocks */
145 if (xfs_has_finobt(mp))
146 blocks++; /* finobt root block */
147 if (xfs_has_rmapbt(mp))
148 blocks++; /* rmap root block */
149 if (xfs_has_reflink(mp))
150 blocks++; /* refcount root block */
151
152 return mp->m_sb.sb_agblocks - blocks;
153}
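
/*
 * For example, assuming a 4 KiB block, 512 byte sector V5 filesystem with
 * finobt, rmapbt and reflink enabled: the four sector-sized AG headers
 * round up to a single block, so 1 + 4 + 3 + 1 + 1 + 1 = 11 blocks per AG
 * are unusable.
 */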
154
155
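/*
 * Look up [bno, len] in the free space btree given by cur, and leave the
 * cursor marked active only when the lookup found a record.
 */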
156static int
157xfs_alloc_lookup(
158 struct xfs_btree_cur *cur,
159 xfs_lookup_t dir,
160 xfs_agblock_t bno,
161 xfs_extlen_t len,
162 int *stat)
163{
164 int error;
165
166 cur->bc_rec.a.ar_startblock = bno;
167 cur->bc_rec.a.ar_blockcount = len;
168 error = xfs_btree_lookup(cur, dir, stat);
169 if (*stat == 1)
170 cur->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE;
171 else
172 cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
173 return error;
174}
175
176/*
177 * Lookup the record equal to [bno, len] in the btree given by cur.
178 */
179static inline int /* error */
180xfs_alloc_lookup_eq(
181 struct xfs_btree_cur *cur, /* btree cursor */
182 xfs_agblock_t bno, /* starting block of extent */
183 xfs_extlen_t len, /* length of extent */
184 int *stat) /* success/failure */
185{
186 return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, bno, len, stat);
187}
188
189/*
190 * Lookup the first record greater than or equal to [bno, len]
191 * in the btree given by cur.
192 */
193int /* error */
194xfs_alloc_lookup_ge(
195 struct xfs_btree_cur *cur, /* btree cursor */
196 xfs_agblock_t bno, /* starting block of extent */
197 xfs_extlen_t len, /* length of extent */
198 int *stat) /* success/failure */
199{
200 return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, bno, len, stat);
201}
202
203/*
204 * Lookup the first record less than or equal to [bno, len]
205 * in the btree given by cur.
206 */
207int /* error */
208xfs_alloc_lookup_le(
209 struct xfs_btree_cur *cur, /* btree cursor */
210 xfs_agblock_t bno, /* starting block of extent */
211 xfs_extlen_t len, /* length of extent */
212 int *stat) /* success/failure */
213{
214 return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, bno, len, stat);
215}
216
217static inline bool
218xfs_alloc_cur_active(
219 struct xfs_btree_cur *cur)
220{
221 return cur && (cur->bc_flags & XFS_BTREE_ALLOCBT_ACTIVE);
222}
223
224/*
225 * Update the record referred to by cur to the value given
226 * by [bno, len].
227 * This either works (return 0) or gets an EFSCORRUPTED error.
228 */
229STATIC int /* error */
230xfs_alloc_update(
231 struct xfs_btree_cur *cur, /* btree cursor */
232 xfs_agblock_t bno, /* starting block of extent */
233 xfs_extlen_t len) /* length of extent */
234{
235 union xfs_btree_rec rec;
236
237 rec.alloc.ar_startblock = cpu_to_be32(bno);
238 rec.alloc.ar_blockcount = cpu_to_be32(len);
239 return xfs_btree_update(cur, &rec);
240}
241
242/* Convert the ondisk btree record to its incore representation. */
243void
244xfs_alloc_btrec_to_irec(
245 const union xfs_btree_rec *rec,
246 struct xfs_alloc_rec_incore *irec)
247{
248 irec->ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
249 irec->ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
250}
251
252/* Simple checks for free space records. */
253xfs_failaddr_t
254xfs_alloc_check_irec(
255 struct xfs_perag *pag,
256 const struct xfs_alloc_rec_incore *irec)
257{
258 if (irec->ar_blockcount == 0)
259 return __this_address;
260
261 /* check for valid extent range, including overflow */
262 if (!xfs_verify_agbext(pag, irec->ar_startblock, irec->ar_blockcount))
263 return __this_address;
264
265 return NULL;
266}
267
268static inline int
269xfs_alloc_complain_bad_rec(
270 struct xfs_btree_cur *cur,
271 xfs_failaddr_t fa,
272 const struct xfs_alloc_rec_incore *irec)
273{
274 struct xfs_mount *mp = cur->bc_mp;
275
276 xfs_warn(mp,
277 "%sbt record corruption in AG %d detected at %pS!",
278 cur->bc_ops->name, cur->bc_group->xg_gno, fa);
279 xfs_warn(mp,
280 "start block 0x%x block count 0x%x", irec->ar_startblock,
281 irec->ar_blockcount);
282 xfs_btree_mark_sick(cur);
283 return -EFSCORRUPTED;
284}
285
286/*
287 * Get the data from the pointed-to record.
288 */
289int /* error */
290xfs_alloc_get_rec(
291 struct xfs_btree_cur *cur, /* btree cursor */
292 xfs_agblock_t *bno, /* output: starting block of extent */
293 xfs_extlen_t *len, /* output: length of extent */
294 int *stat) /* output: success/failure */
295{
296 struct xfs_alloc_rec_incore irec;
297 union xfs_btree_rec *rec;
298 xfs_failaddr_t fa;
299 int error;
300
301 error = xfs_btree_get_rec(cur, &rec, stat);
302 if (error || !(*stat))
303 return error;
304
305 xfs_alloc_btrec_to_irec(rec, &irec);
306 fa = xfs_alloc_check_irec(to_perag(cur->bc_group), &irec);
307 if (fa)
308 return xfs_alloc_complain_bad_rec(cur, fa, &irec);
309
310 *bno = irec.ar_startblock;
311 *len = irec.ar_blockcount;
312 return 0;
313}
314
315/*
316 * Compute aligned version of the found extent.
317 * Takes alignment and min length into account.
318 */
319STATIC bool
320xfs_alloc_compute_aligned(
321 xfs_alloc_arg_t *args, /* allocation argument structure */
322 xfs_agblock_t foundbno, /* starting block in found extent */
323 xfs_extlen_t foundlen, /* length in found extent */
324 xfs_agblock_t *resbno, /* result block number */
325 xfs_extlen_t *reslen, /* result length */
326 unsigned *busy_gen)
327{
328 xfs_agblock_t bno = foundbno;
329 xfs_extlen_t len = foundlen;
330 xfs_extlen_t diff;
331 bool busy;
332
333 /* Trim busy sections out of found extent */
334 busy = xfs_extent_busy_trim(pag_group(args->pag), args->minlen,
335 args->maxlen, &bno, &len, busy_gen);
336
337 /*
338 * If we have a largish extent that happens to start before min_agbno,
339 * see if we can shift it into range...
340 */
341 if (bno < args->min_agbno && bno + len > args->min_agbno) {
342 diff = args->min_agbno - bno;
343 if (len > diff) {
344 bno += diff;
345 len -= diff;
346 }
347 }
348
349 if (args->alignment > 1 && len >= args->minlen) {
350 xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
351
352 diff = aligned_bno - bno;
353
354 *resbno = aligned_bno;
355 *reslen = diff >= len ? 0 : len - diff;
356 } else {
357 *resbno = bno;
358 *reslen = len;
359 }
360
361 return busy;
362}
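
/*
 * For example, with alignment 4 and no busy blocks trimmed, a found extent
 * of [7, 17) comes back as resbno 8 and reslen 9: the start is rounded up
 * to the alignment boundary and the length shrinks by the blocks skipped.
 */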
363
364/*
365 * Compute best start block and diff for "near" allocations.
366 * freelen >= wantlen already checked by caller.
367 */
368STATIC xfs_extlen_t /* difference value (absolute) */
369xfs_alloc_compute_diff(
370 xfs_agblock_t wantbno, /* target starting block */
371 xfs_extlen_t wantlen, /* target length */
372 xfs_extlen_t alignment, /* target alignment */
373 int datatype, /* are we allocating data? */
374 xfs_agblock_t freebno, /* freespace's starting block */
375 xfs_extlen_t freelen, /* freespace's length */
376 xfs_agblock_t *newbnop) /* result: best start block from free */
377{
378 xfs_agblock_t freeend; /* end of freespace extent */
379 xfs_agblock_t newbno1; /* return block number */
380 xfs_agblock_t newbno2; /* other new block number */
381 xfs_extlen_t newlen1=0; /* length with newbno1 */
382 xfs_extlen_t newlen2=0; /* length with newbno2 */
383 xfs_agblock_t wantend; /* end of target extent */
384 bool userdata = datatype & XFS_ALLOC_USERDATA;
385
386 ASSERT(freelen >= wantlen);
387 freeend = freebno + freelen;
388 wantend = wantbno + wantlen;
389 /*
390 * We want to allocate from the start of a free extent if it is past
391 * the desired block or if we are allocating user data and the free
1392 * extent is before the desired block. The second case is there to allow
393 * for contiguous allocation from the remaining free space if the file
394 * grows in the short term.
395 */
396 if (freebno >= wantbno || (userdata && freeend < wantend)) {
397 if ((newbno1 = roundup(freebno, alignment)) >= freeend)
398 newbno1 = NULLAGBLOCK;
399 } else if (freeend >= wantend && alignment > 1) {
400 newbno1 = roundup(wantbno, alignment);
401 newbno2 = newbno1 - alignment;
402 if (newbno1 >= freeend)
403 newbno1 = NULLAGBLOCK;
404 else
405 newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
406 if (newbno2 < freebno)
407 newbno2 = NULLAGBLOCK;
408 else
409 newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
410 if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
411 if (newlen1 < newlen2 ||
412 (newlen1 == newlen2 &&
413 XFS_ABSDIFF(newbno1, wantbno) >
414 XFS_ABSDIFF(newbno2, wantbno)))
415 newbno1 = newbno2;
416 } else if (newbno2 != NULLAGBLOCK)
417 newbno1 = newbno2;
418 } else if (freeend >= wantend) {
419 newbno1 = wantbno;
420 } else if (alignment > 1) {
421 newbno1 = roundup(freeend - wantlen, alignment);
422 if (newbno1 > freeend - wantlen &&
423 newbno1 - alignment >= freebno)
424 newbno1 -= alignment;
425 else if (newbno1 >= freeend)
426 newbno1 = NULLAGBLOCK;
427 } else
428 newbno1 = freeend - wantlen;
429 *newbnop = newbno1;
430 return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
431}
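
/*
 * For example, a request for [100, 108) served from the free extent
 * [90, 120) with alignment 1 returns newbno 100 and a diff of 0, because
 * the free extent fully covers the wanted range.
 */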
432
433/*
434 * Fix up the length, based on mod and prod.
435 * len should be k * prod + mod for some k.
436 * If len is too small it is returned unchanged.
437 * If len hits maxlen it is left alone.
438 */
439STATIC void
440xfs_alloc_fix_len(
441 xfs_alloc_arg_t *args) /* allocation argument structure */
442{
443 xfs_extlen_t k;
444 xfs_extlen_t rlen;
445
446 ASSERT(args->mod < args->prod);
447 rlen = args->len;
448 ASSERT(rlen >= args->minlen);
449 ASSERT(rlen <= args->maxlen);
450 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
451 (args->mod == 0 && rlen < args->prod))
452 return;
453 k = rlen % args->prod;
454 if (k == args->mod)
455 return;
456 if (k > args->mod)
457 rlen = rlen - (k - args->mod);
458 else
459 rlen = rlen - args->prod + (args->mod - k);
460 /* casts to (int) catch length underflows */
461 if ((int)rlen < (int)args->minlen)
462 return;
463 ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
464 ASSERT(rlen % args->prod == args->mod);
465 ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
466 rlen + args->minleft);
467 args->len = rlen;
468}
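
/*
 * For example, with prod = 4, mod = 1 and a candidate len of 10 (below
 * maxlen): k = 10 % 4 = 2 is greater than mod, so the length is trimmed to
 * 10 - (2 - 1) = 9, which satisfies 9 = 2 * 4 + 1.
 */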
469
470/*
471 * Determine if the cursor points to the block that contains the right-most
472 * block of records in the by-count btree. This block contains the largest
473 * contiguous free extent in the AG, so if we modify a record in this block we
474 * need to call xfs_alloc_fixup_longest() once the modifications are done to
475 * ensure the agf->agf_longest field is kept up to date with the longest free
476 * extent tracked by the by-count btree.
477 */
478static bool
479xfs_alloc_cursor_at_lastrec(
480 struct xfs_btree_cur *cnt_cur)
481{
482 struct xfs_btree_block *block;
483 union xfs_btree_ptr ptr;
484 struct xfs_buf *bp;
485
486 block = xfs_btree_get_block(cnt_cur, 0, &bp);
487
488 xfs_btree_get_sibling(cnt_cur, block, &ptr, XFS_BB_RIGHTSIB);
489 return xfs_btree_ptr_is_null(cnt_cur, &ptr);
490}
491
492/*
493 * Find the rightmost record of the cntbt, and return the longest free space
494 * recorded in it. Simply set both the block number and the length to their
495 * maximum values before searching.
496 */
497static int
498xfs_cntbt_longest(
499 struct xfs_btree_cur *cnt_cur,
500 xfs_extlen_t *longest)
501{
502 struct xfs_alloc_rec_incore irec;
503 union xfs_btree_rec *rec;
504 int stat = 0;
505 int error;
506
507 memset(&cnt_cur->bc_rec, 0xFF, sizeof(cnt_cur->bc_rec));
508 error = xfs_btree_lookup(cnt_cur, XFS_LOOKUP_LE, &stat);
509 if (error)
510 return error;
511 if (!stat) {
512 /* totally empty tree */
513 *longest = 0;
514 return 0;
515 }
516
517 error = xfs_btree_get_rec(cnt_cur, &rec, &stat);
518 if (error)
519 return error;
520 if (XFS_IS_CORRUPT(cnt_cur->bc_mp, !stat)) {
521 xfs_btree_mark_sick(cnt_cur);
522 return -EFSCORRUPTED;
523 }
524
525 xfs_alloc_btrec_to_irec(rec, &irec);
526 *longest = irec.ar_blockcount;
527 return 0;
528}
529
530/*
531 * Update the longest contiguous free extent in the AG from the by-count cursor
532 * that is passed to us. This should be done at the end of any allocation or
533 * freeing operation that touches the longest extent in the btree.
534 *
535 * Needing to update the longest extent can be determined by calling
536 * xfs_alloc_cursor_at_lastrec() after the cursor is positioned for record
537 * modification but before the modification begins.
538 */
539static int
540xfs_alloc_fixup_longest(
541 struct xfs_btree_cur *cnt_cur)
542{
543 struct xfs_perag *pag = to_perag(cnt_cur->bc_group);
544 struct xfs_buf *bp = cnt_cur->bc_ag.agbp;
545 struct xfs_agf *agf = bp->b_addr;
546 xfs_extlen_t longest = 0;
547 int error;
548
549 /* Lookup last rec in order to update AGF. */
550 error = xfs_cntbt_longest(cnt_cur, &longest);
551 if (error)
552 return error;
553
554 pag->pagf_longest = longest;
555 agf->agf_longest = cpu_to_be32(pag->pagf_longest);
556 xfs_alloc_log_agf(cnt_cur->bc_tp, bp, XFS_AGF_LONGEST);
557
558 return 0;
559}
560
561/*
562 * Update the two btrees, logically removing from freespace the extent
563 * starting at rbno, rlen blocks. The extent is contained within the
564 * actual (current) free extent fbno for flen blocks.
565 * Flags are passed in indicating whether the cursors are set to the
566 * relevant records.
567 */
568STATIC int /* error code */
569xfs_alloc_fixup_trees(
570 struct xfs_btree_cur *cnt_cur, /* cursor for by-size btree */
571 struct xfs_btree_cur *bno_cur, /* cursor for by-block btree */
572 xfs_agblock_t fbno, /* starting block of free extent */
573 xfs_extlen_t flen, /* length of free extent */
574 xfs_agblock_t rbno, /* starting block of returned extent */
575 xfs_extlen_t rlen, /* length of returned extent */
576 int flags) /* flags, XFSA_FIXUP_... */
577{
578 int error; /* error code */
579 int i; /* operation results */
580 xfs_agblock_t nfbno1; /* first new free startblock */
581 xfs_agblock_t nfbno2; /* second new free startblock */
582 xfs_extlen_t nflen1=0; /* first new free length */
583 xfs_extlen_t nflen2=0; /* second new free length */
584 struct xfs_mount *mp;
585 bool fixup_longest = false;
586
587 mp = cnt_cur->bc_mp;
588
589 /*
590 * Look up the record in the by-size tree if necessary.
591 */
592 if (flags & XFSA_FIXUP_CNT_OK) {
593#ifdef DEBUG
594 if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
595 return error;
596 if (XFS_IS_CORRUPT(mp,
597 i != 1 ||
598 nfbno1 != fbno ||
599 nflen1 != flen)) {
600 xfs_btree_mark_sick(cnt_cur);
601 return -EFSCORRUPTED;
602 }
603#endif
604 } else {
605 if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
606 return error;
607 if (XFS_IS_CORRUPT(mp, i != 1)) {
608 xfs_btree_mark_sick(cnt_cur);
609 return -EFSCORRUPTED;
610 }
611 }
612 /*
613 * Look up the record in the by-block tree if necessary.
614 */
615 if (flags & XFSA_FIXUP_BNO_OK) {
616#ifdef DEBUG
617 if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
618 return error;
619 if (XFS_IS_CORRUPT(mp,
620 i != 1 ||
621 nfbno1 != fbno ||
622 nflen1 != flen)) {
623 xfs_btree_mark_sick(bno_cur);
624 return -EFSCORRUPTED;
625 }
626#endif
627 } else {
628 if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
629 return error;
630 if (XFS_IS_CORRUPT(mp, i != 1)) {
631 xfs_btree_mark_sick(bno_cur);
632 return -EFSCORRUPTED;
633 }
634 }
635
636#ifdef DEBUG
637 if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
638 struct xfs_btree_block *bnoblock;
639 struct xfs_btree_block *cntblock;
640
641 bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_levels[0].bp);
642 cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_levels[0].bp);
643
644 if (XFS_IS_CORRUPT(mp,
645 bnoblock->bb_numrecs !=
646 cntblock->bb_numrecs)) {
647 xfs_btree_mark_sick(bno_cur);
648 return -EFSCORRUPTED;
649 }
650 }
651#endif
652
653 /*
654 * Deal with all four cases: the allocated record is contained
655 * within the freespace record, so we can have new freespace
656 * at either (or both) end, or no freespace remaining.
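 *
 * For example, allocating [20, 25) out of the free extent [10, 30) leaves
 * two new records, [10, 20) and [25, 30); allocating all of [10, 30)
 * leaves none.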
657 */
658 if (rbno == fbno && rlen == flen)
659 nfbno1 = nfbno2 = NULLAGBLOCK;
660 else if (rbno == fbno) {
661 nfbno1 = rbno + rlen;
662 nflen1 = flen - rlen;
663 nfbno2 = NULLAGBLOCK;
664 } else if (rbno + rlen == fbno + flen) {
665 nfbno1 = fbno;
666 nflen1 = flen - rlen;
667 nfbno2 = NULLAGBLOCK;
668 } else {
669 nfbno1 = fbno;
670 nflen1 = rbno - fbno;
671 nfbno2 = rbno + rlen;
672 nflen2 = (fbno + flen) - nfbno2;
673 }
674
675 if (xfs_alloc_cursor_at_lastrec(cnt_cur))
676 fixup_longest = true;
677
678 /*
679 * Delete the entry from the by-size btree.
680 */
681 if ((error = xfs_btree_delete(cnt_cur, &i)))
682 return error;
683 if (XFS_IS_CORRUPT(mp, i != 1)) {
684 xfs_btree_mark_sick(cnt_cur);
685 return -EFSCORRUPTED;
686 }
687 /*
688 * Add new by-size btree entry(s).
689 */
690 if (nfbno1 != NULLAGBLOCK) {
691 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
692 return error;
693 if (XFS_IS_CORRUPT(mp, i != 0)) {
694 xfs_btree_mark_sick(cnt_cur);
695 return -EFSCORRUPTED;
696 }
697 if ((error = xfs_btree_insert(cnt_cur, &i)))
698 return error;
699 if (XFS_IS_CORRUPT(mp, i != 1)) {
700 xfs_btree_mark_sick(cnt_cur);
701 return -EFSCORRUPTED;
702 }
703 }
704 if (nfbno2 != NULLAGBLOCK) {
705 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
706 return error;
707 if (XFS_IS_CORRUPT(mp, i != 0)) {
708 xfs_btree_mark_sick(cnt_cur);
709 return -EFSCORRUPTED;
710 }
711 if ((error = xfs_btree_insert(cnt_cur, &i)))
712 return error;
713 if (XFS_IS_CORRUPT(mp, i != 1)) {
714 xfs_btree_mark_sick(cnt_cur);
715 return -EFSCORRUPTED;
716 }
717 }
718 /*
719 * Fix up the by-block btree entry(s).
720 */
721 if (nfbno1 == NULLAGBLOCK) {
722 /*
723 * No remaining freespace, just delete the by-block tree entry.
724 */
725 if ((error = xfs_btree_delete(bno_cur, &i)))
726 return error;
727 if (XFS_IS_CORRUPT(mp, i != 1)) {
728 xfs_btree_mark_sick(bno_cur);
729 return -EFSCORRUPTED;
730 }
731 } else {
732 /*
733 * Update the by-block entry to start later|be shorter.
734 */
735 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
736 return error;
737 }
738 if (nfbno2 != NULLAGBLOCK) {
739 /*
740 * 2 resulting free entries, need to add one.
741 */
742 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
743 return error;
744 if (XFS_IS_CORRUPT(mp, i != 0)) {
745 xfs_btree_mark_sick(bno_cur);
746 return -EFSCORRUPTED;
747 }
748 if ((error = xfs_btree_insert(bno_cur, &i)))
749 return error;
750 if (XFS_IS_CORRUPT(mp, i != 1)) {
751 xfs_btree_mark_sick(bno_cur);
752 return -EFSCORRUPTED;
753 }
754 }
755
756 if (fixup_longest)
757 return xfs_alloc_fixup_longest(cnt_cur);
758
759 return 0;
760}
761
762/*
763 * We do not verify the AGFL contents against AGF-based index counters here,
764 * even though we may have access to the perag that contains shadow copies. We
765 * don't know if the AGF based counters have been checked, and if they have they
766 * still may be inconsistent because they haven't yet been reset on the first
767 * allocation after the AGF has been read in.
768 *
769 * This means we can only check that all agfl entries contain valid or null
770 * values because we can't reliably determine the active range to exclude
771 * NULLAGBNO as a valid value.
772 *
773 * However, we can't even do that for v4 format filesystems because there are
774 * old versions of mkfs out there that do not initialise the AGFL to known,
775 * verifiable values. Hence we can't tell the difference between an AGFL block
776 * allocated by mkfs and a corrupted AGFL block here on v4 filesystems.
777 *
778 * As a result, we can only fully validate AGFL block numbers when we pull them
779 * from the freelist in xfs_alloc_get_freelist().
780 */
781static xfs_failaddr_t
782xfs_agfl_verify(
783 struct xfs_buf *bp)
784{
785 struct xfs_mount *mp = bp->b_mount;
786 struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
787 __be32 *agfl_bno = xfs_buf_to_agfl_bno(bp);
788 int i;
789
790 if (!xfs_has_crc(mp))
791 return NULL;
792
793 if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
794 return __this_address;
795 if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
796 return __this_address;
797 /*
798 * during growfs operations, the perag is not fully initialised,
799 * so we can't use it for any useful checking. growfs ensures we can't
800 * use it by using uncached buffers that don't have the perag attached
801 * so we can detect and avoid this problem.
802 */
803 if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != pag_agno((bp->b_pag)))
804 return __this_address;
805
806 for (i = 0; i < xfs_agfl_size(mp); i++) {
807 if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
808 be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
809 return __this_address;
810 }
811
812 if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
813 return __this_address;
814 return NULL;
815}
816
817static void
818xfs_agfl_read_verify(
819 struct xfs_buf *bp)
820{
821 struct xfs_mount *mp = bp->b_mount;
822 xfs_failaddr_t fa;
823
824 /*
825 * There is no verification of non-crc AGFLs because mkfs does not
826 * initialise the AGFL to zero or NULL. Hence the only valid part of the
827 * AGFL is what the AGF says is active. We can't get to the AGF, so we
828 * can't verify just those entries are valid.
829 */
830 if (!xfs_has_crc(mp))
831 return;
832
833 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
834 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
835 else {
836 fa = xfs_agfl_verify(bp);
837 if (fa)
838 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
839 }
840}
841
842static void
843xfs_agfl_write_verify(
844 struct xfs_buf *bp)
845{
846 struct xfs_mount *mp = bp->b_mount;
847 struct xfs_buf_log_item *bip = bp->b_log_item;
848 xfs_failaddr_t fa;
849
850 /* no verification of non-crc AGFLs */
851 if (!xfs_has_crc(mp))
852 return;
853
854 fa = xfs_agfl_verify(bp);
855 if (fa) {
856 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
857 return;
858 }
859
860 if (bip)
861 XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
862
863 xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
864}
865
866const struct xfs_buf_ops xfs_agfl_buf_ops = {
867 .name = "xfs_agfl",
868 .magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
869 .verify_read = xfs_agfl_read_verify,
870 .verify_write = xfs_agfl_write_verify,
871 .verify_struct = xfs_agfl_verify,
872};
873
874/*
875 * Read in the allocation group free block array.
876 */
877int
878xfs_alloc_read_agfl(
879 struct xfs_perag *pag,
880 struct xfs_trans *tp,
881 struct xfs_buf **bpp)
882{
883 struct xfs_mount *mp = pag_mount(pag);
884 struct xfs_buf *bp;
885 int error;
886
887 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
888 XFS_AG_DADDR(mp, pag_agno(pag), XFS_AGFL_DADDR(mp)),
889 XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
890 if (xfs_metadata_is_sick(error))
891 xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
892 if (error)
893 return error;
894 xfs_buf_set_ref(bp, XFS_AGFL_REF);
895 *bpp = bp;
896 return 0;
897}
898
899STATIC int
900xfs_alloc_update_counters(
901 struct xfs_trans *tp,
902 struct xfs_buf *agbp,
903 long len)
904{
905 struct xfs_agf *agf = agbp->b_addr;
906
907 agbp->b_pag->pagf_freeblks += len;
908 be32_add_cpu(&agf->agf_freeblks, len);
909
910 if (unlikely(be32_to_cpu(agf->agf_freeblks) >
911 be32_to_cpu(agf->agf_length))) {
912 xfs_buf_mark_corrupt(agbp);
913 xfs_ag_mark_sick(agbp->b_pag, XFS_SICK_AG_AGF);
914 return -EFSCORRUPTED;
915 }
916
917 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
918 return 0;
919}
920
921/*
922 * Block allocation algorithm and data structures.
923 */
924struct xfs_alloc_cur {
925 struct xfs_btree_cur *cnt; /* btree cursors */
926 struct xfs_btree_cur *bnolt;
927 struct xfs_btree_cur *bnogt;
928 xfs_extlen_t cur_len;/* current search length */
929 xfs_agblock_t rec_bno;/* extent startblock */
930 xfs_extlen_t rec_len;/* extent length */
931 xfs_agblock_t bno; /* alloc bno */
932 xfs_extlen_t len; /* alloc len */
933 xfs_extlen_t diff; /* diff from search bno */
934 unsigned int busy_gen;/* busy state */
935 bool busy;
936};
937
938/*
939 * Set up cursors, etc. in the extent allocation cursor. This function can be
940 * called multiple times to reset an initialized structure without having to
941 * reallocate cursors.
942 */
943static int
944xfs_alloc_cur_setup(
945 struct xfs_alloc_arg *args,
946 struct xfs_alloc_cur *acur)
947{
948 int error;
949 int i;
950
951 acur->cur_len = args->maxlen;
952 acur->rec_bno = 0;
953 acur->rec_len = 0;
954 acur->bno = 0;
955 acur->len = 0;
956 acur->diff = -1;
957 acur->busy = false;
958 acur->busy_gen = 0;
959
960 /*
961 * Perform an initial cntbt lookup to check for availability of maxlen
962 * extents. If this fails, we'll return -ENOSPC to signal the caller to
963 * attempt a small allocation.
964 */
965 if (!acur->cnt)
966 acur->cnt = xfs_cntbt_init_cursor(args->mp, args->tp,
967 args->agbp, args->pag);
968 error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
969 if (error)
970 return error;
971
972 /*
973 * Allocate the bnobt left and right search cursors.
974 */
975 if (!acur->bnolt)
976 acur->bnolt = xfs_bnobt_init_cursor(args->mp, args->tp,
977 args->agbp, args->pag);
978 if (!acur->bnogt)
979 acur->bnogt = xfs_bnobt_init_cursor(args->mp, args->tp,
980 args->agbp, args->pag);
981 return i == 1 ? 0 : -ENOSPC;
982}
983
984static void
985xfs_alloc_cur_close(
986 struct xfs_alloc_cur *acur,
987 bool error)
988{
989 int cur_error = XFS_BTREE_NOERROR;
990
991 if (error)
992 cur_error = XFS_BTREE_ERROR;
993
994 if (acur->cnt)
995 xfs_btree_del_cursor(acur->cnt, cur_error);
996 if (acur->bnolt)
997 xfs_btree_del_cursor(acur->bnolt, cur_error);
998 if (acur->bnogt)
999 xfs_btree_del_cursor(acur->bnogt, cur_error);
1000 acur->cnt = acur->bnolt = acur->bnogt = NULL;
1001}
1002
1003/*
1004 * Check an extent for allocation and track the best available candidate in the
1005 * allocation structure. The cursor is deactivated if it has entered an out of
1006 * range state based on allocation arguments. Optionally return the extent
1007 * geometry and allocation status if requested by the caller.
1008 */
1009static int
1010xfs_alloc_cur_check(
1011 struct xfs_alloc_arg *args,
1012 struct xfs_alloc_cur *acur,
1013 struct xfs_btree_cur *cur,
1014 int *new)
1015{
1016 int error, i;
1017 xfs_agblock_t bno, bnoa, bnew;
1018 xfs_extlen_t len, lena, diff = -1;
1019 bool busy;
1020 unsigned busy_gen = 0;
1021 bool deactivate = false;
1022 bool isbnobt = xfs_btree_is_bno(cur->bc_ops);
1023
1024 *new = 0;
1025
1026 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
1027 if (error)
1028 return error;
1029 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1030 xfs_btree_mark_sick(cur);
1031 return -EFSCORRUPTED;
1032 }
1033
1034 /*
1035 * Check minlen and deactivate a cntbt cursor if out of acceptable size
1036 * range (i.e., walking backwards looking for a minlen extent).
1037 */
1038 if (len < args->minlen) {
1039 deactivate = !isbnobt;
1040 goto out;
1041 }
1042
1043 busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
1044 &busy_gen);
1045 acur->busy |= busy;
1046 if (busy)
1047 acur->busy_gen = busy_gen;
1048 /* deactivate a bnobt cursor outside of locality range */
1049 if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
1050 deactivate = isbnobt;
1051 goto out;
1052 }
1053 if (lena < args->minlen)
1054 goto out;
1055
1056 args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
1057 xfs_alloc_fix_len(args);
1058 ASSERT(args->len >= args->minlen);
1059 if (args->len < acur->len)
1060 goto out;
1061
1062 /*
1063 * We have an aligned record that satisfies minlen and beats or matches
1064 * the candidate extent size. Compare locality for near allocation mode.
1065 */
1066 diff = xfs_alloc_compute_diff(args->agbno, args->len,
1067 args->alignment, args->datatype,
1068 bnoa, lena, &bnew);
1069 if (bnew == NULLAGBLOCK)
1070 goto out;
1071
1072 /*
1073 * Deactivate a bnobt cursor with worse locality than the current best.
1074 */
1075 if (diff > acur->diff) {
1076 deactivate = isbnobt;
1077 goto out;
1078 }
1079
1080 ASSERT(args->len > acur->len ||
1081 (args->len == acur->len && diff <= acur->diff));
1082 acur->rec_bno = bno;
1083 acur->rec_len = len;
1084 acur->bno = bnew;
1085 acur->len = args->len;
1086 acur->diff = diff;
1087 *new = 1;
1088
1089 /*
1090 * We're done if we found a perfect allocation. This only deactivates
1091 * the current cursor, but this is just an optimization to terminate a
1092 * cntbt search that otherwise runs to the edge of the tree.
1093 */
1094 if (acur->diff == 0 && acur->len == args->maxlen)
1095 deactivate = true;
1096out:
1097 if (deactivate)
1098 cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
1099 trace_xfs_alloc_cur_check(cur, bno, len, diff, *new);
1100 return 0;
1101}
1102
1103/*
1104 * Complete an allocation of a candidate extent. Remove the extent from both
1105 * trees and update the args structure.
1106 */
1107STATIC int
1108xfs_alloc_cur_finish(
1109 struct xfs_alloc_arg *args,
1110 struct xfs_alloc_cur *acur)
1111{
1112 int error;
1113
1114 ASSERT(acur->cnt && acur->bnolt);
1115 ASSERT(acur->bno >= acur->rec_bno);
1116 ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
1117 ASSERT(xfs_verify_agbext(args->pag, acur->rec_bno, acur->rec_len));
1118
1119 error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
1120 acur->rec_len, acur->bno, acur->len, 0);
1121 if (error)
1122 return error;
1123
1124 args->agbno = acur->bno;
1125 args->len = acur->len;
1126 args->wasfromfl = 0;
1127
1128 trace_xfs_alloc_cur(args);
1129 return 0;
1130}
1131
1132/*
1133 * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
1134 * bno optimized lookup to search for extents with ideal size and locality.
1135 */
1136STATIC int
1137xfs_alloc_cntbt_iter(
1138 struct xfs_alloc_arg *args,
1139 struct xfs_alloc_cur *acur)
1140{
1141 struct xfs_btree_cur *cur = acur->cnt;
1142 xfs_agblock_t bno;
1143 xfs_extlen_t len, cur_len;
1144 int error;
1145 int i;
1146
1147 if (!xfs_alloc_cur_active(cur))
1148 return 0;
1149
1150 /* locality optimized lookup */
1151 cur_len = acur->cur_len;
1152 error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
1153 if (error)
1154 return error;
1155 if (i == 0)
1156 return 0;
1157 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
1158 if (error)
1159 return error;
1160
1161 /* check the current record and update search length from it */
1162 error = xfs_alloc_cur_check(args, acur, cur, &i);
1163 if (error)
1164 return error;
1165 ASSERT(len >= acur->cur_len);
1166 acur->cur_len = len;
1167
1168 /*
1169 * We looked up the first record >= [agbno, len] above. The agbno is a
1170 * secondary key and so the current record may lie just before or after
1171 * agbno. If it is past agbno, check the previous record too so long as
1172 * the length matches as it may be closer. Don't check a smaller record
1173 * because that could deactivate our cursor.
1174 */
1175 if (bno > args->agbno) {
1176 error = xfs_btree_decrement(cur, 0, &i);
1177 if (!error && i) {
1178 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
1179 if (!error && i && len == acur->cur_len)
1180 error = xfs_alloc_cur_check(args, acur, cur,
1181 &i);
1182 }
1183 if (error)
1184 return error;
1185 }
1186
1187 /*
1188 * Increment the search key until we find at least one allocation
1189 * candidate or if the extent we found was larger. Otherwise, double the
1190 * search key to optimize the search. Efficiency is more important here
1191 * than absolute best locality.
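 * (While no candidate has been found the key creeps up a single block at a
 * time; once one exists the key can jump to double its previous value.)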
1192 */
1193 cur_len <<= 1;
1194 if (!acur->len || acur->cur_len >= cur_len)
1195 acur->cur_len++;
1196 else
1197 acur->cur_len = cur_len;
1198
1199 return error;
1200}
1201
1202/*
1203 * Deal with the case where only small freespaces remain. Either return the
1204 * contents of the last freespace record, or allocate space from the freelist if
1205 * there is nothing in the tree.
1206 */
1207STATIC int /* error */
1208xfs_alloc_ag_vextent_small(
1209 struct xfs_alloc_arg *args, /* allocation argument structure */
1210 struct xfs_btree_cur *ccur, /* optional by-size cursor */
1211 xfs_agblock_t *fbnop, /* result block number */
1212 xfs_extlen_t *flenp, /* result length */
1213 int *stat) /* status: 0-freelist, 1-normal/none */
1214{
1215 struct xfs_agf *agf = args->agbp->b_addr;
1216 int error = 0;
1217 xfs_agblock_t fbno = NULLAGBLOCK;
1218 xfs_extlen_t flen = 0;
1219 int i = 0;
1220
1221 /*
1222 * If a cntbt cursor is provided, try to allocate the largest record in
1223 * the tree. Try the AGFL if the cntbt is empty, otherwise fail the
1224 * allocation. Make sure to respect minleft even when pulling from the
1225 * freelist.
1226 */
1227 if (ccur)
1228 error = xfs_btree_decrement(ccur, 0, &i);
1229 if (error)
1230 goto error;
1231 if (i) {
1232 error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
1233 if (error)
1234 goto error;
1235 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1236 xfs_btree_mark_sick(ccur);
1237 error = -EFSCORRUPTED;
1238 goto error;
1239 }
1240 goto out;
1241 }
1242
1243 if (args->minlen != 1 || args->alignment != 1 ||
1244 args->resv == XFS_AG_RESV_AGFL ||
1245 be32_to_cpu(agf->agf_flcount) <= args->minleft)
1246 goto out;
1247
1248 error = xfs_alloc_get_freelist(args->pag, args->tp, args->agbp,
1249 &fbno, 0);
1250 if (error)
1251 goto error;
1252 if (fbno == NULLAGBLOCK)
1253 goto out;
1254
1255 xfs_extent_busy_reuse(pag_group(args->pag), fbno, 1,
1256 (args->datatype & XFS_ALLOC_NOBUSY));
1257
1258 if (args->datatype & XFS_ALLOC_USERDATA) {
1259 struct xfs_buf *bp;
1260
1261 error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
1262 xfs_agbno_to_daddr(args->pag, fbno),
1263 args->mp->m_bsize, 0, &bp);
1264 if (error)
1265 goto error;
1266 xfs_trans_binval(args->tp, bp);
1267 }
1268 *fbnop = args->agbno = fbno;
1269 *flenp = args->len = 1;
1270 if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
1271 xfs_btree_mark_sick(ccur);
1272 error = -EFSCORRUPTED;
1273 goto error;
1274 }
1275 args->wasfromfl = 1;
1276 trace_xfs_alloc_small_freelist(args);
1277
1278 /*
1279 * If we're feeding an AGFL block to something that doesn't live in the
1280 * free space, we need to clear out the OWN_AG rmap.
1281 */
1282 error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
1283 &XFS_RMAP_OINFO_AG);
1284 if (error)
1285 goto error;
1286
1287 *stat = 0;
1288 return 0;
1289
1290out:
1291 /*
1292 * Can't do the allocation, give up.
1293 */
1294 if (flen < args->minlen) {
1295 args->agbno = NULLAGBLOCK;
1296 trace_xfs_alloc_small_notenough(args);
1297 flen = 0;
1298 }
1299 *fbnop = fbno;
1300 *flenp = flen;
1301 *stat = 1;
1302 trace_xfs_alloc_small_done(args);
1303 return 0;
1304
1305error:
1306 trace_xfs_alloc_small_error(args);
1307 return error;
1308}
1309
1310/*
1311 * Allocate a variable extent at exactly agno/bno.
1312 * Extent's length (returned in *len) will be between minlen and maxlen,
1313 * and of the form k * prod + mod unless there's nothing that large.
1314 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
1315 */
1316STATIC int /* error */
1317xfs_alloc_ag_vextent_exact(
1318 xfs_alloc_arg_t *args) /* allocation argument structure */
1319{
1320 struct xfs_btree_cur *bno_cur;/* by block-number btree cursor */
1321 struct xfs_btree_cur *cnt_cur;/* by count btree cursor */
1322 int error;
1323 xfs_agblock_t fbno; /* start block of found extent */
1324 xfs_extlen_t flen; /* length of found extent */
1325 xfs_agblock_t tbno; /* start block of busy extent */
1326 xfs_extlen_t tlen; /* length of busy extent */
1327 xfs_agblock_t tend; /* end block of busy extent */
1328 int i; /* success/failure of operation */
1329 unsigned busy_gen;
1330
1331 ASSERT(args->alignment == 1);
1332
1333 /*
1334 * Allocate/initialize a cursor for the by-number freespace btree.
1335 */
1336 bno_cur = xfs_bnobt_init_cursor(args->mp, args->tp, args->agbp,
1337 args->pag);
1338
1339 /*
1340 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
1341 * Look for the closest free block <= bno, it must contain bno
1342 * if any free block does.
1343 */
1344 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
1345 if (error)
1346 goto error0;
1347 if (!i)
1348 goto not_found;
1349
1350 /*
1351 * Grab the freespace record.
1352 */
1353 error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
1354 if (error)
1355 goto error0;
1356 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1357 xfs_btree_mark_sick(bno_cur);
1358 error = -EFSCORRUPTED;
1359 goto error0;
1360 }
1361 ASSERT(fbno <= args->agbno);
1362
1363 /*
1364 * Check for overlapping busy extents.
1365 */
1366 tbno = fbno;
1367 tlen = flen;
1368 xfs_extent_busy_trim(pag_group(args->pag), args->minlen, args->maxlen,
1369 &tbno, &tlen, &busy_gen);
1370
1371 /*
1372 * Give up if the start of the extent is busy, or the freespace isn't
1373 * long enough for the minimum request.
1374 */
1375 if (tbno > args->agbno)
1376 goto not_found;
1377 if (tlen < args->minlen)
1378 goto not_found;
1379 tend = tbno + tlen;
1380 if (tend < args->agbno + args->minlen)
1381 goto not_found;
1382
1383 /*
1384 * End of extent will be the smaller of the freespace end and the
1385 * maximal requested end.
1386 *
1387 * Fix the length according to mod and prod if given.
1388 */
1389 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
1390 - args->agbno;
1391 xfs_alloc_fix_len(args);
1392 ASSERT(args->agbno + args->len <= tend);
1393
1394 /*
1395 * We are allocating agbno for args->len
1396 * Allocate/initialize a cursor for the by-size btree.
1397 */
1398 cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp,
1399 args->pag);
1400 ASSERT(xfs_verify_agbext(args->pag, args->agbno, args->len));
1401 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
1402 args->len, XFSA_FIXUP_BNO_OK);
1403 if (error) {
1404 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1405 goto error0;
1406 }
1407
1408 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1409 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1410
1411 args->wasfromfl = 0;
1412 trace_xfs_alloc_exact_done(args);
1413 return 0;
1414
1415not_found:
1416 /* Didn't find it, return null. */
1417 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1418 args->agbno = NULLAGBLOCK;
1419 trace_xfs_alloc_exact_notfound(args);
1420 return 0;
1421
1422error0:
1423 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1424 trace_xfs_alloc_exact_error(args);
1425 return error;
1426}
1427
1428/*
1429 * Search a given number of btree records in a given direction. Check each
1430 * record against the good extent we've already found.
1431 */
1432STATIC int
1433xfs_alloc_walk_iter(
1434 struct xfs_alloc_arg *args,
1435 struct xfs_alloc_cur *acur,
1436 struct xfs_btree_cur *cur,
1437 bool increment,
1438 bool find_one, /* quit on first candidate */
1439 int count, /* rec count (-1 for infinite) */
1440 int *stat)
1441{
1442 int error;
1443 int i;
1444
1445 *stat = 0;
1446
1447 /*
1448 * Search so long as the cursor is active or we find a better extent.
1449 * The cursor is deactivated if it extends beyond the range of the
1450 * current allocation candidate.
1451 */
1452 while (xfs_alloc_cur_active(cur) && count) {
1453 error = xfs_alloc_cur_check(args, acur, cur, &i);
1454 if (error)
1455 return error;
1456 if (i == 1) {
1457 *stat = 1;
1458 if (find_one)
1459 break;
1460 }
1461 if (!xfs_alloc_cur_active(cur))
1462 break;
1463
1464 if (increment)
1465 error = xfs_btree_increment(cur, 0, &i);
1466 else
1467 error = xfs_btree_decrement(cur, 0, &i);
1468 if (error)
1469 return error;
1470 if (i == 0)
1471 cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
1472
1473 if (count > 0)
1474 count--;
1475 }
1476
1477 return 0;
1478}
1479
1480/*
1481 * Search the by-bno and by-size btrees in parallel in search of an extent with
1482 * ideal locality based on the NEAR mode ->agbno locality hint.
1483 */
1484STATIC int
1485xfs_alloc_ag_vextent_locality(
1486 struct xfs_alloc_arg *args,
1487 struct xfs_alloc_cur *acur,
1488 int *stat)
1489{
1490 struct xfs_btree_cur *fbcur = NULL;
1491 int error;
1492 int i;
1493 bool fbinc;
1494
1495 ASSERT(acur->len == 0);
1496
1497 *stat = 0;
1498
1499 error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
1500 if (error)
1501 return error;
1502 error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
1503 if (error)
1504 return error;
1505 error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
1506 if (error)
1507 return error;
1508
1509 /*
1510 * Search the bnobt and cntbt in parallel. Search the bnobt left and
1511 * right and lookup the closest extent to the locality hint for each
1512 * extent size key in the cntbt. The entire search terminates
1513 * immediately on a bnobt hit because that means we've found best case
1514 * locality. Otherwise the search continues until the cntbt cursor runs
1515 * off the end of the tree. If no allocation candidate is found at this
1516 * point, give up on locality, walk backwards from the end of the cntbt
1517 * and take the first available extent.
1518 *
1519 * The parallel tree searches balance each other out to provide fairly
1520 * consistent performance for various situations. The bnobt search can
1521 * have pathological behavior in the worst case scenario of larger
1522 * allocation requests and fragmented free space. On the other hand, the
1523 * bnobt is able to satisfy most smaller allocation requests much more
1524 * quickly than the cntbt. The cntbt search can sift through fragmented
1525 * free space and sets of free extents for larger allocation requests
1526 * more quickly than the bnobt. Since the locality hint is just a hint
1527 * and we don't want to scan the entire bnobt for perfect locality, the
1528 * cntbt search essentially bounds the bnobt search such that we can
1529 * find good enough locality at reasonable performance in most cases.
1530 */
1531 while (xfs_alloc_cur_active(acur->bnolt) ||
1532 xfs_alloc_cur_active(acur->bnogt) ||
1533 xfs_alloc_cur_active(acur->cnt)) {
1534
1535 trace_xfs_alloc_cur_lookup(args);
1536
1537 /*
1538 * Search the bnobt left and right. In the case of a hit, finish
1539 * the search in the opposite direction and we're done.
1540 */
1541 error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
1542 true, 1, &i);
1543 if (error)
1544 return error;
1545 if (i == 1) {
1546 trace_xfs_alloc_cur_left(args);
1547 fbcur = acur->bnogt;
1548 fbinc = true;
1549 break;
1550 }
1551 error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
1552 1, &i);
1553 if (error)
1554 return error;
1555 if (i == 1) {
1556 trace_xfs_alloc_cur_right(args);
1557 fbcur = acur->bnolt;
1558 fbinc = false;
1559 break;
1560 }
1561
1562 /*
1563 * Check the extent with best locality based on the current
1564 * extent size search key and keep track of the best candidate.
1565 */
1566 error = xfs_alloc_cntbt_iter(args, acur);
1567 if (error)
1568 return error;
1569 if (!xfs_alloc_cur_active(acur->cnt)) {
1570 trace_xfs_alloc_cur_lookup_done(args);
1571 break;
1572 }
1573 }
1574
1575 /*
1576 * If we failed to find anything due to busy extents, return empty
1577 * handed so the caller can flush and retry. If no busy extents were
1578 * found, walk backwards from the end of the cntbt as a last resort.
1579 */
1580 if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
1581 error = xfs_btree_decrement(acur->cnt, 0, &i);
1582 if (error)
1583 return error;
1584 if (i) {
1585 acur->cnt->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE;
1586 fbcur = acur->cnt;
1587 fbinc = false;
1588 }
1589 }
1590
1591 /*
1592 * Search in the opposite direction for a better entry in the case of
1593 * a bnobt hit or walk backwards from the end of the cntbt.
1594 */
1595 if (fbcur) {
1596 error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
1597 &i);
1598 if (error)
1599 return error;
1600 }
1601
1602 if (acur->len)
1603 *stat = 1;
1604
1605 return 0;
1606}
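
/*
 * Editorial sketch of the control flow above (hypothetical helper names,
 * not part of the allocator): each pass of the loop steps the three
 * cursors in lockstep until one of the bnobt probes hits or the cntbt
 * cursor runs off the end, after which the fallback walk scans in the
 * opposite direction from the hit (or backwards from the end of the
 * cntbt).
 */
#if 0
	while (bnobt_left_active || bnobt_right_active || cntbt_active) {
		if (probe_bnobt_left())		/* best-case locality */
			return finish_opposite_direction(bnobt_right);
		if (probe_bnobt_right())
			return finish_opposite_direction(bnobt_left);
		if (!advance_cntbt_size_key())	/* cntbt exhausted */
			break;			/* last-resort backwards walk */
	}
#endif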
1607
1608/* Check the last block of the cnt btree for allocations. */
1609static int
1610xfs_alloc_ag_vextent_lastblock(
1611 struct xfs_alloc_arg *args,
1612 struct xfs_alloc_cur *acur,
1613 xfs_agblock_t *bno,
1614 xfs_extlen_t *len,
1615 bool *allocated)
1616{
1617 int error;
1618 int i;
1619
1620#ifdef DEBUG
1621 /* Randomly don't execute the first algorithm. */
1622 if (get_random_u32_below(2))
1623 return 0;
1624#endif
1625
1626 /*
1627 * Start from the entry that lookup found, sequence through all larger
1628 * free blocks. If we're actually pointing at a record smaller than
1629 * maxlen, go to the start of this block, and skip all those smaller
1630 * than minlen.
1631 */
1632 if (*len || args->alignment > 1) {
1633 acur->cnt->bc_levels[0].ptr = 1;
1634 do {
1635 error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
1636 if (error)
1637 return error;
1638 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1639 xfs_btree_mark_sick(acur->cnt);
1640 return -EFSCORRUPTED;
1641 }
1642 if (*len >= args->minlen)
1643 break;
1644 error = xfs_btree_increment(acur->cnt, 0, &i);
1645 if (error)
1646 return error;
1647 } while (i);
1648 ASSERT(*len >= args->minlen);
1649 if (!i)
1650 return 0;
1651 }
1652
1653 error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
1654 if (error)
1655 return error;
1656
1657 /*
1658 * It didn't work. We COULD be in a case where there's a good record
1659 * somewhere, so try again.
1660 */
1661 if (acur->len == 0)
1662 return 0;
1663
1664 trace_xfs_alloc_near_first(args);
1665 *allocated = true;
1666 return 0;
1667}
1668
1669/*
1670 * Allocate a variable extent near bno in the allocation group agno.
1671 * Extent's length (returned in len) will be between minlen and maxlen,
1672 * and of the form k * prod + mod unless there's nothing that large.
1673 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1674 */
1675STATIC int
1676xfs_alloc_ag_vextent_near(
1677 struct xfs_alloc_arg *args,
1678 uint32_t alloc_flags)
1679{
1680 struct xfs_alloc_cur acur = {};
1681 int error; /* error code */
1682 int i; /* result code, temporary */
1683 xfs_agblock_t bno;
1684 xfs_extlen_t len;
1685
1686 /* handle uninitialized agbno range so caller doesn't have to */
1687 if (!args->min_agbno && !args->max_agbno)
1688 args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1689 ASSERT(args->min_agbno <= args->max_agbno);
1690
1691 /* clamp agbno to the range if it's outside */
1692 if (args->agbno < args->min_agbno)
1693 args->agbno = args->min_agbno;
1694 if (args->agbno > args->max_agbno)
1695 args->agbno = args->max_agbno;
1696
1697 /* Retry once quickly if we find busy extents before blocking. */
1698 alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
1699restart:
1700 len = 0;
1701
1702 /*
1703 * Set up cursors and see if there are any free extents as big as
1704 * maxlen. If not, pick the last entry in the tree unless the tree is
1705 * empty.
1706 */
1707 error = xfs_alloc_cur_setup(args, &acur);
1708 if (error == -ENOSPC) {
1709 error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
1710 &len, &i);
1711 if (error)
1712 goto out;
1713 if (i == 0 || len == 0) {
1714 trace_xfs_alloc_near_noentry(args);
1715 goto out;
1716 }
1717 ASSERT(i == 1);
1718 } else if (error) {
1719 goto out;
1720 }
1721
1722 /*
1723 * First algorithm.
1724 * If the requested extent is large wrt the freespaces available
1725 * in this a.g., then the cursor will be pointing to a btree entry
1726 * near the right edge of the tree. If it's in the last btree leaf
1727 * block, then we just examine all the entries in that block
1728 * that are big enough, and pick the best one.
1729 */
1730 if (xfs_btree_islastblock(acur.cnt, 0)) {
1731 bool allocated = false;
1732
1733 error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
1734 &allocated);
1735 if (error)
1736 goto out;
1737 if (allocated)
1738 goto alloc_finish;
1739 }
1740
1741 /*
1742 * Second algorithm. Combined cntbt and bnobt search to find ideal
1743 * locality.
1744 */
1745 error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
1746 if (error)
1747 goto out;
1748
1749 /*
1750 * If we couldn't get anything, give up.
1751 */
1752 if (!acur.len) {
1753 if (acur.busy) {
1754 /*
1755 * Our only valid extents must have been busy. Flush and
1756 * retry the allocation again. If we get an -EAGAIN
1757 * error, we're being told that a deadlock was avoided
1758 * and the current transaction needs committing before
1759 * the allocation can be retried.
1760 */
1761 trace_xfs_alloc_near_busy(args);
1762 error = xfs_extent_busy_flush(args->tp,
1763 pag_group(args->pag), acur.busy_gen,
1764 alloc_flags);
1765 if (error)
1766 goto out;
1767
1768 alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1769 goto restart;
1770 }
1771 trace_xfs_alloc_size_neither(args);
1772 args->agbno = NULLAGBLOCK;
1773 goto out;
1774 }
1775
1776alloc_finish:
1777 /* fix up btrees on a successful allocation */
1778 error = xfs_alloc_cur_finish(args, &acur);
1779
1780out:
1781 xfs_alloc_cur_close(&acur, error);
1782 return error;
1783}
1784
1785/*
1786 * Allocate a variable extent anywhere in the allocation group agno.
1787 * Extent's length (returned in len) will be between minlen and maxlen,
1788 * and of the form k * prod + mod unless there's nothing that large.
1789 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1790 */
1791static int
1792xfs_alloc_ag_vextent_size(
1793 struct xfs_alloc_arg *args,
1794 uint32_t alloc_flags)
1795{
1796 struct xfs_agf *agf = args->agbp->b_addr;
1797 struct xfs_btree_cur *bno_cur;
1798 struct xfs_btree_cur *cnt_cur;
1799 xfs_agblock_t fbno; /* start of found freespace */
1800 xfs_extlen_t flen; /* length of found freespace */
1801 xfs_agblock_t rbno; /* returned block number */
1802 xfs_extlen_t rlen; /* length of returned extent */
1803 bool busy;
1804 unsigned busy_gen;
1805 int error;
1806 int i;
1807
1808 /* Retry once quickly if we find busy extents before blocking. */
1809 alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
1810restart:
1811 /*
1812 * Allocate and initialize a cursor for the by-size btree.
1813 */
1814 cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp,
1815 args->pag);
1816 bno_cur = NULL;
1817
1818 /*
1819 * Look for an entry >= maxlen+alignment-1 blocks.
1820 */
1821 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1822 args->maxlen + args->alignment - 1, &i)))
1823 goto error0;
1824
1825 /*
1826 * If none then we have to settle for a smaller extent. In the case that
1827 * there are no large extents, this will return the last entry in the
1828 * tree unless the tree is empty. In the case that there are only busy
1829 * large extents, this will return the largest small extent unless there
1830 * are no smaller extents available.
1831 */
1832 if (!i) {
1833 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1834 &fbno, &flen, &i);
1835 if (error)
1836 goto error0;
1837 if (i == 0 || flen == 0) {
1838 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1839 trace_xfs_alloc_size_noentry(args);
1840 return 0;
1841 }
1842 ASSERT(i == 1);
1843 busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1844 &rlen, &busy_gen);
1845 } else {
1846 /*
1847 * Search for a non-busy extent that is large enough.
1848 */
1849 for (;;) {
1850 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1851 if (error)
1852 goto error0;
1853 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1854 xfs_btree_mark_sick(cnt_cur);
1855 error = -EFSCORRUPTED;
1856 goto error0;
1857 }
1858
1859 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1860 &rbno, &rlen, &busy_gen);
1861
1862 if (rlen >= args->maxlen)
1863 break;
1864
1865 error = xfs_btree_increment(cnt_cur, 0, &i);
1866 if (error)
1867 goto error0;
1868 if (i)
1869 continue;
1870
1871 /*
1872 * Our only valid extents must have been busy. Flush and
1873 * retry the allocation again. If we get an -EAGAIN
1874 * error, we're being told that a deadlock was avoided
1875 * and the current transaction needs committing before
1876 * the allocation can be retried.
1877 */
1878 trace_xfs_alloc_size_busy(args);
1879 error = xfs_extent_busy_flush(args->tp,
1880 pag_group(args->pag), busy_gen,
1881 alloc_flags);
1882 if (error)
1883 goto error0;
1884
1885 alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1886 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1887 goto restart;
1888 }
1889 }
1890
1891 /*
1892 * In the first case above, we got the last entry in the
1893 * by-size btree. Now we check to see if the space hits maxlen
1894 * once aligned; if not, we search left for something better.
1895 * This can't happen in the second case above.
1896 */
1897 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1898 if (XFS_IS_CORRUPT(args->mp,
1899 rlen != 0 &&
1900 (rlen > flen ||
1901 rbno + rlen > fbno + flen))) {
1902 xfs_btree_mark_sick(cnt_cur);
1903 error = -EFSCORRUPTED;
1904 goto error0;
1905 }
1906 if (rlen < args->maxlen) {
1907 xfs_agblock_t bestfbno;
1908 xfs_extlen_t bestflen;
1909 xfs_agblock_t bestrbno;
1910 xfs_extlen_t bestrlen;
1911
1912 bestrlen = rlen;
1913 bestrbno = rbno;
1914 bestflen = flen;
1915 bestfbno = fbno;
1916 for (;;) {
1917 if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1918 goto error0;
1919 if (i == 0)
1920 break;
1921 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1922 &i)))
1923 goto error0;
1924 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1925 xfs_btree_mark_sick(cnt_cur);
1926 error = -EFSCORRUPTED;
1927 goto error0;
1928 }
1929 if (flen <= bestrlen)
1930 break;
1931 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1932 &rbno, &rlen, &busy_gen);
1933 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1934 if (XFS_IS_CORRUPT(args->mp,
1935 rlen != 0 &&
1936 (rlen > flen ||
1937 rbno + rlen > fbno + flen))) {
1938 xfs_btree_mark_sick(cnt_cur);
1939 error = -EFSCORRUPTED;
1940 goto error0;
1941 }
1942 if (rlen > bestrlen) {
1943 bestrlen = rlen;
1944 bestrbno = rbno;
1945 bestflen = flen;
1946 bestfbno = fbno;
1947 if (rlen == args->maxlen)
1948 break;
1949 }
1950 }
1951 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1952 &i)))
1953 goto error0;
1954 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1955 xfs_btree_mark_sick(cnt_cur);
1956 error = -EFSCORRUPTED;
1957 goto error0;
1958 }
1959 rlen = bestrlen;
1960 rbno = bestrbno;
1961 flen = bestflen;
1962 fbno = bestfbno;
1963 }
1964 args->wasfromfl = 0;
1965 /*
1966 * Fix up the length.
1967 */
1968 args->len = rlen;
1969 if (rlen < args->minlen) {
1970 if (busy) {
1971 /*
1972 * Our only valid extents must have been busy. Flush and
1973 * retry the allocation again. If we get an -EAGAIN
1974 * error, we're being told that a deadlock was avoided
1975 * and the current transaction needs committing before
1976 * the allocation can be retried.
1977 */
1978 trace_xfs_alloc_size_busy(args);
1979 error = xfs_extent_busy_flush(args->tp,
1980 pag_group(args->pag), busy_gen,
1981 alloc_flags);
1982 if (error)
1983 goto error0;
1984
1985 alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1986 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1987 goto restart;
1988 }
1989 goto out_nominleft;
1990 }
1991 xfs_alloc_fix_len(args);
1992
1993 rlen = args->len;
1994 if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
1995 xfs_btree_mark_sick(cnt_cur);
1996 error = -EFSCORRUPTED;
1997 goto error0;
1998 }
1999 /*
2000 * Allocate and initialize a cursor for the by-block tree.
2001 */
2002 bno_cur = xfs_bnobt_init_cursor(args->mp, args->tp, args->agbp,
2003 args->pag);
2004 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
2005 rbno, rlen, XFSA_FIXUP_CNT_OK)))
2006 goto error0;
2007 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2008 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
2009 cnt_cur = bno_cur = NULL;
2010 args->len = rlen;
2011 args->agbno = rbno;
2012 if (XFS_IS_CORRUPT(args->mp,
2013 args->agbno + args->len >
2014 be32_to_cpu(agf->agf_length))) {
2015 xfs_ag_mark_sick(args->pag, XFS_SICK_AG_BNOBT);
2016 error = -EFSCORRUPTED;
2017 goto error0;
2018 }
2019 trace_xfs_alloc_size_done(args);
2020 return 0;
2021
2022error0:
2023 trace_xfs_alloc_size_error(args);
2024 if (cnt_cur)
2025 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
2026 if (bno_cur)
2027 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
2028 return error;
2029
2030out_nominleft:
2031 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2032 trace_xfs_alloc_size_nominleft(args);
2033 args->agbno = NULLAGBLOCK;
2034 return 0;
2035}
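
/*
 * Worked example for the leftward scan in xfs_alloc_ag_vextent_size()
 * (hypothetical records): with maxlen = 10 and alignment = 4, suppose the
 * last cntbt record is a 12-block extent that loses 3 blocks to alignment,
 * yielding rlen = 9 < maxlen. The loop then decrements through smaller
 * records; an 11-block extent that happens to start aligned yields
 * rlen = min(maxlen, 11) = 10 == maxlen and terminates the scan at once,
 * otherwise the largest rlen seen so far (bestrlen) wins, and the scan
 * stops as soon as a record's raw length cannot beat it.
 */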
2036
2037/*
2038 * Free the extent starting at agno/bno for length.
2039 */
2040int
2041xfs_free_ag_extent(
2042 struct xfs_trans *tp,
2043 struct xfs_buf *agbp,
2044 xfs_agblock_t bno,
2045 xfs_extlen_t len,
2046 const struct xfs_owner_info *oinfo,
2047 enum xfs_ag_resv_type type)
2048{
2049 struct xfs_mount *mp;
2050 struct xfs_btree_cur *bno_cur;
2051 struct xfs_btree_cur *cnt_cur;
2052 xfs_agblock_t gtbno; /* start of right neighbor */
2053 xfs_extlen_t gtlen; /* length of right neighbor */
2054 xfs_agblock_t ltbno; /* start of left neighbor */
2055 xfs_extlen_t ltlen; /* length of left neighbor */
2056 xfs_agblock_t nbno; /* new starting block of freesp */
2057 xfs_extlen_t nlen; /* new length of freespace */
2058 int haveleft; /* have a left neighbor */
2059 int haveright; /* have a right neighbor */
2060 int i;
2061 int error;
2062 struct xfs_perag *pag = agbp->b_pag;
2063 bool fixup_longest = false;
2064
2065 bno_cur = cnt_cur = NULL;
2066 mp = tp->t_mountp;
2067
2068 if (!xfs_rmap_should_skip_owner_update(oinfo)) {
2069 error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo);
2070 if (error)
2071 goto error0;
2072 }
2073
2074 /*
2075 * Allocate and initialize a cursor for the by-block btree.
2076 */
2077 bno_cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag);
2078 /*
2079 * Look for a neighboring block on the left (lower block numbers)
2080 * that is contiguous with this space.
2081 */
2082 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
2083 goto error0;
2084 if (haveleft) {
2085 /*
2086 * There is a block to our left.
2087 */
2088		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
2089 goto error0;
2090 if (XFS_IS_CORRUPT(mp, i != 1)) {
2091 xfs_btree_mark_sick(bno_cur);
2092 error = -EFSCORRUPTED;
2093 goto error0;
2094 }
2095 /*
2096 * It's not contiguous, though.
2097 */
2098 if (ltbno + ltlen < bno)
2099 haveleft = 0;
2100 else {
2101 /*
2102 * If this failure happens the request to free this
2103 * space was invalid, it's (partly) already free.
2104 * Very bad.
2105 */
2106 if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
2107 xfs_btree_mark_sick(bno_cur);
2108 error = -EFSCORRUPTED;
2109 goto error0;
2110 }
2111 }
2112 }
2113 /*
2114 * Look for a neighboring block on the right (higher block numbers)
2115 * that is contiguous with this space.
2116 */
2117 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
2118 goto error0;
2119 if (haveright) {
2120 /*
2121 * There is a block to our right.
2122 */
2123		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
2124 goto error0;
2125 if (XFS_IS_CORRUPT(mp, i != 1)) {
2126 xfs_btree_mark_sick(bno_cur);
2127 error = -EFSCORRUPTED;
2128 goto error0;
2129 }
2130 /*
2131 * It's not contiguous, though.
2132 */
2133 if (bno + len < gtbno)
2134 haveright = 0;
2135 else {
2136 /*
2137 * If this failure happens the request to free this
2138 * space was invalid, it's (partly) already free.
2139 * Very bad.
2140 */
2141 if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
2142 xfs_btree_mark_sick(bno_cur);
2143 error = -EFSCORRUPTED;
2144 goto error0;
2145 }
2146 }
2147 }
2148 /*
2149 * Now allocate and initialize a cursor for the by-size tree.
2150 */
2151 cnt_cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
2152 /*
2153 * Have both left and right contiguous neighbors.
2154 * Merge all three into a single free block.
2155 */
2156 if (haveleft && haveright) {
2157 /*
2158 * Delete the old by-size entry on the left.
2159 */
2160 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2161 goto error0;
2162 if (XFS_IS_CORRUPT(mp, i != 1)) {
2163 xfs_btree_mark_sick(cnt_cur);
2164 error = -EFSCORRUPTED;
2165 goto error0;
2166 }
2167 if ((error = xfs_btree_delete(cnt_cur, &i)))
2168 goto error0;
2169 if (XFS_IS_CORRUPT(mp, i != 1)) {
2170 xfs_btree_mark_sick(cnt_cur);
2171 error = -EFSCORRUPTED;
2172 goto error0;
2173 }
2174 /*
2175 * Delete the old by-size entry on the right.
2176 */
2177 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2178 goto error0;
2179 if (XFS_IS_CORRUPT(mp, i != 1)) {
2180 xfs_btree_mark_sick(cnt_cur);
2181 error = -EFSCORRUPTED;
2182 goto error0;
2183 }
2184 if ((error = xfs_btree_delete(cnt_cur, &i)))
2185 goto error0;
2186 if (XFS_IS_CORRUPT(mp, i != 1)) {
2187 xfs_btree_mark_sick(cnt_cur);
2188 error = -EFSCORRUPTED;
2189 goto error0;
2190 }
2191 /*
2192 * Delete the old by-block entry for the right block.
2193 */
2194 if ((error = xfs_btree_delete(bno_cur, &i)))
2195 goto error0;
2196 if (XFS_IS_CORRUPT(mp, i != 1)) {
2197 xfs_btree_mark_sick(bno_cur);
2198 error = -EFSCORRUPTED;
2199 goto error0;
2200 }
2201 /*
2202 * Move the by-block cursor back to the left neighbor.
2203 */
2204 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2205 goto error0;
2206 if (XFS_IS_CORRUPT(mp, i != 1)) {
2207 xfs_btree_mark_sick(bno_cur);
2208 error = -EFSCORRUPTED;
2209 goto error0;
2210 }
2211#ifdef DEBUG
2212 /*
2213 * Check that this is the right record: delete didn't
2214 * mangle the cursor.
2215 */
2216 {
2217 xfs_agblock_t xxbno;
2218 xfs_extlen_t xxlen;
2219
2220 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
2221 &i)))
2222 goto error0;
2223 if (XFS_IS_CORRUPT(mp,
2224 i != 1 ||
2225 xxbno != ltbno ||
2226 xxlen != ltlen)) {
2227 xfs_btree_mark_sick(bno_cur);
2228 error = -EFSCORRUPTED;
2229 goto error0;
2230 }
2231 }
2232#endif
2233 /*
2234 * Update remaining by-block entry to the new, joined block.
2235 */
2236 nbno = ltbno;
2237 nlen = len + ltlen + gtlen;
2238 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2239 goto error0;
2240 }
2241 /*
2242 * Have only a left contiguous neighbor.
2243 * Merge it together with the new freespace.
2244 */
2245 else if (haveleft) {
2246 /*
2247 * Delete the old by-size entry on the left.
2248 */
2249 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2250 goto error0;
2251 if (XFS_IS_CORRUPT(mp, i != 1)) {
2252 xfs_btree_mark_sick(cnt_cur);
2253 error = -EFSCORRUPTED;
2254 goto error0;
2255 }
2256 if ((error = xfs_btree_delete(cnt_cur, &i)))
2257 goto error0;
2258 if (XFS_IS_CORRUPT(mp, i != 1)) {
2259 xfs_btree_mark_sick(cnt_cur);
2260 error = -EFSCORRUPTED;
2261 goto error0;
2262 }
2263 /*
2264 * Back up the by-block cursor to the left neighbor, and
2265 * update its length.
2266 */
2267 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2268 goto error0;
2269 if (XFS_IS_CORRUPT(mp, i != 1)) {
2270 xfs_btree_mark_sick(bno_cur);
2271 error = -EFSCORRUPTED;
2272 goto error0;
2273 }
2274 nbno = ltbno;
2275 nlen = len + ltlen;
2276 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2277 goto error0;
2278 }
2279 /*
2280 * Have only a right contiguous neighbor.
2281 * Merge it together with the new freespace.
2282 */
2283 else if (haveright) {
2284 /*
2285 * Delete the old by-size entry on the right.
2286 */
2287 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2288 goto error0;
2289 if (XFS_IS_CORRUPT(mp, i != 1)) {
2290 xfs_btree_mark_sick(cnt_cur);
2291 error = -EFSCORRUPTED;
2292 goto error0;
2293 }
2294 if ((error = xfs_btree_delete(cnt_cur, &i)))
2295 goto error0;
2296 if (XFS_IS_CORRUPT(mp, i != 1)) {
2297 xfs_btree_mark_sick(cnt_cur);
2298 error = -EFSCORRUPTED;
2299 goto error0;
2300 }
2301 /*
2302 * Update the starting block and length of the right
2303 * neighbor in the by-block tree.
2304 */
2305 nbno = bno;
2306 nlen = len + gtlen;
2307 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2308 goto error0;
2309 }
2310 /*
2311 * No contiguous neighbors.
2312 * Insert the new freespace into the by-block tree.
2313 */
2314 else {
2315 nbno = bno;
2316 nlen = len;
2317 if ((error = xfs_btree_insert(bno_cur, &i)))
2318 goto error0;
2319 if (XFS_IS_CORRUPT(mp, i != 1)) {
2320 xfs_btree_mark_sick(bno_cur);
2321 error = -EFSCORRUPTED;
2322 goto error0;
2323 }
2324 }
2325 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
2326 bno_cur = NULL;
2327
2328 /*
2329 * In all cases we need to insert the new freespace in the by-size tree.
2330 *
2331 * If this new freespace is being inserted in the block that contains
2332 * the largest free space in the btree, make sure we also fix up the
2333	 * agf->agf_longest tracker field.
2334 */
2335 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
2336 goto error0;
2337 if (XFS_IS_CORRUPT(mp, i != 0)) {
2338 xfs_btree_mark_sick(cnt_cur);
2339 error = -EFSCORRUPTED;
2340 goto error0;
2341 }
2342 if (xfs_alloc_cursor_at_lastrec(cnt_cur))
2343 fixup_longest = true;
2344 if ((error = xfs_btree_insert(cnt_cur, &i)))
2345 goto error0;
2346 if (XFS_IS_CORRUPT(mp, i != 1)) {
2347 xfs_btree_mark_sick(cnt_cur);
2348 error = -EFSCORRUPTED;
2349 goto error0;
2350 }
2351 if (fixup_longest) {
2352 error = xfs_alloc_fixup_longest(cnt_cur);
2353 if (error)
2354 goto error0;
2355 }
2356
2357 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2358 cnt_cur = NULL;
2359
2360 /*
2361 * Update the freespace totals in the ag and superblock.
2362 */
2363 error = xfs_alloc_update_counters(tp, agbp, len);
2364 xfs_ag_resv_free_extent(pag, type, tp, len);
2365 if (error)
2366 goto error0;
2367
2368 XFS_STATS_INC(mp, xs_freex);
2369 XFS_STATS_ADD(mp, xs_freeb, len);
2370
2371 trace_xfs_free_extent(pag, bno, len, type, haveleft, haveright);
2372
2373 return 0;
2374
2375 error0:
2376 trace_xfs_free_extent(pag, bno, len, type, -1, -1);
2377 if (bno_cur)
2378 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
2379 if (cnt_cur)
2380 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
2381 return error;
2382}
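
/*
 * Editorial sketch (hypothetical helper, not called by anything above):
 * once contiguity has been verified, i.e. ltbno + ltlen == bno for the
 * left neighbor and bno + len == gtbno for the right neighbor, the four
 * merge cases in xfs_free_ag_extent() reduce to the arithmetic below.
 */
static inline void
xfs_free_extent_merge_sketch(
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			haveleft,
	xfs_agblock_t		ltbno,
	xfs_extlen_t		ltlen,
	bool			haveright,
	xfs_extlen_t		gtlen,
	xfs_agblock_t		*nbno,
	xfs_extlen_t		*nlen)
{
	/* a left merge moves the record start back to the left neighbor */
	*nbno = haveleft ? ltbno : bno;
	/* every contiguous neighbor contributes its full length */
	*nlen = len + (haveleft ? ltlen : 0) + (haveright ? gtlen : 0);
}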
2383
2384/*
2385 * Visible (exported) allocation/free functions.
2386 * Some of these are used just by xfs_alloc_btree.c and this file.
2387 */
2388
2389/*
2390 * Compute and fill in value of m_alloc_maxlevels.
2391 */
2392void
2393xfs_alloc_compute_maxlevels(
2394 xfs_mount_t *mp) /* file system mount structure */
2395{
2396 mp->m_alloc_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
2397 (mp->m_sb.sb_agblocks + 1) / 2);
2398 ASSERT(mp->m_alloc_maxlevels <= xfs_allocbt_maxlevels_ondisk());
2399}
2400
2401/*
2402 * Find the length of the longest extent in an AG. The 'need' parameter
2403 * specifies how much space we're going to need for the AGFL and the
2404 * 'reserved' parameter tells us how many blocks in this AG are reserved for
2405 * other callers.
2406 */
2407xfs_extlen_t
2408xfs_alloc_longest_free_extent(
2409 struct xfs_perag *pag,
2410 xfs_extlen_t need,
2411 xfs_extlen_t reserved)
2412{
2413 xfs_extlen_t delta = 0;
2414
2415 /*
2416 * If the AGFL needs a recharge, we'll have to subtract that from the
2417 * longest extent.
2418 */
2419 if (need > pag->pagf_flcount)
2420 delta = need - pag->pagf_flcount;
2421
2422 /*
2423 * If we cannot maintain others' reservations with space from the
2424 * not-longest freesp extents, we'll have to subtract /that/ from
2425 * the longest extent too.
2426 */
2427 if (pag->pagf_freeblks - pag->pagf_longest < reserved)
2428 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
2429
2430 /*
2431 * If the longest extent is long enough to satisfy all the
2432 * reservations and AGFL rules in place, we can return this extent.
2433 */
2434 if (pag->pagf_longest > delta)
2435 return min_t(xfs_extlen_t, pag_mount(pag)->m_ag_max_usable,
2436 pag->pagf_longest - delta);
2437
2438 /* Otherwise, let the caller try for 1 block if there's space. */
2439 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2440}
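
/*
 * Worked example for the function above (numbers are hypothetical): with
 * pagf_longest = 100, pagf_freeblks = 120, an AGFL that needs 6 blocks but
 * only holds 2 (delta = 6 - 2 = 4), and reserved = 30, the non-longest
 * free space is 120 - 100 = 20, so another 30 - 20 = 10 blocks must come
 * out of the longest extent. delta = 4 + 10 = 14, and the caller may use
 * at most 100 - 14 = 86 blocks, further capped by m_ag_max_usable.
 */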
2441
2442/*
2443 * Compute the minimum length of the AGFL in the given AG. If @pag is NULL,
2444 * return the largest possible minimum length.
2445 */
2446unsigned int
2447xfs_alloc_min_freelist(
2448 struct xfs_mount *mp,
2449 struct xfs_perag *pag)
2450{
2451 /* AG btrees have at least 1 level. */
2452 const unsigned int bno_level = pag ? pag->pagf_bno_level : 1;
2453 const unsigned int cnt_level = pag ? pag->pagf_cnt_level : 1;
2454 const unsigned int rmap_level = pag ? pag->pagf_rmap_level : 1;
2455 unsigned int min_free;
2456
2457 ASSERT(mp->m_alloc_maxlevels > 0);
2458
2459 /*
2460 * For a btree shorter than the maximum height, the worst case is that
2461 * every level gets split and a new level is added, then while inserting
2462 * another entry to refill the AGFL, every level under the old root gets
2463 * split again. This is:
2464 *
2465 * (full height split reservation) + (AGFL refill split height)
2466 * = (current height + 1) + (current height - 1)
2467 * = (new height) + (new height - 2)
2468 * = 2 * new height - 2
2469 *
2470 * For a btree of maximum height, the worst case is that every level
2471 * under the root gets split, then while inserting another entry to
2472 * refill the AGFL, every level under the root gets split again. This is
2473 * also:
2474 *
2475 * 2 * (current height - 1)
2476 * = 2 * (new height - 1)
2477 * = 2 * new height - 2
2478 */
2479
2480 /* space needed by-bno freespace btree */
2481 min_free = min(bno_level + 1, mp->m_alloc_maxlevels) * 2 - 2;
2482 /* space needed by-size freespace btree */
2483 min_free += min(cnt_level + 1, mp->m_alloc_maxlevels) * 2 - 2;
2484 /* space needed reverse mapping used space btree */
2485 if (xfs_has_rmapbt(mp))
2486 min_free += min(rmap_level + 1, mp->m_rmap_maxlevels) * 2 - 2;
2487 return min_free;
2488}
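
/*
 * Worked example for the formula above (hypothetical geometry): with
 * m_alloc_maxlevels = 5, a 2-level bnobt needs min(2 + 1, 5) * 2 - 2 = 4
 * blocks and a 3-level cntbt needs min(3 + 1, 5) * 2 - 2 = 6, so
 * min_free = 10 before any rmapbt contribution. A btree already at
 * maximum height contributes 2 * maxlevels - 2 regardless of how much
 * taller a split would otherwise want to make it.
 */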
2489
2490/*
2491 * Check if the operation we are fixing up the freelist for should go ahead or
2492 * not. If we are freeing blocks, we always allow it, otherwise the allocation
2493 * is dependent on whether the size and shape of free space available will
2494 * permit the requested allocation to take place.
2495 */
2496static bool
2497xfs_alloc_space_available(
2498 struct xfs_alloc_arg *args,
2499 xfs_extlen_t min_free,
2500 int flags)
2501{
2502 struct xfs_perag *pag = args->pag;
2503 xfs_extlen_t alloc_len, longest;
2504 xfs_extlen_t reservation; /* blocks that are still reserved */
2505 int available;
2506 xfs_extlen_t agflcount;
2507
2508 if (flags & XFS_ALLOC_FLAG_FREEING)
2509 return true;
2510
2511 reservation = xfs_ag_resv_needed(pag, args->resv);
2512
2513 /* do we have enough contiguous free space for the allocation? */
2514 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2515 longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2516 if (longest < alloc_len)
2517 return false;
2518
2519 /*
2520 * Do we have enough free space remaining for the allocation? Don't
2521	 * account extra agfl blocks because we are about to defer freeing them,
2522 * making them unavailable until the current transaction commits.
2523 */
2524 agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
2525 available = (int)(pag->pagf_freeblks + agflcount -
2526 reservation - min_free - args->minleft);
2527 if (available < (int)max(args->total, alloc_len))
2528 return false;
2529
2530 /*
2531 * Clamp maxlen to the amount of free space available for the actual
2532 * extent allocation.
2533 */
2534 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2535 args->maxlen = available;
2536 ASSERT(args->maxlen > 0);
2537 ASSERT(args->maxlen >= args->minlen);
2538 }
2539
2540 return true;
2541}
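
/*
 * Worked example for the availability check above (numbers hypothetical):
 * with pagf_freeblks = 50, pagf_flcount = 3, min_free = 8,
 * reservation = 10, args->minleft = 5, args->total = 20 and
 * alloc_len = 12, agflcount = min(3, 8) = 3, so available =
 * 50 + 3 - 10 - 8 - 5 = 30, which passes the max(total, alloc_len) = 20
 * check; args->maxlen would then be clamped to 30 if it was larger and
 * this is not a CHECK-only call.
 */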
2542
2543/*
2544 * Check the agfl fields of the agf for inconsistency or corruption.
2545 *
2546 * The original purpose was to detect an agfl header padding mismatch between
2547 * current and early v5 kernels. This problem manifests as a 1-slot size
2548 * difference between the on-disk flcount and the active [first, last] range of
2549 * a wrapped agfl.
2550 *
2551 * However, we need to use these same checks to catch agfl count corruptions
2552 * unrelated to padding. This could occur on any v4 or v5 filesystem, so either
2553 * way, we need to reset the agfl and warn the user.
2554 *
2555 * Return true if a reset is required before the agfl can be used, false
2556 * otherwise.
2557 */
2558static bool
2559xfs_agfl_needs_reset(
2560 struct xfs_mount *mp,
2561 struct xfs_agf *agf)
2562{
2563 uint32_t f = be32_to_cpu(agf->agf_flfirst);
2564 uint32_t l = be32_to_cpu(agf->agf_fllast);
2565 uint32_t c = be32_to_cpu(agf->agf_flcount);
2566 int agfl_size = xfs_agfl_size(mp);
2567 int active;
2568
2569 /*
2570 * The agf read verifier catches severe corruption of these fields.
2571 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2572 * the verifier allows it.
2573 */
2574 if (f >= agfl_size || l >= agfl_size)
2575 return true;
2576 if (c > agfl_size)
2577 return true;
2578
2579 /*
2580 * Check consistency between the on-disk count and the active range. An
2581 * agfl padding mismatch manifests as an inconsistent flcount.
2582 */
2583 if (c && l >= f)
2584 active = l - f + 1;
2585 else if (c)
2586 active = agfl_size - f + l + 1;
2587 else
2588 active = 0;
2589
2590 return active != c;
2591}
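
/*
 * Worked example for the wrapped-range check above: with an AGFL of, say,
 * 119 slots, flfirst = 117 and fllast = 2, the active range wraps and
 * covers 119 - 117 + 2 + 1 = 5 entries; an on-disk flcount of anything
 * other than 5 (commonly off by one after a packed/unpacked header
 * mismatch) forces a reset.
 */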
2592
2593/*
2594 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2595 * agfl content cannot be trusted. Warn the user that a repair is required to
2596 * recover leaked blocks.
2597 *
2598 * The purpose of this mechanism is to handle filesystems affected by the agfl
2599 * header padding mismatch problem. A reset keeps the filesystem online with a
2600 * relatively minor free space accounting inconsistency rather than suffer the
2601 * inevitable crash from use of an invalid agfl block.
2602 */
2603static void
2604xfs_agfl_reset(
2605 struct xfs_trans *tp,
2606 struct xfs_buf *agbp,
2607 struct xfs_perag *pag)
2608{
2609 struct xfs_mount *mp = tp->t_mountp;
2610 struct xfs_agf *agf = agbp->b_addr;
2611
2612 ASSERT(xfs_perag_agfl_needs_reset(pag));
2613 trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2614
2615 xfs_warn(mp,
2616 "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2617 "Please unmount and run xfs_repair.",
2618 pag_agno(pag), pag->pagf_flcount);
2619
2620 agf->agf_flfirst = 0;
2621 agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2622 agf->agf_flcount = 0;
2623 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2624 XFS_AGF_FLCOUNT);
2625
2626 pag->pagf_flcount = 0;
2627 clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
2628}
2629
2630/*
2631 * Add the extent to the list of extents to be free at transaction end.
2632 * The list is maintained sorted (by block number).
2633 */
2634static int
2635xfs_defer_extent_free(
2636 struct xfs_trans *tp,
2637 xfs_fsblock_t bno,
2638 xfs_filblks_t len,
2639 const struct xfs_owner_info *oinfo,
2640 enum xfs_ag_resv_type type,
2641 unsigned int free_flags,
2642 struct xfs_defer_pending **dfpp)
2643{
2644 struct xfs_extent_free_item *xefi;
2645 struct xfs_mount *mp = tp->t_mountp;
2646
2647 ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
2648 ASSERT(!isnullstartblock(bno));
2649 ASSERT(!(free_flags & ~XFS_FREE_EXTENT_ALL_FLAGS));
2650
2651 if (free_flags & XFS_FREE_EXTENT_REALTIME) {
2652 if (type != XFS_AG_RESV_NONE) {
2653 ASSERT(type == XFS_AG_RESV_NONE);
2654 return -EFSCORRUPTED;
2655 }
2656 if (XFS_IS_CORRUPT(mp, !xfs_verify_rtbext(mp, bno, len)))
2657 return -EFSCORRUPTED;
2658 } else {
2659 if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len)))
2660 return -EFSCORRUPTED;
2661 }
2662
2663 xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
2664 GFP_KERNEL | __GFP_NOFAIL);
2665 xefi->xefi_startblock = bno;
2666 xefi->xefi_blockcount = (xfs_extlen_t)len;
2667 xefi->xefi_agresv = type;
2668 if (free_flags & XFS_FREE_EXTENT_SKIP_DISCARD)
2669 xefi->xefi_flags |= XFS_EFI_SKIP_DISCARD;
2670 if (free_flags & XFS_FREE_EXTENT_REALTIME)
2671 xefi->xefi_flags |= XFS_EFI_REALTIME;
2672 if (oinfo) {
2673 ASSERT(oinfo->oi_offset == 0);
2674
2675 if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
2676 xefi->xefi_flags |= XFS_EFI_ATTR_FORK;
2677 if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
2678 xefi->xefi_flags |= XFS_EFI_BMBT_BLOCK;
2679 xefi->xefi_owner = oinfo->oi_owner;
2680 } else {
2681 xefi->xefi_owner = XFS_RMAP_OWN_NULL;
2682 }
2683
2684 xfs_extent_free_defer_add(tp, xefi, dfpp);
2685 return 0;
2686}
2687
2688int
2689xfs_free_extent_later(
2690 struct xfs_trans *tp,
2691 xfs_fsblock_t bno,
2692 xfs_filblks_t len,
2693 const struct xfs_owner_info *oinfo,
2694 enum xfs_ag_resv_type type,
2695 unsigned int free_flags)
2696{
2697 struct xfs_defer_pending *dontcare = NULL;
2698
2699 return xfs_defer_extent_free(tp, bno, len, oinfo, type, free_flags,
2700 &dontcare);
2701}
2702
2703/*
2704 * Set up automatic freeing of unwritten space in the filesystem.
2705 *
2706 * This function attaches a paused deferred extent free item to the
2707 * transaction. Pausing means that the EFI will be logged in the next
2708 * transaction commit, but the pending EFI will not be finished until the
2709 * pending item is unpaused.
2710 *
2711 * If the system goes down after the EFI has been persisted to the log but
2712 * before the pending item is unpaused, log recovery will find the EFI, fail to
2713 * find the EFD, and free the space.
2714 *
2715 * If the pending item is unpaused, the next transaction commit will log an EFD
2716 * without freeing the space.
2717 *
2718 * Caller must ensure that the tp, fsbno, len, oinfo, and resv flags of the
2719 * @args structure are set to the relevant values.
2720 */
2721int
2722xfs_alloc_schedule_autoreap(
2723 const struct xfs_alloc_arg *args,
2724 unsigned int free_flags,
2725 struct xfs_alloc_autoreap *aarp)
2726{
2727 int error;
2728
2729 error = xfs_defer_extent_free(args->tp, args->fsbno, args->len,
2730 &args->oinfo, args->resv, free_flags, &aarp->dfp);
2731 if (error)
2732 return error;
2733
2734 xfs_defer_item_pause(args->tp, aarp->dfp);
2735 return 0;
2736}
2737
2738/*
2739 * Cancel automatic freeing of unwritten space in the filesystem.
2740 *
2741 * Earlier, we created a paused deferred extent free item and attached it to
2742 * this transaction so that we could automatically roll back a new space
2743 * allocation if the system went down. Now we want to cancel the paused work
2744 * item by marking the EFI stale so we don't actually free the space, unpausing
2745 * the pending item and logging an EFD.
2746 *
2747 * The caller generally should have already mapped the space into the ondisk
2748 * filesystem. If the reserved space was partially used, the caller must call
2749 * xfs_free_extent_later to create a new EFI to free the unused space.
2750 */
2751void
2752xfs_alloc_cancel_autoreap(
2753 struct xfs_trans *tp,
2754 struct xfs_alloc_autoreap *aarp)
2755{
2756 struct xfs_defer_pending *dfp = aarp->dfp;
2757 struct xfs_extent_free_item *xefi;
2758
2759 if (!dfp)
2760 return;
2761
2762 list_for_each_entry(xefi, &dfp->dfp_work, xefi_list)
2763 xefi->xefi_flags |= XFS_EFI_CANCELLED;
2764
2765 xfs_defer_item_unpause(tp, dfp);
2766}
2767
2768/*
2769 * Commit automatic freeing of unwritten space in the filesystem.
2770 *
2771 * This unpauses an earlier _schedule_autoreap and commits to freeing the
2772 * allocated space. Call this if none of the reserved space was used.
2773 */
2774void
2775xfs_alloc_commit_autoreap(
2776 struct xfs_trans *tp,
2777 struct xfs_alloc_autoreap *aarp)
2778{
2779 if (aarp->dfp)
2780 xfs_defer_item_unpause(tp, aarp->dfp);
2781}
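
/*
 * Editorial sketch of the autoreap lifecycle above (not a real caller;
 * error handling is elided and the surrounding transaction management is
 * assumed): schedule the reap right after allocating, then cancel it once
 * the space has been mapped into the filesystem, or commit it to give the
 * space back.
 */
#if 0
	struct xfs_alloc_autoreap	aarp = { };

	error = xfs_alloc_vextent_start_ag(&args, target);
	if (!error)
		error = xfs_alloc_schedule_autoreap(&args, 0, &aarp);
	/* ... attempt to map args.fsbno/args.len into the filesystem ... */
	if (mapping_succeeded)
		xfs_alloc_cancel_autoreap(tp, &aarp);	/* keep the blocks */
	else
		xfs_alloc_commit_autoreap(tp, &aarp);	/* free them again */
#endif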
2782
2783/*
2784 * Check if an AGF has a free extent record whose length is equal to
2785 * args->minlen.
2786 */
2787STATIC int
2788xfs_exact_minlen_extent_available(
2789 struct xfs_alloc_arg *args,
2790 struct xfs_buf *agbp,
2791 int *stat)
2792{
2793 struct xfs_btree_cur *cnt_cur;
2794 xfs_agblock_t fbno;
2795 xfs_extlen_t flen;
2796 int error = 0;
2797
2798 cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, agbp,
2799 args->pag);
2800 error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
2801 if (error)
2802 goto out;
2803
2804 if (*stat == 0) {
2805 xfs_btree_mark_sick(cnt_cur);
2806 error = -EFSCORRUPTED;
2807 goto out;
2808 }
2809
2810 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat);
2811 if (error)
2812 goto out;
2813
2814 if (*stat == 1 && flen != args->minlen)
2815 *stat = 0;
2816
2817out:
2818 xfs_btree_del_cursor(cnt_cur, error);
2819
2820 return error;
2821}
2822
2823/*
2824 * Decide whether to use this allocation group for this allocation.
2825 * If so, fix up the btree freelist's size.
2826 */
2827int /* error */
2828xfs_alloc_fix_freelist(
2829 struct xfs_alloc_arg *args, /* allocation argument structure */
2830 uint32_t alloc_flags)
2831{
2832 struct xfs_mount *mp = args->mp;
2833 struct xfs_perag *pag = args->pag;
2834 struct xfs_trans *tp = args->tp;
2835 struct xfs_buf *agbp = NULL;
2836 struct xfs_buf *agflbp = NULL;
2837 struct xfs_alloc_arg targs; /* local allocation arguments */
2838 xfs_agblock_t bno; /* freelist block */
2839 xfs_extlen_t need; /* total blocks needed in freelist */
2840 int error = 0;
2841
2842 /* deferred ops (AGFL block frees) require permanent transactions */
2843 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
2844
2845 if (!xfs_perag_initialised_agf(pag)) {
2846 error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2847 if (error) {
2848 /* Couldn't lock the AGF so skip this AG. */
2849 if (error == -EAGAIN)
2850 error = 0;
2851 goto out_no_agbp;
2852 }
2853 }
2854
2855 /*
2856	 * If this is a metadata preferred pag and we are allocating user
2857	 * data, then try somewhere else if we are not being asked to try
2858	 * harder at this point.
2859 */
2860 if (xfs_perag_prefers_metadata(pag) &&
2861 (args->datatype & XFS_ALLOC_USERDATA) &&
2862 (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2863 ASSERT(!(alloc_flags & XFS_ALLOC_FLAG_FREEING));
2864 goto out_agbp_relse;
2865 }
2866
2867 need = xfs_alloc_min_freelist(mp, pag);
2868 if (!xfs_alloc_space_available(args, need, alloc_flags |
2869 XFS_ALLOC_FLAG_CHECK))
2870 goto out_agbp_relse;
2871
2872 /*
2873 * Get the a.g. freespace buffer.
2874 * Can fail if we're not blocking on locks, and it's held.
2875 */
2876 if (!agbp) {
2877 error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2878 if (error) {
2879 /* Couldn't lock the AGF so skip this AG. */
2880 if (error == -EAGAIN)
2881 error = 0;
2882 goto out_no_agbp;
2883 }
2884 }
2885
2886 /* reset a padding mismatched agfl before final free space check */
2887 if (xfs_perag_agfl_needs_reset(pag))
2888 xfs_agfl_reset(tp, agbp, pag);
2889
2890	/* If there isn't enough total space or a large enough single extent, reject it. */
2891 need = xfs_alloc_min_freelist(mp, pag);
2892 if (!xfs_alloc_space_available(args, need, alloc_flags))
2893 goto out_agbp_relse;
2894
2895 if (IS_ENABLED(CONFIG_XFS_DEBUG) && args->alloc_minlen_only) {
2896 int stat;
2897
2898 error = xfs_exact_minlen_extent_available(args, agbp, &stat);
2899 if (error || !stat)
2900 goto out_agbp_relse;
2901 }
2902
2903 /*
2904 * Make the freelist shorter if it's too long.
2905 *
2906 * Note that from this point onwards, we will always release the agf and
2907 * agfl buffers on error. This handles the case where we error out and
2908 * the buffers are clean or may not have been joined to the transaction
2909 * and hence need to be released manually. If they have been joined to
2910 * the transaction, then xfs_trans_brelse() will handle them
2911 * appropriately based on the recursion count and dirty state of the
2912 * buffer.
2913 *
2914 * XXX (dgc): When we have lots of free space, does this buy us
2915 * anything other than extra overhead when we need to put more blocks
2916 * back on the free list? Maybe we should only do this when space is
2917 * getting low or the AGFL is more than half full?
2918 *
2919 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2920 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2921 * updating the rmapbt. Both flags are used in xfs_repair while we're
2922 * rebuilding the rmapbt, and neither are used by the kernel. They're
2923 * both required to ensure that rmaps are correctly recorded for the
2924 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2925 * repair/rmap.c in xfsprogs for details.
2926 */
2927 memset(&targs, 0, sizeof(targs));
2928 /* struct copy below */
2929 if (alloc_flags & XFS_ALLOC_FLAG_NORMAP)
2930 targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2931 else
2932 targs.oinfo = XFS_RMAP_OINFO_AG;
2933 while (!(alloc_flags & XFS_ALLOC_FLAG_NOSHRINK) &&
2934 pag->pagf_flcount > need) {
2935 error = xfs_alloc_get_freelist(pag, tp, agbp, &bno, 0);
2936 if (error)
2937 goto out_agbp_relse;
2938
2939 /*
2940 * Defer the AGFL block free.
2941 *
2942 * This helps to prevent log reservation overruns due to too
2943 * many allocation operations in a transaction. AGFL frees are
2944 * prone to this problem because for one they are always freed
2945 * one at a time. Further, an immediate AGFL block free can
2946 * cause a btree join and require another block free before the
2947 * real allocation can proceed.
2948 * Deferring the free disconnects freeing up the AGFL slot from
2949 * freeing the block.
2950 */
2951 error = xfs_free_extent_later(tp, xfs_agbno_to_fsb(pag, bno),
2952 1, &targs.oinfo, XFS_AG_RESV_AGFL, 0);
2953 if (error)
2954 goto out_agbp_relse;
2955 }
2956
2957 targs.tp = tp;
2958 targs.mp = mp;
2959 targs.agbp = agbp;
2960 targs.agno = args->agno;
2961 targs.alignment = targs.minlen = targs.prod = 1;
2962 targs.pag = pag;
2963 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
2964 if (error)
2965 goto out_agbp_relse;
2966
2967 /* Make the freelist longer if it's too short. */
2968 while (pag->pagf_flcount < need) {
2969 targs.agbno = 0;
2970 targs.maxlen = need - pag->pagf_flcount;
2971 targs.resv = XFS_AG_RESV_AGFL;
2972
2973 /* Allocate as many blocks as possible at once. */
2974 error = xfs_alloc_ag_vextent_size(&targs, alloc_flags);
2975 if (error)
2976 goto out_agflbp_relse;
2977
2978 /*
2979 * Stop if we run out. Won't happen if callers are obeying
2980 * the restrictions correctly. Can happen for free calls
2981 * on a completely full ag.
2982 */
2983 if (targs.agbno == NULLAGBLOCK) {
2984 if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
2985 break;
2986 goto out_agflbp_relse;
2987 }
2988
2989 if (!xfs_rmap_should_skip_owner_update(&targs.oinfo)) {
2990 error = xfs_rmap_alloc(tp, agbp, pag,
2991 targs.agbno, targs.len, &targs.oinfo);
2992 if (error)
2993 goto out_agflbp_relse;
2994 }
2995 error = xfs_alloc_update_counters(tp, agbp,
2996 -((long)(targs.len)));
2997 if (error)
2998 goto out_agflbp_relse;
2999
3000 /*
3001 * Put each allocated block on the list.
3002 */
3003 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
3004 error = xfs_alloc_put_freelist(pag, tp, agbp,
3005 agflbp, bno, 0);
3006 if (error)
3007 goto out_agflbp_relse;
3008 }
3009 }
3010 xfs_trans_brelse(tp, agflbp);
3011 args->agbp = agbp;
3012 return 0;
3013
3014out_agflbp_relse:
3015 xfs_trans_brelse(tp, agflbp);
3016out_agbp_relse:
3017 if (agbp)
3018 xfs_trans_brelse(tp, agbp);
3019out_no_agbp:
3020 args->agbp = NULL;
3021 return error;
3022}
3023
3024/*
3025 * Get a block from the freelist.
3026 * Returns with the buffer for the block gotten.
3027 */
3028int
3029xfs_alloc_get_freelist(
3030 struct xfs_perag *pag,
3031 struct xfs_trans *tp,
3032 struct xfs_buf *agbp,
3033 xfs_agblock_t *bnop,
3034 int btreeblk)
3035{
3036 struct xfs_agf *agf = agbp->b_addr;
3037 struct xfs_buf *agflbp;
3038 xfs_agblock_t bno;
3039 __be32 *agfl_bno;
3040 int error;
3041 uint32_t logflags;
3042 struct xfs_mount *mp = tp->t_mountp;
3043
3044 /*
3045 * Freelist is empty, give up.
3046 */
3047 if (!agf->agf_flcount) {
3048 *bnop = NULLAGBLOCK;
3049 return 0;
3050 }
3051 /*
3052 * Read the array of free blocks.
3053 */
3054 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
3055 if (error)
3056 return error;
3057
3059 /*
3060 * Get the block number and update the data structures.
3061 */
3062 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3063 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
3064 if (XFS_IS_CORRUPT(tp->t_mountp, !xfs_verify_agbno(pag, bno)))
3065 return -EFSCORRUPTED;
3066
3067 be32_add_cpu(&agf->agf_flfirst, 1);
3068 xfs_trans_brelse(tp, agflbp);
3069 if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
3070 agf->agf_flfirst = 0;
3071
3072 ASSERT(!xfs_perag_agfl_needs_reset(pag));
3073 be32_add_cpu(&agf->agf_flcount, -1);
3074 pag->pagf_flcount--;
3075
3076 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
3077 if (btreeblk) {
3078 be32_add_cpu(&agf->agf_btreeblks, 1);
3079 pag->pagf_btreeblks++;
3080 logflags |= XFS_AGF_BTREEBLKS;
3081 }
3082
3083 xfs_alloc_log_agf(tp, agbp, logflags);
3084 *bnop = bno;
3085
3086 return 0;
3087}
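
/*
 * Worked example for the circular AGFL indexing above: with
 * xfs_agfl_size() == 119 and agf_flfirst == 118, consuming that slot
 * increments agf_flfirst to 119, which equals the array size and so wraps
 * back to 0; agf_fllast wraps the same way in xfs_alloc_put_freelist().
 */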
3088
3089/*
3090 * Log the given fields from the agf structure.
3091 */
3092void
3093xfs_alloc_log_agf(
3094 struct xfs_trans *tp,
3095 struct xfs_buf *bp,
3096 uint32_t fields)
3097{
3098 int first; /* first byte offset */
3099 int last; /* last byte offset */
3100 static const short offsets[] = {
3101 offsetof(xfs_agf_t, agf_magicnum),
3102 offsetof(xfs_agf_t, agf_versionnum),
3103 offsetof(xfs_agf_t, agf_seqno),
3104 offsetof(xfs_agf_t, agf_length),
3105 offsetof(xfs_agf_t, agf_bno_root), /* also cnt/rmap root */
3106 offsetof(xfs_agf_t, agf_bno_level), /* also cnt/rmap levels */
3107 offsetof(xfs_agf_t, agf_flfirst),
3108 offsetof(xfs_agf_t, agf_fllast),
3109 offsetof(xfs_agf_t, agf_flcount),
3110 offsetof(xfs_agf_t, agf_freeblks),
3111 offsetof(xfs_agf_t, agf_longest),
3112 offsetof(xfs_agf_t, agf_btreeblks),
3113 offsetof(xfs_agf_t, agf_uuid),
3114 offsetof(xfs_agf_t, agf_rmap_blocks),
3115 offsetof(xfs_agf_t, agf_refcount_blocks),
3116 offsetof(xfs_agf_t, agf_refcount_root),
3117 offsetof(xfs_agf_t, agf_refcount_level),
3118 /* needed so that we don't log the whole rest of the structure: */
3119 offsetof(xfs_agf_t, agf_spare64),
3120 sizeof(xfs_agf_t)
3121 };
3122
3123 trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);
3124
3125 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
3126
3127 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
3128 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
3129}
3130
3131/*
3132 * Put the block on the freelist for the allocation group.
3133 */
3134int
3135xfs_alloc_put_freelist(
3136 struct xfs_perag *pag,
3137 struct xfs_trans *tp,
3138 struct xfs_buf *agbp,
3139 struct xfs_buf *agflbp,
3140 xfs_agblock_t bno,
3141 int btreeblk)
3142{
3143 struct xfs_mount *mp = tp->t_mountp;
3144 struct xfs_agf *agf = agbp->b_addr;
3145 __be32 *blockp;
3146 int error;
3147 uint32_t logflags;
3148 __be32 *agfl_bno;
3149 int startoff;
3150
3151 if (!agflbp) {
3152 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
3153 if (error)
3154 return error;
3155 }
3156
3157 be32_add_cpu(&agf->agf_fllast, 1);
3158 if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
3159 agf->agf_fllast = 0;
3160
3161 ASSERT(!xfs_perag_agfl_needs_reset(pag));
3162 be32_add_cpu(&agf->agf_flcount, 1);
3163 pag->pagf_flcount++;
3164
3165 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
3166 if (btreeblk) {
3167 be32_add_cpu(&agf->agf_btreeblks, -1);
3168 pag->pagf_btreeblks--;
3169 logflags |= XFS_AGF_BTREEBLKS;
3170 }
3171
3172 ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
3173
3174 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3175 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
3176 *blockp = cpu_to_be32(bno);
3177 startoff = (char *)blockp - (char *)agflbp->b_addr;
3178
3179 xfs_alloc_log_agf(tp, agbp, logflags);
3180
3181 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
3182 xfs_trans_log_buf(tp, agflbp, startoff,
3183 startoff + sizeof(xfs_agblock_t) - 1);
3184 return 0;
3185}
3186
3187/*
3188 * Check that this AGF/AGI header's sequence number and length matches the AG
3189 * number and size in fsblocks.
3190 */
3191xfs_failaddr_t
3192xfs_validate_ag_length(
3193 struct xfs_buf *bp,
3194 uint32_t seqno,
3195 uint32_t length)
3196{
3197 struct xfs_mount *mp = bp->b_mount;
3198 /*
3199 * During growfs operations, the perag is not fully initialised,
3200 * so we can't use it for any useful checking. growfs ensures we can't
3201 * use it by using uncached buffers that don't have the perag attached
3202 * so we can detect and avoid this problem.
3203 */
3204 if (bp->b_pag && seqno != pag_agno(bp->b_pag))
3205 return __this_address;
3206
3207 /*
3208 * Only the last AG in the filesystem is allowed to be shorter
3209 * than the AG size recorded in the superblock.
3210 */
3211 if (length != mp->m_sb.sb_agblocks) {
3212 /*
3213 * During growfs, the new last AG can get here before we
3214 * have updated the superblock. Give it a pass on the seqno
3215 * check.
3216 */
3217 if (bp->b_pag && seqno != mp->m_sb.sb_agcount - 1)
3218 return __this_address;
3219 if (length < XFS_MIN_AG_BLOCKS)
3220 return __this_address;
3221 if (length > mp->m_sb.sb_agblocks)
3222 return __this_address;
3223 }
3224
3225 return NULL;
3226}
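
/*
 * Example for the check above (hypothetical geometry): with
 * sb_agblocks = 65536 and sb_agcount = 4, AGs 0-2 must record a length of
 * exactly 65536, while AG 3 may record any length between
 * XFS_MIN_AG_BLOCKS and 65536 to cover a device that is not an exact
 * multiple of the AG size; uncached growfs buffers (no attached perag)
 * skip the sequence number checks entirely.
 */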
3227
3228/*
3229 * Verify the AGF is consistent.
3230 *
3231 * We do not verify the AGFL indexes in the AGF are fully consistent here
3232 * because of issues with variable on-disk structure sizes. Instead, we check
3233 * the agfl indexes for consistency when we initialise the perag from the AGF
3234 * information after a read completes.
3235 *
3236 * If the index is inconsistent, then we mark the perag as needing an AGFL
3237 * reset. The first AGFL update performed then resets the AGFL indexes and
3238 * refills the AGFL with known good free blocks, allowing the filesystem to
3239 * continue operating normally at the cost of a few leaked free space blocks.
3240 */
3241static xfs_failaddr_t
3242xfs_agf_verify(
3243 struct xfs_buf *bp)
3244{
3245 struct xfs_mount *mp = bp->b_mount;
3246 struct xfs_agf *agf = bp->b_addr;
3247 xfs_failaddr_t fa;
3248 uint32_t agf_seqno = be32_to_cpu(agf->agf_seqno);
3249 uint32_t agf_length = be32_to_cpu(agf->agf_length);
3250
3251 if (xfs_has_crc(mp)) {
3252 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
3253 return __this_address;
3254 if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
3255 return __this_address;
3256 }
3257
3258 if (!xfs_verify_magic(bp, agf->agf_magicnum))
3259 return __this_address;
3260
3261 if (!XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)))
3262 return __this_address;
3263
3264 /*
3265	 * Both agf_seqno and agf_length need to be validated before anything
3266	 * else block-number related in the AGF or AGFL can be checked.
3267 */
3268 fa = xfs_validate_ag_length(bp, agf_seqno, agf_length);
3269 if (fa)
3270 return fa;
3271
3272 if (be32_to_cpu(agf->agf_flfirst) >= xfs_agfl_size(mp))
3273 return __this_address;
3274 if (be32_to_cpu(agf->agf_fllast) >= xfs_agfl_size(mp))
3275 return __this_address;
3276 if (be32_to_cpu(agf->agf_flcount) > xfs_agfl_size(mp))
3277 return __this_address;
3278
3279 if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
3280 be32_to_cpu(agf->agf_freeblks) > agf_length)
3281 return __this_address;
3282
3283 if (be32_to_cpu(agf->agf_bno_level) < 1 ||
3284 be32_to_cpu(agf->agf_cnt_level) < 1 ||
3285 be32_to_cpu(agf->agf_bno_level) > mp->m_alloc_maxlevels ||
3286 be32_to_cpu(agf->agf_cnt_level) > mp->m_alloc_maxlevels)
3287 return __this_address;
3288
3289 if (xfs_has_lazysbcount(mp) &&
3290 be32_to_cpu(agf->agf_btreeblks) > agf_length)
3291 return __this_address;
3292
3293 if (xfs_has_rmapbt(mp)) {
3294 if (be32_to_cpu(agf->agf_rmap_blocks) > agf_length)
3295 return __this_address;
3296
3297 if (be32_to_cpu(agf->agf_rmap_level) < 1 ||
3298 be32_to_cpu(agf->agf_rmap_level) > mp->m_rmap_maxlevels)
3299 return __this_address;
3300 }
3301
3302 if (xfs_has_reflink(mp)) {
3303 if (be32_to_cpu(agf->agf_refcount_blocks) > agf_length)
3304 return __this_address;
3305
3306 if (be32_to_cpu(agf->agf_refcount_level) < 1 ||
3307 be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels)
3308 return __this_address;
3309 }
3310
3311 return NULL;
3312}
3313
3314static void
3315xfs_agf_read_verify(
3316 struct xfs_buf *bp)
3317{
3318 struct xfs_mount *mp = bp->b_mount;
3319 xfs_failaddr_t fa;
3320
3321 if (xfs_has_crc(mp) &&
3322 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
3323 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
3324 else {
3325 fa = xfs_agf_verify(bp);
3326 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
3327 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3328 }
3329}
3330
3331static void
3332xfs_agf_write_verify(
3333 struct xfs_buf *bp)
3334{
3335 struct xfs_mount *mp = bp->b_mount;
3336 struct xfs_buf_log_item *bip = bp->b_log_item;
3337 struct xfs_agf *agf = bp->b_addr;
3338 xfs_failaddr_t fa;
3339
3340 fa = xfs_agf_verify(bp);
3341 if (fa) {
3342 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3343 return;
3344 }
3345
3346 if (!xfs_has_crc(mp))
3347 return;
3348
3349 if (bip)
3350 agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
3351
3352 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
3353}
3354
3355const struct xfs_buf_ops xfs_agf_buf_ops = {
3356 .name = "xfs_agf",
3357 .magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
3358 .verify_read = xfs_agf_read_verify,
3359 .verify_write = xfs_agf_write_verify,
3360 .verify_struct = xfs_agf_verify,
3361};
3362
3363/*
3364 * Read in the allocation group header (free/alloc section).
3365 */
3366int
3367xfs_read_agf(
3368 struct xfs_perag *pag,
3369 struct xfs_trans *tp,
3370 int flags,
3371 struct xfs_buf **agfbpp)
3372{
3373 struct xfs_mount *mp = pag_mount(pag);
3374 int error;
3375
3376 trace_xfs_read_agf(pag);
3377
3378 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3379 XFS_AG_DADDR(mp, pag_agno(pag), XFS_AGF_DADDR(mp)),
3380 XFS_FSS_TO_BB(mp, 1), flags, agfbpp, &xfs_agf_buf_ops);
3381 if (xfs_metadata_is_sick(error))
3382 xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF);
3383 if (error)
3384 return error;
3385
3386 xfs_buf_set_ref(*agfbpp, XFS_AGF_REF);
3387 return 0;
3388}
3389
3390/*
3391 * Read in the allocation group header (free/alloc section) and initialise the
3392 * perag structure if necessary. If the caller provides @agfbpp, then return the
3393 * locked buffer to the caller, otherwise free it.
3394 */
3395int
3396xfs_alloc_read_agf(
3397 struct xfs_perag *pag,
3398 struct xfs_trans *tp,
3399 int flags,
3400 struct xfs_buf **agfbpp)
3401{
3402 struct xfs_mount *mp = pag_mount(pag);
3403 struct xfs_buf *agfbp;
3404 struct xfs_agf *agf;
3405 int error;
3406 int allocbt_blks;
3407
3408 trace_xfs_alloc_read_agf(pag);
3409
3410 /* We don't support trylock when freeing. */
3411 ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
3412 (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
3413 error = xfs_read_agf(pag, tp,
3414 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
3415 &agfbp);
3416 if (error)
3417 return error;
3418
3419 agf = agfbp->b_addr;
3420 if (!xfs_perag_initialised_agf(pag)) {
3421 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
3422 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
3423 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
3424 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
3425 pag->pagf_bno_level = be32_to_cpu(agf->agf_bno_level);
3426 pag->pagf_cnt_level = be32_to_cpu(agf->agf_cnt_level);
3427 pag->pagf_rmap_level = be32_to_cpu(agf->agf_rmap_level);
3428 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
3429 if (xfs_agfl_needs_reset(mp, agf))
3430 set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3431 else
3432 clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3433
3434 /*
3435 * Update the in-core allocbt counter. Filter out the rmapbt
3436 * subset of the btreeblks counter because the rmapbt is managed
3437 * by perag reservation. Subtract one for the rmapbt root block
3438 * because the rmap counter includes it while the btreeblks
3439 * counter only tracks non-root blocks.
3440 */
		allocbt_blks = pag->pagf_btreeblks;
		if (xfs_has_rmapbt(mp))
			allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
		if (allocbt_blks > 0)
			atomic64_add(allocbt_blks, &mp->m_allocbt_blks);

		set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
	}
#ifdef DEBUG
	else if (!xfs_is_shutdown(mp)) {
		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
		ASSERT(pag->pagf_bno_level == be32_to_cpu(agf->agf_bno_level));
		ASSERT(pag->pagf_cnt_level == be32_to_cpu(agf->agf_cnt_level));
	}
#endif
	if (agfbpp)
		*agfbpp = agfbp;
	else
		xfs_trans_brelse(tp, agfbp);
	return 0;
}

/*
 * Pre-process allocation arguments to set initial state that we don't require
 * callers to set up correctly, as well as bounds check the allocation args
 * that are set up.
 */
static int
xfs_alloc_vextent_check_args(
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		target,
	xfs_agnumber_t		*minimum_agno)
{
	struct xfs_mount	*mp = args->mp;
	xfs_agblock_t		agsize;

	args->fsbno = NULLFSBLOCK;

	*minimum_agno = 0;
	if (args->tp->t_highest_agno != NULLAGNUMBER)
		*minimum_agno = args->tp->t_highest_agno;

	/*
	 * Just fix this up, for the case where the last a.g. is shorter
	 * (or there's only one a.g.) and the caller couldn't easily figure
	 * that out (xfs_bmap_alloc).
	 */
	agsize = mp->m_sb.sb_agblocks;
	if (args->maxlen > agsize)
		args->maxlen = agsize;
	if (args->alignment == 0)
		args->alignment = 1;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->alignment > 0);
	ASSERT(args->resv != XFS_AG_RESV_AGFL);

	ASSERT(XFS_FSB_TO_AGNO(mp, target) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, target) < agsize);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->minlen <= agsize);
	ASSERT(args->mod < args->prod);

	if (XFS_FSB_TO_AGNO(mp, target) >= mp->m_sb.sb_agcount ||
	    XFS_FSB_TO_AGBNO(mp, target) >= agsize ||
	    args->minlen > args->maxlen || args->minlen > agsize ||
	    args->mod >= args->prod) {
		trace_xfs_alloc_vextent_badargs(args);
		return -ENOSPC;
	}

	if (args->agno != NULLAGNUMBER && *minimum_agno > args->agno) {
		trace_xfs_alloc_vextent_skip_deadlock(args);
		return -ENOSPC;
	}
	return 0;
}

/*
 * Prepare an AG for allocation. If the AG is not prepared to accept the
 * allocation, return failure.
 *
 * XXX(dgc): The complexity of "need_pag" will go away as all caller paths are
 * modified to hold their own perag references.
 */
static int
xfs_alloc_vextent_prepare_ag(
	struct xfs_alloc_arg	*args,
	uint32_t		alloc_flags)
{
	bool			need_pag = !args->pag;
	int			error;

	if (need_pag)
		args->pag = xfs_perag_get(args->mp, args->agno);

	args->agbp = NULL;
	error = xfs_alloc_fix_freelist(args, alloc_flags);
	if (error) {
		trace_xfs_alloc_vextent_nofix(args);
		if (need_pag)
			xfs_perag_put(args->pag);
		args->agbno = NULLAGBLOCK;
		return error;
	}
	if (!args->agbp) {
		/* cannot allocate in this AG at all */
		trace_xfs_alloc_vextent_noagbp(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	args->wasfromfl = 0;
	return 0;
}

/*
 * Post-process allocation results to account for the allocation if it
 * succeeded and set the allocated block number correctly for the caller.
 *
 * XXX: we should really be returning ENOSPC for ENOSPC, not
 * hiding it behind a "successful" NULLFSBLOCK allocation.
 */
static int
xfs_alloc_vextent_finish(
	struct xfs_alloc_arg	*args,
	xfs_agnumber_t		minimum_agno,
	int			alloc_error,
	bool			drop_perag)
{
	struct xfs_mount	*mp = args->mp;
	int			error = 0;

	/*
	 * We can end up here with a locked AGF. If we failed, the caller is
	 * likely going to try to allocate again with different parameters, and
	 * that can widen the AGs that are searched for free space. If we have
	 * to do BMBT block allocation, we have to do a new allocation.
	 *
	 * Hence leaving this function with the AGF locked opens up potential
	 * ABBA AGF deadlocks because a future allocation attempt in this
	 * transaction may attempt to lock a lower number AGF.
	 *
	 * We can't release the AGF until the transaction is committed, so at
	 * this point we must update the "first allocation" tracker to point at
	 * this AG if the tracker is empty or points to a lower AG. This allows
	 * the next allocation attempt to be modified appropriately to avoid
	 * deadlocks.
	 */
	if (args->agbp &&
	    (args->tp->t_highest_agno == NULLAGNUMBER ||
	     args->agno > minimum_agno))
		args->tp->t_highest_agno = args->agno;

	/*
	 * If the allocation failed with an error or we had an ENOSPC result,
	 * preserve the returned error whilst also marking the allocation result
	 * as "no extent allocated". This ensures that callers that fail to
	 * capture the error will still treat it as a failed allocation.
	 */
	if (alloc_error || args->agbno == NULLAGBLOCK) {
		args->fsbno = NULLFSBLOCK;
		error = alloc_error;
		goto out_drop_perag;
	}

	args->fsbno = xfs_agbno_to_fsb(args->pag, args->agbno);

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(args->agbno % args->alignment == 0);
	XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno), args->len);

	/* if not file data, insert new block into the reverse map btree */
	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
		error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
				args->agbno, args->len, &args->oinfo);
		if (error)
			goto out_drop_perag;
	}

	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->agbp,
				-((long)(args->len)));
		if (error)
			goto out_drop_perag;

		ASSERT(!xfs_extent_busy_search(pag_group(args->pag),
				args->agbno, args->len));
	}

	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);

	XFS_STATS_INC(mp, xs_allocx);
	XFS_STATS_ADD(mp, xs_allocb, args->len);

	trace_xfs_alloc_vextent_finish(args);

out_drop_perag:
	if (drop_perag && args->pag) {
		xfs_perag_rele(args->pag);
		args->pag = NULL;
	}
	return error;
}

/*
 * Allocate within a single AG only. This uses a best-fit length algorithm so if
 * you need an exact-sized allocation without locality constraints, this is the
 * fastest way to do it.
 *
 * Caller is expected to hold a perag reference in args->pag.
 */
int
xfs_alloc_vextent_this_ag(
	struct xfs_alloc_arg	*args,
	xfs_agnumber_t		agno)
{
	xfs_agnumber_t		minimum_agno;
	uint32_t		alloc_flags = 0;
	int			error;

	ASSERT(args->pag != NULL);
	ASSERT(pag_agno(args->pag) == agno);

	args->agno = agno;
	args->agbno = 0;

	trace_xfs_alloc_vextent_this_ag(args);

	error = xfs_alloc_vextent_check_args(args,
			xfs_agbno_to_fsb(args->pag, 0), &minimum_agno);
	if (error) {
		if (error == -ENOSPC)
			return 0;
		return error;
	}

	error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
	if (!error && args->agbp)
		error = xfs_alloc_ag_vextent_size(args, alloc_flags);

	return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
}
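
/*
 * Example usage (illustrative sketch only; field values are hypothetical
 * and error handling is trimmed):
 *
 *	struct xfs_alloc_arg	args = {
 *		.tp	= tp,
 *		.mp	= mp,
 *		.pag	= pag,
 *		.oinfo	= XFS_RMAP_OINFO_ANY_OWNER,
 *		.minlen	= nblocks,
 *		.maxlen	= nblocks,
 *		.prod	= 1,
 *		.resv	= XFS_AG_RESV_NONE,
 *	};
 *
 *	error = xfs_alloc_vextent_this_ag(&args, pag_agno(pag));
 *	if (!error && args.fsbno != NULLFSBLOCK)
 *		... extent of args.len blocks allocated at args.fsbno ...
 */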

/*
 * Iterate all AGs trying to allocate an extent starting from @start_ag.
 *
 * If the incoming allocation type is XFS_ALLOCTYPE_NEAR_BNO, it means the
 * allocation attempts in @start_agno have locality information. If we fail to
 * allocate in that AG, then we revert to anywhere-in-AG for all the other AGs
 * we attempt to allocate in as there is no locality optimisation possible for
 * those allocations.
 *
 * On return, args->pag may be left referenced if we finish before the "all
 * failed" return point. The allocation finish still needs the perag, and
 * so the caller will release it once they've finished the allocation.
 *
 * When we wrap the AG iteration at the end of the filesystem, we have to be
 * careful not to wrap into AGs below ones we already have locked in the
 * transaction if we are doing a blocking iteration. This will result in an
 * out-of-order locking of AGFs and hence can cause deadlocks.
 */
static int
xfs_alloc_vextent_iterate_ags(
	struct xfs_alloc_arg	*args,
	xfs_agnumber_t		minimum_agno,
	xfs_agnumber_t		start_agno,
	xfs_agblock_t		target_agbno,
	uint32_t		alloc_flags)
{
	struct xfs_mount	*mp = args->mp;
	xfs_agnumber_t		restart_agno = minimum_agno;
	xfs_agnumber_t		agno;
	int			error = 0;

	if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)
		restart_agno = 0;
restart:
	for_each_perag_wrap_range(mp, start_agno, restart_agno,
			mp->m_sb.sb_agcount, agno, args->pag) {
		args->agno = agno;
		error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
		if (error)
			break;
		if (!args->agbp) {
			trace_xfs_alloc_vextent_loopfailed(args);
			continue;
		}

		/*
		 * Allocation is supposed to succeed now, so break out of the
		 * loop regardless of whether we succeed or not.
		 */
		if (args->agno == start_agno && target_agbno) {
			args->agbno = target_agbno;
			error = xfs_alloc_ag_vextent_near(args, alloc_flags);
		} else {
			args->agbno = 0;
			error = xfs_alloc_ag_vextent_size(args, alloc_flags);
		}
		break;
	}
	if (error) {
		xfs_perag_rele(args->pag);
		args->pag = NULL;
		return error;
	}
	if (args->agbp)
		return 0;

	/*
	 * We didn't find an AG we can allocate from. If we were given
	 * constraining flags by the caller, drop them and retry the allocation
	 * without any constraints being set.
	 */
	if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK) {
		alloc_flags &= ~XFS_ALLOC_FLAG_TRYLOCK;
		restart_agno = minimum_agno;
		goto restart;
	}

	ASSERT(args->pag == NULL);
	trace_xfs_alloc_vextent_allfailed(args);
	return 0;
}

/*
 * Iterate through the AGs from the start AG to the end of the filesystem,
 * trying to allocate blocks. It starts with a near allocation attempt in the
 * initial AG, then falls back to anywhere-in-ag after the first AG fails. It
 * will wrap back to zero if allowed by previous allocations in this
 * transaction, otherwise will wrap back to the start AG and run a second
 * blocking pass to the end of the filesystem.
 */
int
xfs_alloc_vextent_start_ag(
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		target)
{
	struct xfs_mount	*mp = args->mp;
	xfs_agnumber_t		minimum_agno;
	xfs_agnumber_t		start_agno;
	xfs_agnumber_t		rotorstep = xfs_rotorstep;
	bool			bump_rotor = false;
	uint32_t		alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
	int			error;

	ASSERT(args->pag == NULL);

	args->agno = NULLAGNUMBER;
	args->agbno = NULLAGBLOCK;

	trace_xfs_alloc_vextent_start_ag(args);

	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
	if (error) {
		if (error == -ENOSPC)
			return 0;
		return error;
	}

	if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
	    xfs_is_inode32(mp)) {
		target = XFS_AGB_TO_FSB(mp,
				((mp->m_agfrotor / rotorstep) %
				mp->m_sb.sb_agcount), 0);
		bump_rotor = true;
	}

	start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
	error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
			XFS_FSB_TO_AGBNO(mp, target), alloc_flags);

	if (bump_rotor) {
		if (args->agno == start_agno)
			mp->m_agfrotor = (mp->m_agfrotor + 1) %
				(mp->m_sb.sb_agcount * rotorstep);
		else
			mp->m_agfrotor = (args->agno * rotorstep + 1) %
				(mp->m_sb.sb_agcount * rotorstep);
	}

	return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
}
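
/*
 * Rotor arithmetic example (illustrative numbers): with sb_agcount = 4 and
 * xfs_rotorstep = 1, m_agfrotor cycles 0, 1, 2, 3, 0, ... so successive
 * initial user data allocations on an inode32 filesystem start their AG
 * iteration in successive AGs.
 */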

/*
 * Iterate from the agno indicated via @target through to the end of the
 * filesystem attempting blocking allocation. This does not wrap or try a second
 * pass, so will not recurse into AGs lower than indicated by the target.
 */
int
xfs_alloc_vextent_first_ag(
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		target)
{
	struct xfs_mount	*mp = args->mp;
	xfs_agnumber_t		minimum_agno;
	xfs_agnumber_t		start_agno;
	uint32_t		alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
	int			error;

	ASSERT(args->pag == NULL);

	args->agno = NULLAGNUMBER;
	args->agbno = NULLAGBLOCK;

	trace_xfs_alloc_vextent_first_ag(args);

	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
	if (error) {
		if (error == -ENOSPC)
			return 0;
		return error;
	}

	start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
	error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
			XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
	return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
}

/*
 * Allocate at the exact block target or fail. Caller is expected to hold a
 * perag reference in args->pag.
 */
int
xfs_alloc_vextent_exact_bno(
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		target)
{
	struct xfs_mount	*mp = args->mp;
	xfs_agnumber_t		minimum_agno;
	int			error;

	ASSERT(args->pag != NULL);
	ASSERT(pag_agno(args->pag) == XFS_FSB_TO_AGNO(mp, target));

	args->agno = XFS_FSB_TO_AGNO(mp, target);
	args->agbno = XFS_FSB_TO_AGBNO(mp, target);

	trace_xfs_alloc_vextent_exact_bno(args);

	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
	if (error) {
		if (error == -ENOSPC)
			return 0;
		return error;
	}

	error = xfs_alloc_vextent_prepare_ag(args, 0);
	if (!error && args->agbp)
		error = xfs_alloc_ag_vextent_exact(args);

	return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
}

/*
 * Allocate an extent as close to the target as possible. If there are no
 * viable candidates in the AG, then fail the allocation.
 *
 * Caller may or may not have a per-ag reference in args->pag.
 */
int
xfs_alloc_vextent_near_bno(
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		target)
{
	struct xfs_mount	*mp = args->mp;
	xfs_agnumber_t		minimum_agno;
	bool			needs_perag = args->pag == NULL;
	uint32_t		alloc_flags = 0;
	int			error;

	if (!needs_perag)
		ASSERT(pag_agno(args->pag) == XFS_FSB_TO_AGNO(mp, target));

	args->agno = XFS_FSB_TO_AGNO(mp, target);
	args->agbno = XFS_FSB_TO_AGBNO(mp, target);

	trace_xfs_alloc_vextent_near_bno(args);

	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
	if (error) {
		if (error == -ENOSPC)
			return 0;
		return error;
	}

	if (needs_perag)
		args->pag = xfs_perag_grab(mp, args->agno);

	error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
	if (!error && args->agbp)
		error = xfs_alloc_ag_vextent_near(args, alloc_flags);

	return xfs_alloc_vextent_finish(args, minimum_agno, error, needs_perag);
}
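
/*
 * Example usage (illustrative sketch only; here the perag reference is
 * grabbed internally because args.pag is left NULL, and target_fsbno and
 * nblocks are hypothetical caller values):
 *
 *	struct xfs_alloc_arg	args = {
 *		.tp	= tp,
 *		.mp	= mp,
 *		.oinfo	= XFS_RMAP_OINFO_ANY_OWNER,
 *		.minlen	= 1,
 *		.maxlen	= nblocks,
 *		.prod	= 1,
 *		.resv	= XFS_AG_RESV_NONE,
 *	};
 *
 *	error = xfs_alloc_vextent_near_bno(&args, target_fsbno);
 */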

/* Ensure that the freelist is at full capacity. */
int
xfs_free_extent_fix_freelist(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_buf		**agbp)
{
	struct xfs_alloc_arg	args;
	int			error;

	memset(&args, 0, sizeof(struct xfs_alloc_arg));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.agno = pag_agno(pag);
	args.pag = pag;

	/*
	 * Validate that the block number is legal - this enables us to detect
	 * and handle a silent filesystem corruption rather than crashing.
	 */
	if (args.agno >= args.mp->m_sb.sb_agcount)
		return -EFSCORRUPTED;

	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
	if (error)
		return error;

	*agbp = args.agbp;
	return 0;
}

/*
 * Free an extent.
 * Just break up the extent address and hand off to xfs_free_ag_extent
 * after fixing up the freelist.
 */
int
__xfs_free_extent(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type	type,
	bool			skip_discard)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	int			error;
	unsigned int		busy_flags = 0;

	ASSERT(len != 0);
	ASSERT(type != XFS_AG_RESV_AGFL);

	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_FREE_EXTENT))
		return -EIO;

	error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
	if (error) {
		if (xfs_metadata_is_sick(error))
			xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);
		return error;
	}

	agf = agbp->b_addr;

	if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
		xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);
		error = -EFSCORRUPTED;
		goto err_release;
	}

	/* validate the extent size is legal now we have the agf locked */
	if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
		xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);
		error = -EFSCORRUPTED;
		goto err_release;
	}

	error = xfs_free_ag_extent(tp, agbp, agbno, len, oinfo, type);
	if (error)
		goto err_release;

	if (skip_discard)
		busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
	xfs_extent_busy_insert(tp, pag_group(pag), agbno, len, busy_flags);
	return 0;

err_release:
	xfs_trans_brelse(tp, agbp);
	return error;
}
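
/*
 * Example usage (illustrative sketch only; the caller is assumed to hold a
 * perag reference and a dirty transaction):
 *
 *	error = __xfs_free_extent(tp, pag, agbno, len,
 *			&XFS_RMAP_OINFO_ANY_OWNER, XFS_AG_RESV_NONE, false);
 */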

struct xfs_alloc_query_range_info {
	xfs_alloc_query_range_fn	fn;
	void				*priv;
};

/* Format btree record and pass to our callback. */
STATIC int
xfs_alloc_query_range_helper(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	struct xfs_alloc_query_range_info	*query = priv;
	struct xfs_alloc_rec_incore		irec;
	xfs_failaddr_t				fa;

	xfs_alloc_btrec_to_irec(rec, &irec);
	fa = xfs_alloc_check_irec(to_perag(cur->bc_group), &irec);
	if (fa)
		return xfs_alloc_complain_bad_rec(cur, fa, &irec);

	return query->fn(cur, &irec, query->priv);
}

/* Find all free space within a given range of blocks. */
int
xfs_alloc_query_range(
	struct xfs_btree_cur		*cur,
	const struct xfs_alloc_rec_incore *low_rec,
	const struct xfs_alloc_rec_incore *high_rec,
	xfs_alloc_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_irec		low_brec = { .a = *low_rec };
	union xfs_btree_irec		high_brec = { .a = *high_rec };
	struct xfs_alloc_query_range_info query = { .priv = priv, .fn = fn };

	ASSERT(xfs_btree_is_bno(cur->bc_ops));
	return xfs_btree_query_range(cur, &low_brec, &high_brec,
			xfs_alloc_query_range_helper, &query);
}
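
/*
 * Example query callback (hypothetical helper, shown for illustration):
 * count the free extents seen in the queried range via a counter passed
 * through @priv.
 *
 *	STATIC int
 *	xfs_alloc_count_free_helper(
 *		struct xfs_btree_cur			*cur,
 *		const struct xfs_alloc_rec_incore	*rec,
 *		void					*priv)
 *	{
 *		unsigned int	*nr = priv;
 *
 *		(*nr)++;
 *		return 0;
 *	}
 */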

/* Find all free space records. */
int
xfs_alloc_query_all(
	struct xfs_btree_cur		*cur,
	xfs_alloc_query_range_fn	fn,
	void				*priv)
{
	struct xfs_alloc_query_range_info	query;

	ASSERT(xfs_btree_is_bno(cur->bc_ops));
	query.priv = priv;
	query.fn = fn;
	return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
}

/*
 * Scan part of the keyspace of the free space and tell us if the area has no
 * records, is fully mapped by records, or is partially filled.
 */
int
xfs_alloc_has_records(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	enum xbtree_recpacking	*outcome)
{
	union xfs_btree_irec	low;
	union xfs_btree_irec	high;

	memset(&low, 0, sizeof(low));
	low.a.ar_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.a.ar_startblock = bno + len - 1;

	return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
}
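
/*
 * Example usage (illustrative sketch only): a caller verifying that an
 * allocated extent is entirely absent from the free space btree would
 * expect XBTREE_RECPACKING_EMPTY:
 *
 *	error = xfs_alloc_has_records(cur, agbno, len, &outcome);
 *	if (!error && outcome != XBTREE_RECPACKING_EMPTY)
 *		... the extent overlaps recorded free space ...
 */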

/*
 * Walk all the blocks in the AGFL. The @walk_fn can return any negative
 * error code or XFS_ITER_*.
 */
int
xfs_agfl_walk(
	struct xfs_mount	*mp,
	struct xfs_agf		*agf,
	struct xfs_buf		*agflbp,
	xfs_agfl_walk_fn	walk_fn,
	void			*priv)
{
	__be32			*agfl_bno;
	unsigned int		i;
	int			error;

	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
	i = be32_to_cpu(agf->agf_flfirst);

	/* Nothing to walk in an empty AGFL. */
	if (agf->agf_flcount == cpu_to_be32(0))
		return 0;

	/* Otherwise, walk from first to last, wrapping as needed. */
	for (;;) {
		error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
		if (error)
			return error;
		if (i == be32_to_cpu(agf->agf_fllast))
			break;
		if (++i == xfs_agfl_size(mp))
			i = 0;
	}

	return 0;
}
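
/*
 * Example walk function (hypothetical helper, shown for illustration):
 * accumulate the number of AGFL blocks visited via a counter in @priv.
 *
 *	static int
 *	xfs_agfl_count_block(
 *		struct xfs_mount	*mp,
 *		xfs_agblock_t		agbno,
 *		void			*priv)
 *	{
 *		unsigned int	*nr = priv;
 *
 *		(*nr)++;
 *		return 0;
 *	}
 */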

int __init
xfs_extfree_intent_init_cache(void)
{
	xfs_extfree_item_cache = kmem_cache_create("xfs_extfree_intent",
			sizeof(struct xfs_extent_free_item),
			0, 0, NULL);

	return xfs_extfree_item_cache != NULL ? 0 : -ENOMEM;
}

void
xfs_extfree_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_extfree_item_cache);
	xfs_extfree_item_cache = NULL;
}