// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"

static struct kmem_cache	*xfs_bmbt_cur_cache;

/*
 * Convert on-disk form of btree root to in-memory form.
 */
void
xfs_bmdr_to_bmbt(
	struct xfs_inode	*ip,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen,
	struct xfs_btree_block	*rblock,
	int			rblocklen)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	rblock->bb_level = dblock->bb_level;
	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
	rblock->bb_numrecs = dblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

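/*
 * Convert the on-disk form of a bmap extent record to the uncompressed
 * in-memory form.
 */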
void
xfs_bmbt_disk_get_all(
	const struct xfs_bmbt_rec *rec,
	struct xfs_bmbt_irec	*irec)
{
	uint64_t		l0 = get_unaligned_be64(&rec->l0);
	uint64_t		l1 = get_unaligned_be64(&rec->l1);

	irec->br_startoff = (l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
	irec->br_startblock = ((l0 & xfs_mask64lo(9)) << 43) | (l1 >> 21);
	irec->br_blockcount = l1 & xfs_mask64lo(21);
	if (l0 >> (64 - BMBT_EXNTFLAG_BITLEN))
		irec->br_state = XFS_EXT_UNWRITTEN;
	else
		irec->br_state = XFS_EXT_NORM;
}

/*
 * Extract the blockcount field from an on disk bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_disk_get_blockcount(
	const struct xfs_bmbt_rec	*r)
{
	return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
}

/*
 * Extract the startoff field from a disk format bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_disk_get_startoff(
	const struct xfs_bmbt_rec	*r)
{
	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}

/*
 * Set all the fields in a bmap extent record from the uncompressed form.
 */
void
xfs_bmbt_disk_set_all(
	struct xfs_bmbt_rec	*r,
	struct xfs_bmbt_irec	*s)
{
	int			extent_flag = (s->br_state != XFS_EXT_NORM);

	ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
	ASSERT(!(s->br_startoff & xfs_mask64hi(64 - BMBT_STARTOFF_BITLEN)));
	ASSERT(!(s->br_blockcount & xfs_mask64hi(64 - BMBT_BLOCKCOUNT_BITLEN)));
	ASSERT(!(s->br_startblock & xfs_mask64hi(64 - BMBT_STARTBLOCK_BITLEN)));

	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)extent_flag << 63) |
		 ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
		 ((xfs_bmbt_rec_base_t)s->br_startblock >> 43), &r->l0);
	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
		 ((xfs_bmbt_rec_base_t)s->br_blockcount &
		  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)), &r->l1);
}

/*
 * Convert in-memory form of btree root to on-disk form.
 */
void
xfs_bmbt_to_bmdr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,
	int			rblocklen,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen)
{
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	if (xfs_has_crc(mp)) {
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
		       &mp->m_sb.sb_meta_uuid));
		ASSERT(rblock->bb_u.l.bb_blkno ==
		       cpu_to_be64(XFS_BUF_DADDR_NULL));
	} else
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_level != 0);
	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

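/*
 * Duplicate a bmap btree cursor so that a btree operation can be restarted
 * against the same inode fork.
 */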
STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	struct xfs_btree_cur	*new;

	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ino.ip, cur->bc_ino.whichfork);

	/*
	 * Copy the flags values, since init cursor doesn't get them.
	 */
	new->bc_ino.flags = cur->bc_ino.flags;

	return new;
}

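/*
 * Fold the allocation state of one cursor into another when a btree
 * operation completes.
 */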
STATIC void
xfs_bmbt_update_cursor(
	struct xfs_btree_cur	*src,
	struct xfs_btree_cur	*dst)
{
	ASSERT((dst->bc_tp->t_highest_agno != NULLAGNUMBER) ||
	       (dst->bc_ino.ip->i_diflags & XFS_DIFLAG_REALTIME));

	dst->bc_ino.allocated += src->bc_ino.allocated;
	dst->bc_tp->t_highest_agno = src->bc_tp->t_highest_agno;

	src->bc_ino.allocated = 0;
}

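/*
 * Allocate a single block for a new bmap btree block, honoring the
 * transaction's block reservation and falling back to the lowspace
 * allocator if no AG has room for a full tree split.
 */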
STATIC int
xfs_bmbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	struct xfs_alloc_arg	args;
	int			error;

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_ino.ip->i_ino,
			cur->bc_ino.whichfork);
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL;
	if (!args.wasdel && args.tp->t_blk_res == 0)
		return -ENOSPC;

	/*
	 * If we are coming here from something like unwritten extent
	 * conversion, there has been no data extent allocation already done,
	 * so we have to ensure that we attempt to locate the entire set of
	 * bmbt allocations in the same AG, as xfs_bmapi_write() would have
	 * reserved.
	 */
	if (cur->bc_tp->t_highest_agno == NULLAGNUMBER)
		args.minleft = xfs_bmapi_minleft(cur->bc_tp, cur->bc_ino.ip,
					cur->bc_ino.whichfork);

	error = xfs_alloc_vextent_start_ag(&args, be64_to_cpu(start->l));
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK && args.minleft) {
		/*
		 * Could not find an AG with enough free space to satisfy a
		 * full btree split.  Try again and if successful activate
		 * the lowspace algorithm.
		 */
		args.minleft = 0;
		error = xfs_alloc_vextent_start_ag(&args, 0);
		if (error)
			return error;
		cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE;
	}
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		*stat = 0;
		return 0;
	}

	ASSERT(args.len == 1);
	cur->bc_ino.allocated++;
	cur->bc_ino.ip->i_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_ino.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);

	*stat = 1;
	return 0;
}

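/*
 * Free a bmap btree block.  The extent free is deferred; the inode block
 * count and quota usage are updated immediately.
 */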
STATIC int
xfs_bmbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_trans	*tp = cur->bc_tp;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
	struct xfs_owner_info	oinfo;
	int			error;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
	error = xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo,
			XFS_AG_RESV_NONE, false);
	if (error)
		return error;

	ip->i_nblocks--;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	return 0;
}

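/*
 * Minimum records per block.  The root lives in the inode fork, so its
 * limit depends on the space available there.
 */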
STATIC int
xfs_bmbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0) / 2;
	}

	return cur->bc_mp->m_bmap_dmnr[level != 0];
}

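/*
 * Maximum records per block, with the same special case for the root in
 * the inode fork.
 */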
int
xfs_bmbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0);
	}

	return cur->bc_mp->m_bmap_dmxr[level != 0];
}

/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it. After a
 * resize to the maximum size this function returns the same value
 * as xfs_bmbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_bmbt_get_dmaxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level != cur->bc_nlevels - 1)
		return cur->bc_mp->m_bmap_dmxr[level != 0];
	return xfs_bmdr_maxrecs(cur->bc_ino.forksize, level == 0);
}

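/* Initialize a key from the startoff of the given record. */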
STATIC void
xfs_bmbt_init_key_from_rec(
	union xfs_btree_key	*key,
	const union xfs_btree_rec *rec)
{
	key->bmbt.br_startoff =
		cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}

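/* The high key of an extent record is the last file offset it maps. */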
STATIC void
xfs_bmbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	const union xfs_btree_rec *rec)
{
	key->bmbt.br_startoff = cpu_to_be64(
		xfs_bmbt_disk_get_startoff(&rec->bmbt) +
		xfs_bmbt_disk_get_blockcount(&rec->bmbt) - 1);
}

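/* Build the on-disk record from the incore record in the cursor. */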
STATIC void
xfs_bmbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}

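/* The root is in the inode core, so there is no on-disk root pointer. */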
STATIC void
xfs_bmbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}

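/* Compare the startoff in the given key against the incore cursor record. */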
STATIC int64_t
xfs_bmbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
				      cur->bc_rec.b.br_startoff;
}

STATIC int64_t
xfs_bmbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	uint64_t			a = be64_to_cpu(k1->bmbt.br_startoff);
	uint64_t			b = be64_to_cpu(k2->bmbt.br_startoff);

	ASSERT(!mask || mask->bmbt.br_startoff);

	/*
	 * Note: This routine previously cast a and b to int64 and subtracted
	 * them to generate a result.  This led to problems if b was the
	 * "maximum" key value (all ones) being signed incorrectly, hence this
	 * somewhat less efficient version.
	 */
	if (a > b)
		return 1;
	if (b > a)
		return -1;
	return 0;
}

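/* Check the structural integrity of a bmap btree block. */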
static xfs_failaddr_t
xfs_bmbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		/*
		 * XXX: need a better way of verifying the owner here. Right
		 * now just make sure there has been one set.
		 */
		fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
		if (fa)
			return fa;
	}

	/*
	 * numrecs and level verification.
	 *
	 * We don't know what fork we belong to, so just verify that the level
	 * does not exceed the maximum of the two forks.  Later checks will be
	 * more precise.
	 */
	level = be16_to_cpu(block->bb_level);
	if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
		return __this_address;

	return xfs_btree_lblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
}

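/* Verify the CRC and structure of a bmap btree block as it is read in. */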
static void
xfs_bmbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_lblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_bmbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

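/* Verify the structure of a bmap btree block and stamp its CRC before write. */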
static void
xfs_bmbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_bmbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_lblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bmbt_buf_ops = {
	.name = "xfs_bmbt",
	.magic = { cpu_to_be32(XFS_BMAP_MAGIC),
		   cpu_to_be32(XFS_BMAP_CRC_MAGIC) },
	.verify_read = xfs_bmbt_read_verify,
	.verify_write = xfs_bmbt_write_verify,
	.verify_struct = xfs_bmbt_verify,
};

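/* Check that two keys are in increasing startoff order. */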
STATIC int
xfs_bmbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be64_to_cpu(k1->bmbt.br_startoff) <
		be64_to_cpu(k2->bmbt.br_startoff);
}

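/* Check that two records are in order and do not overlap. */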
STATIC int
xfs_bmbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
		xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
		xfs_bmbt_disk_get_startoff(&r2->bmbt);
}

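/* Decide whether the file offsets described by these two keys are adjacent. */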
STATIC enum xbtree_key_contig
xfs_bmbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->bmbt.br_startoff);

	return xbtree_key_contig(be64_to_cpu(key1->bmbt.br_startoff),
				 be64_to_cpu(key2->bmbt.br_startoff));
}

static const struct xfs_btree_ops xfs_bmbt_ops = {
	.rec_len		= sizeof(xfs_bmbt_rec_t),
	.key_len		= sizeof(xfs_bmbt_key_t),

	.dup_cursor		= xfs_bmbt_dup_cursor,
	.update_cursor		= xfs_bmbt_update_cursor,
	.alloc_block		= xfs_bmbt_alloc_block,
	.free_block		= xfs_bmbt_free_block,
	.get_maxrecs		= xfs_bmbt_get_maxrecs,
	.get_minrecs		= xfs_bmbt_get_minrecs,
	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bmbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
	.key_diff		= xfs_bmbt_key_diff,
	.diff_two_keys		= xfs_bmbt_diff_two_keys,
	.buf_ops		= &xfs_bmbt_buf_ops,
	.keys_inorder		= xfs_bmbt_keys_inorder,
	.recs_inorder		= xfs_bmbt_recs_inorder,
	.keys_contiguous	= xfs_bmbt_keys_contiguous,
};

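/*
 * Allocate and initialize the parts of a bmap btree cursor that are common
 * to regular and staging cursors; callers fill in the fork-specific state.
 */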
static struct xfs_btree_cur *
xfs_bmbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_btree_cur	*cur;

	ASSERT(whichfork != XFS_COW_FORK);

	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
			mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);

	cur->bc_ops = &xfs_bmbt_ops;
	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
	if (xfs_has_crc(mp))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_ino.ip = ip;
	cur->bc_ino.allocated = 0;
	cur->bc_ino.flags = 0;

	return cur;
}

/*
 * Allocate a new bmap btree cursor.
 */
struct xfs_btree_cur *
xfs_bmbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur;

	cur = xfs_bmbt_init_common(mp, tp, ip, whichfork);

	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
	cur->bc_ino.forksize = xfs_inode_fork_size(ip, whichfork);
	cur->bc_ino.whichfork = whichfork;

	return cur;
}

/* Calculate number of records in a block mapping btree block. */
static inline unsigned int
xfs_bmbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(xfs_bmbt_rec_t);
	return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}

/*
 * Allocate a new bmap btree cursor for reloading an inode block mapping data
 * structure.  Note that callers can use the staged cursor to reload extents
 * format inode forks if they rebuild the iext tree and commit the staged
 * cursor immediately.
 */
struct xfs_btree_cur *
xfs_bmbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	struct xbtree_ifakeroot	*ifake)
{
	struct xfs_btree_cur	*cur;
	struct xfs_btree_ops	*ops;

	/* data fork always has larger maxheight */
	cur = xfs_bmbt_init_common(mp, NULL, ip, XFS_DATA_FORK);
	cur->bc_nlevels = ifake->if_levels;
	cur->bc_ino.forksize = ifake->if_fork_size;

	/* Don't let anyone think we're attached to the real fork yet. */
	cur->bc_ino.whichfork = -1;
	xfs_btree_stage_ifakeroot(cur, ifake, &ops);
	ops->update_cursor = NULL;
	return cur;
}

/*
 * Swap in the new inode fork root.  Once we pass this point the newly rebuilt
 * mappings are in place and we have to kill off any old btree blocks.
 */
void
xfs_bmbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	int			whichfork)
{
	struct xbtree_ifakeroot	*ifake = cur->bc_ino.ifake;
	struct xfs_ifork	*ifp;
	static const short	brootflag[2] = {XFS_ILOG_DBROOT, XFS_ILOG_ABROOT};
	static const short	extflag[2] = {XFS_ILOG_DEXT, XFS_ILOG_AEXT};
	int			flags = XFS_ILOG_CORE;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
	ASSERT(whichfork != XFS_COW_FORK);

	/*
	 * Free any resources hanging off the real fork, then shallow-copy the
	 * staging fork's contents into the real fork to transfer everything
	 * we just built.
	 */
	ifp = xfs_ifork_ptr(cur->bc_ino.ip, whichfork);
	xfs_idestroy_fork(ifp);
	memcpy(ifp, ifake->if_fork, sizeof(struct xfs_ifork));

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		flags |= extflag[whichfork];
		break;
	case XFS_DINODE_FMT_BTREE:
		flags |= brootflag[whichfork];
		break;
	default:
		ASSERT(0);
		break;
	}
	xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
	xfs_btree_commit_ifakeroot(cur, tp, whichfork, &xfs_bmbt_ops);
}

/*
 * Calculate number of records in a bmap btree block.
 */
int
xfs_bmbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_BMBT_BLOCK_LEN(mp);
	return xfs_bmbt_block_maxrecs(blocklen, leaf);
}

/*
 * Calculate the maximum possible height of the btree that the on-disk format
 * supports.  This is used for sizing structures large enough to support every
 * possible configuration of a filesystem that might get mounted.
 */
unsigned int
xfs_bmbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	minrecs[0] = xfs_bmbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_bmbt_block_maxrecs(blocklen, false) / 2;

	/* One extra level for the inode root. */
	return xfs_btree_compute_maxlevels(minrecs,
			XFS_MAX_EXTCNT_DATA_FORK_LARGE) + 1;
}

/*
 * Calculate number of records in a bmap btree inode root.
 */
int
xfs_bmdr_maxrecs(
	int			blocklen,
	int			leaf)
{
	blocklen -= sizeof(xfs_bmdr_block_t);

	if (leaf)
		return blocklen / sizeof(xfs_bmdr_rec_t);
	return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
}

/*
 * Change the owner of a btree format fork of the inode passed in.  Change it
 * to the new owner that is passed in so that we can change owners before or
 * after we switch forks between inodes.  The operation that the caller is
 * doing will determine whether it needs to change owner before or after the
 * switch.
 *
 * For demand paged transactional modification, the fork switch should be done
 * after reading in all the blocks, modifying them and pinning them in the
 * transaction. For modification when the buffers are already pinned in memory,
 * the fork switch can be done before changing the owner as we won't need to
 * validate the owner until the btree buffers are unpinned and writes can occur
 * again.
 *
 * For recovery based ownership change, there is no transactional context and
 * so a buffer list must be supplied so that we can record the buffers that we
 * modified for the caller to issue IO on.
 */
int
xfs_bmbt_change_owner(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_ino_t		new_owner,
	struct list_head	*buffer_list)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(tp || buffer_list);
	ASSERT(!(tp && buffer_list));
	ASSERT(xfs_ifork_ptr(ip, whichfork)->if_format == XFS_DINODE_FMT_BTREE);

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
	cur->bc_ino.flags |= XFS_BTCUR_BMBT_INVALID_OWNER;

	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
	xfs_btree_del_cursor(cur, error);
	return error;
}

/* Calculate the bmap btree size for some records. */
unsigned long long
xfs_bmbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_bmap_dmnr, len);
}

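/* Set up the bmap btree cursor cache at module init time. */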
int __init
xfs_bmbt_init_cur_cache(void)
{
	xfs_bmbt_cur_cache = kmem_cache_create("xfs_bmbt_cur",
			xfs_btree_cur_sizeof(xfs_bmbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_bmbt_cur_cache)
		return -ENOMEM;
	return 0;
}

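/* Tear down the bmap btree cursor cache. */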
void
xfs_bmbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_bmbt_cur_cache);
	xfs_bmbt_cur_cache = NULL;
}