/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"


STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mnr[level != 0];
}

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

STATIC int
__xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat,
	enum xfs_ag_resv_type	resv)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INOBT);
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	return 0;
}

STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	if (cur->bc_mp->m_inotbt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);
}

STATIC int
__xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	enum xfs_ag_resv_type	resv)
{
	struct xfs_owner_info	oinfo;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
	return xfs_free_extent(cur->bc_tp,
			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
			&oinfo, resv);
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	if (cur->bc_mp->m_inotbt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}

STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->inobt.ir_startino);
	x += XFS_INODES_PER_CHUNK - 1;
	key->inobt.ir_startino = cpu_to_be32(x);
}

STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec->inobt.ir_u.sp.ir_holemask =
			cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
			cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * initial value of ptr for lookup
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

STATIC int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
			cur->bc_rec.i.ir_startino;
}

STATIC int64_t
xfs_inobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
			be32_to_cpu(k2->inobt.ir_startino);
}

static xfs_failaddr_t
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_IBT_CRC_MAGIC):
	case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
		/* fall through */
	case cpu_to_be32(XFS_IBT_MAGIC):
	case cpu_to_be32(XFS_FIBT_MAGIC):
		break;
	default:
		return NULL;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= mp->m_in_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_inobt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_inobt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}

static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_finobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_finobt_set_root,
	.alloc_block		= xfs_finobt_alloc_block,
	.free_block		= xfs_finobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

/*
 * Allocate a new inode btree cursor.
 */
struct xfs_btree_cur *				/* new inode btree cursor */
xfs_inobt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agi structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
	struct xfs_btree_cur	*cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	if (btnum == XFS_BTNUM_INO) {
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
		cur->bc_ops = &xfs_inobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
	} else {
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
		cur->bc_ops = &xfs_finobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
	}

	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}
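
/*
 * Worked example for xfs_inobt_maxrecs() above (illustrative figures,
 * assuming the usual 16-byte xfs_inobt_rec_t and 4-byte key and pointer
 * sizes): a 4096-byte block with a 16-byte v4 short-form btree header
 * leaves 4080 bytes, so a leaf holds 4080 / 16 = 255 records and a node
 * holds 4080 / (4 + 4) = 510 key/pointer pairs.  The larger v5 (CRC)
 * header shrinks both counts accordingly.
 */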

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
 */
uint64_t
xfs_inobt_irec_to_allocmask(
	struct xfs_inobt_rec_incore	*rec)
{
	uint64_t			bitmap = 0;
	uint64_t			inodespbit;
	int				nextbit;
	uint				allocbitmap;

	/*
	 * The holemask has 16-bits for a 64 inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to set
	 * in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}
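
/*
 * Worked example for xfs_inobt_irec_to_allocmask() above (illustrative):
 * with 64 inodes per chunk and 16 holemask bits, each holemask bit covers
 * XFS_INODES_PER_HOLEMASK_BIT == 4 inodes.  A holemask of 0xfff0 (only
 * the first sixteen inodes physically allocated) inverts to an allocbitmap
 * of 0x000f; expanding each set bit by the 4-bit inodespbit mask yields an
 * allocmask of 0xffff, i.e. inodes 0-15 allocated and the remaining 48
 * inodes holes.
 */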

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount		*mp,
	struct xfs_inobt_rec_incore	*rec)
{
	int				inocount = 0;
	int				nextbit = 0;
	uint64_t			allocbmap;
	int				wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				       nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG */

static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_mount	*mp)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_inobt_mxr[0] == 0)
		return 0;

	return xfs_btree_calc_size(mp->m_inobt_mnr,
		(uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
				XFS_INODES_PER_CHUNK);
}

static int
xfs_inobt_count_blocks(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp;
	struct xfs_btree_cur	*cur;
	int			error;

	error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
	if (error)
		return error;

	cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
	error = xfs_btree_count_blocks(cur, tree_blocks);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	xfs_buf_relse(agbp);

	return error;
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_finobt_calc_reserves(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	xfs_extlen_t		tree_len = 0;
	int			error;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return 0;

	error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
	if (error)
		return error;

	*ask += xfs_inobt_max_size(mp);
	*used += tree_len;
	return 0;
}

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"

STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
}

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.agno,
			cur->bc_btnum);
}

STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

STATIC int
__xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat,
	enum xfs_ag_resv_type	resv)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.oinfo = XFS_RMAP_OINFO_INOBT;
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	return 0;
}

STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);
}

STATIC int
__xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	enum xfs_ag_resv_type	resv)
{
	return xfs_free_extent(cur->bc_tp,
			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
			&XFS_RMAP_OINFO_INOBT, resv);
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}

STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->inobt.ir_startino);
	x += XFS_INODES_PER_CHUNK - 1;
	key->inobt.ir_startino = cpu_to_be32(x);
}

STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec->inobt.ir_u.sp.ir_holemask =
			cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
			cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * initial value of ptr for lookup
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

STATIC int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
			cur->bc_rec.i.ir_startino;
}

STATIC int64_t
xfs_inobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
			be32_to_cpu(k2->inobt.ir_startino);
}

static xfs_failaddr_t
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= M_IGEO(mp)->inobt_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp,
			M_IGEO(mp)->inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_inobt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_inobt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

const struct xfs_buf_ops xfs_finobt_buf_ops = {
	.name = "xfs_finobt",
	.magic = { cpu_to_be32(XFS_FIBT_MAGIC),
		   cpu_to_be32(XFS_FIBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}

static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_finobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_finobt_set_root,
	.alloc_block		= xfs_finobt_alloc_block,
	.free_block		= xfs_finobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_finobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

/*
 * Initialize a new inode btree cursor.
 */
static struct xfs_btree_cur *
xfs_inobt_init_common(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
{
	struct xfs_btree_cur	*cur;

	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	if (btnum == XFS_BTNUM_INO) {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
		cur->bc_ops = &xfs_inobt_ops;
	} else {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
		cur->bc_ops = &xfs_finobt_ops;
	}

	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_ag.agno = agno;
	return cur;
}

/* Create an inode btree cursor. */
struct xfs_btree_cur *
xfs_inobt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;
	struct xfs_agi		*agi = agbp->b_addr;

	cur = xfs_inobt_init_common(mp, tp, agno, btnum);
	if (btnum == XFS_BTNUM_INO)
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
	else
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
	cur->bc_ag.agbp = agbp;
	return cur;
}

/* Create an inode btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_inobt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_inobt_init_common(mp, NULL, agno, btnum);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new inobt btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_inobt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agi		*agi = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	if (cur->bc_btnum == XFS_BTNUM_INO) {
		agi->agi_root = cpu_to_be32(afake->af_root);
		agi->agi_level = cpu_to_be32(afake->af_levels);
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_inobt_ops);
	} else {
		agi->agi_free_root = cpu_to_be32(afake->af_root);
		agi->agi_free_level = cpu_to_be32(afake->af_levels);
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREE_ROOT |
					     XFS_AGI_FREE_LEVEL);
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_finobt_ops);
	}
}
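
/*
 * Sketch of the intended staging flow (illustrative only; this file does
 * not contain such a caller, and the record-loading step is elided): a
 * repair-style caller builds a whole new tree off to the side and then
 * swaps it in:
 *
 *	cur = xfs_inobt_stage_cursor(mp, &afake, agno, XFS_BTNUM_INO);
 *	... load the new records via the xfs_btree_bload() machinery ...
 *	xfs_inobt_commit_staged_btree(cur, tp, agbp);
 *	xfs_btree_del_cursor(cur, 0);
 */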

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
 */
uint64_t
xfs_inobt_irec_to_allocmask(
	struct xfs_inobt_rec_incore	*rec)
{
	uint64_t			bitmap = 0;
	uint64_t			inodespbit;
	int				nextbit;
	uint				allocbitmap;

	/*
	 * The holemask has 16-bits for a 64 inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to set
	 * in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount		*mp,
	struct xfs_inobt_rec_incore	*rec)
{
	int				inocount = 0;
	int				nextbit = 0;
	uint64_t			allocbmap;
	int				wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				       nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG */

static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	xfs_agblock_t		agblocks = xfs_ag_block_count(mp, agno);

	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (M_IGEO(mp)->inobt_mxr[0] == 0)
		return 0;

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
		agblocks -= mp->m_sb.sb_logblocks;

	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
				(uint64_t)agblocks * mp->m_sb.sb_inopblock /
					XFS_INODES_PER_CHUNK);
}
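
/*
 * Example of the sizing math above (illustrative figures): an AG of
 * 1048576 blocks holding 16 inodes per block can contain at most
 * 1048576 * 16 / 64 = 262144 inode chunks, i.e. 262144 inobt records;
 * xfs_btree_calc_size() then converts that worst-case record count into
 * a worst-case block count using the per-level minimum records per block.
 */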

/* Read AGI and create inobt cursor. */
int
xfs_inobt_cur(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_btnum_t		which,
	struct xfs_btree_cur	**curpp,
	struct xfs_buf		**agi_bpp)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(*agi_bpp == NULL);
	ASSERT(*curpp == NULL);

	error = xfs_ialloc_read_agi(mp, tp, agno, agi_bpp);
	if (error)
		return error;

	cur = xfs_inobt_init_cursor(mp, tp, *agi_bpp, agno, which);
	if (!cur) {
		xfs_trans_brelse(tp, *agi_bpp);
		*agi_bpp = NULL;
		return -ENOMEM;
	}
	*curpp = cur;
	return 0;
}

static int
xfs_inobt_count_blocks(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp = NULL;
	struct xfs_btree_cur	*cur = NULL;
	int			error;

	error = xfs_inobt_cur(mp, tp, agno, btnum, &cur, &agbp);
	if (error)
		return error;

	error = xfs_btree_count_blocks(cur, tree_blocks);
	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(tp, agbp);

	return error;
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_finobt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	xfs_extlen_t		tree_len = 0;
	int			error;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return 0;

	error = xfs_inobt_count_blocks(mp, tp, agno, XFS_BTNUM_FINO, &tree_len);
	if (error)
		return error;

	*ask += xfs_inobt_max_size(mp, agno);
	*used += tree_len;
	return 0;
}
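
/*
 * Note (behaviour implemented elsewhere, summarized here): the ask/used
 * totals accumulated above feed the per-AG space reservation code, which
 * holds back roughly "ask - used" blocks of free space so that finobt
 * expansion does not run out of space at inode allocation time.
 */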

/* Calculate the inobt btree size for some records. */
xfs_extlen_t
xfs_iallocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
}
709}