/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"


STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur *cur,
	int level)
{
	return cur->bc_mp->m_inobt_mnr[level != 0];
}

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur *cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur *cur,
	union xfs_btree_ptr *nptr,
	int inc) /* level change */
{
	struct xfs_buf *agbp = cur->bc_private.a.agbp;
	struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur *cur,
	union xfs_btree_ptr *nptr,
	int inc) /* level change */
{
	struct xfs_buf *agbp = cur->bc_private.a.agbp;
	struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

STATIC int
__xfs_inobt_alloc_block(
	struct xfs_btree_cur *cur,
	union xfs_btree_ptr *start,
	union xfs_btree_ptr *new,
	int *stat,
	enum xfs_ag_resv_type resv)
{
	xfs_alloc_arg_t args; /* block allocation args */
	int error; /* error return value */
	xfs_agblock_t sbno = be32_to_cpu(start->s);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INOBT);
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	return 0;
}

STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur *cur,
	union xfs_btree_ptr *start,
	union xfs_btree_ptr *new,
	int *stat)
{
	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_alloc_block(
	struct xfs_btree_cur *cur,
	union xfs_btree_ptr *start,
	union xfs_btree_ptr *new,
	int *stat)
{
	if (cur->bc_mp->m_inotbt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);
}

STATIC int
__xfs_inobt_free_block(
	struct xfs_btree_cur *cur,
	struct xfs_buf *bp,
	enum xfs_ag_resv_type resv)
{
	struct xfs_owner_info oinfo;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
	return xfs_free_extent(cur->bc_tp,
			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
			&oinfo, resv);
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur *cur,
	struct xfs_buf *bp)
{
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur *cur,
	struct xfs_buf *bp)
{
	if (cur->bc_mp->m_inotbt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}

STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur *cur,
	int level)
{
	return cur->bc_mp->m_inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key *key,
	union xfs_btree_rec *rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_high_key_from_rec(
	union xfs_btree_key *key,
	union xfs_btree_rec *rec)
{
	__u32 x;

	x = be32_to_cpu(rec->inobt.ir_startino);
	x += XFS_INODES_PER_CHUNK - 1;
	key->inobt.ir_startino = cpu_to_be32(x);
}

STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur *cur,
	union xfs_btree_rec *rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec->inobt.ir_u.sp.ir_holemask =
				cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
				cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * initial value of ptr for lookup
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur *cur,
	union xfs_btree_ptr *ptr)
{
	struct xfs_agi *agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur *cur,
	union xfs_btree_ptr *ptr)
{
	struct xfs_agi *agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

STATIC int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur *cur,
	union xfs_btree_key *key)
{
	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
			cur->bc_rec.i.ir_startino;
}

STATIC int64_t
xfs_inobt_diff_two_keys(
	struct xfs_btree_cur *cur,
	union xfs_btree_key *k1,
	union xfs_btree_key *k2)
{
	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
			be32_to_cpu(k2->inobt.ir_startino);
}

static xfs_failaddr_t
xfs_inobt_verify(
	struct xfs_buf *bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t fa;
	unsigned int level;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_IBT_CRC_MAGIC):
	case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
		/* fall through */
	case cpu_to_be32(XFS_IBT_MAGIC):
	case cpu_to_be32(XFS_FIBT_MAGIC):
		break;
	default:
		return __this_address;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= mp->m_in_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf *bp)
{
	xfs_failaddr_t fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_inobt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_inobt_write_verify(
	struct xfs_buf *bp)
{
	xfs_failaddr_t fa;

	fa = xfs_inobt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);

}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur *cur,
	union xfs_btree_key *k1,
	union xfs_btree_key *k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur *cur,
	union xfs_btree_rec *r1,
	union xfs_btree_rec *r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}

static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len = sizeof(xfs_inobt_rec_t),
	.key_len = sizeof(xfs_inobt_key_t),

	.dup_cursor = xfs_inobt_dup_cursor,
	.set_root = xfs_inobt_set_root,
	.alloc_block = xfs_inobt_alloc_block,
	.free_block = xfs_inobt_free_block,
	.get_minrecs = xfs_inobt_get_minrecs,
	.get_maxrecs = xfs_inobt_get_maxrecs,
	.init_key_from_rec = xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec = xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur = xfs_inobt_init_ptr_from_cur,
	.key_diff = xfs_inobt_key_diff,
	.buf_ops = &xfs_inobt_buf_ops,
	.diff_two_keys = xfs_inobt_diff_two_keys,
	.keys_inorder = xfs_inobt_keys_inorder,
	.recs_inorder = xfs_inobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_finobt_ops = {
	.rec_len = sizeof(xfs_inobt_rec_t),
	.key_len = sizeof(xfs_inobt_key_t),

	.dup_cursor = xfs_inobt_dup_cursor,
	.set_root = xfs_finobt_set_root,
	.alloc_block = xfs_finobt_alloc_block,
	.free_block = xfs_finobt_free_block,
	.get_minrecs = xfs_inobt_get_minrecs,
	.get_maxrecs = xfs_inobt_get_maxrecs,
	.init_key_from_rec = xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec = xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur = xfs_finobt_init_ptr_from_cur,
	.key_diff = xfs_inobt_key_diff,
	.buf_ops = &xfs_inobt_buf_ops,
	.diff_two_keys = xfs_inobt_diff_two_keys,
	.keys_inorder = xfs_inobt_keys_inorder,
	.recs_inorder = xfs_inobt_recs_inorder,
};

/*
 * Allocate a new inode btree cursor.
 */
struct xfs_btree_cur * /* new inode btree cursor */
xfs_inobt_init_cursor(
	struct xfs_mount *mp, /* file system mount point */
	struct xfs_trans *tp, /* transaction pointer */
	struct xfs_buf *agbp, /* buffer for agi structure */
	xfs_agnumber_t agno, /* allocation group number */
	xfs_btnum_t btnum) /* ialloc or free ino btree */
{
	struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
	struct xfs_btree_cur *cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	if (btnum == XFS_BTNUM_INO) {
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
		cur->bc_ops = &xfs_inobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
	} else {
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
		cur->bc_ops = &xfs_finobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
	}

	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
	struct xfs_mount *mp,
	int blocklen,
	int leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}
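
/*
 * Rough capacity sketch, assuming a 16-byte inobt record, 4-byte keys and
 * pointers, and a 56-byte v5 short-form block header: a 4096-byte block
 * holds about (4096 - 56) / 16 = 252 records as a leaf, or
 * (4096 - 56) / (4 + 4) = 505 key/pointer pairs as a node.
 */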

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
 */
uint64_t
xfs_inobt_irec_to_allocmask(
	struct xfs_inobt_rec_incore *rec)
{
	uint64_t bitmap = 0;
	uint64_t inodespbit;
	int nextbit;
	uint allocbitmap;

	/*
	 * The holemask has 16-bits for a 64 inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to set
	 * in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
				(nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}
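
/*
 * Worked example, assuming 64 inodes per chunk and 16 holemask bits (so 4
 * inodes per holemask bit and inodespbit == 0xf): a record with
 * ir_holemask == 0xff00 has holes over inodes 32-63, giving
 * allocbitmap == 0x00ff and a returned allocmask of 0x00000000ffffffff,
 * i.e. only inodes 0-31 are physically allocated on disk.
 */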

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount *mp,
	struct xfs_inobt_rec_incore *rec)
{
	int inocount = 0;
	int nextbit = 0;
	uint64_t allocbmap;
	int wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG */

static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_mount *mp)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_inobt_mxr[0] == 0)
		return 0;

	return xfs_btree_calc_size(mp->m_inobt_mnr,
			(uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
				XFS_INODES_PER_CHUNK);
}

static int
xfs_inobt_count_blocks(
	struct xfs_mount *mp,
	xfs_agnumber_t agno,
	xfs_btnum_t btnum,
	xfs_extlen_t *tree_blocks)
{
	struct xfs_buf *agbp;
	struct xfs_btree_cur *cur;
	int error;

	error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
	if (error)
		return error;

	cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
	error = xfs_btree_count_blocks(cur, tree_blocks);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	xfs_buf_relse(agbp);

	return error;
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_finobt_calc_reserves(
	struct xfs_mount *mp,
	xfs_agnumber_t agno,
	xfs_extlen_t *ask,
	xfs_extlen_t *used)
{
	xfs_extlen_t tree_len = 0;
	int error;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return 0;

	error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
	if (error)
		return error;

	*ask += xfs_inobt_max_size(mp);
	*used += tree_len;
	return 0;
}
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_health.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"

static struct kmem_cache *xfs_inobt_cur_cache;

STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur *cur,
	int level)
{
	return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
}

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur *cur)
{
	return xfs_inobt_init_cursor(cur->bc_ag.pag, cur->bc_tp,
			cur->bc_ag.agbp);
}

STATIC struct xfs_btree_cur *
xfs_finobt_dup_cursor(
	struct xfs_btree_cur *cur)
{
	return xfs_finobt_init_cursor(cur->bc_ag.pag, cur->bc_tp,
			cur->bc_ag.agbp);
}

STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur *cur,
	const union xfs_btree_ptr *nptr,
	int inc) /* level change */
{
	struct xfs_buf *agbp = cur->bc_ag.agbp;
	struct xfs_agi *agi = agbp->b_addr;

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur *cur,
	const union xfs_btree_ptr *nptr,
	int inc) /* level change */
{
	struct xfs_buf *agbp = cur->bc_ag.agbp;
	struct xfs_agi *agi = agbp->b_addr;

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

/* Update the inode btree block counter for this btree. */
static inline void
xfs_inobt_mod_blockcount(
	struct xfs_btree_cur *cur,
	int howmuch)
{
	struct xfs_buf *agbp = cur->bc_ag.agbp;
	struct xfs_agi *agi = agbp->b_addr;

	if (!xfs_has_inobtcounts(cur->bc_mp))
		return;

	if (xfs_btree_is_fino(cur->bc_ops))
		be32_add_cpu(&agi->agi_fblocks, howmuch);
	else
		be32_add_cpu(&agi->agi_iblocks, howmuch);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS);
}
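
/*
 * With the inobtcounts feature enabled, the AGI carries running block counts
 * for both inode btrees; that is what lets xfs_finobt_calc_reserves() read
 * the finobt size straight from the AGI (xfs_finobt_read_blocks() below)
 * instead of walking the whole tree.
 */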

STATIC int
__xfs_inobt_alloc_block(
	struct xfs_btree_cur *cur,
	const union xfs_btree_ptr *start,
	union xfs_btree_ptr *new,
	int *stat,
	enum xfs_ag_resv_type resv)
{
	xfs_alloc_arg_t args; /* block allocation args */
	int error; /* error return value */
	xfs_agblock_t sbno = be32_to_cpu(start->s);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.pag = cur->bc_ag.pag;
	args.oinfo = XFS_RMAP_OINFO_INOBT;
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.resv = resv;

	error = xfs_alloc_vextent_near_bno(&args,
			XFS_AGB_TO_FSB(args.mp, args.pag->pag_agno, sbno));
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	xfs_inobt_mod_blockcount(cur, 1);
	return 0;
}

STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur *cur,
	const union xfs_btree_ptr *start,
	union xfs_btree_ptr *new,
	int *stat)
{
	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_alloc_block(
	struct xfs_btree_cur *cur,
	const union xfs_btree_ptr *start,
	union xfs_btree_ptr *new,
	int *stat)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);
}

STATIC int
__xfs_inobt_free_block(
	struct xfs_btree_cur *cur,
	struct xfs_buf *bp,
	enum xfs_ag_resv_type resv)
{
	xfs_fsblock_t fsbno;

	xfs_inobt_mod_blockcount(cur, -1);
	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
	return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
			&XFS_RMAP_OINFO_INOBT, resv, false);
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur *cur,
	struct xfs_buf *bp)
{
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur *cur,
	struct xfs_buf *bp)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}

STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur *cur,
	int level)
{
	return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key *key,
	const union xfs_btree_rec *rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_high_key_from_rec(
	union xfs_btree_key *key,
	const union xfs_btree_rec *rec)
{
	__u32 x;

	x = be32_to_cpu(rec->inobt.ir_startino);
	x += XFS_INODES_PER_CHUNK - 1;
	key->inobt.ir_startino = cpu_to_be32(x);
}

STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur *cur,
	union xfs_btree_rec *rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_has_sparseinodes(cur->bc_mp)) {
		rec->inobt.ir_u.sp.ir_holemask =
				cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
				cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * initial value of ptr for lookup
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur *cur,
	union xfs_btree_ptr *ptr)
{
	struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur *cur,
	union xfs_btree_ptr *ptr)
{
	struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

STATIC int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur *cur,
	const union xfs_btree_key *key)
{
	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
			cur->bc_rec.i.ir_startino;
}

STATIC int64_t
xfs_inobt_diff_two_keys(
	struct xfs_btree_cur *cur,
	const union xfs_btree_key *k1,
	const union xfs_btree_key *k2,
	const union xfs_btree_key *mask)
{
	ASSERT(!mask || mask->inobt.ir_startino);

	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
			be32_to_cpu(k2->inobt.ir_startino);
}

static xfs_failaddr_t
xfs_inobt_verify(
	struct xfs_buf *bp)
{
	struct xfs_mount *mp = bp->b_mount;
	struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t fa;
	unsigned int level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check
	 * xfs_perag_initialised_agi(pag)) if we ever do.
	 */
	if (xfs_has_crc(mp)) {
		fa = xfs_btree_agblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= M_IGEO(mp)->inobt_maxlevels)
		return __this_address;

	return xfs_btree_agblock_verify(bp,
			M_IGEO(mp)->inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf *bp)
{
	xfs_failaddr_t fa;

	if (!xfs_btree_agblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_inobt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_inobt_write_verify(
	struct xfs_buf *bp)
{
	xfs_failaddr_t fa;

	fa = xfs_inobt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_agblock_calc_crc(bp);

}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

const struct xfs_buf_ops xfs_finobt_buf_ops = {
	.name = "xfs_finobt",
	.magic = { cpu_to_be32(XFS_FIBT_MAGIC),
			cpu_to_be32(XFS_FIBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur *cur,
	const union xfs_btree_key *k1,
	const union xfs_btree_key *k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur *cur,
	const union xfs_btree_rec *r1,
	const union xfs_btree_rec *r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}

STATIC enum xbtree_key_contig
xfs_inobt_keys_contiguous(
	struct xfs_btree_cur *cur,
	const union xfs_btree_key *key1,
	const union xfs_btree_key *key2,
	const union xfs_btree_key *mask)
{
	ASSERT(!mask || mask->inobt.ir_startino);

	return xbtree_key_contig(be32_to_cpu(key1->inobt.ir_startino),
			be32_to_cpu(key2->inobt.ir_startino));
}

const struct xfs_btree_ops xfs_inobt_ops = {
	.name = "ino",
	.type = XFS_BTREE_TYPE_AG,

	.rec_len = sizeof(xfs_inobt_rec_t),
	.key_len = sizeof(xfs_inobt_key_t),
	.ptr_len = XFS_BTREE_SHORT_PTR_LEN,

	.lru_refs = XFS_INO_BTREE_REF,
	.statoff = XFS_STATS_CALC_INDEX(xs_ibt_2),
	.sick_mask = XFS_SICK_AG_INOBT,

	.dup_cursor = xfs_inobt_dup_cursor,
	.set_root = xfs_inobt_set_root,
	.alloc_block = xfs_inobt_alloc_block,
	.free_block = xfs_inobt_free_block,
	.get_minrecs = xfs_inobt_get_minrecs,
	.get_maxrecs = xfs_inobt_get_maxrecs,
	.init_key_from_rec = xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec = xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur = xfs_inobt_init_ptr_from_cur,
	.key_diff = xfs_inobt_key_diff,
	.buf_ops = &xfs_inobt_buf_ops,
	.diff_two_keys = xfs_inobt_diff_two_keys,
	.keys_inorder = xfs_inobt_keys_inorder,
	.recs_inorder = xfs_inobt_recs_inorder,
	.keys_contiguous = xfs_inobt_keys_contiguous,
};

const struct xfs_btree_ops xfs_finobt_ops = {
	.name = "fino",
	.type = XFS_BTREE_TYPE_AG,

	.rec_len = sizeof(xfs_inobt_rec_t),
	.key_len = sizeof(xfs_inobt_key_t),
	.ptr_len = XFS_BTREE_SHORT_PTR_LEN,

	.lru_refs = XFS_INO_BTREE_REF,
	.statoff = XFS_STATS_CALC_INDEX(xs_fibt_2),
	.sick_mask = XFS_SICK_AG_FINOBT,

	.dup_cursor = xfs_finobt_dup_cursor,
	.set_root = xfs_finobt_set_root,
	.alloc_block = xfs_finobt_alloc_block,
	.free_block = xfs_finobt_free_block,
	.get_minrecs = xfs_inobt_get_minrecs,
	.get_maxrecs = xfs_inobt_get_maxrecs,
	.init_key_from_rec = xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec = xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur = xfs_finobt_init_ptr_from_cur,
	.key_diff = xfs_inobt_key_diff,
	.buf_ops = &xfs_finobt_buf_ops,
	.diff_two_keys = xfs_inobt_diff_two_keys,
	.keys_inorder = xfs_inobt_keys_inorder,
	.recs_inorder = xfs_inobt_recs_inorder,
	.keys_contiguous = xfs_inobt_keys_contiguous,
};

/*
 * Create an inode btree cursor.
 *
 * For staging cursors tp and agbp are NULL.
 */
struct xfs_btree_cur *
xfs_inobt_init_cursor(
	struct xfs_perag *pag,
	struct xfs_trans *tp,
	struct xfs_buf *agbp)
{
	struct xfs_mount *mp = pag->pag_mount;
	struct xfs_btree_cur *cur;

	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_inobt_ops,
			M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
	cur->bc_ag.pag = xfs_perag_hold(pag);
	cur->bc_ag.agbp = agbp;
	if (agbp) {
		struct xfs_agi *agi = agbp->b_addr;

		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
	}
	return cur;
}
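
/*
 * A minimal usage sketch (the same pattern as xfs_finobt_count_blocks()
 * below): read the AGI, build a cursor against it, run the btree
 * operation, then tear the cursor down and release the buffer:
 *
 *	error = xfs_ialloc_read_agi(pag, tp, &agbp);
 *	if (error)
 *		return error;
 *	cur = xfs_inobt_init_cursor(pag, tp, agbp);
 *	error = xfs_btree_count_blocks(cur, &blocks);
 *	xfs_btree_del_cursor(cur, error);
 *	xfs_trans_brelse(tp, agbp);
 */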

/*
 * Create a free inode btree cursor.
 *
 * For staging cursors tp and agbp are NULL.
 */
struct xfs_btree_cur *
xfs_finobt_init_cursor(
	struct xfs_perag *pag,
	struct xfs_trans *tp,
	struct xfs_buf *agbp)
{
	struct xfs_mount *mp = pag->pag_mount;
	struct xfs_btree_cur *cur;

	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_finobt_ops,
			M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
	cur->bc_ag.pag = xfs_perag_hold(pag);
	cur->bc_ag.agbp = agbp;
	if (agbp) {
		struct xfs_agi *agi = agbp->b_addr;

		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
	}
	return cur;
}

/*
 * Install a new inobt btree root. Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_inobt_commit_staged_btree(
	struct xfs_btree_cur *cur,
	struct xfs_trans *tp,
	struct xfs_buf *agbp)
{
	struct xfs_agi *agi = agbp->b_addr;
	struct xbtree_afakeroot *afake = cur->bc_ag.afake;
	int fields;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	if (xfs_btree_is_ino(cur->bc_ops)) {
		fields = XFS_AGI_ROOT | XFS_AGI_LEVEL;
		agi->agi_root = cpu_to_be32(afake->af_root);
		agi->agi_level = cpu_to_be32(afake->af_levels);
		if (xfs_has_inobtcounts(cur->bc_mp)) {
			agi->agi_iblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp);
	} else {
		fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL;
		agi->agi_free_root = cpu_to_be32(afake->af_root);
		agi->agi_free_level = cpu_to_be32(afake->af_levels);
		if (xfs_has_inobtcounts(cur->bc_mp)) {
			agi->agi_fblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp);
	}
}

/* Calculate number of records in an inode btree block. */
static inline unsigned int
xfs_inobt_block_maxrecs(
	unsigned int blocklen,
	bool leaf)
{
	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
	struct xfs_mount *mp,
	int blocklen,
	int leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);
	return xfs_inobt_block_maxrecs(blocklen, leaf);
}

/*
 * Maximum number of inode btree records per AG. Pretend that we can fill an
 * entire AG completely full of inodes except for the AG headers.
 */
#define XFS_MAX_INODE_RECORDS \
	((XFS_MAX_AG_BYTES - (4 * BBSIZE)) / XFS_DINODE_MIN_SIZE) / \
			XFS_INODES_PER_CHUNK
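
/*
 * Order-of-magnitude sketch, assuming a 1TiB (2^40 byte) AG size limit and a
 * 256-byte minimum on-disk inode: (2^40 / 256) / 64 is roughly 2^26, i.e. on
 * the order of 67 million inode records per AG at most.
 */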

/* Compute the max possible height for the inode btree. */
static inline unsigned int
xfs_inobt_maxlevels_ondisk(void)
{
	unsigned int minrecs[2];
	unsigned int blocklen;

	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
			XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	minrecs[0] = xfs_inobt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_inobt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_INODE_RECORDS);
}

/* Compute the max possible height for the free inode btree. */
static inline unsigned int
xfs_finobt_maxlevels_ondisk(void)
{
	unsigned int minrecs[2];
	unsigned int blocklen;

	blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN;

	minrecs[0] = xfs_inobt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_inobt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_INODE_RECORDS);
}

/* Compute the max possible height for either inode btree. */
unsigned int
xfs_iallocbt_maxlevels_ondisk(void)
{
	return max(xfs_inobt_maxlevels_ondisk(),
			xfs_finobt_maxlevels_ondisk());
}

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
 */
uint64_t
xfs_inobt_irec_to_allocmask(
	const struct xfs_inobt_rec_incore *rec)
{
	uint64_t bitmap = 0;
	uint64_t inodespbit;
	int nextbit;
	uint allocbitmap;

	/*
	 * The holemask has 16-bits for a 64 inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to set
	 * in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
				(nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount *mp,
	struct xfs_inobt_rec_incore *rec)
{
	int inocount = 0;
	int nextbit = 0;
	uint64_t allocbmap;
	int wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG */
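
/*
 * For example, a sparse record with ir_holemask == 0xfff0 has only holemask
 * bits 0-3 clear, i.e. 16 physically allocated inodes, so the check above
 * requires ir_count == 16 for the record to be considered valid.
 */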

static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_perag *pag)
{
	struct xfs_mount *mp = pag->pag_mount;
	xfs_agblock_t agblocks = pag->block_count;

	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (M_IGEO(mp)->inobt_mxr[0] == 0)
		return 0;

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion. We therefore can pretend the space isn't there.
	 */
	if (xfs_ag_contains_log(mp, pag->pag_agno))
		agblocks -= mp->m_sb.sb_logblocks;

	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
			(uint64_t)agblocks * mp->m_sb.sb_inopblock /
				XFS_INODES_PER_CHUNK);
}

static int
xfs_finobt_count_blocks(
	struct xfs_perag *pag,
	struct xfs_trans *tp,
	xfs_extlen_t *tree_blocks)
{
	struct xfs_buf *agbp = NULL;
	struct xfs_btree_cur *cur;
	int error;

	error = xfs_ialloc_read_agi(pag, tp, &agbp);
	if (error)
		return error;

	cur = xfs_finobt_init_cursor(pag, tp, agbp);
	error = xfs_btree_count_blocks(cur, tree_blocks);
	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(tp, agbp);

	return error;
}

/* Read finobt block count from AGI header. */
static int
xfs_finobt_read_blocks(
	struct xfs_perag *pag,
	struct xfs_trans *tp,
	xfs_extlen_t *tree_blocks)
{
	struct xfs_buf *agbp;
	struct xfs_agi *agi;
	int error;

	error = xfs_ialloc_read_agi(pag, tp, &agbp);
	if (error)
		return error;

	agi = agbp->b_addr;
	*tree_blocks = be32_to_cpu(agi->agi_fblocks);
	xfs_trans_brelse(tp, agbp);
	return 0;
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_finobt_calc_reserves(
	struct xfs_perag *pag,
	struct xfs_trans *tp,
	xfs_extlen_t *ask,
	xfs_extlen_t *used)
{
	xfs_extlen_t tree_len = 0;
	int error;

	if (!xfs_has_finobt(pag->pag_mount))
		return 0;

	if (xfs_has_inobtcounts(pag->pag_mount))
		error = xfs_finobt_read_blocks(pag, tp, &tree_len);
	else
		error = xfs_finobt_count_blocks(pag, tp, &tree_len);
	if (error)
		return error;

	*ask += xfs_inobt_max_size(pag);
	*used += tree_len;
	return 0;
}
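
/*
 * The caller feeds these totals into the per-AG reservation code: *ask
 * accumulates the worst-case finobt size for this AG while *used accumulates
 * the blocks the tree currently occupies, so that the difference can be set
 * aside as XFS_AG_RESV_METADATA space for future finobt block allocations.
 */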

/* Calculate the inobt btree size for some records. */
xfs_extlen_t
xfs_iallocbt_calc_size(
	struct xfs_mount *mp,
	unsigned long long len)
{
	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
}

int __init
xfs_inobt_init_cur_cache(void)
{
	xfs_inobt_cur_cache = kmem_cache_create("xfs_inobt_cur",
			xfs_btree_cur_sizeof(xfs_inobt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_inobt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_inobt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_inobt_cur_cache);
	xfs_inobt_cur_cache = NULL;
}