Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
  4 * All Rights Reserved.
  5 */
  6#include "xfs.h"
  7#include "xfs_fs.h"
  8#include "xfs_shared.h"
  9#include "xfs_format.h"
 10#include "xfs_log_format.h"
 11#include "xfs_trans_resv.h"
 12#include "xfs_bit.h"
 13#include "xfs_mount.h"
 14#include "xfs_btree.h"
 15#include "xfs_btree_staging.h"
 16#include "xfs_ialloc.h"
 17#include "xfs_ialloc_btree.h"
 18#include "xfs_alloc.h"
 19#include "xfs_error.h"
 20#include "xfs_health.h"
 21#include "xfs_trace.h"
 22#include "xfs_trans.h"
 23#include "xfs_rmap.h"
 24#include "xfs_ag.h"
 25
 26static struct kmem_cache	*xfs_inobt_cur_cache;
 27
 28STATIC int
 29xfs_inobt_get_minrecs(
 30	struct xfs_btree_cur	*cur,
 31	int			level)
 32{
 33	return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
 34}
 35
 36STATIC struct xfs_btree_cur *
 37xfs_inobt_dup_cursor(
 38	struct xfs_btree_cur	*cur)
 39{
 40	return xfs_inobt_init_cursor(to_perag(cur->bc_group), cur->bc_tp,
 41			cur->bc_ag.agbp);
 42}
 43
 44STATIC struct xfs_btree_cur *
 45xfs_finobt_dup_cursor(
 46	struct xfs_btree_cur	*cur)
 47{
 48	return xfs_finobt_init_cursor(to_perag(cur->bc_group), cur->bc_tp,
 49			cur->bc_ag.agbp);
 50}
 51
 52STATIC void
 53xfs_inobt_set_root(
 54	struct xfs_btree_cur		*cur,
 55	const union xfs_btree_ptr	*nptr,
 56	int				inc)	/* level change */
 57{
 58	struct xfs_buf		*agbp = cur->bc_ag.agbp;
 59	struct xfs_agi		*agi = agbp->b_addr;
 60
 61	agi->agi_root = nptr->s;
 62	be32_add_cpu(&agi->agi_level, inc);
 63	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
 64}
 65
 66STATIC void
 67xfs_finobt_set_root(
 68	struct xfs_btree_cur		*cur,
 69	const union xfs_btree_ptr	*nptr,
 70	int				inc)	/* level change */
 71{
 72	struct xfs_buf		*agbp = cur->bc_ag.agbp;
 73	struct xfs_agi		*agi = agbp->b_addr;
 74
 75	agi->agi_free_root = nptr->s;
 76	be32_add_cpu(&agi->agi_free_level, inc);
 77	xfs_ialloc_log_agi(cur->bc_tp, agbp,
 78			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
 79}
 80
 81/* Update the inode btree block counter for this btree. */
 82static inline void
 83xfs_inobt_mod_blockcount(
 84	struct xfs_btree_cur	*cur,
 85	int			howmuch)
 86{
 87	struct xfs_buf		*agbp = cur->bc_ag.agbp;
 88	struct xfs_agi		*agi = agbp->b_addr;
 89
 90	if (!xfs_has_inobtcounts(cur->bc_mp))
 91		return;
 92
 93	if (xfs_btree_is_fino(cur->bc_ops))
 94		be32_add_cpu(&agi->agi_fblocks, howmuch);
 95	else
 96		be32_add_cpu(&agi->agi_iblocks, howmuch);
 97	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS);
 98}
 99
100STATIC int
101__xfs_inobt_alloc_block(
102	struct xfs_btree_cur		*cur,
103	const union xfs_btree_ptr	*start,
104	union xfs_btree_ptr		*new,
105	int				*stat,
106	enum xfs_ag_resv_type		resv)
107{
108	xfs_alloc_arg_t		args;		/* block allocation args */
109	int			error;		/* error return value */
110	xfs_agblock_t		sbno = be32_to_cpu(start->s);
111
112	memset(&args, 0, sizeof(args));
113	args.tp = cur->bc_tp;
114	args.mp = cur->bc_mp;
115	args.pag = to_perag(cur->bc_group);
116	args.oinfo = XFS_RMAP_OINFO_INOBT;
 
117	args.minlen = 1;
118	args.maxlen = 1;
119	args.prod = 1;
 
120	args.resv = resv;
121
122	error = xfs_alloc_vextent_near_bno(&args,
123			xfs_agbno_to_fsb(args.pag, sbno));
124	if (error)
125		return error;
126
127	if (args.fsbno == NULLFSBLOCK) {
128		*stat = 0;
129		return 0;
130	}
131	ASSERT(args.len == 1);
132
133	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
134	*stat = 1;
135	xfs_inobt_mod_blockcount(cur, 1);
136	return 0;
137}
138
139STATIC int
140xfs_inobt_alloc_block(
141	struct xfs_btree_cur		*cur,
142	const union xfs_btree_ptr	*start,
143	union xfs_btree_ptr		*new,
144	int				*stat)
145{
146	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
147}
148
149STATIC int
150xfs_finobt_alloc_block(
151	struct xfs_btree_cur		*cur,
152	const union xfs_btree_ptr	*start,
153	union xfs_btree_ptr		*new,
154	int				*stat)
155{
156	if (cur->bc_mp->m_finobt_nores)
157		return xfs_inobt_alloc_block(cur, start, new, stat);
158	return __xfs_inobt_alloc_block(cur, start, new, stat,
159			XFS_AG_RESV_METADATA);
160}
161
162STATIC int
163__xfs_inobt_free_block(
164	struct xfs_btree_cur	*cur,
165	struct xfs_buf		*bp,
166	enum xfs_ag_resv_type	resv)
167{
168	xfs_fsblock_t		fsbno;
169
170	xfs_inobt_mod_blockcount(cur, -1);
171	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
172	return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
173			&XFS_RMAP_OINFO_INOBT, resv, 0);
174}
175
176STATIC int
177xfs_inobt_free_block(
178	struct xfs_btree_cur	*cur,
179	struct xfs_buf		*bp)
180{
181	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
182}
183
184STATIC int
185xfs_finobt_free_block(
186	struct xfs_btree_cur	*cur,
187	struct xfs_buf		*bp)
188{
189	if (cur->bc_mp->m_finobt_nores)
190		return xfs_inobt_free_block(cur, bp);
191	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
192}
193
194STATIC int
195xfs_inobt_get_maxrecs(
196	struct xfs_btree_cur	*cur,
197	int			level)
198{
199	return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
200}
201
202STATIC void
203xfs_inobt_init_key_from_rec(
204	union xfs_btree_key		*key,
205	const union xfs_btree_rec	*rec)
206{
207	key->inobt.ir_startino = rec->inobt.ir_startino;
208}
209
210STATIC void
211xfs_inobt_init_high_key_from_rec(
212	union xfs_btree_key		*key,
213	const union xfs_btree_rec	*rec)
214{
215	__u32				x;
216
217	x = be32_to_cpu(rec->inobt.ir_startino);
218	x += XFS_INODES_PER_CHUNK - 1;
219	key->inobt.ir_startino = cpu_to_be32(x);
220}
221
222STATIC void
223xfs_inobt_init_rec_from_cur(
224	struct xfs_btree_cur	*cur,
225	union xfs_btree_rec	*rec)
226{
227	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
228	if (xfs_has_sparseinodes(cur->bc_mp)) {
229		rec->inobt.ir_u.sp.ir_holemask =
230					cpu_to_be16(cur->bc_rec.i.ir_holemask);
231		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
232		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
233	} else {
234		/* ir_holemask/ir_count not supported on-disk */
235		rec->inobt.ir_u.f.ir_freecount =
236					cpu_to_be32(cur->bc_rec.i.ir_freecount);
237	}
238	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
239}
240
241/*
242 * initial value of ptr for lookup
243 */
244STATIC void
245xfs_inobt_init_ptr_from_cur(
246	struct xfs_btree_cur	*cur,
247	union xfs_btree_ptr	*ptr)
248{
249	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;
250
251	ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agi->agi_seqno));
252
253	ptr->s = agi->agi_root;
254}
255
256STATIC void
257xfs_finobt_init_ptr_from_cur(
258	struct xfs_btree_cur	*cur,
259	union xfs_btree_ptr	*ptr)
260{
261	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;
262
263	ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agi->agi_seqno));
264
265	ptr->s = agi->agi_free_root;
266}
267
268STATIC int64_t
269xfs_inobt_key_diff(
270	struct xfs_btree_cur		*cur,
271	const union xfs_btree_key	*key)
272{
273	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
274			  cur->bc_rec.i.ir_startino;
275}
276
277STATIC int64_t
278xfs_inobt_diff_two_keys(
279	struct xfs_btree_cur		*cur,
280	const union xfs_btree_key	*k1,
281	const union xfs_btree_key	*k2,
282	const union xfs_btree_key	*mask)
283{
284	ASSERT(!mask || mask->inobt.ir_startino);
285
286	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
287			be32_to_cpu(k2->inobt.ir_startino);
288}
289
290static xfs_failaddr_t
291xfs_inobt_verify(
292	struct xfs_buf		*bp)
293{
294	struct xfs_mount	*mp = bp->b_mount;
295	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
296	xfs_failaddr_t		fa;
297	unsigned int		level;
298
299	if (!xfs_verify_magic(bp, block->bb_magic))
300		return __this_address;
301
302	/*
303	 * During growfs operations, we can't verify the exact owner as the
304	 * perag is not fully initialised and hence not attached to the buffer.
305	 *
306	 * Similarly, during log recovery we will have a perag structure
307	 * attached, but the agi information will not yet have been initialised
308	 * from the on disk AGI. We don't currently use any of this information,
309	 * but beware of the landmine (i.e. need to check
310	 * xfs_perag_initialised_agi(pag)) if we ever do.
311	 */
312	if (xfs_has_crc(mp)) {
313		fa = xfs_btree_agblock_v5hdr_verify(bp);
314		if (fa)
315			return fa;
316	}
317
318	/* level verification */
319	level = be16_to_cpu(block->bb_level);
320	if (level >= M_IGEO(mp)->inobt_maxlevels)
321		return __this_address;
322
323	return xfs_btree_agblock_verify(bp,
324			M_IGEO(mp)->inobt_mxr[level != 0]);
325}
326
327static void
328xfs_inobt_read_verify(
329	struct xfs_buf	*bp)
330{
331	xfs_failaddr_t	fa;
332
333	if (!xfs_btree_agblock_verify_crc(bp))
334		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
335	else {
336		fa = xfs_inobt_verify(bp);
337		if (fa)
338			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
339	}
340
341	if (bp->b_error)
342		trace_xfs_btree_corrupt(bp, _RET_IP_);
343}
344
345static void
346xfs_inobt_write_verify(
347	struct xfs_buf	*bp)
348{
349	xfs_failaddr_t	fa;
350
351	fa = xfs_inobt_verify(bp);
352	if (fa) {
353		trace_xfs_btree_corrupt(bp, _RET_IP_);
354		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
355		return;
356	}
357	xfs_btree_agblock_calc_crc(bp);
358
359}
360
361const struct xfs_buf_ops xfs_inobt_buf_ops = {
362	.name = "xfs_inobt",
363	.magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) },
364	.verify_read = xfs_inobt_read_verify,
365	.verify_write = xfs_inobt_write_verify,
366	.verify_struct = xfs_inobt_verify,
367};
368
369const struct xfs_buf_ops xfs_finobt_buf_ops = {
370	.name = "xfs_finobt",
371	.magic = { cpu_to_be32(XFS_FIBT_MAGIC),
372		   cpu_to_be32(XFS_FIBT_CRC_MAGIC) },
373	.verify_read = xfs_inobt_read_verify,
374	.verify_write = xfs_inobt_write_verify,
375	.verify_struct = xfs_inobt_verify,
376};
377
378STATIC int
379xfs_inobt_keys_inorder(
380	struct xfs_btree_cur		*cur,
381	const union xfs_btree_key	*k1,
382	const union xfs_btree_key	*k2)
383{
384	return be32_to_cpu(k1->inobt.ir_startino) <
385		be32_to_cpu(k2->inobt.ir_startino);
386}
387
388STATIC int
389xfs_inobt_recs_inorder(
390	struct xfs_btree_cur		*cur,
391	const union xfs_btree_rec	*r1,
392	const union xfs_btree_rec	*r2)
393{
394	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
395		be32_to_cpu(r2->inobt.ir_startino);
396}
397
398STATIC enum xbtree_key_contig
399xfs_inobt_keys_contiguous(
400	struct xfs_btree_cur		*cur,
401	const union xfs_btree_key	*key1,
402	const union xfs_btree_key	*key2,
403	const union xfs_btree_key	*mask)
404{
405	ASSERT(!mask || mask->inobt.ir_startino);
406
407	return xbtree_key_contig(be32_to_cpu(key1->inobt.ir_startino),
408				 be32_to_cpu(key2->inobt.ir_startino));
409}
410
411const struct xfs_btree_ops xfs_inobt_ops = {
412	.name			= "ino",
413	.type			= XFS_BTREE_TYPE_AG,
414
415	.rec_len		= sizeof(xfs_inobt_rec_t),
416	.key_len		= sizeof(xfs_inobt_key_t),
417	.ptr_len		= XFS_BTREE_SHORT_PTR_LEN,
418
419	.lru_refs		= XFS_INO_BTREE_REF,
420	.statoff		= XFS_STATS_CALC_INDEX(xs_ibt_2),
421	.sick_mask		= XFS_SICK_AG_INOBT,
422
423	.dup_cursor		= xfs_inobt_dup_cursor,
424	.set_root		= xfs_inobt_set_root,
425	.alloc_block		= xfs_inobt_alloc_block,
426	.free_block		= xfs_inobt_free_block,
427	.get_minrecs		= xfs_inobt_get_minrecs,
428	.get_maxrecs		= xfs_inobt_get_maxrecs,
429	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
430	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
431	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
432	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
433	.key_diff		= xfs_inobt_key_diff,
434	.buf_ops		= &xfs_inobt_buf_ops,
435	.diff_two_keys		= xfs_inobt_diff_two_keys,
436	.keys_inorder		= xfs_inobt_keys_inorder,
437	.recs_inorder		= xfs_inobt_recs_inorder,
438	.keys_contiguous	= xfs_inobt_keys_contiguous,
439};
440
441const struct xfs_btree_ops xfs_finobt_ops = {
442	.name			= "fino",
443	.type			= XFS_BTREE_TYPE_AG,
444
445	.rec_len		= sizeof(xfs_inobt_rec_t),
446	.key_len		= sizeof(xfs_inobt_key_t),
447	.ptr_len		= XFS_BTREE_SHORT_PTR_LEN,
448
449	.lru_refs		= XFS_INO_BTREE_REF,
450	.statoff		= XFS_STATS_CALC_INDEX(xs_fibt_2),
451	.sick_mask		= XFS_SICK_AG_FINOBT,
452
453	.dup_cursor		= xfs_finobt_dup_cursor,
454	.set_root		= xfs_finobt_set_root,
455	.alloc_block		= xfs_finobt_alloc_block,
456	.free_block		= xfs_finobt_free_block,
457	.get_minrecs		= xfs_inobt_get_minrecs,
458	.get_maxrecs		= xfs_inobt_get_maxrecs,
459	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
460	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
461	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
462	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
463	.key_diff		= xfs_inobt_key_diff,
464	.buf_ops		= &xfs_finobt_buf_ops,
465	.diff_two_keys		= xfs_inobt_diff_two_keys,
466	.keys_inorder		= xfs_inobt_keys_inorder,
467	.recs_inorder		= xfs_inobt_recs_inorder,
468	.keys_contiguous	= xfs_inobt_keys_contiguous,
469};
470
471/*
472 * Create an inode btree cursor.
473 *
474 * For staging cursors tp and agbp are NULL.
475 */
476struct xfs_btree_cur *
477xfs_inobt_init_cursor(
478	struct xfs_perag	*pag,
479	struct xfs_trans	*tp,
480	struct xfs_buf		*agbp)
 
481{
482	struct xfs_mount	*mp = pag_mount(pag);
483	struct xfs_btree_cur	*cur;
484
485	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_inobt_ops,
486			M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
487	cur->bc_group = xfs_group_hold(pag_group(pag));
488	cur->bc_ag.agbp = agbp;
489	if (agbp) {
490		struct xfs_agi		*agi = agbp->b_addr;
491
492		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
 
 
493	}
 
 
 
 
 
 
 
494	return cur;
495}
496
497/*
498 * Create a free inode btree cursor.
499 *
500 * For staging cursors tp and agbp are NULL.
501 */
502struct xfs_btree_cur *
503xfs_finobt_init_cursor(
504	struct xfs_perag	*pag,
505	struct xfs_trans	*tp,
506	struct xfs_buf		*agbp)
 
 
507{
508	struct xfs_mount	*mp = pag_mount(pag);
509	struct xfs_btree_cur	*cur;
 
510
511	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_finobt_ops,
512			M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
513	cur->bc_group = xfs_group_hold(pag_group(pag));
 
 
514	cur->bc_ag.agbp = agbp;
515	if (agbp) {
516		struct xfs_agi		*agi = agbp->b_addr;
517
518		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
519	}
 
 
 
 
 
 
 
 
 
 
520	return cur;
521}
522
523/*
524 * Install a new inobt btree root.  Caller is responsible for invalidating
525 * and freeing the old btree blocks.
526 */
527void
528xfs_inobt_commit_staged_btree(
529	struct xfs_btree_cur	*cur,
530	struct xfs_trans	*tp,
531	struct xfs_buf		*agbp)
532{
533	struct xfs_agi		*agi = agbp->b_addr;
534	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
535	int			fields;
536
537	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
538
539	if (xfs_btree_is_ino(cur->bc_ops)) {
540		fields = XFS_AGI_ROOT | XFS_AGI_LEVEL;
541		agi->agi_root = cpu_to_be32(afake->af_root);
542		agi->agi_level = cpu_to_be32(afake->af_levels);
543		if (xfs_has_inobtcounts(cur->bc_mp)) {
544			agi->agi_iblocks = cpu_to_be32(afake->af_blocks);
545			fields |= XFS_AGI_IBLOCKS;
546		}
547		xfs_ialloc_log_agi(tp, agbp, fields);
548		xfs_btree_commit_afakeroot(cur, tp, agbp);
549	} else {
550		fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL;
551		agi->agi_free_root = cpu_to_be32(afake->af_root);
552		agi->agi_free_level = cpu_to_be32(afake->af_levels);
553		if (xfs_has_inobtcounts(cur->bc_mp)) {
554			agi->agi_fblocks = cpu_to_be32(afake->af_blocks);
555			fields |= XFS_AGI_IBLOCKS;
556		}
557		xfs_ialloc_log_agi(tp, agbp, fields);
558		xfs_btree_commit_afakeroot(cur, tp, agbp);
559	}
560}
561
562/* Calculate number of records in an inode btree block. */
563static inline unsigned int
564xfs_inobt_block_maxrecs(
565	unsigned int		blocklen,
566	bool			leaf)
567{
568	if (leaf)
569		return blocklen / sizeof(xfs_inobt_rec_t);
570	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
571}
572
573/*
574 * Calculate number of records in an inobt btree block.
575 */
576unsigned int
577xfs_inobt_maxrecs(
578	struct xfs_mount	*mp,
579	unsigned int		blocklen,
580	bool			leaf)
581{
582	blocklen -= XFS_INOBT_BLOCK_LEN(mp);
583	return xfs_inobt_block_maxrecs(blocklen, leaf);
584}
585
586/*
587 * Maximum number of inode btree records per AG.  Pretend that we can fill an
588 * entire AG completely full of inodes except for the AG headers.
589 */
590#define XFS_MAX_INODE_RECORDS \
591	((XFS_MAX_AG_BYTES - (4 * BBSIZE)) / XFS_DINODE_MIN_SIZE) / \
592			XFS_INODES_PER_CHUNK
593
594/* Compute the max possible height for the inode btree. */
595static inline unsigned int
596xfs_inobt_maxlevels_ondisk(void)
597{
598	unsigned int		minrecs[2];
599	unsigned int		blocklen;
600
601	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
602		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);
603
604	minrecs[0] = xfs_inobt_block_maxrecs(blocklen, true) / 2;
605	minrecs[1] = xfs_inobt_block_maxrecs(blocklen, false) / 2;
606
607	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_INODE_RECORDS);
608}
609
610/* Compute the max possible height for the free inode btree. */
611static inline unsigned int
612xfs_finobt_maxlevels_ondisk(void)
613{
614	unsigned int		minrecs[2];
615	unsigned int		blocklen;
616
617	blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN;
618
619	minrecs[0] = xfs_inobt_block_maxrecs(blocklen, true) / 2;
620	minrecs[1] = xfs_inobt_block_maxrecs(blocklen, false) / 2;
621
622	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_INODE_RECORDS);
623}
624
625/* Compute the max possible height for either inode btree. */
626unsigned int
627xfs_iallocbt_maxlevels_ondisk(void)
628{
629	return max(xfs_inobt_maxlevels_ondisk(),
630		   xfs_finobt_maxlevels_ondisk());
631}
632
633/*
634 * Convert the inode record holemask to an inode allocation bitmap. The inode
635 * allocation bitmap is inode granularity and specifies whether an inode is
636 * physically allocated on disk (not whether the inode is considered allocated
637 * or free by the fs).
638 *
639 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
640 */
641uint64_t
642xfs_inobt_irec_to_allocmask(
643	const struct xfs_inobt_rec_incore	*rec)
644{
645	uint64_t			bitmap = 0;
646	uint64_t			inodespbit;
647	int				nextbit;
648	uint				allocbitmap;
649
650	/*
651	 * The holemask has 16-bits for a 64 inode record. Therefore each
652	 * holemask bit represents multiple inodes. Create a mask of bits to set
653	 * in the allocmask for each holemask bit.
654	 */
655	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
656
657	/*
658	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
659	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
660	 * anything beyond the 16 holemask bits since this casts to a larger
661	 * type.
662	 */
663	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);
664
665	/*
666	 * allocbitmap is the inverted holemask so every set bit represents
667	 * allocated inodes. To expand from 16-bit holemask granularity to
668	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
669	 * bitmap for every holemask bit.
670	 */
671	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
672	while (nextbit != -1) {
673		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));
674
675		bitmap |= (inodespbit <<
676			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));
677
678		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
679	}
680
681	return bitmap;
682}
683
684#if defined(DEBUG) || defined(XFS_WARN)
685/*
686 * Verify that an in-core inode record has a valid inode count.
687 */
688int
689xfs_inobt_rec_check_count(
690	struct xfs_mount		*mp,
691	struct xfs_inobt_rec_incore	*rec)
692{
693	int				inocount = 0;
694	int				nextbit = 0;
695	uint64_t			allocbmap;
696	int				wordsz;
697
698	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
699	allocbmap = xfs_inobt_irec_to_allocmask(rec);
700
701	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
702	while (nextbit != -1) {
703		inocount++;
704		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
705				       nextbit + 1);
706	}
707
708	if (inocount != rec->ir_count)
709		return -EFSCORRUPTED;
710
711	return 0;
712}
713#endif	/* DEBUG */
714
715static xfs_extlen_t
716xfs_inobt_max_size(
717	struct xfs_perag	*pag)
 
718{
719	struct xfs_mount	*mp = pag_mount(pag);
720	xfs_agblock_t		agblocks = pag_group(pag)->xg_block_count;
721
722	/* Bail out if we're uninitialized, which can happen in mkfs. */
723	if (M_IGEO(mp)->inobt_mxr[0] == 0)
724		return 0;
725
726	/*
727	 * The log is permanently allocated, so the space it occupies will
728	 * never be available for the kinds of things that would require btree
729	 * expansion.  We therefore can pretend the space isn't there.
730	 */
731	if (xfs_ag_contains_log(mp, pag_agno(pag)))
 
732		agblocks -= mp->m_sb.sb_logblocks;
733
734	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
735				(uint64_t)agblocks * mp->m_sb.sb_inopblock /
736					XFS_INODES_PER_CHUNK);
737}
738
739static int
740xfs_finobt_count_blocks(
741	struct xfs_perag	*pag,
 
742	struct xfs_trans	*tp,
743	xfs_extlen_t		*tree_blocks)
 
 
 
744{
745	struct xfs_buf		*agbp = NULL;
746	struct xfs_btree_cur	*cur;
747	xfs_filblks_t		blocks;
748	int			error;
749
750	error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
 
 
 
751	if (error)
752		return error;
753
754	cur = xfs_finobt_init_cursor(pag, tp, agbp);
755	error = xfs_btree_count_blocks(cur, &blocks);
756	xfs_btree_del_cursor(cur, error);
757	xfs_trans_brelse(tp, agbp);
758	*tree_blocks = blocks;
759
760	return error;
 
761}
762
763/* Read finobt block count from AGI header. */
764static int
765xfs_finobt_read_blocks(
766	struct xfs_perag	*pag,
767	struct xfs_trans	*tp,
 
 
768	xfs_extlen_t		*tree_blocks)
769{
770	struct xfs_buf		*agbp;
771	struct xfs_agi		*agi;
772	int			error;
773
774	error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
775	if (error)
776		return error;
777
778	agi = agbp->b_addr;
779	*tree_blocks = be32_to_cpu(agi->agi_fblocks);
780	xfs_trans_brelse(tp, agbp);
781	return 0;
 
782}
783
784/*
785 * Figure out how many blocks to reserve and how many are used by this btree.
786 */
787int
788xfs_finobt_calc_reserves(
789	struct xfs_perag	*pag,
790	struct xfs_trans	*tp,
 
791	xfs_extlen_t		*ask,
792	xfs_extlen_t		*used)
793{
794	xfs_extlen_t		tree_len = 0;
795	int			error;
796
797	if (!xfs_has_finobt(pag_mount(pag)))
798		return 0;
799
800	if (xfs_has_inobtcounts(pag_mount(pag)))
801		error = xfs_finobt_read_blocks(pag, tp, &tree_len);
802	else
803		error = xfs_finobt_count_blocks(pag, tp, &tree_len);
804	if (error)
805		return error;
806
807	*ask += xfs_inobt_max_size(pag);
808	*used += tree_len;
809	return 0;
810}
811
812/* Calculate the inobt btree size for some records. */
813xfs_extlen_t
814xfs_iallocbt_calc_size(
815	struct xfs_mount	*mp,
816	unsigned long long	len)
817{
818	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
819}
820
821int __init
822xfs_inobt_init_cur_cache(void)
823{
824	xfs_inobt_cur_cache = kmem_cache_create("xfs_inobt_cur",
825			xfs_btree_cur_sizeof(xfs_inobt_maxlevels_ondisk()),
826			0, 0, NULL);
827
828	if (!xfs_inobt_cur_cache)
829		return -ENOMEM;
830	return 0;
831}
832
833void
834xfs_inobt_destroy_cur_cache(void)
835{
836	kmem_cache_destroy(xfs_inobt_cur_cache);
837	xfs_inobt_cur_cache = NULL;
838}
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
  4 * All Rights Reserved.
  5 */
  6#include "xfs.h"
  7#include "xfs_fs.h"
  8#include "xfs_shared.h"
  9#include "xfs_format.h"
 10#include "xfs_log_format.h"
 11#include "xfs_trans_resv.h"
 12#include "xfs_bit.h"
 13#include "xfs_mount.h"
 14#include "xfs_btree.h"
 15#include "xfs_btree_staging.h"
 16#include "xfs_ialloc.h"
 17#include "xfs_ialloc_btree.h"
 18#include "xfs_alloc.h"
 19#include "xfs_error.h"
 
 20#include "xfs_trace.h"
 21#include "xfs_trans.h"
 22#include "xfs_rmap.h"
 
 
 
 23
 24STATIC int
 25xfs_inobt_get_minrecs(
 26	struct xfs_btree_cur	*cur,
 27	int			level)
 28{
 29	return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
 30}
 31
 32STATIC struct xfs_btree_cur *
 33xfs_inobt_dup_cursor(
 34	struct xfs_btree_cur	*cur)
 35{
 36	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
 37			cur->bc_ag.agbp, cur->bc_ag.agno,
 38			cur->bc_btnum);
 
 
 
 
 
 
 
 39}
 40
 41STATIC void
 42xfs_inobt_set_root(
 43	struct xfs_btree_cur	*cur,
 44	union xfs_btree_ptr	*nptr,
 45	int			inc)	/* level change */
 46{
 47	struct xfs_buf		*agbp = cur->bc_ag.agbp;
 48	struct xfs_agi		*agi = agbp->b_addr;
 49
 50	agi->agi_root = nptr->s;
 51	be32_add_cpu(&agi->agi_level, inc);
 52	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
 53}
 54
 55STATIC void
 56xfs_finobt_set_root(
 57	struct xfs_btree_cur	*cur,
 58	union xfs_btree_ptr	*nptr,
 59	int			inc)	/* level change */
 60{
 61	struct xfs_buf		*agbp = cur->bc_ag.agbp;
 62	struct xfs_agi		*agi = agbp->b_addr;
 63
 64	agi->agi_free_root = nptr->s;
 65	be32_add_cpu(&agi->agi_free_level, inc);
 66	xfs_ialloc_log_agi(cur->bc_tp, agbp,
 67			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
 68}
 69
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 70STATIC int
 71__xfs_inobt_alloc_block(
 72	struct xfs_btree_cur	*cur,
 73	union xfs_btree_ptr	*start,
 74	union xfs_btree_ptr	*new,
 75	int			*stat,
 76	enum xfs_ag_resv_type	resv)
 77{
 78	xfs_alloc_arg_t		args;		/* block allocation args */
 79	int			error;		/* error return value */
 80	xfs_agblock_t		sbno = be32_to_cpu(start->s);
 81
 82	memset(&args, 0, sizeof(args));
 83	args.tp = cur->bc_tp;
 84	args.mp = cur->bc_mp;
 
 85	args.oinfo = XFS_RMAP_OINFO_INOBT;
 86	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.agno, sbno);
 87	args.minlen = 1;
 88	args.maxlen = 1;
 89	args.prod = 1;
 90	args.type = XFS_ALLOCTYPE_NEAR_BNO;
 91	args.resv = resv;
 92
 93	error = xfs_alloc_vextent(&args);
 
 94	if (error)
 95		return error;
 96
 97	if (args.fsbno == NULLFSBLOCK) {
 98		*stat = 0;
 99		return 0;
100	}
101	ASSERT(args.len == 1);
102
103	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
104	*stat = 1;
 
105	return 0;
106}
107
108STATIC int
109xfs_inobt_alloc_block(
110	struct xfs_btree_cur	*cur,
111	union xfs_btree_ptr	*start,
112	union xfs_btree_ptr	*new,
113	int			*stat)
114{
115	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
116}
117
118STATIC int
119xfs_finobt_alloc_block(
120	struct xfs_btree_cur	*cur,
121	union xfs_btree_ptr	*start,
122	union xfs_btree_ptr	*new,
123	int			*stat)
124{
125	if (cur->bc_mp->m_finobt_nores)
126		return xfs_inobt_alloc_block(cur, start, new, stat);
127	return __xfs_inobt_alloc_block(cur, start, new, stat,
128			XFS_AG_RESV_METADATA);
129}
130
131STATIC int
132__xfs_inobt_free_block(
133	struct xfs_btree_cur	*cur,
134	struct xfs_buf		*bp,
135	enum xfs_ag_resv_type	resv)
136{
137	return xfs_free_extent(cur->bc_tp,
138			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
139			&XFS_RMAP_OINFO_INOBT, resv);
 
 
 
140}
141
142STATIC int
143xfs_inobt_free_block(
144	struct xfs_btree_cur	*cur,
145	struct xfs_buf		*bp)
146{
147	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
148}
149
150STATIC int
151xfs_finobt_free_block(
152	struct xfs_btree_cur	*cur,
153	struct xfs_buf		*bp)
154{
155	if (cur->bc_mp->m_finobt_nores)
156		return xfs_inobt_free_block(cur, bp);
157	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
158}
159
160STATIC int
161xfs_inobt_get_maxrecs(
162	struct xfs_btree_cur	*cur,
163	int			level)
164{
165	return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
166}
167
168STATIC void
169xfs_inobt_init_key_from_rec(
170	union xfs_btree_key	*key,
171	union xfs_btree_rec	*rec)
172{
173	key->inobt.ir_startino = rec->inobt.ir_startino;
174}
175
176STATIC void
177xfs_inobt_init_high_key_from_rec(
178	union xfs_btree_key	*key,
179	union xfs_btree_rec	*rec)
180{
181	__u32			x;
182
183	x = be32_to_cpu(rec->inobt.ir_startino);
184	x += XFS_INODES_PER_CHUNK - 1;
185	key->inobt.ir_startino = cpu_to_be32(x);
186}
187
188STATIC void
189xfs_inobt_init_rec_from_cur(
190	struct xfs_btree_cur	*cur,
191	union xfs_btree_rec	*rec)
192{
193	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
194	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
195		rec->inobt.ir_u.sp.ir_holemask =
196					cpu_to_be16(cur->bc_rec.i.ir_holemask);
197		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
198		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
199	} else {
200		/* ir_holemask/ir_count not supported on-disk */
201		rec->inobt.ir_u.f.ir_freecount =
202					cpu_to_be32(cur->bc_rec.i.ir_freecount);
203	}
204	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
205}
206
207/*
208 * initial value of ptr for lookup
209 */
210STATIC void
211xfs_inobt_init_ptr_from_cur(
212	struct xfs_btree_cur	*cur,
213	union xfs_btree_ptr	*ptr)
214{
215	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;
216
217	ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));
218
219	ptr->s = agi->agi_root;
220}
221
222STATIC void
223xfs_finobt_init_ptr_from_cur(
224	struct xfs_btree_cur	*cur,
225	union xfs_btree_ptr	*ptr)
226{
227	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;
228
229	ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));
 
230	ptr->s = agi->agi_free_root;
231}
232
233STATIC int64_t
234xfs_inobt_key_diff(
235	struct xfs_btree_cur	*cur,
236	union xfs_btree_key	*key)
237{
238	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
239			  cur->bc_rec.i.ir_startino;
240}
241
242STATIC int64_t
243xfs_inobt_diff_two_keys(
244	struct xfs_btree_cur	*cur,
245	union xfs_btree_key	*k1,
246	union xfs_btree_key	*k2)
 
247{
 
 
248	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
249			  be32_to_cpu(k2->inobt.ir_startino);
250}
251
252static xfs_failaddr_t
253xfs_inobt_verify(
254	struct xfs_buf		*bp)
255{
256	struct xfs_mount	*mp = bp->b_mount;
257	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
258	xfs_failaddr_t		fa;
259	unsigned int		level;
260
261	if (!xfs_verify_magic(bp, block->bb_magic))
262		return __this_address;
263
264	/*
265	 * During growfs operations, we can't verify the exact owner as the
266	 * perag is not fully initialised and hence not attached to the buffer.
267	 *
268	 * Similarly, during log recovery we will have a perag structure
269	 * attached, but the agi information will not yet have been initialised
270	 * from the on disk AGI. We don't currently use any of this information,
271	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
272	 * ever do.
273	 */
274	if (xfs_sb_version_hascrc(&mp->m_sb)) {
275		fa = xfs_btree_sblock_v5hdr_verify(bp);
276		if (fa)
277			return fa;
278	}
279
280	/* level verification */
281	level = be16_to_cpu(block->bb_level);
282	if (level >= M_IGEO(mp)->inobt_maxlevels)
283		return __this_address;
284
285	return xfs_btree_sblock_verify(bp,
286			M_IGEO(mp)->inobt_mxr[level != 0]);
287}
288
289static void
290xfs_inobt_read_verify(
291	struct xfs_buf	*bp)
292{
293	xfs_failaddr_t	fa;
294
295	if (!xfs_btree_sblock_verify_crc(bp))
296		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
297	else {
298		fa = xfs_inobt_verify(bp);
299		if (fa)
300			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
301	}
302
303	if (bp->b_error)
304		trace_xfs_btree_corrupt(bp, _RET_IP_);
305}
306
307static void
308xfs_inobt_write_verify(
309	struct xfs_buf	*bp)
310{
311	xfs_failaddr_t	fa;
312
313	fa = xfs_inobt_verify(bp);
314	if (fa) {
315		trace_xfs_btree_corrupt(bp, _RET_IP_);
316		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
317		return;
318	}
319	xfs_btree_sblock_calc_crc(bp);
320
321}
322
323const struct xfs_buf_ops xfs_inobt_buf_ops = {
324	.name = "xfs_inobt",
325	.magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) },
326	.verify_read = xfs_inobt_read_verify,
327	.verify_write = xfs_inobt_write_verify,
328	.verify_struct = xfs_inobt_verify,
329};
330
331const struct xfs_buf_ops xfs_finobt_buf_ops = {
332	.name = "xfs_finobt",
333	.magic = { cpu_to_be32(XFS_FIBT_MAGIC),
334		   cpu_to_be32(XFS_FIBT_CRC_MAGIC) },
335	.verify_read = xfs_inobt_read_verify,
336	.verify_write = xfs_inobt_write_verify,
337	.verify_struct = xfs_inobt_verify,
338};
339
340STATIC int
341xfs_inobt_keys_inorder(
342	struct xfs_btree_cur	*cur,
343	union xfs_btree_key	*k1,
344	union xfs_btree_key	*k2)
345{
346	return be32_to_cpu(k1->inobt.ir_startino) <
347		be32_to_cpu(k2->inobt.ir_startino);
348}
349
350STATIC int
351xfs_inobt_recs_inorder(
352	struct xfs_btree_cur	*cur,
353	union xfs_btree_rec	*r1,
354	union xfs_btree_rec	*r2)
355{
356	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
357		be32_to_cpu(r2->inobt.ir_startino);
358}
359
360static const struct xfs_btree_ops xfs_inobt_ops = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
361	.rec_len		= sizeof(xfs_inobt_rec_t),
362	.key_len		= sizeof(xfs_inobt_key_t),
 
 
 
 
 
363
364	.dup_cursor		= xfs_inobt_dup_cursor,
365	.set_root		= xfs_inobt_set_root,
366	.alloc_block		= xfs_inobt_alloc_block,
367	.free_block		= xfs_inobt_free_block,
368	.get_minrecs		= xfs_inobt_get_minrecs,
369	.get_maxrecs		= xfs_inobt_get_maxrecs,
370	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
371	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
372	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
373	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
374	.key_diff		= xfs_inobt_key_diff,
375	.buf_ops		= &xfs_inobt_buf_ops,
376	.diff_two_keys		= xfs_inobt_diff_two_keys,
377	.keys_inorder		= xfs_inobt_keys_inorder,
378	.recs_inorder		= xfs_inobt_recs_inorder,
 
379};
380
381static const struct xfs_btree_ops xfs_finobt_ops = {
 
 
 
382	.rec_len		= sizeof(xfs_inobt_rec_t),
383	.key_len		= sizeof(xfs_inobt_key_t),
 
384
385	.dup_cursor		= xfs_inobt_dup_cursor,
 
 
 
 
386	.set_root		= xfs_finobt_set_root,
387	.alloc_block		= xfs_finobt_alloc_block,
388	.free_block		= xfs_finobt_free_block,
389	.get_minrecs		= xfs_inobt_get_minrecs,
390	.get_maxrecs		= xfs_inobt_get_maxrecs,
391	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
392	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
393	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
394	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
395	.key_diff		= xfs_inobt_key_diff,
396	.buf_ops		= &xfs_finobt_buf_ops,
397	.diff_two_keys		= xfs_inobt_diff_two_keys,
398	.keys_inorder		= xfs_inobt_keys_inorder,
399	.recs_inorder		= xfs_inobt_recs_inorder,
 
400};
401
402/*
403 * Initialize a new inode btree cursor.
 
 
404 */
405static struct xfs_btree_cur *
406xfs_inobt_init_common(
407	struct xfs_mount	*mp,		/* file system mount point */
408	struct xfs_trans	*tp,		/* transaction pointer */
409	xfs_agnumber_t		agno,		/* allocation group number */
410	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
411{
 
412	struct xfs_btree_cur	*cur;
413
414	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
415	cur->bc_tp = tp;
416	cur->bc_mp = mp;
417	cur->bc_btnum = btnum;
418	if (btnum == XFS_BTNUM_INO) {
419		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
420		cur->bc_ops = &xfs_inobt_ops;
421	} else {
422		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
423		cur->bc_ops = &xfs_finobt_ops;
424	}
425
426	cur->bc_blocklog = mp->m_sb.sb_blocklog;
427
428	if (xfs_sb_version_hascrc(&mp->m_sb))
429		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
430
431	cur->bc_ag.agno = agno;
432	return cur;
433}
434
435/* Create an inode btree cursor. */
 
 
 
 
436struct xfs_btree_cur *
437xfs_inobt_init_cursor(
438	struct xfs_mount	*mp,
439	struct xfs_trans	*tp,
440	struct xfs_buf		*agbp,
441	xfs_agnumber_t		agno,
442	xfs_btnum_t		btnum)
443{
 
444	struct xfs_btree_cur	*cur;
445	struct xfs_agi		*agi = agbp->b_addr;
446
447	cur = xfs_inobt_init_common(mp, tp, agno, btnum);
448	if (btnum == XFS_BTNUM_INO)
449		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
450	else
451		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
452	cur->bc_ag.agbp = agbp;
453	return cur;
454}
455
456/* Create an inode btree cursor with a fake root for staging. */
457struct xfs_btree_cur *
458xfs_inobt_stage_cursor(
459	struct xfs_mount	*mp,
460	struct xbtree_afakeroot	*afake,
461	xfs_agnumber_t		agno,
462	xfs_btnum_t		btnum)
463{
464	struct xfs_btree_cur	*cur;
465
466	cur = xfs_inobt_init_common(mp, NULL, agno, btnum);
467	xfs_btree_stage_afakeroot(cur, afake);
468	return cur;
469}
470
471/*
472 * Install a new inobt btree root.  Caller is responsible for invalidating
473 * and freeing the old btree blocks.
474 */
475void
476xfs_inobt_commit_staged_btree(
477	struct xfs_btree_cur	*cur,
478	struct xfs_trans	*tp,
479	struct xfs_buf		*agbp)
480{
481	struct xfs_agi		*agi = agbp->b_addr;
482	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
 
483
484	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
485
486	if (cur->bc_btnum == XFS_BTNUM_INO) {
 
487		agi->agi_root = cpu_to_be32(afake->af_root);
488		agi->agi_level = cpu_to_be32(afake->af_levels);
489		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
490		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_inobt_ops);
 
 
 
 
491	} else {
 
492		agi->agi_free_root = cpu_to_be32(afake->af_root);
493		agi->agi_free_level = cpu_to_be32(afake->af_levels);
494		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREE_ROOT |
495					     XFS_AGI_FREE_LEVEL);
496		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_finobt_ops);
 
 
 
497	}
498}
499
 
 
 
 
 
 
 
 
 
 
 
500/*
501 * Calculate number of records in an inobt btree block.
502 */
503int
504xfs_inobt_maxrecs(
505	struct xfs_mount	*mp,
506	int			blocklen,
507	int			leaf)
508{
509	blocklen -= XFS_INOBT_BLOCK_LEN(mp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
510
511	if (leaf)
512		return blocklen / sizeof(xfs_inobt_rec_t);
513	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
514}
515
516/*
517 * Convert the inode record holemask to an inode allocation bitmap. The inode
518 * allocation bitmap is inode granularity and specifies whether an inode is
519 * physically allocated on disk (not whether the inode is considered allocated
520 * or free by the fs).
521 *
522 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
523 */
524uint64_t
525xfs_inobt_irec_to_allocmask(
526	struct xfs_inobt_rec_incore	*rec)
527{
528	uint64_t			bitmap = 0;
529	uint64_t			inodespbit;
530	int				nextbit;
531	uint				allocbitmap;
532
533	/*
534	 * The holemask has 16-bits for a 64 inode record. Therefore each
535	 * holemask bit represents multiple inodes. Create a mask of bits to set
536	 * in the allocmask for each holemask bit.
537	 */
538	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
539
540	/*
541	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
542	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
543	 * anything beyond the 16 holemask bits since this casts to a larger
544	 * type.
545	 */
546	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);
547
548	/*
549	 * allocbitmap is the inverted holemask so every set bit represents
550	 * allocated inodes. To expand from 16-bit holemask granularity to
551	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
552	 * bitmap for every holemask bit.
553	 */
554	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
555	while (nextbit != -1) {
556		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));
557
558		bitmap |= (inodespbit <<
559			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));
560
561		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
562	}
563
564	return bitmap;
565}
566
567#if defined(DEBUG) || defined(XFS_WARN)
568/*
569 * Verify that an in-core inode record has a valid inode count.
570 */
571int
572xfs_inobt_rec_check_count(
573	struct xfs_mount		*mp,
574	struct xfs_inobt_rec_incore	*rec)
575{
576	int				inocount = 0;
577	int				nextbit = 0;
578	uint64_t			allocbmap;
579	int				wordsz;
580
581	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
582	allocbmap = xfs_inobt_irec_to_allocmask(rec);
583
584	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
585	while (nextbit != -1) {
586		inocount++;
587		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
588				       nextbit + 1);
589	}
590
591	if (inocount != rec->ir_count)
592		return -EFSCORRUPTED;
593
594	return 0;
595}
596#endif	/* DEBUG */
597
598static xfs_extlen_t
599xfs_inobt_max_size(
600	struct xfs_mount	*mp,
601	xfs_agnumber_t		agno)
602{
603	xfs_agblock_t		agblocks = xfs_ag_block_count(mp, agno);
 
604
605	/* Bail out if we're uninitialized, which can happen in mkfs. */
606	if (M_IGEO(mp)->inobt_mxr[0] == 0)
607		return 0;
608
609	/*
610	 * The log is permanently allocated, so the space it occupies will
611	 * never be available for the kinds of things that would require btree
612	 * expansion.  We therefore can pretend the space isn't there.
613	 */
614	if (mp->m_sb.sb_logstart &&
615	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
616		agblocks -= mp->m_sb.sb_logblocks;
617
618	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
619				(uint64_t)agblocks * mp->m_sb.sb_inopblock /
620					XFS_INODES_PER_CHUNK);
621}
622
623/* Read AGI and create inobt cursor. */
624int
625xfs_inobt_cur(
626	struct xfs_mount	*mp,
627	struct xfs_trans	*tp,
628	xfs_agnumber_t		agno,
629	xfs_btnum_t		which,
630	struct xfs_btree_cur	**curpp,
631	struct xfs_buf		**agi_bpp)
632{
 
633	struct xfs_btree_cur	*cur;
 
634	int			error;
635
636	ASSERT(*agi_bpp == NULL);
637	ASSERT(*curpp == NULL);
638
639	error = xfs_ialloc_read_agi(mp, tp, agno, agi_bpp);
640	if (error)
641		return error;
642
643	cur = xfs_inobt_init_cursor(mp, tp, *agi_bpp, agno, which);
644	if (!cur) {
645		xfs_trans_brelse(tp, *agi_bpp);
646		*agi_bpp = NULL;
647		return -ENOMEM;
648	}
649	*curpp = cur;
650	return 0;
651}
652
 
653static int
654xfs_inobt_count_blocks(
655	struct xfs_mount	*mp,
656	struct xfs_trans	*tp,
657	xfs_agnumber_t		agno,
658	xfs_btnum_t		btnum,
659	xfs_extlen_t		*tree_blocks)
660{
661	struct xfs_buf		*agbp = NULL;
662	struct xfs_btree_cur	*cur = NULL;
663	int			error;
664
665	error = xfs_inobt_cur(mp, tp, agno, btnum, &cur, &agbp);
666	if (error)
667		return error;
668
669	error = xfs_btree_count_blocks(cur, tree_blocks);
670	xfs_btree_del_cursor(cur, error);
671	xfs_trans_brelse(tp, agbp);
672
673	return error;
674}
675
676/*
677 * Figure out how many blocks to reserve and how many are used by this btree.
678 */
679int
680xfs_finobt_calc_reserves(
681	struct xfs_mount	*mp,
682	struct xfs_trans	*tp,
683	xfs_agnumber_t		agno,
684	xfs_extlen_t		*ask,
685	xfs_extlen_t		*used)
686{
687	xfs_extlen_t		tree_len = 0;
688	int			error;
689
690	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
691		return 0;
692
693	error = xfs_inobt_count_blocks(mp, tp, agno, XFS_BTNUM_FINO, &tree_len);
 
 
 
694	if (error)
695		return error;
696
697	*ask += xfs_inobt_max_size(mp, agno);
698	*used += tree_len;
699	return 0;
700}
701
702/* Calculate the inobt btree size for some records. */
703xfs_extlen_t
704xfs_iallocbt_calc_size(
705	struct xfs_mount	*mp,
706	unsigned long long	len)
707{
708	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
709}