v6.8: fs/xfs/libxfs/xfs_ialloc_btree.c
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
  4 * All Rights Reserved.
  5 */
  6#include "xfs.h"
  7#include "xfs_fs.h"
  8#include "xfs_shared.h"
  9#include "xfs_format.h"
 10#include "xfs_log_format.h"
 11#include "xfs_trans_resv.h"
 12#include "xfs_bit.h"
 13#include "xfs_mount.h"
 14#include "xfs_btree.h"
 15#include "xfs_btree_staging.h"
 16#include "xfs_ialloc.h"
 17#include "xfs_ialloc_btree.h"
 18#include "xfs_alloc.h"
 19#include "xfs_error.h"
 20#include "xfs_trace.h"
 21#include "xfs_trans.h"
 22#include "xfs_rmap.h"
 23#include "xfs_ag.h"
 24
 25static struct kmem_cache	*xfs_inobt_cur_cache;
 26
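/*
 * Note: the inode geometry's inobt_mnr[]/inobt_mxr[] are two-element
 * arrays; index 0 holds the leaf limits and index 1 the node limits,
 * so indexing with "level != 0" picks the right bound for any
 * non-leaf level.
 */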
 27STATIC int
 28xfs_inobt_get_minrecs(
 29	struct xfs_btree_cur	*cur,
 30	int			level)
 31{
 32	return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
 33}
 34
 35STATIC struct xfs_btree_cur *
 36xfs_inobt_dup_cursor(
 37	struct xfs_btree_cur	*cur)
 38{
 39	return xfs_inobt_init_cursor(cur->bc_ag.pag, cur->bc_tp,
 40			cur->bc_ag.agbp, cur->bc_btnum);
 41}
 42
 43STATIC void
 44xfs_inobt_set_root(
 45	struct xfs_btree_cur		*cur,
 46	const union xfs_btree_ptr	*nptr,
 47	int				inc)	/* level change */
 48{
 49	struct xfs_buf		*agbp = cur->bc_ag.agbp;
 50	struct xfs_agi		*agi = agbp->b_addr;
 51
 52	agi->agi_root = nptr->s;
 53	be32_add_cpu(&agi->agi_level, inc);
 54	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
 55}
 56
 57STATIC void
 58xfs_finobt_set_root(
 59	struct xfs_btree_cur		*cur,
 60	const union xfs_btree_ptr	*nptr,
 61	int				inc)	/* level change */
 62{
 63	struct xfs_buf		*agbp = cur->bc_ag.agbp;
 64	struct xfs_agi		*agi = agbp->b_addr;
 65
 66	agi->agi_free_root = nptr->s;
 67	be32_add_cpu(&agi->agi_free_level, inc);
 68	xfs_ialloc_log_agi(cur->bc_tp, agbp,
 69			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
 70}
 71
 72/* Update the inode btree block counter for this btree. */
 73static inline void
 74xfs_inobt_mod_blockcount(
 75	struct xfs_btree_cur	*cur,
 76	int			howmuch)
 77{
 78	struct xfs_buf		*agbp = cur->bc_ag.agbp;
 79	struct xfs_agi		*agi = agbp->b_addr;
 80
 81	if (!xfs_has_inobtcounts(cur->bc_mp))
 82		return;
 83
 84	if (cur->bc_btnum == XFS_BTNUM_FINO)
 85		be32_add_cpu(&agi->agi_fblocks, howmuch);
 86	else if (cur->bc_btnum == XFS_BTNUM_INO)
 87		be32_add_cpu(&agi->agi_iblocks, howmuch);
 88	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS);
 89}
 90
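/*
 * Allocate exactly one block (minlen == maxlen == 1) as near as
 * possible to the hint in @start, charging it to the given AG
 * reservation type.
 */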
 91STATIC int
 92__xfs_inobt_alloc_block(
 93	struct xfs_btree_cur		*cur,
 94	const union xfs_btree_ptr	*start,
 95	union xfs_btree_ptr		*new,
 96	int				*stat,
 97	enum xfs_ag_resv_type		resv)
 98{
 99	xfs_alloc_arg_t		args;		/* block allocation args */
100	int			error;		/* error return value */
101	xfs_agblock_t		sbno = be32_to_cpu(start->s);
102
103	memset(&args, 0, sizeof(args));
104	args.tp = cur->bc_tp;
105	args.mp = cur->bc_mp;
106	args.pag = cur->bc_ag.pag;
107	args.oinfo = XFS_RMAP_OINFO_INOBT;
108	args.minlen = 1;
109	args.maxlen = 1;
110	args.prod = 1;
111	args.resv = resv;
112
113	error = xfs_alloc_vextent_near_bno(&args,
114			XFS_AGB_TO_FSB(args.mp, args.pag->pag_agno, sbno));
115	if (error)
116		return error;
117
118	if (args.fsbno == NULLFSBLOCK) {
119		*stat = 0;
120		return 0;
121	}
122	ASSERT(args.len == 1);
123
124	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
125	*stat = 1;
126	xfs_inobt_mod_blockcount(cur, 1);
127	return 0;
128}
129
130STATIC int
131xfs_inobt_alloc_block(
132	struct xfs_btree_cur		*cur,
133	const union xfs_btree_ptr	*start,
134	union xfs_btree_ptr		*new,
135	int				*stat)
136{
137	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
138}
139
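/*
 * When the mount is running without a finobt metadata reservation
 * (m_finobt_nores), fall back to the plain inobt allocation path and
 * take no AG reservation.
 */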
140STATIC int
141xfs_finobt_alloc_block(
142	struct xfs_btree_cur		*cur,
143	const union xfs_btree_ptr	*start,
144	union xfs_btree_ptr		*new,
145	int				*stat)
146{
147	if (cur->bc_mp->m_finobt_nores)
148		return xfs_inobt_alloc_block(cur, start, new, stat);
149	return __xfs_inobt_alloc_block(cur, start, new, stat,
150			XFS_AG_RESV_METADATA);
151}
152
153STATIC int
154__xfs_inobt_free_block(
155	struct xfs_btree_cur	*cur,
156	struct xfs_buf		*bp,
157	enum xfs_ag_resv_type	resv)
158{
159	xfs_fsblock_t		fsbno;
160
161	xfs_inobt_mod_blockcount(cur, -1);
162	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
163	return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
164			&XFS_RMAP_OINFO_INOBT, resv, false);
165}
166
167STATIC int
168xfs_inobt_free_block(
169	struct xfs_btree_cur	*cur,
170	struct xfs_buf		*bp)
171{
172	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
173}
174
175STATIC int
176xfs_finobt_free_block(
177	struct xfs_btree_cur	*cur,
178	struct xfs_buf		*bp)
179{
180	if (cur->bc_mp->m_finobt_nores)
181		return xfs_inobt_free_block(cur, bp);
182	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
183}
184
185STATIC int
186xfs_inobt_get_maxrecs(
187	struct xfs_btree_cur	*cur,
188	int			level)
189{
190	return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
191}
192
193STATIC void
194xfs_inobt_init_key_from_rec(
195	union xfs_btree_key		*key,
196	const union xfs_btree_rec	*rec)
197{
198	key->inobt.ir_startino = rec->inobt.ir_startino;
199}
200
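/*
 * The high key of a record is the last inode of its chunk; e.g. a
 * record with ir_startino == 128 covers inodes 128..191
 * (XFS_INODES_PER_CHUNK == 64), so its high key is 191.
 */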
201STATIC void
202xfs_inobt_init_high_key_from_rec(
203	union xfs_btree_key		*key,
204	const union xfs_btree_rec	*rec)
205{
206	__u32				x;
207
208	x = be32_to_cpu(rec->inobt.ir_startino);
209	x += XFS_INODES_PER_CHUNK - 1;
210	key->inobt.ir_startino = cpu_to_be32(x);
211}
212
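/*
 * The on-disk record union has two layouts: sparse-inode filesystems
 * keep a 16-bit holemask plus 8-bit count and freecount in ir_u.sp,
 * while non-sparse filesystems keep only a 32-bit freecount in ir_u.f.
 */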
213STATIC void
214xfs_inobt_init_rec_from_cur(
215	struct xfs_btree_cur	*cur,
216	union xfs_btree_rec	*rec)
217{
218	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
219	if (xfs_has_sparseinodes(cur->bc_mp)) {
220		rec->inobt.ir_u.sp.ir_holemask =
221					cpu_to_be16(cur->bc_rec.i.ir_holemask);
222		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
223		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
224	} else {
225		/* ir_holemask/ir_count not supported on-disk */
226		rec->inobt.ir_u.f.ir_freecount =
227					cpu_to_be32(cur->bc_rec.i.ir_freecount);
228	}
229	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
230}
231
232/*
233 * Initial value of ptr for a lookup: the root block recorded in the AGI.
234 */
235STATIC void
236xfs_inobt_init_ptr_from_cur(
237	struct xfs_btree_cur	*cur,
238	union xfs_btree_ptr	*ptr)
239{
240	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;
241
242	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
243
244	ptr->s = agi->agi_root;
245}
246
247STATIC void
248xfs_finobt_init_ptr_from_cur(
249	struct xfs_btree_cur	*cur,
250	union xfs_btree_ptr	*ptr)
251{
252	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;
253
254	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
255	ptr->s = agi->agi_free_root;
256}
257
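/*
 * Both diff routines return the difference of the ir_startino fields
 * (key minus cursor record, or k1 minus k2); the generic btree code
 * only looks at the sign, memcmp()-style.
 */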
258STATIC int64_t
259xfs_inobt_key_diff(
260	struct xfs_btree_cur		*cur,
261	const union xfs_btree_key	*key)
262{
263	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
264			  cur->bc_rec.i.ir_startino;
265}
266
267STATIC int64_t
268xfs_inobt_diff_two_keys(
269	struct xfs_btree_cur		*cur,
270	const union xfs_btree_key	*k1,
271	const union xfs_btree_key	*k2,
272	const union xfs_btree_key	*mask)
273{
274	ASSERT(!mask || mask->inobt.ir_startino);
275
276	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
277			be32_to_cpu(k2->inobt.ir_startino);
278}
279
280static xfs_failaddr_t
281xfs_inobt_verify(
282	struct xfs_buf		*bp)
283{
284	struct xfs_mount	*mp = bp->b_mount;
285	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
286	xfs_failaddr_t		fa;
287	unsigned int		level;
288
289	if (!xfs_verify_magic(bp, block->bb_magic))
290		return __this_address;
291
292	/*
293	 * During growfs operations, we can't verify the exact owner as the
294	 * perag is not fully initialised and hence not attached to the buffer.
295	 *
296	 * Similarly, during log recovery we will have a perag structure
297	 * attached, but the agi information will not yet have been initialised
298	 * from the on disk AGI. We don't currently use any of this information,
299	 * but beware of the landmine (i.e. need to check
300	 * xfs_perag_initialised_agi(pag)) if we ever do.
301	 */
302	if (xfs_has_crc(mp)) {
303		fa = xfs_btree_sblock_v5hdr_verify(bp);
304		if (fa)
305			return fa;
306	}
307
308	/* level verification */
309	level = be16_to_cpu(block->bb_level);
310	if (level >= M_IGEO(mp)->inobt_maxlevels)
311		return __this_address;
312
313	return xfs_btree_sblock_verify(bp,
314			M_IGEO(mp)->inobt_mxr[level != 0]);
315}
316
317static void
318xfs_inobt_read_verify(
319	struct xfs_buf	*bp)
320{
321	xfs_failaddr_t	fa;
322
323	if (!xfs_btree_sblock_verify_crc(bp))
324		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
325	else {
326		fa = xfs_inobt_verify(bp);
327		if (fa)
328			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
329	}
330
331	if (bp->b_error)
332		trace_xfs_btree_corrupt(bp, _RET_IP_);
333}
334
335static void
336xfs_inobt_write_verify(
337	struct xfs_buf	*bp)
338{
339	xfs_failaddr_t	fa;
340
341	fa = xfs_inobt_verify(bp);
342	if (fa) {
343		trace_xfs_btree_corrupt(bp, _RET_IP_);
344		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
345		return;
346	}
347	xfs_btree_sblock_calc_crc(bp);
348
349}
350
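/*
 * Each buf_ops carries two magic numbers: slot 0 for pre-CRC (v4)
 * blocks and slot 1 for CRC-enabled (v5) blocks; xfs_verify_magic()
 * checks the slot that matches the mount's feature bits.
 */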
351const struct xfs_buf_ops xfs_inobt_buf_ops = {
352	.name = "xfs_inobt",
353	.magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) },
354	.verify_read = xfs_inobt_read_verify,
355	.verify_write = xfs_inobt_write_verify,
356	.verify_struct = xfs_inobt_verify,
357};
358
359const struct xfs_buf_ops xfs_finobt_buf_ops = {
360	.name = "xfs_finobt",
361	.magic = { cpu_to_be32(XFS_FIBT_MAGIC),
362		   cpu_to_be32(XFS_FIBT_CRC_MAGIC) },
363	.verify_read = xfs_inobt_read_verify,
364	.verify_write = xfs_inobt_write_verify,
365	.verify_struct = xfs_inobt_verify,
366};
367
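/*
 * Ordering checks: keys must strictly increase, and consecutive
 * records must describe non-overlapping chunks, i.e. r2 must start at
 * least XFS_INODES_PER_CHUNK inodes past r1.
 */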
368STATIC int
369xfs_inobt_keys_inorder(
370	struct xfs_btree_cur		*cur,
371	const union xfs_btree_key	*k1,
372	const union xfs_btree_key	*k2)
373{
374	return be32_to_cpu(k1->inobt.ir_startino) <
375		be32_to_cpu(k2->inobt.ir_startino);
376}
377
378STATIC int
379xfs_inobt_recs_inorder(
380	struct xfs_btree_cur		*cur,
381	const union xfs_btree_rec	*r1,
382	const union xfs_btree_rec	*r2)
383{
384	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
385		be32_to_cpu(r2->inobt.ir_startino);
386}
387
388STATIC enum xbtree_key_contig
389xfs_inobt_keys_contiguous(
390	struct xfs_btree_cur		*cur,
391	const union xfs_btree_key	*key1,
392	const union xfs_btree_key	*key2,
393	const union xfs_btree_key	*mask)
394{
395	ASSERT(!mask || mask->inobt.ir_startino);
396
397	return xbtree_key_contig(be32_to_cpu(key1->inobt.ir_startino),
398				 be32_to_cpu(key2->inobt.ir_startino));
399}
400
401static const struct xfs_btree_ops xfs_inobt_ops = {
402	.rec_len		= sizeof(xfs_inobt_rec_t),
403	.key_len		= sizeof(xfs_inobt_key_t),
404
405	.dup_cursor		= xfs_inobt_dup_cursor,
406	.set_root		= xfs_inobt_set_root,
407	.alloc_block		= xfs_inobt_alloc_block,
408	.free_block		= xfs_inobt_free_block,
409	.get_minrecs		= xfs_inobt_get_minrecs,
410	.get_maxrecs		= xfs_inobt_get_maxrecs,
411	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
412	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
413	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
414	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
415	.key_diff		= xfs_inobt_key_diff,
416	.buf_ops		= &xfs_inobt_buf_ops,
417	.diff_two_keys		= xfs_inobt_diff_two_keys,
418	.keys_inorder		= xfs_inobt_keys_inorder,
419	.recs_inorder		= xfs_inobt_recs_inorder,
420	.keys_contiguous	= xfs_inobt_keys_contiguous,
421};
422
423static const struct xfs_btree_ops xfs_finobt_ops = {
424	.rec_len		= sizeof(xfs_inobt_rec_t),
425	.key_len		= sizeof(xfs_inobt_key_t),
426
427	.dup_cursor		= xfs_inobt_dup_cursor,
428	.set_root		= xfs_finobt_set_root,
429	.alloc_block		= xfs_finobt_alloc_block,
430	.free_block		= xfs_finobt_free_block,
431	.get_minrecs		= xfs_inobt_get_minrecs,
432	.get_maxrecs		= xfs_inobt_get_maxrecs,
433	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
434	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
435	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
436	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
437	.key_diff		= xfs_inobt_key_diff,
438	.buf_ops		= &xfs_finobt_buf_ops,
439	.diff_two_keys		= xfs_inobt_diff_two_keys,
440	.keys_inorder		= xfs_inobt_keys_inorder,
441	.recs_inorder		= xfs_inobt_recs_inorder,
442	.keys_contiguous	= xfs_inobt_keys_contiguous,
443};
444
445/*
446 * Initialize a new inode btree cursor.
447 */
448static struct xfs_btree_cur *
449xfs_inobt_init_common(
450	struct xfs_perag	*pag,
451	struct xfs_trans	*tp,		/* transaction pointer */
452	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
453{
454	struct xfs_mount	*mp = pag->pag_mount;
455	struct xfs_btree_cur	*cur;
456
457	cur = xfs_btree_alloc_cursor(mp, tp, btnum,
458			M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
459	if (btnum == XFS_BTNUM_INO) {
460		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
461		cur->bc_ops = &xfs_inobt_ops;
462	} else {
463		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
464		cur->bc_ops = &xfs_finobt_ops;
465	}
466
467	if (xfs_has_crc(mp))
468		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
469
470	cur->bc_ag.pag = xfs_perag_hold(pag);
471	return cur;
472}
473
474/* Create an inode btree cursor. */
475struct xfs_btree_cur *
476xfs_inobt_init_cursor(
477	struct xfs_perag	*pag,
478	struct xfs_trans	*tp,
479	struct xfs_buf		*agbp,
480	xfs_btnum_t		btnum)
481{
482	struct xfs_btree_cur	*cur;
483	struct xfs_agi		*agi = agbp->b_addr;
484
485	cur = xfs_inobt_init_common(pag, tp, btnum);
486	if (btnum == XFS_BTNUM_INO)
487		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
488	else
489		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
490	cur->bc_ag.agbp = agbp;
491	return cur;
492}
493
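/*
 * Staging cursors reference an xbtree_afakeroot rather than the AGI;
 * once the new tree has been built, xfs_inobt_commit_staged_btree()
 * below copies the fake root into the AGI and installs the real ops.
 */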
494/* Create an inode btree cursor with a fake root for staging. */
495struct xfs_btree_cur *
496xfs_inobt_stage_cursor(
497	struct xfs_perag	*pag,
498	struct xbtree_afakeroot	*afake,
499	xfs_btnum_t		btnum)
500{
501	struct xfs_btree_cur	*cur;
502
503	cur = xfs_inobt_init_common(pag, NULL, btnum);
504	xfs_btree_stage_afakeroot(cur, afake);
505	return cur;
506}
507
508/*
509 * Install a new inobt btree root.  Caller is responsible for invalidating
510 * and freeing the old btree blocks.
511 */
512void
513xfs_inobt_commit_staged_btree(
514	struct xfs_btree_cur	*cur,
515	struct xfs_trans	*tp,
516	struct xfs_buf		*agbp)
517{
518	struct xfs_agi		*agi = agbp->b_addr;
519	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
520	int			fields;
521
522	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
523
524	if (cur->bc_btnum == XFS_BTNUM_INO) {
525		fields = XFS_AGI_ROOT | XFS_AGI_LEVEL;
526		agi->agi_root = cpu_to_be32(afake->af_root);
527		agi->agi_level = cpu_to_be32(afake->af_levels);
528		if (xfs_has_inobtcounts(cur->bc_mp)) {
529			agi->agi_iblocks = cpu_to_be32(afake->af_blocks);
530			fields |= XFS_AGI_IBLOCKS;
531		}
532		xfs_ialloc_log_agi(tp, agbp, fields);
533		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_inobt_ops);
534	} else {
535		fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL;
536		agi->agi_free_root = cpu_to_be32(afake->af_root);
537		agi->agi_free_level = cpu_to_be32(afake->af_levels);
538		if (xfs_has_inobtcounts(cur->bc_mp)) {
539			agi->agi_fblocks = cpu_to_be32(afake->af_blocks);
540			fields |= XFS_AGI_IBLOCKS;
541		}
542		xfs_ialloc_log_agi(tp, agbp, fields);
543		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_finobt_ops);
544	}
545}
546
547/* Calculate number of records in an inode btree block. */
548static inline unsigned int
549xfs_inobt_block_maxrecs(
550	unsigned int		blocklen,
551	bool			leaf)
552{
553	if (leaf)
554		return blocklen / sizeof(xfs_inobt_rec_t);
555	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
556}
557
558/*
559 * Like xfs_inobt_block_maxrecs() above, but deducts the block header first.
560 */
561int
562xfs_inobt_maxrecs(
563	struct xfs_mount	*mp,
564	int			blocklen,
565	int			leaf)
566{
567	blocklen -= XFS_INOBT_BLOCK_LEN(mp);
568	return xfs_inobt_block_maxrecs(blocklen, leaf);
569}
570
571/*
572 * Maximum number of inode btree records per AG.  Pretend that we can fill an
573 * entire AG completely full of inodes except for the AG headers.
574 */
575#define XFS_MAX_INODE_RECORDS \
576	((XFS_MAX_AG_BYTES - (4 * BBSIZE)) / XFS_DINODE_MIN_SIZE) / \
577			XFS_INODES_PER_CHUNK
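/*
 * Back-of-envelope, assuming the 1 TiB AG size limit and 256-byte
 * minimum inodes: (2^40 / 2^8) / 64 = 2^26, i.e. roughly 67 million
 * records per AG.
 */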
578
579/* Compute the max possible height for the inode btree. */
580static inline unsigned int
581xfs_inobt_maxlevels_ondisk(void)
582{
583	unsigned int		minrecs[2];
584	unsigned int		blocklen;
585
586	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
587		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);
588
589	minrecs[0] = xfs_inobt_block_maxrecs(blocklen, true) / 2;
590	minrecs[1] = xfs_inobt_block_maxrecs(blocklen, false) / 2;
591
592	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_INODE_RECORDS);
593}
594
595/* Compute the max possible height for the free inode btree. */
596static inline unsigned int
597xfs_finobt_maxlevels_ondisk(void)
598{
599	unsigned int		minrecs[2];
600	unsigned int		blocklen;
601
602	blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN;
603
604	minrecs[0] = xfs_inobt_block_maxrecs(blocklen, true) / 2;
605	minrecs[1] = xfs_inobt_block_maxrecs(blocklen, false) / 2;
606
607	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_INODE_RECORDS);
608}
609
610/* Compute the max possible height for either inode btree. */
611unsigned int
612xfs_iallocbt_maxlevels_ondisk(void)
613{
614	return max(xfs_inobt_maxlevels_ondisk(),
615		   xfs_finobt_maxlevels_ondisk());
616}
617
618/*
619 * Convert the inode record holemask to an inode allocation bitmap. The inode
620 * allocation bitmap is inode granularity and specifies whether an inode is
621 * physically allocated on disk (not whether the inode is considered allocated
622 * or free by the fs).
623 *
624 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
625 */
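/*
 * Worked example: with XFS_INODES_PER_HOLEMASK_BIT == 4, a holemask of
 * 0xff00 marks inodes 32..63 of the chunk as holes; the inverted
 * allocbitmap is 0x00ff, and each of its eight set bits expands to a
 * nibble of ones, giving an allocmask of 0x00000000ffffffff.
 */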
626uint64_t
627xfs_inobt_irec_to_allocmask(
628	const struct xfs_inobt_rec_incore	*rec)
629{
630	uint64_t			bitmap = 0;
631	uint64_t			inodespbit;
632	int				nextbit;
633	uint				allocbitmap;
634
635	/*
636	 * The holemask has 16 bits for a 64-inode record. Therefore each
637	 * holemask bit represents multiple inodes. Create a mask of bits to set
638	 * in the allocmask for each holemask bit.
639	 */
640	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
641
642	/*
643	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
644	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
645	 * anything beyond the 16 holemask bits since this casts to a larger
646	 * type.
647	 */
648	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);
649
650	/*
651	 * allocbitmap is the inverted holemask so every set bit represents
652	 * allocated inodes. To expand from 16-bit holemask granularity to
653	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
654	 * bitmap for every holemask bit.
655	 */
656	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
657	while (nextbit != -1) {
658		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));
659
660		bitmap |= (inodespbit <<
661			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));
662
663		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
664	}
665
666	return bitmap;
667}
668
669#if defined(DEBUG) || defined(XFS_WARN)
670/*
671 * Verify that an in-core inode record has a valid inode count.
672 */
673int
674xfs_inobt_rec_check_count(
675	struct xfs_mount		*mp,
676	struct xfs_inobt_rec_incore	*rec)
677{
678	int				inocount = 0;
679	int				nextbit = 0;
680	uint64_t			allocbmap;
681	int				wordsz;
682
683	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
684	allocbmap = xfs_inobt_irec_to_allocmask(rec);
685
686	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
687	while (nextbit != -1) {
688		inocount++;
689		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
690				       nextbit + 1);
691	}
692
693	if (inocount != rec->ir_count)
694		return -EFSCORRUPTED;
695
696	return 0;
697}
698#endif	/* DEBUG */
699
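/*
 * Rough example, for a hypothetical geometry: an AG of 2^20 blocks
 * with 32 inodes per block holds at most 2^20 * 32 / 64 = 2^19
 * records, and xfs_btree_calc_size() turns that record count into a
 * worst-case block count for the tree.
 */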
700static xfs_extlen_t
701xfs_inobt_max_size(
702	struct xfs_perag	*pag)
703{
704	struct xfs_mount	*mp = pag->pag_mount;
705	xfs_agblock_t		agblocks = pag->block_count;
706
707	/* Bail out if we're uninitialized, which can happen in mkfs. */
708	if (M_IGEO(mp)->inobt_mxr[0] == 0)
709		return 0;
710
711	/*
712	 * The log is permanently allocated, so the space it occupies will
713	 * never be available for the kinds of things that would require btree
714	 * expansion.  We therefore can pretend the space isn't there.
715	 */
716	if (xfs_ag_contains_log(mp, pag->pag_agno))
717		agblocks -= mp->m_sb.sb_logblocks;
718
719	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
720				(uint64_t)agblocks * mp->m_sb.sb_inopblock /
721					XFS_INODES_PER_CHUNK);
722}
723
724/* Read AGI and create inobt cursor. */
725int
726xfs_inobt_cur(
727	struct xfs_perag	*pag,
728	struct xfs_trans	*tp,
729	xfs_btnum_t		which,
730	struct xfs_btree_cur	**curpp,
731	struct xfs_buf		**agi_bpp)
732{
733	struct xfs_btree_cur	*cur;
734	int			error;
735
736	ASSERT(*agi_bpp == NULL);
737	ASSERT(*curpp == NULL);
738
739	error = xfs_ialloc_read_agi(pag, tp, agi_bpp);
740	if (error)
741		return error;
742
743	cur = xfs_inobt_init_cursor(pag, tp, *agi_bpp, which);
744	*curpp = cur;
745	return 0;
746}
747
748static int
749xfs_inobt_count_blocks(
750	struct xfs_perag	*pag,
751	struct xfs_trans	*tp,
752	xfs_btnum_t		btnum,
753	xfs_extlen_t		*tree_blocks)
754{
755	struct xfs_buf		*agbp = NULL;
756	struct xfs_btree_cur	*cur = NULL;
757	int			error;
758
759	error = xfs_inobt_cur(pag, tp, btnum, &cur, &agbp);
760	if (error)
761		return error;
762
763	error = xfs_btree_count_blocks(cur, tree_blocks);
764	xfs_btree_del_cursor(cur, error);
765	xfs_trans_brelse(tp, agbp);
766
767	return error;
768}
769
770/* Read finobt block count from AGI header. */
771static int
772xfs_finobt_read_blocks(
773	struct xfs_perag	*pag,
774	struct xfs_trans	*tp,
775	xfs_extlen_t		*tree_blocks)
776{
777	struct xfs_buf		*agbp;
778	struct xfs_agi		*agi;
779	int			error;
780
781	error = xfs_ialloc_read_agi(pag, tp, &agbp);
782	if (error)
783		return error;
784
785	agi = agbp->b_addr;
786	*tree_blocks = be32_to_cpu(agi->agi_fblocks);
787	xfs_trans_brelse(tp, agbp);
788	return 0;
789}
790
791/*
792 * Figure out how many blocks to reserve and how many are used by this btree.
793 */
794int
795xfs_finobt_calc_reserves(
796	struct xfs_perag	*pag,
797	struct xfs_trans	*tp,
798	xfs_extlen_t		*ask,
799	xfs_extlen_t		*used)
800{
801	xfs_extlen_t		tree_len = 0;
802	int			error;
803
804	if (!xfs_has_finobt(pag->pag_mount))
805		return 0;
806
807	if (xfs_has_inobtcounts(pag->pag_mount))
808		error = xfs_finobt_read_blocks(pag, tp, &tree_len);
809	else
810		error = xfs_inobt_count_blocks(pag, tp, XFS_BTNUM_FINO,
811				&tree_len);
812	if (error)
813		return error;
814
815	*ask += xfs_inobt_max_size(pag);
816	*used += tree_len;
817	return 0;
818}
819
820/* Calculate the inobt btree size for some records. */
821xfs_extlen_t
822xfs_iallocbt_calc_size(
823	struct xfs_mount	*mp,
824	unsigned long long	len)
825{
826	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
827}
828
829int __init
830xfs_inobt_init_cur_cache(void)
831{
832	xfs_inobt_cur_cache = kmem_cache_create("xfs_inobt_cur",
833			xfs_btree_cur_sizeof(xfs_inobt_maxlevels_ondisk()),
834			0, 0, NULL);
835
836	if (!xfs_inobt_cur_cache)
837		return -ENOMEM;
838	return 0;
839}
840
841void
842xfs_inobt_destroy_cur_cache(void)
843{
844	kmem_cache_destroy(xfs_inobt_cur_cache);
845	xfs_inobt_cur_cache = NULL;
846}
v4.10.11: fs/xfs/libxfs/xfs_ialloc_btree.c
 
  1/*
  2 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License as
  7 * published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it would be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write the Free Software Foundation,
 16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 17 */
 18#include "xfs.h"
 19#include "xfs_fs.h"
 20#include "xfs_shared.h"
 21#include "xfs_format.h"
 22#include "xfs_log_format.h"
 23#include "xfs_trans_resv.h"
 24#include "xfs_bit.h"
 25#include "xfs_mount.h"
 26#include "xfs_inode.h"
 27#include "xfs_btree.h"
 28#include "xfs_ialloc.h"
 29#include "xfs_ialloc_btree.h"
 30#include "xfs_alloc.h"
 31#include "xfs_error.h"
 32#include "xfs_trace.h"
 33#include "xfs_cksum.h"
 34#include "xfs_trans.h"
 35#include "xfs_rmap.h"
 36
 37
 38STATIC int
 39xfs_inobt_get_minrecs(
 40	struct xfs_btree_cur	*cur,
 41	int			level)
 42{
 43	return cur->bc_mp->m_inobt_mnr[level != 0];
 44}
 45
 46STATIC struct xfs_btree_cur *
 47xfs_inobt_dup_cursor(
 48	struct xfs_btree_cur	*cur)
 49{
 50	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
 51			cur->bc_private.a.agbp, cur->bc_private.a.agno,
 52			cur->bc_btnum);
 53}
 54
 55STATIC void
 56xfs_inobt_set_root(
 57	struct xfs_btree_cur	*cur,
 58	union xfs_btree_ptr	*nptr,
 59	int			inc)	/* level change */
 60{
 61	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
 62	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
 63
 64	agi->agi_root = nptr->s;
 65	be32_add_cpu(&agi->agi_level, inc);
 66	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
 67}
 68
 69STATIC void
 70xfs_finobt_set_root(
 71	struct xfs_btree_cur	*cur,
 72	union xfs_btree_ptr	*nptr,
 73	int			inc)	/* level change */
 74{
 75	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
 76	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
 77
 78	agi->agi_free_root = nptr->s;
 79	be32_add_cpu(&agi->agi_free_level, inc);
 80	xfs_ialloc_log_agi(cur->bc_tp, agbp,
 81			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
 82}
 83
 84STATIC int
 85__xfs_inobt_alloc_block(
 86	struct xfs_btree_cur	*cur,
 87	union xfs_btree_ptr	*start,
 88	union xfs_btree_ptr	*new,
 89	int			*stat,
 90	enum xfs_ag_resv_type	resv)
 91{
 92	xfs_alloc_arg_t		args;		/* block allocation args */
 93	int			error;		/* error return value */
 94	xfs_agblock_t		sbno = be32_to_cpu(start->s);
 95
 96	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
 97
 98	memset(&args, 0, sizeof(args));
 99	args.tp = cur->bc_tp;
100	args.mp = cur->bc_mp;
101	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INOBT);
102	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
103	args.minlen = 1;
104	args.maxlen = 1;
105	args.prod = 1;
106	args.type = XFS_ALLOCTYPE_NEAR_BNO;
107	args.resv = resv;
108
109	error = xfs_alloc_vextent(&args);
110	if (error) {
111		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
112		return error;
113	}
114	if (args.fsbno == NULLFSBLOCK) {
115		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
116		*stat = 0;
117		return 0;
118	}
119	ASSERT(args.len == 1);
120	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
121
122	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
123	*stat = 1;
124	return 0;
125}
126
127STATIC int
128xfs_inobt_alloc_block(
129	struct xfs_btree_cur	*cur,
130	union xfs_btree_ptr	*start,
131	union xfs_btree_ptr	*new,
132	int			*stat)
133{
134	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
135}
136
137STATIC int
138xfs_finobt_alloc_block(
139	struct xfs_btree_cur	*cur,
140	union xfs_btree_ptr	*start,
141	union xfs_btree_ptr	*new,
142	int			*stat)
143{
144	return __xfs_inobt_alloc_block(cur, start, new, stat,
145			XFS_AG_RESV_METADATA);
146}
147
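/*
 * Note: this older version frees the btree block synchronously with
 * xfs_free_extent(); the v6.8 code above defers the free via
 * xfs_free_extent_later().
 */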
148STATIC int
149xfs_inobt_free_block(
150	struct xfs_btree_cur	*cur,
151	struct xfs_buf		*bp)
152{
153	struct xfs_owner_info	oinfo;
154
155	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
156	return xfs_free_extent(cur->bc_tp,
157			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
158			&oinfo, XFS_AG_RESV_NONE);
159}
160
161STATIC int
162xfs_inobt_get_maxrecs(
163	struct xfs_btree_cur	*cur,
164	int			level)
165{
166	return cur->bc_mp->m_inobt_mxr[level != 0];
167}
168
169STATIC void
170xfs_inobt_init_key_from_rec(
171	union xfs_btree_key	*key,
172	union xfs_btree_rec	*rec)
173{
174	key->inobt.ir_startino = rec->inobt.ir_startino;
175}
176
177STATIC void
178xfs_inobt_init_rec_from_cur(
179	struct xfs_btree_cur	*cur,
180	union xfs_btree_rec	*rec)
181{
182	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
183	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
184		rec->inobt.ir_u.sp.ir_holemask =
185					cpu_to_be16(cur->bc_rec.i.ir_holemask);
186		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
187		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
188	} else {
189		/* ir_holemask/ir_count not supported on-disk */
190		rec->inobt.ir_u.f.ir_freecount =
191					cpu_to_be32(cur->bc_rec.i.ir_freecount);
192	}
193	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
194}
195
196/*
197 * Initial value of ptr for a lookup: the root block recorded in the AGI.
198 */
199STATIC void
200xfs_inobt_init_ptr_from_cur(
201	struct xfs_btree_cur	*cur,
202	union xfs_btree_ptr	*ptr)
203{
204	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);
205
206	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
207
208	ptr->s = agi->agi_root;
209}
210
211STATIC void
212xfs_finobt_init_ptr_from_cur(
213	struct xfs_btree_cur	*cur,
214	union xfs_btree_ptr	*ptr)
215{
216	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);
217
218	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
219	ptr->s = agi->agi_free_root;
220}
221
222STATIC __int64_t
223xfs_inobt_key_diff(
224	struct xfs_btree_cur	*cur,
225	union xfs_btree_key	*key)
226{
227	return (__int64_t)be32_to_cpu(key->inobt.ir_startino) -
228			  cur->bc_rec.i.ir_startino;
229}
230
231static int
232xfs_inobt_verify(
233	struct xfs_buf		*bp)
234{
235	struct xfs_mount	*mp = bp->b_target->bt_mount;
236	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
237	unsigned int		level;
238
239	/*
240	 * During growfs operations, we can't verify the exact owner as the
241	 * perag is not fully initialised and hence not attached to the buffer.
242	 *
243	 * Similarly, during log recovery we will have a perag structure
244	 * attached, but the agi information will not yet have been initialised
245	 * from the on disk AGI. We don't currently use any of this information,
246	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
247	 * ever do.
248	 */
249	switch (block->bb_magic) {
250	case cpu_to_be32(XFS_IBT_CRC_MAGIC):
251	case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
252		if (!xfs_btree_sblock_v5hdr_verify(bp))
253			return false;
254		/* fall through */
255	case cpu_to_be32(XFS_IBT_MAGIC):
256	case cpu_to_be32(XFS_FIBT_MAGIC):
257		break;
258	default:
259		return 0;
260	}
261
262	/* level verification */
263	level = be16_to_cpu(block->bb_level);
264	if (level >= mp->m_in_maxlevels)
265		return false;
266
267	return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
268}
269
270static void
271xfs_inobt_read_verify(
272	struct xfs_buf	*bp)
273{
274	if (!xfs_btree_sblock_verify_crc(bp))
275		xfs_buf_ioerror(bp, -EFSBADCRC);
276	else if (!xfs_inobt_verify(bp))
277		xfs_buf_ioerror(bp, -EFSCORRUPTED);
278
279	if (bp->b_error) {
280		trace_xfs_btree_corrupt(bp, _RET_IP_);
281		xfs_verifier_error(bp);
282	}
283}
284
285static void
286xfs_inobt_write_verify(
287	struct xfs_buf	*bp)
288{
289	if (!xfs_inobt_verify(bp)) {
290		trace_xfs_btree_corrupt(bp, _RET_IP_);
291		xfs_buf_ioerror(bp, -EFSCORRUPTED);
292		xfs_verifier_error(bp);
293		return;
294	}
295	xfs_btree_sblock_calc_crc(bp);
296
297}
298
299const struct xfs_buf_ops xfs_inobt_buf_ops = {
300	.name = "xfs_inobt",
301	.verify_read = xfs_inobt_read_verify,
302	.verify_write = xfs_inobt_write_verify,
303};
304
305#if defined(DEBUG) || defined(XFS_WARN)
306STATIC int
307xfs_inobt_keys_inorder(
308	struct xfs_btree_cur	*cur,
309	union xfs_btree_key	*k1,
310	union xfs_btree_key	*k2)
311{
312	return be32_to_cpu(k1->inobt.ir_startino) <
313		be32_to_cpu(k2->inobt.ir_startino);
314}
315
316STATIC int
317xfs_inobt_recs_inorder(
318	struct xfs_btree_cur	*cur,
319	union xfs_btree_rec	*r1,
320	union xfs_btree_rec	*r2)
321{
322	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
323		be32_to_cpu(r2->inobt.ir_startino);
324}
325#endif	/* DEBUG */
326
327static const struct xfs_btree_ops xfs_inobt_ops = {
328	.rec_len		= sizeof(xfs_inobt_rec_t),
329	.key_len		= sizeof(xfs_inobt_key_t),
330
331	.dup_cursor		= xfs_inobt_dup_cursor,
332	.set_root		= xfs_inobt_set_root,
333	.alloc_block		= xfs_inobt_alloc_block,
334	.free_block		= xfs_inobt_free_block,
335	.get_minrecs		= xfs_inobt_get_minrecs,
336	.get_maxrecs		= xfs_inobt_get_maxrecs,
337	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
338	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
339	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
340	.key_diff		= xfs_inobt_key_diff,
341	.buf_ops		= &xfs_inobt_buf_ops,
342#if defined(DEBUG) || defined(XFS_WARN)
343	.keys_inorder		= xfs_inobt_keys_inorder,
344	.recs_inorder		= xfs_inobt_recs_inorder,
345#endif
346};
347
348static const struct xfs_btree_ops xfs_finobt_ops = {
349	.rec_len		= sizeof(xfs_inobt_rec_t),
350	.key_len		= sizeof(xfs_inobt_key_t),
351
352	.dup_cursor		= xfs_inobt_dup_cursor,
353	.set_root		= xfs_finobt_set_root,
354	.alloc_block		= xfs_finobt_alloc_block,
355	.free_block		= xfs_inobt_free_block,
356	.get_minrecs		= xfs_inobt_get_minrecs,
357	.get_maxrecs		= xfs_inobt_get_maxrecs,
358	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
359	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
360	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
361	.key_diff		= xfs_inobt_key_diff,
362	.buf_ops		= &xfs_inobt_buf_ops,
363#if defined(DEBUG) || defined(XFS_WARN)
364	.keys_inorder		= xfs_inobt_keys_inorder,
365	.recs_inorder		= xfs_inobt_recs_inorder,
366#endif
367};
368
369/*
370 * Allocate a new inode btree cursor.
371 */
372struct xfs_btree_cur *				/* new inode btree cursor */
373xfs_inobt_init_cursor(
374	struct xfs_mount	*mp,		/* file system mount point */
375	struct xfs_trans	*tp,		/* transaction pointer */
376	struct xfs_buf		*agbp,		/* buffer for agi structure */
377	xfs_agnumber_t		agno,		/* allocation group number */
378	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
379{
380	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
381	struct xfs_btree_cur	*cur;
382
383	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
384
385	cur->bc_tp = tp;
386	cur->bc_mp = mp;
387	cur->bc_btnum = btnum;
388	if (btnum == XFS_BTNUM_INO) {
389		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
390		cur->bc_ops = &xfs_inobt_ops;
391		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
392	} else {
393		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
394		cur->bc_ops = &xfs_finobt_ops;
395		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
396	}
397
398	cur->bc_blocklog = mp->m_sb.sb_blocklog;
399
400	if (xfs_sb_version_hascrc(&mp->m_sb))
401		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
402
403	cur->bc_private.a.agbp = agbp;
404	cur->bc_private.a.agno = agno;
405
406	return cur;
407}
408
409/*
410 * Calculate number of records in an inobt btree block.
411 */
412int
413xfs_inobt_maxrecs(
414	struct xfs_mount	*mp,
415	int			blocklen,
416	int			leaf)
417{
418	blocklen -= XFS_INOBT_BLOCK_LEN(mp);
419
420	if (leaf)
421		return blocklen / sizeof(xfs_inobt_rec_t);
422	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
423}
424
425/*
426 * Convert the inode record holemask to an inode allocation bitmap. The inode
427 * allocation bitmap is inode granularity and specifies whether an inode is
428 * physically allocated on disk (not whether the inode is considered allocated
429 * or free by the fs).
430 *
431 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
432 */
433uint64_t
434xfs_inobt_irec_to_allocmask(
435	struct xfs_inobt_rec_incore	*rec)
436{
437	uint64_t			bitmap = 0;
438	uint64_t			inodespbit;
439	int				nextbit;
440	uint				allocbitmap;
441
442	/*
443	 * The holemask has 16 bits for a 64-inode record. Therefore each
444	 * holemask bit represents multiple inodes. Create a mask of bits to set
445	 * in the allocmask for each holemask bit.
446	 */
447	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
448
449	/*
450	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
451	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
452	 * anything beyond the 16 holemask bits since this casts to a larger
453	 * type.
454	 */
455	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);
456
457	/*
458	 * allocbitmap is the inverted holemask so every set bit represents
459	 * allocated inodes. To expand from 16-bit holemask granularity to
460	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
461	 * bitmap for every holemask bit.
462	 */
463	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
464	while (nextbit != -1) {
465		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));
466
467		bitmap |= (inodespbit <<
468			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));
469
470		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
471	}
472
473	return bitmap;
474}
475
476#if defined(DEBUG) || defined(XFS_WARN)
477/*
478 * Verify that an in-core inode record has a valid inode count.
479 */
480int
481xfs_inobt_rec_check_count(
482	struct xfs_mount		*mp,
483	struct xfs_inobt_rec_incore	*rec)
484{
485	int				inocount = 0;
486	int				nextbit = 0;
487	uint64_t			allocbmap;
488	int				wordsz;
489
490	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
491	allocbmap = xfs_inobt_irec_to_allocmask(rec);
492
493	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
494	while (nextbit != -1) {
495		inocount++;
496		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
497				       nextbit + 1);
498	}
499
500	if (inocount != rec->ir_count)
501		return -EFSCORRUPTED;
502
503	return 0;
504}
505#endif	/* DEBUG */
506
507static xfs_extlen_t
508xfs_inobt_max_size(
509	struct xfs_mount	*mp)
510{
511	/* Bail out if we're uninitialized, which can happen in mkfs. */
512	if (mp->m_inobt_mxr[0] == 0)
513		return 0;
514
515	return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
516		(uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
517				XFS_INODES_PER_CHUNK);
518}
519
520static int
521xfs_inobt_count_blocks(
522	struct xfs_mount	*mp,
523	xfs_agnumber_t		agno,
524	xfs_btnum_t		btnum,
525	xfs_extlen_t		*tree_blocks)
526{
527	struct xfs_buf		*agbp;
528	struct xfs_btree_cur	*cur;
529	int			error;
530
531	error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
532	if (error)
533		return error;
534
535	cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
536	error = xfs_btree_count_blocks(cur, tree_blocks);
537	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
538	xfs_buf_relse(agbp);
539
540	return error;
541}
542
543/*
544 * Figure out how many blocks to reserve and how many are used by this btree.
545 */
546int
547xfs_finobt_calc_reserves(
548	struct xfs_mount	*mp,
549	xfs_agnumber_t		agno,
550	xfs_extlen_t		*ask,
551	xfs_extlen_t		*used)
552{
553	xfs_extlen_t		tree_len = 0;
554	int			error;
555
556	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
557		return 0;
558
559	error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
560	if (error)
561		return error;
562
563	*ask += xfs_inobt_max_size(mp);
564	*used += tree_len;
565	return 0;
566}