v4.17
 
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"


STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mnr[level != 0];
}
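/*
 * Note: m_inobt_mnr[]/m_inobt_mxr[] each hold two entries; index 0 is
 * the record count limit for leaf blocks and index 1 the key/pointer
 * count limit for node blocks, hence the "level != 0" subscript here
 * and in xfs_inobt_get_maxrecs() below.
 */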

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

STATIC int
__xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat,
	enum xfs_ag_resv_type	resv)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INOBT);
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	return 0;
}

STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	if (cur->bc_mp->m_inotbt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);
}
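/*
 * Design note: the finobt normally allocates its blocks out of the
 * per-AG metadata reservation (XFS_AG_RESV_METADATA).  When that
 * reservation could not be established, m_inotbt_nores is set and we
 * fall back to ordinary, unreserved allocations via
 * xfs_inobt_alloc_block() above.
 */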

STATIC int
__xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	enum xfs_ag_resv_type	resv)
{
	struct xfs_owner_info	oinfo;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
	return xfs_free_extent(cur->bc_tp,
			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
			&oinfo, resv);
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	if (cur->bc_mp->m_inotbt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}

STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->inobt.ir_startino);
	x += XFS_INODES_PER_CHUNK - 1;
	key->inobt.ir_startino = cpu_to_be32(x);
}
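/*
 * Worked example: an inobt record always covers XFS_INODES_PER_CHUNK
 * (64) inodes, so a record with ir_startino == 128 spans inodes
 * 128..191 and its high key is 128 + 64 - 1 = 191.
 */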

STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec->inobt.ir_u.sp.ir_holemask =
					cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
					cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * initial value of ptr for lookup
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

STATIC int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
			  cur->bc_rec.i.ir_startino;
}

STATIC int64_t
xfs_inobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
			  be32_to_cpu(k2->inobt.ir_startino);
}
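/*
 * Both comparators follow the usual memcmp()-style contract: a
 * negative result means the first operand sorts before the second,
 * zero means equal, positive means it sorts after.  The generic btree
 * code only looks at the sign of the result.
 */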

static xfs_failaddr_t
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_IBT_CRC_MAGIC):
	case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
		/* fall through */
	case cpu_to_be32(XFS_IBT_MAGIC):
	case cpu_to_be32(XFS_FIBT_MAGIC):
		break;
	default:
		return NULL;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= mp->m_in_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_inobt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_inobt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);

}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}

static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_finobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_finobt_set_root,
	.alloc_block		= xfs_finobt_alloc_block,
	.free_block		= xfs_finobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

/*
 * Allocate a new inode btree cursor.
 */
struct xfs_btree_cur *				/* new inode btree cursor */
xfs_inobt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agi structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
	struct xfs_btree_cur	*cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	if (btnum == XFS_BTNUM_INO) {
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
		cur->bc_ops = &xfs_inobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
	} else {
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
		cur->bc_ops = &xfs_finobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
	}

	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}
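/*
 * Hedged usage sketch (illustrative, not part of this file): a typical
 * caller reads the AGI, builds a cursor, does its lookup, and tears
 * everything down again.  The helper name xfs_inobt_lookup_example is
 * made up; xfs_ialloc_read_agi() and xfs_inobt_lookup() are the real
 * v4.17 helpers this assumes.
 */
STATIC int
xfs_inobt_lookup_example(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino)
{
	struct xfs_buf		*agbp;
	struct xfs_btree_cur	*cur;
	int			stat = 0;
	int			error;

	error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
	if (error)
		return error;

	/* cursor over the main inobt; XFS_BTNUM_FINO would walk the finobt */
	cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO);
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	xfs_buf_relse(agbp);
	return error;
}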

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}
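/*
 * Rough worked example (assuming the usual on-disk sizes: a 16-byte
 * record, 4-byte key, 4-byte pointer and a 16-byte short-form block
 * header on non-CRC filesystems): a 4096-byte block holds
 * (4096 - 16) / 16 = 255 leaf records, or (4096 - 16) / (4 + 4) = 510
 * key/pointer pairs in a node block.  CRC filesystems carry a larger
 * header, so the counts come out lower there.
 */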

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
 */
uint64_t
xfs_inobt_irec_to_allocmask(
	struct xfs_inobt_rec_incore	*rec)
{
	uint64_t			bitmap = 0;
	uint64_t			inodespbit;
	int				nextbit;
	uint				allocbitmap;

	/*
	 * The holemask has 16-bits for a 64 inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to set
	 * in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}
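/*
 * Worked example: with 64 inodes per record and 16 holemask bits, each
 * holemask bit covers XFS_INODES_PER_HOLEMASK_BIT = 4 inodes.  For
 * ir_holemask = 0xff00 (the upper half of the chunk is a hole):
 *
 *	allocbitmap = ~0xff00 & 0xffff = 0x00ff
 *	bitmap      = 0x00000000ffffffff
 *
 * i.e. inodes 0..31 are physically allocated, inodes 32..63 are not.
 */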

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount		*mp,
	struct xfs_inobt_rec_incore	*rec)
{
	int				inocount = 0;
	int				nextbit = 0;
	uint64_t			allocbmap;
	int				wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				       nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG */

static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_mount	*mp)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_inobt_mxr[0] == 0)
		return 0;

	return xfs_btree_calc_size(mp->m_inobt_mnr,
		(uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
				XFS_INODES_PER_CHUNK);
}

static int
xfs_inobt_count_blocks(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp;
	struct xfs_btree_cur	*cur;
	int			error;

	error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
	if (error)
		return error;

	cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
	error = xfs_btree_count_blocks(cur, tree_blocks);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	xfs_buf_relse(agbp);

	return error;
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_finobt_calc_reserves(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	xfs_extlen_t		tree_len = 0;
	int			error;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return 0;

	error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
	if (error)
		return error;

	*ask += xfs_inobt_max_size(mp);
	*used += tree_len;
	return 0;
}
v5.14.15
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"

STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
}

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}

STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

/* Update the inode btree block counter for this btree. */
static inline void
xfs_inobt_mod_blockcount(
	struct xfs_btree_cur	*cur,
	int			howmuch)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	if (!xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb))
		return;

	if (cur->bc_btnum == XFS_BTNUM_FINO)
		be32_add_cpu(&agi->agi_fblocks, howmuch);
	else if (cur->bc_btnum == XFS_BTNUM_INO)
		be32_add_cpu(&agi->agi_iblocks, howmuch);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS);
}
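/*
 * Note: both the inobt and finobt block counters live in the AGI and
 * are logged under the single XFS_AGI_IBLOCKS flag; they exist only on
 * filesystems with the inobtcounts feature, hence the early return
 * above.
 */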

STATIC int
__xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat,
	enum xfs_ag_resv_type	resv)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.oinfo = XFS_RMAP_OINFO_INOBT;
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.pag->pag_agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	xfs_inobt_mod_blockcount(cur, 1);
	return 0;
}

STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);
}

STATIC int
__xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	enum xfs_ag_resv_type	resv)
{
	xfs_inobt_mod_blockcount(cur, -1);
	return xfs_free_extent(cur->bc_tp,
			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
			&XFS_RMAP_OINFO_INOBT, resv);
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}

STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->inobt.ir_startino);
	x += XFS_INODES_PER_CHUNK - 1;
	key->inobt.ir_startino = cpu_to_be32(x);
}

STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec->inobt.ir_u.sp.ir_holemask =
					cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
					cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * initial value of ptr for lookup
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

STATIC int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
			  cur->bc_rec.i.ir_startino;
}

STATIC int64_t
xfs_inobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
			  be32_to_cpu(k2->inobt.ir_startino);
}

static xfs_failaddr_t
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= M_IGEO(mp)->inobt_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp,
			M_IGEO(mp)->inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_inobt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_inobt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);

}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

const struct xfs_buf_ops xfs_finobt_buf_ops = {
	.name = "xfs_finobt",
	.magic = { cpu_to_be32(XFS_FIBT_MAGIC),
		   cpu_to_be32(XFS_FIBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}

static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_finobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_finobt_set_root,
	.alloc_block		= xfs_finobt_alloc_block,
	.free_block		= xfs_finobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_finobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

/*
 * Initialize a new inode btree cursor.
 */
static struct xfs_btree_cur *
xfs_inobt_init_common(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
{
	struct xfs_btree_cur	*cur;

	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	if (btnum == XFS_BTNUM_INO) {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
		cur->bc_ops = &xfs_inobt_ops;
	} else {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
		cur->bc_ops = &xfs_finobt_ops;
	}

	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	/* take a reference for the cursor */
	atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;
	return cur;
}

/* Create an inode btree cursor. */
struct xfs_btree_cur *
xfs_inobt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;
	struct xfs_agi		*agi = agbp->b_addr;

	cur = xfs_inobt_init_common(mp, tp, pag, btnum);
	if (btnum == XFS_BTNUM_INO)
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
	else
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
	cur->bc_ag.agbp = agbp;
	return cur;
}

/* Create an inode btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_inobt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_inobt_init_common(mp, NULL, pag, btnum);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}
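/*
 * Hedged usage sketch (illustrative, not part of this file): online
 * repair builds a replacement btree against the fake root and then
 * commits it over the old one.  The helper name and the elided
 * xfs_btree_bload() setup are assumptions here.
 */
static int
xfs_inobt_rebuild_example(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xbtree_afakeroot	afake = { 0 };
	struct xfs_btree_cur	*cur;
	int			error = 0;

	cur = xfs_inobt_stage_cursor(mp, &afake, pag, XFS_BTNUM_INO);
	/* ... stage records and call xfs_btree_bload() here ... */
	if (error) {
		xfs_btree_del_cursor(cur, error);
		return error;
	}
	/* swap the staged root into the AGI; the cursor becomes a real one */
	xfs_inobt_commit_staged_btree(cur, tp, agbp);
	xfs_btree_del_cursor(cur, 0);
	return 0;
}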

/*
 * Install a new inobt btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_inobt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agi		*agi = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
	int			fields;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	if (cur->bc_btnum == XFS_BTNUM_INO) {
		fields = XFS_AGI_ROOT | XFS_AGI_LEVEL;
		agi->agi_root = cpu_to_be32(afake->af_root);
		agi->agi_level = cpu_to_be32(afake->af_levels);
		if (xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb)) {
			agi->agi_iblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_inobt_ops);
	} else {
		fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL;
		agi->agi_free_root = cpu_to_be32(afake->af_root);
		agi->agi_free_level = cpu_to_be32(afake->af_levels);
		if (xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb)) {
			agi->agi_fblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_finobt_ops);
	}
}

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
 */
uint64_t
xfs_inobt_irec_to_allocmask(
	struct xfs_inobt_rec_incore	*rec)
{
	uint64_t			bitmap = 0;
	uint64_t			inodespbit;
	int				nextbit;
	uint				allocbitmap;

	/*
	 * The holemask has 16-bits for a 64 inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to set
	 * in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount		*mp,
	struct xfs_inobt_rec_incore	*rec)
{
	int				inocount = 0;
	int				nextbit = 0;
	uint64_t			allocbmap;
	int				wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				       nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG */

static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	xfs_agblock_t		agblocks = xfs_ag_block_count(mp, agno);

	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (M_IGEO(mp)->inobt_mxr[0] == 0)
		return 0;

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
		agblocks -= mp->m_sb.sb_logblocks;

	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
				(uint64_t)agblocks * mp->m_sb.sb_inopblock /
					XFS_INODES_PER_CHUNK);
}

/* Read AGI and create inobt cursor. */
int
xfs_inobt_cur(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		which,
	struct xfs_btree_cur	**curpp,
	struct xfs_buf		**agi_bpp)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(*agi_bpp == NULL);
	ASSERT(*curpp == NULL);

	error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, agi_bpp);
	if (error)
		return error;

	cur = xfs_inobt_init_cursor(mp, tp, *agi_bpp, pag, which);
	*curpp = cur;
	return 0;
}

static int
xfs_inobt_count_blocks(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp = NULL;
	struct xfs_btree_cur	*cur = NULL;
	int			error;

	error = xfs_inobt_cur(mp, tp, pag, btnum, &cur, &agbp);
	if (error)
		return error;

	error = xfs_btree_count_blocks(cur, tree_blocks);
	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(tp, agbp);

	return error;
}

/* Read finobt block count from AGI header. */
static int
xfs_finobt_read_blocks(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp;
	struct xfs_agi		*agi;
	int			error;

	error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
	if (error)
		return error;

	agi = agbp->b_addr;
	*tree_blocks = be32_to_cpu(agi->agi_fblocks);
	xfs_trans_brelse(tp, agbp);
	return 0;
}
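/*
 * Design note: with the inobtcounts feature the finobt size comes
 * straight from the AGI header above, which avoids walking the whole
 * tree in xfs_inobt_count_blocks() just to size the reservation.
 */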

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_finobt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	xfs_extlen_t		tree_len = 0;
	int			error;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return 0;

	if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
		error = xfs_finobt_read_blocks(mp, tp, pag, &tree_len);
	else
		error = xfs_inobt_count_blocks(mp, tp, pag, XFS_BTNUM_FINO,
				&tree_len);
	if (error)
		return error;

	*ask += xfs_inobt_max_size(mp, pag->pag_agno);
	*used += tree_len;
	return 0;
}

/* Calculate the inobt btree size for some records. */
xfs_extlen_t
xfs_iallocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
}
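/*
 * Hedged usage note: len here is a record count (one record per
 * 64-inode chunk), so sizing a tree for 1,048,576 inodes means
 * 1048576 / XFS_INODES_PER_CHUNK = 16384 records, and the helper
 * returns the worst-case number of btree blocks needed to index them
 * (worst case because it sizes with the minimum records per block).
 */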