   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_bit.h"
  13#include "xfs_sb.h"
  14#include "xfs_mount.h"
  15#include "xfs_inode.h"
  16#include "xfs_btree.h"
  17#include "xfs_ialloc.h"
  18#include "xfs_ialloc_btree.h"
  19#include "xfs_alloc.h"
  20#include "xfs_errortag.h"
  21#include "xfs_error.h"
  22#include "xfs_bmap.h"
  23#include "xfs_trans.h"
  24#include "xfs_buf_item.h"
  25#include "xfs_icreate_item.h"
  26#include "xfs_icache.h"
  27#include "xfs_trace.h"
  28#include "xfs_log.h"
  29#include "xfs_rmap.h"
  30
  31/*
  32 * Lookup a record by ino in the btree given by cur.
  33 */
  34int					/* error */
  35xfs_inobt_lookup(
  36	struct xfs_btree_cur	*cur,	/* btree cursor */
  37	xfs_agino_t		ino,	/* starting inode of chunk */
  38	xfs_lookup_t		dir,	/* <=, >=, == */
  39	int			*stat)	/* success/failure */
  40{
  41	cur->bc_rec.i.ir_startino = ino;
  42	cur->bc_rec.i.ir_holemask = 0;
  43	cur->bc_rec.i.ir_count = 0;
  44	cur->bc_rec.i.ir_freecount = 0;
  45	cur->bc_rec.i.ir_free = 0;
  46	return xfs_btree_lookup(cur, dir, stat);
  47}
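
/*
 * Example usage (editorial sketch, not part of the original file): a
 * caller typically positions the cursor at the chunk record covering a
 * given inode and then reads the record back:
 *
 *	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
 *	if (!error && i == 1)
 *		error = xfs_inobt_get_rec(cur, &rec, &i);
 *
 * XFS_LOOKUP_LE positions at the last record with ir_startino <= agino,
 * which maps an arbitrary inode number back to its owning chunk record.
 */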
  48
  49/*
  50 * Update the record referred to by cur to the value given.
  51 * This either works (return 0) or gets an EFSCORRUPTED error.
  52 */
  53STATIC int				/* error */
  54xfs_inobt_update(
  55	struct xfs_btree_cur	*cur,	/* btree cursor */
  56	xfs_inobt_rec_incore_t	*irec)	/* btree record */
  57{
  58	union xfs_btree_rec	rec;
  59
  60	rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
  61	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
  62		rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
  63		rec.inobt.ir_u.sp.ir_count = irec->ir_count;
  64		rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
  65	} else {
  66		/* ir_holemask/ir_count not supported on-disk */
  67		rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount);
  68	}
  69	rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
  70	return xfs_btree_update(cur, &rec);
  71}
  72
  73/* Convert on-disk btree record to incore inobt record. */
  74void
  75xfs_inobt_btrec_to_irec(
  76	struct xfs_mount		*mp,
  77	union xfs_btree_rec		*rec,
  78	struct xfs_inobt_rec_incore	*irec)
  79{
  80	irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
  81	if (xfs_sb_version_hassparseinodes(&mp->m_sb)) {
  82		irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
  83		irec->ir_count = rec->inobt.ir_u.sp.ir_count;
  84		irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
  85	} else {
  86		/*
  87		 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded
  88		 * values for full inode chunks.
  89		 */
  90		irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL;
  91		irec->ir_count = XFS_INODES_PER_CHUNK;
  92		irec->ir_freecount =
  93				be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
  94	}
  95	irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
  96}
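
/*
 * Worked example (editorial note, not in the original source): the
 * 16-bit holemask describes the 64-inode chunk at 4-inode granularity
 * (XFS_INODES_PER_HOLEMASK_BIT == 4 when XFS_INODES_PER_CHUNK == 64),
 * with a set bit marking a hole. For instance, ir_holemask == 0xff00
 * means inodes 32-63 of the chunk were never physically allocated, so
 * ir_count is 32.
 */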
  97
  98/*
  99 * Get the data from the pointed-to record.
 100 */
 101int
 102xfs_inobt_get_rec(
 103	struct xfs_btree_cur		*cur,
 104	struct xfs_inobt_rec_incore	*irec,
 105	int				*stat)
 106{
 107	struct xfs_mount		*mp = cur->bc_mp;
 108	xfs_agnumber_t			agno = cur->bc_private.a.agno;
 109	union xfs_btree_rec		*rec;
 110	int				error;
 111	uint64_t			realfree;
 112
 113	error = xfs_btree_get_rec(cur, &rec, stat);
 114	if (error || *stat == 0)
 115		return error;
 116
 117	xfs_inobt_btrec_to_irec(mp, rec, irec);
 118
 119	if (!xfs_verify_agino(mp, agno, irec->ir_startino))
 120		goto out_bad_rec;
 121	if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT ||
 122	    irec->ir_count > XFS_INODES_PER_CHUNK)
 123		goto out_bad_rec;
 124	if (irec->ir_freecount > XFS_INODES_PER_CHUNK)
 125		goto out_bad_rec;
 126
  127	/* mask off sparse (unallocated) regions before checking the freecount */
 128	if (!xfs_inobt_issparse(irec->ir_holemask))
 129		realfree = irec->ir_free;
 130	else
 131		realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec);
 132	if (hweight64(realfree) != irec->ir_freecount)
 133		goto out_bad_rec;
 134
 135	return 0;
 136
 137out_bad_rec:
 138	xfs_warn(mp,
 139		"%s Inode BTree record corruption in AG %d detected!",
 140		cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", agno);
 141	xfs_warn(mp,
 142"start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x",
 143		irec->ir_startino, irec->ir_count, irec->ir_freecount,
 144		irec->ir_free, irec->ir_holemask);
 145	return -EFSCORRUPTED;
 146}
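
/*
 * Editorial sketch of the freecount check above: a sparse record keeps
 * free bits set inside its holes (those inodes were never allocated),
 * so they must be masked off before counting. For example, with
 * ir_holemask == 0xff00, ir_free == ~0ULL and ir_freecount == 32:
 *
 *	realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec);
 *	// realfree == 0x00000000ffffffff, hweight64(realfree) == 32
 */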
 147
 148/*
 149 * Insert a single inobt record. Cursor must already point to desired location.
 150 */
 151int
 152xfs_inobt_insert_rec(
 153	struct xfs_btree_cur	*cur,
 154	uint16_t		holemask,
 155	uint8_t			count,
 156	int32_t			freecount,
 157	xfs_inofree_t		free,
 158	int			*stat)
 159{
 160	cur->bc_rec.i.ir_holemask = holemask;
 161	cur->bc_rec.i.ir_count = count;
 162	cur->bc_rec.i.ir_freecount = freecount;
 163	cur->bc_rec.i.ir_free = free;
 164	return xfs_btree_insert(cur, stat);
 165}
 166
 167/*
 168 * Insert records describing a newly allocated inode chunk into the inobt.
 169 */
 170STATIC int
 171xfs_inobt_insert(
 172	struct xfs_mount	*mp,
 173	struct xfs_trans	*tp,
 174	struct xfs_buf		*agbp,
 175	xfs_agino_t		newino,
 176	xfs_agino_t		newlen,
 177	xfs_btnum_t		btnum)
 178{
 179	struct xfs_btree_cur	*cur;
 180	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
 181	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
 182	xfs_agino_t		thisino;
 183	int			i;
 184	int			error;
 185
 186	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
 187
 188	for (thisino = newino;
 189	     thisino < newino + newlen;
 190	     thisino += XFS_INODES_PER_CHUNK) {
 191		error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
 192		if (error) {
 193			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
 194			return error;
 195		}
 196		ASSERT(i == 0);
 197
 198		error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
 199					     XFS_INODES_PER_CHUNK,
 200					     XFS_INODES_PER_CHUNK,
 201					     XFS_INOBT_ALL_FREE, &i);
 202		if (error) {
 203			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
 204			return error;
 205		}
 206		ASSERT(i == 1);
 207	}
 208
 209	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
 210
 211	return 0;
 212}
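
/*
 * Editorial note: XFS_INOBT_HOLEMASK_FULL is 0, since a set holemask
 * bit denotes a hole and a fully allocated chunk has none. Each record
 * inserted above therefore describes a full chunk: zero holemask,
 * XFS_INODES_PER_CHUNK inodes, and all free bits set via
 * XFS_INOBT_ALL_FREE.
 */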
 213
 214/*
 215 * Verify that the number of free inodes in the AGI is correct.
 216 */
 217#ifdef DEBUG
 218STATIC int
 219xfs_check_agi_freecount(
 220	struct xfs_btree_cur	*cur,
 221	struct xfs_agi		*agi)
 222{
 223	if (cur->bc_nlevels == 1) {
 224		xfs_inobt_rec_incore_t rec;
 225		int		freecount = 0;
 226		int		error;
 227		int		i;
 228
 229		error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
 230		if (error)
 231			return error;
 232
 233		do {
 234			error = xfs_inobt_get_rec(cur, &rec, &i);
 235			if (error)
 236				return error;
 237
 238			if (i) {
 239				freecount += rec.ir_freecount;
 240				error = xfs_btree_increment(cur, 0, &i);
 241				if (error)
 242					return error;
 243			}
 244		} while (i == 1);
 245
 246		if (!XFS_FORCED_SHUTDOWN(cur->bc_mp))
 247			ASSERT(freecount == be32_to_cpu(agi->agi_freecount));
 248	}
 249	return 0;
 250}
 251#else
 252#define xfs_check_agi_freecount(cur, agi)	0
 253#endif
 254
 255/*
 256 * Initialise a new set of inodes. When called without a transaction context
 257 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
 258 * than logging them (which in a transaction context puts them into the AIL
 259 * for writeback rather than the xfsbufd queue).
 260 */
 261int
 262xfs_ialloc_inode_init(
 263	struct xfs_mount	*mp,
 264	struct xfs_trans	*tp,
 265	struct list_head	*buffer_list,
 266	int			icount,
 267	xfs_agnumber_t		agno,
 268	xfs_agblock_t		agbno,
 269	xfs_agblock_t		length,
 270	unsigned int		gen)
 271{
 272	struct xfs_buf		*fbuf;
 273	struct xfs_dinode	*free;
 274	int			nbufs;
 275	int			version;
 276	int			i, j;
 277	xfs_daddr_t		d;
 278	xfs_ino_t		ino = 0;
 279
 280	/*
 281	 * Loop over the new block(s), filling in the inodes.  For small block
  282	 * sizes, manipulate the inodes in buffers which are multiples of the
  283	 * block size.
 284	 */
 285	nbufs = length / M_IGEO(mp)->blocks_per_cluster;
 286
 287	/*
 288	 * Figure out what version number to use in the inodes we create.  If
 289	 * the superblock version has caught up to the one that supports the new
 290	 * inode format, then use the new inode version.  Otherwise use the old
 291	 * version so that old kernels will continue to be able to use the file
 292	 * system.
 293	 *
 294	 * For v3 inodes, we also need to write the inode number into the inode,
 295	 * so calculate the first inode number of the chunk here as
 296	 * XFS_AGB_TO_AGINO() only works within a filesystem block, not
 297	 * across multiple filesystem blocks (such as a cluster) and so cannot
 298	 * be used in the cluster buffer loop below.
 299	 *
 300	 * Further, because we are writing the inode directly into the buffer
  301	 * and calculating a CRC on the entire inode, we have to log the entire
  302	 * inode so that the entire range the CRC covers is present in the log.
  303	 * That means for v3 inodes we log the entire buffer rather than just the
 304	 * inode cores.
 305	 */
 306	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 307		version = 3;
 308		ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));
 309
 310		/*
  311		 * log the initialisation that is about to take place as a
 312		 * logical operation. This means the transaction does not
 313		 * need to log the physical changes to the inode buffers as log
 314		 * recovery will know what initialisation is actually needed.
 315		 * Hence we only need to log the buffers as "ordered" buffers so
 316		 * they track in the AIL as if they were physically logged.
 317		 */
 318		if (tp)
 319			xfs_icreate_log(tp, agno, agbno, icount,
 320					mp->m_sb.sb_inodesize, length, gen);
 321	} else
 322		version = 2;
 323
 324	for (j = 0; j < nbufs; j++) {
 325		/*
 326		 * Get the block.
 327		 */
 328		d = XFS_AGB_TO_DADDR(mp, agno, agbno +
 329				(j * M_IGEO(mp)->blocks_per_cluster));
 330		fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
 331					 mp->m_bsize *
 332					 M_IGEO(mp)->blocks_per_cluster,
 333					 XBF_UNMAPPED);
 334		if (!fbuf)
 335			return -ENOMEM;
 336
 337		/* Initialize the inode buffers and log them appropriately. */
 338		fbuf->b_ops = &xfs_inode_buf_ops;
 339		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
 340		for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
 341			int	ioffset = i << mp->m_sb.sb_inodelog;
 342			uint	isize = xfs_dinode_size(version);
 343
 344			free = xfs_make_iptr(mp, fbuf, i);
 345			free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
 346			free->di_version = version;
 347			free->di_gen = cpu_to_be32(gen);
 348			free->di_next_unlinked = cpu_to_be32(NULLAGINO);
 349
 350			if (version == 3) {
 351				free->di_ino = cpu_to_be64(ino);
 352				ino++;
 353				uuid_copy(&free->di_uuid,
 354					  &mp->m_sb.sb_meta_uuid);
 355				xfs_dinode_calc_crc(mp, free);
 356			} else if (tp) {
 357				/* just log the inode core */
 358				xfs_trans_log_buf(tp, fbuf, ioffset,
 359						  ioffset + isize - 1);
 360			}
 361		}
 362
 363		if (tp) {
 364			/*
 365			 * Mark the buffer as an inode allocation buffer so it
  366			 * sticks in the AIL at the point of this allocation
  367			 * transaction. This ensures that they are on disk before
 368			 * the tail of the log can be moved past this
 369			 * transaction (i.e. by preventing relogging from moving
 370			 * it forward in the log).
 371			 */
 372			xfs_trans_inode_alloc_buf(tp, fbuf);
 373			if (version == 3) {
 374				/*
  375				 * Mark the buffer as ordered so that it is
  376				 * not physically logged in the transaction but
  377				 * still tracked in the AIL as part of the
  378				 * transaction, pinning the log appropriately.
 379				 */
 380				xfs_trans_ordered_buf(tp, fbuf);
 381			}
 382		} else {
 383			fbuf->b_flags |= XBF_DONE;
 384			xfs_buf_delwri_queue(fbuf, buffer_list);
 385			xfs_buf_relse(fbuf);
 386		}
 387	}
 388	return 0;
 389}
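
/*
 * Editorial summary of the v3 path above: log recovery learns about the
 * physical inode initialisation from the single compact icreate item
 * logged by xfs_icreate_log(), while the cluster buffers themselves
 * pass through the transaction as ordered buffers, tracked in the AIL
 * without their contents being copied into the log.
 */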
 390
 391/*
 392 * Align startino and allocmask for a recently allocated sparse chunk such that
 393 * they are fit for insertion (or merge) into the on-disk inode btrees.
 394 *
 395 * Background:
 396 *
 397 * When enabled, sparse inode support increases the inode alignment from cluster
 398 * size to inode chunk size. This means that the minimum range between two
 399 * non-adjacent inode records in the inobt is large enough for a full inode
 400 * record. This allows for cluster sized, cluster aligned block allocation
  401 * without needing to worry about whether the resulting inode record overlaps with
 402 * another record in the tree. Without this basic rule, we would have to deal
 403 * with the consequences of overlap by potentially undoing recent allocations in
 404 * the inode allocation codepath.
 405 *
 406 * Because of this alignment rule (which is enforced on mount), there are two
 407 * inobt possibilities for newly allocated sparse chunks. One is that the
 408 * aligned inode record for the chunk covers a range of inodes not already
 409 * covered in the inobt (i.e., it is safe to insert a new sparse record). The
 410 * other is that a record already exists at the aligned startino that considers
 411 * the newly allocated range as sparse. In the latter case, record content is
  412 * merged in the hope that sparse inode chunks fill to full chunks over time.
 413 */
 414STATIC void
 415xfs_align_sparse_ino(
 416	struct xfs_mount		*mp,
 417	xfs_agino_t			*startino,
 418	uint16_t			*allocmask)
 419{
 420	xfs_agblock_t			agbno;
 421	xfs_agblock_t			mod;
 422	int				offset;
 423
 424	agbno = XFS_AGINO_TO_AGBNO(mp, *startino);
 425	mod = agbno % mp->m_sb.sb_inoalignmt;
 426	if (!mod)
 427		return;
 428
 429	/* calculate the inode offset and align startino */
 430	offset = XFS_AGB_TO_AGINO(mp, mod);
 431	*startino -= offset;
 432
 433	/*
 434	 * Since startino has been aligned down, left shift allocmask such that
 435	 * it continues to represent the same physical inodes relative to the
 436	 * new startino.
 437	 */
 438	*allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT;
 439}
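
/*
 * Worked example (editorial, not in the original source): suppose a
 * 32-inode sparse chunk landed 32 inodes past a chunk-aligned boundary
 * with *allocmask == 0x00ff. Aligning down subtracts offset == 32 from
 * *startino and shifts the mask left by 32 / XFS_INODES_PER_HOLEMASK_BIT
 * == 8 bits, giving 0xff00: the same physical inodes, now expressed
 * relative to the aligned startino.
 */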
 440
 441/*
 442 * Determine whether the source inode record can merge into the target. Both
 443 * records must be sparse, the inode ranges must match and there must be no
 444 * allocation overlap between the records.
 445 */
 446STATIC bool
 447__xfs_inobt_can_merge(
 448	struct xfs_inobt_rec_incore	*trec,	/* tgt record */
 449	struct xfs_inobt_rec_incore	*srec)	/* src record */
 450{
 451	uint64_t			talloc;
 452	uint64_t			salloc;
 453
 454	/* records must cover the same inode range */
 455	if (trec->ir_startino != srec->ir_startino)
 456		return false;
 457
 458	/* both records must be sparse */
 459	if (!xfs_inobt_issparse(trec->ir_holemask) ||
 460	    !xfs_inobt_issparse(srec->ir_holemask))
 461		return false;
 462
 463	/* both records must track some inodes */
 464	if (!trec->ir_count || !srec->ir_count)
 465		return false;
 466
 467	/* can't exceed capacity of a full record */
 468	if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK)
 469		return false;
 470
 471	/* verify there is no allocation overlap */
 472	talloc = xfs_inobt_irec_to_allocmask(trec);
 473	salloc = xfs_inobt_irec_to_allocmask(srec);
 474	if (talloc & salloc)
 475		return false;
 476
 477	return true;
 478}
 479
 480/*
 481 * Merge the source inode record into the target. The caller must call
 482 * __xfs_inobt_can_merge() to ensure the merge is valid.
 483 */
 484STATIC void
 485__xfs_inobt_rec_merge(
 486	struct xfs_inobt_rec_incore	*trec,	/* target */
 487	struct xfs_inobt_rec_incore	*srec)	/* src */
 488{
 489	ASSERT(trec->ir_startino == srec->ir_startino);
 490
 491	/* combine the counts */
 492	trec->ir_count += srec->ir_count;
 493	trec->ir_freecount += srec->ir_freecount;
 494
 495	/*
 496	 * Merge the holemask and free mask. For both fields, 0 bits refer to
 497	 * allocated inodes. We combine the allocated ranges with bitwise AND.
 498	 */
 499	trec->ir_holemask &= srec->ir_holemask;
 500	trec->ir_free &= srec->ir_free;
 501}
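
/*
 * Worked example (editorial, not in the original source): merging a
 * source record with ir_holemask == 0x00ff into a target with
 * ir_holemask == 0xff00, each with ir_count == 32, yields
 * ir_holemask == 0x0000 and ir_count == 64: two complementary sparse
 * halves fill in to a full chunk. The free masks combine the same way,
 * since 0 bits mean "allocated" in both fields.
 */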
 502
 503/*
 504 * Insert a new sparse inode chunk into the associated inode btree. The inode
 505 * record for the sparse chunk is pre-aligned to a startino that should match
 506 * any pre-existing sparse inode record in the tree. This allows sparse chunks
 507 * to fill over time.
 508 *
 509 * This function supports two modes of handling preexisting records depending on
 510 * the merge flag. If merge is true, the provided record is merged with the
 511 * existing record and updated in place. The merged record is returned in nrec.
 512 * If merge is false, an existing record is replaced with the provided record.
 513 * If no preexisting record exists, the provided record is always inserted.
 514 *
 515 * It is considered corruption if a merge is requested and not possible. Given
 516 * the sparse inode alignment constraints, this should never happen.
 517 */
 518STATIC int
 519xfs_inobt_insert_sprec(
 520	struct xfs_mount		*mp,
 521	struct xfs_trans		*tp,
 522	struct xfs_buf			*agbp,
 523	int				btnum,
 524	struct xfs_inobt_rec_incore	*nrec,	/* in/out: new/merged rec. */
 525	bool				merge)	/* merge or replace */
 526{
 527	struct xfs_btree_cur		*cur;
 528	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
 529	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
 530	int				error;
 531	int				i;
 532	struct xfs_inobt_rec_incore	rec;
 533
 534	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
 535
 536	/* the new record is pre-aligned so we know where to look */
 537	error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
 538	if (error)
 539		goto error;
 540	/* if nothing there, insert a new record and return */
 541	if (i == 0) {
 542		error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
 543					     nrec->ir_count, nrec->ir_freecount,
 544					     nrec->ir_free, &i);
 545		if (error)
 546			goto error;
 547		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
 548
 549		goto out;
 550	}
 551
 552	/*
 553	 * A record exists at this startino. Merge or replace the record
 554	 * depending on what we've been asked to do.
 555	 */
 556	if (merge) {
 557		error = xfs_inobt_get_rec(cur, &rec, &i);
 558		if (error)
 559			goto error;
 560		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
 561		XFS_WANT_CORRUPTED_GOTO(mp,
 562					rec.ir_startino == nrec->ir_startino,
 563					error);
 564
 565		/*
 566		 * This should never fail. If we have coexisting records that
 567		 * cannot merge, something is seriously wrong.
 568		 */
 569		XFS_WANT_CORRUPTED_GOTO(mp, __xfs_inobt_can_merge(nrec, &rec),
 570					error);
 571
 572		trace_xfs_irec_merge_pre(mp, agno, rec.ir_startino,
 573					 rec.ir_holemask, nrec->ir_startino,
 574					 nrec->ir_holemask);
 575
  576		/* merge into nrec to output the updated record */
 577		__xfs_inobt_rec_merge(nrec, &rec);
 578
 579		trace_xfs_irec_merge_post(mp, agno, nrec->ir_startino,
 580					  nrec->ir_holemask);
 581
 582		error = xfs_inobt_rec_check_count(mp, nrec);
 583		if (error)
 584			goto error;
 585	}
 586
 587	error = xfs_inobt_update(cur, nrec);
 588	if (error)
 589		goto error;
 590
 591out:
 592	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
 593	return 0;
 594error:
 595	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
 596	return error;
 597}
 598
 599/*
 600 * Allocate new inodes in the allocation group specified by agbp.
 601 * Return 0 for success, else error code.
 602 */
 603STATIC int
 604xfs_ialloc_ag_alloc(
 605	struct xfs_trans	*tp,
 606	struct xfs_buf		*agbp,
 607	int			*alloc)
 608{
 609	struct xfs_agi		*agi;
 610	struct xfs_alloc_arg	args;
 611	xfs_agnumber_t		agno;
 612	int			error;
 613	xfs_agino_t		newino;		/* new first inode's number */
 614	xfs_agino_t		newlen;		/* new number of inodes */
 615	int			isaligned = 0;	/* inode allocation at stripe */
 616						/* unit boundary */
 617	/* init. to full chunk */
 618	uint16_t		allocmask = (uint16_t) -1;
 619	struct xfs_inobt_rec_incore rec;
 620	struct xfs_perag	*pag;
 621	struct xfs_ino_geometry	*igeo = M_IGEO(tp->t_mountp);
 622	int			do_sparse = 0;
 623
 624	memset(&args, 0, sizeof(args));
 625	args.tp = tp;
 626	args.mp = tp->t_mountp;
 627	args.fsbno = NULLFSBLOCK;
 628	args.oinfo = XFS_RMAP_OINFO_INODES;
 629
 630#ifdef DEBUG
 631	/* randomly do sparse inode allocations */
 632	if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
 633	    igeo->ialloc_min_blks < igeo->ialloc_blks)
 634		do_sparse = prandom_u32() & 1;
 635#endif
 636
 637	/*
 638	 * Locking will ensure that we don't have two callers in here
 639	 * at one time.
 640	 */
 641	newlen = igeo->ialloc_inos;
 642	if (igeo->maxicount &&
 643	    percpu_counter_read_positive(&args.mp->m_icount) + newlen >
 644							igeo->maxicount)
 645		return -ENOSPC;
 646	args.minlen = args.maxlen = igeo->ialloc_blks;
 647	/*
 648	 * First try to allocate inodes contiguous with the last-allocated
 649	 * chunk of inodes.  If the filesystem is striped, this will fill
 650	 * an entire stripe unit with inodes.
 651	 */
 652	agi = XFS_BUF_TO_AGI(agbp);
 653	newino = be32_to_cpu(agi->agi_newino);
 654	agno = be32_to_cpu(agi->agi_seqno);
 655	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
 656		     igeo->ialloc_blks;
 657	if (do_sparse)
 658		goto sparse_alloc;
 659	if (likely(newino != NULLAGINO &&
 660		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
 661		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
 662		args.type = XFS_ALLOCTYPE_THIS_BNO;
 663		args.prod = 1;
 664
 665		/*
 666		 * We need to take into account alignment here to ensure that
 667		 * we don't modify the free list if we fail to have an exact
  668		 * block. If we don't have an exact match, and every other
  669		 * allocation attempt fails, we'll end up cancelling
 670		 * a dirty transaction and shutting down.
 671		 *
  672		 * For an exact allocation, alignment must be 1;
  673		 * however, we need to take cluster alignment into account when
 674		 * fixing up the freelist. Use the minalignslop field to
 675		 * indicate that extra blocks might be required for alignment,
 676		 * but not to use them in the actual exact allocation.
 677		 */
 678		args.alignment = 1;
 679		args.minalignslop = igeo->cluster_align - 1;
 680
 681		/* Allow space for the inode btree to split. */
 682		args.minleft = igeo->inobt_maxlevels - 1;
 683		if ((error = xfs_alloc_vextent(&args)))
 684			return error;
 685
 686		/*
 687		 * This request might have dirtied the transaction if the AG can
 688		 * satisfy the request, but the exact block was not available.
 689		 * If the allocation did fail, subsequent requests will relax
 690		 * the exact agbno requirement and increase the alignment
 691		 * instead. It is critical that the total size of the request
 692		 * (len + alignment + slop) does not increase from this point
 693		 * on, so reset minalignslop to ensure it is not included in
 694		 * subsequent requests.
 695		 */
 696		args.minalignslop = 0;
 697	}
 698
 699	if (unlikely(args.fsbno == NULLFSBLOCK)) {
 700		/*
 701		 * Set the alignment for the allocation.
 702		 * If stripe alignment is turned on then align at stripe unit
 703		 * boundary.
 704		 * If the cluster size is smaller than a filesystem block
 705		 * then we're doing I/O for inodes in filesystem block size
 706		 * pieces, so don't need alignment anyway.
 707		 */
 708		isaligned = 0;
 709		if (igeo->ialloc_align) {
 710			ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
 711			args.alignment = args.mp->m_dalign;
 712			isaligned = 1;
 713		} else
 714			args.alignment = igeo->cluster_align;
 715		/*
 716		 * Need to figure out where to allocate the inode blocks.
  717		 * Ideally they should be spaced out through the AG.
 718		 * For now, just allocate blocks up front.
 719		 */
 720		args.agbno = be32_to_cpu(agi->agi_root);
 721		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
 722		/*
 723		 * Allocate a fixed-size extent of inodes.
 724		 */
 725		args.type = XFS_ALLOCTYPE_NEAR_BNO;
 726		args.prod = 1;
 727		/*
 728		 * Allow space for the inode btree to split.
 729		 */
 730		args.minleft = igeo->inobt_maxlevels - 1;
 731		if ((error = xfs_alloc_vextent(&args)))
 732			return error;
 733	}
 734
 735	/*
 736	 * If stripe alignment is turned on, then try again with cluster
 737	 * alignment.
 738	 */
 739	if (isaligned && args.fsbno == NULLFSBLOCK) {
 740		args.type = XFS_ALLOCTYPE_NEAR_BNO;
 741		args.agbno = be32_to_cpu(agi->agi_root);
 742		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
 743		args.alignment = igeo->cluster_align;
 744		if ((error = xfs_alloc_vextent(&args)))
 745			return error;
 746	}
 747
 748	/*
 749	 * Finally, try a sparse allocation if the filesystem supports it and
 750	 * the sparse allocation length is smaller than a full chunk.
 751	 */
 752	if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) &&
 753	    igeo->ialloc_min_blks < igeo->ialloc_blks &&
 754	    args.fsbno == NULLFSBLOCK) {
 755sparse_alloc:
 756		args.type = XFS_ALLOCTYPE_NEAR_BNO;
 757		args.agbno = be32_to_cpu(agi->agi_root);
 758		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
 759		args.alignment = args.mp->m_sb.sb_spino_align;
 760		args.prod = 1;
 761
 762		args.minlen = igeo->ialloc_min_blks;
 763		args.maxlen = args.minlen;
 764
 765		/*
 766		 * The inode record will be aligned to full chunk size. We must
 767		 * prevent sparse allocation from AG boundaries that result in
 768		 * invalid inode records, such as records that start at agbno 0
 769		 * or extend beyond the AG.
 770		 *
 771		 * Set min agbno to the first aligned, non-zero agbno and max to
 772		 * the last aligned agbno that is at least one full chunk from
 773		 * the end of the AG.
 774		 */
 775		args.min_agbno = args.mp->m_sb.sb_inoalignmt;
 776		args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
 777					    args.mp->m_sb.sb_inoalignmt) -
 778				 igeo->ialloc_blks;
 779
 780		error = xfs_alloc_vextent(&args);
 781		if (error)
 782			return error;
 783
 784		newlen = XFS_AGB_TO_AGINO(args.mp, args.len);
 785		ASSERT(newlen <= XFS_INODES_PER_CHUNK);
 786		allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
 787	}
 788
 789	if (args.fsbno == NULLFSBLOCK) {
 790		*alloc = 0;
 791		return 0;
 792	}
 793	ASSERT(args.len == args.minlen);
 794
 795	/*
 796	 * Stamp and write the inode buffers.
 797	 *
 798	 * Seed the new inode cluster with a random generation number. This
 799	 * prevents short-term reuse of generation numbers if a chunk is
 800	 * freed and then immediately reallocated. We use random numbers
 801	 * rather than a linear progression to prevent the next generation
 802	 * number from being easily guessable.
 803	 */
 804	error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, agno,
 805			args.agbno, args.len, prandom_u32());
 806
 807	if (error)
 808		return error;
 809	/*
 810	 * Convert the results.
 811	 */
 812	newino = XFS_AGB_TO_AGINO(args.mp, args.agbno);
 813
 814	if (xfs_inobt_issparse(~allocmask)) {
 815		/*
 816		 * We've allocated a sparse chunk. Align the startino and mask.
 817		 */
 818		xfs_align_sparse_ino(args.mp, &newino, &allocmask);
 819
 820		rec.ir_startino = newino;
 821		rec.ir_holemask = ~allocmask;
 822		rec.ir_count = newlen;
 823		rec.ir_freecount = newlen;
 824		rec.ir_free = XFS_INOBT_ALL_FREE;
 825
 826		/*
 827		 * Insert the sparse record into the inobt and allow for a merge
 828		 * if necessary. If a merge does occur, rec is updated to the
 829		 * merged record.
 830		 */
 831		error = xfs_inobt_insert_sprec(args.mp, tp, agbp, XFS_BTNUM_INO,
 832					       &rec, true);
 833		if (error == -EFSCORRUPTED) {
 834			xfs_alert(args.mp,
 835	"invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
 836				  XFS_AGINO_TO_INO(args.mp, agno,
 837						   rec.ir_startino),
 838				  rec.ir_holemask, rec.ir_count);
 839			xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
 840		}
 841		if (error)
 842			return error;
 843
 844		/*
  845		 * We can't merge the newly allocated part into the finobt the
  846		 * way we did for the inobt, due to finobt semantics. The
  847		 * original record may or may not exist independently of
  848		 * whether physical inodes exist in this sparse chunk.
 849		 *
 850		 * We must update the finobt record based on the inobt record.
 851		 * rec contains the fully merged and up to date inobt record
 852		 * from the previous call. Set merge false to replace any
 853		 * existing record with this one.
 854		 */
 855		if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
 856			error = xfs_inobt_insert_sprec(args.mp, tp, agbp,
 857						       XFS_BTNUM_FINO, &rec,
 858						       false);
 859			if (error)
 860				return error;
 861		}
 862	} else {
 863		/* full chunk - insert new records to both btrees */
 864		error = xfs_inobt_insert(args.mp, tp, agbp, newino, newlen,
 865					 XFS_BTNUM_INO);
 866		if (error)
 867			return error;
 868
 869		if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
 870			error = xfs_inobt_insert(args.mp, tp, agbp, newino,
 871						 newlen, XFS_BTNUM_FINO);
 872			if (error)
 873				return error;
 874		}
 875	}
 876
 877	/*
 878	 * Update AGI counts and newino.
 879	 */
 880	be32_add_cpu(&agi->agi_count, newlen);
 881	be32_add_cpu(&agi->agi_freecount, newlen);
 882	pag = xfs_perag_get(args.mp, agno);
 883	pag->pagi_freecount += newlen;
 884	pag->pagi_count += newlen;
 885	xfs_perag_put(pag);
 886	agi->agi_newino = cpu_to_be32(newino);
 887
 888	/*
 889	 * Log allocation group header fields
 890	 */
 891	xfs_ialloc_log_agi(tp, agbp,
 892		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
 893	/*
 894	 * Modify/log superblock values for inode count and inode free count.
 895	 */
 896	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
 897	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
 898	*alloc = 1;
 899	return 0;
 900}
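
/*
 * Editorial example for the sparse path above: a sparse allocation of
 * newlen == 32 inodes yields allocmask == (1 << (32 / 4)) - 1 == 0x00ff,
 * i.e. the low eight 4-inode units are allocated. xfs_align_sparse_ino()
 * may then shift that mask so it is relative to the chunk-aligned
 * startino, and ~allocmask becomes the new record's holemask.
 */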
 901
 902STATIC xfs_agnumber_t
 903xfs_ialloc_next_ag(
 904	xfs_mount_t	*mp)
 905{
 906	xfs_agnumber_t	agno;
 907
 908	spin_lock(&mp->m_agirotor_lock);
 909	agno = mp->m_agirotor;
 910	if (++mp->m_agirotor >= mp->m_maxagi)
 911		mp->m_agirotor = 0;
 912	spin_unlock(&mp->m_agirotor_lock);
 913
 914	return agno;
 915}
 916
 917/*
 918 * Select an allocation group to look for a free inode in, based on the parent
 919 * inode and the mode.  Return the allocation group buffer.
 920 */
 921STATIC xfs_agnumber_t
 922xfs_ialloc_ag_select(
 923	xfs_trans_t	*tp,		/* transaction pointer */
 924	xfs_ino_t	parent,		/* parent directory inode number */
 925	umode_t		mode)		/* bits set to indicate file type */
 926{
 927	xfs_agnumber_t	agcount;	/* number of ag's in the filesystem */
 928	xfs_agnumber_t	agno;		/* current ag number */
 929	int		flags;		/* alloc buffer locking flags */
 930	xfs_extlen_t	ineed;		/* blocks needed for inode allocation */
 931	xfs_extlen_t	longest = 0;	/* longest extent available */
 932	xfs_mount_t	*mp;		/* mount point structure */
 933	int		needspace;	/* file mode implies space allocated */
 934	xfs_perag_t	*pag;		/* per allocation group data */
 935	xfs_agnumber_t	pagno;		/* parent (starting) ag number */
 936	int		error;
 937
 938	/*
 939	 * Files of these types need at least one block if length > 0
 940	 * (and they won't fit in the inode, but that's hard to figure out).
 941	 */
 942	needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
 943	mp = tp->t_mountp;
 944	agcount = mp->m_maxagi;
 945	if (S_ISDIR(mode))
 946		pagno = xfs_ialloc_next_ag(mp);
 947	else {
 948		pagno = XFS_INO_TO_AGNO(mp, parent);
 949		if (pagno >= agcount)
 950			pagno = 0;
 951	}
 952
 953	ASSERT(pagno < agcount);
 954
 955	/*
 956	 * Loop through allocation groups, looking for one with a little
  957	 * free space in it.  Note we don't look for free inodes, exactly.
  958	 * Instead, we account for the fact that, if no inodes are
  959	 * currently free, blocks must be available to allocate
  960	 * a new inode chunk.
 961	 */
 962	agno = pagno;
 963	flags = XFS_ALLOC_FLAG_TRYLOCK;
 964	for (;;) {
 965		pag = xfs_perag_get(mp, agno);
 966		if (!pag->pagi_inodeok) {
 967			xfs_ialloc_next_ag(mp);
 968			goto nextag;
 969		}
 970
 971		if (!pag->pagi_init) {
 972			error = xfs_ialloc_pagi_init(mp, tp, agno);
 973			if (error)
 974				goto nextag;
 975		}
 976
 977		if (pag->pagi_freecount) {
 978			xfs_perag_put(pag);
 979			return agno;
 980		}
 981
 982		if (!pag->pagf_init) {
 983			error = xfs_alloc_pagf_init(mp, tp, agno, flags);
 984			if (error)
 985				goto nextag;
 986		}
 987
 988		/*
 989		 * Check that there is enough free space for the file plus a
 990		 * chunk of inodes if we need to allocate some. If this is the
 991		 * first pass across the AGs, take into account the potential
 992		 * space needed for alignment of inode chunks when checking the
 993		 * longest contiguous free space in the AG - this prevents us
 994		 * from getting ENOSPC because we have free space larger than
 995		 * ialloc_blks but alignment constraints prevent us from using
 996		 * it.
 997		 *
 998		 * If we can't find an AG with space for full alignment slack to
 999		 * be taken into account, we must be near ENOSPC in all AGs.
1000		 * Hence we don't include alignment for the second pass and so
1001		 * if we fail allocation due to alignment issues then it is most
1002		 * likely a real ENOSPC condition.
1003		 */
1004		ineed = M_IGEO(mp)->ialloc_min_blks;
1005		if (flags && ineed > 1)
1006			ineed += M_IGEO(mp)->cluster_align;
1007		longest = pag->pagf_longest;
1008		if (!longest)
1009			longest = pag->pagf_flcount > 0;
1010
1011		if (pag->pagf_freeblks >= needspace + ineed &&
1012		    longest >= ineed) {
1013			xfs_perag_put(pag);
1014			return agno;
1015		}
1016nextag:
1017		xfs_perag_put(pag);
1018		/*
1019		 * No point in iterating over the rest, if we're shutting
1020		 * down.
1021		 */
1022		if (XFS_FORCED_SHUTDOWN(mp))
1023			return NULLAGNUMBER;
1024		agno++;
1025		if (agno >= agcount)
1026			agno = 0;
1027		if (agno == pagno) {
1028			if (flags == 0)
1029				return NULLAGNUMBER;
1030			flags = 0;
1031		}
1032	}
1033}
1034
1035/*
1036 * Try to retrieve the next record to the left/right from the current one.
1037 */
1038STATIC int
1039xfs_ialloc_next_rec(
1040	struct xfs_btree_cur	*cur,
1041	xfs_inobt_rec_incore_t	*rec,
1042	int			*done,
1043	int			left)
1044{
1045	int                     error;
1046	int			i;
1047
1048	if (left)
1049		error = xfs_btree_decrement(cur, 0, &i);
1050	else
1051		error = xfs_btree_increment(cur, 0, &i);
1052
1053	if (error)
1054		return error;
1055	*done = !i;
1056	if (i) {
1057		error = xfs_inobt_get_rec(cur, rec, &i);
1058		if (error)
1059			return error;
1060		XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1061	}
1062
1063	return 0;
1064}
1065
1066STATIC int
1067xfs_ialloc_get_rec(
1068	struct xfs_btree_cur	*cur,
1069	xfs_agino_t		agino,
1070	xfs_inobt_rec_incore_t	*rec,
1071	int			*done)
1072{
1073	int                     error;
1074	int			i;
1075
1076	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
1077	if (error)
1078		return error;
1079	*done = !i;
1080	if (i) {
1081		error = xfs_inobt_get_rec(cur, rec, &i);
1082		if (error)
1083			return error;
1084		XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1085	}
1086
1087	return 0;
1088}
1089
1090/*
1091 * Return the offset of the first free inode in the record. If the inode chunk
1092 * is sparsely allocated, we convert the record holemask to inode granularity
1093 * and mask off the unallocated regions from the inode free mask.
1094 */
1095STATIC int
1096xfs_inobt_first_free_inode(
1097	struct xfs_inobt_rec_incore	*rec)
1098{
1099	xfs_inofree_t			realfree;
1100
1101	/* if there are no holes, return the first available offset */
1102	if (!xfs_inobt_issparse(rec->ir_holemask))
1103		return xfs_lowbit64(rec->ir_free);
1104
1105	realfree = xfs_inobt_irec_to_allocmask(rec);
1106	realfree &= rec->ir_free;
1107
1108	return xfs_lowbit64(realfree);
1109}
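
/*
 * Editorial example: for a sparse record with ir_holemask == 0x00ff
 * (inodes 0-31 are holes) and ir_free == ~0ULL, the raw lowbit of
 * ir_free would be 0, an inode that does not physically exist. Masking
 * with the allocmask (0xffffffff00000000) makes xfs_lowbit64() return
 * 32, the first free inode that is actually allocated on disk.
 */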
1110
1111/*
1112 * Allocate an inode using the inobt-only algorithm.
1113 */
1114STATIC int
1115xfs_dialloc_ag_inobt(
1116	struct xfs_trans	*tp,
1117	struct xfs_buf		*agbp,
1118	xfs_ino_t		parent,
1119	xfs_ino_t		*inop)
1120{
1121	struct xfs_mount	*mp = tp->t_mountp;
1122	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
1123	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
1124	xfs_agnumber_t		pagno = XFS_INO_TO_AGNO(mp, parent);
1125	xfs_agino_t		pagino = XFS_INO_TO_AGINO(mp, parent);
1126	struct xfs_perag	*pag;
1127	struct xfs_btree_cur	*cur, *tcur;
1128	struct xfs_inobt_rec_incore rec, trec;
1129	xfs_ino_t		ino;
1130	int			error;
1131	int			offset;
1132	int			i, j;
1133	int			searchdistance = 10;
1134
1135	pag = xfs_perag_get(mp, agno);
1136
1137	ASSERT(pag->pagi_init);
1138	ASSERT(pag->pagi_inodeok);
1139	ASSERT(pag->pagi_freecount > 0);
1140
1141 restart_pagno:
1142	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1143	/*
1144	 * If pagino is 0 (this is the root inode allocation) use newino.
1145	 * This must work because we've just allocated some.
1146	 */
1147	if (!pagino)
1148		pagino = be32_to_cpu(agi->agi_newino);
1149
1150	error = xfs_check_agi_freecount(cur, agi);
1151	if (error)
1152		goto error0;
1153
1154	/*
1155	 * If in the same AG as the parent, try to get near the parent.
1156	 */
1157	if (pagno == agno) {
1158		int		doneleft;	/* done, to the left */
1159		int		doneright;	/* done, to the right */
1160
1161		error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
1162		if (error)
1163			goto error0;
1164		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1165
1166		error = xfs_inobt_get_rec(cur, &rec, &j);
1167		if (error)
1168			goto error0;
1169		XFS_WANT_CORRUPTED_GOTO(mp, j == 1, error0);
1170
1171		if (rec.ir_freecount > 0) {
1172			/*
1173			 * Found a free inode in the same chunk
1174			 * as the parent, done.
1175			 */
1176			goto alloc_inode;
1177		}
1178
1179
1180		/*
1181		 * In the same AG as parent, but parent's chunk is full.
1182		 */
1183
1184		/* duplicate the cursor, search left & right simultaneously */
1185		error = xfs_btree_dup_cursor(cur, &tcur);
1186		if (error)
1187			goto error0;
1188
1189		/*
1190		 * Skip to last blocks looked up if same parent inode.
1191		 */
1192		if (pagino != NULLAGINO &&
1193		    pag->pagl_pagino == pagino &&
1194		    pag->pagl_leftrec != NULLAGINO &&
1195		    pag->pagl_rightrec != NULLAGINO) {
1196			error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
1197						   &trec, &doneleft);
1198			if (error)
1199				goto error1;
1200
1201			error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
1202						   &rec, &doneright);
1203			if (error)
1204				goto error1;
1205		} else {
1206			/* search left with tcur, back up 1 record */
1207			error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
1208			if (error)
1209				goto error1;
1210
1211			/* search right with cur, go forward 1 record. */
1212			error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
1213			if (error)
1214				goto error1;
1215		}
1216
1217		/*
1218		 * Loop until we find an inode chunk with a free inode.
1219		 */
1220		while (--searchdistance > 0 && (!doneleft || !doneright)) {
1221			int	useleft;  /* using left inode chunk this time */
1222
1223			/* figure out the closer block if both are valid. */
1224			if (!doneleft && !doneright) {
1225				useleft = pagino -
1226				 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
1227				  rec.ir_startino - pagino;
1228			} else {
1229				useleft = !doneleft;
1230			}
1231
1232			/* free inodes to the left? */
1233			if (useleft && trec.ir_freecount) {
1234				xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1235				cur = tcur;
1236
1237				pag->pagl_leftrec = trec.ir_startino;
1238				pag->pagl_rightrec = rec.ir_startino;
1239				pag->pagl_pagino = pagino;
1240				rec = trec;
1241				goto alloc_inode;
1242			}
1243
1244			/* free inodes to the right? */
1245			if (!useleft && rec.ir_freecount) {
1246				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1247
1248				pag->pagl_leftrec = trec.ir_startino;
1249				pag->pagl_rightrec = rec.ir_startino;
1250				pag->pagl_pagino = pagino;
1251				goto alloc_inode;
1252			}
1253
1254			/* get next record to check */
1255			if (useleft) {
1256				error = xfs_ialloc_next_rec(tcur, &trec,
1257								 &doneleft, 1);
1258			} else {
1259				error = xfs_ialloc_next_rec(cur, &rec,
1260								 &doneright, 0);
1261			}
1262			if (error)
1263				goto error1;
1264		}
1265
1266		if (searchdistance <= 0) {
1267			/*
1268			 * Not in range - save last search
1269			 * location and allocate a new inode
1270			 */
1271			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1272			pag->pagl_leftrec = trec.ir_startino;
1273			pag->pagl_rightrec = rec.ir_startino;
1274			pag->pagl_pagino = pagino;
1275
1276		} else {
1277			/*
 1278			 * We've reached the end of the btree. Because
 1279			 * we only search a small chunk of the
 1280			 * btree each time, there are obviously free
 1281			 * inodes closer to the parent inode than we
 1282			 * are now. Restart the search.
1283			 */
1284			pag->pagl_pagino = NULLAGINO;
1285			pag->pagl_leftrec = NULLAGINO;
1286			pag->pagl_rightrec = NULLAGINO;
1287			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1288			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1289			goto restart_pagno;
1290		}
1291	}
1292
1293	/*
1294	 * In a different AG from the parent.
1295	 * See if the most recently allocated block has any free.
1296	 */
1297	if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1298		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1299					 XFS_LOOKUP_EQ, &i);
1300		if (error)
1301			goto error0;
1302
1303		if (i == 1) {
1304			error = xfs_inobt_get_rec(cur, &rec, &j);
1305			if (error)
1306				goto error0;
1307
1308			if (j == 1 && rec.ir_freecount > 0) {
1309				/*
1310				 * The last chunk allocated in the group
1311				 * still has a free inode.
1312				 */
1313				goto alloc_inode;
1314			}
1315		}
1316	}
1317
1318	/*
1319	 * None left in the last group, search the whole AG
1320	 */
1321	error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1322	if (error)
1323		goto error0;
1324	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1325
1326	for (;;) {
1327		error = xfs_inobt_get_rec(cur, &rec, &i);
1328		if (error)
1329			goto error0;
1330		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1331		if (rec.ir_freecount > 0)
1332			break;
1333		error = xfs_btree_increment(cur, 0, &i);
1334		if (error)
1335			goto error0;
1336		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1337	}
1338
1339alloc_inode:
1340	offset = xfs_inobt_first_free_inode(&rec);
1341	ASSERT(offset >= 0);
1342	ASSERT(offset < XFS_INODES_PER_CHUNK);
1343	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1344				   XFS_INODES_PER_CHUNK) == 0);
1345	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
1346	rec.ir_free &= ~XFS_INOBT_MASK(offset);
1347	rec.ir_freecount--;
1348	error = xfs_inobt_update(cur, &rec);
1349	if (error)
1350		goto error0;
1351	be32_add_cpu(&agi->agi_freecount, -1);
1352	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1353	pag->pagi_freecount--;
1354
1355	error = xfs_check_agi_freecount(cur, agi);
1356	if (error)
1357		goto error0;
1358
1359	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1360	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1361	xfs_perag_put(pag);
1362	*inop = ino;
1363	return 0;
1364error1:
1365	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
1366error0:
1367	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1368	xfs_perag_put(pag);
1369	return error;
1370}
1371
1372/*
1373 * Use the free inode btree to allocate an inode based on distance from the
1374 * parent. Note that the provided cursor may be deleted and replaced.
1375 */
1376STATIC int
1377xfs_dialloc_ag_finobt_near(
1378	xfs_agino_t			pagino,
1379	struct xfs_btree_cur		**ocur,
1380	struct xfs_inobt_rec_incore	*rec)
1381{
1382	struct xfs_btree_cur		*lcur = *ocur;	/* left search cursor */
1383	struct xfs_btree_cur		*rcur;	/* right search cursor */
1384	struct xfs_inobt_rec_incore	rrec;
1385	int				error;
1386	int				i, j;
1387
1388	error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i);
1389	if (error)
1390		return error;
1391
1392	if (i == 1) {
1393		error = xfs_inobt_get_rec(lcur, rec, &i);
1394		if (error)
1395			return error;
1396		XFS_WANT_CORRUPTED_RETURN(lcur->bc_mp, i == 1);
1397
1398		/*
1399		 * See if we've landed in the parent inode record. The finobt
1400		 * only tracks chunks with at least one free inode, so record
1401		 * existence is enough.
1402		 */
1403		if (pagino >= rec->ir_startino &&
1404		    pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK))
1405			return 0;
1406	}
1407
1408	error = xfs_btree_dup_cursor(lcur, &rcur);
1409	if (error)
1410		return error;
1411
1412	error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j);
1413	if (error)
1414		goto error_rcur;
1415	if (j == 1) {
1416		error = xfs_inobt_get_rec(rcur, &rrec, &j);
1417		if (error)
1418			goto error_rcur;
1419		XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, j == 1, error_rcur);
1420	}
1421
1422	XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, i == 1 || j == 1, error_rcur);
1423	if (i == 1 && j == 1) {
1424		/*
1425		 * Both the left and right records are valid. Choose the closer
1426		 * inode chunk to the target.
1427		 */
1428		if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) >
1429		    (rrec.ir_startino - pagino)) {
1430			*rec = rrec;
1431			xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1432			*ocur = rcur;
1433		} else {
1434			xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1435		}
1436	} else if (j == 1) {
1437		/* only the right record is valid */
1438		*rec = rrec;
1439		xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1440		*ocur = rcur;
1441	} else if (i == 1) {
1442		/* only the left record is valid */
1443		xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1444	}
1445
1446	return 0;
1447
1448error_rcur:
1449	xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR);
1450	return error;
1451}
1452
1453/*
1454 * Use the free inode btree to find a free inode based on a newino hint. If
1455 * the hint is NULL, find the first free inode in the AG.
1456 */
1457STATIC int
1458xfs_dialloc_ag_finobt_newino(
1459	struct xfs_agi			*agi,
1460	struct xfs_btree_cur		*cur,
1461	struct xfs_inobt_rec_incore	*rec)
1462{
1463	int error;
1464	int i;
1465
1466	if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1467		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1468					 XFS_LOOKUP_EQ, &i);
1469		if (error)
1470			return error;
1471		if (i == 1) {
1472			error = xfs_inobt_get_rec(cur, rec, &i);
1473			if (error)
1474				return error;
1475			XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1476			return 0;
1477		}
1478	}
1479
1480	/*
1481	 * Find the first inode available in the AG.
1482	 */
1483	error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1484	if (error)
1485		return error;
1486	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1487
1488	error = xfs_inobt_get_rec(cur, rec, &i);
1489	if (error)
1490		return error;
1491	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1492
1493	return 0;
1494}
1495
1496/*
1497 * Update the inobt based on a modification made to the finobt. Also ensure that
1498 * the records from both trees are equivalent post-modification.
1499 */
1500STATIC int
1501xfs_dialloc_ag_update_inobt(
1502	struct xfs_btree_cur		*cur,	/* inobt cursor */
1503	struct xfs_inobt_rec_incore	*frec,	/* finobt record */
1504	int				offset) /* inode offset */
1505{
1506	struct xfs_inobt_rec_incore	rec;
1507	int				error;
1508	int				i;
1509
1510	error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
1511	if (error)
1512		return error;
1513	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1514
1515	error = xfs_inobt_get_rec(cur, &rec, &i);
1516	if (error)
1517		return error;
1518	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1519	ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
1520				   XFS_INODES_PER_CHUNK) == 0);
1521
1522	rec.ir_free &= ~XFS_INOBT_MASK(offset);
1523	rec.ir_freecount--;
1524
1525	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, (rec.ir_free == frec->ir_free) &&
1526				  (rec.ir_freecount == frec->ir_freecount));
1527
1528	return xfs_inobt_update(cur, &rec);
1529}
1530
1531/*
1532 * Allocate an inode using the free inode btree, if available. Otherwise, fall
1533 * back to the inobt search algorithm.
1534 *
1535 * The caller selected an AG for us, and made sure that free inodes are
1536 * available.
1537 */
1538STATIC int
1539xfs_dialloc_ag(
1540	struct xfs_trans	*tp,
1541	struct xfs_buf		*agbp,
1542	xfs_ino_t		parent,
1543	xfs_ino_t		*inop)
1544{
1545	struct xfs_mount		*mp = tp->t_mountp;
1546	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
1547	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
1548	xfs_agnumber_t			pagno = XFS_INO_TO_AGNO(mp, parent);
1549	xfs_agino_t			pagino = XFS_INO_TO_AGINO(mp, parent);
1550	struct xfs_perag		*pag;
1551	struct xfs_btree_cur		*cur;	/* finobt cursor */
1552	struct xfs_btree_cur		*icur;	/* inobt cursor */
1553	struct xfs_inobt_rec_incore	rec;
1554	xfs_ino_t			ino;
1555	int				error;
1556	int				offset;
1557	int				i;
1558
1559	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
1560		return xfs_dialloc_ag_inobt(tp, agbp, parent, inop);
1561
1562	pag = xfs_perag_get(mp, agno);
1563
1564	/*
1565	 * If pagino is 0 (this is the root inode allocation) use newino.
1566	 * This must work because we've just allocated some.
1567	 */
1568	if (!pagino)
1569		pagino = be32_to_cpu(agi->agi_newino);
1570
1571	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
1572
1573	error = xfs_check_agi_freecount(cur, agi);
1574	if (error)
1575		goto error_cur;
1576
1577	/*
1578	 * The search algorithm depends on whether we're in the same AG as the
1579	 * parent. If so, find the closest available inode to the parent. If
1580	 * not, consider the agi hint or find the first free inode in the AG.
1581	 */
1582	if (agno == pagno)
1583		error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
1584	else
1585		error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
1586	if (error)
1587		goto error_cur;
1588
1589	offset = xfs_inobt_first_free_inode(&rec);
1590	ASSERT(offset >= 0);
1591	ASSERT(offset < XFS_INODES_PER_CHUNK);
1592	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1593				   XFS_INODES_PER_CHUNK) == 0);
1594	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
1595
1596	/*
1597	 * Modify or remove the finobt record.
1598	 */
1599	rec.ir_free &= ~XFS_INOBT_MASK(offset);
1600	rec.ir_freecount--;
1601	if (rec.ir_freecount)
1602		error = xfs_inobt_update(cur, &rec);
1603	else
1604		error = xfs_btree_delete(cur, &i);
1605	if (error)
1606		goto error_cur;
1607
1608	/*
1609	 * The finobt has now been updated appropriately. We haven't updated the
1610	 * agi and superblock yet, so we can create an inobt cursor and validate
1611	 * the original freecount. If all is well, make the equivalent update to
1612	 * the inobt using the finobt record and offset information.
1613	 */
1614	icur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1615
1616	error = xfs_check_agi_freecount(icur, agi);
1617	if (error)
1618		goto error_icur;
1619
1620	error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
1621	if (error)
1622		goto error_icur;
1623
1624	/*
1625	 * Both trees have now been updated. We must update the perag and
1626	 * superblock before we can check the freecount for each btree.
1627	 */
1628	be32_add_cpu(&agi->agi_freecount, -1);
1629	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1630	pag->pagi_freecount--;
1631
1632	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1633
1634	error = xfs_check_agi_freecount(icur, agi);
1635	if (error)
1636		goto error_icur;
1637	error = xfs_check_agi_freecount(cur, agi);
1638	if (error)
1639		goto error_icur;
1640
1641	xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
1642	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1643	xfs_perag_put(pag);
1644	*inop = ino;
1645	return 0;
1646
1647error_icur:
1648	xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
1649error_cur:
1650	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1651	xfs_perag_put(pag);
1652	return error;
1653}
1654
1655/*
1656 * Allocate an inode on disk.
1657 *
1658 * Mode is used to tell whether the new inode will need space, and whether it
1659 * is a directory.
1660 *
1661 * This function is designed to be called twice if it has to do an allocation
1662 * to make more free inodes.  On the first call, *IO_agbp should be set to NULL.
 1663 * If an inode is available without having to perform an allocation, an inode
1664 * number is returned.  In this case, *IO_agbp is set to NULL.  If an allocation
1665 * needs to be done, xfs_dialloc returns the current AGI buffer in *IO_agbp.
1666 * The caller should then commit the current transaction, allocate a
1667 * new transaction, and call xfs_dialloc() again, passing in the previous value
1668 * of *IO_agbp.  IO_agbp should be held across the transactions. Since the AGI
1669 * buffer is locked across the two calls, the second call is guaranteed to have
1670 * a free inode available.
1671 *
1672 * Once we successfully pick an inode its number is returned and the on-disk
1673 * data structures are updated.  The inode itself is not read in, since doing so
1674 * would break ordering constraints with xfs_reclaim.
1675 */
1676int
1677xfs_dialloc(
1678	struct xfs_trans	*tp,
1679	xfs_ino_t		parent,
1680	umode_t			mode,
1681	struct xfs_buf		**IO_agbp,
1682	xfs_ino_t		*inop)
1683{
1684	struct xfs_mount	*mp = tp->t_mountp;
1685	struct xfs_buf		*agbp;
1686	xfs_agnumber_t		agno;
1687	int			error;
1688	int			ialloced;
1689	int			noroom = 0;
1690	xfs_agnumber_t		start_agno;
1691	struct xfs_perag	*pag;
1692	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
1693	int			okalloc = 1;
1694
1695	if (*IO_agbp) {
1696		/*
1697		 * If the caller passes in a pointer to the AGI buffer,
1698		 * continue where we left off before.  In this case, we
1699		 * know that the allocation group has free inodes.
1700		 */
1701		agbp = *IO_agbp;
1702		goto out_alloc;
1703	}
1704
1705	/*
1706	 * We do not have an agbp, so select an initial allocation
1707	 * group for inode allocation.
1708	 */
1709	start_agno = xfs_ialloc_ag_select(tp, parent, mode);
1710	if (start_agno == NULLAGNUMBER) {
1711		*inop = NULLFSINO;
1712		return 0;
1713	}
1714
1715	/*
1716	 * If we have already hit the ceiling of inode blocks then clear
1717	 * okalloc so we scan all available agi structures for a free
1718	 * inode.
1719	 *
 1720	 * Read a rough value of mp->m_icount via percpu_counter_read_positive,
 1721	 * which sacrifices precision for performance.
1722	 */
1723	if (igeo->maxicount &&
1724	    percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
1725							> igeo->maxicount) {
1726		noroom = 1;
1727		okalloc = 0;
1728	}
1729
1730	/*
1731	 * Loop until we find an allocation group that either has free inodes
1732	 * or in which we can allocate some inodes.  Iterate through the
1733	 * allocation groups upward, wrapping at the end.
1734	 */
1735	agno = start_agno;
1736	for (;;) {
1737		pag = xfs_perag_get(mp, agno);
1738		if (!pag->pagi_inodeok) {
1739			xfs_ialloc_next_ag(mp);
1740			goto nextag;
1741		}
1742
1743		if (!pag->pagi_init) {
1744			error = xfs_ialloc_pagi_init(mp, tp, agno);
1745			if (error)
1746				goto out_error;
1747		}
1748
1749		/*
1750		 * Do a first racy fast path check if this AG is usable.
1751		 */
1752		if (!pag->pagi_freecount && !okalloc)
1753			goto nextag;
1754
1755		/*
1756		 * Then read in the AGI buffer and recheck with the AGI buffer
1757		 * lock held.
1758		 */
1759		error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
1760		if (error)
1761			goto out_error;
1762
1763		if (pag->pagi_freecount) {
1764			xfs_perag_put(pag);
1765			goto out_alloc;
1766		}
1767
1768		if (!okalloc)
1769			goto nextag_relse_buffer;
1770
1771
1772		error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
1773		if (error) {
1774			xfs_trans_brelse(tp, agbp);
1775
1776			if (error != -ENOSPC)
1777				goto out_error;
1778
1779			xfs_perag_put(pag);
1780			*inop = NULLFSINO;
1781			return 0;
1782		}
1783
1784		if (ialloced) {
1785			/*
1786			 * We successfully allocated some inodes, return
1787			 * the current context to the caller so that it
1788			 * can commit the current transaction and call
1789			 * us again where we left off.
1790			 */
1791			ASSERT(pag->pagi_freecount > 0);
1792			xfs_perag_put(pag);
1793
1794			*IO_agbp = agbp;
1795			*inop = NULLFSINO;
1796			return 0;
1797		}
1798
1799nextag_relse_buffer:
1800		xfs_trans_brelse(tp, agbp);
1801nextag:
1802		xfs_perag_put(pag);
1803		if (++agno == mp->m_sb.sb_agcount)
1804			agno = 0;
1805		if (agno == start_agno) {
1806			*inop = NULLFSINO;
1807			return noroom ? -ENOSPC : 0;
1808		}
1809	}
1810
1811out_alloc:
1812	*IO_agbp = NULL;
1813	return xfs_dialloc_ag(tp, agbp, parent, inop);
1814out_error:
1815	xfs_perag_put(pag);
1816	return error;
1817}
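
/*
 * Illustrative sketch (userspace, not from this file): the loop in
 * xfs_dialloc() above walks the allocation groups round-robin, wrapping
 * at the last AG and giving up once it returns to the starting AG.  The
 * helper below models just that iteration pattern; the usable[] array
 * and pick_ag() are hypothetical stand-ins for the perag/AGI checks.
 */
#include <stdbool.h>

static int pick_ag(const bool *usable, unsigned int start_agno,
		   unsigned int agcount)
{
	unsigned int agno = start_agno;

	for (;;) {
		if (usable[agno])
			return (int)agno;	/* found a usable AG */
		if (++agno == agcount)
			agno = 0;		/* wrap at the last AG */
		if (agno == start_agno)
			return -1;		/* full circle, nothing usable */
	}
}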
1818
1819/*
1820 * Free the blocks of an inode chunk. We must consider that the inode chunk
1821 * might be sparse and only free the regions that are allocated as part of the
1822 * chunk.
1823 */
1824STATIC void
1825xfs_difree_inode_chunk(
1826	struct xfs_trans		*tp,
1827	xfs_agnumber_t			agno,
1828	struct xfs_inobt_rec_incore	*rec)
1829{
1830	struct xfs_mount		*mp = tp->t_mountp;
1831	xfs_agblock_t			sagbno = XFS_AGINO_TO_AGBNO(mp,
1832							rec->ir_startino);
1833	int				startidx, endidx;
1834	int				nextbit;
1835	xfs_agblock_t			agbno;
1836	int				contigblk;
1837	DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
1838
1839	if (!xfs_inobt_issparse(rec->ir_holemask)) {
1840		/* not sparse, calculate extent info directly */
1841		xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
1842				  M_IGEO(mp)->ialloc_blks,
1843				  &XFS_RMAP_OINFO_INODES);
1844		return;
1845	}
1846
1847	/* the holemask is only 16 bits wide (it fits in an unsigned long) */
1848	ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0]));
1849	holemask[0] = rec->ir_holemask;
1850
1851	/*
1852	 * Find contiguous ranges of zeroes (i.e., allocated regions) in the
1853	 * holemask and convert the start/end index of each range to an extent.
1854	 * We start with the start and end index both pointing at the first 0 in
1855	 * the mask.
1856	 */
1857	startidx = endidx = find_first_zero_bit(holemask,
1858						XFS_INOBT_HOLEMASK_BITS);
1859	nextbit = startidx + 1;
1860	while (startidx < XFS_INOBT_HOLEMASK_BITS) {
1861		nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
1862					     nextbit);
1863		/*
1864		 * If the next zero bit is contiguous, update the end index of
1865		 * the current range and continue.
1866		 */
1867		if (nextbit != XFS_INOBT_HOLEMASK_BITS &&
1868		    nextbit == endidx + 1) {
1869			endidx = nextbit;
1870			goto next;
1871		}
1872
1873		/*
1874		 * nextbit is not contiguous with the current end index. Convert
1875		 * the current start/end to an extent and add it to the free
1876		 * list.
1877		 */
1878		agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) /
1879				  mp->m_sb.sb_inopblock;
1880		contigblk = ((endidx - startidx + 1) *
1881			     XFS_INODES_PER_HOLEMASK_BIT) /
1882			    mp->m_sb.sb_inopblock;
1883
1884		ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
1885		ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
1886		xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, agbno),
1887				  contigblk, &XFS_RMAP_OINFO_INODES);
1888
1889		/* reset range to current bit and carry on... */
1890		startidx = endidx = nextbit;
1891
1892next:
1893		nextbit++;
1894	}
1895}
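
/*
 * Illustrative sketch (userspace, not from this file): the core of
 * xfs_difree_inode_chunk() above is a scan for runs of clear bits in a
 * 16-bit holemask, where each run corresponds to an allocated region to
 * be freed.  The demo below prints the runs instead of freeing extents;
 * all names are local to this example, and HOLEMASK_BITS mirrors
 * XFS_INOBT_HOLEMASK_BITS.
 */
#include <stdio.h>
#include <stdint.h>

#define HOLEMASK_BITS	16

static void print_allocated_runs(uint16_t holemask)
{
	int startidx = -1;
	int i;

	for (i = 0; i <= HOLEMASK_BITS; i++) {
		/* treat the position one past the end as a hole */
		int hole = (i == HOLEMASK_BITS) || ((holemask >> i) & 1);

		if (!hole && startidx < 0)
			startidx = i;			/* run begins */
		else if (hole && startidx >= 0) {
			printf("allocated run: bits %d-%d\n", startidx, i - 1);
			startidx = -1;			/* run ends */
		}
	}
}

int main(void)
{
	print_allocated_runs(0xff00);	/* lower half allocated */
	print_allocated_runs(0x0ff0);	/* two separate allocated runs */
	return 0;
}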
1896
1897STATIC int
1898xfs_difree_inobt(
1899	struct xfs_mount		*mp,
1900	struct xfs_trans		*tp,
1901	struct xfs_buf			*agbp,
1902	xfs_agino_t			agino,
1903	struct xfs_icluster		*xic,
1904	struct xfs_inobt_rec_incore	*orec)
1905{
1906	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
1907	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
1908	struct xfs_perag		*pag;
1909	struct xfs_btree_cur		*cur;
1910	struct xfs_inobt_rec_incore	rec;
1911	int				ilen;
1912	int				error;
1913	int				i;
1914	int				off;
1915
1916	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
1917	ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));
1918
1919	/*
1920	 * Initialize the cursor.
1921	 */
1922	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1923
1924	error = xfs_check_agi_freecount(cur, agi);
1925	if (error)
1926		goto error0;
1927
1928	/*
1929	 * Look for the entry describing this inode.
1930	 */
1931	if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
1932		xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
1933			__func__, error);
1934		goto error0;
1935	}
1936	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1937	error = xfs_inobt_get_rec(cur, &rec, &i);
1938	if (error) {
1939		xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
1940			__func__, error);
1941		goto error0;
1942	}
1943	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1944	/*
1945	 * Get the offset in the inode chunk.
1946	 */
1947	off = agino - rec.ir_startino;
1948	ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
1949	ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
1950	/*
1951	 * Mark the inode free & increment the count.
1952	 */
1953	rec.ir_free |= XFS_INOBT_MASK(off);
1954	rec.ir_freecount++;
1955
1956	/*
1957	 * When an inode chunk is free, it becomes eligible for removal. Don't
1958	 * remove the chunk if the block size is large enough for multiple inode
1959	 * chunks (that might not be free).
1960	 */
1961	if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
1962	    rec.ir_free == XFS_INOBT_ALL_FREE &&
1963	    mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
1964		xic->deleted = true;
1965		xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
1966		xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
1967
1968		/*
1969		 * Remove the inode cluster from the AGI B+Tree, adjust the
1970		 * AGI and Superblock inode counts, and mark the disk space
1971		 * to be freed when the transaction is committed.
1972		 */
1973		ilen = rec.ir_freecount;
1974		be32_add_cpu(&agi->agi_count, -ilen);
1975		be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
1976		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
1977		pag = xfs_perag_get(mp, agno);
1978		pag->pagi_freecount -= ilen - 1;
1979		pag->pagi_count -= ilen;
1980		xfs_perag_put(pag);
1981		xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
1982		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
1983
1984		if ((error = xfs_btree_delete(cur, &i))) {
1985			xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
1986				__func__, error);
1987			goto error0;
1988		}
1989
1990		xfs_difree_inode_chunk(tp, agno, &rec);
1991	} else {
1992		xic->deleted = false;
1993
1994		error = xfs_inobt_update(cur, &rec);
1995		if (error) {
1996			xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
1997				__func__, error);
1998			goto error0;
1999		}
2000
2001		/*
2002		 * Change the inode free counts and log the ag/sb changes.
2003		 */
2004		be32_add_cpu(&agi->agi_freecount, 1);
2005		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
2006		pag = xfs_perag_get(mp, agno);
2007		pag->pagi_freecount++;
2008		xfs_perag_put(pag);
2009		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
2010	}
2011
2012	error = xfs_check_agi_freecount(cur, agi);
2013	if (error)
2014		goto error0;
2015
2016	*orec = rec;
2017	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2018	return 0;
2019
2020error0:
2021	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2022	return error;
2023}
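
/*
 * Worked example for the chunk-removal accounting in xfs_difree_inobt()
 * above, assuming a fully freed 64-inode chunk: ir_freecount has just
 * been bumped to 64, so ilen = 64.  agi_count drops by all 64 inodes,
 * but agi_freecount drops by only 63 (ilen - 1), because the chunk
 * previously contributed 63 free inodes to the count -- the inode being
 * freed right now was still allocated and had never been counted free.
 */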
2024
2025/*
2026 * Free an inode in the free inode btree.
2027 */
2028STATIC int
2029xfs_difree_finobt(
2030	struct xfs_mount		*mp,
2031	struct xfs_trans		*tp,
2032	struct xfs_buf			*agbp,
2033	xfs_agino_t			agino,
2034	struct xfs_inobt_rec_incore	*ibtrec) /* inobt record */
2035{
2036	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
2037	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
2038	struct xfs_btree_cur		*cur;
2039	struct xfs_inobt_rec_incore	rec;
2040	int				offset = agino - ibtrec->ir_startino;
2041	int				error;
2042	int				i;
2043
2044	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
2045
2046	error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
2047	if (error)
2048		goto error;
2049	if (i == 0) {
2050		/*
2051		 * If the record does not exist in the finobt, we must have just
2052		 * freed an inode in a previously fully allocated chunk. If not,
2053		 * something is out of sync.
2054		 */
2055		XFS_WANT_CORRUPTED_GOTO(mp, ibtrec->ir_freecount == 1, error);
2056
2057		error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
2058					     ibtrec->ir_count,
2059					     ibtrec->ir_freecount,
2060					     ibtrec->ir_free, &i);
2061		if (error)
2062			goto error;
2063		ASSERT(i == 1);
2064
2065		goto out;
2066	}
2067
2068	/*
2069	 * Read and update the existing record. We could just copy the ibtrec
2070	 * across here, but that would defeat the purpose of having redundant
2071	 * metadata. By making the modifications independently, we can catch
2072	 * corruptions that we wouldn't see if we just copied from one record
2073	 * to another.
2074	 */
2075	error = xfs_inobt_get_rec(cur, &rec, &i);
2076	if (error)
2077		goto error;
2078	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
2079
2080	rec.ir_free |= XFS_INOBT_MASK(offset);
2081	rec.ir_freecount++;
2082
2083	XFS_WANT_CORRUPTED_GOTO(mp, (rec.ir_free == ibtrec->ir_free) &&
2084				(rec.ir_freecount == ibtrec->ir_freecount),
2085				error);
2086
2087	/*
2088	 * The content of inobt records should always match between the inobt
2089	 * and finobt. The lifecycle of records in the finobt is different from
2090	 * the inobt in that the finobt only tracks records with at least one
2091	 * free inode. Hence, if all of the inodes are free and we aren't
2092	 * keeping inode chunks permanently on disk, remove the record.
2093	 * Otherwise, update the record with the new information.
2094	 *
2095	 * Note that we currently can't free chunks when the block size is large
2096	 * enough for multiple chunks. In that case, leave the finobt record in
2097	 * place so that it stays in sync with the inobt.
2098	 */
2099	if (rec.ir_free == XFS_INOBT_ALL_FREE &&
2100	    mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK &&
2101	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
2102		error = xfs_btree_delete(cur, &i);
2103		if (error)
2104			goto error;
2105		ASSERT(i == 1);
2106	} else {
2107		error = xfs_inobt_update(cur, &rec);
2108		if (error)
2109			goto error;
2110	}
2111
2112out:
2113	error = xfs_check_agi_freecount(cur, agi);
2114	if (error)
2115		goto error;
2116
2117	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2118	return 0;
2119
2120error:
2121	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2122	return error;
2123}
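
/*
 * Illustrative sketch (userspace, not from this file): the cross-check
 * pattern used by xfs_difree_finobt() above.  Rather than copying the
 * inobt record into the finobt, the same logical change is applied to
 * each copy independently and the results are compared, so a divergence
 * flags corruption.  All names here are local to this example.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_rec {
	uint64_t	free;		/* bitmap of free inodes */
	uint32_t	freecount;	/* number of free inodes */
};

static bool free_and_crosscheck(struct demo_rec *mine,
				const struct demo_rec *theirs, int offset)
{
	/* apply the change independently to our copy ... */
	mine->free |= 1ULL << offset;
	mine->freecount++;

	/* ... then both copies must agree, or one of them is corrupt */
	return mine->free == theirs->free &&
	       mine->freecount == theirs->freecount;
}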
2124
2125/*
2126 * Free a disk inode.  This carefully avoids touching the incore inode;
2127 * all incore manipulations are the caller's responsibility.
2128 * The on-disk inode itself is not changed by this operation; only the
2129 * btree (free inode mask) is updated.
2130 */
2131int
2132xfs_difree(
2133	struct xfs_trans	*tp,		/* transaction pointer */
2134	xfs_ino_t		inode,		/* inode to be freed */
2135	struct xfs_icluster	*xic)	/* cluster info if deleted */
2136{
2137	/* REFERENCED */
2138	xfs_agblock_t		agbno;	/* block number containing inode */
2139	struct xfs_buf		*agbp;	/* buffer for allocation group header */
2140	xfs_agino_t		agino;	/* allocation group inode number */
2141	xfs_agnumber_t		agno;	/* allocation group number */
2142	int			error;	/* error return value */
2143	struct xfs_mount	*mp;	/* mount structure for filesystem */
2144	struct xfs_inobt_rec_incore rec;/* btree record */
2145
2146	mp = tp->t_mountp;
2147
2148	/*
2149	 * Break up inode number into its components.
2150	 */
2151	agno = XFS_INO_TO_AGNO(mp, inode);
2152	if (agno >= mp->m_sb.sb_agcount)  {
2153		xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
2154			__func__, agno, mp->m_sb.sb_agcount);
2155		ASSERT(0);
2156		return -EINVAL;
2157	}
2158	agino = XFS_INO_TO_AGINO(mp, inode);
2159	if (inode != XFS_AGINO_TO_INO(mp, agno, agino))  {
2160		xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
2161			__func__, (unsigned long long)inode,
2162			(unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
2163		ASSERT(0);
2164		return -EINVAL;
2165	}
2166	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2167	if (agbno >= mp->m_sb.sb_agblocks)  {
2168		xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
2169			__func__, agbno, mp->m_sb.sb_agblocks);
2170		ASSERT(0);
2171		return -EINVAL;
2172	}
2173	/*
2174	 * Get the allocation group header.
2175	 */
2176	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
2177	if (error) {
2178		xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
2179			__func__, error);
2180		return error;
2181	}
2182
2183	/*
2184	 * Fix up the inode allocation btree.
2185	 */
2186	error = xfs_difree_inobt(mp, tp, agbp, agino, xic, &rec);
2187	if (error)
2188		goto error0;
2189
2190	/*
2191	 * Fix up the free inode btree.
2192	 */
2193	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
2194		error = xfs_difree_finobt(mp, tp, agbp, agino, &rec);
2195		if (error)
2196			goto error0;
2197	}
2198
2199	return 0;
2200
2201error0:
2202	return error;
2203}
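
/*
 * Illustrative sketch (userspace, not from this file): the inode number
 * decomposition that xfs_difree() above performs with XFS_INO_TO_AGNO()
 * and friends.  The high bits select the AG and the low bits index the
 * inode within it; agino_bits stands in for sb_agblklog + sb_inopblog,
 * and all of the values below are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ino = 0x2000123;	/* example inode number */
	unsigned int agino_bits = 25;	/* log2(AG blocks) + log2(inodes/block) */
	uint64_t agno = ino >> agino_bits;
	uint64_t agino = ino & ((1ULL << agino_bits) - 1);

	/* recombining must reproduce the original number exactly */
	printf("agno=%llu agino=%llu roundtrip_ok=%d\n",
	       (unsigned long long)agno, (unsigned long long)agino,
	       ((agno << agino_bits) | agino) == ino);
	return 0;
}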
2204
2205STATIC int
2206xfs_imap_lookup(
2207	struct xfs_mount	*mp,
2208	struct xfs_trans	*tp,
2209	xfs_agnumber_t		agno,
2210	xfs_agino_t		agino,
2211	xfs_agblock_t		agbno,
2212	xfs_agblock_t		*chunk_agbno,
2213	xfs_agblock_t		*offset_agbno,
2214	int			flags)
2215{
2216	struct xfs_inobt_rec_incore rec;
2217	struct xfs_btree_cur	*cur;
2218	struct xfs_buf		*agbp;
2219	int			error;
2220	int			i;
2221
2222	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
2223	if (error) {
2224		xfs_alert(mp,
2225			"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
2226			__func__, error, agno);
2227		return error;
2228	}
2229
2230	/*
2231	 * Lookup the inode record for the given agino. If the record cannot be
2232	 * found, then it's an invalid inode number and we should abort. Once
2233	 * we have a record, we need to ensure it contains the inode number
2234	 * we are looking up.
2235	 */
2236	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
2237	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
2238	if (!error) {
2239		if (i)
2240			error = xfs_inobt_get_rec(cur, &rec, &i);
2241		if (!error && i == 0)
2242			error = -EINVAL;
2243	}
2244
2245	xfs_trans_brelse(tp, agbp);
2246	xfs_btree_del_cursor(cur, error);
2247	if (error)
2248		return error;
2249
2250	/* check that the returned record contains the required inode */
2251	if (rec.ir_startino > agino ||
2252	    rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
2253		return -EINVAL;
2254
2255	/* for untrusted inodes check it is allocated first */
2256	if ((flags & XFS_IGET_UNTRUSTED) &&
2257	    (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
2258		return -EINVAL;
2259
2260	*chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
2261	*offset_agbno = agbno - *chunk_agbno;
2262	return 0;
2263}
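
/*
 * Note on the lookup above: XFS_LOOKUP_LE returns the last record with
 * ir_startino <= agino, which is necessary but not sufficient -- agino
 * may lie beyond the end of that chunk, which is why xfs_imap_lookup()
 * follows the lookup with the explicit containment check against
 * ialloc_inos.
 */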
2264
2265/*
2266 * Return the location of the inode in imap, for mapping it into a buffer.
2267 */
2268int
2269xfs_imap(
2270	xfs_mount_t	 *mp,	/* file system mount structure */
2271	xfs_trans_t	 *tp,	/* transaction pointer */
2272	xfs_ino_t	ino,	/* inode to locate */
2273	struct xfs_imap	*imap,	/* location map structure */
2274	uint		flags)	/* flags for inode btree lookup */
2275{
2276	xfs_agblock_t	agbno;	/* block number of inode in the alloc group */
2277	xfs_agino_t	agino;	/* inode number within alloc group */
2278	xfs_agnumber_t	agno;	/* allocation group number */
2279	xfs_agblock_t	chunk_agbno;	/* first block in inode chunk */
2280	xfs_agblock_t	cluster_agbno;	/* first block in inode cluster */
2281	int		error;	/* error code */
2282	int		offset;	/* index of inode in its buffer */
2283	xfs_agblock_t	offset_agbno;	/* blks from chunk start to inode */
2284
2285	ASSERT(ino != NULLFSINO);
2286
2287	/*
2288	 * Split up the inode number into its parts.
2289	 */
2290	agno = XFS_INO_TO_AGNO(mp, ino);
2291	agino = XFS_INO_TO_AGINO(mp, ino);
2292	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2293	if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
2294	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
2295#ifdef DEBUG
2296		/*
2297		 * Don't output diagnostic information for untrusted inodes
2298		 * as they can be invalid without implying corruption.
2299		 */
2300		if (flags & XFS_IGET_UNTRUSTED)
2301			return -EINVAL;
2302		if (agno >= mp->m_sb.sb_agcount) {
2303			xfs_alert(mp,
2304				"%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
2305				__func__, agno, mp->m_sb.sb_agcount);
2306		}
2307		if (agbno >= mp->m_sb.sb_agblocks) {
2308			xfs_alert(mp,
2309		"%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
2310				__func__, (unsigned long long)agbno,
2311				(unsigned long)mp->m_sb.sb_agblocks);
2312		}
2313		if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
2314			xfs_alert(mp,
2315		"%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
2316				__func__, ino,
2317				XFS_AGINO_TO_INO(mp, agno, agino));
2318		}
2319		xfs_stack_trace();
2320#endif /* DEBUG */
2321		return -EINVAL;
2322	}
2323
2324	/*
2325	 * For bulkstat and handle lookups, we have an untrusted inode number
2326	 * that we have to verify is valid. We cannot do this just by reading
2327	 * the inode buffer as it may have been unlinked and removed leaving
2328	 * inodes in stale state on disk. Hence we have to do a btree lookup
2329	 * in all cases where an untrusted inode number is passed.
2330	 */
2331	if (flags & XFS_IGET_UNTRUSTED) {
2332		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
2333					&chunk_agbno, &offset_agbno, flags);
2334		if (error)
2335			return error;
2336		goto out_map;
2337	}
2338
2339	/*
2340	 * If the inode cluster size is the same as the blocksize or
2341	 * smaller, we can locate the buffer with simple arithmetic.
2342	 */
2343	if (M_IGEO(mp)->blocks_per_cluster == 1) {
2344		offset = XFS_INO_TO_OFFSET(mp, ino);
2345		ASSERT(offset < mp->m_sb.sb_inopblock);
2346
2347		imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
2348		imap->im_len = XFS_FSB_TO_BB(mp, 1);
2349		imap->im_boffset = (unsigned short)(offset <<
2350							mp->m_sb.sb_inodelog);
2351		return 0;
2352	}
2353
2354	/*
2355	 * If the inode chunks are aligned, use simple arithmetic to find
2356	 * the location. Otherwise we have to do a btree lookup to find
2357	 * it.
2358	 */
2359	if (M_IGEO(mp)->inoalign_mask) {
2360		offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
2361		chunk_agbno = agbno - offset_agbno;
2362	} else {
2363		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
2364					&chunk_agbno, &offset_agbno, flags);
2365		if (error)
2366			return error;
2367	}
2368
2369out_map:
2370	ASSERT(agbno >= chunk_agbno);
2371	cluster_agbno = chunk_agbno +
2372		((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
2373		 M_IGEO(mp)->blocks_per_cluster);
2374	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
2375		XFS_INO_TO_OFFSET(mp, ino);
2376
2377	imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
2378	imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
2379	imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
2380
2381	/*
2382	 * If the inode number maps to a block outside the bounds
2383	 * of the file system then return an error rather than calling
2384	 * read_buf and panicking when we get an error from the
2385	 * driver.
2386	 */
2387	if ((imap->im_blkno + imap->im_len) >
2388	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
2389		xfs_alert(mp,
2390	"%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
2391			__func__, (unsigned long long) imap->im_blkno,
2392			(unsigned long long) imap->im_len,
2393			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
2394		return -EINVAL;
2395	}
2396	return 0;
2397}
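
/*
 * Worked example for the cluster arithmetic in xfs_imap() above, with
 * illustrative values of 4 blocks per cluster and 16 inodes per block:
 * for agbno 11 in a chunk starting at chunk_agbno 8, offset_agbno is 3,
 * so cluster_agbno = 8 + (3 / 4) * 4 = 8, and the inode's byte offset
 * within the cluster buffer covers (11 - 8) * 16 = 48 inodes plus its
 * in-block offset from XFS_INO_TO_OFFSET().
 */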
2398
2399/*
2400 * Log specified fields for the ag hdr (inode section). The growth of the agi
2401 * structure over time requires that we interpret the buffer as two logical
2402 * regions delineated by the end of the unlinked list. This is due to the size
2403 * of the hash table and its location in the middle of the agi.
2404 *
2405 * For example, a request to log a field before agi_unlinked and a field after
2406 * agi_unlinked could cause us to log the entire hash table and use an excessive
2407 * amount of log space. To avoid this behavior, log the region up through
2408 * agi_unlinked in one call and the region after agi_unlinked through the end of
2409 * the structure in another.
2410 */
2411void
2412xfs_ialloc_log_agi(
2413	xfs_trans_t	*tp,		/* transaction pointer */
2414	xfs_buf_t	*bp,		/* allocation group header buffer */
2415	int		fields)		/* bitmask of fields to log */
2416{
2417	int			first;		/* first byte number */
2418	int			last;		/* last byte number */
2419	static const short	offsets[] = {	/* field starting offsets */
2420					/* keep in sync with bit definitions */
2421		offsetof(xfs_agi_t, agi_magicnum),
2422		offsetof(xfs_agi_t, agi_versionnum),
2423		offsetof(xfs_agi_t, agi_seqno),
2424		offsetof(xfs_agi_t, agi_length),
2425		offsetof(xfs_agi_t, agi_count),
2426		offsetof(xfs_agi_t, agi_root),
2427		offsetof(xfs_agi_t, agi_level),
2428		offsetof(xfs_agi_t, agi_freecount),
2429		offsetof(xfs_agi_t, agi_newino),
2430		offsetof(xfs_agi_t, agi_dirino),
2431		offsetof(xfs_agi_t, agi_unlinked),
2432		offsetof(xfs_agi_t, agi_free_root),
2433		offsetof(xfs_agi_t, agi_free_level),
2434		sizeof(xfs_agi_t)
2435	};
2436#ifdef DEBUG
2437	xfs_agi_t		*agi;	/* allocation group header */
2438
2439	agi = XFS_BUF_TO_AGI(bp);
2440	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
2441#endif
2442
2443	/*
2444	 * Compute byte offsets for the first and last fields in the first
2445	 * region and log the agi buffer. This only logs up through
2446	 * agi_unlinked.
2447	 */
2448	if (fields & XFS_AGI_ALL_BITS_R1) {
2449		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
2450				  &first, &last);
2451		xfs_trans_log_buf(tp, bp, first, last);
2452	}
2453
2454	/*
2455	 * Mask off the bits in the first region and calculate the first and
2456	 * last field offsets for any bits in the second region.
2457	 */
2458	fields &= ~XFS_AGI_ALL_BITS_R1;
2459	if (fields) {
2460		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
2461				  &first, &last);
2462		xfs_trans_log_buf(tp, bp, first, last);
2463	}
2464}
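
/*
 * Illustrative sketch (userspace, not from this file): how a field
 * bitmask plus an offsets[] table (with the structure size as its final
 * entry) maps to a byte range for logging, in the style of
 * xfs_btree_offsets() as used above.  Assumes at least one bit of
 * 'fields' is set; all names are local to this example.
 */
static void field_range(int fields, const short *offsets, int nbits,
			int *first, int *last)
{
	int i;

	*first = -1;
	for (i = 0; i < nbits; i++) {
		if (!(fields & (1 << i)))
			continue;
		if (*first < 0)
			*first = offsets[i];	/* start of lowest field */
		*last = offsets[i + 1] - 1;	/* last byte of this field */
	}
}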
2465
2466static xfs_failaddr_t
2467xfs_agi_verify(
2468	struct xfs_buf	*bp)
2469{
2470	struct xfs_mount *mp = bp->b_mount;
2471	struct xfs_agi	*agi = XFS_BUF_TO_AGI(bp);
2472	int		i;
2473
2474	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2475		if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
2476			return __this_address;
2477		if (!xfs_log_check_lsn(mp,
2478				be64_to_cpu(XFS_BUF_TO_AGI(bp)->agi_lsn)))
2479			return __this_address;
2480	}
2481
2482	/*
2483	 * Validate the magic number of the agi block.
2484	 */
2485	if (!xfs_verify_magic(bp, agi->agi_magicnum))
2486		return __this_address;
2487	if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
2488		return __this_address;
2489
2490	if (be32_to_cpu(agi->agi_level) < 1 ||
2491	    be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
2492		return __this_address;
2493
2494	if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
2495	    (be32_to_cpu(agi->agi_free_level) < 1 ||
2496	     be32_to_cpu(agi->agi_free_level) > XFS_BTREE_MAXLEVELS))
2497		return __this_address;
2498
2499	/*
2500	 * During growfs operations the perag is not fully initialised, so
2501	 * we can't use it for any useful checking. growfs ensures we can't
2502	 * use it by using uncached buffers that don't have the perag
2503	 * attached, so we can detect and avoid this problem.
2504	 */
2505	if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
2506		return __this_address;
2507
2508	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
2509		if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
2510			continue;
2511		if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
2512			return __this_address;
2513	}
2514
2515	return NULL;
2516}
2517
2518static void
2519xfs_agi_read_verify(
2520	struct xfs_buf	*bp)
2521{
2522	struct xfs_mount *mp = bp->b_mount;
2523	xfs_failaddr_t	fa;
2524
2525	if (xfs_sb_version_hascrc(&mp->m_sb) &&
2526	    !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
2527		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
2528	else {
2529		fa = xfs_agi_verify(bp);
2530		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
2531			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2532	}
2533}
2534
2535static void
2536xfs_agi_write_verify(
2537	struct xfs_buf	*bp)
2538{
2539	struct xfs_mount	*mp = bp->b_mount;
2540	struct xfs_buf_log_item	*bip = bp->b_log_item;
2541	xfs_failaddr_t		fa;
2542
2543	fa = xfs_agi_verify(bp);
2544	if (fa) {
2545		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2546		return;
2547	}
2548
2549	if (!xfs_sb_version_hascrc(&mp->m_sb))
2550		return;
2551
2552	if (bip)
2553		XFS_BUF_TO_AGI(bp)->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2554	xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
2555}
2556
2557const struct xfs_buf_ops xfs_agi_buf_ops = {
2558	.name = "xfs_agi",
2559	.magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) },
2560	.verify_read = xfs_agi_read_verify,
2561	.verify_write = xfs_agi_write_verify,
2562	.verify_struct = xfs_agi_verify,
2563};
2564
2565/*
2566 * Read in the allocation group header (inode allocation section)
2567 */
2568int
2569xfs_read_agi(
2570	struct xfs_mount	*mp,	/* file system mount structure */
2571	struct xfs_trans	*tp,	/* transaction pointer */
2572	xfs_agnumber_t		agno,	/* allocation group number */
2573	struct xfs_buf		**bpp)	/* allocation group hdr buf */
2574{
2575	int			error;
2576
2577	trace_xfs_read_agi(mp, agno);
2578
2579	ASSERT(agno != NULLAGNUMBER);
2580	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
2581			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
2582			XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
2583	if (error)
2584		return error;
2585	if (tp)
2586		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_AGI_BUF);
2587
2588	xfs_buf_set_ref(*bpp, XFS_AGI_REF);
2589	return 0;
2590}
2591
2592int
2593xfs_ialloc_read_agi(
2594	struct xfs_mount	*mp,	/* file system mount structure */
2595	struct xfs_trans	*tp,	/* transaction pointer */
2596	xfs_agnumber_t		agno,	/* allocation group number */
2597	struct xfs_buf		**bpp)	/* allocation group hdr buf */
2598{
2599	struct xfs_agi		*agi;	/* allocation group header */
2600	struct xfs_perag	*pag;	/* per allocation group data */
2601	int			error;
2602
2603	trace_xfs_ialloc_read_agi(mp, agno);
2604
2605	error = xfs_read_agi(mp, tp, agno, bpp);
2606	if (error)
2607		return error;
2608
2609	agi = XFS_BUF_TO_AGI(*bpp);
2610	pag = xfs_perag_get(mp, agno);
2611	if (!pag->pagi_init) {
2612		pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
2613		pag->pagi_count = be32_to_cpu(agi->agi_count);
2614		pag->pagi_init = 1;
2615	}
2616
2617	/*
2618	 * It's possible for these to be out of sync if
2619	 * we are in the middle of a forced shutdown.
2620	 */
2621	ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
2622		XFS_FORCED_SHUTDOWN(mp));
2623	xfs_perag_put(pag);
2624	return 0;
2625}
2626
2627/*
2628 * Read in the agi to initialise the per-ag data in the mount structure
2629 */
2630int
2631xfs_ialloc_pagi_init(
2632	xfs_mount_t	*mp,		/* file system mount structure */
2633	xfs_trans_t	*tp,		/* transaction pointer */
2634	xfs_agnumber_t	agno)		/* allocation group number */
2635{
2636	xfs_buf_t	*bp = NULL;
2637	int		error;
2638
2639	error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
2640	if (error)
2641		return error;
2642	if (bp)
2643		xfs_trans_brelse(tp, bp);
2644	return 0;
2645}
2646
2647/* Is there an inode record covering a given range of inode numbers? */
2648int
2649xfs_ialloc_has_inode_record(
2650	struct xfs_btree_cur	*cur,
2651	xfs_agino_t		low,
2652	xfs_agino_t		high,
2653	bool			*exists)
2654{
2655	struct xfs_inobt_rec_incore	irec;
2656	xfs_agino_t		agino;
2657	uint16_t		holemask;
2658	int			has_record;
2659	int			i;
2660	int			error;
2661
2662	*exists = false;
2663	error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
2664	while (error == 0 && has_record) {
2665		error = xfs_inobt_get_rec(cur, &irec, &has_record);
2666		if (error || irec.ir_startino > high)
2667			break;
2668
2669		agino = irec.ir_startino;
2670		holemask = irec.ir_holemask;
2671		for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1,
2672				i++, agino += XFS_INODES_PER_HOLEMASK_BIT) {
2673			if (holemask & 1)
2674				continue;
2675			if (agino + XFS_INODES_PER_HOLEMASK_BIT > low &&
2676					agino <= high) {
2677				*exists = true;
2678				return 0;
2679			}
2680		}
2681
2682		error = xfs_btree_increment(cur, 0, &has_record);
2683	}
2684	return error;
2685}
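
/*
 * Illustrative sketch (userspace, not from this file): the per-bit
 * overlap test from xfs_ialloc_has_inode_record() above.  Each clear
 * holemask bit covers INODES_PER_BIT inodes starting at agino, and that
 * span overlaps [low, high] iff it ends past low and starts at or
 * before high.  INODES_PER_BIT stands in for
 * XFS_INODES_PER_HOLEMASK_BIT (64 inodes / 16 bits = 4).
 */
#include <stdbool.h>
#include <stdint.h>

#define INODES_PER_BIT	4

static bool record_covers(uint32_t startino, uint16_t holemask,
			  uint32_t low, uint32_t high)
{
	uint32_t agino = startino;
	int i;

	for (i = 0; i < 16; i++, holemask >>= 1, agino += INODES_PER_BIT) {
		if (holemask & 1)
			continue;	/* this span is a hole, skip it */
		if (agino + INODES_PER_BIT > low && agino <= high)
			return true;	/* allocated span overlaps range */
	}
	return false;
}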
2686
2687/* Is there an inode record covering a given extent? */
2688int
2689xfs_ialloc_has_inodes_at_extent(
2690	struct xfs_btree_cur	*cur,
2691	xfs_agblock_t		bno,
2692	xfs_extlen_t		len,
2693	bool			*exists)
2694{
2695	xfs_agino_t		low;
2696	xfs_agino_t		high;
2697
2698	low = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
2699	high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;
2700
2701	return xfs_ialloc_has_inode_record(cur, low, high, exists);
2702}
2703
2704struct xfs_ialloc_count_inodes {
2705	xfs_agino_t			count;
2706	xfs_agino_t			freecount;
2707};
2708
2709/* Record inode counts across all inobt records. */
2710STATIC int
2711xfs_ialloc_count_inodes_rec(
2712	struct xfs_btree_cur		*cur,
2713	union xfs_btree_rec		*rec,
2714	void				*priv)
2715{
2716	struct xfs_inobt_rec_incore	irec;
2717	struct xfs_ialloc_count_inodes	*ci = priv;
2718
2719	xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
2720	ci->count += irec.ir_count;
2721	ci->freecount += irec.ir_freecount;
2722
2723	return 0;
2724}
2725
2726/* Count allocated and free inodes under an inobt. */
2727int
2728xfs_ialloc_count_inodes(
2729	struct xfs_btree_cur		*cur,
2730	xfs_agino_t			*count,
2731	xfs_agino_t			*freecount)
2732{
2733	struct xfs_ialloc_count_inodes	ci = {0};
2734	int				error;
2735
2736	ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
2737	error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
2738	if (error)
2739		return error;
2740
2741	*count = ci.count;
2742	*freecount = ci.freecount;
2743	return 0;
2744}
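
/*
 * Illustrative sketch (userspace, not from this file): the walk-with-
 * callback aggregation pattern that xfs_ialloc_count_inodes() above
 * builds on xfs_btree_query_all().  The walker hands every record plus
 * an opaque 'priv' pointer to the visitor, which accumulates totals.
 * All names are local to this example.
 */
#include <stddef.h>

struct demo_irec {
	unsigned int	count;
	unsigned int	freecount;
};

struct demo_totals {
	unsigned long	count;
	unsigned long	freecount;
};

static int count_cb(const struct demo_irec *rec, void *priv)
{
	struct demo_totals *t = priv;

	t->count += rec->count;
	t->freecount += rec->freecount;
	return 0;			/* nonzero would abort the walk */
}

/* a flat array walk standing in for the btree query */
static int for_each_rec(const struct demo_irec *recs, size_t n,
			int (*fn)(const struct demo_irec *, void *),
			void *priv)
{
	size_t i;
	int error;

	for (i = 0; i < n; i++) {
		error = fn(&recs[i], priv);
		if (error)
			return error;
	}
	return 0;
}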
2745
2746/*
2747 * Initialize inode-related geometry information.
2748 *
2749 * Compute the inode btree min and max levels and set maxicount.
2750 *
2751 * Set the inode cluster size.  This may still be overridden by the file
2752 * system block size if it is larger than the chosen cluster size.
2753 *
2754 * For v5 filesystems, scale the cluster size with the inode size to keep a
2755 * constant ratio of inode per cluster buffer, but only if mkfs has set the
2756 * inode alignment value appropriately for larger cluster sizes.
2757 *
2758 * Then compute the inode cluster alignment information.
2759 */
2760void
2761xfs_ialloc_setup_geometry(
2762	struct xfs_mount	*mp)
2763{
2764	struct xfs_sb		*sbp = &mp->m_sb;
2765	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
2766	uint64_t		icount;
2767	uint			inodes;
2768
2769	/* Compute inode btree geometry. */
2770	igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
2771	igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
2772	igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
2773	igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
2774	igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;
2775
2776	igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
2777			sbp->sb_inopblock);
2778	igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;
2779
2780	if (sbp->sb_spino_align)
2781		igeo->ialloc_min_blks = sbp->sb_spino_align;
2782	else
2783		igeo->ialloc_min_blks = igeo->ialloc_blks;
2784
2785	/* Compute and fill in value of m_ino_geo.inobt_maxlevels. */
2786	inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
2787	igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr,
2788			inodes);
2789
2790	/*
2791	 * Set the maximum inode count for this filesystem, being careful not
2792	 * to use obviously garbage sb_inopblog/sb_inopblock values.  Regular
2793	 * users should never get here due to failing sb verification, but
2794	 * certain users (xfs_db) need to be usable even with corrupt metadata.
2795	 */
2796	if (sbp->sb_imax_pct && igeo->ialloc_blks) {
2797		/*
2798		 * Make sure the maximum inode count is a multiple
2799		 * of the units we allocate inodes in.
2800		 */
2801		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
2802		do_div(icount, 100);
2803		do_div(icount, igeo->ialloc_blks);
2804		igeo->maxicount = XFS_FSB_TO_INO(mp,
2805				icount * igeo->ialloc_blks);
2806	} else {
2807		igeo->maxicount = 0;
2808	}
2809
2810	/*
2811	 * Compute the desired size of an inode cluster buffer, which
2812	 * starts at 8K and (on v5 filesystems) scales up with larger inode
2813	 * sizes.
2814	 *
2815	 * Preserve the desired inode cluster size because the sparse inodes
2816	 * feature uses that desired size (not the actual size) to compute the
2817	 * sparse inode alignment.  The mount code validates this value, so we
2818	 * cannot change the behavior.
2819	 */
2820	igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
2821	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2822		int	new_size = igeo->inode_cluster_size_raw;
2823
2824		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
2825		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
2826			igeo->inode_cluster_size_raw = new_size;
2827	}
2828
2829	/* Calculate inode cluster ratios. */
2830	if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize)
2831		igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp,
2832				igeo->inode_cluster_size_raw);
2833	else
2834		igeo->blocks_per_cluster = 1;
2835	igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster);
2836	igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster);
2837
2838	/* Calculate inode cluster alignment. */
2839	if (xfs_sb_version_hasalign(&mp->m_sb) &&
2840	    mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster)
2841		igeo->cluster_align = mp->m_sb.sb_inoalignmt;
2842	else
2843		igeo->cluster_align = 1;
2844	igeo->inoalign_mask = igeo->cluster_align - 1;
2845	igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align);
2846
2847	/*
2848	 * If we are using stripe alignment, check whether
2849	 * the stripe unit is a multiple of the inode alignment
2850	 */
2851	if (mp->m_dalign && igeo->inoalign_mask &&
2852	    !(mp->m_dalign & igeo->inoalign_mask))
2853		igeo->ialloc_align = mp->m_dalign;
2854	else
2855		igeo->ialloc_align = 0;
2856}
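
/*
 * Worked example for the cluster sizing in xfs_ialloc_setup_geometry()
 * above, using illustrative values (v5 filesystem, 4096-byte blocks,
 * 512-byte inodes): the raw cluster size starts at 8192 bytes and is
 * scaled by sb_inodesize / XFS_DINODE_MIN_SIZE = 512 / 256 = 2 to 16384
 * bytes, provided sb_inoalignmt covers the resulting 4 blocks.  That
 * yields blocks_per_cluster = 16384 / 4096 = 4 and inodes_per_cluster =
 * 4 * (4096 / 512) = 32.
 */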