/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_icreate_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_log.h"


/*
 * Allocation group level functions.
 */
static inline int
xfs_ialloc_cluster_alignment(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >=
			XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
		return mp->m_sb.sb_inoalignmt;
	return 1;
}
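
/*
 * Worked example (illustrative numbers, not from any particular fs): with
 * 4k blocks and an 8k inode cluster, XFS_B_TO_FSBT() converts the cluster
 * size to 2 fs blocks. An sb_inoalignmt of 4 satisfies the >= 2 test, so
 * cluster allocations are aligned to 4 blocks; if the align feature bit is
 * clear or the alignment is too small, we fall back to an alignment of 1.
 */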

/*
 * Lookup a record by ino in the btree given by cur.
 */
int					/* error */
xfs_inobt_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agino_t		ino,	/* starting inode of chunk */
	xfs_lookup_t		dir,	/* <=, >=, == */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.i.ir_startino = ino;
	cur->bc_rec.i.ir_holemask = 0;
	cur->bc_rec.i.ir_count = 0;
	cur->bc_rec.i.ir_freecount = 0;
	cur->bc_rec.i.ir_free = 0;
	return xfs_btree_lookup(cur, dir, stat);
}

/*
 * Update the record referred to by cur to the value given.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_inobt_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_inobt_rec_incore_t	*irec)	/* btree record */
{
	union xfs_btree_rec	rec;

	rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
		rec.inobt.ir_u.sp.ir_count = irec->ir_count;
		rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount);
	}
	rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
	return xfs_btree_update(cur, &rec);
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_inobt_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_inobt_rec_incore_t	*irec,	/* btree record */
	int			*stat)	/* output: success/failure */
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || *stat == 0)
		return error;

	irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
		irec->ir_count = rec->inobt.ir_u.sp.ir_count;
		irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
	} else {
		/*
		 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded
		 * values for full inode chunks.
		 */
		irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL;
		irec->ir_count = XFS_INODES_PER_CHUNK;
		irec->ir_freecount =
				be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
	}
	irec->ir_free = be64_to_cpu(rec->inobt.ir_free);

	return 0;
}
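
/*
 * Illustrative decode (assumed values): on a non-sparse filesystem a fully
 * allocated chunk comes back with the hardcoded ir_holemask ==
 * XFS_INOBT_HOLEMASK_FULL (no holes) and ir_count == XFS_INODES_PER_CHUNK,
 * with only ir_freecount and ir_free read from disk. On a sparse-inode
 * filesystem all four fields come verbatim from the sp variant of the
 * on-disk record union rather than being synthesized here.
 */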

/*
 * Insert a single inobt record. Cursor must already point to desired location.
 */
STATIC int
xfs_inobt_insert_rec(
	struct xfs_btree_cur	*cur,
	__uint16_t		holemask,
	__uint8_t		count,
	__int32_t		freecount,
	xfs_inofree_t		free,
	int			*stat)
{
	cur->bc_rec.i.ir_holemask = holemask;
	cur->bc_rec.i.ir_count = count;
	cur->bc_rec.i.ir_freecount = freecount;
	cur->bc_rec.i.ir_free = free;
	return xfs_btree_insert(cur, stat);
}

/*
 * Insert records describing a newly allocated inode chunk into the inobt.
 */
STATIC int
xfs_inobt_insert(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agino_t		newino,
	xfs_agino_t		newlen,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
	xfs_agino_t		thisino;
	int			i;
	int			error;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);

	for (thisino = newino;
	     thisino < newino + newlen;
	     thisino += XFS_INODES_PER_CHUNK) {
		error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
		if (error) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 0);

		error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
					     XFS_INODES_PER_CHUNK,
					     XFS_INODES_PER_CHUNK,
					     XFS_INOBT_ALL_FREE, &i);
		if (error) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 1);
	}

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);

	return 0;
}

/*
 * Verify that the number of free inodes in the AGI is correct.
 */
#ifdef DEBUG
STATIC int
xfs_check_agi_freecount(
	struct xfs_btree_cur	*cur,
	struct xfs_agi		*agi)
{
	if (cur->bc_nlevels == 1) {
		xfs_inobt_rec_incore_t rec;
		int		freecount = 0;
		int		error;
		int		i;

		error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
		if (error)
			return error;

		do {
			error = xfs_inobt_get_rec(cur, &rec, &i);
			if (error)
				return error;

			if (i) {
				freecount += rec.ir_freecount;
				error = xfs_btree_increment(cur, 0, &i);
				if (error)
					return error;
			}
		} while (i == 1);

		if (!XFS_FORCED_SHUTDOWN(cur->bc_mp))
			ASSERT(freecount == be32_to_cpu(agi->agi_freecount));
	}
	return 0;
}
#else
#define xfs_check_agi_freecount(cur, agi)	0
#endif

/*
 * Initialise a new set of inodes. When called without a transaction context
 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
 * than logging them (which in a transaction context puts them into the AIL
 * for writeback rather than the xfsbufd queue).
 */
int
xfs_ialloc_inode_init(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct list_head	*buffer_list,
	int			icount,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_agblock_t		length,
	unsigned int		gen)
{
	struct xfs_buf		*fbuf;
	struct xfs_dinode	*free;
	int			nbufs, blks_per_cluster, inodes_per_cluster;
	int			version;
	int			i, j;
	xfs_daddr_t		d;
	xfs_ino_t		ino = 0;
	/*
	 * Loop over the new block(s), filling in the inodes.  For small block
	 * sizes, manipulate the inodes in buffers which are multiples of the
	 * block size.
	 */
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
	nbufs = length / blks_per_cluster;

	/*
	 * Figure out what version number to use in the inodes we create.  If
	 * the superblock version has caught up to the one that supports the new
	 * inode format, then use the new inode version.  Otherwise use the old
	 * version so that old kernels will continue to be able to use the file
	 * system.
	 *
	 * For v3 inodes, we also need to write the inode number into the inode,
	 * so calculate the first inode number of the chunk here as
	 * XFS_OFFBNO_TO_AGINO() only works within a filesystem block, not
	 * across multiple filesystem blocks (such as a cluster) and so cannot
	 * be used in the cluster buffer loop below.
	 *
	 * Further, because we are writing the inode directly into the buffer
	 * and calculating a CRC on the entire inode, we have to log the entire
	 * inode so that the entire range the CRC covers is present in the log.
	 * That means for v3 inodes we log the entire buffer rather than just
	 * the inode cores.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		version = 3;
		ino = XFS_AGINO_TO_INO(mp, agno,
				       XFS_OFFBNO_TO_AGINO(mp, agbno, 0));

		/*
		 * Log the initialisation that is about to take place as a
		 * logical operation. This means the transaction does not
		 * need to log the physical changes to the inode buffers as log
		 * recovery will know what initialisation is actually needed.
		 * Hence we only need to log the buffers as "ordered" buffers so
		 * they track in the AIL as if they were physically logged.
		 */
		if (tp)
			xfs_icreate_log(tp, agno, agbno, icount,
					mp->m_sb.sb_inodesize, length, gen);
	} else
		version = 2;

	for (j = 0; j < nbufs; j++) {
		/*
		 * Get the block.
		 */
		d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
		fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
					 mp->m_bsize * blks_per_cluster,
					 XBF_UNMAPPED);
		if (!fbuf)
			return -ENOMEM;

		/* Initialize the inode buffers and log them appropriately. */
		fbuf->b_ops = &xfs_inode_buf_ops;
		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
		for (i = 0; i < inodes_per_cluster; i++) {
			int	ioffset = i << mp->m_sb.sb_inodelog;
			uint	isize = xfs_dinode_size(version);

			free = xfs_make_iptr(mp, fbuf, i);
			free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
			free->di_version = version;
			free->di_gen = cpu_to_be32(gen);
			free->di_next_unlinked = cpu_to_be32(NULLAGINO);

			if (version == 3) {
				free->di_ino = cpu_to_be64(ino);
				ino++;
				uuid_copy(&free->di_uuid,
					  &mp->m_sb.sb_meta_uuid);
				xfs_dinode_calc_crc(mp, free);
			} else if (tp) {
				/* just log the inode core */
				xfs_trans_log_buf(tp, fbuf, ioffset,
						  ioffset + isize - 1);
			}
		}

		if (tp) {
			/*
			 * Mark the buffer as an inode allocation buffer so it
			 * sticks in the AIL at the point of this allocation
			 * transaction. This ensures the buffer is on disk
			 * before the tail of the log can be moved past this
			 * transaction (i.e. by preventing relogging from
			 * moving it forward in the log).
			 */
			xfs_trans_inode_alloc_buf(tp, fbuf);
			if (version == 3) {
				/*
				 * Mark the buffer as ordered so that it is not
				 * physically logged in the transaction but is
				 * still tracked in the AIL as part of the
				 * transaction and pins the log appropriately.
				 */
				xfs_trans_ordered_buf(tp, fbuf);
				xfs_trans_log_buf(tp, fbuf, 0,
						  BBTOB(fbuf->b_length) - 1);
			}
		} else {
			fbuf->b_flags |= XBF_DONE;
			xfs_buf_delwri_queue(fbuf, buffer_list);
			xfs_buf_relse(fbuf);
		}
	}
	return 0;
}
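
/*
 * Worked example of the sizing math above (assumed geometry): with 4k
 * blocks and 512-byte inodes, sb_inopblog == 3 (8 inodes per block). For a
 * 2-block inode cluster, inodes_per_cluster = 2 << 3 = 16, and an 8-block
 * chunk of 64 inodes is initialised as nbufs = 8 / 2 = 4 cluster buffers.
 */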

/*
 * Align startino and allocmask for a recently allocated sparse chunk such that
 * they are fit for insertion (or merge) into the on-disk inode btrees.
 *
 * Background:
 *
 * When enabled, sparse inode support increases the inode alignment from cluster
 * size to inode chunk size. This means that the minimum range between two
 * non-adjacent inode records in the inobt is large enough for a full inode
 * record. This allows for cluster sized, cluster aligned block allocation
 * without need to worry about whether the resulting inode record overlaps with
 * another record in the tree. Without this basic rule, we would have to deal
 * with the consequences of overlap by potentially undoing recent allocations in
 * the inode allocation codepath.
 *
 * Because of this alignment rule (which is enforced on mount), there are two
 * inobt possibilities for newly allocated sparse chunks. One is that the
 * aligned inode record for the chunk covers a range of inodes not already
 * covered in the inobt (i.e., it is safe to insert a new sparse record). The
 * other is that a record already exists at the aligned startino that considers
 * the newly allocated range as sparse. In the latter case, record content is
 * merged in hope that sparse inode chunks fill to full chunks over time.
 */
STATIC void
xfs_align_sparse_ino(
	struct xfs_mount		*mp,
	xfs_agino_t			*startino,
	uint16_t			*allocmask)
{
	xfs_agblock_t			agbno;
	xfs_agblock_t			mod;
	int				offset;

	agbno = XFS_AGINO_TO_AGBNO(mp, *startino);
	mod = agbno % mp->m_sb.sb_inoalignmt;
	if (!mod)
		return;

	/* calculate the inode offset and align startino */
	offset = mod << mp->m_sb.sb_inopblog;
	*startino -= offset;

	/*
	 * Since startino has been aligned down, left shift allocmask such that
	 * it continues to represent the same physical inodes relative to the
	 * new startino.
	 */
	*allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT;
}
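
/*
 * Worked example (assumed geometry, 8 inodes per block, sb_inoalignmt ==
 * 8): a 32-inode sparse chunk lands at an agbno with mod == 4, giving
 * offset = 4 << 3 = 32 inodes. startino is pulled back by 32 to the
 * aligned chunk boundary, and an allocmask of 0x00ff (8 holemask bits of
 * 4 inodes each) is shifted left by 32 / XFS_INODES_PER_HOLEMASK_BIT = 8
 * bits to 0xff00, still covering the same physical inodes relative to the
 * new startino.
 */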

/*
 * Determine whether the source inode record can merge into the target. Both
 * records must be sparse, the inode ranges must match and there must be no
 * allocation overlap between the records.
 */
STATIC bool
__xfs_inobt_can_merge(
	struct xfs_inobt_rec_incore	*trec,	/* tgt record */
	struct xfs_inobt_rec_incore	*srec)	/* src record */
{
	uint64_t			talloc;
	uint64_t			salloc;

	/* records must cover the same inode range */
	if (trec->ir_startino != srec->ir_startino)
		return false;

	/* both records must be sparse */
	if (!xfs_inobt_issparse(trec->ir_holemask) ||
	    !xfs_inobt_issparse(srec->ir_holemask))
		return false;

	/* both records must track some inodes */
	if (!trec->ir_count || !srec->ir_count)
		return false;

	/* can't exceed capacity of a full record */
	if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK)
		return false;

	/* verify there is no allocation overlap */
	talloc = xfs_inobt_irec_to_allocmask(trec);
	salloc = xfs_inobt_irec_to_allocmask(srec);
	if (talloc & salloc)
		return false;

	return true;
}

/*
 * Merge the source inode record into the target. The caller must call
 * __xfs_inobt_can_merge() to ensure the merge is valid.
 */
STATIC void
__xfs_inobt_rec_merge(
	struct xfs_inobt_rec_incore	*trec,	/* target */
	struct xfs_inobt_rec_incore	*srec)	/* src */
{
	ASSERT(trec->ir_startino == srec->ir_startino);

	/* combine the counts */
	trec->ir_count += srec->ir_count;
	trec->ir_freecount += srec->ir_freecount;

	/*
	 * Merge the holemask and free mask. For both fields, 0 bits refer to
	 * allocated inodes. We combine the allocated ranges with bitwise AND.
	 */
	trec->ir_holemask &= srec->ir_holemask;
	trec->ir_free &= srec->ir_free;
}
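
/*
 * Worked merge example (hypothetical records): a target with holemask
 * 0xff00 (lower 32 inodes physically allocated) and a source with holemask
 * 0x00ff (upper 32 allocated) pass __xfs_inobt_can_merge(): same startino,
 * both sparse, counts 32 + 32 <= 64 and their allocmasks don't intersect.
 * The merge yields ir_count == 64 and holemask 0xff00 & 0x00ff == 0, i.e.
 * a full chunk; ir_free is combined with the same AND because a 0 bit
 * marks an in-use inode in that mask as well.
 */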

/*
 * Insert a new sparse inode chunk into the associated inode btree. The inode
 * record for the sparse chunk is pre-aligned to a startino that should match
 * any pre-existing sparse inode record in the tree. This allows sparse chunks
 * to fill over time.
 *
 * This function supports two modes of handling preexisting records depending on
 * the merge flag. If merge is true, the provided record is merged with the
 * existing record and updated in place. The merged record is returned in nrec.
 * If merge is false, an existing record is replaced with the provided record.
 * If no preexisting record exists, the provided record is always inserted.
 *
 * It is considered corruption if a merge is requested and not possible. Given
 * the sparse inode alignment constraints, this should never happen.
 */
STATIC int
xfs_inobt_insert_sprec(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	int				btnum,
	struct xfs_inobt_rec_incore	*nrec,	/* in/out: new/merged rec. */
	bool				merge)	/* merge or replace */
{
	struct xfs_btree_cur		*cur;
	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
	int				error;
	int				i;
	struct xfs_inobt_rec_incore	rec;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);

	/* the new record is pre-aligned so we know where to look */
	error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
	if (error)
		goto error;
	/* if nothing there, insert a new record and return */
	if (i == 0) {
		error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
					     nrec->ir_count, nrec->ir_freecount,
					     nrec->ir_free, &i);
		if (error)
			goto error;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);

		goto out;
	}

	/*
	 * A record exists at this startino. Merge or replace the record
	 * depending on what we've been asked to do.
	 */
	if (merge) {
		error = xfs_inobt_get_rec(cur, &rec, &i);
		if (error)
			goto error;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
		XFS_WANT_CORRUPTED_GOTO(mp,
					rec.ir_startino == nrec->ir_startino,
					error);

		/*
		 * This should never fail. If we have coexisting records that
		 * cannot merge, something is seriously wrong.
		 */
		XFS_WANT_CORRUPTED_GOTO(mp, __xfs_inobt_can_merge(nrec, &rec),
					error);

		trace_xfs_irec_merge_pre(mp, agno, rec.ir_startino,
					 rec.ir_holemask, nrec->ir_startino,
					 nrec->ir_holemask);

		/* merge to nrec to output the updated record */
		__xfs_inobt_rec_merge(nrec, &rec);

		trace_xfs_irec_merge_post(mp, agno, nrec->ir_startino,
					  nrec->ir_holemask);

		error = xfs_inobt_rec_check_count(mp, nrec);
		if (error)
			goto error;
	}

	error = xfs_inobt_update(cur, nrec);
	if (error)
		goto error;

out:
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;
error:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Allocate new inodes in the allocation group specified by agbp.
 * Return 0 for success, else error code.
 */
STATIC int				/* error code or 0 */
xfs_ialloc_ag_alloc(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*agbp,		/* alloc group buffer */
	int		*alloc)
{
	xfs_agi_t	*agi;		/* allocation group header */
	xfs_alloc_arg_t	args;		/* allocation argument structure */
	xfs_agnumber_t	agno;
	int		error;
	xfs_agino_t	newino;		/* new first inode's number */
	xfs_agino_t	newlen;		/* new number of inodes */
	int		isaligned = 0;	/* inode allocation at stripe unit */
					/* boundary */
	uint16_t	allocmask = (uint16_t) -1; /* init. to full chunk */
	struct xfs_inobt_rec_incore rec;
	struct xfs_perag *pag;
	int		do_sparse = 0;

	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.fsbno = NULLFSBLOCK;

#ifdef DEBUG
	/* randomly do sparse inode allocations */
	if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
	    args.mp->m_ialloc_min_blks < args.mp->m_ialloc_blks)
		do_sparse = prandom_u32() & 1;
#endif

	/*
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
	newlen = args.mp->m_ialloc_inos;
	if (args.mp->m_maxicount &&
	    percpu_counter_read_positive(&args.mp->m_icount) + newlen >
							args.mp->m_maxicount)
		return -ENOSPC;
	args.minlen = args.maxlen = args.mp->m_ialloc_blks;
	/*
	 * First try to allocate inodes contiguous with the last-allocated
	 * chunk of inodes.  If the filesystem is striped, this will fill
	 * an entire stripe unit with inodes.
	 */
	agi = XFS_BUF_TO_AGI(agbp);
	newino = be32_to_cpu(agi->agi_newino);
	agno = be32_to_cpu(agi->agi_seqno);
	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
		     args.mp->m_ialloc_blks;
	if (do_sparse)
		goto sparse_alloc;
	if (likely(newino != NULLAGINO &&
		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		args.type = XFS_ALLOCTYPE_THIS_BNO;
		args.prod = 1;

		/*
		 * We need to take into account alignment here to ensure that
		 * we don't modify the free list if we fail to have an exact
		 * block. If we don't have an exact match, and every other
		 * allocation attempt fails, we'll end up cancelling a dirty
		 * transaction and shutting down.
		 *
		 * For an exact allocation, alignment must be 1; however, we
		 * need to take cluster alignment into account when fixing up
		 * the freelist. Use the minalignslop field to indicate that
		 * extra blocks might be required for alignment, but not to
		 * use them in the actual exact allocation.
		 */
		args.alignment = 1;
		args.minalignslop = xfs_ialloc_cluster_alignment(args.mp) - 1;

		/* Allow space for the inode btree to split. */
		args.minleft = args.mp->m_in_maxlevels - 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;

		/*
		 * This request might have dirtied the transaction if the AG can
		 * satisfy the request, but the exact block was not available.
		 * If the allocation did fail, subsequent requests will relax
		 * the exact agbno requirement and increase the alignment
		 * instead. It is critical that the total size of the request
		 * (len + alignment + slop) does not increase from this point
		 * on, so reset minalignslop to ensure it is not included in
		 * subsequent requests.
		 */
		args.minalignslop = 0;
	}

	if (unlikely(args.fsbno == NULLFSBLOCK)) {
		/*
		 * Set the alignment for the allocation.
		 * If stripe alignment is turned on then align at stripe unit
		 * boundary.
		 * If the cluster size is smaller than a filesystem block
		 * then we're doing I/O for inodes in filesystem block size
		 * pieces, so don't need alignment anyway.
		 */
		isaligned = 0;
		if (args.mp->m_sinoalign) {
			ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
			args.alignment = args.mp->m_dalign;
			isaligned = 1;
		} else
			args.alignment = xfs_ialloc_cluster_alignment(args.mp);
		/*
		 * Need to figure out where to allocate the inode blocks.
		 * Ideally they should be spaced out through the a.g.
		 * For now, just allocate blocks up front.
		 */
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		/*
		 * Allocate a fixed-size extent of inodes.
		 */
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.prod = 1;
		/*
		 * Allow space for the inode btree to split.
		 */
		args.minleft = args.mp->m_in_maxlevels - 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	/*
	 * If stripe alignment is turned on, then try again with cluster
	 * alignment.
	 */
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		args.alignment = xfs_ialloc_cluster_alignment(args.mp);
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	/*
	 * Finally, try a sparse allocation if the filesystem supports it and
	 * the sparse allocation length is smaller than a full chunk.
	 */
	if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) &&
	    args.mp->m_ialloc_min_blks < args.mp->m_ialloc_blks &&
	    args.fsbno == NULLFSBLOCK) {
sparse_alloc:
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		args.alignment = args.mp->m_sb.sb_spino_align;
		args.prod = 1;

		args.minlen = args.mp->m_ialloc_min_blks;
		args.maxlen = args.minlen;

		/*
		 * The inode record will be aligned to full chunk size. We must
		 * prevent sparse allocation from AG boundaries that result in
		 * invalid inode records, such as records that start at agbno 0
		 * or extend beyond the AG.
		 *
		 * Set min agbno to the first aligned, non-zero agbno and max to
		 * the last aligned agbno that is at least one full chunk from
		 * the end of the AG.
		 */
		args.min_agbno = args.mp->m_sb.sb_inoalignmt;
		args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
					    args.mp->m_sb.sb_inoalignmt) -
				 args.mp->m_ialloc_blks;

		error = xfs_alloc_vextent(&args);
		if (error)
			return error;

		newlen = args.len << args.mp->m_sb.sb_inopblog;
		ASSERT(newlen <= XFS_INODES_PER_CHUNK);
		allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
	}
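
	/*
	 * Worked allocmask example (assumed geometry, 8 inodes per block): a
	 * sparse allocation of args.len == 4 blocks gives newlen == 4 << 3 ==
	 * 32 inodes, so allocmask == (1 << (32 / 4)) - 1 == 0x00ff. The low 8
	 * holemask bits cover the allocated inodes, and the record built
	 * below gets ir_holemask = ~0x00ff == 0xff00.
	 */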

	if (args.fsbno == NULLFSBLOCK) {
		*alloc = 0;
		return 0;
	}
	ASSERT(args.len == args.minlen);

	/*
	 * Stamp and write the inode buffers.
	 *
	 * Seed the new inode cluster with a random generation number. This
	 * prevents short-term reuse of generation numbers if a chunk is
	 * freed and then immediately reallocated. We use random numbers
	 * rather than a linear progression to prevent the next generation
	 * number from being easily guessable.
	 */
	error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, agno,
			args.agbno, args.len, prandom_u32());

	if (error)
		return error;
	/*
	 * Convert the results.
	 */
	newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);

	if (xfs_inobt_issparse(~allocmask)) {
		/*
		 * We've allocated a sparse chunk. Align the startino and mask.
		 */
		xfs_align_sparse_ino(args.mp, &newino, &allocmask);

		rec.ir_startino = newino;
		rec.ir_holemask = ~allocmask;
		rec.ir_count = newlen;
		rec.ir_freecount = newlen;
		rec.ir_free = XFS_INOBT_ALL_FREE;

		/*
		 * Insert the sparse record into the inobt and allow for a merge
		 * if necessary. If a merge does occur, rec is updated to the
		 * merged record.
		 */
		error = xfs_inobt_insert_sprec(args.mp, tp, agbp, XFS_BTNUM_INO,
					       &rec, true);
		if (error == -EFSCORRUPTED) {
			xfs_alert(args.mp,
	"invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
				  XFS_AGINO_TO_INO(args.mp, agno,
						   rec.ir_startino),
				  rec.ir_holemask, rec.ir_count);
			xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
		}
		if (error)
			return error;

		/*
		 * We can't merge the newly allocated part into the finobt the
		 * way we just did for the inobt, due to finobt semantics. The
		 * original record may or may not exist independent of whether
		 * physical inodes exist in this sparse chunk.
		 *
		 * We must update the finobt record based on the inobt record.
		 * rec contains the fully merged and up to date inobt record
		 * from the previous call. Set merge false to replace any
		 * existing record with this one.
		 */
		if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
			error = xfs_inobt_insert_sprec(args.mp, tp, agbp,
						       XFS_BTNUM_FINO, &rec,
						       false);
			if (error)
				return error;
		}
	} else {
		/* full chunk - insert new records to both btrees */
		error = xfs_inobt_insert(args.mp, tp, agbp, newino, newlen,
					 XFS_BTNUM_INO);
		if (error)
			return error;

		if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
			error = xfs_inobt_insert(args.mp, tp, agbp, newino,
						 newlen, XFS_BTNUM_FINO);
			if (error)
				return error;
		}
	}

	/*
	 * Update AGI counts and newino.
	 */
	be32_add_cpu(&agi->agi_count, newlen);
	be32_add_cpu(&agi->agi_freecount, newlen);
	pag = xfs_perag_get(args.mp, agno);
	pag->pagi_freecount += newlen;
	xfs_perag_put(pag);
	agi->agi_newino = cpu_to_be32(newino);

	/*
	 * Log allocation group header fields
	 */
	xfs_ialloc_log_agi(tp, agbp,
		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
	/*
	 * Modify/log superblock values for inode count and inode free count.
	 */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
	*alloc = 1;
	return 0;
}

STATIC xfs_agnumber_t
xfs_ialloc_next_ag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;

	spin_lock(&mp->m_agirotor_lock);
	agno = mp->m_agirotor;
	if (++mp->m_agirotor >= mp->m_maxagi)
		mp->m_agirotor = 0;
	spin_unlock(&mp->m_agirotor_lock);

	return agno;
}

/*
 * Select an allocation group to look for a free inode in, based on the parent
 * inode and the mode.  Return the allocation group buffer.
 */
STATIC xfs_agnumber_t
xfs_ialloc_ag_select(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	parent,		/* parent directory inode number */
	umode_t		mode,		/* bits set to indicate file type */
	int		okalloc)	/* ok to allocate more space */
{
	xfs_agnumber_t	agcount;	/* number of ag's in the filesystem */
	xfs_agnumber_t	agno;		/* current ag number */
	int		flags;		/* alloc buffer locking flags */
	xfs_extlen_t	ineed;		/* blocks needed for inode allocation */
	xfs_extlen_t	longest = 0;	/* longest extent available */
	xfs_mount_t	*mp;		/* mount point structure */
	int		needspace;	/* file mode implies space allocated */
	xfs_perag_t	*pag;		/* per allocation group data */
	xfs_agnumber_t	pagno;		/* parent (starting) ag number */
	int		error;

	/*
	 * Files of these types need at least one block if length > 0
	 * (and they won't fit in the inode, but that's hard to figure out).
	 */
	needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
	mp = tp->t_mountp;
	agcount = mp->m_maxagi;
	if (S_ISDIR(mode))
		pagno = xfs_ialloc_next_ag(mp);
	else {
		pagno = XFS_INO_TO_AGNO(mp, parent);
		if (pagno >= agcount)
			pagno = 0;
	}

	ASSERT(pagno < agcount);

	/*
	 * Loop through allocation groups, looking for one with a little
	 * free space in it.  Note we don't look for free inodes exactly;
	 * instead, we also account for the need to allocate a new inode
	 * chunk, which requires free blocks, if no inodes are currently
	 * free.
	 */
	agno = pagno;
	flags = XFS_ALLOC_FLAG_TRYLOCK;
	for (;;) {
		pag = xfs_perag_get(mp, agno);
		if (!pag->pagi_inodeok) {
			xfs_ialloc_next_ag(mp);
			goto nextag;
		}

		if (!pag->pagi_init) {
			error = xfs_ialloc_pagi_init(mp, tp, agno);
			if (error)
				goto nextag;
		}

		if (pag->pagi_freecount) {
			xfs_perag_put(pag);
			return agno;
		}

		if (!okalloc)
			goto nextag;

		if (!pag->pagf_init) {
			error = xfs_alloc_pagf_init(mp, tp, agno, flags);
			if (error)
				goto nextag;
		}

		/*
		 * Check that there is enough free space for the file plus a
		 * chunk of inodes if we need to allocate some. If this is the
		 * first pass across the AGs, take into account the potential
		 * space needed for alignment of inode chunks when checking the
		 * longest contiguous free space in the AG - this prevents us
		 * from getting ENOSPC because we have free space larger than
		 * m_ialloc_blks but alignment constraints prevent us from using
		 * it.
		 *
		 * If we can't find an AG with space for full alignment slack to
		 * be taken into account, we must be near ENOSPC in all AGs.
		 * Hence we don't include alignment for the second pass and so
		 * if we fail allocation due to alignment issues then it is most
		 * likely a real ENOSPC condition.
		 */
		ineed = mp->m_ialloc_min_blks;
		if (flags && ineed > 1)
			ineed += xfs_ialloc_cluster_alignment(mp);
		longest = pag->pagf_longest;
		if (!longest)
			longest = pag->pagf_flcount > 0;

		if (pag->pagf_freeblks >= needspace + ineed &&
		    longest >= ineed) {
			xfs_perag_put(pag);
			return agno;
		}
nextag:
		xfs_perag_put(pag);
		/*
		 * No point in iterating over the rest, if we're shutting
		 * down.
		 */
		if (XFS_FORCED_SHUTDOWN(mp))
			return NULLAGNUMBER;
		agno++;
		if (agno >= agcount)
			agno = 0;
		if (agno == pagno) {
			if (flags == 0)
				return NULLAGNUMBER;
			flags = 0;
		}
	}
}
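
/*
 * Worked two-pass example (illustrative numbers): with m_ialloc_min_blks
 * == 4 and a cluster alignment of 4, the first pass (flags != 0) demands
 * longest >= 4 + 4 = 8 blocks so an aligned chunk is guaranteed to fit.
 * If no AG qualifies, the second pass (flags == 0) relaxes the demand to
 * 4 blocks, and a failure at that point reflects a genuine ENOSPC
 * condition rather than an alignment artifact.
 */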

/*
 * Try to retrieve the next record to the left/right from the current one.
 */
STATIC int
xfs_ialloc_next_rec(
	struct xfs_btree_cur	*cur,
	xfs_inobt_rec_incore_t	*rec,
	int			*done,
	int			left)
{
	int                     error;
	int			i;

	if (left)
		error = xfs_btree_decrement(cur, 0, &i);
	else
		error = xfs_btree_increment(cur, 0, &i);

	if (error)
		return error;
	*done = !i;
	if (i) {
		error = xfs_inobt_get_rec(cur, rec, &i);
		if (error)
			return error;
		XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
	}

	return 0;
}

STATIC int
xfs_ialloc_get_rec(
	struct xfs_btree_cur	*cur,
	xfs_agino_t		agino,
	xfs_inobt_rec_incore_t	*rec,
	int			*done)
{
	int                     error;
	int			i;

	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
	if (error)
		return error;
	*done = !i;
	if (i) {
		error = xfs_inobt_get_rec(cur, rec, &i);
		if (error)
			return error;
		XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
	}

	return 0;
}

/*
 * Return the offset of the first free inode in the record. If the inode chunk
 * is sparsely allocated, we convert the record holemask to inode granularity
 * and mask off the unallocated regions from the inode free mask.
 */
STATIC int
xfs_inobt_first_free_inode(
	struct xfs_inobt_rec_incore	*rec)
{
	xfs_inofree_t			realfree;

	/* if there are no holes, return the first available offset */
	if (!xfs_inobt_issparse(rec->ir_holemask))
		return xfs_lowbit64(rec->ir_free);

	realfree = xfs_inobt_irec_to_allocmask(rec);
	realfree &= rec->ir_free;

	return xfs_lowbit64(realfree);
}
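
/*
 * Worked example (hypothetical sparse record): a holemask of 0x00ff means
 * the low 32 inodes are a hole, so xfs_inobt_irec_to_allocmask() expands
 * it to an allocmask with only the high 32 bits set. ANDing that with an
 * ir_free of all ones (e.g. a freshly allocated sparse chunk) masks off
 * the hole, and xfs_lowbit64() returns offset 32, the first free inode
 * that physically exists.
 */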

/*
 * Allocate an inode using the inobt-only algorithm.
 */
STATIC int
xfs_dialloc_ag_inobt(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_ino_t		parent,
	xfs_ino_t		*inop)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
	xfs_agnumber_t		pagno = XFS_INO_TO_AGNO(mp, parent);
	xfs_agino_t		pagino = XFS_INO_TO_AGINO(mp, parent);
	struct xfs_perag	*pag;
	struct xfs_btree_cur	*cur, *tcur;
	struct xfs_inobt_rec_incore rec, trec;
	xfs_ino_t		ino;
	int			error;
	int			offset;
	int			i, j;

	pag = xfs_perag_get(mp, agno);

	ASSERT(pag->pagi_init);
	ASSERT(pag->pagi_inodeok);
	ASSERT(pag->pagi_freecount > 0);

 restart_pagno:
	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
	/*
	 * If pagino is 0 (this is the root inode allocation) use newino.
	 * This must work because we've just allocated some.
	 */
	if (!pagino)
		pagino = be32_to_cpu(agi->agi_newino);

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	/*
	 * If in the same AG as the parent, try to get near the parent.
	 */
	if (pagno == agno) {
		int		doneleft;	/* done, to the left */
		int		doneright;	/* done, to the right */
		int		searchdistance = 10;

		error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);

		error = xfs_inobt_get_rec(cur, &rec, &j);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, j == 1, error0);

		if (rec.ir_freecount > 0) {
			/*
			 * Found a free inode in the same chunk
			 * as the parent, done.
			 */
			goto alloc_inode;
		}


		/*
		 * In the same AG as parent, but parent's chunk is full.
		 */

		/* duplicate the cursor, search left & right simultaneously */
		error = xfs_btree_dup_cursor(cur, &tcur);
		if (error)
			goto error0;

		/*
		 * Skip to last blocks looked up if same parent inode.
		 */
		if (pagino != NULLAGINO &&
		    pag->pagl_pagino == pagino &&
		    pag->pagl_leftrec != NULLAGINO &&
		    pag->pagl_rightrec != NULLAGINO) {
			error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
						   &trec, &doneleft);
			if (error)
				goto error1;

			error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
						   &rec, &doneright);
			if (error)
				goto error1;
		} else {
			/* search left with tcur, back up 1 record */
			error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
			if (error)
				goto error1;

			/* search right with cur, go forward 1 record. */
			error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
			if (error)
				goto error1;
		}

		/*
		 * Loop until we find an inode chunk with a free inode.
		 */
		while (!doneleft || !doneright) {
			int	useleft;  /* using left inode chunk this time */

			if (!--searchdistance) {
				/*
				 * Not in range - save last search
				 * location and allocate a new inode
				 */
				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
				pag->pagl_leftrec = trec.ir_startino;
				pag->pagl_rightrec = rec.ir_startino;
				pag->pagl_pagino = pagino;
				goto newino;
			}

			/* figure out the closer block if both are valid. */
			if (!doneleft && !doneright) {
				useleft = pagino -
				 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
				  rec.ir_startino - pagino;
			} else {
				useleft = !doneleft;
			}

			/* free inodes to the left? */
			if (useleft && trec.ir_freecount) {
				rec = trec;
				xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
				cur = tcur;

				pag->pagl_leftrec = trec.ir_startino;
				pag->pagl_rightrec = rec.ir_startino;
				pag->pagl_pagino = pagino;
				goto alloc_inode;
			}

			/* free inodes to the right? */
			if (!useleft && rec.ir_freecount) {
				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);

				pag->pagl_leftrec = trec.ir_startino;
				pag->pagl_rightrec = rec.ir_startino;
				pag->pagl_pagino = pagino;
				goto alloc_inode;
			}

			/* get next record to check */
			if (useleft) {
				error = xfs_ialloc_next_rec(tcur, &trec,
								 &doneleft, 1);
			} else {
				error = xfs_ialloc_next_rec(cur, &rec,
								 &doneright, 0);
			}
			if (error)
				goto error1;
		}

		/*
		 * We've reached the end of the btree. Because we only search
		 * a small chunk of the btree on each pass, there are
		 * obviously free inodes closer to the parent inode than
		 * where we are now. Restart the search.
		 */
		pag->pagl_pagino = NULLAGINO;
		pag->pagl_leftrec = NULLAGINO;
		pag->pagl_rightrec = NULLAGINO;
		xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		goto restart_pagno;
	}

	/*
	 * In a different AG from the parent.
	 * See if the most recently allocated block has any free.
	 */
newino:
	if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
					 XFS_LOOKUP_EQ, &i);
		if (error)
			goto error0;

		if (i == 1) {
			error = xfs_inobt_get_rec(cur, &rec, &j);
			if (error)
				goto error0;

			if (j == 1 && rec.ir_freecount > 0) {
				/*
				 * The last chunk allocated in the group
				 * still has a free inode.
				 */
				goto alloc_inode;
			}
		}
	}

	/*
	 * None left in the last group, search the whole AG
	 */
	error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);

	for (;;) {
		error = xfs_inobt_get_rec(cur, &rec, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if (rec.ir_freecount > 0)
			break;
		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
	}

alloc_inode:
	offset = xfs_inobt_first_free_inode(&rec);
	ASSERT(offset >= 0);
	ASSERT(offset < XFS_INODES_PER_CHUNK);
	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
				   XFS_INODES_PER_CHUNK) == 0);
	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;
	error = xfs_inobt_update(cur, &rec);
	if (error)
		goto error0;
	be32_add_cpu(&agi->agi_freecount, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
	pag->pagi_freecount--;

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
	xfs_perag_put(pag);
	*inop = ino;
	return 0;
error1:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	xfs_perag_put(pag);
	return error;
}

/*
 * Use the free inode btree to allocate an inode based on distance from the
 * parent. Note that the provided cursor may be deleted and replaced.
 */
STATIC int
xfs_dialloc_ag_finobt_near(
	xfs_agino_t			pagino,
	struct xfs_btree_cur		**ocur,
	struct xfs_inobt_rec_incore	*rec)
{
	struct xfs_btree_cur		*lcur = *ocur;	/* left search cursor */
	struct xfs_btree_cur		*rcur;	/* right search cursor */
	struct xfs_inobt_rec_incore	rrec;
	int				error;
	int				i, j;

	error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i);
	if (error)
		return error;

	if (i == 1) {
		error = xfs_inobt_get_rec(lcur, rec, &i);
		if (error)
			return error;
		XFS_WANT_CORRUPTED_RETURN(lcur->bc_mp, i == 1);

		/*
		 * See if we've landed in the parent inode record. The finobt
		 * only tracks chunks with at least one free inode, so record
		 * existence is enough.
		 */
		if (pagino >= rec->ir_startino &&
		    pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK))
			return 0;
	}

	error = xfs_btree_dup_cursor(lcur, &rcur);
	if (error)
		return error;

	error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j);
	if (error)
		goto error_rcur;
	if (j == 1) {
		error = xfs_inobt_get_rec(rcur, &rrec, &j);
		if (error)
			goto error_rcur;
		XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, j == 1, error_rcur);
	}

	XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, i == 1 || j == 1, error_rcur);
	if (i == 1 && j == 1) {
		/*
		 * Both the left and right records are valid. Choose the closer
		 * inode chunk to the target.
		 */
		if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) >
		    (rrec.ir_startino - pagino)) {
			*rec = rrec;
			xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
			*ocur = rcur;
		} else {
			xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
		}
	} else if (j == 1) {
		/* only the right record is valid */
		*rec = rrec;
		xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
		*ocur = rcur;
	} else if (i == 1) {
		/* only the left record is valid */
		xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
	}

	return 0;

error_rcur:
	xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Use the free inode btree to find a free inode based on a newino hint. If
 * the hint is NULL, find the first free inode in the AG.
 */
STATIC int
xfs_dialloc_ag_finobt_newino(
	struct xfs_agi			*agi,
	struct xfs_btree_cur		*cur,
	struct xfs_inobt_rec_incore	*rec)
{
	int error;
	int i;

	if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
					 XFS_LOOKUP_EQ, &i);
		if (error)
			return error;
		if (i == 1) {
			error = xfs_inobt_get_rec(cur, rec, &i);
			if (error)
				return error;
			XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
			return 0;
		}
	}

	/*
	 * Find the first inode available in the AG.
	 */
	error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);

	error = xfs_inobt_get_rec(cur, rec, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);

	return 0;
}

/*
 * Update the inobt based on a modification made to the finobt. Also ensure that
 * the records from both trees are equivalent post-modification.
 */
STATIC int
xfs_dialloc_ag_update_inobt(
	struct xfs_btree_cur		*cur,	/* inobt cursor */
	struct xfs_inobt_rec_incore	*frec,	/* finobt record */
	int				offset) /* inode offset */
{
	struct xfs_inobt_rec_incore	rec;
	int				error;
	int				i;

	error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);

	error = xfs_inobt_get_rec(cur, &rec, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
	ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
				   XFS_INODES_PER_CHUNK) == 0);

	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;

	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, (rec.ir_free == frec->ir_free) &&
				  (rec.ir_freecount == frec->ir_freecount));

	return xfs_inobt_update(cur, &rec);
}

/*
 * Allocate an inode using the free inode btree, if available. Otherwise, fall
 * back to the inobt search algorithm.
 *
 * The caller selected an AG for us, and made sure that free inodes are
 * available.
 */
STATIC int
xfs_dialloc_ag(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_ino_t		parent,
	xfs_ino_t		*inop)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
	xfs_agnumber_t			pagno = XFS_INO_TO_AGNO(mp, parent);
	xfs_agino_t			pagino = XFS_INO_TO_AGINO(mp, parent);
	struct xfs_perag		*pag;
	struct xfs_btree_cur		*cur;	/* finobt cursor */
	struct xfs_btree_cur		*icur;	/* inobt cursor */
	struct xfs_inobt_rec_incore	rec;
	xfs_ino_t			ino;
	int				error;
	int				offset;
	int				i;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return xfs_dialloc_ag_inobt(tp, agbp, parent, inop);

	pag = xfs_perag_get(mp, agno);

	/*
	 * If pagino is 0 (this is the root inode allocation) use newino.
	 * This must work because we've just allocated some.
	 */
	if (!pagino)
		pagino = be32_to_cpu(agi->agi_newino);

	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error_cur;

	/*
	 * The search algorithm depends on whether we're in the same AG as the
	 * parent. If so, find the closest available inode to the parent. If
	 * not, consider the agi hint or find the first free inode in the AG.
	 */
	if (agno == pagno)
		error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
	else
		error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
	if (error)
		goto error_cur;

	offset = xfs_inobt_first_free_inode(&rec);
	ASSERT(offset >= 0);
	ASSERT(offset < XFS_INODES_PER_CHUNK);
	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
				   XFS_INODES_PER_CHUNK) == 0);
	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);

	/*
	 * Modify or remove the finobt record.
	 */
	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;
	if (rec.ir_freecount)
		error = xfs_inobt_update(cur, &rec);
	else
		error = xfs_btree_delete(cur, &i);
	if (error)
		goto error_cur;

	/*
	 * The finobt has now been updated appropriately. We haven't updated the
	 * agi and superblock yet, so we can create an inobt cursor and validate
	 * the original freecount. If all is well, make the equivalent update to
	 * the inobt using the finobt record and offset information.
	 */
	icur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);

	error = xfs_check_agi_freecount(icur, agi);
	if (error)
		goto error_icur;

	error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
	if (error)
		goto error_icur;

	/*
	 * Both trees have now been updated. We must update the perag and
	 * superblock before we can check the freecount for each btree.
	 */
	be32_add_cpu(&agi->agi_freecount, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
	pag->pagi_freecount--;

	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);

	error = xfs_check_agi_freecount(icur, agi);
	if (error)
		goto error_icur;
	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error_icur;

	xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	xfs_perag_put(pag);
	*inop = ino;
	return 0;

error_icur:
	xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
error_cur:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	xfs_perag_put(pag);
	return error;
}

/*
 * Allocate an inode on disk.
 *
 * Mode is used to tell whether the new inode will need space, and whether it
 * is a directory.
 *
 * This function is designed to be called twice if it has to do an allocation
 * to make more free inodes.  On the first call, *IO_agbp should be set to NULL.
 * If an inode is available without having to perform an allocation, an inode
 * number is returned.  In this case, *IO_agbp is set to NULL.  If an allocation
 * needs to be done, xfs_dialloc returns the current AGI buffer in *IO_agbp.
 * The caller should then commit the current transaction, allocate a
 * new transaction, and call xfs_dialloc() again, passing in the previous value
 * of *IO_agbp.  IO_agbp should be held across the transactions. Since the AGI
 * buffer is locked across the two calls, the second call is guaranteed to have
 * a free inode available.
 *
 * Once we successfully pick an inode its number is returned and the on-disk
 * data structures are updated.  The inode itself is not read in, since doing so
 * would break ordering constraints with xfs_reclaim.
 */
1668int
1669xfs_dialloc(
1670	struct xfs_trans	*tp,
1671	xfs_ino_t		parent,
1672	umode_t			mode,
1673	int			okalloc,
1674	struct xfs_buf		**IO_agbp,
1675	xfs_ino_t		*inop)
1676{
1677	struct xfs_mount	*mp = tp->t_mountp;
1678	struct xfs_buf		*agbp;
1679	xfs_agnumber_t		agno;
1680	int			error;
1681	int			ialloced;
1682	int			noroom = 0;
1683	xfs_agnumber_t		start_agno;
1684	struct xfs_perag	*pag;
1685
1686	if (*IO_agbp) {
1687		/*
1688		 * If the caller passes in a pointer to the AGI buffer,
1689		 * continue where we left off before.  In this case, we
1690		 * know that the allocation group has free inodes.
1691		 */
1692		agbp = *IO_agbp;
1693		goto out_alloc;
1694	}
1695
1696	/*
1697	 * We do not have an agbp, so select an initial allocation
1698	 * group for inode allocation.
1699	 */
1700	start_agno = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
1701	if (start_agno == NULLAGNUMBER) {
1702		*inop = NULLFSINO;
1703		return 0;
1704	}
1705
1706	/*
1707	 * If we have already hit the ceiling of inode blocks then clear
1708	 * okalloc so we scan all available agi structures for a free
1709	 * inode.
1710	 *
1711	 * Read rough value of mp->m_icount by percpu_counter_read_positive,
1712	 * which will sacrifice the preciseness but improve the performance.
1713	 */
1714	if (mp->m_maxicount &&
1715	    percpu_counter_read_positive(&mp->m_icount) + mp->m_ialloc_inos
1716							> mp->m_maxicount) {
1717		noroom = 1;
1718		okalloc = 0;
1719	}
1720
1721	/*
1722	 * Loop until we find an allocation group that either has free inodes
1723	 * or in which we can allocate some inodes.  Iterate through the
1724	 * allocation groups upward, wrapping at the end.
1725	 */
1726	agno = start_agno;
1727	for (;;) {
1728		pag = xfs_perag_get(mp, agno);
1729		if (!pag->pagi_inodeok) {
1730			xfs_ialloc_next_ag(mp);
1731			goto nextag;
1732		}
1733
1734		if (!pag->pagi_init) {
1735			error = xfs_ialloc_pagi_init(mp, tp, agno);
1736			if (error)
1737				goto out_error;
1738		}
1739
1740		/*
1741		 * Do a first racy fast path check if this AG is usable.
1742		 */
1743		if (!pag->pagi_freecount && !okalloc)
1744			goto nextag;
1745
1746		/*
1747		 * Then read in the AGI buffer and recheck with the AGI buffer
1748		 * lock held.
1749		 */
1750		error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
1751		if (error)
1752			goto out_error;
1753
1754		if (pag->pagi_freecount) {
1755			xfs_perag_put(pag);
1756			goto out_alloc;
1757		}
1758
1759		if (!okalloc)
1760			goto nextag_relse_buffer;
1761
1762
1763		error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
1764		if (error) {
1765			xfs_trans_brelse(tp, agbp);
1766
1767			if (error != -ENOSPC)
1768				goto out_error;
1769
1770			xfs_perag_put(pag);
1771			*inop = NULLFSINO;
1772			return 0;
1773		}
1774
1775		if (ialloced) {
1776			/*
1777			 * We successfully allocated some inodes, return
1778			 * the current context to the caller so that it
1779			 * can commit the current transaction and call
1780			 * us again where we left off.
1781			 */
1782			ASSERT(pag->pagi_freecount > 0);
1783			xfs_perag_put(pag);
1784
1785			*IO_agbp = agbp;
1786			*inop = NULLFSINO;
1787			return 0;
1788		}
1789
1790nextag_relse_buffer:
1791		xfs_trans_brelse(tp, agbp);
1792nextag:
1793		xfs_perag_put(pag);
1794		if (++agno == mp->m_sb.sb_agcount)
1795			agno = 0;
1796		if (agno == start_agno) {
1797			*inop = NULLFSINO;
1798			return noroom ? -ENOSPC : 0;
1799		}
1800	}
1801
1802out_alloc:
1803	*IO_agbp = NULL;
1804	return xfs_dialloc_ag(tp, agbp, parent, inop);
1805out_error:
1806	xfs_perag_put(pag);
1807	return error;
1808}
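
/*
 * Illustrative sketch, not part of the original source: the caller side of
 * the two-phase protocol above.  On the first call *IO_agbp is NULL; if
 * xfs_dialloc() hands back a locked AGI buffer while *inop is still
 * NULLFSINO, a chunk was allocated and the caller is expected to commit
 * the current transaction and call again so the second pass can pick an
 * inode from that chunk.  xfs_example_roll() is a hypothetical stand-in
 * for however the caller commits and continues its transaction.
 */
#if 0	/* example only, never compiled */
static int
xfs_dialloc_example(
	struct xfs_trans	**tpp,
	xfs_ino_t		parent,
	umode_t			mode,
	xfs_ino_t		*inop)
{
	struct xfs_buf		*agbp = NULL;
	int			error;

	error = xfs_dialloc(*tpp, parent, mode, 1, &agbp, inop);
	if (error || !agbp)
		return error;

	/* a new chunk was allocated; roll the transaction and retry */
	error = xfs_example_roll(tpp);		/* hypothetical */
	if (error)
		return error;
	return xfs_dialloc(*tpp, parent, mode, 1, &agbp, inop);
}
#endif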
1809
1810/*
1811 * Free the blocks of an inode chunk. We must consider that the inode chunk
1812 * might be sparse and only free the regions that are allocated as part of the
1813 * chunk.
1814 */
1815STATIC void
1816xfs_difree_inode_chunk(
1817	struct xfs_mount		*mp,
1818	xfs_agnumber_t			agno,
1819	struct xfs_inobt_rec_incore	*rec,
1820	struct xfs_bmap_free		*flist)
1821{
1822	xfs_agblock_t	sagbno = XFS_AGINO_TO_AGBNO(mp, rec->ir_startino);
1823	int		startidx, endidx;
1824	int		nextbit;
1825	xfs_agblock_t	agbno;
1826	int		contigblk;
1827	DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
1828
1829	if (!xfs_inobt_issparse(rec->ir_holemask)) {
1830		/* not sparse, calculate extent info directly */
1831		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, agno, sagbno),
1832				  mp->m_ialloc_blks, flist, mp);
1834		return;
1835	}
1836
1837	/* holemask is only 16 bits (fits in an unsigned long) */
1838	ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0]));
1839	holemask[0] = rec->ir_holemask;
1840
1841	/*
1842	 * Find contiguous ranges of zeroes (i.e., allocated regions) in the
1843	 * holemask and convert the start/end index of each range to an extent.
1844	 * We start with the start and end index both pointing at the first 0 in
1845	 * the mask.
1846	 */
1847	startidx = endidx = find_first_zero_bit(holemask,
1848						XFS_INOBT_HOLEMASK_BITS);
1849	nextbit = startidx + 1;
1850	while (startidx < XFS_INOBT_HOLEMASK_BITS) {
1851		nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
1852					     nextbit);
1853		/*
1854		 * If the next zero bit is contiguous, update the end index of
1855		 * the current range and continue.
1856		 */
1857		if (nextbit != XFS_INOBT_HOLEMASK_BITS &&
1858		    nextbit == endidx + 1) {
1859			endidx = nextbit;
1860			goto next;
1861		}
1862
1863		/*
1864		 * nextbit is not contiguous with the current end index. Convert
1865		 * the current start/end to an extent and add it to the free
1866		 * list.
1867		 */
1868		agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) /
1869				  mp->m_sb.sb_inopblock;
1870		contigblk = ((endidx - startidx + 1) *
1871			     XFS_INODES_PER_HOLEMASK_BIT) /
1872			    mp->m_sb.sb_inopblock;
1873
1874		ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
1875		ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
1876		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, agno, agbno), contigblk,
1877				  flist, mp);
1878
1879		/* reset range to current bit and carry on... */
1880		startidx = endidx = nextbit;
1881
1882next:
1883		nextbit++;
1884	}
1885}
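
/*
 * Worked example for the sparse path above, with illustrative numbers that
 * are not from the original source: XFS_INODES_PER_CHUNK = 64 and
 * XFS_INOBT_HOLEMASK_BITS = 16 give XFS_INODES_PER_HOLEMASK_BIT = 4, i.e.
 * each holemask bit covers four inodes.  On a filesystem with
 * sb_inopblock = 8, a run of zero bits at indices 4..7 describes 16
 * allocated inodes, so the loop computes:
 *
 *	agbno     = sagbno + (4 * 4) / 8 = sagbno + 2
 *	contigblk = ((7 - 4 + 1) * 4) / 8 = 2
 *
 * and a two-block extent starting two blocks into the chunk is freed.
 */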
1886
1887STATIC int
1888xfs_difree_inobt(
1889	struct xfs_mount		*mp,
1890	struct xfs_trans		*tp,
1891	struct xfs_buf			*agbp,
1892	xfs_agino_t			agino,
1893	struct xfs_bmap_free		*flist,
1894	struct xfs_icluster		*xic,
1895	struct xfs_inobt_rec_incore	*orec)
1896{
1897	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
1898	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
1899	struct xfs_perag		*pag;
1900	struct xfs_btree_cur		*cur;
1901	struct xfs_inobt_rec_incore	rec;
1902	int				ilen;
1903	int				error;
1904	int				i;
1905	int				off;
1906
1907	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
1908	ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));
1909
1910	/*
1911	 * Initialize the cursor.
1912	 */
1913	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1914
1915	error = xfs_check_agi_freecount(cur, agi);
1916	if (error)
1917		goto error0;
1918
1919	/*
1920	 * Look for the entry describing this inode.
1921	 */
1922	if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
1923		xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
1924			__func__, error);
1925		goto error0;
1926	}
1927	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1928	error = xfs_inobt_get_rec(cur, &rec, &i);
1929	if (error) {
1930		xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
1931			__func__, error);
1932		goto error0;
1933	}
1934	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1935	/*
1936	 * Get the offset in the inode chunk.
1937	 */
1938	off = agino - rec.ir_startino;
1939	ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
1940	ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
1941	/*
1942	 * Mark the inode free & increment the count.
1943	 */
1944	rec.ir_free |= XFS_INOBT_MASK(off);
1945	rec.ir_freecount++;
1946
1947	/*
1948	 * When an inode chunk is fully free, it becomes eligible for removal.
1949	 * Don't remove the chunk if the block size is large enough to hold
1950	 * multiple inode chunks, as the others might not be free.
1951	 */
1952	if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
1953	    rec.ir_free == XFS_INOBT_ALL_FREE &&
1954	    mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
1955		xic->deleted = 1;
1956		xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
1957		xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
1958
1959		/*
1960		 * Remove the inode cluster from the AGI B+Tree, adjust the
1961		 * AGI and Superblock inode counts, and mark the disk space
1962		 * to be freed when the transaction is committed.
1963		 */
1964		ilen = rec.ir_freecount;
1965		be32_add_cpu(&agi->agi_count, -ilen);
1966		be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
1967		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
1968		pag = xfs_perag_get(mp, agno);
1969		pag->pagi_freecount -= ilen - 1;
1970		xfs_perag_put(pag);
1971		xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
1972		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
1973
1974		if ((error = xfs_btree_delete(cur, &i))) {
1975			xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
1976				__func__, error);
1977			goto error0;
1978		}
1979
1980		xfs_difree_inode_chunk(mp, agno, &rec, flist);
1981	} else {
1982		xic->deleted = 0;
1983
1984		error = xfs_inobt_update(cur, &rec);
1985		if (error) {
1986			xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
1987				__func__, error);
1988			goto error0;
1989		}
1990
1991		/*
1992		 * Change the inode free counts and log the ag/sb changes.
1993		 */
1994		be32_add_cpu(&agi->agi_freecount, 1);
1995		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1996		pag = xfs_perag_get(mp, agno);
1997		pag->pagi_freecount++;
1998		xfs_perag_put(pag);
1999		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
2000	}
2001
2002	error = xfs_check_agi_freecount(cur, agi);
2003	if (error)
2004		goto error0;
2005
2006	*orec = rec;
2007	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2008	return 0;
2009
2010error0:
2011	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2012	return error;
2013}
2014
2015/*
2016 * Free an inode in the free inode btree.
2017 */
2018STATIC int
2019xfs_difree_finobt(
2020	struct xfs_mount		*mp,
2021	struct xfs_trans		*tp,
2022	struct xfs_buf			*agbp,
2023	xfs_agino_t			agino,
2024	struct xfs_inobt_rec_incore	*ibtrec) /* inobt record */
2025{
2026	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
2027	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
2028	struct xfs_btree_cur		*cur;
2029	struct xfs_inobt_rec_incore	rec;
2030	int				offset = agino - ibtrec->ir_startino;
2031	int				error;
2032	int				i;
2033
2034	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
2035
2036	error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
2037	if (error)
2038		goto error;
2039	if (i == 0) {
2040		/*
2041		 * If the record does not exist in the finobt, we must have just
2042		 * freed an inode in a previously fully allocated chunk. If not,
2043		 * something is out of sync.
2044		 */
2045		XFS_WANT_CORRUPTED_GOTO(mp, ibtrec->ir_freecount == 1, error);
2046
2047		error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
2048					     ibtrec->ir_count,
2049					     ibtrec->ir_freecount,
2050					     ibtrec->ir_free, &i);
2051		if (error)
2052			goto error;
2053		ASSERT(i == 1);
2054
2055		goto out;
2056	}
2057
2058	/*
2059	 * Read and update the existing record. We could just copy the ibtrec
2060	 * across here, but that would defeat the purpose of having redundant
2061	 * metadata. By making the modifications independently, we can catch
2062	 * corruptions that we wouldn't see if we just copied from one record
2063	 * to another.
2064	 */
2065	error = xfs_inobt_get_rec(cur, &rec, &i);
2066	if (error)
2067		goto error;
2068	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
2069
2070	rec.ir_free |= XFS_INOBT_MASK(offset);
2071	rec.ir_freecount++;
2072
2073	XFS_WANT_CORRUPTED_GOTO(mp, (rec.ir_free == ibtrec->ir_free) &&
2074				(rec.ir_freecount == ibtrec->ir_freecount),
2075				error);
2076
2077	/*
2078	 * The content of inobt records should always match between the inobt
2079	 * and finobt. The lifecycle of records in the finobt is different from
2080	 * the inobt in that the finobt only tracks records with at least one
2081	 * free inode. Hence, if all of the inodes are free and we aren't
2082	 * keeping inode chunks permanently on disk, remove the record.
2083	 * Otherwise, update the record with the new information.
2084	 *
2085	 * Note that we currently can't free chunks when the block size is large
2086	 * enough to hold multiple chunks. In that case, leave the finobt record
2087	 * in place so that it stays in sync with the inobt.
2088	 */
2089	if (rec.ir_free == XFS_INOBT_ALL_FREE &&
2090	    mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK &&
2091	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
2092		error = xfs_btree_delete(cur, &i);
2093		if (error)
2094			goto error;
2095		ASSERT(i == 1);
2096	} else {
2097		error = xfs_inobt_update(cur, &rec);
2098		if (error)
2099			goto error;
2100	}
2101
2102out:
2103	error = xfs_check_agi_freecount(cur, agi);
2104	if (error)
2105		goto error;
2106
2107	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2108	return 0;
2109
2110error:
2111	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2112	return error;
2113}
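
/*
 * Illustrative walk-through of the function above, not from the original
 * source: a fully allocated chunk has no finobt record at all.  Freeing
 * one inode drives the inobt record to ir_freecount == 1, so the lookup
 * misses and a fresh finobt record is inserted.  Freeing a second inode
 * finds that record and bumps it to ir_freecount == 2, which must then
 * match the independently updated inobt record or the
 * XFS_WANT_CORRUPTED_GOTO() cross-check fires.
 */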
2114
2115/*
2116 * Free a disk inode.  Carefully avoids touching the incore inode; all
2117 * incore manipulations are the caller's responsibility.
2118 * The on-disk inode is not changed by this operation, only the
2119 * btree (free inode mask) is changed.
2120 */
2121int
2122xfs_difree(
2123	struct xfs_trans	*tp,		/* transaction pointer */
2124	xfs_ino_t		inode,		/* inode to be freed */
2125	struct xfs_bmap_free	*flist,		/* extents to free */
2126	struct xfs_icluster	*xic)	/* cluster info if deleted */
2127{
2128	/* REFERENCED */
2129	xfs_agblock_t		agbno;	/* block number containing inode */
2130	struct xfs_buf		*agbp;	/* buffer for allocation group header */
2131	xfs_agino_t		agino;	/* allocation group inode number */
2132	xfs_agnumber_t		agno;	/* allocation group number */
2133	int			error;	/* error return value */
2134	struct xfs_mount	*mp;	/* mount structure for filesystem */
2135	struct xfs_inobt_rec_incore rec;/* btree record */
2136
2137	mp = tp->t_mountp;
2138
2139	/*
2140	 * Break up inode number into its components.
2141	 */
2142	agno = XFS_INO_TO_AGNO(mp, inode);
2143	if (agno >= mp->m_sb.sb_agcount)  {
2144		xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
2145			__func__, agno, mp->m_sb.sb_agcount);
2146		ASSERT(0);
2147		return -EINVAL;
2148	}
2149	agino = XFS_INO_TO_AGINO(mp, inode);
2150	if (inode != XFS_AGINO_TO_INO(mp, agno, agino))  {
2151		xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
2152			__func__, (unsigned long long)inode,
2153			(unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
2154		ASSERT(0);
2155		return -EINVAL;
2156	}
2157	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2158	if (agbno >= mp->m_sb.sb_agblocks)  {
2159		xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
2160			__func__, agbno, mp->m_sb.sb_agblocks);
2161		ASSERT(0);
2162		return -EINVAL;
2163	}
2164	/*
2165	 * Get the allocation group header.
2166	 */
2167	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
2168	if (error) {
2169		xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
2170			__func__, error);
2171		return error;
2172	}
2173
2174	/*
2175	 * Fix up the inode allocation btree.
2176	 */
2177	error = xfs_difree_inobt(mp, tp, agbp, agino, flist, xic, &rec);
2178	if (error)
2179		goto error0;
2180
2181	/*
2182	 * Fix up the free inode btree.
2183	 */
2184	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
2185		error = xfs_difree_finobt(mp, tp, agbp, agino, &rec);
2186		if (error)
2187			goto error0;
2188	}
2189
2190	return 0;
2191
2192error0:
2193	return error;
2194}
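
/*
 * Illustrative sketch, not part of the original source: callers hand in an
 * xfs_icluster and check xic.deleted afterwards to learn whether the whole
 * chunk went away, in which case the in-core inode cluster must be dealt
 * with as well, e.g. the way the xfs_ifree() path does via
 * xfs_ifree_cluster().  Error handling is elided.
 */
#if 0	/* example only, never compiled */
	struct xfs_icluster	xic = { 0 };

	error = xfs_difree(tp, ip->i_ino, flist, &xic);
	if (!error && xic.deleted)
		error = xfs_ifree_cluster(ip, tp, &xic);
#endif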
2195
2196STATIC int
2197xfs_imap_lookup(
2198	struct xfs_mount	*mp,
2199	struct xfs_trans	*tp,
2200	xfs_agnumber_t		agno,
2201	xfs_agino_t		agino,
2202	xfs_agblock_t		agbno,
2203	xfs_agblock_t		*chunk_agbno,
2204	xfs_agblock_t		*offset_agbno,
2205	int			flags)
2206{
2207	struct xfs_inobt_rec_incore rec;
2208	struct xfs_btree_cur	*cur;
2209	struct xfs_buf		*agbp;
2210	int			error;
2211	int			i;
2212
2213	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
2214	if (error) {
2215		xfs_alert(mp,
2216			"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
2217			__func__, error, agno);
2218		return error;
2219	}
2220
2221	/*
2222	 * Lookup the inode record for the given agino. If the record cannot be
2223	 * found, then it's an invalid inode number and we should abort. Once
2224	 * we have a record, we need to ensure it contains the inode number
2225	 * we are looking up.
2226	 */
2227	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
2228	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
2229	if (!error) {
2230		if (i)
2231			error = xfs_inobt_get_rec(cur, &rec, &i);
2232		if (!error && i == 0)
2233			error = -EINVAL;
2234	}
2235
2236	xfs_trans_brelse(tp, agbp);
2237	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
2238	if (error)
2239		return error;
2240
2241	/* check that the returned record contains the required inode */
2242	if (rec.ir_startino > agino ||
2243	    rec.ir_startino + mp->m_ialloc_inos <= agino)
2244		return -EINVAL;
2245
2246	/* for untrusted inodes check it is allocated first */
2247	if ((flags & XFS_IGET_UNTRUSTED) &&
2248	    (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
2249		return -EINVAL;
2250
2251	*chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
2252	*offset_agbno = agbno - *chunk_agbno;
2253	return 0;
2254}
2255
2256/*
2257 * Return the location of the inode in imap, for mapping it into a buffer.
2258 */
2259int
2260xfs_imap(
2261	xfs_mount_t	 *mp,	/* file system mount structure */
2262	xfs_trans_t	 *tp,	/* transaction pointer */
2263	xfs_ino_t	ino,	/* inode to locate */
2264	struct xfs_imap	*imap,	/* location map structure */
2265	uint		flags)	/* flags for inode btree lookup */
2266{
2267	xfs_agblock_t	agbno;	/* block number of inode in the alloc group */
2268	xfs_agino_t	agino;	/* inode number within alloc group */
2269	xfs_agnumber_t	agno;	/* allocation group number */
2270	int		blks_per_cluster; /* num blocks per inode cluster */
2271	xfs_agblock_t	chunk_agbno;	/* first block in inode chunk */
2272	xfs_agblock_t	cluster_agbno;	/* first block in inode cluster */
2273	int		error;	/* error code */
2274	int		offset;	/* index of inode in its buffer */
2275	xfs_agblock_t	offset_agbno;	/* blks from chunk start to inode */
2276
2277	ASSERT(ino != NULLFSINO);
2278
2279	/*
2280	 * Split up the inode number into its parts.
2281	 */
2282	agno = XFS_INO_TO_AGNO(mp, ino);
2283	agino = XFS_INO_TO_AGINO(mp, ino);
2284	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2285	if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
2286	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
2287#ifdef DEBUG
2288		/*
2289		 * Don't output diagnostic information for untrusted inodes
2290		 * as they can be invalid without implying corruption.
2291		 */
2292		if (flags & XFS_IGET_UNTRUSTED)
2293			return -EINVAL;
2294		if (agno >= mp->m_sb.sb_agcount) {
2295			xfs_alert(mp,
2296				"%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
2297				__func__, agno, mp->m_sb.sb_agcount);
2298		}
2299		if (agbno >= mp->m_sb.sb_agblocks) {
2300			xfs_alert(mp,
2301		"%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
2302				__func__, (unsigned long long)agbno,
2303				(unsigned long)mp->m_sb.sb_agblocks);
2304		}
2305		if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
2306			xfs_alert(mp,
2307		"%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
2308				__func__, ino,
2309				XFS_AGINO_TO_INO(mp, agno, agino));
2310		}
2311		xfs_stack_trace();
2312#endif /* DEBUG */
2313		return -EINVAL;
2314	}
2315
2316	blks_per_cluster = xfs_icluster_size_fsb(mp);
2317
2318	/*
2319	 * For bulkstat and handle lookups, we have an untrusted inode number
2320	 * that we have to verify is valid. We cannot do this just by reading
2321	 * the inode buffer as it may have been unlinked and removed leaving
2322	 * inodes in stale state on disk. Hence we have to do a btree lookup
2323	 * in all cases where an untrusted inode number is passed.
2324	 */
2325	if (flags & XFS_IGET_UNTRUSTED) {
2326		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
2327					&chunk_agbno, &offset_agbno, flags);
2328		if (error)
2329			return error;
2330		goto out_map;
2331	}
2332
2333	/*
2334	 * If the inode cluster size is the same as the blocksize or
2335	 * smaller, we can get to the buffer with simple arithmetic.
2336	 */
2337	if (blks_per_cluster == 1) {
2338		offset = XFS_INO_TO_OFFSET(mp, ino);
2339		ASSERT(offset < mp->m_sb.sb_inopblock);
2340
2341		imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
2342		imap->im_len = XFS_FSB_TO_BB(mp, 1);
2343		imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
2344		return 0;
2345	}
2346
2347	/*
2348	 * If the inode chunks are aligned then use simple arithmetic to
2349	 * find the location. Otherwise we have to do a btree
2350	 * lookup to find it.
2351	 */
2352	if (mp->m_inoalign_mask) {
2353		offset_agbno = agbno & mp->m_inoalign_mask;
2354		chunk_agbno = agbno - offset_agbno;
2355	} else {
2356		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
2357					&chunk_agbno, &offset_agbno, flags);
2358		if (error)
2359			return error;
2360	}
2361
2362out_map:
2363	ASSERT(agbno >= chunk_agbno);
2364	cluster_agbno = chunk_agbno +
2365		((offset_agbno / blks_per_cluster) * blks_per_cluster);
2366	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
2367		XFS_INO_TO_OFFSET(mp, ino);
2368
2369	imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
2370	imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
2371	imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
2372
2373	/*
2374	 * If the inode number maps to a block outside the bounds
2375	 * of the file system then return an error rather than calling
2376	 * read_buf and panicking when we get an error from the
2377	 * driver.
2378	 */
2379	if ((imap->im_blkno + imap->im_len) >
2380	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
2381		xfs_alert(mp,
2382	"%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
2383			__func__, (unsigned long long) imap->im_blkno,
2384			(unsigned long long) imap->im_len,
2385			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
2386		return -EINVAL;
2387	}
2388	return 0;
2389}
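
/*
 * Illustrative sketch, not part of the original source: a typical consumer
 * maps the inode and then reads the cluster buffer using that mapping; the
 * inode itself sits im_boffset bytes into the buffer.  xfs_inode_buf_ops
 * is assumed here to be the verifier used for inode buffers; error
 * handling is elided.
 */
#if 0	/* example only, never compiled */
	struct xfs_imap	imap;
	struct xfs_buf	*bp;
	int		error;

	error = xfs_imap(mp, tp, ino, &imap, 0);
	if (!error)
		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   imap.im_blkno, imap.im_len, 0,
					   &bp, &xfs_inode_buf_ops);
#endif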
2390
2391/*
2392 * Compute and fill in the value of m_in_maxlevels.
2393 */
2394void
2395xfs_ialloc_compute_maxlevels(
2396	xfs_mount_t	*mp)		/* file system mount structure */
2397{
2398	int		level;
2399	uint		maxblocks;
2400	uint		maxleafents;
2401	int		minleafrecs;
2402	int		minnoderecs;
2403
2404	maxleafents = (1LL << XFS_INO_AGINO_BITS(mp)) >>
2405		XFS_INODES_PER_CHUNK_LOG;
2406	minleafrecs = mp->m_inobt_mnr[0];
2407	minnoderecs = mp->m_inobt_mnr[1];
2408	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
2409	for (level = 1; maxblocks > 1; level++)
2410		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
2411	mp->m_in_maxlevels = level;
2412}
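
/*
 * Worked example with illustrative record counts that are not from the
 * original source: with 32 bits of per-AG inode numbers, maxleafents is
 * 2^32 >> XFS_INODES_PER_CHUNK_LOG = 2^26 chunk records.  Assuming a
 * hypothetical minimum of 125 records per leaf and per node block:
 *
 *	level 1: ceil(2^26 / 125)   = 536871 leaf blocks
 *	level 2: ceil(536871 / 125) = 4295 node blocks
 *	level 3: ceil(4295 / 125)   = 35 node blocks
 *	level 4: ceil(35 / 125)     = 1 root block
 *
 * so m_in_maxlevels would be set to 4.
 */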
2413
2414/*
2415 * Log specified fields for the ag hdr (inode section). The growth of the agi
2416 * structure over time requires that we interpret the buffer as two logical
2417 * regions delineated by the end of the unlinked list. This is due to the size
2418 * of the hash table and its location in the middle of the agi.
2419 *
2420 * For example, a request to log a field before agi_unlinked and a field after
2421 * agi_unlinked could cause us to log the entire hash table and use an excessive
2422 * amount of log space. To avoid this behavior, log the region up through
2423 * agi_unlinked in one call and the region after agi_unlinked through the end of
2424 * the structure in another.
2425 */
2426void
2427xfs_ialloc_log_agi(
2428	xfs_trans_t	*tp,		/* transaction pointer */
2429	xfs_buf_t	*bp,		/* allocation group header buffer */
2430	int		fields)		/* bitmask of fields to log */
2431{
2432	int			first;		/* first byte number */
2433	int			last;		/* last byte number */
2434	static const short	offsets[] = {	/* field starting offsets */
2435					/* keep in sync with bit definitions */
2436		offsetof(xfs_agi_t, agi_magicnum),
2437		offsetof(xfs_agi_t, agi_versionnum),
2438		offsetof(xfs_agi_t, agi_seqno),
2439		offsetof(xfs_agi_t, agi_length),
2440		offsetof(xfs_agi_t, agi_count),
2441		offsetof(xfs_agi_t, agi_root),
2442		offsetof(xfs_agi_t, agi_level),
2443		offsetof(xfs_agi_t, agi_freecount),
2444		offsetof(xfs_agi_t, agi_newino),
2445		offsetof(xfs_agi_t, agi_dirino),
2446		offsetof(xfs_agi_t, agi_unlinked),
2447		offsetof(xfs_agi_t, agi_free_root),
2448		offsetof(xfs_agi_t, agi_free_level),
2449		sizeof(xfs_agi_t)
2450	};
2451#ifdef DEBUG
2452	xfs_agi_t		*agi;	/* allocation group header */
2453
2454	agi = XFS_BUF_TO_AGI(bp);
2455	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
2456#endif
2457
2458	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGI_BUF);
2459
2460	/*
2461	 * Compute byte offsets for the first and last fields in the first
2462	 * region and log the agi buffer. This only logs up through
2463	 * agi_unlinked.
2464	 */
2465	if (fields & XFS_AGI_ALL_BITS_R1) {
2466		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
2467				  &first, &last);
2468		xfs_trans_log_buf(tp, bp, first, last);
2469	}
2470
2471	/*
2472	 * Mask off the bits in the first region and calculate the first and
2473	 * last field offsets for any bits in the second region.
2474	 */
2475	fields &= ~XFS_AGI_ALL_BITS_R1;
2476	if (fields) {
2477		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
2478				  &first, &last);
2479		xfs_trans_log_buf(tp, bp, first, last);
2480	}
2481}
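
/*
 * Illustrative example, not part of the original source: logging one field
 * from each region, e.g.
 *
 *	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT | XFS_AGI_FREE_ROOT);
 *
 * issues two xfs_trans_log_buf() calls, one covering agi_freecount and one
 * covering agi_free_root, instead of a single byte range that would drag
 * the whole 64-bucket agi_unlinked hash table into the log.
 */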
2482
2483#ifdef DEBUG
2484STATIC void
2485xfs_check_agi_unlinked(
2486	struct xfs_agi		*agi)
2487{
2488	int			i;
2489
2490	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
2491		ASSERT(agi->agi_unlinked[i]);
2492}
2493#else
2494#define xfs_check_agi_unlinked(agi)
2495#endif
2496
2497static bool
2498xfs_agi_verify(
2499	struct xfs_buf	*bp)
2500{
2501	struct xfs_mount *mp = bp->b_target->bt_mount;
2502	struct xfs_agi	*agi = XFS_BUF_TO_AGI(bp);
2503
2504	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2505		if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
2506			return false;
2507		if (!xfs_log_check_lsn(mp,
2508				be64_to_cpu(XFS_BUF_TO_AGI(bp)->agi_lsn)))
2509			return false;
2510	}
2511
2512	/*
2513	 * Validate the magic number of the agi block.
2514	 */
2515	if (agi->agi_magicnum != cpu_to_be32(XFS_AGI_MAGIC))
2516		return false;
2517	if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
2518		return false;
2519
2520	if (be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
2521		return false;
2522	/*
2523	 * During growfs operations, the perag is not fully initialised, so
2524	 * we can't use it for any useful checking. growfs ensures we can't
2525	 * use it by using uncached buffers that don't have the perag
2526	 * attached, so we can detect and avoid this problem.
2527	 */
2528	if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
2529		return false;
2530
2531	xfs_check_agi_unlinked(agi);
2532	return true;
2533}
2534
2535static void
2536xfs_agi_read_verify(
2537	struct xfs_buf	*bp)
2538{
2539	struct xfs_mount *mp = bp->b_target->bt_mount;
2540
2541	if (xfs_sb_version_hascrc(&mp->m_sb) &&
2542	    !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
2543		xfs_buf_ioerror(bp, -EFSBADCRC);
2544	else if (XFS_TEST_ERROR(!xfs_agi_verify(bp), mp,
2545				XFS_ERRTAG_IALLOC_READ_AGI,
2546				XFS_RANDOM_IALLOC_READ_AGI))
2547		xfs_buf_ioerror(bp, -EFSCORRUPTED);
2548
2549	if (bp->b_error)
2550		xfs_verifier_error(bp);
2551}
2552
2553static void
2554xfs_agi_write_verify(
2555	struct xfs_buf	*bp)
2556{
2557	struct xfs_mount *mp = bp->b_target->bt_mount;
2558	struct xfs_buf_log_item	*bip = bp->b_fspriv;
2559
2560	if (!xfs_agi_verify(bp)) {
2561		xfs_buf_ioerror(bp, -EFSCORRUPTED);
2562		xfs_verifier_error(bp);
2563		return;
2564	}
2565
2566	if (!xfs_sb_version_hascrc(&mp->m_sb))
2567		return;
2568
2569	if (bip)
2570		XFS_BUF_TO_AGI(bp)->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2571	xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
2572}
2573
2574const struct xfs_buf_ops xfs_agi_buf_ops = {
2575	.name = "xfs_agi",
2576	.verify_read = xfs_agi_read_verify,
2577	.verify_write = xfs_agi_write_verify,
2578};
2579
2580/*
2581 * Read in the allocation group header (inode allocation section)
2582 */
2583int
2584xfs_read_agi(
2585	struct xfs_mount	*mp,	/* file system mount structure */
2586	struct xfs_trans	*tp,	/* transaction pointer */
2587	xfs_agnumber_t		agno,	/* allocation group number */
2588	struct xfs_buf		**bpp)	/* allocation group hdr buf */
2589{
2590	int			error;
2591
2592	trace_xfs_read_agi(mp, agno);
2593
2594	ASSERT(agno != NULLAGNUMBER);
2595	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
2596			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
2597			XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
2598	if (error)
2599		return error;
2600
2601	xfs_buf_set_ref(*bpp, XFS_AGI_REF);
2602	return 0;
2603}
2604
2605int
2606xfs_ialloc_read_agi(
2607	struct xfs_mount	*mp,	/* file system mount structure */
2608	struct xfs_trans	*tp,	/* transaction pointer */
2609	xfs_agnumber_t		agno,	/* allocation group number */
2610	struct xfs_buf		**bpp)	/* allocation group hdr buf */
2611{
2612	struct xfs_agi		*agi;	/* allocation group header */
2613	struct xfs_perag	*pag;	/* per allocation group data */
2614	int			error;
2615
2616	trace_xfs_ialloc_read_agi(mp, agno);
2617
2618	error = xfs_read_agi(mp, tp, agno, bpp);
2619	if (error)
2620		return error;
2621
2622	agi = XFS_BUF_TO_AGI(*bpp);
2623	pag = xfs_perag_get(mp, agno);
2624	if (!pag->pagi_init) {
2625		pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
2626		pag->pagi_count = be32_to_cpu(agi->agi_count);
2627		pag->pagi_init = 1;
2628	}
2629
2630	/*
2631	 * It's possible for these to be out of sync if
2632	 * we are in the middle of a forced shutdown.
2633	 */
2634	ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
2635		XFS_FORCED_SHUTDOWN(mp));
2636	xfs_perag_put(pag);
2637	return 0;
2638}
2639
2640/*
2641 * Read in the agi to initialise the per-ag data in the mount structure
2642 */
2643int
2644xfs_ialloc_pagi_init(
2645	xfs_mount_t	*mp,		/* file system mount structure */
2646	xfs_trans_t	*tp,		/* transaction pointer */
2647	xfs_agnumber_t	agno)		/* allocation group number */
2648{
2649	xfs_buf_t	*bp = NULL;
2650	int		error;
2651
2652	error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
2653	if (error)
2654		return error;
2655	if (bp)
2656		xfs_trans_brelse(tp, bp);
2657	return 0;
2658}