   1/*
   2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include <linux/log2.h>
  19
  20#include "xfs.h"
  21#include "xfs_fs.h"
  22#include "xfs_types.h"
  23#include "xfs_bit.h"
  24#include "xfs_log.h"
  25#include "xfs_inum.h"
  26#include "xfs_trans.h"
  27#include "xfs_trans_priv.h"
  28#include "xfs_sb.h"
  29#include "xfs_ag.h"
  30#include "xfs_mount.h"
  31#include "xfs_bmap_btree.h"
  32#include "xfs_alloc_btree.h"
  33#include "xfs_ialloc_btree.h"
  34#include "xfs_attr_sf.h"
  35#include "xfs_dinode.h"
  36#include "xfs_inode.h"
  37#include "xfs_buf_item.h"
  38#include "xfs_inode_item.h"
  39#include "xfs_btree.h"
  40#include "xfs_alloc.h"
  41#include "xfs_ialloc.h"
  42#include "xfs_bmap.h"
  43#include "xfs_error.h"
  44#include "xfs_utils.h"
  45#include "xfs_quota.h"
  46#include "xfs_filestream.h"
  47#include "xfs_vnodeops.h"
  48#include "xfs_trace.h"
  49
  50kmem_zone_t *xfs_ifork_zone;
  51kmem_zone_t *xfs_inode_zone;
  52
  53/*
  54 * Used in xfs_itruncate_extents().  This is the maximum number of extents
  55 * freed from a file in a single transaction.
  56 */
  57#define	XFS_ITRUNC_MAX_EXTENTS	2
  58
  59STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
  60STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
  61STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
  62STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
  63
  64#ifdef DEBUG
  65/*
  66 * Make sure that the extents in the given memory buffer
  67 * are valid.
  68 */
  69STATIC void
  70xfs_validate_extents(
  71	xfs_ifork_t		*ifp,
  72	int			nrecs,
  73	xfs_exntfmt_t		fmt)
  74{
  75	xfs_bmbt_irec_t		irec;
  76	xfs_bmbt_rec_host_t	rec;
  77	int			i;
  78
  79	for (i = 0; i < nrecs; i++) {
  80		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
  81		rec.l0 = get_unaligned(&ep->l0);
  82		rec.l1 = get_unaligned(&ep->l1);
  83		xfs_bmbt_get_all(&rec, &irec);
  84		if (fmt == XFS_EXTFMT_NOSTATE)
  85			ASSERT(irec.br_state == XFS_EXT_NORM);
  86	}
  87}
  88#else /* DEBUG */
  89#define xfs_validate_extents(ifp, nrecs, fmt)
  90#endif /* DEBUG */
  91
  92/*
   93 * Check that none of the inodes in the buffer have a next
  94 * unlinked field of 0.
  95 */
  96#if defined(DEBUG)
  97void
  98xfs_inobp_check(
  99	xfs_mount_t	*mp,
 100	xfs_buf_t	*bp)
 101{
 102	int		i;
 103	int		j;
 104	xfs_dinode_t	*dip;
 105
 106	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
 107
 108	for (i = 0; i < j; i++) {
 109		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
 110					i * mp->m_sb.sb_inodesize);
 111		if (!dip->di_next_unlinked)  {
 112			xfs_alert(mp,
 113	"Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
 114				bp);
 115			ASSERT(dip->di_next_unlinked);
 116		}
 117	}
 118}
 119#endif
 120
 121/*
  122 * Find the buffer associated with the given inode map.
 123 * We do basic validation checks on the buffer once it has been
 124 * retrieved from disk.
 125 */
 126STATIC int
 127xfs_imap_to_bp(
 128	xfs_mount_t	*mp,
 129	xfs_trans_t	*tp,
 130	struct xfs_imap	*imap,
 131	xfs_buf_t	**bpp,
 132	uint		buf_flags,
 133	uint		iget_flags)
 134{
 135	int		error;
 136	int		i;
 137	int		ni;
 138	xfs_buf_t	*bp;
 139
 140	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
 141				   (int)imap->im_len, buf_flags, &bp);
 142	if (error) {
 143		if (error != EAGAIN) {
 144			xfs_warn(mp,
 145				"%s: xfs_trans_read_buf() returned error %d.",
 146				__func__, error);
 147		} else {
 148			ASSERT(buf_flags & XBF_TRYLOCK);
 149		}
 150		return error;
 151	}
 152
 153	/*
 154	 * Validate the magic number and version of every inode in the buffer
  155	 * (on a DEBUG kernel) or just the first inode in the buffer otherwise.
 156	 */
 157#ifdef DEBUG
 158	ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
 159#else	/* usual case */
 160	ni = 1;
 161#endif
 162
 163	for (i = 0; i < ni; i++) {
 164		int		di_ok;
 165		xfs_dinode_t	*dip;
 166
 167		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
 168					(i << mp->m_sb.sb_inodelog));
 169		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
 170			    XFS_DINODE_GOOD_VERSION(dip->di_version);
 171		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
 172						XFS_ERRTAG_ITOBP_INOTOBP,
 173						XFS_RANDOM_ITOBP_INOTOBP))) {
 174			if (iget_flags & XFS_IGET_UNTRUSTED) {
 175				xfs_trans_brelse(tp, bp);
 176				return XFS_ERROR(EINVAL);
 177			}
 178			XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
 179						XFS_ERRLEVEL_HIGH, mp, dip);
 180#ifdef DEBUG
 181			xfs_emerg(mp,
 182				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
 183				(unsigned long long)imap->im_blkno, i,
 184				be16_to_cpu(dip->di_magic));
 185			ASSERT(0);
 186#endif
 187			xfs_trans_brelse(tp, bp);
 188			return XFS_ERROR(EFSCORRUPTED);
 189		}
 190	}
 191
 192	xfs_inobp_check(mp, bp);
 193
 194	/*
 195	 * Mark the buffer as an inode buffer now that it looks good
 196	 */
 197	XFS_BUF_SET_VTYPE(bp, B_FS_INO);
 198
 199	*bpp = bp;
 200	return 0;
 201}
 202
 203/*
 204 * This routine is called to map an inode number within a file
 205 * system to the buffer containing the on-disk version of the
 206 * inode.  It returns a pointer to the buffer containing the
 207 * on-disk inode in the bpp parameter, and in the dip parameter
 208 * it returns a pointer to the on-disk inode within that buffer.
 209 *
 210 * If a non-zero error is returned, then the contents of bpp and
 211 * dipp are undefined.
 212 *
 213 * Use xfs_imap() to determine the size and location of the
 214 * buffer to read from disk.
 215 */
 216int
 217xfs_inotobp(
 218	xfs_mount_t	*mp,
 219	xfs_trans_t	*tp,
 220	xfs_ino_t	ino,
 221	xfs_dinode_t	**dipp,
 222	xfs_buf_t	**bpp,
 223	int		*offset,
 224	uint		imap_flags)
 225{
 226	struct xfs_imap	imap;
 227	xfs_buf_t	*bp;
 228	int		error;
 229
 230	imap.im_blkno = 0;
 231	error = xfs_imap(mp, tp, ino, &imap, imap_flags);
 232	if (error)
 233		return error;
 234
 235	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags);
 236	if (error)
 237		return error;
 238
 239	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
 240	*bpp = bp;
 241	*offset = imap.im_boffset;
 242	return 0;
 243}
 244
 245
 246/*
 247 * This routine is called to map an inode to the buffer containing
 248 * the on-disk version of the inode.  It returns a pointer to the
 249 * buffer containing the on-disk inode in the bpp parameter, and in
 250 * the dip parameter it returns a pointer to the on-disk inode within
 251 * that buffer.
 252 *
 253 * If a non-zero error is returned, then the contents of bpp and
 254 * dipp are undefined.
 255 *
  256 * The inode is expected to have already been mapped to its buffer and read
 257 * in once, thus we can use the mapping information stored in the inode
 258 * rather than calling xfs_imap().  This allows us to avoid the overhead
 259 * of looking at the inode btree for small block file systems
 260 * (see xfs_imap()).
 261 */
 262int
 263xfs_itobp(
 264	xfs_mount_t	*mp,
 265	xfs_trans_t	*tp,
 266	xfs_inode_t	*ip,
 267	xfs_dinode_t	**dipp,
 268	xfs_buf_t	**bpp,
 269	uint		buf_flags)
 270{
 271	xfs_buf_t	*bp;
 272	int		error;
 273
 274	ASSERT(ip->i_imap.im_blkno != 0);
 275
 276	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0);
 277	if (error)
 278		return error;
 279
 280	if (!bp) {
 281		ASSERT(buf_flags & XBF_TRYLOCK);
 282		ASSERT(tp == NULL);
 283		*bpp = NULL;
 284		return EAGAIN;
 285	}
 286
 287	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
 288	*bpp = bp;
 289	return 0;
 290}
 291
 292/*
 293 * Move inode type and inode format specific information from the
 294 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
  295 * this means setting if_rdev to the proper value.  For files, directories,
  296 * and symlinks it means bringing in the in-line data or extent
 297 * pointers.  For a file in B-tree format, only the root is immediately
 298 * brought in-core.  The rest will be in-lined in if_extents when it
 299 * is first referenced (see xfs_iread_extents()).
 300 */
 301STATIC int
 302xfs_iformat(
 303	xfs_inode_t		*ip,
 304	xfs_dinode_t		*dip)
 305{
 306	xfs_attr_shortform_t	*atp;
 307	int			size;
 308	int			error;
 309	xfs_fsize_t             di_size;
 310	ip->i_df.if_ext_max =
 311		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
 312	error = 0;
 313
 314	if (unlikely(be32_to_cpu(dip->di_nextents) +
 315		     be16_to_cpu(dip->di_anextents) >
 316		     be64_to_cpu(dip->di_nblocks))) {
 317		xfs_warn(ip->i_mount,
 318			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
 319			(unsigned long long)ip->i_ino,
 320			(int)(be32_to_cpu(dip->di_nextents) +
 321			      be16_to_cpu(dip->di_anextents)),
 322			(unsigned long long)
 323				be64_to_cpu(dip->di_nblocks));
 324		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
 325				     ip->i_mount, dip);
 326		return XFS_ERROR(EFSCORRUPTED);
 327	}
 328
 329	if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
 330		xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
 331			(unsigned long long)ip->i_ino,
 332			dip->di_forkoff);
 333		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
 334				     ip->i_mount, dip);
 335		return XFS_ERROR(EFSCORRUPTED);
 336	}
 337
 338	if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
 339		     !ip->i_mount->m_rtdev_targp)) {
 340		xfs_warn(ip->i_mount,
 341			"corrupt dinode %Lu, has realtime flag set.",
 342			ip->i_ino);
 343		XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
 344				     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
 345		return XFS_ERROR(EFSCORRUPTED);
 346	}
 347
 348	switch (ip->i_d.di_mode & S_IFMT) {
 349	case S_IFIFO:
 350	case S_IFCHR:
 351	case S_IFBLK:
 352	case S_IFSOCK:
 353		if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
 354			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
 355					      ip->i_mount, dip);
 356			return XFS_ERROR(EFSCORRUPTED);
 357		}
 358		ip->i_d.di_size = 0;
 359		ip->i_size = 0;
 360		ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
 361		break;
 362
 363	case S_IFREG:
 364	case S_IFLNK:
 365	case S_IFDIR:
 366		switch (dip->di_format) {
 367		case XFS_DINODE_FMT_LOCAL:
 368			/*
 369			 * no local regular files yet
 370			 */
 371			if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
 372				xfs_warn(ip->i_mount,
 373			"corrupt inode %Lu (local format for regular file).",
 374					(unsigned long long) ip->i_ino);
 375				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
 376						     XFS_ERRLEVEL_LOW,
 377						     ip->i_mount, dip);
 378				return XFS_ERROR(EFSCORRUPTED);
 379			}
 380
 381			di_size = be64_to_cpu(dip->di_size);
 382			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
 383				xfs_warn(ip->i_mount,
 384			"corrupt inode %Lu (bad size %Ld for local inode).",
 385					(unsigned long long) ip->i_ino,
 386					(long long) di_size);
 387				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
 388						     XFS_ERRLEVEL_LOW,
 389						     ip->i_mount, dip);
 390				return XFS_ERROR(EFSCORRUPTED);
 391			}
 392
 393			size = (int)di_size;
 394			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
 395			break;
 396		case XFS_DINODE_FMT_EXTENTS:
 397			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
 398			break;
 399		case XFS_DINODE_FMT_BTREE:
 400			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
 401			break;
 402		default:
 403			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
 404					 ip->i_mount);
 405			return XFS_ERROR(EFSCORRUPTED);
 406		}
 407		break;
 408
 409	default:
 410		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
 411		return XFS_ERROR(EFSCORRUPTED);
 412	}
 413	if (error) {
 414		return error;
 415	}
 416	if (!XFS_DFORK_Q(dip))
 417		return 0;
 418	ASSERT(ip->i_afp == NULL);
 419	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
 420	ip->i_afp->if_ext_max =
 421		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
 422	switch (dip->di_aformat) {
 423	case XFS_DINODE_FMT_LOCAL:
 424		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
 425		size = be16_to_cpu(atp->hdr.totsize);
 426
 427		if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
 428			xfs_warn(ip->i_mount,
 429				"corrupt inode %Lu (bad attr fork size %Ld).",
 430				(unsigned long long) ip->i_ino,
 431				(long long) size);
 432			XFS_CORRUPTION_ERROR("xfs_iformat(8)",
 433					     XFS_ERRLEVEL_LOW,
 434					     ip->i_mount, dip);
 435			return XFS_ERROR(EFSCORRUPTED);
 436		}
 437
 438		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
 439		break;
 440	case XFS_DINODE_FMT_EXTENTS:
 441		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
 442		break;
 443	case XFS_DINODE_FMT_BTREE:
 444		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
 445		break;
 446	default:
 447		error = XFS_ERROR(EFSCORRUPTED);
 448		break;
 449	}
 450	if (error) {
 451		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
 452		ip->i_afp = NULL;
 453		xfs_idestroy_fork(ip, XFS_DATA_FORK);
 454	}
 455	return error;
 456}
 457
 458/*
 459 * The file is in-lined in the on-disk inode.
 460 * If it fits into if_inline_data, then copy
 461 * it there, otherwise allocate a buffer for it
 462 * and copy the data there.  Either way, set
 463 * if_data to point at the data.
 464 * If we allocate a buffer for the data, make
 465 * sure that its size is a multiple of 4 and
 466 * record the real size in i_real_bytes.
 467 */
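/*
 * Illustrative worked example (an editorial sketch, not part of the original
 * source): for a local fork of, say, 70 bytes that does not fit in
 * if_inline_data, the code below ends up with
 *
 *	real_size = roundup(70, 4) = 72;	(allocated buffer length)
 *	ifp->if_bytes = 70;			(logical data length)
 *	ifp->if_real_bytes = 72;		(recorded allocation size)
 *
 * whereas a fork small enough for if_inline_data keeps if_real_bytes at 0
 * and points if_data at the inline area.  The value 70 is arbitrary and
 * assumes it exceeds sizeof(if_inline_data).
 */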
 468STATIC int
 469xfs_iformat_local(
 470	xfs_inode_t	*ip,
 471	xfs_dinode_t	*dip,
 472	int		whichfork,
 473	int		size)
 474{
 475	xfs_ifork_t	*ifp;
 476	int		real_size;
 477
 478	/*
 479	 * If the size is unreasonable, then something
 480	 * is wrong and we just bail out rather than crash in
 481	 * kmem_alloc() or memcpy() below.
 482	 */
 483	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
 484		xfs_warn(ip->i_mount,
 485	"corrupt inode %Lu (bad size %d for local fork, size = %d).",
 486			(unsigned long long) ip->i_ino, size,
 487			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
 488		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
 489				     ip->i_mount, dip);
 490		return XFS_ERROR(EFSCORRUPTED);
 491	}
 492	ifp = XFS_IFORK_PTR(ip, whichfork);
 493	real_size = 0;
 494	if (size == 0)
 495		ifp->if_u1.if_data = NULL;
 496	else if (size <= sizeof(ifp->if_u2.if_inline_data))
 497		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
 498	else {
 499		real_size = roundup(size, 4);
 500		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
 501	}
 502	ifp->if_bytes = size;
 503	ifp->if_real_bytes = real_size;
 504	if (size)
 505		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
 506	ifp->if_flags &= ~XFS_IFEXTENTS;
 507	ifp->if_flags |= XFS_IFINLINE;
 508	return 0;
 509}
 510
 511/*
 512 * The file consists of a set of extents all
 513 * of which fit into the on-disk inode.
 514 * If there are few enough extents to fit into
 515 * the if_inline_ext, then copy them there.
 516 * Otherwise allocate a buffer for them and copy
 517 * them into it.  Either way, set if_extents
 518 * to point at the extents.
 519 */
 520STATIC int
 521xfs_iformat_extents(
 522	xfs_inode_t	*ip,
 523	xfs_dinode_t	*dip,
 524	int		whichfork)
 525{
 526	xfs_bmbt_rec_t	*dp;
 527	xfs_ifork_t	*ifp;
 528	int		nex;
 529	int		size;
 530	int		i;
 531
 532	ifp = XFS_IFORK_PTR(ip, whichfork);
 533	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
 534	size = nex * (uint)sizeof(xfs_bmbt_rec_t);
 535
 536	/*
 537	 * If the number of extents is unreasonable, then something
 538	 * is wrong and we just bail out rather than crash in
 539	 * kmem_alloc() or memcpy() below.
 540	 */
 541	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
 542		xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
 543			(unsigned long long) ip->i_ino, nex);
 544		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
 545				     ip->i_mount, dip);
 546		return XFS_ERROR(EFSCORRUPTED);
 547	}
 548
 549	ifp->if_real_bytes = 0;
 550	if (nex == 0)
 551		ifp->if_u1.if_extents = NULL;
 552	else if (nex <= XFS_INLINE_EXTS)
 553		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
 554	else
 555		xfs_iext_add(ifp, 0, nex);
 556
 557	ifp->if_bytes = size;
 558	if (size) {
 559		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
 560		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
 561		for (i = 0; i < nex; i++, dp++) {
 562			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
 563			ep->l0 = get_unaligned_be64(&dp->l0);
 564			ep->l1 = get_unaligned_be64(&dp->l1);
 565		}
 566		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
 567		if (whichfork != XFS_DATA_FORK ||
 568			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
 569				if (unlikely(xfs_check_nostate_extents(
 570				    ifp, 0, nex))) {
 571					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
 572							 XFS_ERRLEVEL_LOW,
 573							 ip->i_mount);
 574					return XFS_ERROR(EFSCORRUPTED);
 575				}
 576	}
 577	ifp->if_flags |= XFS_IFEXTENTS;
 578	return 0;
 579}
 580
 581/*
 582 * The file has too many extents to fit into
 583 * the inode, so they are in B-tree format.
 584 * Allocate a buffer for the root of the B-tree
 585 * and copy the root into it.  The i_extents
 586 * field will remain NULL until all of the
 587 * extents are read in (when they are needed).
 588 */
 589STATIC int
 590xfs_iformat_btree(
 591	xfs_inode_t		*ip,
 592	xfs_dinode_t		*dip,
 593	int			whichfork)
 594{
 595	xfs_bmdr_block_t	*dfp;
 596	xfs_ifork_t		*ifp;
 597	/* REFERENCED */
 598	int			nrecs;
 599	int			size;
 600
 601	ifp = XFS_IFORK_PTR(ip, whichfork);
 602	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
 603	size = XFS_BMAP_BROOT_SPACE(dfp);
 604	nrecs = be16_to_cpu(dfp->bb_numrecs);
 605
 606	/*
  607	 * blow out if -- fork has fewer extents than can fit in
 608	 * fork (fork shouldn't be a btree format), root btree
 609	 * block has more records than can fit into the fork,
 610	 * or the number of extents is greater than the number of
 611	 * blocks.
 612	 */
 613	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
 614	    || XFS_BMDR_SPACE_CALC(nrecs) >
 615			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
 616	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
 617		xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
 618			(unsigned long long) ip->i_ino);
 619		XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
 620				 ip->i_mount, dip);
 621		return XFS_ERROR(EFSCORRUPTED);
 622	}
 623
 624	ifp->if_broot_bytes = size;
 625	ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
 626	ASSERT(ifp->if_broot != NULL);
 627	/*
 628	 * Copy and convert from the on-disk structure
 629	 * to the in-memory structure.
 630	 */
 631	xfs_bmdr_to_bmbt(ip->i_mount, dfp,
 632			 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
 633			 ifp->if_broot, size);
 634	ifp->if_flags &= ~XFS_IFEXTENTS;
 635	ifp->if_flags |= XFS_IFBROOT;
 636
 637	return 0;
 638}
 639
 640STATIC void
 641xfs_dinode_from_disk(
 642	xfs_icdinode_t		*to,
 643	xfs_dinode_t		*from)
 644{
 645	to->di_magic = be16_to_cpu(from->di_magic);
 646	to->di_mode = be16_to_cpu(from->di_mode);
  647	to->di_version = from->di_version;
 648	to->di_format = from->di_format;
 649	to->di_onlink = be16_to_cpu(from->di_onlink);
 650	to->di_uid = be32_to_cpu(from->di_uid);
 651	to->di_gid = be32_to_cpu(from->di_gid);
 652	to->di_nlink = be32_to_cpu(from->di_nlink);
 653	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
 654	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
 655	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
 656	to->di_flushiter = be16_to_cpu(from->di_flushiter);
 657	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
 658	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
 659	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
 660	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
 661	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
 662	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
 663	to->di_size = be64_to_cpu(from->di_size);
 664	to->di_nblocks = be64_to_cpu(from->di_nblocks);
 665	to->di_extsize = be32_to_cpu(from->di_extsize);
 666	to->di_nextents = be32_to_cpu(from->di_nextents);
 667	to->di_anextents = be16_to_cpu(from->di_anextents);
 668	to->di_forkoff = from->di_forkoff;
 669	to->di_aformat	= from->di_aformat;
 670	to->di_dmevmask	= be32_to_cpu(from->di_dmevmask);
 671	to->di_dmstate	= be16_to_cpu(from->di_dmstate);
 672	to->di_flags	= be16_to_cpu(from->di_flags);
 673	to->di_gen	= be32_to_cpu(from->di_gen);
 674}
 675
 676void
 677xfs_dinode_to_disk(
 678	xfs_dinode_t		*to,
 679	xfs_icdinode_t		*from)
 680{
 681	to->di_magic = cpu_to_be16(from->di_magic);
 682	to->di_mode = cpu_to_be16(from->di_mode);
  683	to->di_version = from->di_version;
 684	to->di_format = from->di_format;
 685	to->di_onlink = cpu_to_be16(from->di_onlink);
 686	to->di_uid = cpu_to_be32(from->di_uid);
 687	to->di_gid = cpu_to_be32(from->di_gid);
 688	to->di_nlink = cpu_to_be32(from->di_nlink);
 689	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
 690	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
 691	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
 692	to->di_flushiter = cpu_to_be16(from->di_flushiter);
 693	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
 694	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
 695	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
 696	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
 697	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
 698	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
 699	to->di_size = cpu_to_be64(from->di_size);
 700	to->di_nblocks = cpu_to_be64(from->di_nblocks);
 701	to->di_extsize = cpu_to_be32(from->di_extsize);
 702	to->di_nextents = cpu_to_be32(from->di_nextents);
 703	to->di_anextents = cpu_to_be16(from->di_anextents);
 704	to->di_forkoff = from->di_forkoff;
 705	to->di_aformat = from->di_aformat;
 706	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
 707	to->di_dmstate = cpu_to_be16(from->di_dmstate);
 708	to->di_flags = cpu_to_be16(from->di_flags);
 709	to->di_gen = cpu_to_be32(from->di_gen);
 710}
 711
 712STATIC uint
 713_xfs_dic2xflags(
 714	__uint16_t		di_flags)
 715{
 716	uint			flags = 0;
 717
 718	if (di_flags & XFS_DIFLAG_ANY) {
 719		if (di_flags & XFS_DIFLAG_REALTIME)
 720			flags |= XFS_XFLAG_REALTIME;
 721		if (di_flags & XFS_DIFLAG_PREALLOC)
 722			flags |= XFS_XFLAG_PREALLOC;
 723		if (di_flags & XFS_DIFLAG_IMMUTABLE)
 724			flags |= XFS_XFLAG_IMMUTABLE;
 725		if (di_flags & XFS_DIFLAG_APPEND)
 726			flags |= XFS_XFLAG_APPEND;
 727		if (di_flags & XFS_DIFLAG_SYNC)
 728			flags |= XFS_XFLAG_SYNC;
 729		if (di_flags & XFS_DIFLAG_NOATIME)
 730			flags |= XFS_XFLAG_NOATIME;
 731		if (di_flags & XFS_DIFLAG_NODUMP)
 732			flags |= XFS_XFLAG_NODUMP;
 733		if (di_flags & XFS_DIFLAG_RTINHERIT)
 734			flags |= XFS_XFLAG_RTINHERIT;
 735		if (di_flags & XFS_DIFLAG_PROJINHERIT)
 736			flags |= XFS_XFLAG_PROJINHERIT;
 737		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
 738			flags |= XFS_XFLAG_NOSYMLINKS;
 739		if (di_flags & XFS_DIFLAG_EXTSIZE)
 740			flags |= XFS_XFLAG_EXTSIZE;
 741		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
 742			flags |= XFS_XFLAG_EXTSZINHERIT;
 743		if (di_flags & XFS_DIFLAG_NODEFRAG)
 744			flags |= XFS_XFLAG_NODEFRAG;
 745		if (di_flags & XFS_DIFLAG_FILESTREAM)
 746			flags |= XFS_XFLAG_FILESTREAM;
 747	}
 748
 749	return flags;
 750}
 751
 752uint
 753xfs_ip2xflags(
 754	xfs_inode_t		*ip)
 755{
 756	xfs_icdinode_t		*dic = &ip->i_d;
 757
 758	return _xfs_dic2xflags(dic->di_flags) |
 759				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
 760}
 761
 762uint
 763xfs_dic2xflags(
 764	xfs_dinode_t		*dip)
 765{
 766	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
 767				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
 768}
 769
 770/*
 771 * Read the disk inode attributes into the in-core inode structure.
 772 */
 773int
 774xfs_iread(
 775	xfs_mount_t	*mp,
 776	xfs_trans_t	*tp,
 777	xfs_inode_t	*ip,
 778	uint		iget_flags)
 779{
 780	xfs_buf_t	*bp;
 781	xfs_dinode_t	*dip;
 782	int		error;
 783
 784	/*
 785	 * Fill in the location information in the in-core inode.
 786	 */
 787	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
 788	if (error)
 789		return error;
 790
 791	/*
 792	 * Get pointers to the on-disk inode and the buffer containing it.
 793	 */
 794	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
 795			       XBF_LOCK, iget_flags);
 796	if (error)
 797		return error;
 798	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
 799
 800	/*
 801	 * If we got something that isn't an inode it means someone
 802	 * (nfs or dmi) has a stale handle.
 803	 */
 804	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) {
 805#ifdef DEBUG
 806		xfs_alert(mp,
 807			"%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)",
 808			__func__, be16_to_cpu(dip->di_magic), XFS_DINODE_MAGIC);
 809#endif /* DEBUG */
 810		error = XFS_ERROR(EINVAL);
 811		goto out_brelse;
 812	}
 813
 814	/*
 815	 * If the on-disk inode is already linked to a directory
 816	 * entry, copy all of the inode into the in-core inode.
 817	 * xfs_iformat() handles copying in the inode format
 818	 * specific information.
 819	 * Otherwise, just get the truly permanent information.
 820	 */
 821	if (dip->di_mode) {
 822		xfs_dinode_from_disk(&ip->i_d, dip);
 823		error = xfs_iformat(ip, dip);
 824		if (error)  {
 825#ifdef DEBUG
 826			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
 827				__func__, error);
 828#endif /* DEBUG */
 829			goto out_brelse;
 830		}
 831	} else {
 832		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
 833		ip->i_d.di_version = dip->di_version;
 834		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
 835		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
 836		/*
 837		 * Make sure to pull in the mode here as well in
 838		 * case the inode is released without being used.
 839		 * This ensures that xfs_inactive() will see that
 840		 * the inode is already free and not try to mess
 841		 * with the uninitialized part of it.
 842		 */
 843		ip->i_d.di_mode = 0;
 844		/*
 845		 * Initialize the per-fork minima and maxima for a new
 846		 * inode here.  xfs_iformat will do it for old inodes.
 847		 */
 848		ip->i_df.if_ext_max =
 849			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
 850	}
 851
 852	/*
 853	 * The inode format changed when we moved the link count and
 854	 * made it 32 bits long.  If this is an old format inode,
 855	 * convert it in memory to look like a new one.  If it gets
 856	 * flushed to disk we will convert back before flushing or
 857	 * logging it.  We zero out the new projid field and the old link
 858	 * count field.  We'll handle clearing the pad field (the remains
 859	 * of the old uuid field) when we actually convert the inode to
 860	 * the new format. We don't change the version number so that we
 861	 * can distinguish this from a real new format inode.
 862	 */
 863	if (ip->i_d.di_version == 1) {
 864		ip->i_d.di_nlink = ip->i_d.di_onlink;
 865		ip->i_d.di_onlink = 0;
 866		xfs_set_projid(ip, 0);
 867	}
 868
 869	ip->i_delayed_blks = 0;
 870	ip->i_size = ip->i_d.di_size;
 871
 872	/*
 873	 * Mark the buffer containing the inode as something to keep
 874	 * around for a while.  This helps to keep recently accessed
 875	 * meta-data in-core longer.
 876	 */
 877	xfs_buf_set_ref(bp, XFS_INO_REF);
 878
 879	/*
 880	 * Use xfs_trans_brelse() to release the buffer containing the
 881	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
  882	 * in xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
 883	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
 884	 * will only release the buffer if it is not dirty within the
 885	 * transaction.  It will be OK to release the buffer in this case,
 886	 * because inodes on disk are never destroyed and we will be
 887	 * locking the new in-core inode before putting it in the hash
 888	 * table where other processes can find it.  Thus we don't have
 889	 * to worry about the inode being changed just because we released
 890	 * the buffer.
 891	 */
 892 out_brelse:
 893	xfs_trans_brelse(tp, bp);
 894	return error;
 895}
 896
 897/*
 898 * Read in extents from a btree-format inode.
 899 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 900 */
 901int
 902xfs_iread_extents(
 903	xfs_trans_t	*tp,
 904	xfs_inode_t	*ip,
 905	int		whichfork)
 906{
 907	int		error;
 908	xfs_ifork_t	*ifp;
 909	xfs_extnum_t	nextents;
 910
 911	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
 912		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
 913				 ip->i_mount);
 914		return XFS_ERROR(EFSCORRUPTED);
 915	}
 916	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
 917	ifp = XFS_IFORK_PTR(ip, whichfork);
 918
 919	/*
 920	 * We know that the size is valid (it's checked in iformat_btree)
 921	 */
 922	ifp->if_bytes = ifp->if_real_bytes = 0;
 923	ifp->if_flags |= XFS_IFEXTENTS;
 924	xfs_iext_add(ifp, 0, nextents);
 925	error = xfs_bmap_read_extents(tp, ip, whichfork);
 926	if (error) {
 927		xfs_iext_destroy(ifp);
 928		ifp->if_flags &= ~XFS_IFEXTENTS;
 929		return error;
 930	}
 931	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
 932	return 0;
 933}
 934
 935/*
 936 * Allocate an inode on disk and return a copy of its in-core version.
 937 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 938 * appropriately within the inode.  The uid and gid for the inode are
 939 * set according to the contents of the given cred structure.
 940 *
 941 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 942 * has a free inode available, call xfs_iget()
 943 * to obtain the in-core version of the allocated inode.  Finally,
 944 * fill in the inode and log its initial contents.  In this case,
 945 * ialloc_context would be set to NULL and call_again set to false.
 946 *
 947 * If xfs_dialloc() does not have an available inode,
 948 * it will replenish its supply by doing an allocation. Since we can
 949 * only do one allocation within a transaction without deadlocks, we
 950 * must commit the current transaction before returning the inode itself.
 951 * In this case, therefore, we will set call_again to true and return.
 952 * The caller should then commit the current transaction, start a new
 953 * transaction, and call xfs_ialloc() again to actually get the inode.
 954 *
 955 * To ensure that some other process does not grab the inode that
 956 * was allocated during the first call to xfs_ialloc(), this routine
 957 * also returns the [locked] bp pointing to the head of the freelist
 958 * as ialloc_context.  The caller should hold this buffer across
 959 * the commit and pass it back into this routine on the second call.
 960 *
 961 * If we are allocating quota inodes, we do not have a parent inode
 962 * to attach to or associate with (i.e. pip == NULL) because they
 963 * are not linked into the directory structure - they are attached
 964 * directly to the superblock - and so have no parent.
 965 */
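/*
 * Illustrative caller sketch (an editorial addition, not part of the original
 * source): the two-phase protocol described above is typically driven along
 * the following lines.  Everything outside the xfs_ialloc() calls is a hedged
 * outline rather than a real call site; see the actual callers (e.g.
 * xfs_dir_ialloc()) for the full error handling and transaction rolling.
 *
 *	error = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
 *			   &ialloc_context, &call_again, &ip);
 *	if (!error && call_again) {
 *		xfs_trans_bhold(tp, ialloc_context);
 *		... commit tp, allocate and reserve a fresh transaction ...
 *		xfs_trans_bjoin(tp, ialloc_context);
 *		error = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
 *				   &ialloc_context, &call_again, &ip);
 *	}
 */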
 966int
 967xfs_ialloc(
 968	xfs_trans_t	*tp,
 969	xfs_inode_t	*pip,
 970	mode_t		mode,
 971	xfs_nlink_t	nlink,
 972	xfs_dev_t	rdev,
 973	prid_t		prid,
 974	int		okalloc,
 975	xfs_buf_t	**ialloc_context,
 976	boolean_t	*call_again,
 977	xfs_inode_t	**ipp)
 978{
 979	xfs_ino_t	ino;
 980	xfs_inode_t	*ip;
 981	uint		flags;
 982	int		error;
 983	timespec_t	tv;
 984	int		filestreams = 0;
 985
 986	/*
 987	 * Call the space management code to pick
 988	 * the on-disk inode to be allocated.
 989	 */
 990	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
 991			    ialloc_context, call_again, &ino);
 992	if (error)
 993		return error;
 994	if (*call_again || ino == NULLFSINO) {
 995		*ipp = NULL;
 996		return 0;
 997	}
 998	ASSERT(*ialloc_context == NULL);
 999
1000	/*
1001	 * Get the in-core inode with the lock held exclusively.
1002	 * This is because we're setting fields here we need
1003	 * to prevent others from looking at until we're done.
1004	 */
1005	error = xfs_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE,
1006			 XFS_ILOCK_EXCL, &ip);
1007	if (error)
1008		return error;
1009	ASSERT(ip != NULL);
1010
1011	ip->i_d.di_mode = (__uint16_t)mode;
1012	ip->i_d.di_onlink = 0;
1013	ip->i_d.di_nlink = nlink;
1014	ASSERT(ip->i_d.di_nlink == nlink);
1015	ip->i_d.di_uid = current_fsuid();
1016	ip->i_d.di_gid = current_fsgid();
1017	xfs_set_projid(ip, prid);
1018	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
1019
1020	/*
1021	 * If the superblock version is up to where we support new format
1022	 * inodes and this is currently an old format inode, then change
1023	 * the inode version number now.  This way we only do the conversion
1024	 * here rather than here and in the flush/logging code.
1025	 */
1026	if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
1027	    ip->i_d.di_version == 1) {
1028		ip->i_d.di_version = 2;
1029		/*
1030		 * We've already zeroed the old link count, the projid field,
1031		 * and the pad field.
1032		 */
1033	}
1034
1035	/*
1036	 * Project ids won't be stored on disk if we are using a version 1 inode.
1037	 */
1038	if ((prid != 0) && (ip->i_d.di_version == 1))
1039		xfs_bump_ino_vers2(tp, ip);
1040
1041	if (pip && XFS_INHERIT_GID(pip)) {
1042		ip->i_d.di_gid = pip->i_d.di_gid;
1043		if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
1044			ip->i_d.di_mode |= S_ISGID;
1045		}
1046	}
1047
1048	/*
1049	 * If the group ID of the new file does not match the effective group
1050	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
1051	 * (and only if the irix_sgid_inherit compatibility variable is set).
1052	 */
1053	if ((irix_sgid_inherit) &&
1054	    (ip->i_d.di_mode & S_ISGID) &&
1055	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
1056		ip->i_d.di_mode &= ~S_ISGID;
1057	}
1058
1059	ip->i_d.di_size = 0;
1060	ip->i_size = 0;
1061	ip->i_d.di_nextents = 0;
1062	ASSERT(ip->i_d.di_nblocks == 0);
1063
1064	nanotime(&tv);
1065	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
1066	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
1067	ip->i_d.di_atime = ip->i_d.di_mtime;
1068	ip->i_d.di_ctime = ip->i_d.di_mtime;
1069
1070	/*
1071	 * di_gen will have been taken care of in xfs_iread.
1072	 */
1073	ip->i_d.di_extsize = 0;
1074	ip->i_d.di_dmevmask = 0;
1075	ip->i_d.di_dmstate = 0;
1076	ip->i_d.di_flags = 0;
1077	flags = XFS_ILOG_CORE;
1078	switch (mode & S_IFMT) {
1079	case S_IFIFO:
1080	case S_IFCHR:
1081	case S_IFBLK:
1082	case S_IFSOCK:
1083		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
1084		ip->i_df.if_u2.if_rdev = rdev;
1085		ip->i_df.if_flags = 0;
1086		flags |= XFS_ILOG_DEV;
1087		break;
1088	case S_IFREG:
1089		/*
1090		 * we can't set up filestreams until after the VFS inode
1091		 * is set up properly.
1092		 */
1093		if (pip && xfs_inode_is_filestream(pip))
1094			filestreams = 1;
1095		/* fall through */
1096	case S_IFDIR:
1097		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
1098			uint	di_flags = 0;
1099
1100			if (S_ISDIR(mode)) {
1101				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1102					di_flags |= XFS_DIFLAG_RTINHERIT;
1103				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1104					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1105					ip->i_d.di_extsize = pip->i_d.di_extsize;
1106				}
1107			} else if (S_ISREG(mode)) {
1108				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1109					di_flags |= XFS_DIFLAG_REALTIME;
1110				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1111					di_flags |= XFS_DIFLAG_EXTSIZE;
1112					ip->i_d.di_extsize = pip->i_d.di_extsize;
1113				}
1114			}
1115			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
1116			    xfs_inherit_noatime)
1117				di_flags |= XFS_DIFLAG_NOATIME;
1118			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
1119			    xfs_inherit_nodump)
1120				di_flags |= XFS_DIFLAG_NODUMP;
1121			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
1122			    xfs_inherit_sync)
1123				di_flags |= XFS_DIFLAG_SYNC;
1124			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
1125			    xfs_inherit_nosymlinks)
1126				di_flags |= XFS_DIFLAG_NOSYMLINKS;
1127			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1128				di_flags |= XFS_DIFLAG_PROJINHERIT;
1129			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
1130			    xfs_inherit_nodefrag)
1131				di_flags |= XFS_DIFLAG_NODEFRAG;
1132			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
1133				di_flags |= XFS_DIFLAG_FILESTREAM;
1134			ip->i_d.di_flags |= di_flags;
1135		}
1136		/* FALLTHROUGH */
1137	case S_IFLNK:
1138		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
1139		ip->i_df.if_flags = XFS_IFEXTENTS;
1140		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
1141		ip->i_df.if_u1.if_extents = NULL;
1142		break;
1143	default:
1144		ASSERT(0);
1145	}
1146	/*
1147	 * Attribute fork settings for new inode.
1148	 */
1149	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1150	ip->i_d.di_anextents = 0;
1151
1152	/*
1153	 * Log the new values stuffed into the inode.
1154	 */
1155	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
1156	xfs_trans_log_inode(tp, ip, flags);
1157
1158	/* now that we have an i_mode we can setup inode ops and unlock */
1159	xfs_setup_inode(ip);
1160
1161	/* now we have set up the vfs inode we can associate the filestream */
1162	if (filestreams) {
1163		error = xfs_filestream_associate(pip, ip);
1164		if (error < 0)
1165			return -error;
1166		if (!error)
1167			xfs_iflags_set(ip, XFS_IFILESTREAM);
1168	}
1169
1170	*ipp = ip;
1171	return 0;
1172}
1173
1174/*
1175 * Check to make sure that there are no blocks allocated to the
1176 * file beyond the size of the file.  We don't check this for
1177 * files with fixed size extents or real time extents, but we
1178 * at least do it for regular files.
1179 */
1180#ifdef DEBUG
1181STATIC void
1182xfs_isize_check(
1183	struct xfs_inode	*ip,
1184	xfs_fsize_t		isize)
1185{
1186	struct xfs_mount	*mp = ip->i_mount;
1187	xfs_fileoff_t		map_first;
1188	int			nimaps;
1189	xfs_bmbt_irec_t		imaps[2];
1190
1191	if (!S_ISREG(ip->i_d.di_mode))
1192		return;
1193
1194	if (XFS_IS_REALTIME_INODE(ip))
1195		return;
1196
1197	if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
1198		return;
1199
1200	nimaps = 2;
1201	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
1202	/*
1203	 * The filesystem could be shutting down, so bmapi may return
1204	 * an error.
1205	 */
1206	if (xfs_bmapi(NULL, ip, map_first,
1207			 (XFS_B_TO_FSB(mp,
1208				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
1209			  map_first),
1210			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
1211			 NULL))
1212	    return;
1213	ASSERT(nimaps == 1);
1214	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
1215}
1216#else	/* DEBUG */
1217#define xfs_isize_check(ip, isize)
1218#endif	/* DEBUG */
1219
1220/*
1221 * Free up the underlying blocks past new_size.  The new size must be smaller
1222 * than the current size.  This routine can be used both for the attribute and
1223 * data fork, and does not modify the inode size, which is left to the caller.
1224 *
1225 * The transaction passed to this routine must have made a permanent log
1226 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1227 * given transaction and start new ones, so make sure everything involved in
 1228 * the transaction is tidy before calling here.  A transaction will be
1229 * returned to the caller to be committed.  The incoming transaction must
1230 * already include the inode, and both inode locks must be held exclusively.
1231 * The inode must also be "held" within the transaction.  On return the inode
1232 * will be "held" within the returned transaction.  This routine does NOT
1233 * require any disk space to be reserved for it within the transaction.
1234 *
1235 * If we get an error, we must return with the inode locked and linked into the
1236 * current transaction. This keeps things simple for the higher level code,
1237 * because it always knows that the inode is locked and held in the transaction
1238 * that returns to it whether errors occur or not.  We don't mark the inode
1239 * dirty on error so that transactions can be easily aborted if possible.
1240 */
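/*
 * Illustrative caller sketch (an editorial addition, not part of the original
 * source): the contract above amounts to roughly the following sequence.  The
 * transaction type and the zero new_size are placeholders; real call sites
 * (e.g. xfs_itruncate_data() later in this file) differ in detail.
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
 *	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
 *				  XFS_TRANS_PERM_LOG_RES,
 *				  XFS_ITRUNCATE_LOG_COUNT);
 *	xfs_trans_ijoin(tp, ip);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
 *	... commit or cancel the transaction returned in tp, then unlock ...
 */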
1241int
1242xfs_itruncate_extents(
1243	struct xfs_trans	**tpp,
1244	struct xfs_inode	*ip,
1245	int			whichfork,
1246	xfs_fsize_t		new_size)
1247{
1248	struct xfs_mount	*mp = ip->i_mount;
1249	struct xfs_trans	*tp = *tpp;
1250	struct xfs_trans	*ntp;
1251	xfs_bmap_free_t		free_list;
1252	xfs_fsblock_t		first_block;
1253	xfs_fileoff_t		first_unmap_block;
1254	xfs_fileoff_t		last_block;
1255	xfs_filblks_t		unmap_len;
1256	int			committed;
1257	int			error = 0;
1258	int			done = 0;
1259
1260	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
1261	ASSERT(new_size <= ip->i_size);
1262	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1263	ASSERT(ip->i_itemp != NULL);
1264	ASSERT(ip->i_itemp->ili_lock_flags == 0);
1265	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1266
1267	/*
1268	 * Since it is possible for space to become allocated beyond
1269	 * the end of the file (in a crash where the space is allocated
1270	 * but the inode size is not yet updated), simply remove any
1271	 * blocks which show up between the new EOF and the maximum
1272	 * possible file size.  If the first block to be removed is
1273	 * beyond the maximum file size (ie it is the same as last_block),
1274	 * then there is nothing to do.
1275	 */
1276	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1277	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
1278	if (first_unmap_block == last_block)
1279		return 0;
1280
1281	ASSERT(first_unmap_block < last_block);
1282	unmap_len = last_block - first_unmap_block + 1;
1283	while (!done) {
1284		xfs_bmap_init(&free_list, &first_block);
1285		error = xfs_bunmapi(tp, ip,
1286				    first_unmap_block, unmap_len,
1287				    xfs_bmapi_aflag(whichfork),
1288				    XFS_ITRUNC_MAX_EXTENTS,
1289				    &first_block, &free_list,
1290				    &done);
1291		if (error)
1292			goto out_bmap_cancel;
1293
1294		/*
1295		 * Duplicate the transaction that has the permanent
1296		 * reservation and commit the old transaction.
1297		 */
1298		error = xfs_bmap_finish(&tp, &free_list, &committed);
1299		if (committed)
1300			xfs_trans_ijoin(tp, ip);
1301		if (error)
1302			goto out_bmap_cancel;
1303
1304		if (committed) {
1305			/*
1306			 * Mark the inode dirty so it will be logged and
1307			 * moved forward in the log as part of every commit.
1308			 */
1309			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1310		}
1311
1312		ntp = xfs_trans_dup(tp);
1313		error = xfs_trans_commit(tp, 0);
1314		tp = ntp;
1315
1316		xfs_trans_ijoin(tp, ip);
1317
1318		if (error)
1319			goto out;
1320
1321		/*
1322		 * Transaction commit worked ok so we can drop the extra ticket
1323		 * reference that we gained in xfs_trans_dup()
1324		 */
1325		xfs_log_ticket_put(tp->t_ticket);
1326		error = xfs_trans_reserve(tp, 0,
1327					XFS_ITRUNCATE_LOG_RES(mp), 0,
1328					XFS_TRANS_PERM_LOG_RES,
1329					XFS_ITRUNCATE_LOG_COUNT);
1330		if (error)
1331			goto out;
1332	}
1333
1334out:
1335	*tpp = tp;
1336	return error;
1337out_bmap_cancel:
1338	/*
1339	 * If the bunmapi call encounters an error, return to the caller where
1340	 * the transaction can be properly aborted.  We just need to make sure
1341	 * we're not holding any resources that we were not when we came in.
1342	 */
1343	xfs_bmap_cancel(&free_list);
1344	goto out;
1345}
1346
1347int
1348xfs_itruncate_data(
1349	struct xfs_trans	**tpp,
1350	struct xfs_inode	*ip,
1351	xfs_fsize_t		new_size)
1352{
1353	int			error;
1354
1355	trace_xfs_itruncate_data_start(ip, new_size);
1356
1357	/*
1358	 * The first thing we do is set the size to new_size permanently on
1359	 * disk.  This way we don't have to worry about anyone ever being able
1360	 * to look at the data being freed even in the face of a crash.
1361	 * What we're getting around here is the case where we free a block, it
1362	 * is allocated to another file, it is written to, and then we crash.
1363	 * If the new data gets written to the file but the log buffers
1364	 * containing the free and reallocation don't, then we'd end up with
1365	 * garbage in the blocks being freed.  As long as we make the new_size
1366	 * permanent before actually freeing any blocks it doesn't matter if
1367	 * they get written to.
1368	 */
1369	if (ip->i_d.di_nextents > 0) {
1370		/*
1371		 * If we are not changing the file size then do not update
1372		 * the on-disk file size - we may be called from
1373		 * xfs_inactive_free_eofblocks().  If we update the on-disk
1374		 * file size and then the system crashes before the contents
1375		 * of the file are flushed to disk then the files may be
1376		 * full of holes (ie NULL files bug).
1377		 */
1378		if (ip->i_size != new_size) {
1379			ip->i_d.di_size = new_size;
1380			ip->i_size = new_size;
1381			xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
1382		}
1383	}
1384
1385	error = xfs_itruncate_extents(tpp, ip, XFS_DATA_FORK, new_size);
1386	if (error)
1387		return error;
1388
1389	/*
1390	 * If we are not changing the file size then do not update the on-disk
1391	 * file size - we may be called from xfs_inactive_free_eofblocks().
1392	 * If we update the on-disk file size and then the system crashes
1393	 * before the contents of the file are flushed to disk then the files
1394	 * may be full of holes (ie NULL files bug).
1395	 */
1396	xfs_isize_check(ip, new_size);
1397	if (ip->i_size != new_size) {
1398		ip->i_d.di_size = new_size;
1399		ip->i_size = new_size;
1400	}
1401
1402	ASSERT(new_size != 0 || ip->i_delayed_blks == 0);
1403	ASSERT(new_size != 0 || ip->i_d.di_nextents == 0);
1404
1405	/*
1406	 * Always re-log the inode so that our permanent transaction can keep
1407	 * on rolling it forward in the log.
1408	 */
1409	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
1410
1411	trace_xfs_itruncate_data_end(ip, new_size);
1412	return 0;
1413}
1414
1415/*
1416 * This is called when the inode's link count goes to 0.
1417 * We place the on-disk inode on a list in the AGI.  It
1418 * will be pulled from this list when the inode is freed.
1419 */
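/*
 * Illustrative layout sketch (an editorial addition, not part of the original
 * source): each AGI holds XFS_AGI_UNLINKED_BUCKETS singly linked lists whose
 * heads live in agi_unlinked[] and whose links are threaded through the
 * on-disk inodes' di_next_unlinked fields, roughly:
 *
 *	agi_unlinked[agino % XFS_AGI_UNLINKED_BUCKETS]
 *	    -> inode A: di_next_unlinked
 *	    -> inode B: di_next_unlinked
 *	    -> NULLAGINO
 *
 * xfs_iunlink() below inserts the inode at the head of its bucket, and
 * xfs_iunlink_remove() later walks the chain (via xfs_inotobp()) when the
 * inode being freed is not at the head.
 */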
1420int
1421xfs_iunlink(
1422	xfs_trans_t	*tp,
1423	xfs_inode_t	*ip)
1424{
1425	xfs_mount_t	*mp;
1426	xfs_agi_t	*agi;
1427	xfs_dinode_t	*dip;
1428	xfs_buf_t	*agibp;
1429	xfs_buf_t	*ibp;
1430	xfs_agino_t	agino;
1431	short		bucket_index;
1432	int		offset;
1433	int		error;
1434
1435	ASSERT(ip->i_d.di_nlink == 0);
1436	ASSERT(ip->i_d.di_mode != 0);
1437
1438	mp = tp->t_mountp;
1439
1440	/*
1441	 * Get the agi buffer first.  It ensures lock ordering
1442	 * on the list.
1443	 */
1444	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
1445	if (error)
1446		return error;
1447	agi = XFS_BUF_TO_AGI(agibp);
1448
1449	/*
1450	 * Get the index into the agi hash table for the
1451	 * list this inode will go on.
1452	 */
1453	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1454	ASSERT(agino != 0);
1455	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1456	ASSERT(agi->agi_unlinked[bucket_index]);
1457	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1458
1459	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
1460		/*
1461		 * There is already another inode in the bucket we need
1462		 * to add ourselves to.  Add us at the front of the list.
1463		 * Here we put the head pointer into our next pointer,
1464		 * and then we fall through to point the head at us.
1465		 */
1466		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
1467		if (error)
1468			return error;
1469
1470		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
1471		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1472		offset = ip->i_imap.im_boffset +
1473			offsetof(xfs_dinode_t, di_next_unlinked);
1474		xfs_trans_inode_buf(tp, ibp);
1475		xfs_trans_log_buf(tp, ibp, offset,
1476				  (offset + sizeof(xfs_agino_t) - 1));
1477		xfs_inobp_check(mp, ibp);
1478	}
1479
1480	/*
1481	 * Point the bucket head pointer at the inode being inserted.
1482	 */
1483	ASSERT(agino != 0);
1484	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1485	offset = offsetof(xfs_agi_t, agi_unlinked) +
1486		(sizeof(xfs_agino_t) * bucket_index);
1487	xfs_trans_log_buf(tp, agibp, offset,
1488			  (offset + sizeof(xfs_agino_t) - 1));
1489	return 0;
1490}
1491
1492/*
1493 * Pull the on-disk inode from the AGI unlinked list.
1494 */
1495STATIC int
1496xfs_iunlink_remove(
1497	xfs_trans_t	*tp,
1498	xfs_inode_t	*ip)
1499{
1500	xfs_ino_t	next_ino;
1501	xfs_mount_t	*mp;
1502	xfs_agi_t	*agi;
1503	xfs_dinode_t	*dip;
1504	xfs_buf_t	*agibp;
1505	xfs_buf_t	*ibp;
1506	xfs_agnumber_t	agno;
1507	xfs_agino_t	agino;
1508	xfs_agino_t	next_agino;
1509	xfs_buf_t	*last_ibp;
1510	xfs_dinode_t	*last_dip = NULL;
1511	short		bucket_index;
1512	int		offset, last_offset = 0;
1513	int		error;
1514
1515	mp = tp->t_mountp;
1516	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1517
1518	/*
1519	 * Get the agi buffer first.  It ensures lock ordering
1520	 * on the list.
1521	 */
1522	error = xfs_read_agi(mp, tp, agno, &agibp);
1523	if (error)
1524		return error;
1525
1526	agi = XFS_BUF_TO_AGI(agibp);
1527
1528	/*
1529	 * Get the index into the agi hash table for the
1530	 * list this inode will go on.
1531	 */
1532	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1533	ASSERT(agino != 0);
1534	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1535	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
1536	ASSERT(agi->agi_unlinked[bucket_index]);
1537
1538	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
1539		/*
1540		 * We're at the head of the list.  Get the inode's
1541		 * on-disk buffer to see if there is anyone after us
1542		 * on the list.  Only modify our next pointer if it
1543		 * is not already NULLAGINO.  This saves us the overhead
1544		 * of dealing with the buffer when there is no need to
1545		 * change it.
1546		 */
1547		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
1548		if (error) {
1549			xfs_warn(mp, "%s: xfs_itobp() returned error %d.",
1550				__func__, error);
1551			return error;
1552		}
1553		next_agino = be32_to_cpu(dip->di_next_unlinked);
1554		ASSERT(next_agino != 0);
1555		if (next_agino != NULLAGINO) {
1556			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1557			offset = ip->i_imap.im_boffset +
1558				offsetof(xfs_dinode_t, di_next_unlinked);
1559			xfs_trans_inode_buf(tp, ibp);
1560			xfs_trans_log_buf(tp, ibp, offset,
1561					  (offset + sizeof(xfs_agino_t) - 1));
1562			xfs_inobp_check(mp, ibp);
1563		} else {
1564			xfs_trans_brelse(tp, ibp);
1565		}
1566		/*
1567		 * Point the bucket head pointer at the next inode.
1568		 */
1569		ASSERT(next_agino != 0);
1570		ASSERT(next_agino != agino);
1571		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
1572		offset = offsetof(xfs_agi_t, agi_unlinked) +
1573			(sizeof(xfs_agino_t) * bucket_index);
1574		xfs_trans_log_buf(tp, agibp, offset,
1575				  (offset + sizeof(xfs_agino_t) - 1));
1576	} else {
1577		/*
1578		 * We need to search the list for the inode being freed.
1579		 */
1580		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1581		last_ibp = NULL;
1582		while (next_agino != agino) {
1583			/*
1584			 * If the last inode wasn't the one pointing to
1585			 * us, then release its buffer since we're not
1586			 * going to do anything with it.
1587			 */
1588			if (last_ibp != NULL) {
1589				xfs_trans_brelse(tp, last_ibp);
1590			}
1591			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
1592			error = xfs_inotobp(mp, tp, next_ino, &last_dip,
1593					    &last_ibp, &last_offset, 0);
1594			if (error) {
1595				xfs_warn(mp,
1596					"%s: xfs_inotobp() returned error %d.",
1597					__func__, error);
1598				return error;
1599			}
1600			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
1601			ASSERT(next_agino != NULLAGINO);
1602			ASSERT(next_agino != 0);
1603		}
1604		/*
1605		 * Now last_ibp points to the buffer previous to us on
1606		 * the unlinked list.  Pull us from the list.
1607		 */
1608		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
1609		if (error) {
1610			xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.",
1611				__func__, error);
1612			return error;
1613		}
1614		next_agino = be32_to_cpu(dip->di_next_unlinked);
1615		ASSERT(next_agino != 0);
1616		ASSERT(next_agino != agino);
1617		if (next_agino != NULLAGINO) {
1618			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1619			offset = ip->i_imap.im_boffset +
1620				offsetof(xfs_dinode_t, di_next_unlinked);
1621			xfs_trans_inode_buf(tp, ibp);
1622			xfs_trans_log_buf(tp, ibp, offset,
1623					  (offset + sizeof(xfs_agino_t) - 1));
1624			xfs_inobp_check(mp, ibp);
1625		} else {
1626			xfs_trans_brelse(tp, ibp);
1627		}
1628		/*
1629		 * Point the previous inode on the list to the next inode.
1630		 */
1631		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
1632		ASSERT(next_agino != 0);
1633		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
1634		xfs_trans_inode_buf(tp, last_ibp);
1635		xfs_trans_log_buf(tp, last_ibp, offset,
1636				  (offset + sizeof(xfs_agino_t) - 1));
1637		xfs_inobp_check(mp, last_ibp);
1638	}
1639	return 0;
1640}
1641
1642/*
 1643 * A big issue when freeing the inode cluster is that we _cannot_ skip any
1644 * inodes that are in memory - they all must be marked stale and attached to
1645 * the cluster buffer.
1646 */
1647STATIC void
1648xfs_ifree_cluster(
1649	xfs_inode_t	*free_ip,
1650	xfs_trans_t	*tp,
1651	xfs_ino_t	inum)
1652{
1653	xfs_mount_t		*mp = free_ip->i_mount;
1654	int			blks_per_cluster;
1655	int			nbufs;
1656	int			ninodes;
1657	int			i, j;
1658	xfs_daddr_t		blkno;
1659	xfs_buf_t		*bp;
1660	xfs_inode_t		*ip;
1661	xfs_inode_log_item_t	*iip;
1662	xfs_log_item_t		*lip;
1663	struct xfs_perag	*pag;
1664
1665	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
1666	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
1667		blks_per_cluster = 1;
1668		ninodes = mp->m_sb.sb_inopblock;
1669		nbufs = XFS_IALLOC_BLOCKS(mp);
1670	} else {
1671		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
1672					mp->m_sb.sb_blocksize;
1673		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
1674		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
1675	}
1676
1677	for (j = 0; j < nbufs; j++, inum += ninodes) {
1678		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
1679					 XFS_INO_TO_AGBNO(mp, inum));
1680
1681		/*
1682		 * We obtain and lock the backing buffer first in the process
1683		 * here, as we have to ensure that any dirty inode that we
1684		 * can't get the flush lock on is attached to the buffer.
1685		 * If we scan the in-memory inodes first, then buffer IO can
1686		 * complete before we get a lock on it, and hence we may fail
1687		 * to mark all the active inodes on the buffer stale.
1688		 */
1689		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
1690					mp->m_bsize * blks_per_cluster,
1691					XBF_LOCK);
1692
1693		/*
1694		 * Walk the inodes already attached to the buffer and mark them
1695		 * stale. These will all have the flush locks held, so an
1696		 * in-memory inode walk can't lock them. By marking them all
1697		 * stale first, we will not attempt to lock them in the loop
1698		 * below as the XFS_ISTALE flag will be set.
1699		 */
1700		lip = bp->b_fspriv;
1701		while (lip) {
1702			if (lip->li_type == XFS_LI_INODE) {
1703				iip = (xfs_inode_log_item_t *)lip;
1704				ASSERT(iip->ili_logged == 1);
1705				lip->li_cb = xfs_istale_done;
1706				xfs_trans_ail_copy_lsn(mp->m_ail,
1707							&iip->ili_flush_lsn,
1708							&iip->ili_item.li_lsn);
1709				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
1710			}
1711			lip = lip->li_bio_list;
1712		}
1713
1714
1715		/*
1716		 * For each inode in memory attempt to add it to the inode
1717		 * buffer and set it up for being staled on buffer IO
1718		 * completion.  This is safe as we've locked out tail pushing
1719		 * and flushing by locking the buffer.
1720		 *
1721		 * We have already marked every inode that was part of a
1722		 * transaction stale above, which means there is no point in
1723		 * even trying to lock them.
1724		 */
1725		for (i = 0; i < ninodes; i++) {
1726retry:
1727			rcu_read_lock();
1728			ip = radix_tree_lookup(&pag->pag_ici_root,
1729					XFS_INO_TO_AGINO(mp, (inum + i)));
1730
1731			/* Inode not in memory, nothing to do */
1732			if (!ip) {
1733				rcu_read_unlock();
1734				continue;
1735			}
1736
1737			/*
1738			 * because this is an RCU protected lookup, we could
1739			 * find a recently freed or even reallocated inode
1740			 * during the lookup. We need to check under the
1741			 * i_flags_lock for a valid inode here. Skip it if it
1742			 * is not valid, the wrong inode or stale.
1743			 */
1744			spin_lock(&ip->i_flags_lock);
1745			if (ip->i_ino != inum + i ||
1746			    __xfs_iflags_test(ip, XFS_ISTALE)) {
1747				spin_unlock(&ip->i_flags_lock);
1748				rcu_read_unlock();
1749				continue;
1750			}
1751			spin_unlock(&ip->i_flags_lock);
1752
1753			/*
1754			 * Don't try to lock/unlock the current inode, but we
1755			 * _cannot_ skip the other inodes that we did not find
1756			 * in the list attached to the buffer and are not
1757			 * already marked stale. If we can't lock it, back off
1758			 * and retry.
1759			 */
1760			if (ip != free_ip &&
1761			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
1762				rcu_read_unlock();
1763				delay(1);
1764				goto retry;
1765			}
1766			rcu_read_unlock();
1767
1768			xfs_iflock(ip);
1769			xfs_iflags_set(ip, XFS_ISTALE);
1770
1771			/*
1772			 * we don't need to attach clean inodes or those only
1773			 * with unlogged changes (which we throw away, anyway).
1774			 */
1775			iip = ip->i_itemp;
1776			if (!iip || xfs_inode_clean(ip)) {
1777				ASSERT(ip != free_ip);
1778				ip->i_update_core = 0;
1779				xfs_ifunlock(ip);
1780				xfs_iunlock(ip, XFS_ILOCK_EXCL);
1781				continue;
1782			}
1783
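			/*
			 * The inode is dirty and part of a transaction:
			 * record its logged fields as flushed, note the
			 * flush lsn, and attach xfs_istale_done so the inode
			 * item is pulled from the AIL when the (invalidated)
			 * buffer "completes".
			 */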
1784			iip->ili_last_fields = iip->ili_format.ilf_fields;
1785			iip->ili_format.ilf_fields = 0;
1786			iip->ili_logged = 1;
1787			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
1788						&iip->ili_item.li_lsn);
1789
1790			xfs_buf_attach_iodone(bp, xfs_istale_done,
1791						  &iip->ili_item);
1792
1793			if (ip != free_ip)
1794				xfs_iunlock(ip, XFS_ILOCK_EXCL);
1795		}
1796
1797		xfs_trans_stale_inode_buf(tp, bp);
1798		xfs_trans_binval(tp, bp);
1799	}
1800
1801	xfs_perag_put(pag);
1802}
1803
1804/*
1805 * This is called to return an inode to the inode free list.
1806 * The inode should already be truncated to 0 length and have
1807 * no pages associated with it.  This routine also assumes that
1808 * the inode is already a part of the transaction.
1809 *
1810 * The on-disk copy of the inode will have been added to the list
1811 * of unlinked inodes in the AGI. We need to remove the inode from
1812 * that list atomically with respect to freeing it here.
1813 */
1814int
1815xfs_ifree(
1816	xfs_trans_t	*tp,
1817	xfs_inode_t	*ip,
1818	xfs_bmap_free_t	*flist)
1819{
1820	int			error;
1821	int			delete;
1822	xfs_ino_t		first_ino;
1823	xfs_dinode_t    	*dip;
1824	xfs_buf_t       	*ibp;
1825
1826	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1827	ASSERT(ip->i_d.di_nlink == 0);
1828	ASSERT(ip->i_d.di_nextents == 0);
1829	ASSERT(ip->i_d.di_anextents == 0);
1830	ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
1831	       (!S_ISREG(ip->i_d.di_mode)));
1832	ASSERT(ip->i_d.di_nblocks == 0);
1833
1834	/*
1835	 * Pull the on-disk inode from the AGI unlinked list.
1836	 */
1837	error = xfs_iunlink_remove(tp, ip);
1838	if (error != 0) {
1839		return error;
1840	}
1841
1842	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
1843	if (error != 0) {
1844		return error;
1845	}
1846	ip->i_d.di_mode = 0;		/* mark incore inode as free */
1847	ip->i_d.di_flags = 0;
1848	ip->i_d.di_dmevmask = 0;
1849	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
1850	ip->i_df.if_ext_max =
1851		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
1852	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
1853	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1854	/*
1855	 * Bump the generation count so no one will be confused
1856	 * by reincarnations of this inode.
1857	 */
1858	ip->i_d.di_gen++;
1859
1860	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1861
1862	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK);
1863	if (error)
1864		return error;
1865
1866	/*
1867	 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat
1868	 * from picking up this inode when it is reclaimed (its incore state
1869	 * initialized but not flushed to disk yet). The in-core di_mode is
1870	 * already cleared and a corresponding transaction logged.
1871	 * The hack here just synchronizes the in-core to on-disk
1872	 * di_mode value in advance before the actual inode sync to disk.
1873	 * This is OK because the inode is already unlinked and would never
1874	 * change its di_mode again for this inode generation.
1875	 * This is a temporary hack that would require a proper fix
1876	 * in the future.
1877	 */
1878	dip->di_mode = 0;
1879
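	/*
	 * xfs_difree() set delete when this was the last allocated inode in
	 * its chunk; the whole chunk of on-disk inodes starting at first_ino
	 * is now being freed, so every cached inode in it must be staled.
	 */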
1880	if (delete) {
1881		xfs_ifree_cluster(ip, tp, first_ino);
1882	}
1883
1884	return 0;
1885}
1886
1887/*
1888 * Reallocate the space for if_broot based on the number of records
1889 * being added or deleted as indicated in rec_diff.  Move the records
1890 * and pointers in if_broot to fit the new size.  When shrinking this
1891 * will eliminate holes between the records and pointers created by
1892 * the caller.  When growing this will create holes to be filled in
1893 * by the caller.
1894 *
1895 * The caller must not request to add more records than would fit in
1896 * the on-disk inode root.  If the if_broot is currently NULL, then
1897 * if we are adding records one will be allocated.  The caller must also
1898 * not request that the number of records go below zero, although
1899 * it can go to zero.
1900 *
1901 * ip -- the inode whose if_broot area is changing
1902 * ext_diff -- the change in the number of records, positive or negative,
1903 *	 requested for the if_broot array.
1904 */
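/*
 * Illustrative use only: when an extents format fork is converted to btree
 * format, the new root is first grown to hold a single record with
 * xfs_iroot_realloc(ip, 1, whichfork); deleting records from the root later
 * shrinks it again with a negative rec_diff.
 */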
1905void
1906xfs_iroot_realloc(
1907	xfs_inode_t		*ip,
1908	int			rec_diff,
1909	int			whichfork)
1910{
1911	struct xfs_mount	*mp = ip->i_mount;
1912	int			cur_max;
1913	xfs_ifork_t		*ifp;
1914	struct xfs_btree_block	*new_broot;
1915	int			new_max;
1916	size_t			new_size;
1917	char			*np;
1918	char			*op;
1919
1920	/*
1921	 * Handle the degenerate case quietly.
1922	 */
1923	if (rec_diff == 0) {
1924		return;
1925	}
1926
1927	ifp = XFS_IFORK_PTR(ip, whichfork);
1928	if (rec_diff > 0) {
1929		/*
1930		 * If there wasn't any memory allocated before, just
1931		 * allocate it now and get out.
1932		 */
1933		if (ifp->if_broot_bytes == 0) {
1934			new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
1935			ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
1936			ifp->if_broot_bytes = (int)new_size;
1937			return;
1938		}
1939
1940		/*
1941		 * If there is already an existing if_broot, then we need
1942		 * to realloc() it and shift the pointers to their new
1943		 * location.  The records don't change location because
1944		 * they are kept butted up against the btree block header.
1945		 */
1946		cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
1947		new_max = cur_max + rec_diff;
1948		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
1949		ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
1950				(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
1951				KM_SLEEP | KM_NOFS);
1952		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
1953						     ifp->if_broot_bytes);
1954		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
1955						     (int)new_size);
1956		ifp->if_broot_bytes = (int)new_size;
1957		ASSERT(ifp->if_broot_bytes <=
1958			XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
1959		memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
1960		return;
1961	}
1962
1963	/*
1964	 * rec_diff is less than 0.  In this case, we are shrinking the
1965	 * if_broot buffer.  It must already exist.  If we go to zero
1966	 * records, just get rid of the root and clear the status bit.
1967	 */
1968	ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
1969	cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
1970	new_max = cur_max + rec_diff;
1971	ASSERT(new_max >= 0);
1972	if (new_max > 0)
1973		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
1974	else
1975		new_size = 0;
1976	if (new_size > 0) {
1977		new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
1978		/*
1979		 * First copy over the btree block header.
1980		 */
1981		memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN);
1982	} else {
1983		new_broot = NULL;
1984		ifp->if_flags &= ~XFS_IFBROOT;
1985	}
1986
1987	/*
1988	 * Only copy the records and pointers if there are any.
1989	 */
1990	if (new_max > 0) {
1991		/*
1992		 * First copy the records.
1993		 */
1994		op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
1995		np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
1996		memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
1997
1998		/*
1999		 * Then copy the pointers.
2000		 */
2001		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2002						     ifp->if_broot_bytes);
2003		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
2004						     (int)new_size);
2005		memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2006	}
2007	kmem_free(ifp->if_broot);
2008	ifp->if_broot = new_broot;
2009	ifp->if_broot_bytes = (int)new_size;
2010	ASSERT(ifp->if_broot_bytes <=
2011		XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2012	return;
2013}
2014
2015
2016/*
2017 * This is called when the amount of space needed for if_data
2018 * is increased or decreased.  The change in size is indicated by
2019 * the number of bytes that need to be added or deleted in the
2020 * byte_diff parameter.
2021 *
2022 * If the amount of space needed has decreased below the size of the
2023 * inline buffer, then switch to using the inline buffer.  Otherwise,
2024 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2025 * to what is needed.
2026 *
2027 * ip -- the inode whose if_data area is changing
2028 * byte_diff -- the change in the number of bytes, positive or negative,
2029 *	 requested for the if_data array.
2030 */
2031void
2032xfs_idata_realloc(
2033	xfs_inode_t	*ip,
2034	int		byte_diff,
2035	int		whichfork)
2036{
2037	xfs_ifork_t	*ifp;
2038	int		new_size;
2039	int		real_size;
2040
2041	if (byte_diff == 0) {
2042		return;
2043	}
2044
2045	ifp = XFS_IFORK_PTR(ip, whichfork);
2046	new_size = (int)ifp->if_bytes + byte_diff;
2047	ASSERT(new_size >= 0);
2048
2049	if (new_size == 0) {
2050		if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2051			kmem_free(ifp->if_u1.if_data);
2052		}
2053		ifp->if_u1.if_data = NULL;
2054		real_size = 0;
2055	} else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2056		/*
2057		 * If the valid extents/data can fit in if_inline_ext/data,
2058		 * copy them from the malloc'd vector and free it.
2059		 */
2060		if (ifp->if_u1.if_data == NULL) {
2061			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2062		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2063			ASSERT(ifp->if_real_bytes != 0);
2064			memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2065			      new_size);
2066			kmem_free(ifp->if_u1.if_data);
2067			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2068		}
2069		real_size = 0;
2070	} else {
2071		/*
2072		 * Stuck with malloc/realloc.
2073		 * For inline data, the underlying buffer must be
2074		 * a multiple of 4 bytes in size so that it can be
2075		 * logged and stay on word boundaries.  We enforce
2076		 * that here.
2077		 */
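		/*
		 * e.g. (illustrative) a 13 byte local-format fork, such as a
		 * short symlink target, gets a roundup(13, 4) == 16 byte
		 * buffer here.
		 */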
2078		real_size = roundup(new_size, 4);
2079		if (ifp->if_u1.if_data == NULL) {
2080			ASSERT(ifp->if_real_bytes == 0);
2081			ifp->if_u1.if_data = kmem_alloc(real_size,
2082							KM_SLEEP | KM_NOFS);
2083		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2084			/*
2085			 * Only do the realloc if the underlying size
2086			 * is really changing.
2087			 */
2088			if (ifp->if_real_bytes != real_size) {
2089				ifp->if_u1.if_data =
2090					kmem_realloc(ifp->if_u1.if_data,
2091							real_size,
2092							ifp->if_real_bytes,
2093							KM_SLEEP | KM_NOFS);
2094			}
2095		} else {
2096			ASSERT(ifp->if_real_bytes == 0);
2097			ifp->if_u1.if_data = kmem_alloc(real_size,
2098							KM_SLEEP | KM_NOFS);
2099			memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2100				ifp->if_bytes);
2101		}
2102	}
2103	ifp->if_real_bytes = real_size;
2104	ifp->if_bytes = new_size;
2105	ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2106}
2107
2108void
2109xfs_idestroy_fork(
2110	xfs_inode_t	*ip,
2111	int		whichfork)
2112{
2113	xfs_ifork_t	*ifp;
2114
2115	ifp = XFS_IFORK_PTR(ip, whichfork);
2116	if (ifp->if_broot != NULL) {
2117		kmem_free(ifp->if_broot);
2118		ifp->if_broot = NULL;
2119	}
2120
2121	/*
2122	 * If the format is local, then we can't have an extents
2123	 * array so just look for an inline data array.  If we're
2124	 * not local then we may or may not have an extents list,
2125	 * so check and free it up if we do.
2126	 */
2127	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2128		if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2129		    (ifp->if_u1.if_data != NULL)) {
2130			ASSERT(ifp->if_real_bytes != 0);
2131			kmem_free(ifp->if_u1.if_data);
2132			ifp->if_u1.if_data = NULL;
2133			ifp->if_real_bytes = 0;
2134		}
2135	} else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2136		   ((ifp->if_flags & XFS_IFEXTIREC) ||
2137		    ((ifp->if_u1.if_extents != NULL) &&
2138		     (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2139		ASSERT(ifp->if_real_bytes != 0);
2140		xfs_iext_destroy(ifp);
2141	}
2142	ASSERT(ifp->if_u1.if_extents == NULL ||
2143	       ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2144	ASSERT(ifp->if_real_bytes == 0);
2145	if (whichfork == XFS_ATTR_FORK) {
2146		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2147		ip->i_afp = NULL;
2148	}
2149}
2150
2151/*
2152 * This is called to unpin an inode.  The caller must have the inode locked
2153 * in at least shared mode so that the buffer cannot be subsequently pinned
2154 * once someone is waiting for it to be unpinned.
2155 */
2156static void
2157xfs_iunpin_nowait(
2158	struct xfs_inode	*ip)
2159{
2160	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2161
2162	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2163
2164	/* Give the log a push to start the unpinning I/O */
2165	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
2166
2167}
2168
2169void
2170xfs_iunpin_wait(
2171	struct xfs_inode	*ip)
2172{
2173	if (xfs_ipincount(ip)) {
2174		xfs_iunpin_nowait(ip);
2175		wait_event(ip->i_ipin_wait, (xfs_ipincount(ip) == 0));
2176	}
2177}
2178
2179/*
2180 * xfs_iextents_copy()
2181 *
2182 * This is called to copy the REAL extents (as opposed to the delayed
2183 * allocation extents) from the inode into the given buffer.  It
2184 * returns the number of bytes copied into the buffer.
2185 *
2186 * If there are no delayed allocation extents, then we can just
2187 * memcpy() the extents into the buffer.  Otherwise, we need to
2188 * examine each extent in turn and skip those which are delayed.
2189 */
2190int
2191xfs_iextents_copy(
2192	xfs_inode_t		*ip,
2193	xfs_bmbt_rec_t		*dp,
2194	int			whichfork)
2195{
2196	int			copied;
2197	int			i;
2198	xfs_ifork_t		*ifp;
2199	int			nrecs;
2200	xfs_fsblock_t		start_block;
2201
2202	ifp = XFS_IFORK_PTR(ip, whichfork);
2203	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2204	ASSERT(ifp->if_bytes > 0);
2205
2206	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2207	XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2208	ASSERT(nrecs > 0);
2209
2210	/*
2211	 * There are some delayed allocation extents in the
2212	 * inode, so copy the extents one at a time and skip
2213	 * the delayed ones.  There must be at least one
2214	 * non-delayed extent.
2215	 */
2216	copied = 0;
2217	for (i = 0; i < nrecs; i++) {
2218		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
2219		start_block = xfs_bmbt_get_startblock(ep);
2220		if (isnullstartblock(start_block)) {
2221			/*
2222			 * It's a delayed allocation extent, so skip it.
2223			 */
2224			continue;
2225		}
2226
2227		/* Translate to on disk format */
2228		put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
2229		put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
2230		dp++;
2231		copied++;
2232	}
2233	ASSERT(copied != 0);
2234	xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
2235
2236	return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2237}
2238
2239/*
2240 * Each of the following cases stores data into the same region
2241 * of the on-disk inode, so only one of them can be valid at
2242 * any given time. While it is possible to have conflicting formats
2243 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2244 * in EXTENTS format, this can only happen when the fork has
2245 * changed formats after being modified but before being flushed.
2246 * In these cases, the format always takes precedence, because the
2247 * format indicates the current state of the fork.
2248 */
2249/*ARGSUSED*/
2250STATIC void
2251xfs_iflush_fork(
2252	xfs_inode_t		*ip,
2253	xfs_dinode_t		*dip,
2254	xfs_inode_log_item_t	*iip,
2255	int			whichfork,
2256	xfs_buf_t		*bp)
2257{
2258	char			*cp;
2259	xfs_ifork_t		*ifp;
2260	xfs_mount_t		*mp;
2261#ifdef XFS_TRANS_DEBUG
2262	int			first;
2263#endif
2264	static const short	brootflag[2] =
2265		{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2266	static const short	dataflag[2] =
2267		{ XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2268	static const short	extflag[2] =
2269		{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2270
2271	if (!iip)
2272		return;
2273	ifp = XFS_IFORK_PTR(ip, whichfork);
2274	/*
2275	 * This can happen if we gave up in iformat in an error path,
2276	 * for the attribute fork.
2277	 */
2278	if (!ifp) {
2279		ASSERT(whichfork == XFS_ATTR_FORK);
2280		return;
2281	}
2282	cp = XFS_DFORK_PTR(dip, whichfork);
2283	mp = ip->i_mount;
2284	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2285	case XFS_DINODE_FMT_LOCAL:
2286		if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
2287		    (ifp->if_bytes > 0)) {
2288			ASSERT(ifp->if_u1.if_data != NULL);
2289			ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2290			memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2291		}
2292		break;
2293
2294	case XFS_DINODE_FMT_EXTENTS:
2295		ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2296		       !(iip->ili_format.ilf_fields & extflag[whichfork]));
2297		if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
2298		    (ifp->if_bytes > 0)) {
2299			ASSERT(xfs_iext_get_ext(ifp, 0));
2300			ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2301			(void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2302				whichfork);
2303		}
2304		break;
2305
2306	case XFS_DINODE_FMT_BTREE:
2307		if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
2308		    (ifp->if_broot_bytes > 0)) {
2309			ASSERT(ifp->if_broot != NULL);
2310			ASSERT(ifp->if_broot_bytes <=
2311			       (XFS_IFORK_SIZE(ip, whichfork) +
2312				XFS_BROOT_SIZE_ADJ));
2313			xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
2314				(xfs_bmdr_block_t *)cp,
2315				XFS_DFORK_SIZE(dip, mp, whichfork));
2316		}
2317		break;
2318
2319	case XFS_DINODE_FMT_DEV:
2320		if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
2321			ASSERT(whichfork == XFS_DATA_FORK);
2322			xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
2323		}
2324		break;
2325
2326	case XFS_DINODE_FMT_UUID:
2327		if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
2328			ASSERT(whichfork == XFS_DATA_FORK);
2329			memcpy(XFS_DFORK_DPTR(dip),
2330			       &ip->i_df.if_u2.if_uuid,
2331			       sizeof(uuid_t));
2332		}
2333		break;
2334
2335	default:
2336		ASSERT(0);
2337		break;
2338	}
2339}
2340
2341STATIC int
2342xfs_iflush_cluster(
2343	xfs_inode_t	*ip,
2344	xfs_buf_t	*bp)
2345{
2346	xfs_mount_t		*mp = ip->i_mount;
2347	struct xfs_perag	*pag;
2348	unsigned long		first_index, mask;
2349	unsigned long		inodes_per_cluster;
2350	int			ilist_size;
2351	xfs_inode_t		**ilist;
2352	xfs_inode_t		*iq;
2353	int			nr_found;
2354	int			clcount = 0;
2355	int			bufwasdelwri;
2356	int			i;
2357
2358	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2359
2360	inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
2361	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
2362	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
2363	if (!ilist)
2364		goto out_put;
2365
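	/*
	 * first_index is the lowest inode number in this inode's cluster;
	 * the gang lookup starts there, and the mask check in the loop
	 * below filters out anything returned from beyond the cluster.
	 */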
2366	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
2367	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
2368	rcu_read_lock();
2369	/* really need a gang lookup range call here */
2370	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
2371					first_index, inodes_per_cluster);
2372	if (nr_found == 0)
2373		goto out_free;
2374
2375	for (i = 0; i < nr_found; i++) {
2376		iq = ilist[i];
2377		if (iq == ip)
2378			continue;
2379
2380		/*
2381		 * because this is an RCU protected lookup, we could find a
2382		 * recently freed or even reallocated inode during the lookup.
2383		 * We need to check under the i_flags_lock for a valid inode
2384		 * here. Skip it if it is not valid or the wrong inode.
2385		 */
2386		spin_lock(&iq->i_flags_lock);
2387		if (!iq->i_ino ||
2388		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
2389			spin_unlock(&iq->i_flags_lock);
2390			continue;
2391		}
2392		spin_unlock(&iq->i_flags_lock);
2393
2394		/*
2395		 * Do an un-protected check to see if the inode is dirty and
2396		 * is a candidate for flushing.  These checks will be repeated
2397		 * later after the appropriate locks are acquired.
2398		 */
2399		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
2400			continue;
2401
2402		/*
2403		 * Try to get locks.  If any are unavailable or it is pinned,
2404		 * then this inode cannot be flushed and is skipped.
2405		 */
2406
2407		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
2408			continue;
2409		if (!xfs_iflock_nowait(iq)) {
2410			xfs_iunlock(iq, XFS_ILOCK_SHARED);
2411			continue;
2412		}
2413		if (xfs_ipincount(iq)) {
2414			xfs_ifunlock(iq);
2415			xfs_iunlock(iq, XFS_ILOCK_SHARED);
2416			continue;
2417		}
2418
2419		/*
2420		 * arriving here means that this inode can be flushed.  First
2421		 * re-check that it's dirty before flushing.
2422		 */
2423		if (!xfs_inode_clean(iq)) {
2424			int	error;
2425			error = xfs_iflush_int(iq, bp);
2426			if (error) {
2427				xfs_iunlock(iq, XFS_ILOCK_SHARED);
2428				goto cluster_corrupt_out;
2429			}
2430			clcount++;
2431		} else {
2432			xfs_ifunlock(iq);
2433		}
2434		xfs_iunlock(iq, XFS_ILOCK_SHARED);
2435	}
2436
2437	if (clcount) {
2438		XFS_STATS_INC(xs_icluster_flushcnt);
2439		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
2440	}
2441
2442out_free:
2443	rcu_read_unlock();
2444	kmem_free(ilist);
2445out_put:
2446	xfs_perag_put(pag);
2447	return 0;
2448
2449
2450cluster_corrupt_out:
2451	/*
2452	 * Corruption detected in the clustering loop.  Invalidate the
2453	 * inode buffer and shut down the filesystem.
2454	 */
2455	rcu_read_unlock();
2456	/*
2457	 * Clean up the buffer.  If it was B_DELWRI, just release it --
2458	 * brelse can handle it with no problems.  If not, shut down the
2459	 * filesystem before releasing the buffer.
2460	 */
2461	bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp);
2462	if (bufwasdelwri)
2463		xfs_buf_relse(bp);
2464
2465	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2466
2467	if (!bufwasdelwri) {
2468		/*
2469		 * Just like incore_relse: if we have b_iodone functions,
2470		 * mark the buffer as an error and call them.  Otherwise
2471		 * mark it as stale and brelse.
2472		 */
2473		if (bp->b_iodone) {
2474			XFS_BUF_UNDONE(bp);
2475			XFS_BUF_STALE(bp);
2476			xfs_buf_ioerror(bp, EIO);
2477			xfs_buf_ioend(bp, 0);
2478		} else {
2479			XFS_BUF_STALE(bp);
2480			xfs_buf_relse(bp);
2481		}
2482	}
2483
2484	/*
2485	 * Unlocks the flush lock
2486	 */
2487	xfs_iflush_abort(iq);
2488	kmem_free(ilist);
2489	xfs_perag_put(pag);
2490	return XFS_ERROR(EFSCORRUPTED);
2491}
2492
2493/*
2494 * xfs_iflush() will write a modified inode's changes out to the
2495 * inode's on disk home.  The caller must have the inode lock held
2496 * in at least shared mode and the inode flush completion must be
2497 * active as well.  The inode lock will still be held upon return from
2498 * the call and the caller is free to unlock it.
2499 * The inode flush will be completed when the inode reaches the disk.
2500 * The flags indicate how the inode's buffer should be written out.
2501 */
2502int
2503xfs_iflush(
2504	xfs_inode_t		*ip,
2505	uint			flags)
2506{
2507	xfs_inode_log_item_t	*iip;
2508	xfs_buf_t		*bp;
2509	xfs_dinode_t		*dip;
2510	xfs_mount_t		*mp;
2511	int			error;
2512
2513	XFS_STATS_INC(xs_iflush_count);
2514
2515	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2516	ASSERT(!completion_done(&ip->i_flush));
2517	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
2518	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
2519
2520	iip = ip->i_itemp;
2521	mp = ip->i_mount;
2522
2523	/*
2524	 * We can't flush the inode until it is unpinned, so wait for it if we
2525	 * are allowed to block.  We know no one new can pin it, because we are
2526	 * holding the inode lock shared and you need to hold it exclusively to
2527	 * pin the inode.
2528	 *
2529	 * If we are not allowed to block, force the log out asynchronously so
2530	 * that when we come back the inode will be unpinned. If other inodes
2531	 * in the same cluster are dirty, they will probably write the inode
2532	 * out for us if they occur after the log force completes.
2533	 */
2534	if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) {
2535		xfs_iunpin_nowait(ip);
2536		xfs_ifunlock(ip);
2537		return EAGAIN;
2538	}
2539	xfs_iunpin_wait(ip);
2540
2541	/*
2542	 * For stale inodes we cannot rely on the backing buffer remaining
2543	 * stale in cache for the remaining life of the stale inode and so
2544	 * xfs_itobp() below may give us a buffer that no longer contains
2545	 * inodes below. We have to check this after ensuring the inode is
2546	 * unpinned so that it is safe to reclaim the stale inode after the
2547	 * flush call.
2548	 */
2549	if (xfs_iflags_test(ip, XFS_ISTALE)) {
2550		xfs_ifunlock(ip);
2551		return 0;
2552	}
2553
2554	/*
2555	 * This may have been unpinned because the filesystem is shutting
2556	 * down forcibly. If that's the case we must not write this inode
2557	 * to disk, because the log record didn't make it to disk!
2558	 */
2559	if (XFS_FORCED_SHUTDOWN(mp)) {
2560		ip->i_update_core = 0;
2561		if (iip)
2562			iip->ili_format.ilf_fields = 0;
2563		xfs_ifunlock(ip);
2564		return XFS_ERROR(EIO);
2565	}
2566
2567	/*
2568	 * Get the buffer containing the on-disk inode.
2569	 */
2570	error = xfs_itobp(mp, NULL, ip, &dip, &bp,
2571				(flags & SYNC_TRYLOCK) ? XBF_TRYLOCK : XBF_LOCK);
2572	if (error || !bp) {
2573		xfs_ifunlock(ip);
2574		return error;
2575	}
2576
2577	/*
2578	 * First flush out the inode that xfs_iflush was called with.
2579	 */
2580	error = xfs_iflush_int(ip, bp);
2581	if (error)
2582		goto corrupt_out;
2583
2584	/*
2585	 * If the buffer is pinned then push on the log now so we won't
2586	 * get stuck waiting in the write for too long.
2587	 */
2588	if (xfs_buf_ispinned(bp))
2589		xfs_log_force(mp, 0);
2590
2591	/*
2592	 * inode clustering:
2593	 * see if other inodes can be gathered into this write
2594	 */
2595	error = xfs_iflush_cluster(ip, bp);
2596	if (error)
2597		goto cluster_corrupt_out;
2598
2599	if (flags & SYNC_WAIT)
2600		error = xfs_bwrite(mp, bp);
2601	else
2602		xfs_bdwrite(mp, bp);
2603	return error;
2604
2605corrupt_out:
2606	xfs_buf_relse(bp);
2607	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2608cluster_corrupt_out:
2609	/*
2610	 * Unlocks the flush lock
2611	 */
2612	xfs_iflush_abort(ip);
2613	return XFS_ERROR(EFSCORRUPTED);
2614}
2615
2616
2617STATIC int
2618xfs_iflush_int(
2619	xfs_inode_t		*ip,
2620	xfs_buf_t		*bp)
2621{
2622	xfs_inode_log_item_t	*iip;
2623	xfs_dinode_t		*dip;
2624	xfs_mount_t		*mp;
2625#ifdef XFS_TRANS_DEBUG
2626	int			first;
2627#endif
2628
2629	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2630	ASSERT(!completion_done(&ip->i_flush));
2631	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
2632	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
2633
2634	iip = ip->i_itemp;
2635	mp = ip->i_mount;
2636
2637	/* set *dip = inode's place in the buffer */
2638	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
2639
2640	/*
2641	 * Clear i_update_core before copying out the data.
2642	 * This is for coordination with our timestamp updates
2643	 * that don't hold the inode lock. They will always
2644	 * update the timestamps BEFORE setting i_update_core,
2645	 * so if we clear i_update_core after they set it we
2646	 * are guaranteed to see their updates to the timestamps.
2647	 * I believe that this depends on strongly ordered memory
2648	 * semantics, but we have that.  We use the SYNCHRONIZE
2649	 * macro to make sure that the compiler does not reorder
2650	 * the i_update_core access below the data copy below.
2651	 */
2652	ip->i_update_core = 0;
2653	SYNCHRONIZE();
2654
2655	/*
2656	 * Make sure to get the latest timestamps from the Linux inode.
2657	 */
2658	xfs_synchronize_times(ip);
2659
2660	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
2661			       mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
2662		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2663			"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
2664			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
2665		goto corrupt_out;
2666	}
2667	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
2668				mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
2669		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2670			"%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
2671			__func__, ip->i_ino, ip, ip->i_d.di_magic);
2672		goto corrupt_out;
2673	}
2674	if (S_ISREG(ip->i_d.di_mode)) {
2675		if (XFS_TEST_ERROR(
2676		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
2677		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
2678		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
2679			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2680				"%s: Bad regular inode %Lu, ptr 0x%p",
2681				__func__, ip->i_ino, ip);
2682			goto corrupt_out;
2683		}
2684	} else if (S_ISDIR(ip->i_d.di_mode)) {
2685		if (XFS_TEST_ERROR(
2686		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
2687		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
2688		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
2689		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
2690			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2691				"%s: Bad directory inode %Lu, ptr 0x%p",
2692				__func__, ip->i_ino, ip);
2693			goto corrupt_out;
2694		}
2695	}
2696	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
2697				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
2698				XFS_RANDOM_IFLUSH_5)) {
2699		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2700			"%s: detected corrupt incore inode %Lu, "
2701			"total extents = %d, nblocks = %Ld, ptr 0x%p",
2702			__func__, ip->i_ino,
2703			ip->i_d.di_nextents + ip->i_d.di_anextents,
2704			ip->i_d.di_nblocks, ip);
2705		goto corrupt_out;
2706	}
2707	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
2708				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
2709		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2710			"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
2711			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
2712		goto corrupt_out;
2713	}
2714	/*
2715	 * bump the flush iteration count, used to detect flushes which
2716	 * postdate a log record during recovery.
2717	 */
2718
2719	ip->i_d.di_flushiter++;
2720
2721	/*
2722	 * Copy the dirty parts of the inode into the on-disk
2723	 * inode.  We always copy out the core of the inode,
2724	 * because if the inode is dirty at all the core must
2725	 * be.
2726	 */
2727	xfs_dinode_to_disk(dip, &ip->i_d);
2728
2729	/* Wrap, we never let the log put out DI_MAX_FLUSH */
2730	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
2731		ip->i_d.di_flushiter = 0;
2732
2733	/*
2734	 * If this is really an old format inode and the superblock version
2735	 * has not been updated to support only new format inodes, then
2736	 * convert back to the old inode format.  If the superblock version
2737	 * has been updated, then make the conversion permanent.
2738	 */
2739	ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
2740	if (ip->i_d.di_version == 1) {
2741		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
2742			/*
2743			 * Convert it back.
2744			 */
2745			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
2746			dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
2747		} else {
2748			/*
2749			 * The superblock version has already been bumped,
2750			 * so just make the conversion to the new inode
2751			 * format permanent.
2752			 */
2753			ip->i_d.di_version = 2;
2754			dip->di_version = 2;
2755			ip->i_d.di_onlink = 0;
2756			dip->di_onlink = 0;
2757			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
2758			memset(&(dip->di_pad[0]), 0,
2759			      sizeof(dip->di_pad));
2760			ASSERT(xfs_get_projid(ip) == 0);
2761		}
2762	}
2763
2764	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
2765	if (XFS_IFORK_Q(ip))
2766		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
2767	xfs_inobp_check(mp, bp);
2768
2769	/*
2770	 * We've recorded everything logged in the inode, so we'd
2771	 * like to clear the ilf_fields bits so we don't log and
2772	 * flush things unnecessarily.  However, we can't stop
2773	 * logging all this information until the data we've copied
2774	 * into the disk buffer is written to disk.  If we did we might
2775	 * overwrite the copy of the inode in the log with all the
2776	 * data after re-logging only part of it, and in the face of
2777	 * a crash we wouldn't have all the data we need to recover.
2778	 *
2779	 * What we do is move the bits to the ili_last_fields field.
2780	 * When logging the inode, these bits are moved back to the
2781	 * ilf_fields field.  In the xfs_iflush_done() routine we
2782	 * clear ili_last_fields, since we know that the information
2783	 * those bits represent is permanently on disk.  As long as
2784	 * the flush completes before the inode is logged again, then
2785	 * both ilf_fields and ili_last_fields will be cleared.
2786	 *
2787	 * We can play with the ilf_fields bits here, because the inode
2788	 * lock must be held exclusively in order to set bits there
2789	 * and the flush lock protects the ili_last_fields bits.
2790	 * Set ili_logged so the flush done
2791	 * routine can tell whether or not to look in the AIL.
2792	 * Also, store the current LSN of the inode so that we can tell
2793	 * whether the item has moved in the AIL from xfs_iflush_done().
2794	 * In order to read the lsn we need the AIL lock, because
2795	 * it is a 64 bit value that cannot be read atomically.
2796	 */
2797	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
2798		iip->ili_last_fields = iip->ili_format.ilf_fields;
2799		iip->ili_format.ilf_fields = 0;
2800		iip->ili_logged = 1;
2801
2802		xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2803					&iip->ili_item.li_lsn);
2804
2805		/*
2806		 * Attach the function xfs_iflush_done to the inode's
2807		 * buffer.  This will remove the inode from the AIL
2808		 * and unlock the inode's flush lock when the inode is
2809		 * completely written to disk.
2810		 */
2811		xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
2812
2813		ASSERT(bp->b_fspriv != NULL);
2814		ASSERT(bp->b_iodone != NULL);
2815	} else {
2816		/*
2817		 * We're flushing an inode which is not in the AIL and has
2818		 * not been logged but has i_update_core set.  For this
2819		 * case we can use a B_DELWRI flush and immediately drop
2820		 * the inode flush lock because we can avoid the whole
2821		 * AIL state thing.  It's OK to drop the flush lock now,
2822		 * because we've already locked the buffer and to do anything
2823		 * you really need both.
2824		 */
2825		if (iip != NULL) {
2826			ASSERT(iip->ili_logged == 0);
2827			ASSERT(iip->ili_last_fields == 0);
2828			ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
2829		}
2830		xfs_ifunlock(ip);
2831	}
2832
2833	return 0;
2834
2835corrupt_out:
2836	return XFS_ERROR(EFSCORRUPTED);
2837}
2838
2839/*
2840 * Return a pointer to the extent record at file index idx.
2841 */
2842xfs_bmbt_rec_host_t *
2843xfs_iext_get_ext(
2844	xfs_ifork_t	*ifp,		/* inode fork pointer */
2845	xfs_extnum_t	idx)		/* index of target extent */
2846{
2847	ASSERT(idx >= 0);
2848	ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
2849
2850	if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
2851		return ifp->if_u1.if_ext_irec->er_extbuf;
2852	} else if (ifp->if_flags & XFS_IFEXTIREC) {
2853		xfs_ext_irec_t	*erp;		/* irec pointer */
2854		int		erp_idx = 0;	/* irec index */
2855		xfs_extnum_t	page_idx = idx;	/* ext index in target list */
2856
2857		erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
2858		return &erp->er_extbuf[page_idx];
2859	} else if (ifp->if_bytes) {
2860		return &ifp->if_u1.if_extents[idx];
2861	} else {
2862		return NULL;
2863	}
2864}
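
/*
 * A typical caller walks the incore extents with this helper; illustrative
 * sketch only (see xfs_iextents_copy() above for a real instance):
 *
 *	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
 *	for (idx = 0; idx < nextents; idx++) {
 *		ep = xfs_iext_get_ext(ifp, idx);
 *		xfs_bmbt_get_all(ep, &irec);
 *		...
 *	}
 */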
2865
2866/*
2867 * Insert new item(s) into the extent records for incore inode
2868 * fork 'ifp'.  'count' new items are inserted at index 'idx'.
2869 */
2870void
2871xfs_iext_insert(
2872	xfs_inode_t	*ip,		/* incore inode pointer */
2873	xfs_extnum_t	idx,		/* starting index of new items */
2874	xfs_extnum_t	count,		/* number of inserted items */
2875	xfs_bmbt_irec_t	*new,		/* items to insert */
2876	int		state)		/* type of extent conversion */
2877{
2878	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
2879	xfs_extnum_t	i;		/* extent record index */
2880
2881	trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
2882
2883	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
2884	xfs_iext_add(ifp, idx, count);
2885	for (i = idx; i < idx + count; i++, new++)
2886		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
2887}
2888
2889/*
2890 * This is called when the amount of space required for incore file
2891 * extents needs to be increased. The ext_diff parameter stores the
2892 * number of new extents being added and the idx parameter contains
2893 * the extent index where the new extents will be added. If the new
2894 * extents are being appended, then we just need to (re)allocate and
2895 * initialize the space. Otherwise, if the new extents are being
2896 * inserted into the middle of the existing entries, a bit more work
2897 * is required to make room for the new extents to be inserted. The
2898 * caller is responsible for filling in the new extent entries upon
2899 * return.
2900 */
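/*
 * Illustrative sizing (the usual values, not guaranteed by this code): an
 * extent record is 16 bytes, the inline buffer holds XFS_INLINE_EXTS (2)
 * records, and one XFS_IEXT_BUFSZ (4k) buffer holds XFS_LINEAR_EXTS (256)
 * records; those two thresholds select between the three cases below.
 */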
2901void
2902xfs_iext_add(
2903	xfs_ifork_t	*ifp,		/* inode fork pointer */
2904	xfs_extnum_t	idx,		/* index to begin adding exts */
2905	int		ext_diff)	/* number of extents to add */
2906{
2907	int		byte_diff;	/* new bytes being added */
2908	int		new_size;	/* size of extents after adding */
2909	xfs_extnum_t	nextents;	/* number of extents in file */
2910
2911	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2912	ASSERT((idx >= 0) && (idx <= nextents));
2913	byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
2914	new_size = ifp->if_bytes + byte_diff;
2915	/*
2916	 * If the new number of extents (nextents + ext_diff)
2917	 * fits inside the inode, then continue to use the inline
2918	 * extent buffer.
2919	 */
2920	if (nextents + ext_diff <= XFS_INLINE_EXTS) {
2921		if (idx < nextents) {
2922			memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
2923				&ifp->if_u2.if_inline_ext[idx],
2924				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
2925			memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
2926		}
2927		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
2928		ifp->if_real_bytes = 0;
2929	}
2930	/*
2931	 * Otherwise use a linear (direct) extent list.
2932	 * If the extents are currently inside the inode,
2933	 * xfs_iext_realloc_direct will switch us from
2934	 * inline to direct extent allocation mode.
2935	 */
2936	else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
2937		xfs_iext_realloc_direct(ifp, new_size);
2938		if (idx < nextents) {
2939			memmove(&ifp->if_u1.if_extents[idx + ext_diff],
2940				&ifp->if_u1.if_extents[idx],
2941				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
2942			memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
2943		}
2944	}
2945	/* Indirection array */
2946	else {
2947		xfs_ext_irec_t	*erp;
2948		int		erp_idx = 0;
2949		int		page_idx = idx;
2950
2951		ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
2952		if (ifp->if_flags & XFS_IFEXTIREC) {
2953			erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
2954		} else {
2955			xfs_iext_irec_init(ifp);
2956			ASSERT(ifp->if_flags & XFS_IFEXTIREC);
2957			erp = ifp->if_u1.if_ext_irec;
2958		}
2959		/* Extents fit in target extent page */
2960		if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
2961			if (page_idx < erp->er_extcount) {
2962				memmove(&erp->er_extbuf[page_idx + ext_diff],
2963					&erp->er_extbuf[page_idx],
2964					(erp->er_extcount - page_idx) *
2965					sizeof(xfs_bmbt_rec_t));
2966				memset(&erp->er_extbuf[page_idx], 0, byte_diff);
2967			}
2968			erp->er_extcount += ext_diff;
2969			xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
2970		}
2971		/* Insert a new extent page */
2972		else if (erp) {
2973			xfs_iext_add_indirect_multi(ifp,
2974				erp_idx, page_idx, ext_diff);
2975		}
2976		/*
2977		 * If extent(s) are being appended to the last page in
2978		 * the indirection array and the new extent(s) don't fit
2979		 * in the page, then erp is NULL and erp_idx is set to
2980		 * the next index needed in the indirection array.
2981		 */
2982		else {
2983			int	count = ext_diff;
2984
2985			while (count) {
2986				erp = xfs_iext_irec_new(ifp, erp_idx);
2987				erp->er_extcount = count;
2988				count -= MIN(count, (int)XFS_LINEAR_EXTS);
2989				if (count) {
2990					erp_idx++;
2991				}
2992			}
2993		}
2994	}
2995	ifp->if_bytes = new_size;
2996}
2997
2998/*
2999 * This is called when incore extents are being added to the indirection
3000 * array and the new extents do not fit in the target extent list. The
3001 * erp_idx parameter contains the irec index for the target extent list
3002 * in the indirection array, and the idx parameter contains the extent
3003 * index within the list. The number of extents being added is stored
3004 * in the count parameter.
3005 *
3006 *    |-------|   |-------|
3007 *    |       |   |       |    idx - number of extents before idx
3008 *    |  idx  |   | count |
3009 *    |       |   |       |    count - number of extents being inserted at idx
3010 *    |-------|   |-------|
3011 *    | count |   | nex2  |    nex2 - number of extents after idx + count
3012 *    |-------|   |-------|
3013 */
3014void
3015xfs_iext_add_indirect_multi(
3016	xfs_ifork_t	*ifp,			/* inode fork pointer */
3017	int		erp_idx,		/* target extent irec index */
3018	xfs_extnum_t	idx,			/* index within target list */
3019	int		count)			/* new extents being added */
3020{
3021	int		byte_diff;		/* new bytes being added */
3022	xfs_ext_irec_t	*erp;			/* pointer to irec entry */
3023	xfs_extnum_t	ext_diff;		/* number of extents to add */
3024	xfs_extnum_t	ext_cnt;		/* new extents still needed */
3025	xfs_extnum_t	nex2;			/* extents after idx + count */
3026	xfs_bmbt_rec_t	*nex2_ep = NULL;	/* temp list for nex2 extents */
3027	int		nlists;			/* number of irec's (lists) */
3028
3029	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3030	erp = &ifp->if_u1.if_ext_irec[erp_idx];
3031	nex2 = erp->er_extcount - idx;
3032	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3033
3034	/*
3035	 * Save the second part of the target extent list
3036	 * (all extents from idx onwards).
	 */
3037	if (nex2) {
3038		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3039		nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
3040		memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3041		erp->er_extcount -= nex2;
3042		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3043		memset(&erp->er_extbuf[idx], 0, byte_diff);
3044	}
3045
3046	/*
3047	 * Add the new extents to the end of the target
3048	 * list, then allocate new irec record(s) and
3049	 * extent buffer(s) as needed to store the rest
3050	 * of the new extents.
3051	 */
3052	ext_cnt = count;
3053	ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3054	if (ext_diff) {
3055		erp->er_extcount += ext_diff;
3056		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3057		ext_cnt -= ext_diff;
3058	}
3059	while (ext_cnt) {
3060		erp_idx++;
3061		erp = xfs_iext_irec_new(ifp, erp_idx);
3062		ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3063		erp->er_extcount = ext_diff;
3064		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3065		ext_cnt -= ext_diff;
3066	}
3067
3068	/* Add nex2 extents back to indirection array */
3069	if (nex2) {
3070		xfs_extnum_t	ext_avail;
3071		int		i;
3072
3073		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3074		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3075		i = 0;
3076		/*
3077		 * If nex2 extents fit in the current page, append
3078		 * nex2_ep after the new extents.
3079		 */
3080		if (nex2 <= ext_avail) {
3081			i = erp->er_extcount;
3082		}
3083		/*
3084		 * Otherwise, check if space is available in the
3085		 * next page.
3086		 */
3087		else if ((erp_idx < nlists - 1) &&
3088			 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3089			  ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3090			erp_idx++;
3091			erp++;
3092			/* Create a hole for nex2 extents */
3093			memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3094				erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3095		}
3096		/*
3097		 * Final choice, create a new extent page for
3098		 * nex2 extents.
3099		 */
3100		else {
3101			erp_idx++;
3102			erp = xfs_iext_irec_new(ifp, erp_idx);
3103		}
3104		memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3105		kmem_free(nex2_ep);
3106		erp->er_extcount += nex2;
3107		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3108	}
3109}
3110
3111/*
3112 * This is called when the amount of space required for incore file
3113 * extents needs to be decreased. The ext_diff parameter stores the
3114 * number of extents to be removed and the idx parameter contains
3115 * the extent index where the extents will be removed from.
3116 *
3117 * If the amount of space needed has decreased below the linear
3118 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3119 * extent array.  Otherwise, use kmem_realloc() to adjust the
3120 * size to what is needed.
3121 */
3122void
3123xfs_iext_remove(
3124	xfs_inode_t	*ip,		/* incore inode pointer */
3125	xfs_extnum_t	idx,		/* index to begin removing exts */
3126	int		ext_diff,	/* number of extents to remove */
3127	int		state)		/* type of extent conversion */
3128{
3129	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
3130	xfs_extnum_t	nextents;	/* number of extents in file */
3131	int		new_size;	/* size of extents after removal */
3132
3133	trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
3134
3135	ASSERT(ext_diff > 0);
3136	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3137	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3138
3139	if (new_size == 0) {
3140		xfs_iext_destroy(ifp);
3141	} else if (ifp->if_flags & XFS_IFEXTIREC) {
3142		xfs_iext_remove_indirect(ifp, idx, ext_diff);
3143	} else if (ifp->if_real_bytes) {
3144		xfs_iext_remove_direct(ifp, idx, ext_diff);
3145	} else {
3146		xfs_iext_remove_inline(ifp, idx, ext_diff);
3147	}
3148	ifp->if_bytes = new_size;
3149}
3150
3151/*
3152 * This removes ext_diff extents from the inline buffer, beginning
3153 * at extent index idx.
3154 */
3155void
3156xfs_iext_remove_inline(
3157	xfs_ifork_t	*ifp,		/* inode fork pointer */
3158	xfs_extnum_t	idx,		/* index to begin removing exts */
3159	int		ext_diff)	/* number of extents to remove */
3160{
3161	int		nextents;	/* number of extents in file */
3162
3163	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3164	ASSERT(idx < XFS_INLINE_EXTS);
3165	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3166	ASSERT(((nextents - ext_diff) > 0) &&
3167		(nextents - ext_diff) < XFS_INLINE_EXTS);
3168
3169	if (idx + ext_diff < nextents) {
3170		memmove(&ifp->if_u2.if_inline_ext[idx],
3171			&ifp->if_u2.if_inline_ext[idx + ext_diff],
3172			(nextents - (idx + ext_diff)) *
3173			 sizeof(xfs_bmbt_rec_t));
3174		memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3175			0, ext_diff * sizeof(xfs_bmbt_rec_t));
3176	} else {
3177		memset(&ifp->if_u2.if_inline_ext[idx], 0,
3178			ext_diff * sizeof(xfs_bmbt_rec_t));
3179	}
3180}
3181
3182/*
3183 * This removes ext_diff extents from a linear (direct) extent list,
3184 * beginning at extent index idx. If the extents are being removed
3185 * from the end of the list (i.e. truncate) then we just need to re-
3186 * allocate the list to remove the extra space. Otherwise, if the
3187 * extents are being removed from the middle of the existing extent
3188 * entries, then we first need to move the extent records beginning
3189 * at idx + ext_diff up in the list to overwrite the records being
3190 * removed, then remove the extra space via kmem_realloc.
3191 */
3192void
3193xfs_iext_remove_direct(
3194	xfs_ifork_t	*ifp,		/* inode fork pointer */
3195	xfs_extnum_t	idx,		/* index to begin removing exts */
3196	int		ext_diff)	/* number of extents to remove */
3197{
3198	xfs_extnum_t	nextents;	/* number of extents in file */
3199	int		new_size;	/* size of extents after removal */
3200
3201	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3202	new_size = ifp->if_bytes -
3203		(ext_diff * sizeof(xfs_bmbt_rec_t));
3204	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3205
3206	if (new_size == 0) {
3207		xfs_iext_destroy(ifp);
3208		return;
3209	}
3210	/* Move extents up in the list (if needed) */
3211	if (idx + ext_diff < nextents) {
3212		memmove(&ifp->if_u1.if_extents[idx],
3213			&ifp->if_u1.if_extents[idx + ext_diff],
3214			(nextents - (idx + ext_diff)) *
3215			 sizeof(xfs_bmbt_rec_t));
3216	}
3217	memset(&ifp->if_u1.if_extents[nextents - ext_diff],
3218		0, ext_diff * sizeof(xfs_bmbt_rec_t));
3219	/*
3220	 * Reallocate the direct extent list. If the extents
3221	 * will fit inside the inode then xfs_iext_realloc_direct
3222	 * will switch from direct to inline extent allocation
3223	 * mode for us.
3224	 */
3225	xfs_iext_realloc_direct(ifp, new_size);
3226	ifp->if_bytes = new_size;
3227}
3228
3229/*
3230 * This is called when incore extents are being removed from the
3231 * indirection array and the extents being removed span multiple extent
3232 * buffers. The idx parameter contains the file extent index where we
3233 * want to begin removing extents, and the count parameter contains
3234 * how many extents need to be removed.
3235 *
3236 *    |-------|   |-------|
3237 *    | nex1  |   |       |    nex1 - number of extents before idx
3238 *    |-------|   | count |
3239 *    |       |   |       |    count - number of extents being removed at idx
3240 *    | count |   |-------|
3241 *    |       |   | nex2  |    nex2 - number of extents after idx + count
3242 *    |-------|   |-------|
3243 */
3244void
3245xfs_iext_remove_indirect(
3246	xfs_ifork_t	*ifp,		/* inode fork pointer */
3247	xfs_extnum_t	idx,		/* index to begin removing extents */
3248	int		count)		/* number of extents to remove */
3249{
3250	xfs_ext_irec_t	*erp;		/* indirection array pointer */
3251	int		erp_idx = 0;	/* indirection array index */
3252	xfs_extnum_t	ext_cnt;	/* extents left to remove */
3253	xfs_extnum_t	ext_diff;	/* extents to remove in current list */
3254	xfs_extnum_t	nex1;		/* number of extents before idx */
3255	xfs_extnum_t	nex2;		/* extents after idx + count */
3256	int		page_idx = idx;	/* index in target extent list */
3257
3258	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3259	erp = xfs_iext_idx_to_irec(ifp,  &page_idx, &erp_idx, 0);
3260	ASSERT(erp != NULL);
3261	nex1 = page_idx;
3262	ext_cnt = count;
3263	while (ext_cnt) {
3264		nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
3265		ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
3266		/*
3267		 * Check for deletion of entire list;
3268		 * xfs_iext_irec_remove() updates extent offsets.
3269		 */
3270		if (ext_diff == erp->er_extcount) {
3271			xfs_iext_irec_remove(ifp, erp_idx);
3272			ext_cnt -= ext_diff;
3273			nex1 = 0;
3274			if (ext_cnt) {
3275				ASSERT(erp_idx < ifp->if_real_bytes /
3276					XFS_IEXT_BUFSZ);
3277				erp = &ifp->if_u1.if_ext_irec[erp_idx];
3278				nex1 = 0;
3279				continue;
3280			} else {
3281				break;
3282			}
3283		}
3284		/* Move extents up (if needed) */
3285		if (nex2) {
3286			memmove(&erp->er_extbuf[nex1],
3287				&erp->er_extbuf[nex1 + ext_diff],
3288				nex2 * sizeof(xfs_bmbt_rec_t));
3289		}
3290		/* Zero out rest of page */
3291		memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
3292			((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
3293		/* Update remaining counters */
3294		erp->er_extcount -= ext_diff;
3295		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
3296		ext_cnt -= ext_diff;
3297		nex1 = 0;
3298		erp_idx++;
3299		erp++;
3300	}
3301	ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
3302	xfs_iext_irec_compact(ifp);
3303}
3304
3305/*
3306 * Create, destroy, or resize a linear (direct) block of extents.
3307 */
3308void
3309xfs_iext_realloc_direct(
3310	xfs_ifork_t	*ifp,		/* inode fork pointer */
3311	int		new_size)	/* new size of extents */
3312{
3313	int		rnew_size;	/* real new size of extents */
3314
3315	rnew_size = new_size;
3316
3317	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
3318		((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
3319		 (new_size != ifp->if_real_bytes)));
3320
3321	/* Free extent records */
3322	if (new_size == 0) {
3323		xfs_iext_destroy(ifp);
3324	}
3325	/* Resize direct extent list and zero any new bytes */
3326	else if (ifp->if_real_bytes) {
3327		/* Check if extents will fit inside the inode */
3328		if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
3329			xfs_iext_direct_to_inline(ifp, new_size /
3330				(uint)sizeof(xfs_bmbt_rec_t));
3331			ifp->if_bytes = new_size;
3332			return;
3333		}
3334		if (!is_power_of_2(new_size)) {
3335			rnew_size = roundup_pow_of_two(new_size);
3336		}
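		/*
		 * e.g. (illustrative, 16 byte extent records): growing to
		 * 24 extents requests 384 bytes, which is rounded up to a
		 * 512 byte allocation.
		 */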
3337		if (rnew_size != ifp->if_real_bytes) {
3338			ifp->if_u1.if_extents =
3339				kmem_realloc(ifp->if_u1.if_extents,
3340						rnew_size,
3341						ifp->if_real_bytes, KM_NOFS);
3342		}
3343		if (rnew_size > ifp->if_real_bytes) {
3344			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
3345				(uint)sizeof(xfs_bmbt_rec_t)], 0,
3346				rnew_size - ifp->if_real_bytes);
3347		}
3348	}
3349	/*
3350	 * Switch from the inline extent buffer to a direct
3351	 * extent list. Be sure to include the inline extent
3352	 * bytes in new_size.
3353	 */
3354	else {
3355		new_size += ifp->if_bytes;
3356		if (!is_power_of_2(new_size)) {
3357			rnew_size = roundup_pow_of_two(new_size);
3358		}
3359		xfs_iext_inline_to_direct(ifp, rnew_size);
3360	}
3361	ifp->if_real_bytes = rnew_size;
3362	ifp->if_bytes = new_size;
3363}
3364
3365/*
3366 * Switch from linear (direct) extent records to inline buffer.
3367 */
3368void
3369xfs_iext_direct_to_inline(
3370	xfs_ifork_t	*ifp,		/* inode fork pointer */
3371	xfs_extnum_t	nextents)	/* number of extents in file */
3372{
3373	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3374	ASSERT(nextents <= XFS_INLINE_EXTS);
3375	/*
3376	 * The inline buffer was zeroed when we switched
3377	 * from inline to direct extent allocation mode,
3378	 * so we don't need to clear it here.
3379	 */
3380	memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
3381		nextents * sizeof(xfs_bmbt_rec_t));
3382	kmem_free(ifp->if_u1.if_extents);
3383	ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3384	ifp->if_real_bytes = 0;
3385}
3386
3387/*
3388 * Switch from inline buffer to linear (direct) extent records.
3389 * new_size should already be rounded up to the next power of 2
3390 * by the caller (when appropriate), so use new_size as it is.
3391 * However, since new_size may be rounded up, we can't update
3392 * if_bytes here. It is the caller's responsibility to update
3393 * if_bytes upon return.
3394 */
3395void
3396xfs_iext_inline_to_direct(
3397	xfs_ifork_t	*ifp,		/* inode fork pointer */
3398	int		new_size)	/* number of extents in file */
3399{
3400	ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
3401	memset(ifp->if_u1.if_extents, 0, new_size);
3402	if (ifp->if_bytes) {
3403		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
3404			ifp->if_bytes);
3405		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
3406			sizeof(xfs_bmbt_rec_t));
3407	}
3408	ifp->if_real_bytes = new_size;
3409}
3410
3411/*
3412 * Resize an extent indirection array to new_size bytes.
3413 */
3414STATIC void
3415xfs_iext_realloc_indirect(
3416	xfs_ifork_t	*ifp,		/* inode fork pointer */
3417	int		new_size)	/* new indirection array size */
3418{
3419	int		nlists;		/* number of irec's (ex lists) */
3420	int		size;		/* current indirection array size */
3421
3422	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3423	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3424	size = nlists * sizeof(xfs_ext_irec_t);
3425	ASSERT(ifp->if_real_bytes);
3426	ASSERT((new_size >= 0) && (new_size != size));
3427	if (new_size == 0) {
3428		xfs_iext_destroy(ifp);
3429	} else {
3430		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
3431			kmem_realloc(ifp->if_u1.if_ext_irec,
3432				new_size, size, KM_NOFS);
3433	}
3434}
3435
3436/*
3437 * Switch from indirection array to linear (direct) extent allocations.
3438 */
3439STATIC void
3440xfs_iext_indirect_to_direct(
3441	xfs_ifork_t	*ifp)		/* inode fork pointer */
3442{
3443	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */
3444	xfs_extnum_t	nextents;	/* number of extents in file */
3445	int		size;		/* size of file extents */
3446
3447	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3448	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3449	ASSERT(nextents <= XFS_LINEAR_EXTS);
3450	size = nextents * sizeof(xfs_bmbt_rec_t);
3451
3452	xfs_iext_irec_compact_pages(ifp);
3453	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
3454
3455	ep = ifp->if_u1.if_ext_irec->er_extbuf;
3456	kmem_free(ifp->if_u1.if_ext_irec);
3457	ifp->if_flags &= ~XFS_IFEXTIREC;
3458	ifp->if_u1.if_extents = ep;
3459	ifp->if_bytes = size;
3460	if (nextents < XFS_LINEAR_EXTS) {
3461		xfs_iext_realloc_direct(ifp, size);
3462	}
3463}
3464
3465/*
3466 * Free incore file extents.
3467 */
3468void
3469xfs_iext_destroy(
3470	xfs_ifork_t	*ifp)		/* inode fork pointer */
3471{
3472	if (ifp->if_flags & XFS_IFEXTIREC) {
3473		int	erp_idx;
3474		int	nlists;
3475
3476		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3477		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
3478			xfs_iext_irec_remove(ifp, erp_idx);
3479		}
3480		ifp->if_flags &= ~XFS_IFEXTIREC;
3481	} else if (ifp->if_real_bytes) {
3482		kmem_free(ifp->if_u1.if_extents);
3483	} else if (ifp->if_bytes) {
3484		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
3485			sizeof(xfs_bmbt_rec_t));
3486	}
3487	ifp->if_u1.if_extents = NULL;
3488	ifp->if_real_bytes = 0;
3489	ifp->if_bytes = 0;
3490}
3491
3492/*
3493 * Return a pointer to the extent record for file system block bno.
3494 */
3495xfs_bmbt_rec_host_t *			/* pointer to found extent record */
3496xfs_iext_bno_to_ext(
3497	xfs_ifork_t	*ifp,		/* inode fork pointer */
3498	xfs_fileoff_t	bno,		/* block number to search for */
3499	xfs_extnum_t	*idxp)		/* index of target extent */
3500{
3501	xfs_bmbt_rec_host_t *base;	/* pointer to first extent */
3502	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
3503	xfs_bmbt_rec_host_t *ep = NULL;	/* pointer to target extent */
3504	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
3505	int		high;		/* upper boundary in search */
3506	xfs_extnum_t	idx = 0;	/* index of target extent */
3507	int		low;		/* lower boundary in search */
3508	xfs_extnum_t	nextents;	/* number of file extents */
3509	xfs_fileoff_t	startoff = 0;	/* start offset of extent */
3510
3511	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3512	if (nextents == 0) {
3513		*idxp = 0;
3514		return NULL;
3515	}
3516	low = 0;
3517	if (ifp->if_flags & XFS_IFEXTIREC) {
3518		/* Find target extent list */
3519		int	erp_idx = 0;
3520		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
3521		base = erp->er_extbuf;
3522		high = erp->er_extcount - 1;
3523	} else {
3524		base = ifp->if_u1.if_extents;
3525		high = nextents - 1;
3526	}
3527	/* Binary search extent records */
3528	while (low <= high) {
3529		idx = (low + high) >> 1;
3530		ep = base + idx;
3531		startoff = xfs_bmbt_get_startoff(ep);
3532		blockcount = xfs_bmbt_get_blockcount(ep);
3533		if (bno < startoff) {
3534			high = idx - 1;
3535		} else if (bno >= startoff + blockcount) {
3536			low = idx + 1;
3537		} else {
3538			/* Convert back to file-based extent index */
3539			if (ifp->if_flags & XFS_IFEXTIREC) {
3540				idx += erp->er_extoff;
3541			}
3542			*idxp = idx;
3543			return ep;
3544		}
3545	}
3546	/* Convert back to file-based extent index */
3547	if (ifp->if_flags & XFS_IFEXTIREC) {
3548		idx += erp->er_extoff;
3549	}
3550	if (bno >= startoff + blockcount) {
3551		if (++idx == nextents) {
3552			ep = NULL;
3553		} else {
3554			ep = xfs_iext_get_ext(ifp, idx);
3555		}
3556	}
3557	*idxp = idx;
3558	return ep;
3559}
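
The loop above is a plain binary search over records kept sorted by start offset, with the extra rule that a miss lands on the extent just past bno (or one past the end). A minimal user-space sketch under simplified, assumed types — a flat struct instead of the packed xfs_bmbt_rec_host_t:

	#include <stdio.h>

	struct ext { unsigned long long startoff, blockcount; };

	/* Return the index of the extent containing bno, or of the first extent past it. */
	static int ext_lookup(const struct ext *base, int n, unsigned long long bno)
	{
		int low = 0, high = n - 1, idx = 0;

		while (low <= high) {
			idx = (low + high) >> 1;
			if (bno < base[idx].startoff)
				high = idx - 1;
			else if (bno >= base[idx].startoff + base[idx].blockcount)
				low = idx + 1;
			else
				return idx;	/* exact hit */
		}
		/* miss: step past the extent we stopped on, as the kernel code does */
		if (bno >= base[idx].startoff + base[idx].blockcount)
			idx++;
		return idx;
	}

	int main(void)
	{
		struct ext map[] = { { 0, 4 }, { 10, 2 }, { 20, 8 } };

		/* prints "1 1": block 11 hits extent 1, block 5 maps to the hole before it */
		printf("%d %d\n", ext_lookup(map, 3, 11), ext_lookup(map, 3, 5));
		return 0;
	}
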
3560
3561/*
3562 * Return a pointer to the indirection array entry containing the
3563 * extent record for filesystem block bno. Store the index of the
3564 * target irec in *erp_idxp.
3565 */
3566xfs_ext_irec_t *			/* pointer to found extent record */
3567xfs_iext_bno_to_irec(
3568	xfs_ifork_t	*ifp,		/* inode fork pointer */
3569	xfs_fileoff_t	bno,		/* block number to search for */
3570	int		*erp_idxp)	/* irec index of target ext list */
3571{
3572	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
3573	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
3574	int		erp_idx;	/* indirection array index */
3575	int		nlists;		/* number of extent irec's (lists) */
3576	int		high;		/* binary search upper limit */
3577	int		low;		/* binary search lower limit */
3578
3579	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3580	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3581	erp_idx = 0;
3582	low = 0;
3583	high = nlists - 1;
3584	while (low <= high) {
3585		erp_idx = (low + high) >> 1;
3586		erp = &ifp->if_u1.if_ext_irec[erp_idx];
3587		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
3588		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
3589			high = erp_idx - 1;
3590		} else if (erp_next && bno >=
3591			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
3592			low = erp_idx + 1;
3593		} else {
3594			break;
3595		}
3596	}
3597	*erp_idxp = erp_idx;
3598	return erp;
3599}
3600
3601/*
3602 * Return a pointer to the indirection array entry containing the
3603 * extent record at file extent index *idxp. Store the index of the
3604 * target irec in *erp_idxp and store the page index of the target
3605 * extent record in *idxp.
3606 */
3607xfs_ext_irec_t *
3608xfs_iext_idx_to_irec(
3609	xfs_ifork_t	*ifp,		/* inode fork pointer */
3610	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
3611	int		*erp_idxp,	/* pointer to target irec */
3612	int		realloc)	/* new bytes were just added */
3613{
3614	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
3615	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
3616	int		erp_idx;	/* indirection array index */
3617	int		nlists;		/* number of irec's (ex lists) */
3618	int		high;		/* binary search upper limit */
3619	int		low;		/* binary search lower limit */
3620	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */
3621
3622	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3623	ASSERT(page_idx >= 0);
3624	ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
3625	ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);
3626
3627	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3628	erp_idx = 0;
3629	low = 0;
3630	high = nlists - 1;
3631
3632	/* Binary search extent irec's */
3633	while (low <= high) {
3634		erp_idx = (low + high) >> 1;
3635		erp = &ifp->if_u1.if_ext_irec[erp_idx];
3636		prev = erp_idx > 0 ? erp - 1 : NULL;
3637		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
3638		     realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
3639			high = erp_idx - 1;
3640		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
3641			   (page_idx == erp->er_extoff + erp->er_extcount &&
3642			    !realloc)) {
3643			low = erp_idx + 1;
3644		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
3645			   erp->er_extcount == XFS_LINEAR_EXTS) {
3646			ASSERT(realloc);
3647			page_idx = 0;
3648			erp_idx++;
3649			erp = erp_idx < nlists ? erp + 1 : NULL;
3650			break;
3651		} else {
3652			page_idx -= erp->er_extoff;
3653			break;
3654		}
3655	}
3656	*idxp = page_idx;
3657	*erp_idxp = erp_idx;
3658	return erp;
3659}
3660
3661/*
3662 * Allocate and initialize an indirection array once the space needed
3663 * for incore extents increases above XFS_IEXT_BUFSZ.
3664 */
3665void
3666xfs_iext_irec_init(
3667	xfs_ifork_t	*ifp)		/* inode fork pointer */
3668{
3669	xfs_ext_irec_t	*erp;		/* indirection array pointer */
3670	xfs_extnum_t	nextents;	/* number of extents in file */
3671
3672	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3673	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3674	ASSERT(nextents <= XFS_LINEAR_EXTS);
3675
3676	erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
3677
3678	if (nextents == 0) {
3679		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
3680	} else if (!ifp->if_real_bytes) {
3681		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
3682	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
3683		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
3684	}
3685	erp->er_extbuf = ifp->if_u1.if_extents;
3686	erp->er_extcount = nextents;
3687	erp->er_extoff = 0;
3688
3689	ifp->if_flags |= XFS_IFEXTIREC;
3690	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
3691	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
3692	ifp->if_u1.if_ext_irec = erp;
3693
3694	return;
3695}
3696
3697/*
3698 * Allocate and initialize a new entry in the indirection array.
3699 */
3700xfs_ext_irec_t *
3701xfs_iext_irec_new(
3702	xfs_ifork_t	*ifp,		/* inode fork pointer */
3703	int		erp_idx)	/* index for new irec */
3704{
3705	xfs_ext_irec_t	*erp;		/* indirection array pointer */
3706	int		i;		/* loop counter */
3707	int		nlists;		/* number of irec's (ex lists) */
3708
3709	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3710	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3711
3712	/* Resize indirection array */
3713	xfs_iext_realloc_indirect(ifp, ++nlists *
3714				  sizeof(xfs_ext_irec_t));
3715	/*
3716	 * Move records down in the array so the
3717	 * new page can use erp_idx.
3718	 */
3719	erp = ifp->if_u1.if_ext_irec;
3720	for (i = nlists - 1; i > erp_idx; i--) {
3721		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
3722	}
3723	ASSERT(i == erp_idx);
3724
3725	/* Initialize new extent record */
3726	erp = ifp->if_u1.if_ext_irec;
3727	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
3728	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
3729	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
3730	erp[erp_idx].er_extcount = 0;
3731	erp[erp_idx].er_extoff = erp_idx > 0 ?
3732		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
3733	return &erp[erp_idx];
3734}
3735
3736/*
3737 * Remove a record from the indirection array.
3738 */
3739void
3740xfs_iext_irec_remove(
3741	xfs_ifork_t	*ifp,		/* inode fork pointer */
3742	int		erp_idx)	/* irec index to remove */
3743{
3744	xfs_ext_irec_t	*erp;		/* indirection array pointer */
3745	int		i;		/* loop counter */
3746	int		nlists;		/* number of irec's (ex lists) */
3747
3748	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3749	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3750	erp = &ifp->if_u1.if_ext_irec[erp_idx];
3751	if (erp->er_extbuf) {
3752		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
3753			-erp->er_extcount);
3754		kmem_free(erp->er_extbuf);
3755	}
3756	/* Compact extent records */
3757	erp = ifp->if_u1.if_ext_irec;
3758	for (i = erp_idx; i < nlists - 1; i++) {
3759		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
3760	}
3761	/*
3762	 * Manually free the last extent record from the indirection
3763	 * array.  A call to xfs_iext_realloc_indirect() with a size
3764	 * of zero would result in a call to xfs_iext_destroy() which
3765	 * would in turn call this function again, creating a nasty
3766	 * infinite loop.
3767	 */
3768	if (--nlists) {
3769		xfs_iext_realloc_indirect(ifp,
3770			nlists * sizeof(xfs_ext_irec_t));
3771	} else {
3772		kmem_free(ifp->if_u1.if_ext_irec);
3773	}
3774	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
3775}
3776
3777/*
3778 * This is called to clean up large amounts of unused memory allocated
3779 * by the indirection array.  Before compacting anything though, verify
3780 * that the indirection array is still needed and switch back to the
3781 * linear extent list (or even the inline buffer) if possible.  The
3782 * compaction policy is as follows:
3783 *
3784 *    Full Compaction: Extents fit into a single page (or inline buffer)
3785 * Partial Compaction: Extents occupy less than 50% of allocated space
3786 *      No Compaction: Extents occupy at least 50% of allocated space
3787 */
3788void
3789xfs_iext_irec_compact(
3790	xfs_ifork_t	*ifp)		/* inode fork pointer */
3791{
3792	xfs_extnum_t	nextents;	/* number of extents in file */
3793	int		nlists;		/* number of irec's (ex lists) */
3794
3795	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3796	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3797	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3798
3799	if (nextents == 0) {
3800		xfs_iext_destroy(ifp);
3801	} else if (nextents <= XFS_INLINE_EXTS) {
3802		xfs_iext_indirect_to_direct(ifp);
3803		xfs_iext_direct_to_inline(ifp, nextents);
3804	} else if (nextents <= XFS_LINEAR_EXTS) {
3805		xfs_iext_indirect_to_direct(ifp);
3806	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
3807		xfs_iext_irec_compact_pages(ifp);
3808	}
3809}
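
To make the three-tier policy above concrete, here is a user-space restatement of the decision, with XFS_INLINE_EXTS and XFS_LINEAR_EXTS replaced by small illustrative constants (the real values are derived from the inode fork and page sizes):

	/* Illustrative constants; the real XFS_INLINE_EXTS/XFS_LINEAR_EXTS differ. */
	#define INLINE_EXTS	2
	#define LINEAR_EXTS	256	/* extents per indirection page */

	enum compact { COMPACT_NONE, COMPACT_PAGES, TO_DIRECT, TO_INLINE, DESTROY };

	static enum compact compact_policy(int nextents, int nlists)
	{
		if (nextents == 0)
			return DESTROY;			/* free everything */
		if (nextents <= INLINE_EXTS)
			return TO_INLINE;		/* full compaction into the inline buffer */
		if (nextents <= LINEAR_EXTS)
			return TO_DIRECT;		/* full compaction into one direct list */
		if (nextents < (nlists * LINEAR_EXTS) >> 1)
			return COMPACT_PAGES;		/* under 50% occupancy: merge pages */
		return COMPACT_NONE;
	}
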
3810
3811/*
3812 * Combine extents from neighboring extent pages.
3813 */
3814void
3815xfs_iext_irec_compact_pages(
3816	xfs_ifork_t	*ifp)		/* inode fork pointer */
3817{
3818	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
3819	int		erp_idx = 0;	/* indirection array index */
3820	int		nlists;		/* number of irec's (ex lists) */
3821
3822	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3823	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3824	while (erp_idx < nlists - 1) {
3825		erp = &ifp->if_u1.if_ext_irec[erp_idx];
3826		erp_next = erp + 1;
3827		if (erp_next->er_extcount <=
3828		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
3829			memcpy(&erp->er_extbuf[erp->er_extcount],
3830				erp_next->er_extbuf, erp_next->er_extcount *
3831				sizeof(xfs_bmbt_rec_t));
3832			erp->er_extcount += erp_next->er_extcount;
3833			/*
3834			 * Free page before removing extent record
3835			 * so er_extoffs don't get modified in
3836			 * xfs_iext_irec_remove.
3837			 */
3838			kmem_free(erp_next->er_extbuf);
3839			erp_next->er_extbuf = NULL;
3840			xfs_iext_irec_remove(ifp, erp_idx + 1);
3841			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3842		} else {
3843			erp_idx++;
3844		}
3845	}
3846}
3847
3848/*
3849 * This is called to update the er_extoff field in the indirection
3850 * array when extents have been added or removed from one of the
3851 * extent lists. erp_idx contains the irec index to begin updating
3852 * at and ext_diff contains the number of extents that were added
3853 * or removed.
3854 */
3855void
3856xfs_iext_irec_update_extoffs(
3857	xfs_ifork_t	*ifp,		/* inode fork pointer */
3858	int		erp_idx,	/* irec index to update */
3859	int		ext_diff)	/* number of new extents */
3860{
3861	int		i;		/* loop counter */
3862	int		nlists;		/* number of irec's (ex lists) */
3863
3864	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3865	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3866	for (i = erp_idx; i < nlists; i++) {
3867		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
3868	}
3869}
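
A quick worked example of the bookkeeping above: er_extoff records the file-relative index of each page's first extent, so if page 0 gains 3 extents, every later page must shift its er_extoff by +3. A sketch with a bare array standing in for the indirection table:

	struct irec_stub { int er_extoff; int er_extcount; };

	/* Shift the starting file-extent index of every page from start_idx onward. */
	static void update_extoffs(struct irec_stub *irec, int nlists,
				   int start_idx, int ext_diff)
	{
		int i;

		for (i = start_idx; i < nlists; i++)
			irec[i].er_extoff += ext_diff;
	}
	/* E.g. pages at er_extoff {0, 256, 512}; after adding 3 extents to page 0,
	 * update_extoffs(irec, 3, 1, 3) leaves them at {0, 259, 515}. */
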
v3.5.6
   1/*
   2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include <linux/log2.h>
  19
  20#include "xfs.h"
  21#include "xfs_fs.h"
  22#include "xfs_types.h"
  23#include "xfs_log.h"
  24#include "xfs_inum.h"
  25#include "xfs_trans.h"
  26#include "xfs_trans_priv.h"
  27#include "xfs_sb.h"
  28#include "xfs_ag.h"
  29#include "xfs_mount.h"
  30#include "xfs_bmap_btree.h"
  31#include "xfs_alloc_btree.h"
  32#include "xfs_ialloc_btree.h"
  33#include "xfs_attr_sf.h"
  34#include "xfs_dinode.h"
  35#include "xfs_inode.h"
  36#include "xfs_buf_item.h"
  37#include "xfs_inode_item.h"
  38#include "xfs_btree.h"
  39#include "xfs_alloc.h"
  40#include "xfs_ialloc.h"
  41#include "xfs_bmap.h"
  42#include "xfs_error.h"
  43#include "xfs_utils.h"
  44#include "xfs_quota.h"
  45#include "xfs_filestream.h"
  46#include "xfs_vnodeops.h"
  47#include "xfs_trace.h"
  48
  49kmem_zone_t *xfs_ifork_zone;
  50kmem_zone_t *xfs_inode_zone;
  51
  52/*
  53 * Used in xfs_itruncate_extents().  This is the maximum number of extents
  54 * freed from a file in a single transaction.
  55 */
  56#define	XFS_ITRUNC_MAX_EXTENTS	2
  57
  58STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
  59STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
  60STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
  61STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
  62
  63/*
  64 * helper function to extract extent size hint from inode
  65 */
  66xfs_extlen_t
  67xfs_get_extsz_hint(
  68	struct xfs_inode	*ip)
  69{
  70	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
  71		return ip->i_d.di_extsize;
  72	if (XFS_IS_REALTIME_INODE(ip))
  73		return ip->i_mount->m_sb.sb_rextsize;
  74	return 0;
  75}
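
To restate the precedence in the new helper: an explicit per-inode extent size (XFS_DIFLAG_EXTSIZE with a non-zero di_extsize) wins, a realtime inode falls back to the filesystem-wide realtime extent size, and anything else gets no hint. A user-space sketch with hypothetical stand-in fields:

	/* Hypothetical stand-in structure; the kernel reads ip->i_d and the mount. */
	struct inode_stub {
		unsigned int	flags;
	#define STUB_EXTSIZE	0x1	/* per-inode hint set */
	#define STUB_REALTIME	0x2	/* realtime file */
		unsigned int	extsize;	/* per-inode hint, in fs blocks */
		unsigned int	rextsize;	/* realtime extent size from the superblock */
	};

	static unsigned int extsz_hint(const struct inode_stub *ip)
	{
		if ((ip->flags & STUB_EXTSIZE) && ip->extsize)
			return ip->extsize;
		if (ip->flags & STUB_REALTIME)
			return ip->rextsize;
		return 0;
	}
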
  76
  77#ifdef DEBUG
  78/*
  79 * Make sure that the extents in the given memory buffer
  80 * are valid.
  81 */
  82STATIC void
  83xfs_validate_extents(
  84	xfs_ifork_t		*ifp,
  85	int			nrecs,
  86	xfs_exntfmt_t		fmt)
  87{
  88	xfs_bmbt_irec_t		irec;
  89	xfs_bmbt_rec_host_t	rec;
  90	int			i;
  91
  92	for (i = 0; i < nrecs; i++) {
  93		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
  94		rec.l0 = get_unaligned(&ep->l0);
  95		rec.l1 = get_unaligned(&ep->l1);
  96		xfs_bmbt_get_all(&rec, &irec);
  97		if (fmt == XFS_EXTFMT_NOSTATE)
  98			ASSERT(irec.br_state == XFS_EXT_NORM);
  99	}
 100}
 101#else /* DEBUG */
 102#define xfs_validate_extents(ifp, nrecs, fmt)
 103#endif /* DEBUG */
 104
 105/*
  106 * Check that none of the inodes in the buffer have a next
 107 * unlinked field of 0.
 108 */
 109#if defined(DEBUG)
 110void
 111xfs_inobp_check(
 112	xfs_mount_t	*mp,
 113	xfs_buf_t	*bp)
 114{
 115	int		i;
 116	int		j;
 117	xfs_dinode_t	*dip;
 118
 119	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
 120
 121	for (i = 0; i < j; i++) {
 122		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
 123					i * mp->m_sb.sb_inodesize);
 124		if (!dip->di_next_unlinked)  {
 125			xfs_alert(mp,
 126	"Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
 127				bp);
 128			ASSERT(dip->di_next_unlinked);
 129		}
 130	}
 131}
 132#endif
 133
 134/*
  135 * Find the buffer associated with the given inode map.
 136 * We do basic validation checks on the buffer once it has been
 137 * retrieved from disk.
 138 */
 139STATIC int
 140xfs_imap_to_bp(
 141	xfs_mount_t	*mp,
 142	xfs_trans_t	*tp,
 143	struct xfs_imap	*imap,
 144	xfs_buf_t	**bpp,
 145	uint		buf_flags,
 146	uint		iget_flags)
 147{
 148	int		error;
 149	int		i;
 150	int		ni;
 151	xfs_buf_t	*bp;
 152
 153	buf_flags |= XBF_UNMAPPED;
 154	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
 155				   (int)imap->im_len, buf_flags, &bp);
 156	if (error) {
 157		if (error != EAGAIN) {
 158			xfs_warn(mp,
 159				"%s: xfs_trans_read_buf() returned error %d.",
 160				__func__, error);
 161		} else {
 162			ASSERT(buf_flags & XBF_TRYLOCK);
 163		}
 164		return error;
 165	}
 166
 167	/*
 168	 * Validate the magic number and version of every inode in the buffer
  169	 * (on a DEBUG kernel) or only the first inode in the buffer otherwise.
 170	 */
 171#ifdef DEBUG
 172	ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
 173#else	/* usual case */
 174	ni = 1;
 175#endif
 176
 177	for (i = 0; i < ni; i++) {
 178		int		di_ok;
 179		xfs_dinode_t	*dip;
 180
 181		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
 182					(i << mp->m_sb.sb_inodelog));
 183		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
 184			    XFS_DINODE_GOOD_VERSION(dip->di_version);
 185		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
 186						XFS_ERRTAG_ITOBP_INOTOBP,
 187						XFS_RANDOM_ITOBP_INOTOBP))) {
 188			if (iget_flags & XFS_IGET_UNTRUSTED) {
 189				xfs_trans_brelse(tp, bp);
 190				return XFS_ERROR(EINVAL);
 191			}
 192			XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
 193						XFS_ERRLEVEL_HIGH, mp, dip);
 194#ifdef DEBUG
 195			xfs_emerg(mp,
 196				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
 197				(unsigned long long)imap->im_blkno, i,
 198				be16_to_cpu(dip->di_magic));
 199			ASSERT(0);
 200#endif
 201			xfs_trans_brelse(tp, bp);
 202			return XFS_ERROR(EFSCORRUPTED);
 203		}
 204	}
 205
 206	xfs_inobp_check(mp, bp);
 207	*bpp = bp;
 208	return 0;
 209}
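
For the DEBUG walk above, the buffer covers a whole inode cluster, so inode i lives at byte offset i << sb_inodelog, and the inode count is simply the buffer size (BBTOB() converts the mapping's length from 512-byte basic blocks to bytes) shifted down by that same log. A toy restatement with hypothetical geometry:

	/* Hypothetical geometry: 256-byte inodes in an 8 KiB inode cluster buffer. */
	#define INODE_LOG	8		/* log2 of the inode size */
	#define CLUSTER_BYTES	8192

	static int inodes_in_cluster(void)
	{
		return CLUSTER_BYTES >> INODE_LOG;	/* 32 inodes per buffer */
	}

	static int inode_byte_offset(int i)
	{
		return i << INODE_LOG;			/* where inode i starts */
	}
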
 210
 211/*
 212 * This routine is called to map an inode number within a file
 213 * system to the buffer containing the on-disk version of the
 214 * inode.  It returns a pointer to the buffer containing the
 215 * on-disk inode in the bpp parameter, and in the dip parameter
 216 * it returns a pointer to the on-disk inode within that buffer.
 217 *
 218 * If a non-zero error is returned, then the contents of bpp and
 219 * dipp are undefined.
 220 *
 221 * Use xfs_imap() to determine the size and location of the
 222 * buffer to read from disk.
 223 */
 224int
 225xfs_inotobp(
 226	xfs_mount_t	*mp,
 227	xfs_trans_t	*tp,
 228	xfs_ino_t	ino,
 229	xfs_dinode_t	**dipp,
 230	xfs_buf_t	**bpp,
 231	int		*offset,
 232	uint		imap_flags)
 233{
 234	struct xfs_imap	imap;
 235	xfs_buf_t	*bp;
 236	int		error;
 237
 238	imap.im_blkno = 0;
 239	error = xfs_imap(mp, tp, ino, &imap, imap_flags);
 240	if (error)
 241		return error;
 242
 243	error = xfs_imap_to_bp(mp, tp, &imap, &bp, 0, imap_flags);
 244	if (error)
 245		return error;
 246
 247	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
 248	*bpp = bp;
 249	*offset = imap.im_boffset;
 250	return 0;
 251}
 252
 253
 254/*
 255 * This routine is called to map an inode to the buffer containing
 256 * the on-disk version of the inode.  It returns a pointer to the
 257 * buffer containing the on-disk inode in the bpp parameter, and in
 258 * the dip parameter it returns a pointer to the on-disk inode within
 259 * that buffer.
 260 *
 261 * If a non-zero error is returned, then the contents of bpp and
 262 * dipp are undefined.
 263 *
  264 * The inode is expected to have already been mapped to its buffer and read
 265 * in once, thus we can use the mapping information stored in the inode
 266 * rather than calling xfs_imap().  This allows us to avoid the overhead
 267 * of looking at the inode btree for small block file systems
 268 * (see xfs_imap()).
 269 */
 270int
 271xfs_itobp(
 272	xfs_mount_t	*mp,
 273	xfs_trans_t	*tp,
 274	xfs_inode_t	*ip,
 275	xfs_dinode_t	**dipp,
 276	xfs_buf_t	**bpp,
 277	uint		buf_flags)
 278{
 279	xfs_buf_t	*bp;
 280	int		error;
 281
 282	ASSERT(ip->i_imap.im_blkno != 0);
 283
 284	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0);
 285	if (error)
 286		return error;
 287
 288	if (!bp) {
 289		ASSERT(buf_flags & XBF_TRYLOCK);
 290		ASSERT(tp == NULL);
 291		*bpp = NULL;
 292		return EAGAIN;
 293	}
 294
 295	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
 296	*bpp = bp;
 297	return 0;
 298}
 299
 300/*
 301 * Move inode type and inode format specific information from the
 302 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 303 * this means set if_rdev to the proper value.  For files, directories,
 304 * and symlinks this means to bring in the in-line data or extent
 305 * pointers.  For a file in B-tree format, only the root is immediately
 306 * brought in-core.  The rest will be in-lined in if_extents when it
 307 * is first referenced (see xfs_iread_extents()).
 308 */
 309STATIC int
 310xfs_iformat(
 311	xfs_inode_t		*ip,
 312	xfs_dinode_t		*dip)
 313{
 314	xfs_attr_shortform_t	*atp;
 315	int			size;
 316	int			error = 0;
 317	xfs_fsize_t             di_size;
 318
 319	if (unlikely(be32_to_cpu(dip->di_nextents) +
 320		     be16_to_cpu(dip->di_anextents) >
 321		     be64_to_cpu(dip->di_nblocks))) {
 322		xfs_warn(ip->i_mount,
 323			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
 324			(unsigned long long)ip->i_ino,
 325			(int)(be32_to_cpu(dip->di_nextents) +
 326			      be16_to_cpu(dip->di_anextents)),
 327			(unsigned long long)
 328				be64_to_cpu(dip->di_nblocks));
 329		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
 330				     ip->i_mount, dip);
 331		return XFS_ERROR(EFSCORRUPTED);
 332	}
 333
 334	if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
 335		xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
 336			(unsigned long long)ip->i_ino,
 337			dip->di_forkoff);
 338		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
 339				     ip->i_mount, dip);
 340		return XFS_ERROR(EFSCORRUPTED);
 341	}
 342
 343	if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
 344		     !ip->i_mount->m_rtdev_targp)) {
 345		xfs_warn(ip->i_mount,
 346			"corrupt dinode %Lu, has realtime flag set.",
 347			ip->i_ino);
 348		XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
 349				     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
 350		return XFS_ERROR(EFSCORRUPTED);
 351	}
 352
 353	switch (ip->i_d.di_mode & S_IFMT) {
 354	case S_IFIFO:
 355	case S_IFCHR:
 356	case S_IFBLK:
 357	case S_IFSOCK:
 358		if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
 359			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
 360					      ip->i_mount, dip);
 361			return XFS_ERROR(EFSCORRUPTED);
 362		}
 363		ip->i_d.di_size = 0;
 364		ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
 365		break;
 366
 367	case S_IFREG:
 368	case S_IFLNK:
 369	case S_IFDIR:
 370		switch (dip->di_format) {
 371		case XFS_DINODE_FMT_LOCAL:
 372			/*
 373			 * no local regular files yet
 374			 */
 375			if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
 376				xfs_warn(ip->i_mount,
 377			"corrupt inode %Lu (local format for regular file).",
 378					(unsigned long long) ip->i_ino);
 379				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
 380						     XFS_ERRLEVEL_LOW,
 381						     ip->i_mount, dip);
 382				return XFS_ERROR(EFSCORRUPTED);
 383			}
 384
 385			di_size = be64_to_cpu(dip->di_size);
 386			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
 387				xfs_warn(ip->i_mount,
 388			"corrupt inode %Lu (bad size %Ld for local inode).",
 389					(unsigned long long) ip->i_ino,
 390					(long long) di_size);
 391				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
 392						     XFS_ERRLEVEL_LOW,
 393						     ip->i_mount, dip);
 394				return XFS_ERROR(EFSCORRUPTED);
 395			}
 396
 397			size = (int)di_size;
 398			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
 399			break;
 400		case XFS_DINODE_FMT_EXTENTS:
 401			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
 402			break;
 403		case XFS_DINODE_FMT_BTREE:
 404			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
 405			break;
 406		default:
 407			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
 408					 ip->i_mount);
 409			return XFS_ERROR(EFSCORRUPTED);
 410		}
 411		break;
 412
 413	default:
 414		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
 415		return XFS_ERROR(EFSCORRUPTED);
 416	}
 417	if (error) {
 418		return error;
 419	}
 420	if (!XFS_DFORK_Q(dip))
 421		return 0;
 422
 423	ASSERT(ip->i_afp == NULL);
 424	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
 425
 426	switch (dip->di_aformat) {
 427	case XFS_DINODE_FMT_LOCAL:
 428		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
 429		size = be16_to_cpu(atp->hdr.totsize);
 430
 431		if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
 432			xfs_warn(ip->i_mount,
 433				"corrupt inode %Lu (bad attr fork size %Ld).",
 434				(unsigned long long) ip->i_ino,
 435				(long long) size);
 436			XFS_CORRUPTION_ERROR("xfs_iformat(8)",
 437					     XFS_ERRLEVEL_LOW,
 438					     ip->i_mount, dip);
 439			return XFS_ERROR(EFSCORRUPTED);
 440		}
 441
 442		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
 443		break;
 444	case XFS_DINODE_FMT_EXTENTS:
 445		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
 446		break;
 447	case XFS_DINODE_FMT_BTREE:
 448		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
 449		break;
 450	default:
 451		error = XFS_ERROR(EFSCORRUPTED);
 452		break;
 453	}
 454	if (error) {
 455		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
 456		ip->i_afp = NULL;
 457		xfs_idestroy_fork(ip, XFS_DATA_FORK);
 458	}
 459	return error;
 460}
 461
 462/*
 463 * The file is in-lined in the on-disk inode.
 464 * If it fits into if_inline_data, then copy
 465 * it there, otherwise allocate a buffer for it
 466 * and copy the data there.  Either way, set
 467 * if_data to point at the data.
 468 * If we allocate a buffer for the data, make
 469 * sure that its size is a multiple of 4 and
  470 * record the real size in if_real_bytes.
 471 */
 472STATIC int
 473xfs_iformat_local(
 474	xfs_inode_t	*ip,
 475	xfs_dinode_t	*dip,
 476	int		whichfork,
 477	int		size)
 478{
 479	xfs_ifork_t	*ifp;
 480	int		real_size;
 481
 482	/*
 483	 * If the size is unreasonable, then something
 484	 * is wrong and we just bail out rather than crash in
 485	 * kmem_alloc() or memcpy() below.
 486	 */
 487	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
 488		xfs_warn(ip->i_mount,
 489	"corrupt inode %Lu (bad size %d for local fork, size = %d).",
 490			(unsigned long long) ip->i_ino, size,
 491			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
 492		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
 493				     ip->i_mount, dip);
 494		return XFS_ERROR(EFSCORRUPTED);
 495	}
 496	ifp = XFS_IFORK_PTR(ip, whichfork);
 497	real_size = 0;
 498	if (size == 0)
 499		ifp->if_u1.if_data = NULL;
 500	else if (size <= sizeof(ifp->if_u2.if_inline_data))
 501		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
 502	else {
 503		real_size = roundup(size, 4);
 504		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
 505	}
 506	ifp->if_bytes = size;
 507	ifp->if_real_bytes = real_size;
 508	if (size)
 509		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
 510	ifp->if_flags &= ~XFS_IFEXTENTS;
 511	ifp->if_flags |= XFS_IFINLINE;
 512	return 0;
 513}
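
One detail worth noting in xfs_iformat_local(): when the data does not fit in if_inline_data, the allocation is rounded up to a multiple of 4 and that rounded figure lands in if_real_bytes, while if_bytes keeps the logical size. A self-contained sketch of the same split, using only standard C and an illustrative 32-byte inline buffer:

	#include <stdlib.h>
	#include <string.h>

	struct local_fork {
		void	*data;
		int	bytes;		/* logical size of the local data */
		int	real_bytes;	/* allocated size, rounded to 4; 0 if inline */
		char	inline_data[32];	/* illustrative inline buffer size */
	};

	static int fork_set_local(struct local_fork *f, const void *src, int size)
	{
		f->real_bytes = 0;
		if (size == 0) {
			f->data = NULL;
		} else if (size <= (int)sizeof(f->inline_data)) {
			f->data = f->inline_data;
		} else {
			f->real_bytes = (size + 3) & ~3;	/* roundup(size, 4) */
			f->data = malloc(f->real_bytes);
			if (!f->data)
				return -1;
		}
		f->bytes = size;
		if (size)
			memcpy(f->data, src, size);
		return 0;
	}
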
 514
 515/*
 516 * The file consists of a set of extents all
 517 * of which fit into the on-disk inode.
 518 * If there are few enough extents to fit into
 519 * the if_inline_ext, then copy them there.
 520 * Otherwise allocate a buffer for them and copy
 521 * them into it.  Either way, set if_extents
 522 * to point at the extents.
 523 */
 524STATIC int
 525xfs_iformat_extents(
 526	xfs_inode_t	*ip,
 527	xfs_dinode_t	*dip,
 528	int		whichfork)
 529{
 530	xfs_bmbt_rec_t	*dp;
 531	xfs_ifork_t	*ifp;
 532	int		nex;
 533	int		size;
 534	int		i;
 535
 536	ifp = XFS_IFORK_PTR(ip, whichfork);
 537	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
 538	size = nex * (uint)sizeof(xfs_bmbt_rec_t);
 539
 540	/*
 541	 * If the number of extents is unreasonable, then something
 542	 * is wrong and we just bail out rather than crash in
 543	 * kmem_alloc() or memcpy() below.
 544	 */
 545	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
 546		xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
 547			(unsigned long long) ip->i_ino, nex);
 548		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
 549				     ip->i_mount, dip);
 550		return XFS_ERROR(EFSCORRUPTED);
 551	}
 552
 553	ifp->if_real_bytes = 0;
 554	if (nex == 0)
 555		ifp->if_u1.if_extents = NULL;
 556	else if (nex <= XFS_INLINE_EXTS)
 557		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
 558	else
 559		xfs_iext_add(ifp, 0, nex);
 560
 561	ifp->if_bytes = size;
 562	if (size) {
 563		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
 564		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
 565		for (i = 0; i < nex; i++, dp++) {
 566			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
 567			ep->l0 = get_unaligned_be64(&dp->l0);
 568			ep->l1 = get_unaligned_be64(&dp->l1);
 569		}
 570		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
 571		if (whichfork != XFS_DATA_FORK ||
 572			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
 573				if (unlikely(xfs_check_nostate_extents(
 574				    ifp, 0, nex))) {
 575					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
 576							 XFS_ERRLEVEL_LOW,
 577							 ip->i_mount);
 578					return XFS_ERROR(EFSCORRUPTED);
 579				}
 580	}
 581	ifp->if_flags |= XFS_IFEXTENTS;
 582	return 0;
 583}
 584
 585/*
 586 * The file has too many extents to fit into
 587 * the inode, so they are in B-tree format.
 588 * Allocate a buffer for the root of the B-tree
  589 * and copy the root into it.  The if_extents
 590 * field will remain NULL until all of the
 591 * extents are read in (when they are needed).
 592 */
 593STATIC int
 594xfs_iformat_btree(
 595	xfs_inode_t		*ip,
 596	xfs_dinode_t		*dip,
 597	int			whichfork)
 598{
 599	xfs_bmdr_block_t	*dfp;
 600	xfs_ifork_t		*ifp;
 601	/* REFERENCED */
 602	int			nrecs;
 603	int			size;
 604
 605	ifp = XFS_IFORK_PTR(ip, whichfork);
 606	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
 607	size = XFS_BMAP_BROOT_SPACE(dfp);
 608	nrecs = be16_to_cpu(dfp->bb_numrecs);
 609
 610	/*
  611	 * blow out if -- fork has fewer extents than can fit in
 612	 * fork (fork shouldn't be a btree format), root btree
 613	 * block has more records than can fit into the fork,
 614	 * or the number of extents is greater than the number of
 615	 * blocks.
 616	 */
 617	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
 618			XFS_IFORK_MAXEXT(ip, whichfork) ||
 619		     XFS_BMDR_SPACE_CALC(nrecs) >
 620			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork) ||
 621		     XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
 622		xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
 623			(unsigned long long) ip->i_ino);
 624		XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
 625				 ip->i_mount, dip);
 626		return XFS_ERROR(EFSCORRUPTED);
 627	}
 628
 629	ifp->if_broot_bytes = size;
 630	ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
 631	ASSERT(ifp->if_broot != NULL);
 632	/*
 633	 * Copy and convert from the on-disk structure
 634	 * to the in-memory structure.
 635	 */
 636	xfs_bmdr_to_bmbt(ip->i_mount, dfp,
 637			 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
 638			 ifp->if_broot, size);
 639	ifp->if_flags &= ~XFS_IFEXTENTS;
 640	ifp->if_flags |= XFS_IFBROOT;
 641
 642	return 0;
 643}
 644
 645STATIC void
 646xfs_dinode_from_disk(
 647	xfs_icdinode_t		*to,
 648	xfs_dinode_t		*from)
 649{
 650	to->di_magic = be16_to_cpu(from->di_magic);
 651	to->di_mode = be16_to_cpu(from->di_mode);
  652	to->di_version = from->di_version;
 653	to->di_format = from->di_format;
 654	to->di_onlink = be16_to_cpu(from->di_onlink);
 655	to->di_uid = be32_to_cpu(from->di_uid);
 656	to->di_gid = be32_to_cpu(from->di_gid);
 657	to->di_nlink = be32_to_cpu(from->di_nlink);
 658	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
 659	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
 660	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
 661	to->di_flushiter = be16_to_cpu(from->di_flushiter);
 662	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
 663	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
 664	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
 665	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
 666	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
 667	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
 668	to->di_size = be64_to_cpu(from->di_size);
 669	to->di_nblocks = be64_to_cpu(from->di_nblocks);
 670	to->di_extsize = be32_to_cpu(from->di_extsize);
 671	to->di_nextents = be32_to_cpu(from->di_nextents);
 672	to->di_anextents = be16_to_cpu(from->di_anextents);
 673	to->di_forkoff = from->di_forkoff;
 674	to->di_aformat	= from->di_aformat;
 675	to->di_dmevmask	= be32_to_cpu(from->di_dmevmask);
 676	to->di_dmstate	= be16_to_cpu(from->di_dmstate);
 677	to->di_flags	= be16_to_cpu(from->di_flags);
 678	to->di_gen	= be32_to_cpu(from->di_gen);
 679}
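
Both conversion routines follow the same pattern: every multi-byte on-disk field is big-endian and passes through beNN_to_cpu() on the way in (and cpu_to_beNN() on the way out), while single-byte fields such as di_version and di_forkoff are copied as-is. A tiny portable illustration of decoding one such field:

	#include <stdint.h>

	/* Portable stand-in for be32_to_cpu(): assemble a native value byte by byte. */
	static uint32_t be32_load(const unsigned char b[4])
	{
		return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
		       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
	}
	/* The four on-disk bytes 00 00 01 2c decode to 300 on any host. */
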
 680
 681void
 682xfs_dinode_to_disk(
 683	xfs_dinode_t		*to,
 684	xfs_icdinode_t		*from)
 685{
 686	to->di_magic = cpu_to_be16(from->di_magic);
 687	to->di_mode = cpu_to_be16(from->di_mode);
  688	to->di_version = from->di_version;
 689	to->di_format = from->di_format;
 690	to->di_onlink = cpu_to_be16(from->di_onlink);
 691	to->di_uid = cpu_to_be32(from->di_uid);
 692	to->di_gid = cpu_to_be32(from->di_gid);
 693	to->di_nlink = cpu_to_be32(from->di_nlink);
 694	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
 695	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
 696	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
 697	to->di_flushiter = cpu_to_be16(from->di_flushiter);
 698	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
 699	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
 700	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
 701	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
 702	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
 703	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
 704	to->di_size = cpu_to_be64(from->di_size);
 705	to->di_nblocks = cpu_to_be64(from->di_nblocks);
 706	to->di_extsize = cpu_to_be32(from->di_extsize);
 707	to->di_nextents = cpu_to_be32(from->di_nextents);
 708	to->di_anextents = cpu_to_be16(from->di_anextents);
 709	to->di_forkoff = from->di_forkoff;
 710	to->di_aformat = from->di_aformat;
 711	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
 712	to->di_dmstate = cpu_to_be16(from->di_dmstate);
 713	to->di_flags = cpu_to_be16(from->di_flags);
 714	to->di_gen = cpu_to_be32(from->di_gen);
 715}
 716
 717STATIC uint
 718_xfs_dic2xflags(
 719	__uint16_t		di_flags)
 720{
 721	uint			flags = 0;
 722
 723	if (di_flags & XFS_DIFLAG_ANY) {
 724		if (di_flags & XFS_DIFLAG_REALTIME)
 725			flags |= XFS_XFLAG_REALTIME;
 726		if (di_flags & XFS_DIFLAG_PREALLOC)
 727			flags |= XFS_XFLAG_PREALLOC;
 728		if (di_flags & XFS_DIFLAG_IMMUTABLE)
 729			flags |= XFS_XFLAG_IMMUTABLE;
 730		if (di_flags & XFS_DIFLAG_APPEND)
 731			flags |= XFS_XFLAG_APPEND;
 732		if (di_flags & XFS_DIFLAG_SYNC)
 733			flags |= XFS_XFLAG_SYNC;
 734		if (di_flags & XFS_DIFLAG_NOATIME)
 735			flags |= XFS_XFLAG_NOATIME;
 736		if (di_flags & XFS_DIFLAG_NODUMP)
 737			flags |= XFS_XFLAG_NODUMP;
 738		if (di_flags & XFS_DIFLAG_RTINHERIT)
 739			flags |= XFS_XFLAG_RTINHERIT;
 740		if (di_flags & XFS_DIFLAG_PROJINHERIT)
 741			flags |= XFS_XFLAG_PROJINHERIT;
 742		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
 743			flags |= XFS_XFLAG_NOSYMLINKS;
 744		if (di_flags & XFS_DIFLAG_EXTSIZE)
 745			flags |= XFS_XFLAG_EXTSIZE;
 746		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
 747			flags |= XFS_XFLAG_EXTSZINHERIT;
 748		if (di_flags & XFS_DIFLAG_NODEFRAG)
 749			flags |= XFS_XFLAG_NODEFRAG;
 750		if (di_flags & XFS_DIFLAG_FILESTREAM)
 751			flags |= XFS_XFLAG_FILESTREAM;
 752	}
 753
 754	return flags;
 755}
 756
 757uint
 758xfs_ip2xflags(
 759	xfs_inode_t		*ip)
 760{
 761	xfs_icdinode_t		*dic = &ip->i_d;
 762
 763	return _xfs_dic2xflags(dic->di_flags) |
 764				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
 765}
 766
 767uint
 768xfs_dic2xflags(
 769	xfs_dinode_t		*dip)
 770{
 771	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
 772				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
 773}
 774
 775/*
 776 * Read the disk inode attributes into the in-core inode structure.
 777 */
 778int
 779xfs_iread(
 780	xfs_mount_t	*mp,
 781	xfs_trans_t	*tp,
 782	xfs_inode_t	*ip,
 783	uint		iget_flags)
 784{
 785	xfs_buf_t	*bp;
 786	xfs_dinode_t	*dip;
 787	int		error;
 788
 789	/*
 790	 * Fill in the location information in the in-core inode.
 791	 */
 792	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
 793	if (error)
 794		return error;
 795
 796	/*
 797	 * Get pointers to the on-disk inode and the buffer containing it.
 798	 */
 799	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, 0, iget_flags);
 800	if (error)
 801		return error;
 802	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
 803
 804	/*
  805	 * If we got something that isn't an inode, it means someone
 806	 * (nfs or dmi) has a stale handle.
 807	 */
 808	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) {
 809#ifdef DEBUG
 810		xfs_alert(mp,
 811			"%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)",
 812			__func__, be16_to_cpu(dip->di_magic), XFS_DINODE_MAGIC);
 813#endif /* DEBUG */
 814		error = XFS_ERROR(EINVAL);
 815		goto out_brelse;
 816	}
 817
 818	/*
 819	 * If the on-disk inode is already linked to a directory
 820	 * entry, copy all of the inode into the in-core inode.
 821	 * xfs_iformat() handles copying in the inode format
 822	 * specific information.
 823	 * Otherwise, just get the truly permanent information.
 824	 */
 825	if (dip->di_mode) {
 826		xfs_dinode_from_disk(&ip->i_d, dip);
 827		error = xfs_iformat(ip, dip);
 828		if (error)  {
 829#ifdef DEBUG
 830			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
 831				__func__, error);
 832#endif /* DEBUG */
 833			goto out_brelse;
 834		}
 835	} else {
 836		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
 837		ip->i_d.di_version = dip->di_version;
 838		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
 839		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
 840		/*
 841		 * Make sure to pull in the mode here as well in
 842		 * case the inode is released without being used.
 843		 * This ensures that xfs_inactive() will see that
 844		 * the inode is already free and not try to mess
 845		 * with the uninitialized part of it.
 846		 */
 847		ip->i_d.di_mode = 0;
 848	}
 849
 850	/*
 851	 * The inode format changed when we moved the link count and
 852	 * made it 32 bits long.  If this is an old format inode,
 853	 * convert it in memory to look like a new one.  If it gets
 854	 * flushed to disk we will convert back before flushing or
 855	 * logging it.  We zero out the new projid field and the old link
 856	 * count field.  We'll handle clearing the pad field (the remains
 857	 * of the old uuid field) when we actually convert the inode to
 858	 * the new format. We don't change the version number so that we
 859	 * can distinguish this from a real new format inode.
 860	 */
 861	if (ip->i_d.di_version == 1) {
 862		ip->i_d.di_nlink = ip->i_d.di_onlink;
 863		ip->i_d.di_onlink = 0;
 864		xfs_set_projid(ip, 0);
 865	}
 866
 867	ip->i_delayed_blks = 0;
 868
 869	/*
 870	 * Mark the buffer containing the inode as something to keep
 871	 * around for a while.  This helps to keep recently accessed
 872	 * meta-data in-core longer.
 873	 */
 874	xfs_buf_set_ref(bp, XFS_INO_REF);
 875
 876	/*
 877	 * Use xfs_trans_brelse() to release the buffer containing the
 878	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
 879	 * in xfs_itobp() above.  If tp is NULL, this is just a normal
  880	 * in xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
 881	 * will only release the buffer if it is not dirty within the
 882	 * transaction.  It will be OK to release the buffer in this case,
 883	 * because inodes on disk are never destroyed and we will be
 884	 * locking the new in-core inode before putting it in the hash
 885	 * table where other processes can find it.  Thus we don't have
 886	 * to worry about the inode being changed just because we released
 887	 * the buffer.
 888	 */
 889 out_brelse:
 890	xfs_trans_brelse(tp, bp);
 891	return error;
 892}
 893
 894/*
 895 * Read in extents from a btree-format inode.
 896 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 897 */
 898int
 899xfs_iread_extents(
 900	xfs_trans_t	*tp,
 901	xfs_inode_t	*ip,
 902	int		whichfork)
 903{
 904	int		error;
 905	xfs_ifork_t	*ifp;
 906	xfs_extnum_t	nextents;
 907
 908	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
 909		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
 910				 ip->i_mount);
 911		return XFS_ERROR(EFSCORRUPTED);
 912	}
 913	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
 914	ifp = XFS_IFORK_PTR(ip, whichfork);
 915
 916	/*
 917	 * We know that the size is valid (it's checked in iformat_btree)
 918	 */
 919	ifp->if_bytes = ifp->if_real_bytes = 0;
 920	ifp->if_flags |= XFS_IFEXTENTS;
 921	xfs_iext_add(ifp, 0, nextents);
 922	error = xfs_bmap_read_extents(tp, ip, whichfork);
 923	if (error) {
 924		xfs_iext_destroy(ifp);
 925		ifp->if_flags &= ~XFS_IFEXTENTS;
 926		return error;
 927	}
 928	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
 929	return 0;
 930}
 931
 932/*
 933 * Allocate an inode on disk and return a copy of its in-core version.
 934 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 935 * appropriately within the inode.  The uid and gid for the inode are
 936 * set according to the contents of the given cred structure.
 937 *
 938 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 939 * has a free inode available, call xfs_iget()
 940 * to obtain the in-core version of the allocated inode.  Finally,
 941 * fill in the inode and log its initial contents.  In this case,
 942 * ialloc_context would be set to NULL and call_again set to false.
 943 *
 944 * If xfs_dialloc() does not have an available inode,
 945 * it will replenish its supply by doing an allocation. Since we can
 946 * only do one allocation within a transaction without deadlocks, we
 947 * must commit the current transaction before returning the inode itself.
 948 * In this case, therefore, we will set call_again to true and return.
 949 * The caller should then commit the current transaction, start a new
 950 * transaction, and call xfs_ialloc() again to actually get the inode.
 951 *
 952 * To ensure that some other process does not grab the inode that
 953 * was allocated during the first call to xfs_ialloc(), this routine
 954 * also returns the [locked] bp pointing to the head of the freelist
 955 * as ialloc_context.  The caller should hold this buffer across
 956 * the commit and pass it back into this routine on the second call.
 957 *
 958 * If we are allocating quota inodes, we do not have a parent inode
 959 * to attach to or associate with (i.e. pip == NULL) because they
 960 * are not linked into the directory structure - they are attached
 961 * directly to the superblock - and so have no parent.
 962 */
 963int
 964xfs_ialloc(
 965	xfs_trans_t	*tp,
 966	xfs_inode_t	*pip,
 967	umode_t		mode,
 968	xfs_nlink_t	nlink,
 969	xfs_dev_t	rdev,
 970	prid_t		prid,
 971	int		okalloc,
 972	xfs_buf_t	**ialloc_context,
 973	boolean_t	*call_again,
 974	xfs_inode_t	**ipp)
 975{
 976	xfs_ino_t	ino;
 977	xfs_inode_t	*ip;
 978	uint		flags;
 979	int		error;
 980	timespec_t	tv;
 981	int		filestreams = 0;
 982
 983	/*
 984	 * Call the space management code to pick
 985	 * the on-disk inode to be allocated.
 986	 */
 987	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
 988			    ialloc_context, call_again, &ino);
 989	if (error)
 990		return error;
 991	if (*call_again || ino == NULLFSINO) {
 992		*ipp = NULL;
 993		return 0;
 994	}
 995	ASSERT(*ialloc_context == NULL);
 996
 997	/*
 998	 * Get the in-core inode with the lock held exclusively.
 999	 * This is because we're setting fields here we need
1000	 * to prevent others from looking at until we're done.
1001	 */
1002	error = xfs_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE,
1003			 XFS_ILOCK_EXCL, &ip);
1004	if (error)
1005		return error;
1006	ASSERT(ip != NULL);
1007
1008	ip->i_d.di_mode = mode;
1009	ip->i_d.di_onlink = 0;
1010	ip->i_d.di_nlink = nlink;
1011	ASSERT(ip->i_d.di_nlink == nlink);
1012	ip->i_d.di_uid = current_fsuid();
1013	ip->i_d.di_gid = current_fsgid();
1014	xfs_set_projid(ip, prid);
1015	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
1016
1017	/*
1018	 * If the superblock version is up to where we support new format
1019	 * inodes and this is currently an old format inode, then change
1020	 * the inode version number now.  This way we only do the conversion
1021	 * here rather than here and in the flush/logging code.
1022	 */
1023	if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
1024	    ip->i_d.di_version == 1) {
1025		ip->i_d.di_version = 2;
1026		/*
1027		 * We've already zeroed the old link count, the projid field,
1028		 * and the pad field.
1029		 */
1030	}
1031
1032	/*
1033	 * Project ids won't be stored on disk if we are using a version 1 inode.
1034	 */
1035	if ((prid != 0) && (ip->i_d.di_version == 1))
1036		xfs_bump_ino_vers2(tp, ip);
1037
1038	if (pip && XFS_INHERIT_GID(pip)) {
1039		ip->i_d.di_gid = pip->i_d.di_gid;
1040		if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
1041			ip->i_d.di_mode |= S_ISGID;
1042		}
1043	}
1044
1045	/*
1046	 * If the group ID of the new file does not match the effective group
1047	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
1048	 * (and only if the irix_sgid_inherit compatibility variable is set).
1049	 */
1050	if ((irix_sgid_inherit) &&
1051	    (ip->i_d.di_mode & S_ISGID) &&
1052	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
1053		ip->i_d.di_mode &= ~S_ISGID;
1054	}
1055
1056	ip->i_d.di_size = 0;
1057	ip->i_d.di_nextents = 0;
1058	ASSERT(ip->i_d.di_nblocks == 0);
1059
1060	nanotime(&tv);
1061	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
1062	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
1063	ip->i_d.di_atime = ip->i_d.di_mtime;
1064	ip->i_d.di_ctime = ip->i_d.di_mtime;
1065
1066	/*
1067	 * di_gen will have been taken care of in xfs_iread.
1068	 */
1069	ip->i_d.di_extsize = 0;
1070	ip->i_d.di_dmevmask = 0;
1071	ip->i_d.di_dmstate = 0;
1072	ip->i_d.di_flags = 0;
1073	flags = XFS_ILOG_CORE;
1074	switch (mode & S_IFMT) {
1075	case S_IFIFO:
1076	case S_IFCHR:
1077	case S_IFBLK:
1078	case S_IFSOCK:
1079		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
1080		ip->i_df.if_u2.if_rdev = rdev;
1081		ip->i_df.if_flags = 0;
1082		flags |= XFS_ILOG_DEV;
1083		break;
1084	case S_IFREG:
1085		/*
1086		 * we can't set up filestreams until after the VFS inode
1087		 * is set up properly.
1088		 */
1089		if (pip && xfs_inode_is_filestream(pip))
1090			filestreams = 1;
1091		/* fall through */
1092	case S_IFDIR:
1093		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
1094			uint	di_flags = 0;
1095
1096			if (S_ISDIR(mode)) {
1097				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1098					di_flags |= XFS_DIFLAG_RTINHERIT;
1099				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1100					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1101					ip->i_d.di_extsize = pip->i_d.di_extsize;
1102				}
1103			} else if (S_ISREG(mode)) {
1104				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1105					di_flags |= XFS_DIFLAG_REALTIME;
1106				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1107					di_flags |= XFS_DIFLAG_EXTSIZE;
1108					ip->i_d.di_extsize = pip->i_d.di_extsize;
1109				}
1110			}
1111			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
1112			    xfs_inherit_noatime)
1113				di_flags |= XFS_DIFLAG_NOATIME;
1114			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
1115			    xfs_inherit_nodump)
1116				di_flags |= XFS_DIFLAG_NODUMP;
1117			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
1118			    xfs_inherit_sync)
1119				di_flags |= XFS_DIFLAG_SYNC;
1120			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
1121			    xfs_inherit_nosymlinks)
1122				di_flags |= XFS_DIFLAG_NOSYMLINKS;
1123			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1124				di_flags |= XFS_DIFLAG_PROJINHERIT;
1125			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
1126			    xfs_inherit_nodefrag)
1127				di_flags |= XFS_DIFLAG_NODEFRAG;
1128			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
1129				di_flags |= XFS_DIFLAG_FILESTREAM;
1130			ip->i_d.di_flags |= di_flags;
1131		}
1132		/* FALLTHROUGH */
1133	case S_IFLNK:
1134		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
1135		ip->i_df.if_flags = XFS_IFEXTENTS;
1136		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
1137		ip->i_df.if_u1.if_extents = NULL;
1138		break;
1139	default:
1140		ASSERT(0);
1141	}
1142	/*
1143	 * Attribute fork settings for new inode.
1144	 */
1145	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1146	ip->i_d.di_anextents = 0;
1147
1148	/*
1149	 * Log the new values stuffed into the inode.
1150	 */
1151	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1152	xfs_trans_log_inode(tp, ip, flags);
1153
 1154	/* now that we have an i_mode we can set up inode ops and unlock */
1155	xfs_setup_inode(ip);
1156
 1157	/* now that we have set up the vfs inode we can associate the filestream */
1158	if (filestreams) {
1159		error = xfs_filestream_associate(pip, ip);
1160		if (error < 0)
1161			return -error;
1162		if (!error)
1163			xfs_iflags_set(ip, XFS_IFILESTREAM);
1164	}
1165
1166	*ipp = ip;
1167	return 0;
1168}
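
The call_again protocol spelled out in the comment above xfs_ialloc() leaves the retry to the caller: when the first call returns with *call_again set, the caller commits the transaction (keeping the AGI freelist buffer from ialloc_context held across the commit), starts a fresh transaction, and calls in again. A hedged sketch of that caller-side loop, with the allocation and transaction plumbing reduced to hypothetical stubs:

	#include <stdbool.h>

	/* Hypothetical stubs standing in for xfs_ialloc() and the transaction calls;
	 * only the retry shape is the point here. */
	struct txn;
	static int alloc_inode(struct txn **tp, bool *call_again) { *call_again = false; return 0; }
	static int commit_and_restart(struct txn **tp) { return 0; }

	static int create_inode(struct txn **tp)
	{
		bool call_again;
		int error;

		do {
			error = alloc_inode(tp, &call_again);
			if (error)
				return error;
			if (call_again) {
				/* first pass only replenished free inodes; roll and retry */
				error = commit_and_restart(tp);
				if (error)
					return error;
			}
		} while (call_again);
		return 0;
	}
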
1169
1170/*
1171 * Free up the underlying blocks past new_size.  The new size must be smaller
1172 * than the current size.  This routine can be used both for the attribute and
1173 * data fork, and does not modify the inode size, which is left to the caller.
1174 *
1175 * The transaction passed to this routine must have made a permanent log
1176 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1177 * given transaction and start new ones, so make sure everything involved in
1178 * the transaction is tidy before calling here.  Some transaction will be
1179 * returned to the caller to be committed.  The incoming transaction must
1180 * already include the inode, and both inode locks must be held exclusively.
1181 * The inode must also be "held" within the transaction.  On return the inode
1182 * will be "held" within the returned transaction.  This routine does NOT
1183 * require any disk space to be reserved for it within the transaction.
1184 *
1185 * If we get an error, we must return with the inode locked and linked into the
1186 * current transaction. This keeps things simple for the higher level code,
1187 * because it always knows that the inode is locked and held in the transaction
1188 * that returns to it whether errors occur or not.  We don't mark the inode
1189 * dirty on error so that transactions can be easily aborted if possible.
1190 */
1191int
1192xfs_itruncate_extents(
1193	struct xfs_trans	**tpp,
1194	struct xfs_inode	*ip,
1195	int			whichfork,
1196	xfs_fsize_t		new_size)
1197{
1198	struct xfs_mount	*mp = ip->i_mount;
1199	struct xfs_trans	*tp = *tpp;
1200	struct xfs_trans	*ntp;
1201	xfs_bmap_free_t		free_list;
1202	xfs_fsblock_t		first_block;
1203	xfs_fileoff_t		first_unmap_block;
1204	xfs_fileoff_t		last_block;
1205	xfs_filblks_t		unmap_len;
1206	int			committed;
1207	int			error = 0;
1208	int			done = 0;
1209
1210	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
1211	ASSERT(new_size <= XFS_ISIZE(ip));
1212	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1213	ASSERT(ip->i_itemp != NULL);
1214	ASSERT(ip->i_itemp->ili_lock_flags == 0);
1215	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1216
1217	trace_xfs_itruncate_extents_start(ip, new_size);
1218
1219	/*
1220	 * Since it is possible for space to become allocated beyond
1221	 * the end of the file (in a crash where the space is allocated
1222	 * but the inode size is not yet updated), simply remove any
1223	 * blocks which show up between the new EOF and the maximum
1224	 * possible file size.  If the first block to be removed is
 1225	 * beyond the maximum file size (i.e. it is the same as last_block),
1226	 * then there is nothing to do.
1227	 */
1228	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1229	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
1230	if (first_unmap_block == last_block)
1231		return 0;
1232
1233	ASSERT(first_unmap_block < last_block);
1234	unmap_len = last_block - first_unmap_block + 1;
1235	while (!done) {
1236		xfs_bmap_init(&free_list, &first_block);
1237		error = xfs_bunmapi(tp, ip,
1238				    first_unmap_block, unmap_len,
1239				    xfs_bmapi_aflag(whichfork),
1240				    XFS_ITRUNC_MAX_EXTENTS,
1241				    &first_block, &free_list,
1242				    &done);
1243		if (error)
1244			goto out_bmap_cancel;
1245
1246		/*
1247		 * Duplicate the transaction that has the permanent
1248		 * reservation and commit the old transaction.
1249		 */
1250		error = xfs_bmap_finish(&tp, &free_list, &committed);
1251		if (committed)
1252			xfs_trans_ijoin(tp, ip, 0);
1253		if (error)
1254			goto out_bmap_cancel;
1255
1256		if (committed) {
1257			/*
1258			 * Mark the inode dirty so it will be logged and
1259			 * moved forward in the log as part of every commit.
1260			 */
1261			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1262		}
1263
1264		ntp = xfs_trans_dup(tp);
1265		error = xfs_trans_commit(tp, 0);
1266		tp = ntp;
1267
1268		xfs_trans_ijoin(tp, ip, 0);
1269
1270		if (error)
1271			goto out;
1272
1273		/*
1274		 * Transaction commit worked ok so we can drop the extra ticket
1275		 * reference that we gained in xfs_trans_dup()
1276		 */
1277		xfs_log_ticket_put(tp->t_ticket);
1278		error = xfs_trans_reserve(tp, 0,
1279					XFS_ITRUNCATE_LOG_RES(mp), 0,
1280					XFS_TRANS_PERM_LOG_RES,
1281					XFS_ITRUNCATE_LOG_COUNT);
1282		if (error)
1283			goto out;
1284	}
1285
1286	/*
1287	 * Always re-log the inode so that our permanent transaction can keep
1288	 * on rolling it forward in the log.
1289	 */
1290	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1291
1292	trace_xfs_itruncate_extents_end(ip, new_size);
1293
1294out:
1295	*tpp = tp;
1296	return error;
1297out_bmap_cancel:
1298	/*
1299	 * If the bunmapi call encounters an error, return to the caller where
1300	 * the transaction can be properly aborted.  We just need to make sure
1301	 * we're not holding any resources that we were not when we came in.
1302	 */
1303	xfs_bmap_cancel(&free_list);
1304	goto out;
1305}
1306
1307/*
1308 * This is called when the inode's link count goes to 0.
1309 * We place the on-disk inode on a list in the AGI.  It
1310 * will be pulled from this list when the inode is freed.
1311 */
1312int
1313xfs_iunlink(
1314	xfs_trans_t	*tp,
1315	xfs_inode_t	*ip)
1316{
1317	xfs_mount_t	*mp;
1318	xfs_agi_t	*agi;
1319	xfs_dinode_t	*dip;
1320	xfs_buf_t	*agibp;
1321	xfs_buf_t	*ibp;
1322	xfs_agino_t	agino;
1323	short		bucket_index;
1324	int		offset;
1325	int		error;
1326
1327	ASSERT(ip->i_d.di_nlink == 0);
1328	ASSERT(ip->i_d.di_mode != 0);
1329
1330	mp = tp->t_mountp;
1331
1332	/*
1333	 * Get the agi buffer first.  It ensures lock ordering
1334	 * on the list.
1335	 */
1336	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
1337	if (error)
1338		return error;
1339	agi = XFS_BUF_TO_AGI(agibp);
1340
1341	/*
1342	 * Get the index into the agi hash table for the
1343	 * list this inode will go on.
1344	 */
1345	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1346	ASSERT(agino != 0);
1347	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1348	ASSERT(agi->agi_unlinked[bucket_index]);
1349	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1350
1351	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
1352		/*
1353		 * There is already another inode in the bucket we need
1354		 * to add ourselves to.  Add us at the front of the list.
1355		 * Here we put the head pointer into our next pointer,
1356		 * and then we fall through to point the head at us.
1357		 */
1358		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
1359		if (error)
1360			return error;
1361
1362		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
1363		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1364		offset = ip->i_imap.im_boffset +
1365			offsetof(xfs_dinode_t, di_next_unlinked);
1366		xfs_trans_inode_buf(tp, ibp);
1367		xfs_trans_log_buf(tp, ibp, offset,
1368				  (offset + sizeof(xfs_agino_t) - 1));
1369		xfs_inobp_check(mp, ibp);
1370	}
1371
1372	/*
1373	 * Point the bucket head pointer at the inode being inserted.
1374	 */
1375	ASSERT(agino != 0);
1376	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1377	offset = offsetof(xfs_agi_t, agi_unlinked) +
1378		(sizeof(xfs_agino_t) * bucket_index);
1379	xfs_trans_log_buf(tp, agibp, offset,
1380			  (offset + sizeof(xfs_agino_t) - 1));
1381	return 0;
1382}
1383
1384/*
1385 * Pull the on-disk inode from the AGI unlinked list.
1386 */
1387STATIC int
1388xfs_iunlink_remove(
1389	xfs_trans_t	*tp,
1390	xfs_inode_t	*ip)
1391{
1392	xfs_ino_t	next_ino;
1393	xfs_mount_t	*mp;
1394	xfs_agi_t	*agi;
1395	xfs_dinode_t	*dip;
1396	xfs_buf_t	*agibp;
1397	xfs_buf_t	*ibp;
1398	xfs_agnumber_t	agno;
1399	xfs_agino_t	agino;
1400	xfs_agino_t	next_agino;
1401	xfs_buf_t	*last_ibp;
1402	xfs_dinode_t	*last_dip = NULL;
1403	short		bucket_index;
1404	int		offset, last_offset = 0;
1405	int		error;
1406
1407	mp = tp->t_mountp;
1408	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1409
1410	/*
1411	 * Get the agi buffer first.  It ensures lock ordering
1412	 * on the list.
1413	 */
1414	error = xfs_read_agi(mp, tp, agno, &agibp);
1415	if (error)
1416		return error;
1417
1418	agi = XFS_BUF_TO_AGI(agibp);
1419
1420	/*
1421	 * Get the index into the agi hash table for the
1422	 * list this inode will go on.
1423	 */
1424	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1425	ASSERT(agino != 0);
1426	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1427	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
1428	ASSERT(agi->agi_unlinked[bucket_index]);
1429
1430	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
1431		/*
1432		 * We're at the head of the list.  Get the inode's
1433		 * on-disk buffer to see if there is anyone after us
1434		 * on the list.  Only modify our next pointer if it
1435		 * is not already NULLAGINO.  This saves us the overhead
1436		 * of dealing with the buffer when there is no need to
1437		 * change it.
1438		 */
1439		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
1440		if (error) {
1441			xfs_warn(mp, "%s: xfs_itobp() returned error %d.",
1442				__func__, error);
1443			return error;
1444		}
1445		next_agino = be32_to_cpu(dip->di_next_unlinked);
1446		ASSERT(next_agino != 0);
1447		if (next_agino != NULLAGINO) {
1448			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1449			offset = ip->i_imap.im_boffset +
1450				offsetof(xfs_dinode_t, di_next_unlinked);
1451			xfs_trans_inode_buf(tp, ibp);
1452			xfs_trans_log_buf(tp, ibp, offset,
1453					  (offset + sizeof(xfs_agino_t) - 1));
1454			xfs_inobp_check(mp, ibp);
1455		} else {
1456			xfs_trans_brelse(tp, ibp);
1457		}
1458		/*
1459		 * Point the bucket head pointer at the next inode.
1460		 */
1461		ASSERT(next_agino != 0);
1462		ASSERT(next_agino != agino);
1463		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
1464		offset = offsetof(xfs_agi_t, agi_unlinked) +
1465			(sizeof(xfs_agino_t) * bucket_index);
1466		xfs_trans_log_buf(tp, agibp, offset,
1467				  (offset + sizeof(xfs_agino_t) - 1));
1468	} else {
1469		/*
1470		 * We need to search the list for the inode being freed.
1471		 */
1472		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1473		last_ibp = NULL;
1474		while (next_agino != agino) {
1475			/*
1476			 * If the last inode wasn't the one pointing to
1477			 * us, then release its buffer since we're not
1478			 * going to do anything with it.
1479			 */
1480			if (last_ibp != NULL) {
1481				xfs_trans_brelse(tp, last_ibp);
1482			}
1483			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
1484			error = xfs_inotobp(mp, tp, next_ino, &last_dip,
1485					    &last_ibp, &last_offset, 0);
1486			if (error) {
1487				xfs_warn(mp,
1488					"%s: xfs_inotobp() returned error %d.",
1489					__func__, error);
1490				return error;
1491			}
1492			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
1493			ASSERT(next_agino != NULLAGINO);
1494			ASSERT(next_agino != 0);
1495		}
1496		/*
1497		 * Now last_ibp points to the buffer previous to us on
1498		 * the unlinked list.  Pull us from the list.
1499		 */
1500		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
1501		if (error) {
1502			xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.",
1503				__func__, error);
1504			return error;
1505		}
1506		next_agino = be32_to_cpu(dip->di_next_unlinked);
1507		ASSERT(next_agino != 0);
1508		ASSERT(next_agino != agino);
1509		if (next_agino != NULLAGINO) {
1510			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1511			offset = ip->i_imap.im_boffset +
1512				offsetof(xfs_dinode_t, di_next_unlinked);
1513			xfs_trans_inode_buf(tp, ibp);
1514			xfs_trans_log_buf(tp, ibp, offset,
1515					  (offset + sizeof(xfs_agino_t) - 1));
1516			xfs_inobp_check(mp, ibp);
1517		} else {
1518			xfs_trans_brelse(tp, ibp);
1519		}
1520		/*
1521		 * Point the previous inode on the list to the next inode.
1522		 */
1523		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
1524		ASSERT(next_agino != 0);
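		/*
		 * last_offset is the byte offset of the previous inode within
		 * last_ibp, as returned by xfs_inotobp() above.
		 */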
1525		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
1526		xfs_trans_inode_buf(tp, last_ibp);
1527		xfs_trans_log_buf(tp, last_ibp, offset,
1528				  (offset + sizeof(xfs_agino_t) - 1));
1529		xfs_inobp_check(mp, last_ibp);
1530	}
1531	return 0;
1532}
1533
1534/*
1535 * A big issue when freeing the inode cluster is that we _cannot_ skip any
1536 * inodes that are in memory - they all must be marked stale and attached to
1537 * the cluster buffer.
1538 */
1539STATIC int
1540xfs_ifree_cluster(
1541	xfs_inode_t	*free_ip,
1542	xfs_trans_t	*tp,
1543	xfs_ino_t	inum)
1544{
1545	xfs_mount_t		*mp = free_ip->i_mount;
1546	int			blks_per_cluster;
1547	int			nbufs;
1548	int			ninodes;
1549	int			i, j;
1550	xfs_daddr_t		blkno;
1551	xfs_buf_t		*bp;
1552	xfs_inode_t		*ip;
1553	xfs_inode_log_item_t	*iip;
1554	xfs_log_item_t		*lip;
1555	struct xfs_perag	*pag;
1556
1557	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
1558	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
1559		blks_per_cluster = 1;
1560		ninodes = mp->m_sb.sb_inopblock;
1561		nbufs = XFS_IALLOC_BLOCKS(mp);
1562	} else {
1563		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
1564					mp->m_sb.sb_blocksize;
1565		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
1566		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
1567	}
1568
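	/*
	 * Walk the inode chunk one cluster buffer at a time: nbufs buffers,
	 * each covering blks_per_cluster blocks and ninodes inodes.
	 */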
1569	for (j = 0; j < nbufs; j++, inum += ninodes) {
1570		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
1571					 XFS_INO_TO_AGBNO(mp, inum));
1572
1573		/*
1574		 * We obtain and lock the backing buffer first in the process
1575		 * here, as we have to ensure that any dirty inode that we
1576		 * can't get the flush lock on is attached to the buffer.
1577		 * If we scan the in-memory inodes first, then buffer IO can
1578		 * complete before we get a lock on it, and hence we may fail
1579		 * to mark all the active inodes on the buffer stale.
1580		 */
1581		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
1582					mp->m_bsize * blks_per_cluster, 0);
1583
1584		if (!bp)
1585			return ENOMEM;
1586		/*
1587		 * Walk the inodes already attached to the buffer and mark them
1588		 * stale. These will all have the flush locks held, so an
1589		 * in-memory inode walk can't lock them. By marking them all
1590		 * stale first, we will not attempt to lock them in the loop
1591		 * below as the XFS_ISTALE flag will be set.
1592		 */
1593		lip = bp->b_fspriv;
1594		while (lip) {
1595			if (lip->li_type == XFS_LI_INODE) {
1596				iip = (xfs_inode_log_item_t *)lip;
1597				ASSERT(iip->ili_logged == 1);
1598				lip->li_cb = xfs_istale_done;
1599				xfs_trans_ail_copy_lsn(mp->m_ail,
1600							&iip->ili_flush_lsn,
1601							&iip->ili_item.li_lsn);
1602				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
1603			}
1604			lip = lip->li_bio_list;
1605		}
1606
1607
1608		/*
1609		 * For each inode in memory attempt to add it to the inode
1610		 * buffer and set it up for being staled on buffer IO
1611		 * completion.  This is safe as we've locked out tail pushing
1612		 * and flushing by locking the buffer.
1613		 *
1614		 * We have already marked every inode that was part of a
1615		 * transaction stale above, which means there is no point in
1616		 * even trying to lock them.
1617		 */
1618		for (i = 0; i < ninodes; i++) {
1619retry:
1620			rcu_read_lock();
1621			ip = radix_tree_lookup(&pag->pag_ici_root,
1622					XFS_INO_TO_AGINO(mp, (inum + i)));
1623
1624			/* Inode not in memory, nothing to do */
1625			if (!ip) {
1626				rcu_read_unlock();
1627				continue;
1628			}
1629
1630			/*
1631			 * because this is an RCU protected lookup, we could
1632			 * find a recently freed or even reallocated inode
1633			 * during the lookup. We need to check under the
1634			 * i_flags_lock for a valid inode here. Skip it if it
1635			 * is not valid, the wrong inode or stale.
1636			 */
1637			spin_lock(&ip->i_flags_lock);
1638			if (ip->i_ino != inum + i ||
1639			    __xfs_iflags_test(ip, XFS_ISTALE)) {
1640				spin_unlock(&ip->i_flags_lock);
1641				rcu_read_unlock();
1642				continue;
1643			}
1644			spin_unlock(&ip->i_flags_lock);
1645
1646			/*
1647			 * Don't try to lock/unlock the current inode, but we
1648			 * _cannot_ skip the other inodes that we did not find
1649			 * in the list attached to the buffer and are not
1650			 * already marked stale. If we can't lock it, back off
1651			 * and retry.
1652			 */
1653			if (ip != free_ip &&
1654			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
1655				rcu_read_unlock();
1656				delay(1);
1657				goto retry;
1658			}
1659			rcu_read_unlock();
1660
1661			xfs_iflock(ip);
1662			xfs_iflags_set(ip, XFS_ISTALE);
1663
1664			/*
1665			 * we don't need to attach clean inodes or those only
1666			 * with unlogged changes (which we throw away, anyway).
1667			 */
1668			iip = ip->i_itemp;
1669			if (!iip || xfs_inode_clean(ip)) {
1670				ASSERT(ip != free_ip);
1671				xfs_ifunlock(ip);
1672				xfs_iunlock(ip, XFS_ILOCK_EXCL);
1673				continue;
1674			}
1675
1676			iip->ili_last_fields = iip->ili_fields;
1677			iip->ili_fields = 0;
1678			iip->ili_logged = 1;
1679			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
1680						&iip->ili_item.li_lsn);
1681
1682			xfs_buf_attach_iodone(bp, xfs_istale_done,
1683						  &iip->ili_item);
1684
1685			if (ip != free_ip)
1686				xfs_iunlock(ip, XFS_ILOCK_EXCL);
1687		}
1688
1689		xfs_trans_stale_inode_buf(tp, bp);
1690		xfs_trans_binval(tp, bp);
1691	}
1692
1693	xfs_perag_put(pag);
1694	return 0;
1695}
1696
1697/*
1698 * This is called to return an inode to the inode free list.
1699 * The inode should already be truncated to 0 length and have
1700 * no pages associated with it.  This routine also assumes that
1701 * the inode is already a part of the transaction.
1702 *
1703 * The on-disk copy of the inode will have been added to the list
1704 * of unlinked inodes in the AGI. We need to remove the inode from
1705 * that list atomically with respect to freeing it here.
1706 */
1707int
1708xfs_ifree(
1709	xfs_trans_t	*tp,
1710	xfs_inode_t	*ip,
1711	xfs_bmap_free_t	*flist)
1712{
1713	int			error;
1714	int			delete;
1715	xfs_ino_t		first_ino;
1716	xfs_dinode_t    	*dip;
1717	xfs_buf_t       	*ibp;
1718
1719	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1720	ASSERT(ip->i_d.di_nlink == 0);
1721	ASSERT(ip->i_d.di_nextents == 0);
1722	ASSERT(ip->i_d.di_anextents == 0);
1723	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
1724	ASSERT(ip->i_d.di_nblocks == 0);
1725
1726	/*
1727	 * Pull the on-disk inode from the AGI unlinked list.
1728	 */
1729	error = xfs_iunlink_remove(tp, ip);
1730	if (error != 0) {
1731		return error;
1732	}
1733
1734	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
1735	if (error != 0) {
1736		return error;
1737	}
1738	ip->i_d.di_mode = 0;		/* mark incore inode as free */
1739	ip->i_d.di_flags = 0;
1740	ip->i_d.di_dmevmask = 0;
1741	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
1742	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
1743	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1744	/*
1745	 * Bump the generation count so no one will be confused
1746	 * by reincarnations of this inode.
1747	 */
1748	ip->i_d.di_gen++;
1749
1750	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1751
1752	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0);
1753	if (error)
1754		return error;
1755
1756	/*
1757	 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat
1758	 * from picking up this inode when it is reclaimed (its incore state
1759	 * initialized but not flushed to disk yet). The in-core di_mode is
1760	 * already cleared and a corresponding transaction logged.
1761	 * The hack here just synchronizes the in-core to on-disk
1762	 * di_mode value in advance before the actual inode sync to disk.
1763	 * This is OK because the inode is already unlinked and would never
1764	 * change its di_mode again for this inode generation.
1765	 * This is a temporary hack that would require a proper fix
1766	 * in the future.
1767	 */
1768	dip->di_mode = 0;
1769
1770	if (delete) {
1771		error = xfs_ifree_cluster(ip, tp, first_ino);
1772	}
1773
1774	return error;
1775}
1776
1777/*
1778 * Reallocate the space for if_broot based on the number of records
1779 * being added or deleted as indicated in rec_diff.  Move the records
1780 * and pointers in if_broot to fit the new size.  When shrinking this
1781 * will eliminate holes between the records and pointers created by
1782 * the caller.  When growing this will create holes to be filled in
1783 * by the caller.
1784 *
1785 * The caller must not request to add more records than would fit in
1786 * the on-disk inode root.  If the if_broot is currently NULL, then
1787 * if we are adding records one will be allocated.  The caller must also
1788 * not request that the number of records go below zero, although
1789 * it can go to zero.
1790 *
1791 * ip -- the inode whose if_broot area is changing
1792 * ext_diff -- the change in the number of records, positive or negative,
1793 *	 requested for the if_broot array.
1794 */
1795void
1796xfs_iroot_realloc(
1797	xfs_inode_t		*ip,
1798	int			rec_diff,
1799	int			whichfork)
1800{
1801	struct xfs_mount	*mp = ip->i_mount;
1802	int			cur_max;
1803	xfs_ifork_t		*ifp;
1804	struct xfs_btree_block	*new_broot;
1805	int			new_max;
1806	size_t			new_size;
1807	char			*np;
1808	char			*op;
1809
1810	/*
1811	 * Handle the degenerate case quietly.
1812	 */
1813	if (rec_diff == 0) {
1814		return;
1815	}
1816
1817	ifp = XFS_IFORK_PTR(ip, whichfork);
1818	if (rec_diff > 0) {
1819		/*
1820		 * If there wasn't any memory allocated before, just
1821		 * allocate it now and get out.
1822		 */
1823		if (ifp->if_broot_bytes == 0) {
1824			new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
1825			ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
1826			ifp->if_broot_bytes = (int)new_size;
1827			return;
1828		}
1829
1830		/*
1831		 * If there is already an existing if_broot, then we need
1832		 * to realloc() it and shift the pointers to their new
1833		 * location.  The records don't change location because
1834		 * they are kept butted up against the btree block header.
1835		 */
1836		cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
1837		new_max = cur_max + rec_diff;
1838		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
1839		ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
1840				(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
1841				KM_SLEEP | KM_NOFS);
1842		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
1843						     ifp->if_broot_bytes);
1844		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
1845						     (int)new_size);
1846		ifp->if_broot_bytes = (int)new_size;
1847		ASSERT(ifp->if_broot_bytes <=
1848			XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
1849		memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
1850		return;
1851	}
1852
1853	/*
1854	 * rec_diff is less than 0.  In this case, we are shrinking the
1855	 * if_broot buffer.  It must already exist.  If we go to zero
1856	 * records, just get rid of the root and clear the status bit.
1857	 */
1858	ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
1859	cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
1860	new_max = cur_max + rec_diff;
1861	ASSERT(new_max >= 0);
1862	if (new_max > 0)
1863		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
1864	else
1865		new_size = 0;
1866	if (new_size > 0) {
1867		new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
1868		/*
1869		 * First copy over the btree block header.
1870		 */
1871		memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN);
1872	} else {
1873		new_broot = NULL;
1874		ifp->if_flags &= ~XFS_IFBROOT;
1875	}
1876
1877	/*
1878	 * Only copy the records and pointers if there are any.
1879	 */
1880	if (new_max > 0) {
1881		/*
1882		 * First copy the records.
1883		 */
1884		op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
1885		np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
1886		memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
1887
1888		/*
1889		 * Then copy the pointers.
1890		 */
1891		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
1892						     ifp->if_broot_bytes);
1893		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
1894						     (int)new_size);
1895		memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
1896	}
1897	kmem_free(ifp->if_broot);
1898	ifp->if_broot = new_broot;
1899	ifp->if_broot_bytes = (int)new_size;
1900	ASSERT(ifp->if_broot_bytes <=
1901		XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
1902	return;
1903}
1904
1905
1906/*
1907 * This is called when the amount of space needed for if_data
1908 * is increased or decreased.  The change in size is indicated by
1909 * the number of bytes that need to be added or deleted in the
1910 * byte_diff parameter.
1911 *
1912 * If the amount of space needed has decreased below the size of the
1913 * inline buffer, then switch to using the inline buffer.  Otherwise,
1914 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
1915 * to what is needed.
1916 *
1917 * ip -- the inode whose if_data area is changing
1918 * byte_diff -- the change in the number of bytes, positive or negative,
1919 *	 requested for the if_data array.
1920 */
1921void
1922xfs_idata_realloc(
1923	xfs_inode_t	*ip,
1924	int		byte_diff,
1925	int		whichfork)
1926{
1927	xfs_ifork_t	*ifp;
1928	int		new_size;
1929	int		real_size;
1930
1931	if (byte_diff == 0) {
1932		return;
1933	}
1934
1935	ifp = XFS_IFORK_PTR(ip, whichfork);
1936	new_size = (int)ifp->if_bytes + byte_diff;
1937	ASSERT(new_size >= 0);
1938
1939	if (new_size == 0) {
1940		if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
1941			kmem_free(ifp->if_u1.if_data);
1942		}
1943		ifp->if_u1.if_data = NULL;
1944		real_size = 0;
1945	} else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
1946		/*
1947		 * If the valid extents/data can fit in if_inline_ext/data,
1948		 * copy them from the malloc'd vector and free it.
1949		 */
1950		if (ifp->if_u1.if_data == NULL) {
1951			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
1952		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
1953			ASSERT(ifp->if_real_bytes != 0);
1954			memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
1955			      new_size);
1956			kmem_free(ifp->if_u1.if_data);
1957			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
1958		}
1959		real_size = 0;
1960	} else {
1961		/*
1962		 * Stuck with malloc/realloc.
1963		 * For inline data, the underlying buffer must be
1964		 * a multiple of 4 bytes in size so that it can be
1965		 * logged and stay on word boundaries.  We enforce
1966		 * that here.
1967		 */
1968		real_size = roundup(new_size, 4);
1969		if (ifp->if_u1.if_data == NULL) {
1970			ASSERT(ifp->if_real_bytes == 0);
1971			ifp->if_u1.if_data = kmem_alloc(real_size,
1972							KM_SLEEP | KM_NOFS);
1973		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
1974			/*
1975			 * Only do the realloc if the underlying size
1976			 * is really changing.
1977			 */
1978			if (ifp->if_real_bytes != real_size) {
1979				ifp->if_u1.if_data =
1980					kmem_realloc(ifp->if_u1.if_data,
1981							real_size,
1982							ifp->if_real_bytes,
1983							KM_SLEEP | KM_NOFS);
1984			}
1985		} else {
1986			ASSERT(ifp->if_real_bytes == 0);
1987			ifp->if_u1.if_data = kmem_alloc(real_size,
1988							KM_SLEEP | KM_NOFS);
1989			memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
1990				ifp->if_bytes);
1991		}
1992	}
1993	ifp->if_real_bytes = real_size;
1994	ifp->if_bytes = new_size;
1995	ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
1996}
1997
1998void
1999xfs_idestroy_fork(
2000	xfs_inode_t	*ip,
2001	int		whichfork)
2002{
2003	xfs_ifork_t	*ifp;
2004
2005	ifp = XFS_IFORK_PTR(ip, whichfork);
2006	if (ifp->if_broot != NULL) {
2007		kmem_free(ifp->if_broot);
2008		ifp->if_broot = NULL;
2009	}
2010
2011	/*
2012	 * If the format is local, then we can't have an extents
2013	 * array so just look for an inline data array.  If we're
2014	 * not local then we may or may not have an extents list,
2015	 * so check and free it up if we do.
2016	 */
2017	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2018		if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2019		    (ifp->if_u1.if_data != NULL)) {
2020			ASSERT(ifp->if_real_bytes != 0);
2021			kmem_free(ifp->if_u1.if_data);
2022			ifp->if_u1.if_data = NULL;
2023			ifp->if_real_bytes = 0;
2024		}
2025	} else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2026		   ((ifp->if_flags & XFS_IFEXTIREC) ||
2027		    ((ifp->if_u1.if_extents != NULL) &&
2028		     (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2029		ASSERT(ifp->if_real_bytes != 0);
2030		xfs_iext_destroy(ifp);
2031	}
2032	ASSERT(ifp->if_u1.if_extents == NULL ||
2033	       ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2034	ASSERT(ifp->if_real_bytes == 0);
2035	if (whichfork == XFS_ATTR_FORK) {
2036		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2037		ip->i_afp = NULL;
2038	}
2039}
2040
2041/*
2042 * This is called to unpin an inode.  The caller must have the inode locked
2043 * in at least shared mode so that the buffer cannot be subsequently pinned
2044 * once someone is waiting for it to be unpinned.
2045 */
2046static void
2047xfs_iunpin(
2048	struct xfs_inode	*ip)
2049{
2050	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2051
2052	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2053
2054	/* Give the log a push to start the unpinning I/O */
2055	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
2056
2057}
2058
2059static void
2060__xfs_iunpin_wait(
2061	struct xfs_inode	*ip)
2062{
2063	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2064	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2065
2066	xfs_iunpin(ip);
2067
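	/*
	 * Sleep on the __XFS_IPINNED_BIT waitqueue until the inode's pin
	 * count drops to zero.
	 */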
2068	do {
2069		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
2070		if (xfs_ipincount(ip))
2071			io_schedule();
2072	} while (xfs_ipincount(ip));
2073	finish_wait(wq, &wait.wait);
2074}
2075
2076void
2077xfs_iunpin_wait(
2078	struct xfs_inode	*ip)
2079{
2080	if (xfs_ipincount(ip))
2081		__xfs_iunpin_wait(ip);
2082}
2083
2084/*
2085 * xfs_iextents_copy()
2086 *
2087 * This is called to copy the REAL extents (as opposed to the delayed
2088 * allocation extents) from the inode into the given buffer.  It
2089 * returns the number of bytes copied into the buffer.
2090 *
2091 * If there are no delayed allocation extents, then we can just
2092 * memcpy() the extents into the buffer.  Otherwise, we need to
2093 * examine each extent in turn and skip those which are delayed.
2094 */
2095int
2096xfs_iextents_copy(
2097	xfs_inode_t		*ip,
2098	xfs_bmbt_rec_t		*dp,
2099	int			whichfork)
2100{
2101	int			copied;
2102	int			i;
2103	xfs_ifork_t		*ifp;
2104	int			nrecs;
2105	xfs_fsblock_t		start_block;
2106
2107	ifp = XFS_IFORK_PTR(ip, whichfork);
2108	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2109	ASSERT(ifp->if_bytes > 0);
2110
2111	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2112	XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2113	ASSERT(nrecs > 0);
2114
2115	/*
2116	 * There may be delayed allocation extents in the
2117	 * inode, so copy the extents one at a time and skip
2118	 * the delayed ones.  There must be at least one
2119	 * non-delayed extent.
2120	 */
2121	copied = 0;
2122	for (i = 0; i < nrecs; i++) {
2123		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
2124		start_block = xfs_bmbt_get_startblock(ep);
2125		if (isnullstartblock(start_block)) {
2126			/*
2127			 * It's a delayed allocation extent, so skip it.
2128			 */
2129			continue;
2130		}
2131
2132		/* Translate to on disk format */
2133		put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
2134		put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
2135		dp++;
2136		copied++;
2137	}
2138	ASSERT(copied != 0);
2139	xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
2140
2141	return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2142}
2143
2144/*
2145 * Each of the following cases stores data into the same region
2146 * of the on-disk inode, so only one of them can be valid at
2147 * any given time. While it is possible to have conflicting formats
2148 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2149 * in EXTENTS format, this can only happen when the fork has
2150 * changed formats after being modified but before being flushed.
2151 * In these cases, the format always takes precedence, because the
2152 * format indicates the current state of the fork.
2153 */
2154/*ARGSUSED*/
2155STATIC void
2156xfs_iflush_fork(
2157	xfs_inode_t		*ip,
2158	xfs_dinode_t		*dip,
2159	xfs_inode_log_item_t	*iip,
2160	int			whichfork,
2161	xfs_buf_t		*bp)
2162{
2163	char			*cp;
2164	xfs_ifork_t		*ifp;
2165	xfs_mount_t		*mp;
2166#ifdef XFS_TRANS_DEBUG
2167	int			first;
2168#endif
2169	static const short	brootflag[2] =
2170		{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2171	static const short	dataflag[2] =
2172		{ XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2173	static const short	extflag[2] =
2174		{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2175
2176	if (!iip)
2177		return;
2178	ifp = XFS_IFORK_PTR(ip, whichfork);
2179	/*
2180	 * This can happen if we gave up in iformat in an error path,
2181	 * for the attribute fork.
2182	 */
2183	if (!ifp) {
2184		ASSERT(whichfork == XFS_ATTR_FORK);
2185		return;
2186	}
2187	cp = XFS_DFORK_PTR(dip, whichfork);
2188	mp = ip->i_mount;
2189	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2190	case XFS_DINODE_FMT_LOCAL:
2191		if ((iip->ili_fields & dataflag[whichfork]) &&
2192		    (ifp->if_bytes > 0)) {
2193			ASSERT(ifp->if_u1.if_data != NULL);
2194			ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2195			memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2196		}
2197		break;
2198
2199	case XFS_DINODE_FMT_EXTENTS:
2200		ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2201		       !(iip->ili_fields & extflag[whichfork]));
2202		if ((iip->ili_fields & extflag[whichfork]) &&
2203		    (ifp->if_bytes > 0)) {
2204			ASSERT(xfs_iext_get_ext(ifp, 0));
2205			ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2206			(void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2207				whichfork);
2208		}
2209		break;
2210
2211	case XFS_DINODE_FMT_BTREE:
2212		if ((iip->ili_fields & brootflag[whichfork]) &&
2213		    (ifp->if_broot_bytes > 0)) {
2214			ASSERT(ifp->if_broot != NULL);
2215			ASSERT(ifp->if_broot_bytes <=
2216			       (XFS_IFORK_SIZE(ip, whichfork) +
2217				XFS_BROOT_SIZE_ADJ));
2218			xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
2219				(xfs_bmdr_block_t *)cp,
2220				XFS_DFORK_SIZE(dip, mp, whichfork));
2221		}
2222		break;
2223
2224	case XFS_DINODE_FMT_DEV:
2225		if (iip->ili_fields & XFS_ILOG_DEV) {
2226			ASSERT(whichfork == XFS_DATA_FORK);
2227			xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
2228		}
2229		break;
2230
2231	case XFS_DINODE_FMT_UUID:
2232		if (iip->ili_fields & XFS_ILOG_UUID) {
2233			ASSERT(whichfork == XFS_DATA_FORK);
2234			memcpy(XFS_DFORK_DPTR(dip),
2235			       &ip->i_df.if_u2.if_uuid,
2236			       sizeof(uuid_t));
2237		}
2238		break;
2239
2240	default:
2241		ASSERT(0);
2242		break;
2243	}
2244}
2245
2246STATIC int
2247xfs_iflush_cluster(
2248	xfs_inode_t	*ip,
2249	xfs_buf_t	*bp)
2250{
2251	xfs_mount_t		*mp = ip->i_mount;
2252	struct xfs_perag	*pag;
2253	unsigned long		first_index, mask;
2254	unsigned long		inodes_per_cluster;
2255	int			ilist_size;
2256	xfs_inode_t		**ilist;
2257	xfs_inode_t		*iq;
2258	int			nr_found;
2259	int			clcount = 0;
2260	int			bufwasdelwri;
2261	int			i;
2262
2263	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2264
2265	inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
2266	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
2267	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
2268	if (!ilist)
2269		goto out_put;
2270
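	/*
	 * mask rounds the AG-relative inode number down to a cluster
	 * boundary, so first_index is the first inode of the cluster that
	 * ip belongs to.
	 */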
2271	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
2272	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
2273	rcu_read_lock();
2274	/* really need a gang lookup range call here */
2275	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
2276					first_index, inodes_per_cluster);
2277	if (nr_found == 0)
2278		goto out_free;
2279
2280	for (i = 0; i < nr_found; i++) {
2281		iq = ilist[i];
2282		if (iq == ip)
2283			continue;
2284
2285		/*
2286		 * because this is an RCU protected lookup, we could find a
2287		 * recently freed or even reallocated inode during the lookup.
2288		 * We need to check under the i_flags_lock for a valid inode
2289		 * here. Skip it if it is not valid or the wrong inode.
2290		 */
2291		spin_lock(&iq->i_flags_lock);
2292		if (!iq->i_ino ||
2293		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
2294			spin_unlock(&iq->i_flags_lock);
2295			continue;
2296		}
2297		spin_unlock(&iq->i_flags_lock);
2298
2299		/*
2300		 * Do an un-protected check to see if the inode is dirty and
2301		 * is a candidate for flushing.  These checks will be repeated
2302		 * later after the appropriate locks are acquired.
2303		 */
2304		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
2305			continue;
2306
2307		/*
2308		 * Try to get locks.  If any are unavailable or it is pinned,
2309		 * then this inode cannot be flushed and is skipped.
2310		 */
2311
2312		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
2313			continue;
2314		if (!xfs_iflock_nowait(iq)) {
2315			xfs_iunlock(iq, XFS_ILOCK_SHARED);
2316			continue;
2317		}
2318		if (xfs_ipincount(iq)) {
2319			xfs_ifunlock(iq);
2320			xfs_iunlock(iq, XFS_ILOCK_SHARED);
2321			continue;
2322		}
2323
2324		/*
2325		 * arriving here means that this inode can be flushed.  First
2326		 * re-check that it's dirty before flushing.
2327		 */
2328		if (!xfs_inode_clean(iq)) {
2329			int	error;
2330			error = xfs_iflush_int(iq, bp);
2331			if (error) {
2332				xfs_iunlock(iq, XFS_ILOCK_SHARED);
2333				goto cluster_corrupt_out;
2334			}
2335			clcount++;
2336		} else {
2337			xfs_ifunlock(iq);
2338		}
2339		xfs_iunlock(iq, XFS_ILOCK_SHARED);
2340	}
2341
2342	if (clcount) {
2343		XFS_STATS_INC(xs_icluster_flushcnt);
2344		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
2345	}
2346
2347out_free:
2348	rcu_read_unlock();
2349	kmem_free(ilist);
2350out_put:
2351	xfs_perag_put(pag);
2352	return 0;
2353
2354
2355cluster_corrupt_out:
2356	/*
2357	 * Corruption detected in the clustering loop.  Invalidate the
2358	 * inode buffer and shut down the filesystem.
2359	 */
2360	rcu_read_unlock();
2361	/*
2362	 * Clean up the buffer.  If it was delwri, just release it --
2363	 * brelse can handle it with no problems.  If not, shut down the
2364	 * filesystem before releasing the buffer.
2365	 */
2366	bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
2367	if (bufwasdelwri)
2368		xfs_buf_relse(bp);
2369
2370	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2371
2372	if (!bufwasdelwri) {
2373		/*
2374		 * Just like incore_relse: if we have b_iodone functions,
2375		 * mark the buffer as an error and call them.  Otherwise
2376		 * mark it as stale and brelse.
2377		 */
2378		if (bp->b_iodone) {
2379			XFS_BUF_UNDONE(bp);
2380			xfs_buf_stale(bp);
2381			xfs_buf_ioerror(bp, EIO);
2382			xfs_buf_ioend(bp, 0);
2383		} else {
2384			xfs_buf_stale(bp);
2385			xfs_buf_relse(bp);
2386		}
2387	}
2388
2389	/*
2390	 * Unlocks the flush lock
2391	 */
2392	xfs_iflush_abort(iq, false);
2393	kmem_free(ilist);
2394	xfs_perag_put(pag);
2395	return XFS_ERROR(EFSCORRUPTED);
2396}
2397
2398/*
2399 * Flush dirty inode metadata into the backing buffer.
2400 *
2401 * The caller must have the inode lock and the inode flush lock held.  The
2402 * inode lock will still be held upon return to the caller, and the inode
2403 * flush lock will be released after the inode has reached the disk.
2404 *
2405 * The caller must write out the buffer returned in *bpp and release it.
2406 */
2407int
2408xfs_iflush(
2409	struct xfs_inode	*ip,
2410	struct xfs_buf		**bpp)
2411{
2412	struct xfs_mount	*mp = ip->i_mount;
2413	struct xfs_buf		*bp;
2414	struct xfs_dinode	*dip;
2415	int			error;
2416
2417	XFS_STATS_INC(xs_iflush_count);
2418
2419	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2420	ASSERT(xfs_isiflocked(ip));
2421	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
2422	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
2423
2424	*bpp = NULL;
2425
2426	xfs_iunpin_wait(ip);
2427
2428	/*
2429	 * For stale inodes we cannot rely on the backing buffer remaining
2430	 * stale in cache for the remaining life of the stale inode and so
2431	 * xfs_itobp() below may give us a buffer that no longer contains
2432	 * inodes below. We have to check this after ensuring the inode is
2433	 * unpinned so that it is safe to reclaim the stale inode after the
2434	 * flush call.
2435	 */
2436	if (xfs_iflags_test(ip, XFS_ISTALE)) {
2437		xfs_ifunlock(ip);
2438		return 0;
2439	}
2440
2441	/*
2442	 * This may have been unpinned because the filesystem is shutting
2443	 * down forcibly. If that's the case we must not write this inode
2444	 * to disk, because the log record didn't make it to disk.
2445	 *
2446	 * We also have to remove the log item from the AIL in this case,
2447	 * as we wait for an empty AIL as part of the unmount process.
2448	 */
2449	if (XFS_FORCED_SHUTDOWN(mp)) {
2450		error = XFS_ERROR(EIO);
2451		goto abort_out;
2452	}
2453
2454	/*
2455	 * Get the buffer containing the on-disk inode.
2456	 */
2457	error = xfs_itobp(mp, NULL, ip, &dip, &bp, XBF_TRYLOCK);
2458	if (error || !bp) {
2459		xfs_ifunlock(ip);
2460		return error;
2461	}
2462
2463	/*
2464	 * First flush out the inode that xfs_iflush was called with.
2465	 */
2466	error = xfs_iflush_int(ip, bp);
2467	if (error)
2468		goto corrupt_out;
2469
2470	/*
2471	 * If the buffer is pinned then push on the log now so we won't
2472	 * get stuck waiting in the write for too long.
2473	 */
2474	if (xfs_buf_ispinned(bp))
2475		xfs_log_force(mp, 0);
2476
2477	/*
2478	 * inode clustering:
2479	 * see if other inodes can be gathered into this write
2480	 */
2481	error = xfs_iflush_cluster(ip, bp);
2482	if (error)
2483		goto cluster_corrupt_out;
2484
2485	*bpp = bp;
2486	return 0;
2487
2488corrupt_out:
2489	xfs_buf_relse(bp);
2490	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2491cluster_corrupt_out:
2492	error = XFS_ERROR(EFSCORRUPTED);
2493abort_out:
2494	/*
2495	 * Unlocks the flush lock
2496	 */
2497	xfs_iflush_abort(ip, false);
2498	return error;
2499}
2500
2501
2502STATIC int
2503xfs_iflush_int(
2504	xfs_inode_t		*ip,
2505	xfs_buf_t		*bp)
2506{
2507	xfs_inode_log_item_t	*iip;
2508	xfs_dinode_t		*dip;
2509	xfs_mount_t		*mp;
2510#ifdef XFS_TRANS_DEBUG
2511	int			first;
2512#endif
2513
2514	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2515	ASSERT(xfs_isiflocked(ip));
2516	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
2517	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
2518
2519	iip = ip->i_itemp;
2520	mp = ip->i_mount;
2521
2522	/* set *dip = inode's place in the buffer */
2523	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
2524
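	/*
	 * Sanity-check the incore inode against the on-disk copy before we
	 * write it back; any mismatch is treated as in-core corruption and
	 * aborts the flush.
	 */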
2525	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
2526			       mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
2527		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2528			"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
2529			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
2530		goto corrupt_out;
2531	}
2532	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
2533				mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
2534		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2535			"%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
2536			__func__, ip->i_ino, ip, ip->i_d.di_magic);
2537		goto corrupt_out;
2538	}
2539	if (S_ISREG(ip->i_d.di_mode)) {
2540		if (XFS_TEST_ERROR(
2541		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
2542		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
2543		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
2544			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2545				"%s: Bad regular inode %Lu, ptr 0x%p",
2546				__func__, ip->i_ino, ip);
2547			goto corrupt_out;
2548		}
2549	} else if (S_ISDIR(ip->i_d.di_mode)) {
2550		if (XFS_TEST_ERROR(
2551		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
2552		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
2553		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
2554		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
2555			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2556				"%s: Bad directory inode %Lu, ptr 0x%p",
2557				__func__, ip->i_ino, ip);
2558			goto corrupt_out;
2559		}
2560	}
2561	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
2562				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
2563				XFS_RANDOM_IFLUSH_5)) {
2564		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2565			"%s: detected corrupt incore inode %Lu, "
2566			"total extents = %d, nblocks = %Ld, ptr 0x%p",
2567			__func__, ip->i_ino,
2568			ip->i_d.di_nextents + ip->i_d.di_anextents,
2569			ip->i_d.di_nblocks, ip);
2570		goto corrupt_out;
2571	}
2572	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
2573				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
2574		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2575			"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
2576			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
2577		goto corrupt_out;
2578	}
2579	/*
2580	 * bump the flush iteration count, used to detect flushes which
2581	 * postdate a log record during recovery.
2582	 */
2583
2584	ip->i_d.di_flushiter++;
2585
2586	/*
2587	 * Copy the dirty parts of the inode into the on-disk
2588	 * inode.  We always copy out the core of the inode,
2589	 * because if the inode is dirty at all the core must
2590	 * be.
2591	 */
2592	xfs_dinode_to_disk(dip, &ip->i_d);
2593
2594	/* Wrap, we never let the log put out DI_MAX_FLUSH */
2595	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
2596		ip->i_d.di_flushiter = 0;
2597
2598	/*
2599	 * If this is really an old format inode and the superblock version
2600	 * has not been updated to support only new format inodes, then
2601	 * convert back to the old inode format.  If the superblock version
2602	 * has been updated, then make the conversion permanent.
2603	 */
2604	ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
2605	if (ip->i_d.di_version == 1) {
2606		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
2607			/*
2608			 * Convert it back.
2609			 */
2610			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
2611			dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
2612		} else {
2613			/*
2614			 * The superblock version has already been bumped,
2615			 * so just make the conversion to the new inode
2616			 * format permanent.
2617			 */
2618			ip->i_d.di_version = 2;
2619			dip->di_version = 2;
2620			ip->i_d.di_onlink = 0;
2621			dip->di_onlink = 0;
2622			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
2623			memset(&(dip->di_pad[0]), 0,
2624			      sizeof(dip->di_pad));
2625			ASSERT(xfs_get_projid(ip) == 0);
2626		}
2627	}
2628
2629	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
2630	if (XFS_IFORK_Q(ip))
2631		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
2632	xfs_inobp_check(mp, bp);
2633
2634	/*
2635	 * We've recorded everything logged in the inode, so we'd like to clear
2636	 * the ili_fields bits so we don't log and flush things unnecessarily.
2637	 * However, we can't stop logging all this information until the data
2638	 * we've copied into the disk buffer is written to disk.  If we did we
2639	 * might overwrite the copy of the inode in the log with all the data
2640	 * after re-logging only part of it, and in the face of a crash we
2641	 * wouldn't have all the data we need to recover.
2642	 *
2643	 * What we do is move the bits to the ili_last_fields field.  When
2644	 * logging the inode, these bits are moved back to the ili_fields field.
2645	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
2646	 * know that the information those bits represent is permanently on
2647	 * disk.  As long as the flush completes before the inode is logged
2648	 * again, then both ili_fields and ili_last_fields will be cleared.
2649	 *
2650	 * We can play with the ili_fields bits here, because the inode lock
2651	 * must be held exclusively in order to set bits there and the flush
2652	 * lock protects the ili_last_fields bits.  Set ili_logged so the flush
2653	 * done routine can tell whether or not to look in the AIL.  Also, store
2654	 * the current LSN of the inode so that we can tell whether the item has
2655	 * moved in the AIL from xfs_iflush_done().  In order to read the lsn we
2656	 * need the AIL lock, because it is a 64 bit value that cannot be read
2657	 * atomically.
2658	 */
2659	if (iip != NULL && iip->ili_fields != 0) {
2660		iip->ili_last_fields = iip->ili_fields;
2661		iip->ili_fields = 0;
2662		iip->ili_logged = 1;
2663
2664		xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2665					&iip->ili_item.li_lsn);
2666
2667		/*
2668		 * Attach the function xfs_iflush_done to the inode's
2669		 * buffer.  This will remove the inode from the AIL
2670		 * and unlock the inode's flush lock when the inode is
2671		 * completely written to disk.
2672		 */
2673		xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
2674
2675		ASSERT(bp->b_fspriv != NULL);
2676		ASSERT(bp->b_iodone != NULL);
2677	} else {
2678		/*
2679		 * We're flushing an inode which is not in the AIL and has
2680		 * not been logged.  For this case we can immediately drop
2681		 * the inode flush lock because we can avoid the whole
2682		 * AIL state thing.  It's OK to drop the flush lock now,
2683		 * because we've already locked the buffer and to do anything
2684		 * you really need both.
2685		 */
2686		if (iip != NULL) {
2687			ASSERT(iip->ili_logged == 0);
2688			ASSERT(iip->ili_last_fields == 0);
2689			ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
2690		}
2691		xfs_ifunlock(ip);
2692	}
2693
2694	return 0;
2695
2696corrupt_out:
2697	return XFS_ERROR(EFSCORRUPTED);
2698}
2699
2700/*
2701 * Return a pointer to the extent record at file index idx.
2702 */
2703xfs_bmbt_rec_host_t *
2704xfs_iext_get_ext(
2705	xfs_ifork_t	*ifp,		/* inode fork pointer */
2706	xfs_extnum_t	idx)		/* index of target extent */
2707{
2708	ASSERT(idx >= 0);
2709	ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
2710
2711	if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
2712		return ifp->if_u1.if_ext_irec->er_extbuf;
2713	} else if (ifp->if_flags & XFS_IFEXTIREC) {
2714		xfs_ext_irec_t	*erp;		/* irec pointer */
2715		int		erp_idx = 0;	/* irec index */
2716		xfs_extnum_t	page_idx = idx;	/* ext index in target list */
2717
2718		erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
2719		return &erp->er_extbuf[page_idx];
2720	} else if (ifp->if_bytes) {
2721		return &ifp->if_u1.if_extents[idx];
2722	} else {
2723		return NULL;
2724	}
2725}
2726
2727/*
2728 * Insert new item(s) into the extent records for incore inode
2729 * fork 'ifp'.  'count' new items are inserted at index 'idx'.
2730 */
2731void
2732xfs_iext_insert(
2733	xfs_inode_t	*ip,		/* incore inode pointer */
2734	xfs_extnum_t	idx,		/* starting index of new items */
2735	xfs_extnum_t	count,		/* number of inserted items */
2736	xfs_bmbt_irec_t	*new,		/* items to insert */
2737	int		state)		/* type of extent conversion */
2738{
2739	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
2740	xfs_extnum_t	i;		/* extent record index */
2741
2742	trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
2743
2744	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
2745	xfs_iext_add(ifp, idx, count);
2746	for (i = idx; i < idx + count; i++, new++)
2747		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
2748}
2749
2750/*
2751 * This is called when the amount of space required for incore file
2752 * extents needs to be increased. The ext_diff parameter stores the
2753 * number of new extents being added and the idx parameter contains
2754 * the extent index where the new extents will be added. If the new
2755 * extents are being appended, then we just need to (re)allocate and
2756 * initialize the space. Otherwise, if the new extents are being
2757 * inserted into the middle of the existing entries, a bit more work
2758 * is required to make room for the new extents to be inserted. The
2759 * caller is responsible for filling in the new extent entries upon
2760 * return.
2761 */
2762void
2763xfs_iext_add(
2764	xfs_ifork_t	*ifp,		/* inode fork pointer */
2765	xfs_extnum_t	idx,		/* index to begin adding exts */
2766	int		ext_diff)	/* number of extents to add */
2767{
2768	int		byte_diff;	/* new bytes being added */
2769	int		new_size;	/* size of extents after adding */
2770	xfs_extnum_t	nextents;	/* number of extents in file */
2771
2772	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2773	ASSERT((idx >= 0) && (idx <= nextents));
2774	byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
2775	new_size = ifp->if_bytes + byte_diff;
2776	/*
2777	 * If the new number of extents (nextents + ext_diff)
2778	 * fits inside the inode, then continue to use the inline
2779	 * extent buffer.
2780	 */
2781	if (nextents + ext_diff <= XFS_INLINE_EXTS) {
2782		if (idx < nextents) {
2783			memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
2784				&ifp->if_u2.if_inline_ext[idx],
2785				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
2786			memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
2787		}
2788		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
2789		ifp->if_real_bytes = 0;
2790	}
2791	/*
2792	 * Otherwise use a linear (direct) extent list.
2793	 * If the extents are currently inside the inode,
2794	 * xfs_iext_realloc_direct will switch us from
2795	 * inline to direct extent allocation mode.
2796	 */
2797	else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
2798		xfs_iext_realloc_direct(ifp, new_size);
2799		if (idx < nextents) {
2800			memmove(&ifp->if_u1.if_extents[idx + ext_diff],
2801				&ifp->if_u1.if_extents[idx],
2802				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
2803			memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
2804		}
2805	}
2806	/* Indirection array */
2807	else {
2808		xfs_ext_irec_t	*erp;
2809		int		erp_idx = 0;
2810		int		page_idx = idx;
2811
2812		ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
2813		if (ifp->if_flags & XFS_IFEXTIREC) {
2814			erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
2815		} else {
2816			xfs_iext_irec_init(ifp);
2817			ASSERT(ifp->if_flags & XFS_IFEXTIREC);
2818			erp = ifp->if_u1.if_ext_irec;
2819		}
2820		/* Extents fit in target extent page */
2821		if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
2822			if (page_idx < erp->er_extcount) {
2823				memmove(&erp->er_extbuf[page_idx + ext_diff],
2824					&erp->er_extbuf[page_idx],
2825					(erp->er_extcount - page_idx) *
2826					sizeof(xfs_bmbt_rec_t));
2827				memset(&erp->er_extbuf[page_idx], 0, byte_diff);
2828			}
2829			erp->er_extcount += ext_diff;
2830			xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
2831		}
2832		/* Insert a new extent page */
2833		else if (erp) {
2834			xfs_iext_add_indirect_multi(ifp,
2835				erp_idx, page_idx, ext_diff);
2836		}
2837		/*
2838		 * If extent(s) are being appended to the last page in
2839		 * the indirection array and the new extent(s) don't fit
2840		 * in the page, then erp is NULL and erp_idx is set to
2841		 * the next index needed in the indirection array.
2842		 */
2843		else {
2844			int	count = ext_diff;
2845
2846			while (count) {
2847				erp = xfs_iext_irec_new(ifp, erp_idx);
2848				erp->er_extcount = count;
2849				count -= MIN(count, (int)XFS_LINEAR_EXTS);
2850				if (count) {
2851					erp_idx++;
2852				}
2853			}
2854		}
2855	}
2856	ifp->if_bytes = new_size;
2857}
2858
2859/*
2860 * This is called when incore extents are being added to the indirection
2861 * array and the new extents do not fit in the target extent list. The
2862 * erp_idx parameter contains the irec index for the target extent list
2863 * in the indirection array, and the idx parameter contains the extent
2864 * index within the list. The number of extents being added is stored
2865 * in the count parameter.
2866 *
2867 *    |-------|   |-------|
2868 *    |       |   |       |    idx - number of extents before idx
2869 *    |  idx  |   | count |
2870 *    |       |   |       |    count - number of extents being inserted at idx
2871 *    |-------|   |-------|
2872 *    | count |   | nex2  |    nex2 - number of extents after idx + count
2873 *    |-------|   |-------|
2874 */
2875void
2876xfs_iext_add_indirect_multi(
2877	xfs_ifork_t	*ifp,			/* inode fork pointer */
2878	int		erp_idx,		/* target extent irec index */
2879	xfs_extnum_t	idx,			/* index within target list */
2880	int		count)			/* new extents being added */
2881{
2882	int		byte_diff;		/* new bytes being added */
2883	xfs_ext_irec_t	*erp;			/* pointer to irec entry */
2884	xfs_extnum_t	ext_diff;		/* number of extents to add */
2885	xfs_extnum_t	ext_cnt;		/* new extents still needed */
2886	xfs_extnum_t	nex2;			/* extents after idx + count */
2887	xfs_bmbt_rec_t	*nex2_ep = NULL;	/* temp list for nex2 extents */
2888	int		nlists;			/* number of irec's (lists) */
2889
2890	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
2891	erp = &ifp->if_u1.if_ext_irec[erp_idx];
2892	nex2 = erp->er_extcount - idx;
2893	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
2894
2895	/*
2896	 * Save second part of target extent list
2897	 * (all extents at and after idx). */
2898	if (nex2) {
2899		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
2900		nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
2901		memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
2902		erp->er_extcount -= nex2;
2903		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
2904		memset(&erp->er_extbuf[idx], 0, byte_diff);
2905	}
2906
2907	/*
2908	 * Add the new extents to the end of the target
2909	 * list, then allocate new irec record(s) and
2910	 * extent buffer(s) as needed to store the rest
2911	 * of the new extents.
2912	 */
2913	ext_cnt = count;
2914	ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
2915	if (ext_diff) {
2916		erp->er_extcount += ext_diff;
2917		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
2918		ext_cnt -= ext_diff;
2919	}
2920	while (ext_cnt) {
2921		erp_idx++;
2922		erp = xfs_iext_irec_new(ifp, erp_idx);
2923		ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
2924		erp->er_extcount = ext_diff;
2925		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
2926		ext_cnt -= ext_diff;
2927	}
2928
2929	/* Add nex2 extents back to indirection array */
2930	if (nex2) {
2931		xfs_extnum_t	ext_avail;
2932		int		i;
2933
2934		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
2935		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
2936		i = 0;
2937		/*
2938		 * If nex2 extents fit in the current page, append
2939		 * nex2_ep after the new extents.
2940		 */
2941		if (nex2 <= ext_avail) {
2942			i = erp->er_extcount;
2943		}
2944		/*
2945		 * Otherwise, check if space is available in the
2946		 * next page.
2947		 */
2948		else if ((erp_idx < nlists - 1) &&
2949			 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
2950			  ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
2951			erp_idx++;
2952			erp++;
2953			/* Create a hole for nex2 extents */
2954			memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
2955				erp->er_extcount * sizeof(xfs_bmbt_rec_t));
2956		}
2957		/*
2958		 * Final choice, create a new extent page for
2959		 * nex2 extents.
2960		 */
2961		else {
2962			erp_idx++;
2963			erp = xfs_iext_irec_new(ifp, erp_idx);
2964		}
2965		memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
2966		kmem_free(nex2_ep);
2967		erp->er_extcount += nex2;
2968		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
2969	}
2970}
2971
2972/*
2973 * This is called when the amount of space required for incore file
2974 * extents needs to be decreased. The ext_diff parameter stores the
2975 * number of extents to be removed and the idx parameter contains
2976 * the extent index where the extents will be removed from.
2977 *
2978 * If the amount of space needed has decreased below the linear
2979 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
2980 * extent array.  Otherwise, use kmem_realloc() to adjust the
2981 * size to what is needed.
2982 */
2983void
2984xfs_iext_remove(
2985	xfs_inode_t	*ip,		/* incore inode pointer */
2986	xfs_extnum_t	idx,		/* index to begin removing exts */
2987	int		ext_diff,	/* number of extents to remove */
2988	int		state)		/* type of extent conversion */
2989{
2990	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
2991	xfs_extnum_t	nextents;	/* number of extents in file */
2992	int		new_size;	/* size of extents after removal */
2993
2994	trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
2995
2996	ASSERT(ext_diff > 0);
2997	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2998	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
2999
3000	if (new_size == 0) {
3001		xfs_iext_destroy(ifp);
3002	} else if (ifp->if_flags & XFS_IFEXTIREC) {
3003		xfs_iext_remove_indirect(ifp, idx, ext_diff);
3004	} else if (ifp->if_real_bytes) {
3005		xfs_iext_remove_direct(ifp, idx, ext_diff);
3006	} else {
3007		xfs_iext_remove_inline(ifp, idx, ext_diff);
3008	}
3009	ifp->if_bytes = new_size;
3010}
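/*
 * A minimal caller sketch (the values are hypothetical, not taken from this
 * file): dropping two data-fork extents starting at extent index 5 would be
 *
 *	xfs_iext_remove(ip, 5, 2, 0);
 *
 * The state argument selects the fork (BMAP_ATTRFORK for the attribute
 * fork), and the helper below is chosen from ifp->if_flags and
 * if_real_bytes before if_bytes is trimmed.
 */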
3011
3012/*
3013 * This removes ext_diff extents from the inline buffer, beginning
3014 * at extent index idx.
3015 */
3016void
3017xfs_iext_remove_inline(
3018	xfs_ifork_t	*ifp,		/* inode fork pointer */
3019	xfs_extnum_t	idx,		/* index to begin removing exts */
3020	int		ext_diff)	/* number of extents to remove */
3021{
3022	int		nextents;	/* number of extents in file */
3023
3024	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3025	ASSERT(idx < XFS_INLINE_EXTS);
3026	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3027	ASSERT(((nextents - ext_diff) > 0) &&
3028		(nextents - ext_diff) < XFS_INLINE_EXTS);
3029
3030	if (idx + ext_diff < nextents) {
3031		memmove(&ifp->if_u2.if_inline_ext[idx],
3032			&ifp->if_u2.if_inline_ext[idx + ext_diff],
3033			(nextents - (idx + ext_diff)) *
3034			 sizeof(xfs_bmbt_rec_t));
3035		memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3036			0, ext_diff * sizeof(xfs_bmbt_rec_t));
3037	} else {
3038		memset(&ifp->if_u2.if_inline_ext[idx], 0,
3039			ext_diff * sizeof(xfs_bmbt_rec_t));
3040	}
3041}
3042
3043/*
3044 * This removes ext_diff extents from a linear (direct) extent list,
3045 * beginning at extent index idx. If the extents are being removed
3046 * from the end of the list (i.e. truncate), then we just need to re-
3047 * allocate the list to remove the extra space. Otherwise, if the
3048 * extents are being removed from the middle of the existing extent
3049 * entries, then we first need to move the extent records beginning
3050 * at idx + ext_diff up in the list to overwrite the records being
3051 * removed, then remove the extra space via kmem_realloc.
3052 */
3053void
3054xfs_iext_remove_direct(
3055	xfs_ifork_t	*ifp,		/* inode fork pointer */
3056	xfs_extnum_t	idx,		/* index to begin removing exts */
3057	int		ext_diff)	/* number of extents to remove */
3058{
3059	xfs_extnum_t	nextents;	/* number of extents in file */
3060	int		new_size;	/* size of extents after removal */
3061
3062	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3063	new_size = ifp->if_bytes -
3064		(ext_diff * sizeof(xfs_bmbt_rec_t));
3065	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3066
3067	if (new_size == 0) {
3068		xfs_iext_destroy(ifp);
3069		return;
3070	}
3071	/* Move extents up in the list (if needed) */
3072	if (idx + ext_diff < nextents) {
3073		memmove(&ifp->if_u1.if_extents[idx],
3074			&ifp->if_u1.if_extents[idx + ext_diff],
3075			(nextents - (idx + ext_diff)) *
3076			 sizeof(xfs_bmbt_rec_t));
3077	}
3078	memset(&ifp->if_u1.if_extents[nextents - ext_diff],
3079		0, ext_diff * sizeof(xfs_bmbt_rec_t));
3080	/*
3081	 * Reallocate the direct extent list. If the extents
3082	 * will fit inside the inode then xfs_iext_realloc_direct
3083	 * will switch from direct to inline extent allocation
3084	 * mode for us.
3085	 */
3086	xfs_iext_realloc_direct(ifp, new_size);
3087	ifp->if_bytes = new_size;
3088}
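/*
 * Worked example, assuming 16-byte xfs_bmbt_rec_t records: removing
 * ext_diff == 4 extents at idx == 3 from a direct list of 10 extents
 * (if_bytes == 160) moves records 7..9 down to slots 3..5, zeroes the last
 * four slots, and calls xfs_iext_realloc_direct() with new_size == 96,
 * which rounds the allocation up to 128 bytes and leaves if_bytes == 96.
 */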
3089
3090/*
3091 * This is called when incore extents are being removed from the
3092 * indirection array and the extents being removed span multiple extent
3093 * buffers. The idx parameter contains the file extent index where we
3094 * want to begin removing extents, and the count parameter contains
3095 * how many extents need to be removed.
3096 *
3097 *    |-------|   |-------|
3098 *    | nex1  |   |       |    nex1 - number of extents before idx
3099 *    |-------|   | count |
3100 *    |       |   |       |    count - number of extents being removed at idx
3101 *    | count |   |-------|
3102 *    |       |   | nex2  |    nex2 - number of extents after idx + count
3103 *    |-------|   |-------|
3104 */
3105void
3106xfs_iext_remove_indirect(
3107	xfs_ifork_t	*ifp,		/* inode fork pointer */
3108	xfs_extnum_t	idx,		/* index to begin removing extents */
3109	int		count)		/* number of extents to remove */
3110{
3111	xfs_ext_irec_t	*erp;		/* indirection array pointer */
3112	int		erp_idx = 0;	/* indirection array index */
3113	xfs_extnum_t	ext_cnt;	/* extents left to remove */
3114	xfs_extnum_t	ext_diff;	/* extents to remove in current list */
3115	xfs_extnum_t	nex1;		/* number of extents before idx */
3116	xfs_extnum_t	nex2;		/* extents after idx + count */
3117	int		page_idx = idx;	/* index in target extent list */
3118
3119	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3120	erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3121	ASSERT(erp != NULL);
3122	nex1 = page_idx;
3123	ext_cnt = count;
3124	while (ext_cnt) {
3125		nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
3126		ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
3127		/*
3128		 * Check for deletion of entire list;
3129		 * xfs_iext_irec_remove() updates extent offsets.
3130		 */
3131		if (ext_diff == erp->er_extcount) {
3132			xfs_iext_irec_remove(ifp, erp_idx);
3133			ext_cnt -= ext_diff;
3134			nex1 = 0;
3135			if (ext_cnt) {
3136				ASSERT(erp_idx < ifp->if_real_bytes /
3137					XFS_IEXT_BUFSZ);
3138				erp = &ifp->if_u1.if_ext_irec[erp_idx];
3139				nex1 = 0;
3140				continue;
3141			} else {
3142				break;
3143			}
3144		}
3145		/* Move extents up (if needed) */
3146		if (nex2) {
3147			memmove(&erp->er_extbuf[nex1],
3148				&erp->er_extbuf[nex1 + ext_diff],
3149				nex2 * sizeof(xfs_bmbt_rec_t));
3150		}
3151		/* Zero out rest of page */
3152		memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
3153			((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
3154		/* Update remaining counters */
3155		erp->er_extcount -= ext_diff;
3156		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
3157		ext_cnt -= ext_diff;
3158		nex1 = 0;
3159		erp_idx++;
3160		erp++;
3161	}
3162	ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
3163	xfs_iext_irec_compact(ifp);
3164}
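/*
 * Worked example of the loop above, assuming XFS_LINEAR_EXTS == 256: with
 * two full extent buffers (512 extents) and a removal of count == 300
 * starting at file extent index 200, the first pass keeps nex1 == 200
 * extents and trims ext_diff == 56 from the first buffer; the second pass
 * removes the remaining 244 and moves the surviving nex2 == 12 extents to
 * the front of the second buffer.  Afterwards if_bytes drops by 300 records
 * and xfs_iext_irec_compact() converts the 212 remaining extents back to a
 * direct list.
 */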
3165
3166/*
3167 * Create, destroy, or resize a linear (direct) block of extents.
3168 */
3169void
3170xfs_iext_realloc_direct(
3171	xfs_ifork_t	*ifp,		/* inode fork pointer */
3172	int		new_size)	/* new size of extents */
3173{
3174	int		rnew_size;	/* real new size of extents */
3175
3176	rnew_size = new_size;
3177
3178	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
3179		((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
3180		 (new_size != ifp->if_real_bytes)));
3181
3182	/* Free extent records */
3183	if (new_size == 0) {
3184		xfs_iext_destroy(ifp);
3185	}
3186	/* Resize direct extent list and zero any new bytes */
3187	else if (ifp->if_real_bytes) {
3188		/* Check if extents will fit inside the inode */
3189		if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
3190			xfs_iext_direct_to_inline(ifp, new_size /
3191				(uint)sizeof(xfs_bmbt_rec_t));
3192			ifp->if_bytes = new_size;
3193			return;
3194		}
3195		if (!is_power_of_2(new_size)) {
3196			rnew_size = roundup_pow_of_two(new_size);
3197		}
3198		if (rnew_size != ifp->if_real_bytes) {
3199			ifp->if_u1.if_extents =
3200				kmem_realloc(ifp->if_u1.if_extents,
3201						rnew_size,
3202						ifp->if_real_bytes, KM_NOFS);
3203		}
3204		if (rnew_size > ifp->if_real_bytes) {
3205			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
3206				(uint)sizeof(xfs_bmbt_rec_t)], 0,
3207				rnew_size - ifp->if_real_bytes);
3208		}
3209	}
3210	/*
3211	 * Switch from the inline extent buffer to a direct
3212	 * extent list. Be sure to include the inline extent
3213	 * bytes in new_size.
3214	 */
3215	else {
3216		new_size += ifp->if_bytes;
3217		if (!is_power_of_2(new_size)) {
3218			rnew_size = roundup_pow_of_two(new_size);
3219		}
3220		xfs_iext_inline_to_direct(ifp, rnew_size);
3221	}
3222	ifp->if_real_bytes = rnew_size;
3223	ifp->if_bytes = new_size;
3224}
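/*
 * Sizing example, assuming 16-byte records: growing a direct list that
 * holds 5 extents (if_bytes == 80, if_real_bytes == 128) to 9 extents means
 * new_size == 144; that is still larger than the two-record inline buffer,
 * so the list stays direct, rnew_size is rounded up to the next power of
 * two, the buffer is kmem_realloc'd to 256 bytes, and if_real_bytes and
 * if_bytes end up as 256 and 144.
 */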
3225
3226/*
3227 * Switch from linear (direct) extent records to inline buffer.
3228 */
3229void
3230xfs_iext_direct_to_inline(
3231	xfs_ifork_t	*ifp,		/* inode fork pointer */
3232	xfs_extnum_t	nextents)	/* number of extents in file */
3233{
3234	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3235	ASSERT(nextents <= XFS_INLINE_EXTS);
3236	/*
3237	 * The inline buffer was zeroed when we switched
3238	 * from inline to direct extent allocation mode,
3239	 * so we don't need to clear it here.
3240	 */
3241	memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
3242		nextents * sizeof(xfs_bmbt_rec_t));
3243	kmem_free(ifp->if_u1.if_extents);
3244	ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3245	ifp->if_real_bytes = 0;
3246}
3247
3248/*
3249 * Switch from inline buffer to linear (direct) extent records.
3250 * new_size should already be rounded up to the next power of 2
3251 * by the caller (when appropriate), so use new_size as it is.
3252 * However, since new_size may be rounded up, we can't update
3253 * if_bytes here. It is the caller's responsibility to update
3254 * if_bytes upon return.
3255 */
3256void
3257xfs_iext_inline_to_direct(
3258	xfs_ifork_t	*ifp,		/* inode fork pointer */
3259	int		new_size)	/* number of extents in file */
3260{
3261	ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
3262	memset(ifp->if_u1.if_extents, 0, new_size);
3263	if (ifp->if_bytes) {
3264		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
3265			ifp->if_bytes);
3266		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
3267			sizeof(xfs_bmbt_rec_t));
3268	}
3269	ifp->if_real_bytes = new_size;
3270}
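/*
 * A hypothetical caller that grows past the inline records might mirror
 * the callers elsewhere in this file:
 *
 *	xfs_iext_inline_to_direct(ifp, roundup_pow_of_two(new_bytes));
 *	ifp->if_bytes = new_bytes;
 *
 * i.e. the (possibly rounded) allocation size goes into if_real_bytes here,
 * while the exact byte count is written back by the caller.
 */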
3271
3272/*
3273 * Resize an extent indirection array to new_size bytes.
3274 */
3275STATIC void
3276xfs_iext_realloc_indirect(
3277	xfs_ifork_t	*ifp,		/* inode fork pointer */
3278	int		new_size)	/* new indirection array size */
3279{
3280	int		nlists;		/* number of irec's (ex lists) */
3281	int		size;		/* current indirection array size */
3282
3283	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3284	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3285	size = nlists * sizeof(xfs_ext_irec_t);
3286	ASSERT(ifp->if_real_bytes);
3287	ASSERT((new_size >= 0) && (new_size != size));
3288	if (new_size == 0) {
3289		xfs_iext_destroy(ifp);
3290	} else {
3291		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
3292			kmem_realloc(ifp->if_u1.if_ext_irec,
3293				new_size, size, KM_NOFS);
3294	}
3295}
3296
3297/*
3298 * Switch from indirection array to linear (direct) extent allocations.
3299 */
3300STATIC void
3301xfs_iext_indirect_to_direct(
3302	 xfs_ifork_t	*ifp)		/* inode fork pointer */
3303{
3304	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */
3305	xfs_extnum_t	nextents;	/* number of extents in file */
3306	int		size;		/* size of file extents */
3307
3308	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3309	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3310	ASSERT(nextents <= XFS_LINEAR_EXTS);
3311	size = nextents * sizeof(xfs_bmbt_rec_t);
3312
3313	xfs_iext_irec_compact_pages(ifp);
3314	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
3315
3316	ep = ifp->if_u1.if_ext_irec->er_extbuf;
3317	kmem_free(ifp->if_u1.if_ext_irec);
3318	ifp->if_flags &= ~XFS_IFEXTIREC;
3319	ifp->if_u1.if_extents = ep;
3320	ifp->if_bytes = size;
3321	if (nextents < XFS_LINEAR_EXTS) {
3322		xfs_iext_realloc_direct(ifp, size);
3323	}
3324}
3325
3326/*
3327 * Free incore file extents.
3328 */
3329void
3330xfs_iext_destroy(
3331	xfs_ifork_t	*ifp)		/* inode fork pointer */
3332{
3333	if (ifp->if_flags & XFS_IFEXTIREC) {
3334		int	erp_idx;
3335		int	nlists;
3336
3337		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3338		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
3339			xfs_iext_irec_remove(ifp, erp_idx);
3340		}
3341		ifp->if_flags &= ~XFS_IFEXTIREC;
3342	} else if (ifp->if_real_bytes) {
3343		kmem_free(ifp->if_u1.if_extents);
3344	} else if (ifp->if_bytes) {
3345		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
3346			sizeof(xfs_bmbt_rec_t));
3347	}
3348	ifp->if_u1.if_extents = NULL;
3349	ifp->if_real_bytes = 0;
3350	ifp->if_bytes = 0;
3351}
3352
3353/*
3354 * Return a pointer to the extent record for file system block bno.
3355 */
3356xfs_bmbt_rec_host_t *			/* pointer to found extent record */
3357xfs_iext_bno_to_ext(
3358	xfs_ifork_t	*ifp,		/* inode fork pointer */
3359	xfs_fileoff_t	bno,		/* block number to search for */
3360	xfs_extnum_t	*idxp)		/* index of target extent */
3361{
3362	xfs_bmbt_rec_host_t *base;	/* pointer to first extent */
3363	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
3364	xfs_bmbt_rec_host_t *ep = NULL;	/* pointer to target extent */
3365	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
3366	int		high;		/* upper boundary in search */
3367	xfs_extnum_t	idx = 0;	/* index of target extent */
3368	int		low;		/* lower boundary in search */
3369	xfs_extnum_t	nextents;	/* number of file extents */
3370	xfs_fileoff_t	startoff = 0;	/* start offset of extent */
3371
3372	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3373	if (nextents == 0) {
3374		*idxp = 0;
3375		return NULL;
3376	}
3377	low = 0;
3378	if (ifp->if_flags & XFS_IFEXTIREC) {
3379		/* Find target extent list */
3380		int	erp_idx = 0;
3381		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
3382		base = erp->er_extbuf;
3383		high = erp->er_extcount - 1;
3384	} else {
3385		base = ifp->if_u1.if_extents;
3386		high = nextents - 1;
3387	}
3388	/* Binary search extent records */
3389	while (low <= high) {
3390		idx = (low + high) >> 1;
3391		ep = base + idx;
3392		startoff = xfs_bmbt_get_startoff(ep);
3393		blockcount = xfs_bmbt_get_blockcount(ep);
3394		if (bno < startoff) {
3395			high = idx - 1;
3396		} else if (bno >= startoff + blockcount) {
3397			low = idx + 1;
3398		} else {
3399			/* Convert back to file-based extent index */
3400			if (ifp->if_flags & XFS_IFEXTIREC) {
3401				idx += erp->er_extoff;
3402			}
3403			*idxp = idx;
3404			return ep;
3405		}
3406	}
3407	/* Convert back to file-based extent index */
3408	if (ifp->if_flags & XFS_IFEXTIREC) {
3409		idx += erp->er_extoff;
3410	}
3411	if (bno >= startoff + blockcount) {
3412		if (++idx == nextents) {
3413			ep = NULL;
3414		} else {
3415			ep = xfs_iext_get_ext(ifp, idx);
3416		}
3417	}
3418	*idxp = idx;
3419	return ep;
3420}
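/*
 * Lookup behaviour sketch with a hypothetical fork holding two extents,
 * [startoff 0, 10 blocks] and [startoff 20, 5 blocks]: bno == 22 matches
 * the second record and sets *idxp to 1; bno == 15 falls in the hole, so
 * the search exits and the record *after* the hole (index 1) is returned;
 * bno == 30 is past the last extent, so the function returns NULL with
 * *idxp == 2.
 */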
3421
3422/*
3423 * Return a pointer to the indirection array entry containing the
3424 * extent record for filesystem block bno. Store the index of the
3425 * target irec in *erp_idxp.
3426 */
3427xfs_ext_irec_t *			/* pointer to found extent record */
3428xfs_iext_bno_to_irec(
3429	xfs_ifork_t	*ifp,		/* inode fork pointer */
3430	xfs_fileoff_t	bno,		/* block number to search for */
3431	int		*erp_idxp)	/* irec index of target ext list */
3432{
3433	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
3434	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
3435	int		erp_idx;	/* indirection array index */
3436	int		nlists;		/* number of extent irec's (lists) */
3437	int		high;		/* binary search upper limit */
3438	int		low;		/* binary search lower limit */
3439
3440	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3441	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3442	erp_idx = 0;
3443	low = 0;
3444	high = nlists - 1;
3445	while (low <= high) {
3446		erp_idx = (low + high) >> 1;
3447		erp = &ifp->if_u1.if_ext_irec[erp_idx];
3448		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
3449		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
3450			high = erp_idx - 1;
3451		} else if (erp_next && bno >=
3452			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
3453			low = erp_idx + 1;
3454		} else {
3455			break;
3456		}
3457	}
3458	*erp_idxp = erp_idx;
3459	return erp;
3460}
3461
3462/*
3463 * Return a pointer to the indirection array entry containing the
3464 * extent record at file extent index *idxp. Store the index of the
3465 * target irec in *erp_idxp and store the page index of the target
3466 * extent record in *idxp.
3467 */
3468xfs_ext_irec_t *
3469xfs_iext_idx_to_irec(
3470	xfs_ifork_t	*ifp,		/* inode fork pointer */
3471	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
3472	int		*erp_idxp,	/* pointer to target irec */
3473	int		realloc)	/* new bytes were just added */
3474{
3475	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
3476	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
3477	int		erp_idx;	/* indirection array index */
3478	int		nlists;		/* number of irec's (ex lists) */
3479	int		high;		/* binary search upper limit */
3480	int		low;		/* binary search lower limit */
3481	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */
3482
3483	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3484	ASSERT(page_idx >= 0);
3485	ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
3486	ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);
3487
3488	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3489	erp_idx = 0;
3490	low = 0;
3491	high = nlists - 1;
3492
3493	/* Binary search extent irec's */
3494	while (low <= high) {
3495		erp_idx = (low + high) >> 1;
3496		erp = &ifp->if_u1.if_ext_irec[erp_idx];
3497		prev = erp_idx > 0 ? erp - 1 : NULL;
3498		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
3499		     realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
3500			high = erp_idx - 1;
3501		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
3502			   (page_idx == erp->er_extoff + erp->er_extcount &&
3503			    !realloc)) {
3504			low = erp_idx + 1;
3505		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
3506			   erp->er_extcount == XFS_LINEAR_EXTS) {
3507			ASSERT(realloc);
3508			page_idx = 0;
3509			erp_idx++;
3510			erp = erp_idx < nlists ? erp + 1 : NULL;
3511			break;
3512		} else {
3513			page_idx -= erp->er_extoff;
3514			break;
3515		}
3516	}
3517	*idxp = page_idx;
3518	*erp_idxp = erp_idx;
3519	return erp;
3520}
3521
3522/*
3523 * Allocate and initialize an indirection array once the space needed
3524 * for incore extents increases above XFS_IEXT_BUFSZ.
3525 */
3526void
3527xfs_iext_irec_init(
3528	xfs_ifork_t	*ifp)		/* inode fork pointer */
3529{
3530	xfs_ext_irec_t	*erp;		/* indirection array pointer */
3531	xfs_extnum_t	nextents;	/* number of extents in file */
3532
3533	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3534	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3535	ASSERT(nextents <= XFS_LINEAR_EXTS);
3536
3537	erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
3538
3539	if (nextents == 0) {
3540		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
3541	} else if (!ifp->if_real_bytes) {
3542		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
3543	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
3544		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
3545	}
3546	erp->er_extbuf = ifp->if_u1.if_extents;
3547	erp->er_extcount = nextents;
3548	erp->er_extoff = 0;
3549
3550	ifp->if_flags |= XFS_IFEXTIREC;
3551	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
3552	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
3553	ifp->if_u1.if_ext_irec = erp;
3554
3555	return;
3556}
3557
3558/*
3559 * Allocate and initialize a new entry in the indirection array.
3560 */
3561xfs_ext_irec_t *
3562xfs_iext_irec_new(
3563	xfs_ifork_t	*ifp,		/* inode fork pointer */
3564	int		erp_idx)	/* index for new irec */
3565{
3566	xfs_ext_irec_t	*erp;		/* indirection array pointer */
3567	int		i;		/* loop counter */
3568	int		nlists;		/* number of irec's (ex lists) */
3569
3570	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3571	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3572
3573	/* Resize indirection array */
3574	xfs_iext_realloc_indirect(ifp, ++nlists *
3575				  sizeof(xfs_ext_irec_t));
3576	/*
3577	 * Move records down in the array so the
3578	 * new page can use erp_idx.
3579	 */
3580	erp = ifp->if_u1.if_ext_irec;
3581	for (i = nlists - 1; i > erp_idx; i--) {
3582		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
3583	}
3584	ASSERT(i == erp_idx);
3585
3586	/* Initialize new extent record */
3587	erp = ifp->if_u1.if_ext_irec;
3588	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
3589	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
3590	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
3591	erp[erp_idx].er_extcount = 0;
3592	erp[erp_idx].er_extoff = erp_idx > 0 ?
3593		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
3594	return &erp[erp_idx];
3595}
3596
3597/*
3598 * Remove a record from the indirection array.
3599 */
3600void
3601xfs_iext_irec_remove(
3602	xfs_ifork_t	*ifp,		/* inode fork pointer */
3603	int		erp_idx)	/* irec index to remove */
3604{
3605	xfs_ext_irec_t	*erp;		/* indirection array pointer */
3606	int		i;		/* loop counter */
3607	int		nlists;		/* number of irec's (ex lists) */
3608
3609	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3610	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3611	erp = &ifp->if_u1.if_ext_irec[erp_idx];
3612	if (erp->er_extbuf) {
3613		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
3614			-erp->er_extcount);
3615		kmem_free(erp->er_extbuf);
3616	}
3617	/* Compact extent records */
3618	erp = ifp->if_u1.if_ext_irec;
3619	for (i = erp_idx; i < nlists - 1; i++) {
3620		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
3621	}
3622	/*
3623	 * Manually free the last extent record from the indirection
3624	 * array.  A call to xfs_iext_realloc_indirect() with a size
3625	 * of zero would result in a call to xfs_iext_destroy() which
3626	 * would in turn call this function again, creating a nasty
3627	 * infinite loop.
3628	 */
3629	if (--nlists) {
3630		xfs_iext_realloc_indirect(ifp,
3631			nlists * sizeof(xfs_ext_irec_t));
3632	} else {
3633		kmem_free(ifp->if_u1.if_ext_irec);
3634	}
3635	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
3636}
3637
3638/*
3639 * This is called to clean up large amounts of unused memory allocated
3640 * by the indirection array.  Before compacting anything though, verify
3641 * that the indirection array is still needed and switch back to the
3642 * linear extent list (or even the inline buffer) if possible.  The
3643 * compaction policy is as follows:
3644 *
3645 *    Full Compaction: Extents fit into a single page (or inline buffer)
3646 * Partial Compaction: Extents occupy less than 50% of allocated space
3647 *      No Compaction: Extents occupy at least 50% of allocated space
3648 */
3649void
3650xfs_iext_irec_compact(
3651	xfs_ifork_t	*ifp)		/* inode fork pointer */
3652{
3653	xfs_extnum_t	nextents;	/* number of extents in file */
3654	int		nlists;		/* number of irec's (ex lists) */
3655
3656	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3657	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3658	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3659
3660	if (nextents == 0) {
3661		xfs_iext_destroy(ifp);
3662	} else if (nextents <= XFS_INLINE_EXTS) {
3663		xfs_iext_indirect_to_direct(ifp);
3664		xfs_iext_direct_to_inline(ifp, nextents);
3665	} else if (nextents <= XFS_LINEAR_EXTS) {
3666		xfs_iext_indirect_to_direct(ifp);
3667	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
3668		xfs_iext_irec_compact_pages(ifp);
3669	}
3670}
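/*
 * Policy example, assuming XFS_LINEAR_EXTS == 256 and XFS_INLINE_EXTS == 2:
 * 2 remaining extents compact all the way back to the inline buffer; 200
 * extents become a single direct list; 300 extents spread over 3 buffers
 * satisfy 300 < (3 * 256) >> 1 == 384, so neighbouring buffers are merged;
 * 600 extents over 3 buffers are left alone.
 */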
3671
3672/*
3673 * Combine extents from neighboring extent pages.
3674 */
3675void
3676xfs_iext_irec_compact_pages(
3677	xfs_ifork_t	*ifp)		/* inode fork pointer */
3678{
3679	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
3680	int		erp_idx = 0;	/* indirection array index */
3681	int		nlists;		/* number of irec's (ex lists) */
3682
3683	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3684	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3685	while (erp_idx < nlists - 1) {
3686		erp = &ifp->if_u1.if_ext_irec[erp_idx];
3687		erp_next = erp + 1;
3688		if (erp_next->er_extcount <=
3689		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
3690			memcpy(&erp->er_extbuf[erp->er_extcount],
3691				erp_next->er_extbuf, erp_next->er_extcount *
3692				sizeof(xfs_bmbt_rec_t));
3693			erp->er_extcount += erp_next->er_extcount;
3694			/*
3695			 * Free page before removing extent record
3696			 * so er_extoffs don't get modified in
3697			 * xfs_iext_irec_remove.
3698			 */
3699			kmem_free(erp_next->er_extbuf);
3700			erp_next->er_extbuf = NULL;
3701			xfs_iext_irec_remove(ifp, erp_idx + 1);
3702			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3703		} else {
3704			erp_idx++;
3705		}
3706	}
3707}
3708
3709/*
3710 * This is called to update the er_extoff field in the indirection
3711 * array when extents have been added or removed from one of the
3712 * extent lists. erp_idx contains the irec index to begin updating
3713 * at and ext_diff contains the number of extents that were added
3714 * or removed.
3715 */
3716void
3717xfs_iext_irec_update_extoffs(
3718	xfs_ifork_t	*ifp,		/* inode fork pointer */
3719	int		erp_idx,	/* irec index to update */
3720	int		ext_diff)	/* number of new extents */
3721{
3722	int		i;		/* loop counter */
3723	int		nlists;		/* number of irec's (ex lists) */
3724
3725	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3726	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3727	for (i = erp_idx; i < nlists; i++) {
3728		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
3729	}
3730}