   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include <linux/iversion.h>
   7
   8#include "xfs.h"
   9#include "xfs_fs.h"
  10#include "xfs_shared.h"
  11#include "xfs_format.h"
  12#include "xfs_log_format.h"
  13#include "xfs_trans_resv.h"
  14#include "xfs_sb.h"
  15#include "xfs_mount.h"
  16#include "xfs_defer.h"
  17#include "xfs_inode.h"
  18#include "xfs_dir2.h"
  19#include "xfs_attr.h"
  20#include "xfs_trans_space.h"
  21#include "xfs_trans.h"
  22#include "xfs_buf_item.h"
  23#include "xfs_inode_item.h"
  24#include "xfs_ialloc.h"
  25#include "xfs_bmap.h"
  26#include "xfs_bmap_util.h"
  27#include "xfs_errortag.h"
  28#include "xfs_error.h"
  29#include "xfs_quota.h"
  30#include "xfs_filestream.h"
  31#include "xfs_trace.h"
  32#include "xfs_icache.h"
  33#include "xfs_symlink.h"
  34#include "xfs_trans_priv.h"
  35#include "xfs_log.h"
  36#include "xfs_bmap_btree.h"
  37#include "xfs_reflink.h"
  38
  39kmem_zone_t *xfs_inode_zone;
  40
  41/*
  42 * Used in xfs_itruncate_extents().  This is the maximum number of extents
  43 * freed from a file in a single transaction.
  44 */
  45#define	XFS_ITRUNC_MAX_EXTENTS	2
  46
  47STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
  48STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
  49STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
  50
  51/*
  52 * helper function to extract extent size hint from inode
  53 */
  54xfs_extlen_t
  55xfs_get_extsz_hint(
  56	struct xfs_inode	*ip)
  57{
  58	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
  59		return ip->i_d.di_extsize;
  60	if (XFS_IS_REALTIME_INODE(ip))
  61		return ip->i_mount->m_sb.sb_rextsize;
  62	return 0;
  63}
  64
  65/*
  66 * Helper function to extract CoW extent size hint from inode.
  67 * Between the extent size hint and the CoW extent size hint, we
  68 * return the greater of the two.  If the value is zero (automatic),
  69 * use the default size.
  70 */
  71xfs_extlen_t
  72xfs_get_cowextsz_hint(
  73	struct xfs_inode	*ip)
  74{
  75	xfs_extlen_t		a, b;
  76
  77	a = 0;
  78	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
  79		a = ip->i_d.di_cowextsize;
  80	b = xfs_get_extsz_hint(ip);
  81
  82	a = max(a, b);
  83	if (a == 0)
  84		return XFS_DEFAULT_COWEXTSZ_HINT;
  85	return a;
  86}
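/*
 * Illustrative sketch (editor's addition, not part of this file): one way a
 * caller might consume the CoW hint. The helper name is hypothetical;
 * XFS_FSB_TO_B() is the existing filesystem-block-to-bytes conversion.
 */
#if 0
static xfs_fsize_t
example_cowextsz_hint_bytes(
	struct xfs_inode	*ip)
{
	/* The hint is in filesystem blocks; convert using mount geometry. */
	return XFS_FSB_TO_B(ip->i_mount, xfs_get_cowextsz_hint(ip));
}
#endif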
  87
  88/*
  89 * These two are wrapper routines around the xfs_ilock() routine used to
  90 * centralize some grungy code.  They are used in places that wish to lock the
  91 * inode solely for reading the extents.  The reason these places can't just
   92 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
   93 * reading in of the extents from disk for a file in b-tree format.  If the
  94 * inode is in b-tree format, then we need to lock the inode exclusively until
  95 * the extents are read in.  Locking it exclusively all the time would limit
  96 * our parallelism unnecessarily, though.  What we do instead is check to see
  97 * if the extents have been read in yet, and only lock the inode exclusively
  98 * if they have not.
  99 *
 100 * The functions return a value which should be given to the corresponding
 101 * xfs_iunlock() call.
 102 */
 103uint
 104xfs_ilock_data_map_shared(
 105	struct xfs_inode	*ip)
 106{
 107	uint			lock_mode = XFS_ILOCK_SHARED;
 108
 109	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
 110	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
 111		lock_mode = XFS_ILOCK_EXCL;
 112	xfs_ilock(ip, lock_mode);
 113	return lock_mode;
 114}
 115
 116uint
 117xfs_ilock_attr_map_shared(
 118	struct xfs_inode	*ip)
 119{
 120	uint			lock_mode = XFS_ILOCK_SHARED;
 121
 122	if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
 123	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
 124		lock_mode = XFS_ILOCK_EXCL;
 125	xfs_ilock(ip, lock_mode);
 126	return lock_mode;
 127}
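/*
 * Illustrative sketch (editor's addition, not part of this file): the
 * intended calling pattern for the two wrappers above. Whatever lock mode
 * they return must be handed back to xfs_iunlock().
 */
#if 0
static void
example_read_data_extents(
	struct xfs_inode	*ip)
{
	uint			lock_mode;

	lock_mode = xfs_ilock_data_map_shared(ip);
	/* ... read the data fork extent list here ... */
	xfs_iunlock(ip, lock_mode);
}
#endif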
 128
 129/*
 130 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 131 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 132 * various combinations of the locks to be obtained.
 133 *
 134 * The 3 locks should always be ordered so that the IO lock is obtained first,
 135 * the mmap lock second and the ilock last in order to prevent deadlock.
 136 *
 137 * Basic locking order:
 138 *
  139 * i_rwsem -> i_mmap_lock -> page_lock -> i_lock
 140 *
 141 * mmap_sem locking order:
 142 *
  143 * i_rwsem -> page_lock -> mmap_sem
 144 * mmap_sem -> i_mmap_lock -> page_lock
 145 *
  146 * The difference in mmap_sem locking order means that we cannot hold the
  147 * i_mmap_lock over syscall based read(2)/write(2) IO. These IO paths can
 148 * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
 149 * in get_user_pages() to map the user pages into the kernel address space for
 150 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 151 * page faults already hold the mmap_sem.
 152 *
 153 * Hence to serialise fully against both syscall and mmap based IO, we need to
 154 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 155 * taken in places where we need to invalidate the page cache in a race
 156 * free manner (e.g. truncate, hole punch and other extent manipulation
 157 * functions).
 158 */
 159void
 160xfs_ilock(
 161	xfs_inode_t		*ip,
 162	uint			lock_flags)
 163{
 164	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
 165
 166	/*
 167	 * You can't set both SHARED and EXCL for the same lock,
 168	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
 169	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
 170	 */
 171	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 172	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 173	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 174	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 175	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 176	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 177	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 178
 179	if (lock_flags & XFS_IOLOCK_EXCL) {
 180		down_write_nested(&VFS_I(ip)->i_rwsem,
 181				  XFS_IOLOCK_DEP(lock_flags));
 182	} else if (lock_flags & XFS_IOLOCK_SHARED) {
 183		down_read_nested(&VFS_I(ip)->i_rwsem,
 184				 XFS_IOLOCK_DEP(lock_flags));
 185	}
 186
 187	if (lock_flags & XFS_MMAPLOCK_EXCL)
 188		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
 189	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 190		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
 191
 192	if (lock_flags & XFS_ILOCK_EXCL)
 193		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
 194	else if (lock_flags & XFS_ILOCK_SHARED)
 195		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
 196}
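/*
 * Illustrative sketch (editor's addition, not part of this file): taking
 * all three locks in the documented order with a single xfs_ilock() call,
 * as extent-manipulation paths do when they must invalidate the page cache
 * in a race free manner.
 */
#if 0
static void
example_full_invalidate_locking(
	struct xfs_inode	*ip)
{
	uint	flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL;

	xfs_ilock(ip, flags);
	/* ... truncate/hole-punch style page cache invalidation ... */
	xfs_iunlock(ip, flags);
}
#endif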
 197
 198/*
 199 * This is just like xfs_ilock(), except that the caller
 200 * is guaranteed not to sleep.  It returns 1 if it gets
 201 * the requested locks and 0 otherwise.  If the IO lock is
 202 * obtained but the inode lock cannot be, then the IO lock
 203 * is dropped before returning.
 204 *
 205 * ip -- the inode being locked
  206 * lock_flags -- this parameter indicates the inode's locks to be
  207 *       locked.  See the comment for xfs_ilock() for a list
 208 *	 of valid values.
 209 */
 210int
 211xfs_ilock_nowait(
 212	xfs_inode_t		*ip,
 213	uint			lock_flags)
 214{
 215	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
 216
 217	/*
 218	 * You can't set both SHARED and EXCL for the same lock,
 219	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
 220	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
 221	 */
 222	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 223	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 224	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 225	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 226	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 227	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 228	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 229
 230	if (lock_flags & XFS_IOLOCK_EXCL) {
 231		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
 232			goto out;
 233	} else if (lock_flags & XFS_IOLOCK_SHARED) {
 234		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
 235			goto out;
 236	}
 237
 238	if (lock_flags & XFS_MMAPLOCK_EXCL) {
 239		if (!mrtryupdate(&ip->i_mmaplock))
 240			goto out_undo_iolock;
 241	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
 242		if (!mrtryaccess(&ip->i_mmaplock))
 243			goto out_undo_iolock;
 244	}
 245
 246	if (lock_flags & XFS_ILOCK_EXCL) {
 247		if (!mrtryupdate(&ip->i_lock))
 248			goto out_undo_mmaplock;
 249	} else if (lock_flags & XFS_ILOCK_SHARED) {
 250		if (!mrtryaccess(&ip->i_lock))
 251			goto out_undo_mmaplock;
 252	}
 253	return 1;
 254
 255out_undo_mmaplock:
 256	if (lock_flags & XFS_MMAPLOCK_EXCL)
 257		mrunlock_excl(&ip->i_mmaplock);
 258	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 259		mrunlock_shared(&ip->i_mmaplock);
 260out_undo_iolock:
 261	if (lock_flags & XFS_IOLOCK_EXCL)
 262		up_write(&VFS_I(ip)->i_rwsem);
 263	else if (lock_flags & XFS_IOLOCK_SHARED)
 264		up_read(&VFS_I(ip)->i_rwsem);
 265out:
 266	return 0;
 267}
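/*
 * Illustrative sketch (editor's addition, not part of this file): a retry
 * loop around xfs_ilock_nowait(). On failure the routine has already
 * unwound any partial acquisitions, so the caller holds nothing and can
 * simply back off and try again.
 */
#if 0
static void
example_trylock_with_backoff(
	struct xfs_inode	*ip)
{
	while (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		delay(1);	/* don't just spin the CPU */
	/* ... critical section ... */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
#endif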
 268
 269/*
 270 * xfs_iunlock() is used to drop the inode locks acquired with
 271 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 272 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 273 * that we know which locks to drop.
 274 *
 275 * ip -- the inode being unlocked
  276 * lock_flags -- this parameter indicates the inode's locks to be
  277 *       unlocked.  See the comment for xfs_ilock() for a list
 278 *	 of valid values for this parameter.
 279 *
 280 */
 281void
 282xfs_iunlock(
 283	xfs_inode_t		*ip,
 284	uint			lock_flags)
 285{
 286	/*
 287	 * You can't set both SHARED and EXCL for the same lock,
 288	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
 289	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
 290	 */
 291	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 292	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 293	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 294	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 295	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 296	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 297	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 298	ASSERT(lock_flags != 0);
 299
 300	if (lock_flags & XFS_IOLOCK_EXCL)
 301		up_write(&VFS_I(ip)->i_rwsem);
 302	else if (lock_flags & XFS_IOLOCK_SHARED)
 303		up_read(&VFS_I(ip)->i_rwsem);
 304
 305	if (lock_flags & XFS_MMAPLOCK_EXCL)
 306		mrunlock_excl(&ip->i_mmaplock);
 307	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 308		mrunlock_shared(&ip->i_mmaplock);
 309
 310	if (lock_flags & XFS_ILOCK_EXCL)
 311		mrunlock_excl(&ip->i_lock);
 312	else if (lock_flags & XFS_ILOCK_SHARED)
 313		mrunlock_shared(&ip->i_lock);
 314
 315	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
 316}
 317
 318/*
  319 * Give up write locks.  The I/O lock cannot be held nested
  320 * if it is being demoted.
 321 */
 322void
 323xfs_ilock_demote(
 324	xfs_inode_t		*ip,
 325	uint			lock_flags)
 326{
 327	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
 328	ASSERT((lock_flags &
 329		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
 330
 331	if (lock_flags & XFS_ILOCK_EXCL)
 332		mrdemote(&ip->i_lock);
 333	if (lock_flags & XFS_MMAPLOCK_EXCL)
 334		mrdemote(&ip->i_mmaplock);
 335	if (lock_flags & XFS_IOLOCK_EXCL)
 336		downgrade_write(&VFS_I(ip)->i_rwsem);
 337
 338	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
 339}
 340
 341#if defined(DEBUG) || defined(XFS_WARN)
 342int
 343xfs_isilocked(
 344	xfs_inode_t		*ip,
 345	uint			lock_flags)
 346{
 347	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
 348		if (!(lock_flags & XFS_ILOCK_SHARED))
 349			return !!ip->i_lock.mr_writer;
 350		return rwsem_is_locked(&ip->i_lock.mr_lock);
 351	}
 352
 353	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
 354		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
 355			return !!ip->i_mmaplock.mr_writer;
 356		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
 357	}
 358
 359	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
 360		if (!(lock_flags & XFS_IOLOCK_SHARED))
 361			return !debug_locks ||
 362				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
 363		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
 364	}
 365
 366	ASSERT(0);
 367	return 0;
 368}
 369#endif
 370
 371/*
 372 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 373 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 374 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 375 * errors and warnings.
 376 */
 377#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
 378static bool
 379xfs_lockdep_subclass_ok(
 380	int subclass)
 381{
 382	return subclass < MAX_LOCKDEP_SUBCLASSES;
 383}
 384#else
 385#define xfs_lockdep_subclass_ok(subclass)	(true)
 386#endif
 387
 388/*
 389 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 390 * value. This can be called for any type of inode lock combination, including
 391 * parent locking. Care must be taken to ensure we don't overrun the subclass
 392 * storage fields in the class mask we build.
 393 */
 394static inline int
 395xfs_lock_inumorder(int lock_mode, int subclass)
 396{
 397	int	class = 0;
 398
 399	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
 400			      XFS_ILOCK_RTSUM)));
 401	ASSERT(xfs_lockdep_subclass_ok(subclass));
 402
 403	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
 404		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
 405		class += subclass << XFS_IOLOCK_SHIFT;
 406	}
 407
 408	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
 409		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
 410		class += subclass << XFS_MMAPLOCK_SHIFT;
 411	}
 412
 413	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
 414		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
 415		class += subclass << XFS_ILOCK_SHIFT;
 416	}
 417
 418	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
 419}
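/*
 * Illustrative sketch (editor's addition, not part of this file): how
 * xfs_lock_inumorder() is consumed. The i-th inode of an inode-number
 * ordered set is locked with lockdep subclass i, so nested acquisitions of
 * the same lock type are reported as distinct classes. count is bounded by
 * the subclass limits asserted above.
 */
#if 0
static void
example_lock_ordered_set(
	struct xfs_inode	**ips,
	int			count)
{
	int			i;

	for (i = 0; i < count; i++)
		xfs_ilock(ips[i], xfs_lock_inumorder(XFS_ILOCK_EXCL, i));
}
#endif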
 420
 421/*
 422 * The following routine will lock n inodes in exclusive mode.  We assume the
 423 * caller calls us with the inodes in i_ino order.
 424 *
 425 * We need to detect deadlock where an inode that we lock is in the AIL and we
 426 * start waiting for another inode that is locked by a thread in a long running
 427 * transaction (such as truncate). This can result in deadlock since the long
 428 * running trans might need to wait for the inode we just locked in order to
 429 * push the tail and free space in the log.
 430 *
 431 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 432 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 433 * lock more than one at a time, lockdep will report false positives saying we
 434 * have violated locking orders.
 435 */
 436static void
 437xfs_lock_inodes(
 438	struct xfs_inode	**ips,
 439	int			inodes,
 440	uint			lock_mode)
 441{
 442	int			attempts = 0, i, j, try_lock;
 443	struct xfs_log_item	*lp;
 444
 445	/*
 446	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
 447	 * support an arbitrary depth of locking here, but absolute limits on
  448	 * inodes depend on the type of locking and the limits placed by
 449	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
 450	 * the asserts.
 451	 */
 452	ASSERT(ips && inodes >= 2 && inodes <= 5);
 453	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
 454			    XFS_ILOCK_EXCL));
 455	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
 456			      XFS_ILOCK_SHARED)));
 457	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
 458		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
 459	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
 460		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
 461
 462	if (lock_mode & XFS_IOLOCK_EXCL) {
 463		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
 464	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
 465		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
 466
 467	try_lock = 0;
 468	i = 0;
 469again:
 470	for (; i < inodes; i++) {
 471		ASSERT(ips[i]);
 472
 473		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
 474			continue;
 475
 476		/*
 477		 * If try_lock is not set yet, make sure all locked inodes are
 478		 * not in the AIL.  If any are, set try_lock to be used later.
 479		 */
 480		if (!try_lock) {
 481			for (j = (i - 1); j >= 0 && !try_lock; j--) {
 482				lp = &ips[j]->i_itemp->ili_item;
 483				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
 484					try_lock++;
 485			}
 486		}
 487
 488		/*
 489		 * If any of the previous locks we have locked is in the AIL,
 490		 * we must TRY to get the second and subsequent locks. If
 491		 * we can't get any, we must release all we have
 492		 * and try again.
 493		 */
 494		if (!try_lock) {
 495			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
 496			continue;
 497		}
 498
 499		/* try_lock means we have an inode locked that is in the AIL. */
 500		ASSERT(i != 0);
 501		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
 502			continue;
 503
 504		/*
 505		 * Unlock all previous guys and try again.  xfs_iunlock will try
 506		 * to push the tail if the inode is in the AIL.
 507		 */
 508		attempts++;
 509		for (j = i - 1; j >= 0; j--) {
 510			/*
 511			 * Check to see if we've already unlocked this one.  Not
 512			 * the first one going back, and the inode ptr is the
 513			 * same.
 514			 */
 515			if (j != (i - 1) && ips[j] == ips[j + 1])
 516				continue;
 517
 518			xfs_iunlock(ips[j], lock_mode);
 519		}
 520
 521		if ((attempts % 5) == 0) {
 522			delay(1); /* Don't just spin the CPU */
 523		}
 524		i = 0;
 525		try_lock = 0;
 526		goto again;
 527	}
 528}
 529
 530/*
 531 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 532 * the mmaplock or the ilock, but not more than one type at a time. If we lock
 533 * more than one at a time, lockdep will report false positives saying we have
 534 * violated locking orders.  The iolock must be double-locked separately since
 535 * we use i_rwsem for that.  We now support taking one lock EXCL and the other
 536 * SHARED.
 537 */
 538void
 539xfs_lock_two_inodes(
 540	struct xfs_inode	*ip0,
 541	uint			ip0_mode,
 542	struct xfs_inode	*ip1,
 543	uint			ip1_mode)
 544{
 545	struct xfs_inode	*temp;
 546	uint			mode_temp;
 547	int			attempts = 0;
 548	struct xfs_log_item	*lp;
 549
 550	ASSERT(hweight32(ip0_mode) == 1);
 551	ASSERT(hweight32(ip1_mode) == 1);
 552	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
 553	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
 554	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 555	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 556	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 557	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 558	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 559	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 560	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 561	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 562
 563	ASSERT(ip0->i_ino != ip1->i_ino);
 564
 565	if (ip0->i_ino > ip1->i_ino) {
 566		temp = ip0;
 567		ip0 = ip1;
 568		ip1 = temp;
 569		mode_temp = ip0_mode;
 570		ip0_mode = ip1_mode;
 571		ip1_mode = mode_temp;
 572	}
 573
 574 again:
 575	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
 576
 577	/*
 578	 * If the first lock we have locked is in the AIL, we must TRY to get
 579	 * the second lock. If we can't get it, we must release the first one
 580	 * and try again.
 581	 */
 582	lp = &ip0->i_itemp->ili_item;
 583	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
 584		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
 585			xfs_iunlock(ip0, ip0_mode);
 586			if ((++attempts % 5) == 0)
 587				delay(1); /* Don't just spin the CPU */
 588			goto again;
 589		}
 590	} else {
 591		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
 592	}
 593}
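/*
 * Illustrative sketch (editor's addition, not part of this file): a typical
 * xfs_lock_two_inodes() caller. The routine orders the pair by inode number
 * internally, so the arguments may arrive in any order; compare the real
 * usage in xfs_link() below.
 */
#if 0
static void
example_lock_inode_pair(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	xfs_lock_two_inodes(ip1, XFS_ILOCK_EXCL, ip2, XFS_ILOCK_EXCL);
	/* ... operate on both inodes, usually under a transaction ... */
	xfs_iunlock(ip2, XFS_ILOCK_EXCL);
	xfs_iunlock(ip1, XFS_ILOCK_EXCL);
}
#endif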
 594
 595void
 596__xfs_iflock(
 597	struct xfs_inode	*ip)
 598{
 599	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
 600	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
 601
 602	do {
 603		prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
 604		if (xfs_isiflocked(ip))
 605			io_schedule();
 606	} while (!xfs_iflock_nowait(ip));
 607
 608	finish_wait(wq, &wait.wq_entry);
 609}
 610
 611STATIC uint
 612_xfs_dic2xflags(
 613	uint16_t		di_flags,
 614	uint64_t		di_flags2,
 615	bool			has_attr)
 616{
 617	uint			flags = 0;
 618
 619	if (di_flags & XFS_DIFLAG_ANY) {
 620		if (di_flags & XFS_DIFLAG_REALTIME)
 621			flags |= FS_XFLAG_REALTIME;
 622		if (di_flags & XFS_DIFLAG_PREALLOC)
 623			flags |= FS_XFLAG_PREALLOC;
 624		if (di_flags & XFS_DIFLAG_IMMUTABLE)
 625			flags |= FS_XFLAG_IMMUTABLE;
 626		if (di_flags & XFS_DIFLAG_APPEND)
 627			flags |= FS_XFLAG_APPEND;
 628		if (di_flags & XFS_DIFLAG_SYNC)
 629			flags |= FS_XFLAG_SYNC;
 630		if (di_flags & XFS_DIFLAG_NOATIME)
 631			flags |= FS_XFLAG_NOATIME;
 632		if (di_flags & XFS_DIFLAG_NODUMP)
 633			flags |= FS_XFLAG_NODUMP;
 634		if (di_flags & XFS_DIFLAG_RTINHERIT)
 635			flags |= FS_XFLAG_RTINHERIT;
 636		if (di_flags & XFS_DIFLAG_PROJINHERIT)
 637			flags |= FS_XFLAG_PROJINHERIT;
 638		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
 639			flags |= FS_XFLAG_NOSYMLINKS;
 640		if (di_flags & XFS_DIFLAG_EXTSIZE)
 641			flags |= FS_XFLAG_EXTSIZE;
 642		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
 643			flags |= FS_XFLAG_EXTSZINHERIT;
 644		if (di_flags & XFS_DIFLAG_NODEFRAG)
 645			flags |= FS_XFLAG_NODEFRAG;
 646		if (di_flags & XFS_DIFLAG_FILESTREAM)
 647			flags |= FS_XFLAG_FILESTREAM;
 648	}
 649
 650	if (di_flags2 & XFS_DIFLAG2_ANY) {
 651		if (di_flags2 & XFS_DIFLAG2_DAX)
 652			flags |= FS_XFLAG_DAX;
 653		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
 654			flags |= FS_XFLAG_COWEXTSIZE;
 655	}
 656
 657	if (has_attr)
 658		flags |= FS_XFLAG_HASATTR;
 659
 660	return flags;
 661}
 662
 663uint
 664xfs_ip2xflags(
 665	struct xfs_inode	*ip)
 666{
 667	struct xfs_icdinode	*dic = &ip->i_d;
 668
 669	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
 670}
 671
 672/*
  673 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
  674 * is allowed, otherwise it has to be an exact match. If a CI match is found,
  675 * ci_name->name will point to the actual name (caller must free) or
 676 * will be set to NULL if an exact match is found.
 677 */
 678int
 679xfs_lookup(
 680	xfs_inode_t		*dp,
 681	struct xfs_name		*name,
 682	xfs_inode_t		**ipp,
 683	struct xfs_name		*ci_name)
 684{
 685	xfs_ino_t		inum;
 686	int			error;
 687
 688	trace_xfs_lookup(dp, name);
 689
 690	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 691		return -EIO;
 692
 693	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
 694	if (error)
 695		goto out_unlock;
 696
 697	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
 698	if (error)
 699		goto out_free_name;
 700
 701	return 0;
 702
 703out_free_name:
 704	if (ci_name)
 705		kmem_free(ci_name->name);
 706out_unlock:
 707	*ipp = NULL;
 708	return error;
 709}
 710
 711/*
 712 * Allocate an inode on disk and return a copy of its in-core version.
 713 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 714 * appropriately within the inode.  The uid and gid for the inode are
 715 * set according to the contents of the given cred structure.
 716 *
 717 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 718 * has a free inode available, call xfs_iget() to obtain the in-core
 719 * version of the allocated inode.  Finally, fill in the inode and
 720 * log its initial contents.  In this case, ialloc_context would be
 721 * set to NULL.
 722 *
 723 * If xfs_dialloc() does not have an available inode, it will replenish
 724 * its supply by doing an allocation. Since we can only do one
 725 * allocation within a transaction without deadlocks, we must commit
 726 * the current transaction before returning the inode itself.
 727 * In this case, therefore, we will set ialloc_context and return.
 728 * The caller should then commit the current transaction, start a new
 729 * transaction, and call xfs_ialloc() again to actually get the inode.
 730 *
 731 * To ensure that some other process does not grab the inode that
 732 * was allocated during the first call to xfs_ialloc(), this routine
 733 * also returns the [locked] bp pointing to the head of the freelist
 734 * as ialloc_context.  The caller should hold this buffer across
 735 * the commit and pass it back into this routine on the second call.
 736 *
 737 * If we are allocating quota inodes, we do not have a parent inode
 738 * to attach to or associate with (i.e. pip == NULL) because they
 739 * are not linked into the directory structure - they are attached
 740 * directly to the superblock - and so have no parent.
 741 */
 742static int
 743xfs_ialloc(
 744	xfs_trans_t	*tp,
 745	xfs_inode_t	*pip,
 746	umode_t		mode,
 747	xfs_nlink_t	nlink,
 748	dev_t		rdev,
 749	prid_t		prid,
 750	xfs_buf_t	**ialloc_context,
 751	xfs_inode_t	**ipp)
 752{
 753	struct xfs_mount *mp = tp->t_mountp;
 754	xfs_ino_t	ino;
 755	xfs_inode_t	*ip;
 756	uint		flags;
 757	int		error;
 758	struct timespec64 tv;
 759	struct inode	*inode;
 760
 761	/*
 762	 * Call the space management code to pick
 763	 * the on-disk inode to be allocated.
 764	 */
 765	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
 766			    ialloc_context, &ino);
 767	if (error)
 768		return error;
 769	if (*ialloc_context || ino == NULLFSINO) {
 770		*ipp = NULL;
 771		return 0;
 772	}
 773	ASSERT(*ialloc_context == NULL);
 774
 775	/*
 776	 * Protect against obviously corrupt allocation btree records. Later
 777	 * xfs_iget checks will catch re-allocation of other active in-memory
 778	 * and on-disk inodes. If we don't catch reallocating the parent inode
 779	 * here we will deadlock in xfs_iget() so we have to do these checks
 780	 * first.
 781	 */
 782	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
 783		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
 784		return -EFSCORRUPTED;
 785	}
 786
 787	/*
 788	 * Get the in-core inode with the lock held exclusively.
  789	 * This is because we're setting fields here that we need
 790	 * to prevent others from looking at until we're done.
 791	 */
 792	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
 793			 XFS_ILOCK_EXCL, &ip);
 794	if (error)
 795		return error;
 796	ASSERT(ip != NULL);
 797	inode = VFS_I(ip);
 798
 799	/*
 800	 * We always convert v1 inodes to v2 now - we only support filesystems
 801	 * with >= v2 inode capability, so there is no reason for ever leaving
 802	 * an inode in v1 format.
 803	 */
 804	if (ip->i_d.di_version == 1)
 805		ip->i_d.di_version = 2;
 806
 807	inode->i_mode = mode;
 808	set_nlink(inode, nlink);
 809	ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
 810	ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
 811	inode->i_rdev = rdev;
 812	xfs_set_projid(ip, prid);
 813
 814	if (pip && XFS_INHERIT_GID(pip)) {
 815		ip->i_d.di_gid = pip->i_d.di_gid;
 816		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
 817			inode->i_mode |= S_ISGID;
 818	}
 819
 820	/*
 821	 * If the group ID of the new file does not match the effective group
 822	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
 823	 * (and only if the irix_sgid_inherit compatibility variable is set).
 824	 */
 825	if ((irix_sgid_inherit) &&
 826	    (inode->i_mode & S_ISGID) &&
 827	    (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
 828		inode->i_mode &= ~S_ISGID;
 829
 830	ip->i_d.di_size = 0;
 831	ip->i_d.di_nextents = 0;
 832	ASSERT(ip->i_d.di_nblocks == 0);
 833
 834	tv = current_time(inode);
 835	inode->i_mtime = tv;
 836	inode->i_atime = tv;
 837	inode->i_ctime = tv;
 838
 839	ip->i_d.di_extsize = 0;
 840	ip->i_d.di_dmevmask = 0;
 841	ip->i_d.di_dmstate = 0;
 842	ip->i_d.di_flags = 0;
 843
 844	if (ip->i_d.di_version == 3) {
 845		inode_set_iversion(inode, 1);
 846		ip->i_d.di_flags2 = 0;
 847		ip->i_d.di_cowextsize = 0;
 848		ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
 849		ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
 850	}
 851
 852
 853	flags = XFS_ILOG_CORE;
 854	switch (mode & S_IFMT) {
 855	case S_IFIFO:
 856	case S_IFCHR:
 857	case S_IFBLK:
 858	case S_IFSOCK:
 859		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
 860		ip->i_df.if_flags = 0;
 861		flags |= XFS_ILOG_DEV;
 862		break;
 863	case S_IFREG:
 864	case S_IFDIR:
 865		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
 866			uint		di_flags = 0;
 867
 868			if (S_ISDIR(mode)) {
 869				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
 870					di_flags |= XFS_DIFLAG_RTINHERIT;
 871				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
 872					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
 873					ip->i_d.di_extsize = pip->i_d.di_extsize;
 874				}
 875				if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
 876					di_flags |= XFS_DIFLAG_PROJINHERIT;
 877			} else if (S_ISREG(mode)) {
 878				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
 879					di_flags |= XFS_DIFLAG_REALTIME;
 880				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
 881					di_flags |= XFS_DIFLAG_EXTSIZE;
 882					ip->i_d.di_extsize = pip->i_d.di_extsize;
 883				}
 884			}
 885			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
 886			    xfs_inherit_noatime)
 887				di_flags |= XFS_DIFLAG_NOATIME;
 888			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
 889			    xfs_inherit_nodump)
 890				di_flags |= XFS_DIFLAG_NODUMP;
 891			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
 892			    xfs_inherit_sync)
 893				di_flags |= XFS_DIFLAG_SYNC;
 894			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
 895			    xfs_inherit_nosymlinks)
 896				di_flags |= XFS_DIFLAG_NOSYMLINKS;
 897			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
 898			    xfs_inherit_nodefrag)
 899				di_flags |= XFS_DIFLAG_NODEFRAG;
 900			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
 901				di_flags |= XFS_DIFLAG_FILESTREAM;
 902
 903			ip->i_d.di_flags |= di_flags;
 904		}
 905		if (pip &&
 906		    (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
 907		    pip->i_d.di_version == 3 &&
 908		    ip->i_d.di_version == 3) {
 909			uint64_t	di_flags2 = 0;
 910
 911			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
 912				di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
 913				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
 914			}
 915			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
 916				di_flags2 |= XFS_DIFLAG2_DAX;
 917
 918			ip->i_d.di_flags2 |= di_flags2;
 919		}
 920		/* FALLTHROUGH */
 921	case S_IFLNK:
 922		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
 923		ip->i_df.if_flags = XFS_IFEXTENTS;
 924		ip->i_df.if_bytes = 0;
 925		ip->i_df.if_u1.if_root = NULL;
 926		break;
 927	default:
 928		ASSERT(0);
 929	}
 930	/*
 931	 * Attribute fork settings for new inode.
 932	 */
 933	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
 934	ip->i_d.di_anextents = 0;
 935
 936	/*
 937	 * Log the new values stuffed into the inode.
 938	 */
 939	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 940	xfs_trans_log_inode(tp, ip, flags);
 941
 942	/* now that we have an i_mode we can setup the inode structure */
 943	xfs_setup_inode(ip);
 944
 945	*ipp = ip;
 946	return 0;
 947}
 948
 949/*
  950 * Allocates a new inode from disk and returns a pointer to the
 951 * incore copy. This routine will internally commit the current
 952 * transaction and allocate a new one if the Space Manager needed
 953 * to do an allocation to replenish the inode free-list.
 954 *
 955 * This routine is designed to be called from xfs_create and
 956 * xfs_create_dir.
 957 *
 958 */
 959int
 960xfs_dir_ialloc(
 961	xfs_trans_t	**tpp,		/* input: current transaction;
 962					   output: may be a new transaction. */
  963	xfs_inode_t	*dp,		/* directory within which to
  964					   allocate the inode. */
 965	umode_t		mode,
 966	xfs_nlink_t	nlink,
 967	dev_t		rdev,
 968	prid_t		prid,		/* project id */
 969	xfs_inode_t	**ipp)		/* pointer to inode; it will be
 970					   locked. */
 971{
 972	xfs_trans_t	*tp;
 973	xfs_inode_t	*ip;
 974	xfs_buf_t	*ialloc_context = NULL;
 975	int		code;
 976	void		*dqinfo;
 977	uint		tflags;
 978
 979	tp = *tpp;
 980	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 981
 982	/*
 983	 * xfs_ialloc will return a pointer to an incore inode if
 984	 * the Space Manager has an available inode on the free
 985	 * list. Otherwise, it will do an allocation and replenish
 986	 * the freelist.  Since we can only do one allocation per
 987	 * transaction without deadlocks, we will need to commit the
 988	 * current transaction and start a new one.  We will then
 989	 * need to call xfs_ialloc again to get the inode.
 990	 *
 991	 * If xfs_ialloc did an allocation to replenish the freelist,
 992	 * it returns the bp containing the head of the freelist as
 993	 * ialloc_context. We will hold a lock on it across the
 994	 * transaction commit so that no other process can steal
 995	 * the inode(s) that we've just allocated.
 996	 */
 997	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
 998			&ip);
 999
1000	/*
1001	 * Return an error if we were unable to allocate a new inode.
1002	 * This should only happen if we run out of space on disk or
1003	 * encounter a disk error.
1004	 */
1005	if (code) {
1006		*ipp = NULL;
1007		return code;
1008	}
1009	if (!ialloc_context && !ip) {
1010		*ipp = NULL;
1011		return -ENOSPC;
1012	}
1013
1014	/*
1015	 * If the AGI buffer is non-NULL, then we were unable to get an
1016	 * inode in one operation.  We need to commit the current
1017	 * transaction and call xfs_ialloc() again.  It is guaranteed
1018	 * to succeed the second time.
1019	 */
1020	if (ialloc_context) {
1021		/*
1022		 * Normally, xfs_trans_commit releases all the locks.
1023		 * We call bhold to hang on to the ialloc_context across
1024		 * the commit.  Holding this buffer prevents any other
1025		 * processes from doing any allocations in this
1026		 * allocation group.
1027		 */
1028		xfs_trans_bhold(tp, ialloc_context);
1029
1030		/*
1031		 * We want the quota changes to be associated with the next
1032		 * transaction, NOT this one. So, detach the dqinfo from this
1033		 * and attach it to the next transaction.
1034		 */
1035		dqinfo = NULL;
1036		tflags = 0;
1037		if (tp->t_dqinfo) {
1038			dqinfo = (void *)tp->t_dqinfo;
1039			tp->t_dqinfo = NULL;
1040			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
1041			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
1042		}
1043
1044		code = xfs_trans_roll(&tp);
1045
1046		/*
 1047		 * Re-attach the quota info that we detached from the prior transaction.
1048		 */
1049		if (dqinfo) {
1050			tp->t_dqinfo = dqinfo;
1051			tp->t_flags |= tflags;
1052		}
1053
1054		if (code) {
1055			xfs_buf_relse(ialloc_context);
1056			*tpp = tp;
1057			*ipp = NULL;
1058			return code;
1059		}
1060		xfs_trans_bjoin(tp, ialloc_context);
1061
1062		/*
1063		 * Call ialloc again. Since we've locked out all
1064		 * other allocations in this allocation group,
1065		 * this call should always succeed.
1066		 */
1067		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
1068				  &ialloc_context, &ip);
1069
1070		/*
1071		 * If we get an error at this point, return to the caller
1072		 * so that the current transaction can be aborted.
1073		 */
1074		if (code) {
1075			*tpp = tp;
1076			*ipp = NULL;
1077			return code;
1078		}
1079		ASSERT(!ialloc_context && ip);
1080
1081	}
1082
1083	*ipp = ip;
1084	*tpp = tp;
1085
1086	return 0;
1087}
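/*
 * Illustrative sketch (editor's addition, not part of this file): the
 * caller-side contract of xfs_dir_ialloc(). Because the routine may commit
 * *tpp and hand back a brand new transaction, callers must not carry
 * unlogged state across the call and should only join other inodes
 * afterwards -- see xfs_create() below for the real pattern.
 */
#if 0
static int
example_alloc_one_inode(
	struct xfs_trans	**tpp,
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	int	error;

	error = xfs_dir_ialloc(tpp, dp, mode, 1, 0,
			       xfs_get_initial_prid(dp), ipp);
	/* On return *tpp may differ from the transaction passed in. */
	return error;
}
#endif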
1088
1089/*
1090 * Decrement the link count on an inode & log the change.  If this causes the
1091 * link count to go to zero, move the inode to AGI unlinked list so that it can
1092 * be freed when the last active reference goes away via xfs_inactive().
1093 */
1094static int			/* error */
1095xfs_droplink(
1096	xfs_trans_t *tp,
1097	xfs_inode_t *ip)
1098{
1099	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1100
1101	drop_nlink(VFS_I(ip));
1102	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1103
1104	if (VFS_I(ip)->i_nlink)
1105		return 0;
1106
1107	return xfs_iunlink(tp, ip);
1108}
1109
1110/*
1111 * Increment the link count on an inode & log the change.
1112 */
1113static void
1114xfs_bumplink(
1115	xfs_trans_t *tp,
1116	xfs_inode_t *ip)
1117{
1118	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1119
1120	ASSERT(ip->i_d.di_version > 1);
1121	inc_nlink(VFS_I(ip));
1122	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1123}
1124
1125int
1126xfs_create(
1127	xfs_inode_t		*dp,
1128	struct xfs_name		*name,
1129	umode_t			mode,
1130	dev_t			rdev,
1131	xfs_inode_t		**ipp)
1132{
1133	int			is_dir = S_ISDIR(mode);
1134	struct xfs_mount	*mp = dp->i_mount;
1135	struct xfs_inode	*ip = NULL;
1136	struct xfs_trans	*tp = NULL;
1137	int			error;
1138	bool                    unlock_dp_on_error = false;
1139	prid_t			prid;
1140	struct xfs_dquot	*udqp = NULL;
1141	struct xfs_dquot	*gdqp = NULL;
1142	struct xfs_dquot	*pdqp = NULL;
1143	struct xfs_trans_res	*tres;
1144	uint			resblks;
1145
1146	trace_xfs_create(dp, name);
1147
1148	if (XFS_FORCED_SHUTDOWN(mp))
1149		return -EIO;
1150
1151	prid = xfs_get_initial_prid(dp);
1152
1153	/*
1154	 * Make sure that we have allocated dquot(s) on disk.
1155	 */
1156	error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1157					xfs_kgid_to_gid(current_fsgid()), prid,
1158					XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1159					&udqp, &gdqp, &pdqp);
1160	if (error)
1161		return error;
1162
1163	if (is_dir) {
1164		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1165		tres = &M_RES(mp)->tr_mkdir;
1166	} else {
1167		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1168		tres = &M_RES(mp)->tr_create;
1169	}
1170
1171	/*
1172	 * Initially assume that the file does not exist and
1173	 * reserve the resources for that case.  If that is not
1174	 * the case we'll drop the one we have and get a more
1175	 * appropriate transaction later.
1176	 */
1177	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1178	if (error == -ENOSPC) {
1179		/* flush outstanding delalloc blocks and retry */
1180		xfs_flush_inodes(mp);
1181		error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1182	}
1183	if (error)
1184		goto out_release_inode;
1185
1186	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1187	unlock_dp_on_error = true;
1188
1189	/*
1190	 * Reserve disk quota and the inode.
1191	 */
1192	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1193						pdqp, resblks, 1, 0);
1194	if (error)
1195		goto out_trans_cancel;
1196
1197	/*
 1198	 * A newly created regular or special file just has one directory
 1199	 * entry pointing to it, but a directory also has the "." entry
 1200	 * pointing to itself.
1201	 */
1202	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
1203	if (error)
1204		goto out_trans_cancel;
1205
1206	/*
1207	 * Now we join the directory inode to the transaction.  We do not do it
1208	 * earlier because xfs_dir_ialloc might commit the previous transaction
1209	 * (and release all the locks).  An error from here on will result in
1210	 * the transaction cancel unlocking dp so don't do it explicitly in the
1211	 * error path.
1212	 */
1213	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1214	unlock_dp_on_error = false;
1215
1216	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1217				   resblks ?
1218					resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
1219	if (error) {
1220		ASSERT(error != -ENOSPC);
1221		goto out_trans_cancel;
1222	}
1223	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1224	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1225
1226	if (is_dir) {
1227		error = xfs_dir_init(tp, ip, dp);
1228		if (error)
1229			goto out_trans_cancel;
1230
1231		xfs_bumplink(tp, dp);
1232	}
1233
1234	/*
1235	 * If this is a synchronous mount, make sure that the
1236	 * create transaction goes to disk before returning to
1237	 * the user.
1238	 */
1239	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1240		xfs_trans_set_sync(tp);
1241
1242	/*
1243	 * Attach the dquot(s) to the inodes and modify them incore.
1244	 * These ids of the inode couldn't have changed since the new
1245	 * inode has been locked ever since it was created.
1246	 */
1247	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1248
1249	error = xfs_trans_commit(tp);
1250	if (error)
1251		goto out_release_inode;
1252
1253	xfs_qm_dqrele(udqp);
1254	xfs_qm_dqrele(gdqp);
1255	xfs_qm_dqrele(pdqp);
1256
1257	*ipp = ip;
1258	return 0;
1259
1260 out_trans_cancel:
1261	xfs_trans_cancel(tp);
1262 out_release_inode:
1263	/*
1264	 * Wait until after the current transaction is aborted to finish the
1265	 * setup of the inode and release the inode.  This prevents recursive
1266	 * transactions and deadlocks from xfs_inactive.
1267	 */
1268	if (ip) {
1269		xfs_finish_inode_setup(ip);
1270		xfs_irele(ip);
1271	}
1272
1273	xfs_qm_dqrele(udqp);
1274	xfs_qm_dqrele(gdqp);
1275	xfs_qm_dqrele(pdqp);
1276
1277	if (unlock_dp_on_error)
1278		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1279	return error;
1280}
1281
1282int
1283xfs_create_tmpfile(
1284	struct xfs_inode	*dp,
1285	umode_t			mode,
1286	struct xfs_inode	**ipp)
1287{
1288	struct xfs_mount	*mp = dp->i_mount;
1289	struct xfs_inode	*ip = NULL;
1290	struct xfs_trans	*tp = NULL;
1291	int			error;
1292	prid_t                  prid;
1293	struct xfs_dquot	*udqp = NULL;
1294	struct xfs_dquot	*gdqp = NULL;
1295	struct xfs_dquot	*pdqp = NULL;
1296	struct xfs_trans_res	*tres;
1297	uint			resblks;
1298
1299	if (XFS_FORCED_SHUTDOWN(mp))
1300		return -EIO;
1301
1302	prid = xfs_get_initial_prid(dp);
1303
1304	/*
1305	 * Make sure that we have allocated dquot(s) on disk.
1306	 */
1307	error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1308				xfs_kgid_to_gid(current_fsgid()), prid,
1309				XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1310				&udqp, &gdqp, &pdqp);
1311	if (error)
1312		return error;
1313
1314	resblks = XFS_IALLOC_SPACE_RES(mp);
1315	tres = &M_RES(mp)->tr_create_tmpfile;
1316
1317	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1318	if (error)
1319		goto out_release_inode;
1320
1321	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1322						pdqp, resblks, 1, 0);
1323	if (error)
1324		goto out_trans_cancel;
1325
1326	error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
1327	if (error)
1328		goto out_trans_cancel;
1329
1330	if (mp->m_flags & XFS_MOUNT_WSYNC)
1331		xfs_trans_set_sync(tp);
1332
1333	/*
1334	 * Attach the dquot(s) to the inodes and modify them incore.
1335	 * These ids of the inode couldn't have changed since the new
1336	 * inode has been locked ever since it was created.
1337	 */
1338	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1339
1340	error = xfs_iunlink(tp, ip);
1341	if (error)
1342		goto out_trans_cancel;
1343
1344	error = xfs_trans_commit(tp);
1345	if (error)
1346		goto out_release_inode;
1347
1348	xfs_qm_dqrele(udqp);
1349	xfs_qm_dqrele(gdqp);
1350	xfs_qm_dqrele(pdqp);
1351
1352	*ipp = ip;
1353	return 0;
1354
1355 out_trans_cancel:
1356	xfs_trans_cancel(tp);
1357 out_release_inode:
1358	/*
1359	 * Wait until after the current transaction is aborted to finish the
1360	 * setup of the inode and release the inode.  This prevents recursive
1361	 * transactions and deadlocks from xfs_inactive.
1362	 */
1363	if (ip) {
1364		xfs_finish_inode_setup(ip);
1365		xfs_irele(ip);
1366	}
1367
1368	xfs_qm_dqrele(udqp);
1369	xfs_qm_dqrele(gdqp);
1370	xfs_qm_dqrele(pdqp);
1371
1372	return error;
1373}
1374
1375int
1376xfs_link(
1377	xfs_inode_t		*tdp,
1378	xfs_inode_t		*sip,
1379	struct xfs_name		*target_name)
1380{
1381	xfs_mount_t		*mp = tdp->i_mount;
1382	xfs_trans_t		*tp;
1383	int			error;
1384	int			resblks;
1385
1386	trace_xfs_link(tdp, target_name);
1387
1388	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1389
1390	if (XFS_FORCED_SHUTDOWN(mp))
1391		return -EIO;
1392
1393	error = xfs_qm_dqattach(sip);
1394	if (error)
1395		goto std_return;
1396
1397	error = xfs_qm_dqattach(tdp);
1398	if (error)
1399		goto std_return;
1400
1401	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1402	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
1403	if (error == -ENOSPC) {
1404		resblks = 0;
1405		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
1406	}
1407	if (error)
1408		goto std_return;
1409
1410	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1411
1412	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1413	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1414
1415	/*
1416	 * If we are using project inheritance, we only allow hard link
1417	 * creation in our tree when the project IDs are the same; else
1418	 * the tree quota mechanism could be circumvented.
1419	 */
1420	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1421		     (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
1422		error = -EXDEV;
1423		goto error_return;
1424	}
1425
1426	if (!resblks) {
1427		error = xfs_dir_canenter(tp, tdp, target_name);
1428		if (error)
1429			goto error_return;
1430	}
1431
1432	/*
1433	 * Handle initial link state of O_TMPFILE inode
1434	 */
1435	if (VFS_I(sip)->i_nlink == 0) {
1436		error = xfs_iunlink_remove(tp, sip);
1437		if (error)
1438			goto error_return;
1439	}
1440
1441	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1442				   resblks);
1443	if (error)
1444		goto error_return;
1445	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1446	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1447
1448	xfs_bumplink(tp, sip);
1449
1450	/*
1451	 * If this is a synchronous mount, make sure that the
1452	 * link transaction goes to disk before returning to
1453	 * the user.
1454	 */
1455	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1456		xfs_trans_set_sync(tp);
1457
1458	return xfs_trans_commit(tp);
1459
1460 error_return:
1461	xfs_trans_cancel(tp);
1462 std_return:
1463	return error;
1464}
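/*
 * Illustrative sketch (editor's addition, not part of this file): the
 * O_TMPFILE life cycle in miniature. xfs_create_tmpfile() above leaves the
 * new inode on the AGI unlinked list with nlink == 0; a later linkat()
 * lands in xfs_link(), which removes it from that list before creating the
 * directory entry.
 */
#if 0
static int
example_materialize_tmpfile(
	struct xfs_inode	*tdp,
	struct xfs_inode	*tmp,
	struct xfs_name		*name)
{
	/* VFS_I(tmp)->i_nlink is 0 here; xfs_link() handles the unlinked list. */
	return xfs_link(tdp, tmp, name);
}
#endif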
1465
1466/* Clear the reflink flag and the cowblocks tag if possible. */
1467static void
1468xfs_itruncate_clear_reflink_flags(
1469	struct xfs_inode	*ip)
1470{
1471	struct xfs_ifork	*dfork;
1472	struct xfs_ifork	*cfork;
1473
1474	if (!xfs_is_reflink_inode(ip))
1475		return;
1476	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1477	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1478	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1479		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1480	if (cfork->if_bytes == 0)
1481		xfs_inode_clear_cowblocks_tag(ip);
1482}
1483
1484/*
1485 * Free up the underlying blocks past new_size.  The new size must be smaller
1486 * than the current size.  This routine can be used both for the attribute and
1487 * data fork, and does not modify the inode size, which is left to the caller.
1488 *
1489 * The transaction passed to this routine must have made a permanent log
1490 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1491 * given transaction and start new ones, so make sure everything involved in
1492 * the transaction is tidy before calling here.  Some transaction will be
1493 * returned to the caller to be committed.  The incoming transaction must
1494 * already include the inode, and both inode locks must be held exclusively.
1495 * The inode must also be "held" within the transaction.  On return the inode
1496 * will be "held" within the returned transaction.  This routine does NOT
1497 * require any disk space to be reserved for it within the transaction.
1498 *
1499 * If we get an error, we must return with the inode locked and linked into the
1500 * current transaction. This keeps things simple for the higher level code,
1501 * because it always knows that the inode is locked and held in the transaction
1502 * that returns to it whether errors occur or not.  We don't mark the inode
1503 * dirty on error so that transactions can be easily aborted if possible.
1504 */
1505int
1506xfs_itruncate_extents_flags(
1507	struct xfs_trans	**tpp,
1508	struct xfs_inode	*ip,
1509	int			whichfork,
1510	xfs_fsize_t		new_size,
1511	int			flags)
1512{
1513	struct xfs_mount	*mp = ip->i_mount;
1514	struct xfs_trans	*tp = *tpp;
1515	xfs_fileoff_t		first_unmap_block;
1516	xfs_fileoff_t		last_block;
1517	xfs_filblks_t		unmap_len;
1518	int			error = 0;
1519	int			done = 0;
1520
1521	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1522	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1523	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1524	ASSERT(new_size <= XFS_ISIZE(ip));
1525	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1526	ASSERT(ip->i_itemp != NULL);
1527	ASSERT(ip->i_itemp->ili_lock_flags == 0);
1528	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1529
1530	trace_xfs_itruncate_extents_start(ip, new_size);
1531
1532	flags |= xfs_bmapi_aflag(whichfork);
1533
1534	/*
1535	 * Since it is possible for space to become allocated beyond
1536	 * the end of the file (in a crash where the space is allocated
1537	 * but the inode size is not yet updated), simply remove any
1538	 * blocks which show up between the new EOF and the maximum
1539	 * possible file size.  If the first block to be removed is
1540	 * beyond the maximum file size (ie it is the same as last_block),
1541	 * then there is nothing to do.
1542	 */
1543	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1544	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1545	if (first_unmap_block == last_block)
1546		return 0;
1547
1548	ASSERT(first_unmap_block < last_block);
1549	unmap_len = last_block - first_unmap_block + 1;
1550	while (!done) {
1551		ASSERT(tp->t_firstblock == NULLFSBLOCK);
1552		error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
1553				    XFS_ITRUNC_MAX_EXTENTS, &done);
1554		if (error)
1555			goto out;
1556
1557		/*
1558		 * Duplicate the transaction that has the permanent
1559		 * reservation and commit the old transaction.
1560		 */
1561		error = xfs_defer_finish(&tp);
1562		if (error)
1563			goto out;
1564
1565		error = xfs_trans_roll_inode(&tp, ip);
1566		if (error)
1567			goto out;
1568	}
1569
1570	if (whichfork == XFS_DATA_FORK) {
1571		/* Remove all pending CoW reservations. */
1572		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1573				first_unmap_block, last_block, true);
1574		if (error)
1575			goto out;
1576
1577		xfs_itruncate_clear_reflink_flags(ip);
1578	}
1579
1580	/*
1581	 * Always re-log the inode so that our permanent transaction can keep
1582	 * on rolling it forward in the log.
1583	 */
1584	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1585
1586	trace_xfs_itruncate_extents_end(ip, new_size);
1587
1588out:
1589	*tpp = tp;
1590	return error;
1591}
1592
1593int
1594xfs_release(
1595	xfs_inode_t	*ip)
1596{
1597	xfs_mount_t	*mp = ip->i_mount;
1598	int		error;
1599
1600	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1601		return 0;
1602
1603	/* If this is a read-only mount, don't do this (would generate I/O) */
1604	if (mp->m_flags & XFS_MOUNT_RDONLY)
1605		return 0;
1606
1607	if (!XFS_FORCED_SHUTDOWN(mp)) {
1608		int truncated;
1609
1610		/*
1611		 * If we previously truncated this file and removed old data
1612		 * in the process, we want to initiate "early" writeout on
1613		 * the last close.  This is an attempt to combat the notorious
1614		 * NULL files problem which is particularly noticeable from a
1615		 * truncate down, buffered (re-)write (delalloc), followed by
1616		 * a crash.  What we are effectively doing here is
1617		 * significantly reducing the time window where we'd otherwise
1618		 * be exposed to that problem.
1619		 */
1620		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1621		if (truncated) {
1622			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1623			if (ip->i_delayed_blks > 0) {
1624				error = filemap_flush(VFS_I(ip)->i_mapping);
1625				if (error)
1626					return error;
1627			}
1628		}
1629	}
1630
1631	if (VFS_I(ip)->i_nlink == 0)
1632		return 0;
1633
1634	if (xfs_can_free_eofblocks(ip, false)) {
1635
1636		/*
 1637		 * If the inode is being opened, written and closed
1638		 * frequently and we have delayed allocation blocks outstanding
1639		 * (e.g. streaming writes from the NFS server), truncating the
1640		 * blocks past EOF will cause fragmentation to occur.
1641		 *
1642		 * In this case don't do the truncation, but we have to be
1643		 * careful how we detect this case. Blocks beyond EOF show up as
1644		 * i_delayed_blks even when the inode is clean, so we need to
1645		 * truncate them away first before checking for a dirty release.
1646		 * Hence on the first dirty close we will still remove the
1647		 * speculative allocation, but after that we will leave it in
1648		 * place.
1649		 */
1650		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1651			return 0;
1652		/*
1653		 * If we can't get the iolock just skip truncating the blocks
1654		 * past EOF because we could deadlock with the mmap_sem
1655		 * otherwise. We'll get another chance to drop them once the
1656		 * last reference to the inode is dropped, so we'll never leak
1657		 * blocks permanently.
1658		 */
1659		if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1660			error = xfs_free_eofblocks(ip);
1661			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1662			if (error)
1663				return error;
1664		}
1665
1666		/* delalloc blocks after truncation means it really is dirty */
1667		if (ip->i_delayed_blks)
1668			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1669	}
1670	return 0;
1671}
1672
1673/*
1674 * xfs_inactive_truncate
1675 *
1676 * Called to perform a truncate when an inode becomes unlinked.
1677 */
1678STATIC int
1679xfs_inactive_truncate(
1680	struct xfs_inode *ip)
1681{
1682	struct xfs_mount	*mp = ip->i_mount;
1683	struct xfs_trans	*tp;
1684	int			error;
1685
1686	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1687	if (error) {
1688		ASSERT(XFS_FORCED_SHUTDOWN(mp));
1689		return error;
1690	}
1691	xfs_ilock(ip, XFS_ILOCK_EXCL);
1692	xfs_trans_ijoin(tp, ip, 0);
1693
1694	/*
1695	 * Log the inode size first to prevent stale data exposure in the event
1696	 * of a system crash before the truncate completes. See the related
1697	 * comment in xfs_vn_setattr_size() for details.
1698	 */
1699	ip->i_d.di_size = 0;
1700	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1701
1702	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1703	if (error)
1704		goto error_trans_cancel;
1705
1706	ASSERT(ip->i_d.di_nextents == 0);
1707
1708	error = xfs_trans_commit(tp);
1709	if (error)
1710		goto error_unlock;
1711
1712	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1713	return 0;
1714
1715error_trans_cancel:
1716	xfs_trans_cancel(tp);
1717error_unlock:
1718	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1719	return error;
1720}
1721
1722/*
1723 * xfs_inactive_ifree()
1724 *
1725 * Perform the inode free when an inode is unlinked.
1726 */
1727STATIC int
1728xfs_inactive_ifree(
1729	struct xfs_inode *ip)
1730{
1731	struct xfs_mount	*mp = ip->i_mount;
1732	struct xfs_trans	*tp;
1733	int			error;
1734
1735	/*
1736	 * We try to use a per-AG reservation for any block needed by the finobt
1737	 * tree, but as the finobt feature predates the per-AG reservation
1738	 * support a degraded file system might not have enough space for the
1739	 * reservation at mount time.  In that case try to dip into the reserved
1740	 * pool and pray.
1741	 *
1742	 * Send a warning if the reservation does happen to fail, as the inode
1743	 * now remains allocated and sits on the unlinked list until the fs is
1744	 * repaired.
1745	 */
1746	if (unlikely(mp->m_finobt_nores)) {
1747		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1748				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1749				&tp);
1750	} else {
1751		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1752	}
1753	if (error) {
1754		if (error == -ENOSPC) {
1755			xfs_warn_ratelimited(mp,
1756			"Failed to remove inode(s) from unlinked list. "
1757			"Please free space, unmount and run xfs_repair.");
1758		} else {
1759			ASSERT(XFS_FORCED_SHUTDOWN(mp));
1760		}
1761		return error;
1762	}
1763
1764	xfs_ilock(ip, XFS_ILOCK_EXCL);
1765	xfs_trans_ijoin(tp, ip, 0);
1766
1767	error = xfs_ifree(tp, ip);
1768	if (error) {
1769		/*
1770		 * If we fail to free the inode, shut down.  The cancel
1771		 * might do that, but we need to make sure.  Otherwise the
1772		 * inode might be lost for a long time or forever.
1773		 */
1774		if (!XFS_FORCED_SHUTDOWN(mp)) {
1775			xfs_notice(mp, "%s: xfs_ifree returned error %d",
1776				__func__, error);
1777			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1778		}
1779		xfs_trans_cancel(tp);
1780		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1781		return error;
1782	}
1783
1784	/*
1785	 * Credit the quota account(s). The inode is gone.
1786	 */
1787	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1788
1789	/*
1790	 * Just ignore errors at this point.  There is nothing we can do except
1791	 * to try to keep going. Make sure it's not a silent error.
1792	 */
1793	error = xfs_trans_commit(tp);
1794	if (error)
1795		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1796			__func__, error);
1797
1798	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1799	return 0;
1800}
1801
1802/*
1803 * xfs_inactive
1804 *
1805 * This is called when the reference count on the vnode goes to
1806 * zero.  If the file has been unlinked, then it must now be
1807 * truncated.  Also, we clear all of the read-ahead state kept for
1808 * the inode here since the file is now closed.
1809 */
1810void
1811xfs_inactive(
1812	xfs_inode_t	*ip)
1813{
1814	struct xfs_mount	*mp;
1815	int			error;
1816	int			truncate = 0;
1817
1818	/*
1819	 * If the inode is already free, then there can be nothing
1820	 * to clean up here.
1821	 */
1822	if (VFS_I(ip)->i_mode == 0) {
1823		ASSERT(ip->i_df.if_broot_bytes == 0);
1824		return;
1825	}
1826
1827	mp = ip->i_mount;
1828	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1829
1830	/* If this is a read-only mount, don't do this (would generate I/O) */
1831	if (mp->m_flags & XFS_MOUNT_RDONLY)
1832		return;
1833
1834	/* Try to clean out the cow blocks if there are any. */
1835	if (xfs_inode_has_cow_data(ip))
1836		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1837
1838	if (VFS_I(ip)->i_nlink != 0) {
1839		/*
1840		 * force is true because we are evicting an inode from the
1841		 * cache. Post-eof blocks must be freed, lest we end up with
1842		 * broken free space accounting.
1843		 *
1844		 * Note: don't bother with iolock here since lockdep complains
1845		 * about acquiring it in reclaim context. We have the only
1846		 * reference to the inode at this point anyway.
1847		 */
1848		if (xfs_can_free_eofblocks(ip, true))
1849			xfs_free_eofblocks(ip);
1850
1851		return;
1852	}
1853
1854	if (S_ISREG(VFS_I(ip)->i_mode) &&
1855	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1856	     ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
1857		truncate = 1;
1858
1859	error = xfs_qm_dqattach(ip);
1860	if (error)
1861		return;
1862
1863	if (S_ISLNK(VFS_I(ip)->i_mode))
1864		error = xfs_inactive_symlink(ip);
1865	else if (truncate)
1866		error = xfs_inactive_truncate(ip);
1867	if (error)
1868		return;
1869
1870	/*
1871	 * If there are attributes associated with the file then blow them away
1872	 * now.  The code calls a routine that recursively deconstructs the
1873	 * attribute fork. It also blows away the in-core attribute fork.
1874	 */
1875	if (XFS_IFORK_Q(ip)) {
1876		error = xfs_attr_inactive(ip);
1877		if (error)
1878			return;
1879	}
1880
1881	ASSERT(!ip->i_afp);
1882	ASSERT(ip->i_d.di_anextents == 0);
1883	ASSERT(ip->i_d.di_forkoff == 0);
1884
1885	/*
1886	 * Free the inode.
1887	 */
1888	error = xfs_inactive_ifree(ip);
1889	if (error)
1890		return;
1891
1892	/*
1893	 * Release the dquots held by the inode, if any.
1894	 */
1895	xfs_qm_dqdetach(ip);
1896}
1897
1898/*
1899 * In-Core Unlinked List Lookups
1900 * =============================
1901 *
1902 * Every inode is supposed to be reachable from some other piece of metadata
1903 * with the exception of the root directory.  Inodes with a connection to a
1904 * file descriptor but not linked from anywhere in the on-disk directory tree
1905 * are collectively known as unlinked inodes, though the filesystem itself
1906 * maintains links to these inodes so that on-disk metadata are consistent.
1907 *
1908 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
1909 * header contains a number of buckets that point to an inode, and each inode
1910 * record has a pointer to the next inode in the hash chain.  This
1911 * singly-linked list causes scaling problems in the iunlink remove function
1912 * because we must walk that list to find the inode that points to the inode
1913 * being removed from the unlinked hash bucket list.
1914 *
1915 * What if we modelled the unlinked list as a collection of records capturing
1916 * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
1917 * have a fast way to look up unlinked list predecessors, which avoids the
1918 * slow list walk.  That's exactly what we do here (in-core) with a per-AG
1919 * rhashtable.
1920 *
1921 * Because this is a backref cache, we ignore operational failures since the
1922 * iunlink code can fall back to the slow bucket walk.  The only errors that
1923 * should bubble out are for obviously incorrect situations.
1924 *
1925 * All users of the backref cache MUST hold the AGI buffer lock to serialize
1926 * access or have otherwise provided for concurrency control.
1927 */
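/*
 * Illustrative example (inode numbers invented for this sketch): if an
 * AGI bucket heads the chain 14 -> 35 -> 71 -> NULLAGINO, the cache
 * holds the records {X=14, Y=35} and {X=35, Y=71}, keyed on Y.
 * Removing inode 71 from the chain then looks up key 71 and finds its
 * predecessor 35 directly instead of walking the chain from 14.
 */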
1928
1929/* Capture a "X.next_unlinked = Y" relationship. */
1930struct xfs_iunlink {
1931	struct rhash_head	iu_rhash_head;
1932	xfs_agino_t		iu_agino;		/* X */
1933	xfs_agino_t		iu_next_unlinked;	/* Y */
1934};
1935
1936/* Unlinked list predecessor lookup hashtable construction */
1937static int
1938xfs_iunlink_obj_cmpfn(
1939	struct rhashtable_compare_arg	*arg,
1940	const void			*obj)
1941{
1942	const xfs_agino_t		*key = arg->key;
1943	const struct xfs_iunlink	*iu = obj;
1944
1945	if (iu->iu_next_unlinked != *key)
1946		return 1;
1947	return 0;
1948}
1949
1950static const struct rhashtable_params xfs_iunlink_hash_params = {
1951	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
1952	.key_len		= sizeof(xfs_agino_t),
1953	.key_offset		= offsetof(struct xfs_iunlink,
1954					   iu_next_unlinked),
1955	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
1956	.automatic_shrinking	= true,
1957	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
1958};
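/*
 * Note that the hash key is iu_next_unlinked (Y), not iu_agino (X), so
 * looking up an inode number yields the record naming that inode's
 * predecessor in the unlinked chain.
 */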
1959
1960/*
1961 * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
1962 * relation is found.
1963 */
1964static xfs_agino_t
1965xfs_iunlink_lookup_backref(
1966	struct xfs_perag	*pag,
1967	xfs_agino_t		agino)
1968{
1969	struct xfs_iunlink	*iu;
1970
1971	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
1972			xfs_iunlink_hash_params);
1973	return iu ? iu->iu_agino : NULLAGINO;
1974}
1975
1976/*
1977 * Take ownership of an iunlink cache entry and insert it into the hash table.
1978 * If successful, the entry will be owned by the cache; if not, it is freed.
1979 * Either way, the caller does not own @iu after this call.
1980 */
1981static int
1982xfs_iunlink_insert_backref(
1983	struct xfs_perag	*pag,
1984	struct xfs_iunlink	*iu)
1985{
1986	int			error;
1987
1988	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
1989			&iu->iu_rhash_head, xfs_iunlink_hash_params);
1990	/*
1991	 * Fail loudly if there already was an entry because that's a sign of
1992	 * corruption of in-memory data.  Also fail loudly if we see an error
1993	 * code we didn't anticipate from the rhashtable code.  Currently we
1994	 * only anticipate ENOMEM.
1995	 */
1996	if (error) {
1997		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
1998		kmem_free(iu);
1999	}
2000	/*
2001	 * Absorb any runtime errors that aren't a result of corruption because
2002	 * this is a cache and we can always fall back to bucket list scanning.
2003	 */
2004	if (error != 0 && error != -EEXIST)
2005		error = 0;
2006	return error;
2007}
2008
2009/* Remember that @prev_agino.next_unlinked = @this_agino. */
2010static int
2011xfs_iunlink_add_backref(
2012	struct xfs_perag	*pag,
2013	xfs_agino_t		prev_agino,
2014	xfs_agino_t		this_agino)
2015{
2016	struct xfs_iunlink	*iu;
2017
2018	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
2019		return 0;
2020
2021	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
2022	iu->iu_agino = prev_agino;
2023	iu->iu_next_unlinked = this_agino;
2024
2025	return xfs_iunlink_insert_backref(pag, iu);
2026}
2027
2028/*
2029 * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
2030 * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
2031 * wasn't any such entry then we don't bother.
2032 */
2033static int
2034xfs_iunlink_change_backref(
2035	struct xfs_perag	*pag,
2036	xfs_agino_t		agino,
2037	xfs_agino_t		next_unlinked)
2038{
2039	struct xfs_iunlink	*iu;
2040	int			error;
2041
2042	/* Look up the old entry; if there wasn't one then exit. */
2043	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
2044			xfs_iunlink_hash_params);
2045	if (!iu)
2046		return 0;
2047
2048	/*
2049	 * Remove the entry.  This shouldn't ever return an error, but if we
2050	 * couldn't remove the old entry we don't want to add it again to the
2051	 * hash table, and if the entry disappeared on us then someone's
2052	 * violated the locking rules and we need to fail loudly.  Either way
2053	 * we cannot remove the inode because internal state is or would have
2054	 * been corrupt.
2055	 */
2056	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
2057			&iu->iu_rhash_head, xfs_iunlink_hash_params);
2058	if (error)
2059		return error;
2060
2061	/* If there is no new next entry just free our item and return. */
2062	if (next_unlinked == NULLAGINO) {
2063		kmem_free(iu);
2064		return 0;
2065	}
2066
2067	/* Update the entry and re-add it to the hash table. */
2068	iu->iu_next_unlinked = next_unlinked;
2069	return xfs_iunlink_insert_backref(pag, iu);
2070}
2071
2072/* Set up the in-core predecessor structures. */
2073int
2074xfs_iunlink_init(
2075	struct xfs_perag	*pag)
2076{
2077	return rhashtable_init(&pag->pagi_unlinked_hash,
2078			&xfs_iunlink_hash_params);
2079}
2080
2081/* Free the in-core predecessor structures. */
2082static void
2083xfs_iunlink_free_item(
2084	void			*ptr,
2085	void			*arg)
2086{
2087	struct xfs_iunlink	*iu = ptr;
2088	bool			*freed_anything = arg;
2089
2090	*freed_anything = true;
2091	kmem_free(iu);
2092}
2093
2094void
2095xfs_iunlink_destroy(
2096	struct xfs_perag	*pag)
2097{
2098	bool			freed_anything = false;
2099
2100	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
2101			xfs_iunlink_free_item, &freed_anything);
2102
2103	ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
2104}
2105
2106/*
2107 * Point the AGI unlinked bucket at an inode and log the results.  The caller
2108 * is responsible for validating the old value.
2109 */
2110STATIC int
2111xfs_iunlink_update_bucket(
2112	struct xfs_trans	*tp,
2113	xfs_agnumber_t		agno,
2114	struct xfs_buf		*agibp,
2115	unsigned int		bucket_index,
2116	xfs_agino_t		new_agino)
2117{
2118	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agibp);
2119	xfs_agino_t		old_value;
2120	int			offset;
2121
2122	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
2123
2124	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2125	trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
2126			old_value, new_agino);
2127
2128	/*
2129	 * We should never find the head of the list already set to the value
2130	 * passed in because either we're adding or removing ourselves from the
2131	 * head of the list.
2132	 */
2133	if (old_value == new_agino)
2134		return -EFSCORRUPTED;
2135
2136	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
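	/*
	 * Log only the bucket slot we changed: agi_unlinked is an array
	 * of 32-bit aginos, so the dirty byte range is exactly the
	 * bucket_index'th entry.
	 */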
2137	offset = offsetof(struct xfs_agi, agi_unlinked) +
2138			(sizeof(xfs_agino_t) * bucket_index);
2139	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
2140	return 0;
2141}
2142
2143/* Set an on-disk inode's next_unlinked pointer. */
2144STATIC void
2145xfs_iunlink_update_dinode(
2146	struct xfs_trans	*tp,
2147	xfs_agnumber_t		agno,
2148	xfs_agino_t		agino,
2149	struct xfs_buf		*ibp,
2150	struct xfs_dinode	*dip,
2151	struct xfs_imap		*imap,
2152	xfs_agino_t		next_agino)
2153{
2154	struct xfs_mount	*mp = tp->t_mountp;
2155	int			offset;
2156
2157	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2158
2159	trace_xfs_iunlink_update_dinode(mp, agno, agino,
2160			be32_to_cpu(dip->di_next_unlinked), next_agino);
2161
2162	dip->di_next_unlinked = cpu_to_be32(next_agino);
2163	offset = imap->im_boffset +
2164			offsetof(struct xfs_dinode, di_next_unlinked);
2165
2166	/* need to recalc the inode CRC if appropriate */
2167	xfs_dinode_calc_crc(mp, dip);
2168	xfs_trans_inode_buf(tp, ibp);
2169	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2170	xfs_inobp_check(mp, ibp);
2171}
2172
2173/* Set an in-core inode's unlinked pointer and return the old value. */
2174STATIC int
2175xfs_iunlink_update_inode(
2176	struct xfs_trans	*tp,
2177	struct xfs_inode	*ip,
2178	xfs_agnumber_t		agno,
2179	xfs_agino_t		next_agino,
2180	xfs_agino_t		*old_next_agino)
2181{
2182	struct xfs_mount	*mp = tp->t_mountp;
2183	struct xfs_dinode	*dip;
2184	struct xfs_buf		*ibp;
2185	xfs_agino_t		old_value;
2186	int			error;
2187
2188	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2189
2190	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0, 0);
2191	if (error)
2192		return error;
2193
2194	/* Make sure the old pointer isn't garbage. */
2195	old_value = be32_to_cpu(dip->di_next_unlinked);
2196	if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
2197		error = -EFSCORRUPTED;
2198		goto out;
2199	}
2200
2201	/*
2202	 * Since we're updating a linked list, we should never find that the
2203	 * current pointer is the same as the new value, unless we're
2204	 * terminating the list.
2205	 */
2206	*old_next_agino = old_value;
2207	if (old_value == next_agino) {
2208		if (next_agino != NULLAGINO)
2209			error = -EFSCORRUPTED;
2210		goto out;
2211	}
2212
2213	/* Ok, update the new pointer. */
2214	xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
2215			ibp, dip, &ip->i_imap, next_agino);
2216	return 0;
2217out:
2218	xfs_trans_brelse(tp, ibp);
2219	return error;
2220}
2221
2222/*
2223 * This is called when the inode's link count has gone to 0 or we are creating
2224 * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
2225 *
2226 * We place the on-disk inode on a list in the AGI.  It will be pulled from this
2227 * list when the inode is freed.
2228 */
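/*
 * Sketch of the insert path (illustrative inode numbers): pushing agino
 * 42 onto a bucket whose head is 14 sets 42.next_unlinked = 14 on disk,
 * records the backref "42.next_unlinked = 14" in the per-AG cache, and
 * finally repoints the AGI bucket at 42.
 */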
2229STATIC int
2230xfs_iunlink(
2231	struct xfs_trans	*tp,
2232	struct xfs_inode	*ip)
2233{
2234	struct xfs_mount	*mp = tp->t_mountp;
2235	struct xfs_agi		*agi;
2236	struct xfs_buf		*agibp;
2237	xfs_agino_t		next_agino;
2238	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2239	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2240	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2241	int			error;
2242
2243	ASSERT(VFS_I(ip)->i_nlink == 0);
2244	ASSERT(VFS_I(ip)->i_mode != 0);
2245	trace_xfs_iunlink(ip);
2246
2247	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2248	error = xfs_read_agi(mp, tp, agno, &agibp);
2249	if (error)
2250		return error;
2251	agi = XFS_BUF_TO_AGI(agibp);
2252
2253	/*
2254	 * Get the index into the agi hash table for the list this inode will
2255	 * go on.  Make sure the pointer isn't garbage and that this inode
2256	 * isn't already on the list.
2257	 */
2258	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2259	if (next_agino == agino ||
2260	    !xfs_verify_agino_or_null(mp, agno, next_agino))
2261		return -EFSCORRUPTED;
2262
2263	if (next_agino != NULLAGINO) {
2264		struct xfs_perag	*pag;
2265		xfs_agino_t		old_agino;
2266
2267		/*
2268		 * There is already another inode in the bucket, so point this
2269		 * inode to the current head of the list.
2270		 */
2271		error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
2272				&old_agino);
2273		if (error)
2274			return error;
2275		ASSERT(old_agino == NULLAGINO);
2276
2277		/*
2278		 * agino has been unlinked, add a backref from the next inode
2279		 * back to agino.
2280		 */
2281		pag = xfs_perag_get(mp, agno);
2282		error = xfs_iunlink_add_backref(pag, agino, next_agino);
2283		xfs_perag_put(pag);
2284		if (error)
2285			return error;
2286	}
2287
2288	/* Point the head of the list at this inode. */
2289	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
2290}
2291
2292/* Return the imap, dinode pointer, and buffer for an inode. */
2293STATIC int
2294xfs_iunlink_map_ino(
2295	struct xfs_trans	*tp,
2296	xfs_agnumber_t		agno,
2297	xfs_agino_t		agino,
2298	struct xfs_imap		*imap,
2299	struct xfs_dinode	**dipp,
2300	struct xfs_buf		**bpp)
2301{
2302	struct xfs_mount	*mp = tp->t_mountp;
2303	int			error;
2304
2305	imap->im_blkno = 0;
2306	error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
2307	if (error) {
2308		xfs_warn(mp, "%s: xfs_imap returned error %d.",
2309				__func__, error);
2310		return error;
2311	}
2312
2313	error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0, 0);
2314	if (error) {
2315		xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2316				__func__, error);
2317		return error;
2318	}
2319
2320	return 0;
2321}
2322
2323/*
2324 * Walk the unlinked chain from @head_agino until we find the inode that
2325 * points to @target_agino.  Return the inode number, map, dinode pointer,
2326 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
2327 *
2328 * @tp, @pag, @head_agino, and @target_agino are input parameters.
2329 * @agino, @imap, @dipp, and @bpp are all output parameters.
2330 *
2331 * Do not call this function if @target_agino is the head of the list.
2332 */
2333STATIC int
2334xfs_iunlink_map_prev(
2335	struct xfs_trans	*tp,
2336	xfs_agnumber_t		agno,
2337	xfs_agino_t		head_agino,
2338	xfs_agino_t		target_agino,
2339	xfs_agino_t		*agino,
2340	struct xfs_imap		*imap,
2341	struct xfs_dinode	**dipp,
2342	struct xfs_buf		**bpp,
2343	struct xfs_perag	*pag)
2344{
2345	struct xfs_mount	*mp = tp->t_mountp;
2346	xfs_agino_t		next_agino;
2347	int			error;
2348
2349	ASSERT(head_agino != target_agino);
2350	*bpp = NULL;
2351
2352	/* See if our backref cache can find it faster. */
2353	*agino = xfs_iunlink_lookup_backref(pag, target_agino);
2354	if (*agino != NULLAGINO) {
2355		error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
2356		if (error)
2357			return error;
2358
2359		if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
2360			return 0;
2361
2362		/*
2363		 * If we get here the cache contents were corrupt, so drop the
2364		 * buffer and fall back to walking the bucket list.
2365		 */
2366		xfs_trans_brelse(tp, *bpp);
2367		*bpp = NULL;
2368		WARN_ON_ONCE(1);
2369	}
2370
2371	trace_xfs_iunlink_map_prev_fallback(mp, agno);
2372
2373	/* Otherwise, walk the entire bucket until we find it. */
2374	next_agino = head_agino;
2375	while (next_agino != target_agino) {
2376		xfs_agino_t	unlinked_agino;
2377
2378		if (*bpp)
2379			xfs_trans_brelse(tp, *bpp);
2380
2381		*agino = next_agino;
2382		error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
2383				bpp);
2384		if (error)
2385			return error;
2386
2387		unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2388		/*
2389		 * Make sure this pointer is valid and isn't an obvious
2390		 * infinite loop.
2391		 */
2392		if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
2393		    next_agino == unlinked_agino) {
2394			XFS_CORRUPTION_ERROR(__func__,
2395					XFS_ERRLEVEL_LOW, mp,
2396					*dipp, sizeof(**dipp));
2397			error = -EFSCORRUPTED;
2398			return error;
2399		}
2400		next_agino = unlinked_agino;
2401	}
2402
2403	return 0;
2404}
2405
2406/*
2407 * Pull the on-disk inode from the AGI unlinked list.
2408 */
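/*
 * Two cases, in sketch form: if the inode is the bucket head, the AGI
 * bucket is simply repointed at the inode's successor.  Otherwise we
 * find the predecessor (via the backref cache when possible, else by
 * walking the bucket), splice it to the successor, and fix up the
 * in-core backrefs to match.
 */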
2409STATIC int
2410xfs_iunlink_remove(
2411	struct xfs_trans	*tp,
2412	struct xfs_inode	*ip)
2413{
2414	struct xfs_mount	*mp = tp->t_mountp;
2415	struct xfs_agi		*agi;
2416	struct xfs_buf		*agibp;
2417	struct xfs_buf		*last_ibp;
2418	struct xfs_dinode	*last_dip = NULL;
2419	struct xfs_perag	*pag = NULL;
2420	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2421	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2422	xfs_agino_t		next_agino;
2423	xfs_agino_t		head_agino;
2424	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2425	int			error;
2426
2427	trace_xfs_iunlink_remove(ip);
2428
2429	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2430	error = xfs_read_agi(mp, tp, agno, &agibp);
2431	if (error)
2432		return error;
2433	agi = XFS_BUF_TO_AGI(agibp);
2434
2435	/*
2436	 * Get the index into the agi hash table for the list this inode is
2437	 * on.  Make sure the head pointer isn't garbage.
2438	 */
2439	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2440	if (!xfs_verify_agino(mp, agno, head_agino)) {
2441		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2442				agi, sizeof(*agi));
2443		return -EFSCORRUPTED;
2444	}
2445
2446	/*
2447	 * Set our inode's next_unlinked pointer to NULL and then return
2448	 * the old pointer value so that we can update whatever was previous
2449	 * to us in the list to point to whatever was next in the list.
2450	 */
2451	error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
2452	if (error)
2453		return error;
2454
2455	/*
2456	 * If there was a backref pointing from the next inode back to this
2457	 * one, remove it because we've removed this inode from the list.
2458	 *
2459	 * Later, if this inode was in the middle of the list we'll update
2460	 * this inode's backref to point from the next inode.
2461	 */
2462	if (next_agino != NULLAGINO) {
2463		pag = xfs_perag_get(mp, agno);
2464		error = xfs_iunlink_change_backref(pag, next_agino,
2465				NULLAGINO);
2466		if (error)
2467			goto out;
2468	}
2469
2470	if (head_agino == agino) {
2471		/* Point the head of the list to the next unlinked inode. */
2472		error = xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
2473				next_agino);
2474		if (error)
2475			goto out;
2476	} else {
2477		struct xfs_imap	imap;
2478		xfs_agino_t	prev_agino;
2479
2480		if (!pag)
2481			pag = xfs_perag_get(mp, agno);
2482
2483		/* We need to search the list for the inode being freed. */
2484		error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
2485				&prev_agino, &imap, &last_dip, &last_ibp,
2486				pag);
2487		if (error)
2488			goto out;
2489
2490		/* Point the previous inode on the list to the next inode. */
2491		xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
2492				last_dip, &imap, next_agino);
2493
2494		/*
2495		 * Now we deal with the backref for this inode.  If this inode
2496		 * pointed at a real inode, change the backref that pointed to
2497		 * us to point to our old next.  If this inode was the end of
2498		 * the list, delete the backref that pointed to us.  Note that
2499		 * change_backref takes care of deleting the backref if
2500		 * next_agino is NULLAGINO.
2501		 */
2502		error = xfs_iunlink_change_backref(pag, agino, next_agino);
2503		if (error)
2504			goto out;
2505	}
2506
2507out:
2508	if (pag)
2509		xfs_perag_put(pag);
2510	return error;
2511}
2512
2513/*
2514 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2515 * inodes that are in memory - they all must be marked stale and attached to
2516 * the cluster buffer.
2517 */
2518STATIC int
2519xfs_ifree_cluster(
2520	xfs_inode_t		*free_ip,
2521	xfs_trans_t		*tp,
2522	struct xfs_icluster	*xic)
2523{
2524	xfs_mount_t		*mp = free_ip->i_mount;
2525	int			nbufs;
2526	int			i, j;
2527	int			ioffset;
2528	xfs_daddr_t		blkno;
2529	xfs_buf_t		*bp;
2530	xfs_inode_t		*ip;
2531	xfs_inode_log_item_t	*iip;
2532	struct xfs_log_item	*lip;
2533	struct xfs_perag	*pag;
2534	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
2535	xfs_ino_t		inum;
2536
2537	inum = xic->first_ino;
2538	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
2539	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2540
2541	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2542		/*
2543		 * The allocation bitmap tells us which inodes of the chunk were
2544		 * physically allocated. Skip the cluster if an inode falls into
2545		 * a sparse region.
2546		 */
2547		ioffset = inum - xic->first_ino;
2548		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2549			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2550			continue;
2551		}
2552
2553		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2554					 XFS_INO_TO_AGBNO(mp, inum));
2555
2556		/*
2557		 * We obtain and lock the backing buffer first in the process
2558		 * here, as we have to ensure that any dirty inode that we
2559		 * can't get the flush lock on is attached to the buffer.
2560		 * If we scan the in-memory inodes first, then buffer IO can
2561		 * complete before we get a lock on it, and hence we may fail
2562		 * to mark all the active inodes on the buffer stale.
2563		 */
2564		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2565					mp->m_bsize * igeo->blocks_per_cluster,
2566					XBF_UNMAPPED);
2567
2568		if (!bp)
2569			return -ENOMEM;
2570
2571		/*
2572		 * This buffer may not have been correctly initialised as we
2573		 * didn't read it from disk. That's not important because we are
2574		 * only using it to mark the buffer as stale in the log, and to
2575		 * attach stale cached inodes on it. That means it will never be
2576		 * dispatched for IO. If it is, we want to know about it, and we
2577		 * want it to fail. We can achieve this by adding a write
2578		 * verifier to the buffer.
2579		 */
2580		bp->b_ops = &xfs_inode_buf_ops;
2581
2582		/*
2583		 * Walk the inodes already attached to the buffer and mark them
2584		 * stale. These will all have the flush locks held, so an
2585		 * in-memory inode walk can't lock them. By marking them all
2586		 * stale first, we will not attempt to lock them in the loop
2587		 * below as the XFS_ISTALE flag will be set.
2588		 */
2589		list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
2590			if (lip->li_type == XFS_LI_INODE) {
2591				iip = (xfs_inode_log_item_t *)lip;
2592				ASSERT(iip->ili_logged == 1);
2593				lip->li_cb = xfs_istale_done;
2594				xfs_trans_ail_copy_lsn(mp->m_ail,
2595							&iip->ili_flush_lsn,
2596							&iip->ili_item.li_lsn);
2597				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2598			}
2599		}
2600
2601
2602		/*
2603		 * For each inode in memory attempt to add it to the inode
2604		 * buffer and set it up for being staled on buffer IO
2605		 * completion.  This is safe as we've locked out tail pushing
2606		 * and flushing by locking the buffer.
2607		 *
2608		 * We have already marked every inode that was part of a
2609		 * transaction stale above, which means there is no point in
2610		 * even trying to lock them.
2611		 */
2612		for (i = 0; i < igeo->inodes_per_cluster; i++) {
2613retry:
2614			rcu_read_lock();
2615			ip = radix_tree_lookup(&pag->pag_ici_root,
2616					XFS_INO_TO_AGINO(mp, (inum + i)));
2617
2618			/* Inode not in memory, nothing to do */
2619			if (!ip) {
2620				rcu_read_unlock();
2621				continue;
2622			}
2623
2624			/*
2625			 * because this is an RCU protected lookup, we could
2626			 * find a recently freed or even reallocated inode
2627			 * during the lookup. We need to check under the
2628			 * i_flags_lock for a valid inode here. Skip it if it
2629			 * is not valid, the wrong inode or stale.
2630			 */
2631			spin_lock(&ip->i_flags_lock);
2632			if (ip->i_ino != inum + i ||
2633			    __xfs_iflags_test(ip, XFS_ISTALE)) {
2634				spin_unlock(&ip->i_flags_lock);
2635				rcu_read_unlock();
2636				continue;
2637			}
2638			spin_unlock(&ip->i_flags_lock);
2639
2640			/*
2641			 * Don't try to lock/unlock the current inode, but we
2642			 * _cannot_ skip the other inodes that we did not find
2643			 * in the list attached to the buffer and are not
2644			 * already marked stale. If we can't lock it, back off
2645			 * and retry.
2646			 */
2647			if (ip != free_ip) {
2648				if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2649					rcu_read_unlock();
2650					delay(1);
2651					goto retry;
2652				}
2653
2654				/*
2655				 * Check the inode number again in case we're
2656				 * racing with freeing in xfs_reclaim_inode().
2657				 * See the comments in that function for more
2658				 * information as to why the initial check is
2659				 * not sufficient.
2660				 */
2661				if (ip->i_ino != inum + i) {
2662					xfs_iunlock(ip, XFS_ILOCK_EXCL);
2663					rcu_read_unlock();
2664					continue;
2665				}
2666			}
2667			rcu_read_unlock();
2668
2669			xfs_iflock(ip);
2670			xfs_iflags_set(ip, XFS_ISTALE);
2671
2672			/*
2673			 * we don't need to attach clean inodes or those only
2674			 * with unlogged changes (which we throw away, anyway).
2675			 */
2676			iip = ip->i_itemp;
2677			if (!iip || xfs_inode_clean(ip)) {
2678				ASSERT(ip != free_ip);
2679				xfs_ifunlock(ip);
2680				xfs_iunlock(ip, XFS_ILOCK_EXCL);
2681				continue;
2682			}
2683
2684			iip->ili_last_fields = iip->ili_fields;
2685			iip->ili_fields = 0;
2686			iip->ili_fsync_fields = 0;
2687			iip->ili_logged = 1;
2688			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2689						&iip->ili_item.li_lsn);
2690
2691			xfs_buf_attach_iodone(bp, xfs_istale_done,
2692						  &iip->ili_item);
2693
2694			if (ip != free_ip)
2695				xfs_iunlock(ip, XFS_ILOCK_EXCL);
2696		}
2697
2698		xfs_trans_stale_inode_buf(tp, bp);
2699		xfs_trans_binval(tp, bp);
2700	}
2701
2702	xfs_perag_put(pag);
2703	return 0;
2704}
2705
2706/*
2707 * Free any local-format buffers sitting around before we reset to
2708 * extents format.
2709 */
2710static inline void
2711xfs_ifree_local_data(
2712	struct xfs_inode	*ip,
2713	int			whichfork)
2714{
2715	struct xfs_ifork	*ifp;
2716
2717	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
2718		return;
2719
2720	ifp = XFS_IFORK_PTR(ip, whichfork);
2721	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
2722}
2723
2724/*
2725 * This is called to return an inode to the inode free list.
2726 * The inode should already be truncated to 0 length and have
2727 * no pages associated with it.  This routine also assumes that
2728 * the inode is already a part of the transaction.
2729 *
2730 * The on-disk copy of the inode will have been added to the list
2731 * of unlinked inodes in the AGI. We need to remove the inode from
2732 * that list atomically with respect to freeing it here.
2733 */
2734int
2735xfs_ifree(
2736	struct xfs_trans	*tp,
2737	struct xfs_inode	*ip)
2738{
2739	int			error;
2740	struct xfs_icluster	xic = { 0 };
2741
2742	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2743	ASSERT(VFS_I(ip)->i_nlink == 0);
2744	ASSERT(ip->i_d.di_nextents == 0);
2745	ASSERT(ip->i_d.di_anextents == 0);
2746	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2747	ASSERT(ip->i_d.di_nblocks == 0);
2748
2749	/*
2750	 * Pull the on-disk inode from the AGI unlinked list.
2751	 */
2752	error = xfs_iunlink_remove(tp, ip);
2753	if (error)
2754		return error;
2755
2756	error = xfs_difree(tp, ip->i_ino, &xic);
2757	if (error)
2758		return error;
2759
2760	xfs_ifree_local_data(ip, XFS_DATA_FORK);
2761	xfs_ifree_local_data(ip, XFS_ATTR_FORK);
2762
2763	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2764	ip->i_d.di_flags = 0;
2765	ip->i_d.di_flags2 = 0;
2766	ip->i_d.di_dmevmask = 0;
2767	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
2768	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2769	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2770
2771	/* Don't attempt to replay owner changes for a deleted inode */
2772	ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER);
2773
2774	/*
2775	 * Bump the generation count so no one will be confused
2776	 * by reincarnations of this inode.
2777	 */
2778	VFS_I(ip)->i_generation++;
2779	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2780
2781	if (xic.deleted)
2782		error = xfs_ifree_cluster(ip, tp, &xic);
2783
2784	return error;
2785}
2786
2787/*
2788 * This is called to unpin an inode.  The caller must have the inode locked
2789 * in at least shared mode so that the buffer cannot be subsequently pinned
2790 * once someone is waiting for it to be unpinned.
2791 */
2792static void
2793xfs_iunpin(
2794	struct xfs_inode	*ip)
2795{
2796	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2797
2798	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2799
2800	/* Give the log a push to start the unpinning I/O */
2801	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
2802
2803}
2804
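/*
 * Sleep until the inode's pin count drops to zero.  xfs_iunpin() kicks
 * the log first so that the transactions pinning the inode reach disk
 * and the unpin completions can run.
 */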
2805static void
2806__xfs_iunpin_wait(
2807	struct xfs_inode	*ip)
2808{
2809	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2810	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2811
2812	xfs_iunpin(ip);
2813
2814	do {
2815		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2816		if (xfs_ipincount(ip))
2817			io_schedule();
2818	} while (xfs_ipincount(ip));
2819	finish_wait(wq, &wait.wq_entry);
2820}
2821
2822void
2823xfs_iunpin_wait(
2824	struct xfs_inode	*ip)
2825{
2826	if (xfs_ipincount(ip))
2827		__xfs_iunpin_wait(ip);
2828}
2829
2830/*
2831 * Removing an inode from the namespace involves removing the directory entry
2832 * and dropping the link count on the inode. Removing the directory entry can
2833 * result in locking an AGF (directory blocks were freed) and removing a link
2834 * count can result in placing the inode on an unlinked list which results in
2835 * locking an AGI.
2836 *
2837 * The big problem here is that we have an ordering constraint on AGF and AGI
2838 * locking - inode allocation locks the AGI, then can allocate a new extent for
2839 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2840 * removes the inode from the unlinked list, requiring that we lock the AGI
2841 * first, and then freeing the inode can result in an inode chunk being freed
2842 * and hence freeing disk space requiring that we lock an AGF.
2843 *
2844 * Hence the ordering that is imposed by other parts of the code is AGI before
2845 * AGF. This means we cannot remove the directory entry before we drop the inode
2846 * reference count and put it on the unlinked list as this results in a lock
2847 * order of AGF then AGI, and this can deadlock against inode allocation and
2848 * freeing. Therefore we must drop the link counts before we remove the
2849 * directory entry.
2850 *
2851 * This is still safe from a transactional point of view - it is not until we
2852 * get to xfs_defer_finish() that we have the possibility of multiple
2853 * transactions in this operation. Hence as long as we remove the directory
2854 * entry and drop the link count in the first transaction of the remove
2855 * operation, there are no transactional constraints on the ordering here.
2856 */
2857int
2858xfs_remove(
2859	xfs_inode_t             *dp,
2860	struct xfs_name		*name,
2861	xfs_inode_t		*ip)
2862{
2863	xfs_mount_t		*mp = dp->i_mount;
2864	xfs_trans_t             *tp = NULL;
2865	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2866	int                     error = 0;
2867	uint			resblks;
2868
2869	trace_xfs_remove(dp, name);
2870
2871	if (XFS_FORCED_SHUTDOWN(mp))
2872		return -EIO;
2873
2874	error = xfs_qm_dqattach(dp);
2875	if (error)
2876		goto std_return;
2877
2878	error = xfs_qm_dqattach(ip);
2879	if (error)
2880		goto std_return;
2881
2882	/*
2883	 * We try to get the real space reservation first,
2884	 * allowing for directory btree deletion(s) implying
2885	 * possible bmap insert(s).  If we can't get the space
2886	 * reservation then we use 0 instead, and avoid the bmap
2887	 * btree insert(s) in the directory code: if a bmap insert
2888	 * would be needed, the directory code instead trims the
2889	 * LAST block from the directory.
2890	 */
2891	resblks = XFS_REMOVE_SPACE_RES(mp);
2892	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2893	if (error == -ENOSPC) {
2894		resblks = 0;
2895		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2896				&tp);
2897	}
2898	if (error) {
2899		ASSERT(error != -ENOSPC);
2900		goto std_return;
2901	}
2902
2903	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2904
2905	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2906	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2907
2908	/*
2909	 * If we're removing a directory perform some additional validation.
2910	 */
2911	if (is_dir) {
2912		ASSERT(VFS_I(ip)->i_nlink >= 2);
2913		if (VFS_I(ip)->i_nlink != 2) {
2914			error = -ENOTEMPTY;
2915			goto out_trans_cancel;
2916		}
2917		if (!xfs_dir_isempty(ip)) {
2918			error = -ENOTEMPTY;
2919			goto out_trans_cancel;
2920		}
2921
2922		/* Drop the link from ip's "..".  */
2923		error = xfs_droplink(tp, dp);
2924		if (error)
2925			goto out_trans_cancel;
2926
2927		/* Drop the "." link from ip to self.  */
2928		error = xfs_droplink(tp, ip);
2929		if (error)
2930			goto out_trans_cancel;
2931	} else {
2932		/*
2933		 * When removing a non-directory we need to log the parent
2934		 * inode here.  For a directory this is done implicitly
2935		 * by the xfs_droplink call for the ".." entry.
2936		 */
2937		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2938	}
2939	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2940
2941	/* Drop the link from dp to ip. */
2942	error = xfs_droplink(tp, ip);
2943	if (error)
2944		goto out_trans_cancel;
2945
2946	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2947	if (error) {
2948		ASSERT(error != -ENOENT);
2949		goto out_trans_cancel;
2950	}
2951
2952	/*
2953	 * If this is a synchronous mount, make sure that the
2954	 * remove transaction goes to disk before returning to
2955	 * the user.
2956	 */
2957	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2958		xfs_trans_set_sync(tp);
2959
2960	error = xfs_trans_commit(tp);
2961	if (error)
2962		goto std_return;
2963
2964	if (is_dir && xfs_inode_is_filestream(ip))
2965		xfs_filestream_deassociate(ip);
2966
2967	return 0;
2968
2969 out_trans_cancel:
2970	xfs_trans_cancel(tp);
2971 std_return:
2972	return error;
2973}
2974
2975/*
2976 * Enter all inodes for a rename transaction into a sorted array.
2977 */
2978#define __XFS_SORT_INODES	5
2979STATIC void
2980xfs_sort_for_rename(
2981	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
2982	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
2983	struct xfs_inode	*ip1,	/* in: inode of old entry */
2984	struct xfs_inode	*ip2,	/* in: inode of new entry */
2985	struct xfs_inode	*wip,	/* in: whiteout inode */
2986	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
2987	int			*num_inodes)  /* in/out: inodes in array */
2988{
2989	int			i, j;
2990
2991	ASSERT(*num_inodes == __XFS_SORT_INODES);
2992	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2993
2994	/*
2995	 * i_tab contains a list of pointers to inodes.  We initialize
2996	 * the table here & we'll sort it.  We will then use it to
2997	 * order the acquisition of the inode locks.
2998	 *
2999	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
3000	 */
3001	i = 0;
3002	i_tab[i++] = dp1;
3003	i_tab[i++] = dp2;
3004	i_tab[i++] = ip1;
3005	if (ip2)
3006		i_tab[i++] = ip2;
3007	if (wip)
3008		i_tab[i++] = wip;
3009	*num_inodes = i;
3010
3011	/*
3012	 * Sort the elements via bubble sort.  (Remember, there are at
3013	 * most 5 elements to sort, so this is adequate.)
3014	 */
3015	for (i = 0; i < *num_inodes; i++) {
3016		for (j = 1; j < *num_inodes; j++) {
3017			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
3018				struct xfs_inode *temp = i_tab[j];
3019				i_tab[j] = i_tab[j-1];
3020				i_tab[j-1] = temp;
3021			}
3022		}
3023	}
3024}
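/*
 * Example (illustrative inode numbers): a cross-directory rename with
 * dp1->i_ino = 17, dp2->i_ino = 5 and ip1->i_ino = 42, and no ip2 or
 * wip, sorts to { dp2, dp1, ip1 }; the locks are then taken in
 * ascending inode-number order to avoid ABBA deadlocks between
 * concurrent renames.
 */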
3025
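/*
 * Commit a rename transaction, making the commit synchronous first if
 * the wsync or dirsync mount options require it.
 */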
3026static int
3027xfs_finish_rename(
3028	struct xfs_trans	*tp)
3029{
3030	/*
3031	 * If this is a synchronous mount, make sure that the rename transaction
3032	 * goes to disk before returning to the user.
3033	 */
3034	if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
3035		xfs_trans_set_sync(tp);
3036
3037	return xfs_trans_commit(tp);
3038}
3039
3040/*
3041 * xfs_cross_rename()
3042 *
3043 * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() system call.
3044 */
3045STATIC int
3046xfs_cross_rename(
3047	struct xfs_trans	*tp,
3048	struct xfs_inode	*dp1,
3049	struct xfs_name		*name1,
3050	struct xfs_inode	*ip1,
3051	struct xfs_inode	*dp2,
3052	struct xfs_name		*name2,
3053	struct xfs_inode	*ip2,
3054	int			spaceres)
3055{
3056	int		error = 0;
3057	int		ip1_flags = 0;
3058	int		ip2_flags = 0;
3059	int		dp2_flags = 0;
3060
3061	/* Swap inode number for dirent in first parent */
3062	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
3063	if (error)
3064		goto out_trans_abort;
3065
3066	/* Swap inode number for dirent in second parent */
3067	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
3068	if (error)
3069		goto out_trans_abort;
3070
3071	/*
3072	 * If we're renaming one or more directories across different parents,
3073	 * update the respective ".." entries (and link counts) to match the new
3074	 * parents.
3075	 */
3076	if (dp1 != dp2) {
3077		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3078
3079		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
3080			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
3081						dp1->i_ino, spaceres);
3082			if (error)
3083				goto out_trans_abort;
3084
3085			/* transfer ip2 ".." reference to dp1 */
3086			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
3087				error = xfs_droplink(tp, dp2);
3088				if (error)
3089					goto out_trans_abort;
3090				xfs_bumplink(tp, dp1);
3091			}
3092
3093			/*
3094			 * Although ip1 isn't changed here, userspace needs
3095			 * to be notified about the change, so that
3096			 * applications relying on it (like backup tools)
3097			 * properly notice the change.
3098			 */
3099			ip1_flags |= XFS_ICHGTIME_CHG;
3100			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3101		}
3102
3103		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
3104			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
3105						dp2->i_ino, spaceres);
3106			if (error)
3107				goto out_trans_abort;
3108
3109			/* transfer ip1 ".." reference to dp2 */
3110			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
3111				error = xfs_droplink(tp, dp1);
3112				if (error)
3113					goto out_trans_abort;
3114				xfs_bumplink(tp, dp2);
3115			}
3116
3117			/*
3118			 * Although ip2 isn't changed here, userspace needs
3119			 * to be notified about the change, so that
3120			 * applications relying on it (like backup tools)
3121			 * properly notice the change.
3122			 */
3123			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3124			ip2_flags |= XFS_ICHGTIME_CHG;
3125		}
3126	}
3127
3128	if (ip1_flags) {
3129		xfs_trans_ichgtime(tp, ip1, ip1_flags);
3130		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
3131	}
3132	if (ip2_flags) {
3133		xfs_trans_ichgtime(tp, ip2, ip2_flags);
3134		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
3135	}
3136	if (dp2_flags) {
3137		xfs_trans_ichgtime(tp, dp2, dp2_flags);
3138		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
3139	}
3140	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3141	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
3142	return xfs_finish_rename(tp);
3143
3144out_trans_abort:
3145	xfs_trans_cancel(tp);
3146	return error;
3147}
3148
3149/*
3150 * xfs_rename_alloc_whiteout()
3151 *
3152 * Return a referenced, unlinked, unlocked inode that can be used as a
3153 * whiteout in a rename transaction. We use a tmpfile inode here so that
3154 * if we crash between allocating the inode and linking it into the rename
3155 * transaction, recovery will free the inode and we won't leak it.
3156 */
3157static int
3158xfs_rename_alloc_whiteout(
3159	struct xfs_inode	*dp,
3160	struct xfs_inode	**wip)
3161{
3162	struct xfs_inode	*tmpfile;
3163	int			error;
3164
3165	error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
3166	if (error)
3167		return error;
3168
3169	/*
3170	 * Prepare the tmpfile inode as if it were created through the VFS.
3171	 * Complete the inode setup and flag it as linkable.  nlink is already
3172	 * zero, so we can skip the drop_nlink.
3173	 */
3174	xfs_setup_iops(tmpfile);
3175	xfs_finish_inode_setup(tmpfile);
3176	VFS_I(tmpfile)->i_state |= I_LINKABLE;
3177
3178	*wip = tmpfile;
3179	return 0;
3180}
3181
3182/*
3183 * xfs_rename: move @src_name in @src_dp to @target_name in @target_dp.
3184 */
3185int
3186xfs_rename(
3187	struct xfs_inode	*src_dp,
3188	struct xfs_name		*src_name,
3189	struct xfs_inode	*src_ip,
3190	struct xfs_inode	*target_dp,
3191	struct xfs_name		*target_name,
3192	struct xfs_inode	*target_ip,
3193	unsigned int		flags)
3194{
3195	struct xfs_mount	*mp = src_dp->i_mount;
3196	struct xfs_trans	*tp;
3197	struct xfs_inode	*wip = NULL;		/* whiteout inode */
3198	struct xfs_inode	*inodes[__XFS_SORT_INODES];
3199	int			num_inodes = __XFS_SORT_INODES;
3200	bool			new_parent = (src_dp != target_dp);
3201	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3202	int			spaceres;
3203	int			error;
3204
3205	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3206
3207	if ((flags & RENAME_EXCHANGE) && !target_ip)
3208		return -EINVAL;
3209
3210	/*
3211	 * If we are doing a whiteout operation, allocate the whiteout inode
3212	 * we will be placing at the target and ensure the type is set
3213	 * appropriately.
3214	 */
3215	if (flags & RENAME_WHITEOUT) {
3216		ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
3217		error = xfs_rename_alloc_whiteout(target_dp, &wip);
3218		if (error)
3219			return error;
3220
3221		/* setup target dirent info as whiteout */
3222		src_name->type = XFS_DIR3_FT_CHRDEV;
3223	}
3224
3225	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3226				inodes, &num_inodes);
3227
3228	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3229	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
3230	if (error == -ENOSPC) {
3231		spaceres = 0;
3232		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3233				&tp);
3234	}
3235	if (error)
3236		goto out_release_wip;
3237
3238	/*
3239	 * Attach the dquots to the inodes
3240	 */
3241	error = xfs_qm_vop_rename_dqattach(inodes);
3242	if (error)
3243		goto out_trans_cancel;
3244
3245	/*
3246	 * Lock all the participating inodes. Depending upon whether
3247	 * the target_name exists in the target directory, and
3248	 * whether the target directory is the same as the source
3249	 * directory, we can lock from 2 to 4 inodes.
3250	 */
3251	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3252
3253	/*
3254	 * Join all the inodes to the transaction. From this point on,
3255	 * we can rely on either trans_commit or trans_cancel to unlock
3256	 * them.
3257	 */
3258	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3259	if (new_parent)
3260		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3261	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3262	if (target_ip)
3263		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
3264	if (wip)
3265		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3266
3267	/*
3268	 * If we are using project inheritance, we only allow renames
3269	 * into our tree when the project IDs are the same; else the
3270	 * tree quota mechanism would be circumvented.
3271	 */
3272	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3273		     (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
3274		error = -EXDEV;
3275		goto out_trans_cancel;
3276	}
3277
3278	/* RENAME_EXCHANGE is unique from here on. */
3279	if (flags & RENAME_EXCHANGE)
3280		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3281					target_dp, target_name, target_ip,
3282					spaceres);
3283
3284	/*
3285	 * Check for expected errors before we dirty the transaction
3286	 * so we can return an error without a transaction abort.
3287	 */
3288	if (target_ip == NULL) {
3289		/*
3290		 * If there's no space reservation, check the entry will
3291		 * fit before actually inserting it.
3292		 */
3293		if (!spaceres) {
3294			error = xfs_dir_canenter(tp, target_dp, target_name);
3295			if (error)
3296				goto out_trans_cancel;
3297		}
3298	} else {
3299		/*
3300		 * If the target exists and is a directory, check whether
3301		 * it can be destroyed.
3302		 */
3303		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3304		    (!xfs_dir_isempty(target_ip) ||
3305		     (VFS_I(target_ip)->i_nlink > 2))) {
3306			error = -EEXIST;
3307			goto out_trans_cancel;
3308		}
3309	}
3310
3311	/*
3312	 * Directory entry creation below may acquire the AGF. Remove
3313	 * the whiteout from the unlinked list first to preserve correct
3314	 * AGI/AGF locking order. This dirties the transaction so failures
3315	 * after this point will abort and log recovery will clean up the
3316	 * mess.
3317	 *
3318	 * For whiteouts, we need to bump the link count on the whiteout
3319	 * inode. After this point, we have a real link, clear the tmpfile
3320	 * state flag from the inode so it doesn't accidentally get misused
3321	 * in future.
3322	 */
3323	if (wip) {
3324		ASSERT(VFS_I(wip)->i_nlink == 0);
3325		error = xfs_iunlink_remove(tp, wip);
3326		if (error)
3327			goto out_trans_cancel;
3328
3329		xfs_bumplink(tp, wip);
3330		xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
3331		VFS_I(wip)->i_state &= ~I_LINKABLE;
3332	}
3333
3334	/*
3335	 * Set up the target.
3336	 */
3337	if (target_ip == NULL) {
3338		/*
3339		 * If target does not exist and the rename crosses
3340		 * directories, adjust the target directory link count
3341		 * to account for the ".." reference from the new entry.
3342		 */
3343		error = xfs_dir_createname(tp, target_dp, target_name,
3344					   src_ip->i_ino, spaceres);
3345		if (error)
3346			goto out_trans_cancel;
3347
3348		xfs_trans_ichgtime(tp, target_dp,
3349					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3350
3351		if (new_parent && src_is_directory) {
3352			xfs_bumplink(tp, target_dp);
3353		}
3354	} else { /* target_ip != NULL */
3355		/*
3356		 * Link the source inode under the target name.
3357		 * If the source inode is a directory and we are moving
3358		 * it across directories, its ".." entry will be
3359		 * inconsistent until we replace that down below.
3360		 *
3361		 * In case there is already an entry with the same
3362		 * name at the destination directory, remove it first.
3363		 */
3364		error = xfs_dir_replace(tp, target_dp, target_name,
3365					src_ip->i_ino, spaceres);
3366		if (error)
3367			goto out_trans_cancel;
3368
3369		xfs_trans_ichgtime(tp, target_dp,
3370					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3371
3372		/*
3373		 * Decrement the link count on the target since the target
3374		 * dir no longer points to it.
3375		 */
3376		error = xfs_droplink(tp, target_ip);
3377		if (error)
3378			goto out_trans_cancel;
3379
3380		if (src_is_directory) {
3381			/*
3382			 * Drop the link from the old "." entry.
3383			 */
3384			error = xfs_droplink(tp, target_ip);
3385			if (error)
3386				goto out_trans_cancel;
3387		}
3388	} /* target_ip != NULL */
3389
3390	/*
3391	 * Remove the source.
3392	 */
3393	if (new_parent && src_is_directory) {
3394		/*
3395		 * Rewrite the ".." entry to point to the new
3396		 * directory.
3397		 */
3398		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3399					target_dp->i_ino, spaceres);
3400		ASSERT(error != -EEXIST);
3401		if (error)
3402			goto out_trans_cancel;
3403	}
3404
3405	/*
3406	 * We always want to hit the ctime on the source inode.
3407	 *
3408	 * This isn't strictly required by the standards since the source
3409	 * inode isn't really being changed, but old unix file systems did
3410	 * it and some incremental backup programs won't work without it.
3411	 */
3412	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3413	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3414
3415	/*
3416	 * Adjust the link count on src_dp.  This is necessary when
3417	 * renaming a directory, either within one parent when
3418	 * the target existed, or across two parent directories.
3419	 */
3420	if (src_is_directory && (new_parent || target_ip != NULL)) {
3421
3422		/*
3423		 * Decrement link count on src_directory since the
3424		 * entry that's moved no longer points to it.
3425		 */
3426		error = xfs_droplink(tp, src_dp);
3427		if (error)
3428			goto out_trans_cancel;
3429	}
3430
3431	/*
3432	 * For whiteouts, we only need to update the source dirent with the
3433	 * inode number of the whiteout inode rather than removing it
3434	 * altogether.
3435	 */
3436	if (wip) {
3437		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3438					spaceres);
3439	} else
3440		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3441					   spaceres);
3442	if (error)
3443		goto out_trans_cancel;
3444
3445	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3446	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3447	if (new_parent)
3448		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3449
3450	error = xfs_finish_rename(tp);
3451	if (wip)
3452		xfs_irele(wip);
3453	return error;
3454
3455out_trans_cancel:
3456	xfs_trans_cancel(tp);
3457out_release_wip:
3458	if (wip)
3459		xfs_irele(wip);
3460	return error;
3461}
3462
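/*
 * Try to flush every other dirty, unpinned inode backed by the same
 * cluster buffer as @ip so that a single buffer write covers as many
 * inodes as possible.  Inodes that cannot be locked without blocking
 * are left for a later flush.
 */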
3463STATIC int
3464xfs_iflush_cluster(
3465	struct xfs_inode	*ip,
3466	struct xfs_buf		*bp)
3467{
3468	struct xfs_mount	*mp = ip->i_mount;
3469	struct xfs_perag	*pag;
3470	unsigned long		first_index, mask;
3471	int			cilist_size;
3472	struct xfs_inode	**cilist;
3473	struct xfs_inode	*cip;
3474	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
3475	int			nr_found;
3476	int			clcount = 0;
3477	int			i;
3478
3479	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
3480
3481	cilist_size = igeo->inodes_per_cluster * sizeof(struct xfs_inode *);
3482	cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
3483	if (!cilist)
3484		goto out_put;
3485
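	/*
	 * Round the inode number down to the first inode of its cluster.
	 * inodes_per_cluster is a power of two, so masking off the low
	 * bits of the agino yields the cluster's first inode.
	 */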
3486	mask = ~(igeo->inodes_per_cluster - 1);
3487	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
3488	rcu_read_lock();
3489	/* really need a gang lookup range call here */
3490	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
3491					first_index, igeo->inodes_per_cluster);
3492	if (nr_found == 0)
3493		goto out_free;
3494
3495	for (i = 0; i < nr_found; i++) {
3496		cip = cilist[i];
3497		if (cip == ip)
3498			continue;
3499
3500		/*
3501		 * because this is an RCU protected lookup, we could find a
3502		 * recently freed or even reallocated inode during the lookup.
3503		 * We need to check under the i_flags_lock for a valid inode
3504		 * here. Skip it if it is not valid or the wrong inode.
3505		 */
3506		spin_lock(&cip->i_flags_lock);
3507		if (!cip->i_ino ||
3508		    __xfs_iflags_test(cip, XFS_ISTALE)) {
3509			spin_unlock(&cip->i_flags_lock);
3510			continue;
3511		}
3512
3513		/*
3514		 * Once we fall off the end of the cluster, no point checking
3515		 * any more inodes in the list because they will also all be
3516		 * outside the cluster.
3517		 */
3518		if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
3519			spin_unlock(&cip->i_flags_lock);
3520			break;
3521		}
3522		spin_unlock(&cip->i_flags_lock);
3523
3524		/*
3525		 * Do an un-protected check to see if the inode is dirty and
3526		 * is a candidate for flushing.  These checks will be repeated
3527		 * later after the appropriate locks are acquired.
3528		 */
3529		if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
3530			continue;
3531
3532		/*
3533		 * Try to get locks.  If any are unavailable or it is pinned,
3534		 * then this inode cannot be flushed and is skipped.
3535		 */
3536
3537		if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
3538			continue;
3539		if (!xfs_iflock_nowait(cip)) {
3540			xfs_iunlock(cip, XFS_ILOCK_SHARED);
3541			continue;
3542		}
3543		if (xfs_ipincount(cip)) {
3544			xfs_ifunlock(cip);
3545			xfs_iunlock(cip, XFS_ILOCK_SHARED);
3546			continue;
3547		}
3548
3549
3550		/*
3551		 * Check the inode number again, just to be certain we are not
3552		 * racing with freeing in xfs_reclaim_inode(). See the comments
3553		 * in that function for more information as to why the initial
3554		 * check is not sufficient.
3555		 */
3556		if (!cip->i_ino) {
3557			xfs_ifunlock(cip);
3558			xfs_iunlock(cip, XFS_ILOCK_SHARED);
3559			continue;
3560		}
3561
3562		/*
3563		 * arriving here means that this inode can be flushed.  First
3564		 * re-check that it's dirty before flushing.
3565		 */
3566		if (!xfs_inode_clean(cip)) {
3567			int	error;
3568			error = xfs_iflush_int(cip, bp);
3569			if (error) {
3570				xfs_iunlock(cip, XFS_ILOCK_SHARED);
3571				goto cluster_corrupt_out;
3572			}
3573			clcount++;
3574		} else {
3575			xfs_ifunlock(cip);
3576		}
3577		xfs_iunlock(cip, XFS_ILOCK_SHARED);
3578	}
3579
3580	if (clcount) {
3581		XFS_STATS_INC(mp, xs_icluster_flushcnt);
3582		XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3583	}
3584
3585out_free:
3586	rcu_read_unlock();
3587	kmem_free(cilist);
3588out_put:
3589	xfs_perag_put(pag);
3590	return 0;
3591
3592
3593cluster_corrupt_out:
3594	/*
3595	 * Corruption detected in the clustering loop.  Invalidate the
3596	 * inode buffer and shut down the filesystem.
3597	 */
3598	rcu_read_unlock();
3599
3600	/*
3601	 * We'll always have an inode attached to the buffer for completion
3602	 * processing by the time we are called from xfs_iflush(). Hence we
3603	 * always need to do IO completion processing to abort the inodes
3604	 * attached to the buffer.  Handle them just like the shutdown case in
3605	 * xfs_buf_submit().
3606	 */
3607	ASSERT(bp->b_iodone);
3608	bp->b_flags |= XBF_ASYNC;
3609	bp->b_flags &= ~XBF_DONE;
3610	xfs_buf_stale(bp);
3611	xfs_buf_ioerror(bp, -EIO);
3612	xfs_buf_ioend(bp);
3613
3614	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3615
3616	/* abort the corrupt inode, as it was not attached to the buffer */
3617	xfs_iflush_abort(cip, false);
3618	kmem_free(cilist);
3619	xfs_perag_put(pag);
3620	return -EFSCORRUPTED;
3621}
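/*
 * Illustrative sketch, not part of the kernel build: the cluster lookup
 * above relies on inodes_per_cluster being a power of two, so masking off
 * the low bits of the AG inode number yields the first inode of the
 * cluster.  With a hypothetical geometry of 32 inodes per cluster, agino
 * 0x47 maps to cluster start 0x40.
 */
static inline xfs_agino_t
example_cluster_first_index(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	unsigned long		mask = ~(M_IGEO(mp)->inodes_per_cluster - 1);

	return XFS_INO_TO_AGINO(mp, ino) & mask;
}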
3622
3623/*
3624 * Flush dirty inode metadata into the backing buffer.
3625 *
3626 * The caller must have the inode lock and the inode flush lock held.  The
3627 * inode lock will still be held upon return to the caller, and the inode
3628 * flush lock will be released after the inode has reached the disk.
3629 *
3630 * The caller must write out the buffer returned in *bpp and release it.
3631 */
3632int
3633xfs_iflush(
3634	struct xfs_inode	*ip,
3635	struct xfs_buf		**bpp)
3636{
3637	struct xfs_mount	*mp = ip->i_mount;
3638	struct xfs_buf		*bp = NULL;
3639	struct xfs_dinode	*dip;
3640	int			error;
3641
3642	XFS_STATS_INC(mp, xs_iflush_count);
3643
3644	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3645	ASSERT(xfs_isiflocked(ip));
3646	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3647	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3648
3649	*bpp = NULL;
3650
3651	xfs_iunpin_wait(ip);
3652
3653	/*
3654	 * For stale inodes we cannot rely on the backing buffer remaining
3655	 * stale in cache for the remaining life of the stale inode and so
3656	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
3657	 * inodes. We have to check this after ensuring the inode is
3658	 * unpinned so that it is safe to reclaim the stale inode after the
3659	 * flush call.
3660	 */
3661	if (xfs_iflags_test(ip, XFS_ISTALE)) {
3662		xfs_ifunlock(ip);
3663		return 0;
3664	}
3665
3666	/*
3667	 * This may have been unpinned because the filesystem is shutting
3668	 * down forcibly. If that's the case we must not write this inode
3669	 * to disk, because the log record didn't make it to disk.
3670	 *
3671	 * We also have to remove the log item from the AIL in this case,
3672	 * as we wait for an empty AIL as part of the unmount process.
3673	 */
3674	if (XFS_FORCED_SHUTDOWN(mp)) {
3675		error = -EIO;
3676		goto abort_out;
3677	}
3678
3679	/*
3680	 * Get the buffer containing the on-disk inode. We are doing a try-lock
3681	 * operation here, so we may get an EAGAIN error. In that case, we
3682	 * simply want to return with the inode still dirty.
3683	 *
3684	 * If we get any other error, we effectively have a corruption situation
3685	 * and we cannot flush the inode, so we treat it the same as failing
3686	 * xfs_iflush_int().
3687	 */
3688	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
3689			       0);
3690	if (error == -EAGAIN) {
3691		xfs_ifunlock(ip);
3692		return error;
3693	}
3694	if (error)
3695		goto corrupt_out;
3696
3697	/*
3698	 * First flush out the inode that xfs_iflush was called with.
3699	 */
3700	error = xfs_iflush_int(ip, bp);
3701	if (error)
3702		goto corrupt_out;
3703
3704	/*
3705	 * If the buffer is pinned then push on the log now so we won't
3706	 * get stuck waiting in the write for too long.
3707	 */
3708	if (xfs_buf_ispinned(bp))
3709		xfs_log_force(mp, 0);
3710
3711	/*
3712	 * inode clustering: try to gather other inodes into this write
3713	 *
3714	 * Note: Any error during clustering will result in the filesystem
3715	 * being shut down and completion callbacks run on the cluster buffer.
3716	 * As we have already flushed and attached this inode to the buffer,
3717	 * it has already been aborted and released by xfs_iflush_cluster() and
3718	 * so we have no further error handling to do here.
3719	 */
3720	error = xfs_iflush_cluster(ip, bp);
3721	if (error)
3722		return error;
3723
3724	*bpp = bp;
3725	return 0;
3726
3727corrupt_out:
3728	if (bp)
3729		xfs_buf_relse(bp);
3730	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3731abort_out:
3732	/* abort the corrupt inode, as it was not attached to the buffer */
3733	xfs_iflush_abort(ip, false);
3734	return error;
3735}
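/*
 * Illustrative sketch, not part of the kernel build: the caller contract
 * described above, reduced to the bare calls.  On success the caller owns
 * the locked cluster buffer in *bpp and must queue it for write and drop
 * its reference; this is loosely modelled on what a log item push does.
 */
static inline int
example_flush_and_queue(
	struct xfs_inode	*ip,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_iflush(ip, &bp);
	if (error)
		return error;

	xfs_buf_delwri_queue(bp, buffer_list);	/* schedule the write */
	xfs_buf_relse(bp);			/* drop the buffer reference */
	return 0;
}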
3736
3737/*
3738 * If there are inline format data / attr forks attached to this inode,
3739 * make sure they're not corrupt.
3740 */
3741bool
3742xfs_inode_verify_forks(
3743	struct xfs_inode	*ip)
3744{
3745	struct xfs_ifork	*ifp;
3746	xfs_failaddr_t		fa;
3747
3748	fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
3749	if (fa) {
3750		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
3751		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
3752				ifp->if_u1.if_data, ifp->if_bytes, fa);
3753		return false;
3754	}
3755
3756	fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
3757	if (fa) {
3758		ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
3759		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
3760				ifp ? ifp->if_u1.if_data : NULL,
3761				ifp ? ifp->if_bytes : 0, fa);
3762		return false;
3763	}
3764	return true;
3765}
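/*
 * Illustrative sketch, not part of the kernel build: a writeback path can
 * use the verifier above as a cheap gate before touching the on-disk
 * buffer, which is exactly how xfs_iflush_int() uses it below.
 */
static inline int
example_check_forks_before_flush(
	struct xfs_inode	*ip)
{
	if (!xfs_inode_verify_forks(ip))
		return -EFSCORRUPTED;	/* in-memory fork data is corrupt */
	return 0;
}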
3766
3767STATIC int
3768xfs_iflush_int(
3769	struct xfs_inode	*ip,
3770	struct xfs_buf		*bp)
3771{
3772	struct xfs_inode_log_item *iip = ip->i_itemp;
3773	struct xfs_dinode	*dip;
3774	struct xfs_mount	*mp = ip->i_mount;
3775
3776	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3777	ASSERT(xfs_isiflocked(ip));
3778	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3779	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3780	ASSERT(iip != NULL && iip->ili_fields != 0);
3781	ASSERT(ip->i_d.di_version > 1);
3782
3783	/* set *dip = inode's place in the buffer */
3784	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3785
3786	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3787			       mp, XFS_ERRTAG_IFLUSH_1)) {
3788		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3789			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3790			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3791		goto corrupt_out;
3792	}
3793	if (S_ISREG(VFS_I(ip)->i_mode)) {
3794		if (XFS_TEST_ERROR(
3795		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3796		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3797		    mp, XFS_ERRTAG_IFLUSH_3)) {
3798			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3799				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
3800				__func__, ip->i_ino, ip);
3801			goto corrupt_out;
3802		}
3803	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3804		if (XFS_TEST_ERROR(
3805		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3806		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3807		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3808		    mp, XFS_ERRTAG_IFLUSH_4)) {
3809			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3810				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
3811				__func__, ip->i_ino, ip);
3812			goto corrupt_out;
3813		}
3814	}
3815	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3816				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3817		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3818			"%s: detected corrupt incore inode %Lu, "
3819			"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
3820			__func__, ip->i_ino,
3821			ip->i_d.di_nextents + ip->i_d.di_anextents,
3822			ip->i_d.di_nblocks, ip);
3823		goto corrupt_out;
3824	}
3825	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3826				mp, XFS_ERRTAG_IFLUSH_6)) {
3827		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3828			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3829			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3830		goto corrupt_out;
3831	}
3832
3833	/*
3834	 * Inode item log recovery for v2 inodes is dependent on the
3835	 * di_flushiter count for correct sequencing. We bump the flush
3836	 * iteration count so we can detect flushes which postdate a log record
3837	 * during recovery. This is redundant as we now log every change and
3838	 * hence this can't happen, but we still need to do it to ensure
3839	 * backwards compatibility with old kernels that predate logging all
3840	 * inode changes.
3841	 */
3842	if (ip->i_d.di_version < 3)
3843		ip->i_d.di_flushiter++;
3844
3845	/* Check the inline fork data before we write out. */
3846	if (!xfs_inode_verify_forks(ip))
3847		goto corrupt_out;
3848
3849	/*
3850	 * Copy the dirty parts of the inode into the on-disk inode.  We always
3851	 * copy out the core of the inode, because if the inode is dirty at all
3852	 * the core must be.
3853	 */
3854	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3855
3856	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3857	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3858		ip->i_d.di_flushiter = 0;
3859
3860	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3861	if (XFS_IFORK_Q(ip))
3862		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3863	xfs_inobp_check(mp, bp);
3864
3865	/*
3866	 * We've recorded everything logged in the inode, so we'd like to clear
3867	 * the ili_fields bits so we don't log and flush things unnecessarily.
3868	 * However, we can't stop logging all this information until the data
3869	 * we've copied into the disk buffer is written to disk.  If we did we
3870	 * might overwrite the copy of the inode in the log with all the data
3871	 * after re-logging only part of it, and in the face of a crash we
3872	 * wouldn't have all the data we need to recover.
3873	 *
3874	 * What we do is move the bits to the ili_last_fields field.  When
3875	 * logging the inode, these bits are moved back to the ili_fields field.
3876	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
3877	 * know that the information those bits represent is permanently on
3878	 * disk.  As long as the flush completes before the inode is logged
3879	 * again, then both ili_fields and ili_last_fields will be cleared.
3880	 *
3881	 * We can play with the ili_fields bits here, because the inode lock
3882	 * must be held exclusively in order to set bits there and the flush
3883	 * lock protects the ili_last_fields bits.  Set ili_logged so the flush
3884	 * done routine can tell whether or not to look in the AIL.  Also, store
3885	 * the current LSN of the inode so that we can tell whether the item has
3886	 * moved in the AIL from xfs_iflush_done().  In order to read the lsn we
3887	 * need the AIL lock, because it is a 64 bit value that cannot be read
3888	 * atomically.
3889	 */
3890	iip->ili_last_fields = iip->ili_fields;
3891	iip->ili_fields = 0;
3892	iip->ili_fsync_fields = 0;
3893	iip->ili_logged = 1;
3894
3895	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3896				&iip->ili_item.li_lsn);
3897
3898	/*
3899	 * Attach the function xfs_iflush_done to the inode's
3900	 * buffer.  This will remove the inode from the AIL
3901	 * and unlock the inode's flush lock when the inode is
3902	 * completely written to disk.
3903	 */
3904	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
3905
3906	/* generate the checksum. */
3907	xfs_dinode_calc_crc(mp, dip);
3908
3909	ASSERT(!list_empty(&bp->b_li_list));
3910	ASSERT(bp->b_iodone != NULL);
3911	return 0;
3912
3913corrupt_out:
3914	return -EFSCORRUPTED;
3915}
3916
3917/* Release an inode. */
3918void
3919xfs_irele(
3920	struct xfs_inode	*ip)
3921{
3922	trace_xfs_irele(ip, _RET_IP_);
3923	iput(VFS_I(ip));
3924}
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include <linux/iversion.h>
   7
   8#include "xfs.h"
   9#include "xfs_fs.h"
  10#include "xfs_shared.h"
  11#include "xfs_format.h"
  12#include "xfs_log_format.h"
  13#include "xfs_trans_resv.h"
  14#include "xfs_mount.h"
  15#include "xfs_defer.h"
  16#include "xfs_inode.h"
  17#include "xfs_dir2.h"
  18#include "xfs_attr.h"
  19#include "xfs_trans_space.h"
  20#include "xfs_trans.h"
  21#include "xfs_buf_item.h"
  22#include "xfs_inode_item.h"
  23#include "xfs_iunlink_item.h"
  24#include "xfs_ialloc.h"
  25#include "xfs_bmap.h"
  26#include "xfs_bmap_util.h"
  27#include "xfs_errortag.h"
  28#include "xfs_error.h"
  29#include "xfs_quota.h"
  30#include "xfs_filestream.h"
  31#include "xfs_trace.h"
  32#include "xfs_icache.h"
  33#include "xfs_symlink.h"
  34#include "xfs_trans_priv.h"
  35#include "xfs_log.h"
  36#include "xfs_bmap_btree.h"
  37#include "xfs_reflink.h"
  38#include "xfs_ag.h"
  39#include "xfs_log_priv.h"
  40#include "xfs_health.h"
  41
  42struct kmem_cache *xfs_inode_cache;
  43
  44STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
  45STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
  46	struct xfs_inode *);
  47
  48/*
  49 * helper function to extract extent size hint from inode
  50 */
  51xfs_extlen_t
  52xfs_get_extsz_hint(
  53	struct xfs_inode	*ip)
  54{
  55	/*
  56	 * No point in aligning allocations if we need to COW to actually
  57	 * write to them.
  58	 */
  59	if (xfs_is_always_cow_inode(ip))
  60		return 0;
  61	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
  62		return ip->i_extsize;
  63	if (XFS_IS_REALTIME_INODE(ip))
  64		return ip->i_mount->m_sb.sb_rextsize;
  65	return 0;
  66}
  67
  68/*
  69 * Helper function to extract CoW extent size hint from inode.
  70 * Between the extent size hint and the CoW extent size hint, we
  71 * return the greater of the two.  If the value is zero (automatic),
  72 * use the default size.
  73 */
  74xfs_extlen_t
  75xfs_get_cowextsz_hint(
  76	struct xfs_inode	*ip)
  77{
  78	xfs_extlen_t		a, b;
  79
  80	a = 0;
  81	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
  82		a = ip->i_cowextsize;
  83	b = xfs_get_extsz_hint(ip);
  84
  85	a = max(a, b);
  86	if (a == 0)
  87		return XFS_DEFAULT_COWEXTSZ_HINT;
  88	return a;
  89}
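/*
 * Worked example, not part of the kernel build: the selection logic above
 * on plain values.  With a CoW hint of 0 and an extent size hint of 16
 * blocks this returns 16; with both zero it falls back to
 * XFS_DEFAULT_COWEXTSZ_HINT.
 */
static inline xfs_extlen_t
example_pick_cowextsz(
	xfs_extlen_t	cow_hint,
	xfs_extlen_t	extsz_hint)
{
	xfs_extlen_t	hint = max(cow_hint, extsz_hint);

	return hint ? hint : XFS_DEFAULT_COWEXTSZ_HINT;
}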
  90
  91/*
  92 * These two are wrapper routines around the xfs_ilock() routine used to
  93 * centralize some grungy code.  They are used in places that wish to lock the
  94 * inode solely for reading the extents.  The reason these places can't just
  95 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards
  96 * the bringing in of the extents from disk for a file in b-tree format.
  97 * inode is in b-tree format, then we need to lock the inode exclusively until
  98 * the extents are read in.  Locking it exclusively all the time would limit
  99 * our parallelism unnecessarily, though.  What we do instead is check to see
 100 * if the extents have been read in yet, and only lock the inode exclusively
 101 * if they have not.
 102 *
 103 * The functions return a value which should be given to the corresponding
 104 * xfs_iunlock() call.
 105 */
 106uint
 107xfs_ilock_data_map_shared(
 108	struct xfs_inode	*ip)
 109{
 110	uint			lock_mode = XFS_ILOCK_SHARED;
 111
 112	if (xfs_need_iread_extents(&ip->i_df))
 113		lock_mode = XFS_ILOCK_EXCL;
 114	xfs_ilock(ip, lock_mode);
 115	return lock_mode;
 116}
 117
 118uint
 119xfs_ilock_attr_map_shared(
 120	struct xfs_inode	*ip)
 121{
 122	uint			lock_mode = XFS_ILOCK_SHARED;
 123
 124	if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
 125		lock_mode = XFS_ILOCK_EXCL;
 126	xfs_ilock(ip, lock_mode);
 127	return lock_mode;
 128}
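/*
 * Illustrative usage sketch, not part of the kernel build: callers must
 * hand the returned lock mode back to xfs_iunlock(), because the helpers
 * above may have taken the ILOCK exclusively to read the extents in.
 */
static inline void
example_walk_data_extents(
	struct xfs_inode	*ip)
{
	uint			lock_mode;

	lock_mode = xfs_ilock_data_map_shared(ip);
	/* ... read the data fork extent list here ... */
	xfs_iunlock(ip, lock_mode);
}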
 129
 130/*
 131 * You can't set both SHARED and EXCL for the same lock,
 132 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
 133 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
 134 * to set in lock_flags.
 135 */
 136static inline void
 137xfs_lock_flags_assert(
 138	uint		lock_flags)
 139{
 140	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 141		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 142	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 143		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 144	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 145		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 146	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 147	ASSERT(lock_flags != 0);
 148}
 149
 150/*
 151 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 152 * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
 153 * various combinations of the locks to be obtained.
 154 *
 155 * The 3 locks should always be ordered so that the IO lock is obtained first,
 156 * the mmap lock second and the ilock last in order to prevent deadlock.
 157 *
 158 * Basic locking order:
 159 *
 160 * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
 161 *
 162 * mmap_lock locking order:
 163 *
 164 * i_rwsem -> page lock -> mmap_lock
 165 * mmap_lock -> invalidate_lock -> page_lock
 166 *
 167 * The difference in mmap_lock locking order means that we cannot hold the
 168 * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
 169 * can fault in pages during copy in/out (for buffered IO) or require the
 170 * mmap_lock in get_user_pages() to map the user pages into the kernel address
 171 * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
 172 * fault because page faults already hold the mmap_lock.
 173 *
 174 * Hence to serialise fully against both syscall and mmap based IO, we need to
 175 * take both the i_rwsem and the invalidate_lock. These locks should *only* be
 176 * both taken in places where we need to invalidate the page cache in a race
 177 * free manner (e.g. truncate, hole punch and other extent manipulation
 178 * functions).
 179 */
 180void
 181xfs_ilock(
 182	xfs_inode_t		*ip,
 183	uint			lock_flags)
 184{
 185	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
 186
 187	xfs_lock_flags_assert(lock_flags);
 188
 189	if (lock_flags & XFS_IOLOCK_EXCL) {
 190		down_write_nested(&VFS_I(ip)->i_rwsem,
 191				  XFS_IOLOCK_DEP(lock_flags));
 192	} else if (lock_flags & XFS_IOLOCK_SHARED) {
 193		down_read_nested(&VFS_I(ip)->i_rwsem,
 194				 XFS_IOLOCK_DEP(lock_flags));
 195	}
 196
 197	if (lock_flags & XFS_MMAPLOCK_EXCL) {
 198		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
 199				  XFS_MMAPLOCK_DEP(lock_flags));
 200	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
 201		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
 202				 XFS_MMAPLOCK_DEP(lock_flags));
 203	}
 204
 205	if (lock_flags & XFS_ILOCK_EXCL)
 206		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
 207	else if (lock_flags & XFS_ILOCK_SHARED)
 208		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
 209}
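/*
 * Illustrative sketch, not part of the kernel build, of the basic locking
 * order documented above: i_rwsem first, invalidate_lock second, ilock
 * last.  A single xfs_ilock() call with combined flags takes them in this
 * order internally; separate calls must follow the same order.
 */
static inline void
example_truncate_style_locking(
	struct xfs_inode	*ip)
{
	xfs_ilock(ip, XFS_IOLOCK_EXCL);		/* i_rwsem first */
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);	/* invalidate_lock second */
	xfs_ilock(ip, XFS_ILOCK_EXCL);		/* ilock last */

	/* ... invalidate the page cache and manipulate extents here ... */

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
}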
 210
 211/*
 212 * This is just like xfs_ilock(), except that the caller
 213 * is guaranteed not to sleep.  It returns 1 if it gets
 214 * the requested locks and 0 otherwise.  If the IO lock is
 215 * obtained but the inode lock cannot be, then the IO lock
 216 * is dropped before returning.
 217 *
 218 * ip -- the inode being locked
 219 * lock_flags -- this parameter indicates the inode's locks to be
 220 *       locked.  See the comment for xfs_ilock() for a list
 221 *	 of valid values.
 222 */
 223int
 224xfs_ilock_nowait(
 225	xfs_inode_t		*ip,
 226	uint			lock_flags)
 227{
 228	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
 229
 230	xfs_lock_flags_assert(lock_flags);
 231
 232	if (lock_flags & XFS_IOLOCK_EXCL) {
 233		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
 234			goto out;
 235	} else if (lock_flags & XFS_IOLOCK_SHARED) {
 236		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
 237			goto out;
 238	}
 239
 240	if (lock_flags & XFS_MMAPLOCK_EXCL) {
 241		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
 242			goto out_undo_iolock;
 243	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
 244		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
 245			goto out_undo_iolock;
 246	}
 247
 248	if (lock_flags & XFS_ILOCK_EXCL) {
 249		if (!mrtryupdate(&ip->i_lock))
 250			goto out_undo_mmaplock;
 251	} else if (lock_flags & XFS_ILOCK_SHARED) {
 252		if (!mrtryaccess(&ip->i_lock))
 253			goto out_undo_mmaplock;
 254	}
 255	return 1;
 256
 257out_undo_mmaplock:
 258	if (lock_flags & XFS_MMAPLOCK_EXCL)
 259		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
 260	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 261		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
 262out_undo_iolock:
 263	if (lock_flags & XFS_IOLOCK_EXCL)
 264		up_write(&VFS_I(ip)->i_rwsem);
 265	else if (lock_flags & XFS_IOLOCK_SHARED)
 266		up_read(&VFS_I(ip)->i_rwsem);
 267out:
 268	return 0;
 269}
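/*
 * Illustrative sketch, not part of the kernel build: xfs_ilock_nowait()
 * has already undone any locks it managed to take by the time it returns
 * 0, so a caller can simply defer or requeue the work on failure.
 */
static inline bool
example_try_lock_or_defer(
	struct xfs_inode	*ip)
{
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL))
		return false;		/* caller requeues the work */

	/* ... do the non-blocking work here ... */
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	return true;
}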
 270
 271/*
 272 * xfs_iunlock() is used to drop the inode locks acquired with
 273 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 274 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 275 * that we know which locks to drop.
 276 *
 277 * ip -- the inode being unlocked
 278 * lock_flags -- this parameter indicates the inode's locks to be
 279 *       unlocked.  See the comment for xfs_ilock() for a list
 280 *	 of valid values for this parameter.
 281 *
 282 */
 283void
 284xfs_iunlock(
 285	xfs_inode_t		*ip,
 286	uint			lock_flags)
 287{
 288	xfs_lock_flags_assert(lock_flags);
 289
 290	if (lock_flags & XFS_IOLOCK_EXCL)
 291		up_write(&VFS_I(ip)->i_rwsem);
 292	else if (lock_flags & XFS_IOLOCK_SHARED)
 293		up_read(&VFS_I(ip)->i_rwsem);
 294
 295	if (lock_flags & XFS_MMAPLOCK_EXCL)
 296		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
 297	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 298		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
 299
 300	if (lock_flags & XFS_ILOCK_EXCL)
 301		mrunlock_excl(&ip->i_lock);
 302	else if (lock_flags & XFS_ILOCK_SHARED)
 303		mrunlock_shared(&ip->i_lock);
 304
 305	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
 306}
 307
 308/*
 309 * Give up write locks.  The i/o lock cannot be held nested
 310 * if it is being demoted.
 311 */
 312void
 313xfs_ilock_demote(
 314	xfs_inode_t		*ip,
 315	uint			lock_flags)
 316{
 317	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
 318	ASSERT((lock_flags &
 319		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
 320
 321	if (lock_flags & XFS_ILOCK_EXCL)
 322		mrdemote(&ip->i_lock);
 323	if (lock_flags & XFS_MMAPLOCK_EXCL)
 324		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
 325	if (lock_flags & XFS_IOLOCK_EXCL)
 326		downgrade_write(&VFS_I(ip)->i_rwsem);
 327
 328	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
 329}
 330
 331#if defined(DEBUG) || defined(XFS_WARN)
 332static inline bool
 333__xfs_rwsem_islocked(
 334	struct rw_semaphore	*rwsem,
 335	bool			shared)
 336{
 337	if (!debug_locks)
 338		return rwsem_is_locked(rwsem);
 339
 340	if (!shared)
 341		return lockdep_is_held_type(rwsem, 0);
 342
 343	/*
 344	 * We are checking that the lock is held at least in shared
 345	 * mode but don't care that it might be held exclusively
 346	 * (i.e. shared | excl). Hence we check if the lock is held
 347	 * in any mode rather than an explicit shared mode.
 348	 */
 349	return lockdep_is_held_type(rwsem, -1);
 350}
 351
 352bool
 353xfs_isilocked(
 354	struct xfs_inode	*ip,
 355	uint			lock_flags)
 356{
 357	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
 358		if (!(lock_flags & XFS_ILOCK_SHARED))
 359			return !!ip->i_lock.mr_writer;
 360		return rwsem_is_locked(&ip->i_lock.mr_lock);
 361	}
 362
 363	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
 364		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
 365				(lock_flags & XFS_MMAPLOCK_SHARED));
 366	}
 367
 368	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
 369		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
 370				(lock_flags & XFS_IOLOCK_SHARED));
 371	}
 372
 373	ASSERT(0);
 374	return false;
 375}
 376#endif
 377
 378/*
 379 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 380 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 381 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 382 * errors and warnings.
 383 */
 384#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
 385static bool
 386xfs_lockdep_subclass_ok(
 387	int subclass)
 388{
 389	return subclass < MAX_LOCKDEP_SUBCLASSES;
 390}
 391#else
 392#define xfs_lockdep_subclass_ok(subclass)	(true)
 393#endif
 394
 395/*
 396 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 397 * value. This can be called for any type of inode lock combination, including
 398 * parent locking. Care must be taken to ensure we don't overrun the subclass
 399 * storage fields in the class mask we build.
 400 */
 401static inline uint
 402xfs_lock_inumorder(
 403	uint	lock_mode,
 404	uint	subclass)
 405{
 406	uint	class = 0;
 407
 408	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
 409			      XFS_ILOCK_RTSUM)));
 410	ASSERT(xfs_lockdep_subclass_ok(subclass));
 411
 412	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
 413		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
 414		class += subclass << XFS_IOLOCK_SHIFT;
 415	}
 416
 417	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
 418		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
 419		class += subclass << XFS_MMAPLOCK_SHIFT;
 420	}
 421
 422	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
 423		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
 424		class += subclass << XFS_ILOCK_SHIFT;
 425	}
 426
 427	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
 428}
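/*
 * Worked example, not part of the kernel build: for the third inode in a
 * set (subclass 2) locked with XFS_ILOCK_EXCL, the helper above ORs
 * (2 << XFS_ILOCK_SHIFT) into the lock mode, giving lockdep a distinct
 * subclass for each position in the locking order.
 */
static inline uint
example_third_ilock_mode(void)
{
	return xfs_lock_inumorder(XFS_ILOCK_EXCL, 2);
}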
 429
 430/*
 431 * The following routine will lock n inodes in exclusive mode.  We assume the
 432 * caller calls us with the inodes in i_ino order.
 433 *
 434 * We need to detect deadlock where an inode that we lock is in the AIL and we
 435 * start waiting for another inode that is locked by a thread in a long running
 436 * transaction (such as truncate). This can result in deadlock since the long
 437 * running trans might need to wait for the inode we just locked in order to
 438 * push the tail and free space in the log.
 439 *
 440 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 441 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 442 * lock more than one at a time, lockdep will report false positives saying we
 443 * have violated locking orders.
 444 */
 445static void
 446xfs_lock_inodes(
 447	struct xfs_inode	**ips,
 448	int			inodes,
 449	uint			lock_mode)
 450{
 451	int			attempts = 0;
 452	uint			i;
 453	int			j;
 454	bool			try_lock;
 455	struct xfs_log_item	*lp;
 456
 457	/*
 458	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
 459	 * support an arbitrary depth of locking here, but absolute limits on
 460	 * inodes depend on the type of locking and the limits placed by
 461	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
 462	 * the asserts.
 463	 */
 464	ASSERT(ips && inodes >= 2 && inodes <= 5);
 465	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
 466			    XFS_ILOCK_EXCL));
 467	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
 468			      XFS_ILOCK_SHARED)));
 469	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
 470		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
 471	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
 472		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
 473
 474	if (lock_mode & XFS_IOLOCK_EXCL) {
 475		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
 476	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
 477		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
 478
 479again:
 480	try_lock = false;
 481	i = 0;
 482	for (; i < inodes; i++) {
 483		ASSERT(ips[i]);
 484
 485		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
 486			continue;
 487
 488		/*
 489		 * If try_lock is not set yet, make sure all locked inodes are
 490		 * not in the AIL.  If any are, set try_lock to be used later.
 491		 */
 492		if (!try_lock) {
 493			for (j = (i - 1); j >= 0 && !try_lock; j--) {
 494				lp = &ips[j]->i_itemp->ili_item;
 495				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
 496					try_lock = true;
 497			}
 498		}
 499
 500		/*
 501		 * If any of the previous locks we have locked is in the AIL,
 502		 * we must TRY to get the second and subsequent locks. If
 503		 * we can't get any, we must release all we have
 504		 * and try again.
 505		 */
 506		if (!try_lock) {
 507			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
 508			continue;
 509		}
 510
 511		/* try_lock means we have an inode locked that is in the AIL. */
 512		ASSERT(i != 0);
 513		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
 514			continue;
 515
 516		/*
 517		 * Unlock all previous guys and try again.  xfs_iunlock will try
 518		 * to push the tail if the inode is in the AIL.
 519		 */
 520		attempts++;
 521		for (j = i - 1; j >= 0; j--) {
 522			/*
 523			 * Check to see if we've already unlocked this one.  Not
 524			 * the first one going back, and the inode ptr is the
 525			 * same.
 526			 */
 527			if (j != (i - 1) && ips[j] == ips[j + 1])
 528				continue;
 529
 530			xfs_iunlock(ips[j], lock_mode);
 531		}
 532
 533		if ((attempts % 5) == 0) {
 534			delay(1); /* Don't just spin the CPU */
 535		}
 536		goto again;
 537	}
 538}
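/*
 * Illustrative sketch, not part of the kernel build: callers sort the
 * array by inode number before handing it to xfs_lock_inodes(), as the
 * rename path does for up to four inodes.
 */
static inline void
example_lock_pair_sorted(
	struct xfs_inode	*a,
	struct xfs_inode	*b)
{
	struct xfs_inode	*ips[2] = { a, b };

	if (ips[0]->i_ino > ips[1]->i_ino)
		swap(ips[0], ips[1]);		/* enforce i_ino order */
	xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
}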
 539
 540/*
 541 * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
 542 * mmaplock must be double-locked separately since we use i_rwsem and
 543 * invalidate_lock for that. We now support taking one lock EXCL and the
 544 * other SHARED.
 545 */
 546void
 547xfs_lock_two_inodes(
 548	struct xfs_inode	*ip0,
 549	uint			ip0_mode,
 550	struct xfs_inode	*ip1,
 551	uint			ip1_mode)
 552{
 553	int			attempts = 0;
 554	struct xfs_log_item	*lp;
 555
 556	ASSERT(hweight32(ip0_mode) == 1);
 557	ASSERT(hweight32(ip1_mode) == 1);
 558	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
 559	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
 560	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
 561	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
 562	ASSERT(ip0->i_ino != ip1->i_ino);
 563
 564	if (ip0->i_ino > ip1->i_ino) {
 565		swap(ip0, ip1);
 566		swap(ip0_mode, ip1_mode);
 567	}
 568
 569 again:
 570	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
 571
 572	/*
 573	 * If the first lock we have locked is in the AIL, we must TRY to get
 574	 * the second lock. If we can't get it, we must release the first one
 575	 * and try again.
 576	 */
 577	lp = &ip0->i_itemp->ili_item;
 578	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
 579		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
 580			xfs_iunlock(ip0, ip0_mode);
 581			if ((++attempts % 5) == 0)
 582				delay(1); /* Don't just spin the CPU */
 583			goto again;
 584		}
 585	} else {
 586		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
 587	}
 588}
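/*
 * Illustrative sketch, not part of the kernel build: one ilock may be
 * taken exclusively while the other is shared, e.g. when modifying one
 * inode while only reading the other.
 */
static inline void
example_lock_source_and_target(
	struct xfs_inode	*src,
	struct xfs_inode	*tgt)
{
	xfs_lock_two_inodes(src, XFS_ILOCK_SHARED, tgt, XFS_ILOCK_EXCL);
	/* ... read from src, modify tgt ... */
	xfs_iunlock(tgt, XFS_ILOCK_EXCL);
	xfs_iunlock(src, XFS_ILOCK_SHARED);
}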
 589
 590uint
 591xfs_ip2xflags(
 592	struct xfs_inode	*ip)
 593{
 594	uint			flags = 0;
 595
 596	if (ip->i_diflags & XFS_DIFLAG_ANY) {
 597		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
 598			flags |= FS_XFLAG_REALTIME;
 599		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
 600			flags |= FS_XFLAG_PREALLOC;
 601		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
 602			flags |= FS_XFLAG_IMMUTABLE;
 603		if (ip->i_diflags & XFS_DIFLAG_APPEND)
 604			flags |= FS_XFLAG_APPEND;
 605		if (ip->i_diflags & XFS_DIFLAG_SYNC)
 606			flags |= FS_XFLAG_SYNC;
 607		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
 608			flags |= FS_XFLAG_NOATIME;
 609		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
 610			flags |= FS_XFLAG_NODUMP;
 611		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
 612			flags |= FS_XFLAG_RTINHERIT;
 613		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
 614			flags |= FS_XFLAG_PROJINHERIT;
 615		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
 616			flags |= FS_XFLAG_NOSYMLINKS;
 617		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
 618			flags |= FS_XFLAG_EXTSIZE;
 619		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
 620			flags |= FS_XFLAG_EXTSZINHERIT;
 621		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
 622			flags |= FS_XFLAG_NODEFRAG;
 623		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
 624			flags |= FS_XFLAG_FILESTREAM;
 625	}
 626
 627	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
 628		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
 629			flags |= FS_XFLAG_DAX;
 630		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
 631			flags |= FS_XFLAG_COWEXTSIZE;
 632	}
 633
 634	if (xfs_inode_has_attr_fork(ip))
 635		flags |= FS_XFLAG_HASATTR;
 636	return flags;
 637}
 638
 639/*
 640 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 641 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 642 * ci_name->name will point to the actual name (caller must free) or
 643 * will be set to NULL if an exact match is found.
 644 */
 645int
 646xfs_lookup(
 647	struct xfs_inode	*dp,
 648	const struct xfs_name	*name,
 649	struct xfs_inode	**ipp,
 650	struct xfs_name		*ci_name)
 651{
 652	xfs_ino_t		inum;
 653	int			error;
 654
 655	trace_xfs_lookup(dp, name);
 656
 657	if (xfs_is_shutdown(dp->i_mount))
 658		return -EIO;
 659	if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
 660		return -EIO;
 661
 662	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
 663	if (error)
 664		goto out_unlock;
 665
 666	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
 667	if (error)
 668		goto out_free_name;
 669
 670	return 0;
 671
 672out_free_name:
 673	if (ci_name)
 674		kmem_free(ci_name->name);
 675out_unlock:
 676	*ipp = NULL;
 677	return error;
 678}
 679
 680/* Propagate di_flags from a parent inode to a child inode. */
 681static void
 682xfs_inode_inherit_flags(
 683	struct xfs_inode	*ip,
 684	const struct xfs_inode	*pip)
 685{
 686	unsigned int		di_flags = 0;
 687	xfs_failaddr_t		failaddr;
 688	umode_t			mode = VFS_I(ip)->i_mode;
 689
 690	if (S_ISDIR(mode)) {
 691		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
 692			di_flags |= XFS_DIFLAG_RTINHERIT;
 693		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
 694			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
 695			ip->i_extsize = pip->i_extsize;
 696		}
 697		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
 698			di_flags |= XFS_DIFLAG_PROJINHERIT;
 699	} else if (S_ISREG(mode)) {
 700		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
 701		    xfs_has_realtime(ip->i_mount))
 702			di_flags |= XFS_DIFLAG_REALTIME;
 703		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
 704			di_flags |= XFS_DIFLAG_EXTSIZE;
 705			ip->i_extsize = pip->i_extsize;
 706		}
 707	}
 708	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
 709	    xfs_inherit_noatime)
 710		di_flags |= XFS_DIFLAG_NOATIME;
 711	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
 712	    xfs_inherit_nodump)
 713		di_flags |= XFS_DIFLAG_NODUMP;
 714	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
 715	    xfs_inherit_sync)
 716		di_flags |= XFS_DIFLAG_SYNC;
 717	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
 718	    xfs_inherit_nosymlinks)
 719		di_flags |= XFS_DIFLAG_NOSYMLINKS;
 720	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
 721	    xfs_inherit_nodefrag)
 722		di_flags |= XFS_DIFLAG_NODEFRAG;
 723	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
 724		di_flags |= XFS_DIFLAG_FILESTREAM;
 725
 726	ip->i_diflags |= di_flags;
 727
 728	/*
 729	 * Inode verifiers on older kernels only check that the extent size
 730	 * hint is an integer multiple of the rt extent size on realtime files.
 731	 * They did not check the hint alignment on a directory with both
 732	 * rtinherit and extszinherit flags set.  If the misaligned hint is
 733	 * propagated from a directory into a new realtime file, new file
 734	 * allocations will fail due to math errors in the rt allocator and/or
 735	 * trip the verifiers.  Validate the hint settings in the new file so
 736	 * that we don't let broken hints propagate.
 737	 */
 738	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
 739			VFS_I(ip)->i_mode, ip->i_diflags);
 740	if (failaddr) {
 741		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
 742				   XFS_DIFLAG_EXTSZINHERIT);
 743		ip->i_extsize = 0;
 744	}
 745}
 746
 747/* Propagate di_flags2 from a parent inode to a child inode. */
 748static void
 749xfs_inode_inherit_flags2(
 750	struct xfs_inode	*ip,
 751	const struct xfs_inode	*pip)
 752{
 753	xfs_failaddr_t		failaddr;
 754
 755	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
 756		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
 757		ip->i_cowextsize = pip->i_cowextsize;
 758	}
 759	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
 760		ip->i_diflags2 |= XFS_DIFLAG2_DAX;
 761
 762	/* Don't let invalid cowextsize hints propagate. */
 763	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
 764			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
 765	if (failaddr) {
 766		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
 767		ip->i_cowextsize = 0;
 768	}
 769}
 770
 771/*
 772 * Initialise a newly allocated inode and return the in-core inode to the
 773 * caller locked exclusively.
 774 */
 775int
 776xfs_init_new_inode(
 777	struct mnt_idmap	*idmap,
 778	struct xfs_trans	*tp,
 779	struct xfs_inode	*pip,
 780	xfs_ino_t		ino,
 781	umode_t			mode,
 782	xfs_nlink_t		nlink,
 783	dev_t			rdev,
 784	prid_t			prid,
 785	bool			init_xattrs,
 786	struct xfs_inode	**ipp)
 787{
 788	struct inode		*dir = pip ? VFS_I(pip) : NULL;
 789	struct xfs_mount	*mp = tp->t_mountp;
 790	struct xfs_inode	*ip;
 791	unsigned int		flags;
 792	int			error;
 793	struct timespec64	tv;
 794	struct inode		*inode;
 795
 796	/*
 797	 * Protect against obviously corrupt allocation btree records. Later
 798	 * xfs_iget checks will catch re-allocation of other active in-memory
 799	 * and on-disk inodes. If we don't catch reallocating the parent inode
 800	 * here we will deadlock in xfs_iget() so we have to do these checks
 801	 * first.
 802	 */
 803	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
 804		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
 805		return -EFSCORRUPTED;
 806	}
 807
 808	/*
 809	 * Get the in-core inode with the lock held exclusively to prevent
 810	 * others from looking at it until we're done.
 811	 */
 812	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
 813	if (error)
 814		return error;
 815
 816	ASSERT(ip != NULL);
 817	inode = VFS_I(ip);
 818	set_nlink(inode, nlink);
 819	inode->i_rdev = rdev;
 820	ip->i_projid = prid;
 821
 822	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
 823		inode_fsuid_set(inode, idmap);
 824		inode->i_gid = dir->i_gid;
 825		inode->i_mode = mode;
 826	} else {
 827		inode_init_owner(idmap, inode, dir, mode);
 828	}
 829
 830	/*
 831	 * If the group ID of the new file does not match the effective group
 832	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
 833	 * (and only if the irix_sgid_inherit compatibility variable is set).
 834	 */
 835	if (irix_sgid_inherit && (inode->i_mode & S_ISGID) &&
 836	    !vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
 837		inode->i_mode &= ~S_ISGID;
 838
 839	ip->i_disk_size = 0;
 840	ip->i_df.if_nextents = 0;
 841	ASSERT(ip->i_nblocks == 0);
 842
 843	tv = inode_set_ctime_current(inode);
 844	inode_set_mtime_to_ts(inode, tv);
 845	inode_set_atime_to_ts(inode, tv);
 846
 847	ip->i_extsize = 0;
 848	ip->i_diflags = 0;
 849
 850	if (xfs_has_v3inodes(mp)) {
 851		inode_set_iversion(inode, 1);
 852		ip->i_cowextsize = 0;
 853		ip->i_crtime = tv;
 854	}
 855
 856	flags = XFS_ILOG_CORE;
 857	switch (mode & S_IFMT) {
 858	case S_IFIFO:
 859	case S_IFCHR:
 860	case S_IFBLK:
 861	case S_IFSOCK:
 862		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
 863		flags |= XFS_ILOG_DEV;
 864		break;
 865	case S_IFREG:
 866	case S_IFDIR:
 867		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
 868			xfs_inode_inherit_flags(ip, pip);
 869		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
 870			xfs_inode_inherit_flags2(ip, pip);
 871		fallthrough;
 872	case S_IFLNK:
 873		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
 874		ip->i_df.if_bytes = 0;
 875		ip->i_df.if_data = NULL;
 876		break;
 877	default:
 878		ASSERT(0);
 879	}
 880
 881	/*
 882	 * If we need to create attributes immediately after allocating the
 883	 * inode, initialise an empty attribute fork right now. We use the
 884	 * default fork offset for attributes here as we don't know exactly what
 885	 * size or how many attributes we might be adding. We can do this
 886	 * safely here because we know the data fork is completely empty and
 887	 * this saves us from needing to run a separate transaction to set the
 888	 * fork offset in the immediate future.
 889	 */
 890	if (init_xattrs && xfs_has_attr(mp)) {
 891		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
 892		xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
 893	}
 894
 895	/*
 896	 * Log the new values stuffed into the inode.
 897	 */
 898	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 899	xfs_trans_log_inode(tp, ip, flags);
 900
 901	/* now that we have an i_mode we can setup the inode structure */
 902	xfs_setup_inode(ip);
 903
 904	*ipp = ip;
 905	return 0;
 906}
 907
 908/*
 909 * Decrement the link count on an inode & log the change.  If this causes the
 910 * link count to go to zero, move the inode to the AGI unlinked list so that it can
 911 * be freed when the last active reference goes away via xfs_inactive().
 912 */
 913static int			/* error */
 914xfs_droplink(
 915	xfs_trans_t *tp,
 916	xfs_inode_t *ip)
 917{
 918	if (VFS_I(ip)->i_nlink == 0) {
 919		xfs_alert(ip->i_mount,
 920			  "%s: Attempt to drop inode (%llu) with nlink zero.",
 921			  __func__, ip->i_ino);
 922		return -EFSCORRUPTED;
 923	}
 924
 925	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 926
 927	drop_nlink(VFS_I(ip));
 928	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 929
 930	if (VFS_I(ip)->i_nlink)
 931		return 0;
 932
 933	return xfs_iunlink(tp, ip);
 934}
 935
 936/*
 937 * Increment the link count on an inode & log the change.
 938 */
 939static void
 940xfs_bumplink(
 941	xfs_trans_t *tp,
 942	xfs_inode_t *ip)
 943{
 944	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 945
 946	inc_nlink(VFS_I(ip));
 947	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 948}
 949
 950int
 951xfs_create(
 952	struct mnt_idmap	*idmap,
 953	xfs_inode_t		*dp,
 954	struct xfs_name		*name,
 955	umode_t			mode,
 956	dev_t			rdev,
 957	bool			init_xattrs,
 958	xfs_inode_t		**ipp)
 959{
 960	int			is_dir = S_ISDIR(mode);
 961	struct xfs_mount	*mp = dp->i_mount;
 962	struct xfs_inode	*ip = NULL;
 963	struct xfs_trans	*tp = NULL;
 964	int			error;
 965	bool                    unlock_dp_on_error = false;
 966	prid_t			prid;
 967	struct xfs_dquot	*udqp = NULL;
 968	struct xfs_dquot	*gdqp = NULL;
 969	struct xfs_dquot	*pdqp = NULL;
 970	struct xfs_trans_res	*tres;
 971	uint			resblks;
 972	xfs_ino_t		ino;
 973
 974	trace_xfs_create(dp, name);
 975
 976	if (xfs_is_shutdown(mp))
 977		return -EIO;
 978	if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
 979		return -EIO;
 980
 981	prid = xfs_get_initial_prid(dp);
 982
 983	/*
 984	 * Make sure that we have allocated dquot(s) on disk.
 985	 */
 986	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
 987			mapped_fsgid(idmap, &init_user_ns), prid,
 988			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 989			&udqp, &gdqp, &pdqp);
 990	if (error)
 991		return error;
 992
 993	if (is_dir) {
 994		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
 995		tres = &M_RES(mp)->tr_mkdir;
 996	} else {
 997		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
 998		tres = &M_RES(mp)->tr_create;
 999	}
1000
1001	/*
1002	 * Initially assume that the file does not exist and
1003	 * reserve the resources for that case.  If that is not
1004	 * the case we'll drop the one we have and get a more
1005	 * appropriate transaction later.
1006	 */
1007	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1008			&tp);
1009	if (error == -ENOSPC) {
1010		/* flush outstanding delalloc blocks and retry */
1011		xfs_flush_inodes(mp);
1012		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
1013				resblks, &tp);
1014	}
1015	if (error)
1016		goto out_release_dquots;
1017
1018	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1019	unlock_dp_on_error = true;
1020
1021	/*
1022	 * A newly created regular or special file just has one directory
1023	 * entry pointing to it, but a directory also has the "." entry
1024	 * pointing to itself.
1025	 */
1026	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1027	if (!error)
1028		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
1029				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
1030	if (error)
1031		goto out_trans_cancel;
1032
1033	/*
1034	 * Now we join the directory inode to the transaction.  We do not do it
1035	 * earlier because xfs_dialloc might commit the previous transaction
1036	 * (and release all the locks).  An error from here on will result in
1037	 * the transaction cancel unlocking dp so don't do it explicitly in the
1038	 * error path.
1039	 */
1040	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1041	unlock_dp_on_error = false;
1042
1043	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1044					resblks - XFS_IALLOC_SPACE_RES(mp));
1045	if (error) {
1046		ASSERT(error != -ENOSPC);
1047		goto out_trans_cancel;
1048	}
1049	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1050	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1051
1052	if (is_dir) {
1053		error = xfs_dir_init(tp, ip, dp);
1054		if (error)
1055			goto out_trans_cancel;
1056
1057		xfs_bumplink(tp, dp);
1058	}
1059
1060	/*
1061	 * If this is a synchronous mount, make sure that the
1062	 * create transaction goes to disk before returning to
1063	 * the user.
1064	 */
1065	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1066		xfs_trans_set_sync(tp);
1067
1068	/*
1069	 * Attach the dquot(s) to the inodes and modify them incore.
1070	 * The IDs of the inode couldn't have changed since the new
1071	 * inode has been locked ever since it was created.
1072	 */
1073	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1074
1075	error = xfs_trans_commit(tp);
1076	if (error)
1077		goto out_release_inode;
1078
1079	xfs_qm_dqrele(udqp);
1080	xfs_qm_dqrele(gdqp);
1081	xfs_qm_dqrele(pdqp);
1082
1083	*ipp = ip;
1084	return 0;
1085
1086 out_trans_cancel:
1087	xfs_trans_cancel(tp);
1088 out_release_inode:
1089	/*
1090	 * Wait until after the current transaction is aborted to finish the
1091	 * setup of the inode and release the inode.  This prevents recursive
1092	 * transactions and deadlocks from xfs_inactive.
1093	 */
1094	if (ip) {
1095		xfs_finish_inode_setup(ip);
1096		xfs_irele(ip);
1097	}
1098 out_release_dquots:
1099	xfs_qm_dqrele(udqp);
1100	xfs_qm_dqrele(gdqp);
1101	xfs_qm_dqrele(pdqp);
1102
1103	if (unlock_dp_on_error)
1104		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1105	return error;
1106}
1107
1108int
1109xfs_create_tmpfile(
1110	struct mnt_idmap	*idmap,
1111	struct xfs_inode	*dp,
1112	umode_t			mode,
1113	struct xfs_inode	**ipp)
1114{
1115	struct xfs_mount	*mp = dp->i_mount;
1116	struct xfs_inode	*ip = NULL;
1117	struct xfs_trans	*tp = NULL;
1118	int			error;
1119	prid_t                  prid;
1120	struct xfs_dquot	*udqp = NULL;
1121	struct xfs_dquot	*gdqp = NULL;
1122	struct xfs_dquot	*pdqp = NULL;
1123	struct xfs_trans_res	*tres;
1124	uint			resblks;
1125	xfs_ino_t		ino;
1126
1127	if (xfs_is_shutdown(mp))
1128		return -EIO;
1129
1130	prid = xfs_get_initial_prid(dp);
1131
1132	/*
1133	 * Make sure that we have allocated dquot(s) on disk.
1134	 */
1135	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
1136			mapped_fsgid(idmap, &init_user_ns), prid,
1137			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1138			&udqp, &gdqp, &pdqp);
1139	if (error)
1140		return error;
1141
1142	resblks = XFS_IALLOC_SPACE_RES(mp);
1143	tres = &M_RES(mp)->tr_create_tmpfile;
1144
1145	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1146			&tp);
1147	if (error)
1148		goto out_release_dquots;
1149
1150	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1151	if (!error)
1152		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
1153				0, 0, prid, false, &ip);
1154	if (error)
1155		goto out_trans_cancel;
1156
1157	if (xfs_has_wsync(mp))
1158		xfs_trans_set_sync(tp);
1159
1160	/*
1161	 * Attach the dquot(s) to the inodes and modify them incore.
1162	 * The IDs of the inode couldn't have changed since the new
1163	 * inode has been locked ever since it was created.
1164	 */
1165	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1166
1167	error = xfs_iunlink(tp, ip);
1168	if (error)
1169		goto out_trans_cancel;
1170
1171	error = xfs_trans_commit(tp);
1172	if (error)
1173		goto out_release_inode;
1174
1175	xfs_qm_dqrele(udqp);
1176	xfs_qm_dqrele(gdqp);
1177	xfs_qm_dqrele(pdqp);
1178
1179	*ipp = ip;
1180	return 0;
1181
1182 out_trans_cancel:
1183	xfs_trans_cancel(tp);
1184 out_release_inode:
1185	/*
1186	 * Wait until after the current transaction is aborted to finish the
1187	 * setup of the inode and release the inode.  This prevents recursive
1188	 * transactions and deadlocks from xfs_inactive.
1189	 */
1190	if (ip) {
1191		xfs_finish_inode_setup(ip);
1192		xfs_irele(ip);
1193	}
1194 out_release_dquots:
1195	xfs_qm_dqrele(udqp);
1196	xfs_qm_dqrele(gdqp);
1197	xfs_qm_dqrele(pdqp);
1198
1199	return error;
1200}
1201
1202int
1203xfs_link(
1204	xfs_inode_t		*tdp,
1205	xfs_inode_t		*sip,
1206	struct xfs_name		*target_name)
1207{
1208	xfs_mount_t		*mp = tdp->i_mount;
1209	xfs_trans_t		*tp;
1210	int			error, nospace_error = 0;
1211	int			resblks;
1212
1213	trace_xfs_link(tdp, target_name);
1214
1215	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1216
1217	if (xfs_is_shutdown(mp))
1218		return -EIO;
1219	if (xfs_ifork_zapped(tdp, XFS_DATA_FORK))
1220		return -EIO;
1221
1222	error = xfs_qm_dqattach(sip);
1223	if (error)
1224		goto std_return;
1225
1226	error = xfs_qm_dqattach(tdp);
1227	if (error)
1228		goto std_return;
1229
1230	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1231	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
1232			&tp, &nospace_error);
1233	if (error)
1234		goto std_return;
1235
1236	/*
1237	 * If we are using project inheritance, we only allow hard link
1238	 * creation in our tree when the project IDs are the same; else
1239	 * the tree quota mechanism could be circumvented.
1240	 */
1241	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
1242		     tdp->i_projid != sip->i_projid)) {
1243		error = -EXDEV;
1244		goto error_return;
1245	}
1246
1247	if (!resblks) {
1248		error = xfs_dir_canenter(tp, tdp, target_name);
1249		if (error)
1250			goto error_return;
1251	}
1252
1253	/*
1254	 * Handle initial link state of O_TMPFILE inode
1255	 */
1256	if (VFS_I(sip)->i_nlink == 0) {
1257		struct xfs_perag	*pag;
1258
1259		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
1260		error = xfs_iunlink_remove(tp, pag, sip);
1261		xfs_perag_put(pag);
1262		if (error)
1263			goto error_return;
1264	}
1265
1266	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1267				   resblks);
1268	if (error)
1269		goto error_return;
1270	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1271	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1272
1273	xfs_bumplink(tp, sip);
1274
1275	/*
1276	 * If this is a synchronous mount, make sure that the
1277	 * link transaction goes to disk before returning to
1278	 * the user.
1279	 */
1280	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1281		xfs_trans_set_sync(tp);
1282
1283	return xfs_trans_commit(tp);
1284
1285 error_return:
1286	xfs_trans_cancel(tp);
1287 std_return:
1288	if (error == -ENOSPC && nospace_error)
1289		error = nospace_error;
1290	return error;
1291}
1292
1293/* Clear the reflink flag and the cowblocks tag if possible. */
1294static void
1295xfs_itruncate_clear_reflink_flags(
1296	struct xfs_inode	*ip)
1297{
1298	struct xfs_ifork	*dfork;
1299	struct xfs_ifork	*cfork;
1300
1301	if (!xfs_is_reflink_inode(ip))
1302		return;
1303	dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1304	cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
1305	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1306		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1307	if (cfork->if_bytes == 0)
1308		xfs_inode_clear_cowblocks_tag(ip);
1309}
1310
1311/*
1312 * Free up the underlying blocks past new_size.  The new size must be smaller
1313 * than the current size.  This routine can be used both for the attribute and
1314 * data fork, and does not modify the inode size, which is left to the caller.
1315 *
1316 * The transaction passed to this routine must have made a permanent log
1317 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1318 * given transaction and start new ones, so make sure everything involved in
1319 * the transaction is tidy before calling here.  A transaction will be
1320 * returned to the caller to be committed.  The incoming transaction must
1321 * already include the inode, and both inode locks must be held exclusively.
1322 * The inode must also be "held" within the transaction.  On return the inode
1323 * will be "held" within the returned transaction.  This routine does NOT
1324 * require any disk space to be reserved for it within the transaction.
1325 *
1326 * If we get an error, we must return with the inode locked and linked into the
1327 * current transaction. This keeps things simple for the higher level code,
1328 * because it always knows that the inode is locked and held in the transaction
1329 * that returns to it whether errors occur or not.  We don't mark the inode
1330 * dirty on error so that transactions can be easily aborted if possible.
1331 */
1332int
1333xfs_itruncate_extents_flags(
1334	struct xfs_trans	**tpp,
1335	struct xfs_inode	*ip,
1336	int			whichfork,
1337	xfs_fsize_t		new_size,
1338	int			flags)
1339{
1340	struct xfs_mount	*mp = ip->i_mount;
1341	struct xfs_trans	*tp = *tpp;
1342	xfs_fileoff_t		first_unmap_block;
1343	int			error = 0;
1344
1345	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1346	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1347	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1348	ASSERT(new_size <= XFS_ISIZE(ip));
1349	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1350	ASSERT(ip->i_itemp != NULL);
1351	ASSERT(ip->i_itemp->ili_lock_flags == 0);
1352	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1353
1354	trace_xfs_itruncate_extents_start(ip, new_size);
1355
1356	flags |= xfs_bmapi_aflag(whichfork);
1357
1358	/*
1359	 * Since it is possible for space to become allocated beyond
1360	 * the end of the file (in a crash where the space is allocated
1361	 * but the inode size is not yet updated), simply remove any
1362	 * blocks which show up between the new EOF and the maximum
1363	 * possible file size.
1364	 *
1365	 * We have to free all the blocks to the bmbt maximum offset, even if
1366	 * the page cache can't scale that far.
1367	 */
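	/*
	 * Worked example (assuming 4096-byte filesystem blocks): a truncate
	 * to new_size = 6000 rounds up to first_unmap_block = 2, so block 1,
	 * which still contains the new EOF, is preserved while blocks 2
	 * through XFS_MAX_FILEOFF are unmapped below.
	 */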
1368	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1369	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
1370		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
1371		return 0;
1372	}
1373
1374	error = xfs_bunmapi_range(&tp, ip, flags, first_unmap_block,
1375			XFS_MAX_FILEOFF);
1376	if (error)
1377		goto out;
1378
1379	if (whichfork == XFS_DATA_FORK) {
1380		/* Remove all pending CoW reservations. */
1381		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1382				first_unmap_block, XFS_MAX_FILEOFF, true);
1383		if (error)
1384			goto out;
1385
1386		xfs_itruncate_clear_reflink_flags(ip);
1387	}
1388
1389	/*
1390	 * Always re-log the inode so that our permanent transaction can keep
1391	 * on rolling it forward in the log.
1392	 */
1393	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1394
1395	trace_xfs_itruncate_extents_end(ip, new_size);
1396
1397out:
1398	*tpp = tp;
1399	return error;
1400}
1401
1402int
1403xfs_release(
1404	xfs_inode_t	*ip)
1405{
1406	xfs_mount_t	*mp = ip->i_mount;
1407	int		error = 0;
1408
1409	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1410		return 0;
1411
1412	/* If this is a read-only mount, don't do this (would generate I/O) */
1413	if (xfs_is_readonly(mp))
1414		return 0;
1415
1416	if (!xfs_is_shutdown(mp)) {
1417		int truncated;
1418
1419		/*
1420		 * If we previously truncated this file and removed old data
1421		 * in the process, we want to initiate "early" writeout on
1422		 * the last close.  This is an attempt to combat the notorious
1423		 * NULL files problem which is particularly noticeable from a
1424		 * truncate down, buffered (re-)write (delalloc), followed by
1425		 * a crash.  What we are effectively doing here is
1426		 * significantly reducing the time window where we'd otherwise
1427		 * be exposed to that problem.
1428		 */
1429		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1430		if (truncated) {
1431			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1432			if (ip->i_delayed_blks > 0) {
1433				error = filemap_flush(VFS_I(ip)->i_mapping);
1434				if (error)
1435					return error;
1436			}
1437		}
1438	}
1439
1440	if (VFS_I(ip)->i_nlink == 0)
1441		return 0;
1442
1443	/*
1444	 * If we can't get the iolock just skip truncating the blocks past EOF
1445	 * because we could deadlock with the mmap_lock otherwise. We'll get
1446	 * another chance to drop them once the last reference to the inode is
1447	 * dropped, so we'll never leak blocks permanently.
1448	 */
1449	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
1450		return 0;
1451
1452	if (xfs_can_free_eofblocks(ip, false)) {
1453		/*
1454		 * If the inode is being opened, written and closed
1455		 * frequently and we have delayed allocation blocks outstanding
1456		 * (e.g. streaming writes from the NFS server), truncating the
1457		 * blocks past EOF will cause fragmentation to occur.
1458		 *
1459		 * In this case don't do the truncation, but we have to be
1460		 * careful how we detect this case. Blocks beyond EOF show up as
1461		 * i_delayed_blks even when the inode is clean, so we need to
1462		 * truncate them away first before checking for a dirty release.
1463		 * Hence on the first dirty close we will still remove the
1464		 * speculative allocation, but after that we will leave it in
1465		 * place.
1466		 */
1467		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1468			goto out_unlock;
1469
1470		error = xfs_free_eofblocks(ip);
1471		if (error)
1472			goto out_unlock;
1473
1474		/* delalloc blocks after truncation means it really is dirty */
1475		if (ip->i_delayed_blks)
1476			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1477	}
1478
1479out_unlock:
1480	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1481	return error;
1482}
1483
1484/*
1485 * xfs_inactive_truncate
1486 *
1487 * Called to perform a truncate when an inode becomes unlinked.
1488 */
1489STATIC int
1490xfs_inactive_truncate(
1491	struct xfs_inode *ip)
1492{
1493	struct xfs_mount	*mp = ip->i_mount;
1494	struct xfs_trans	*tp;
1495	int			error;
1496
1497	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1498	if (error) {
1499		ASSERT(xfs_is_shutdown(mp));
1500		return error;
1501	}
1502	xfs_ilock(ip, XFS_ILOCK_EXCL);
1503	xfs_trans_ijoin(tp, ip, 0);
1504
1505	/*
1506	 * Log the inode size first to prevent stale data exposure in the event
1507	 * of a system crash before the truncate completes. See the related
1508	 * comment in xfs_vn_setattr_size() for details.
1509	 */
1510	ip->i_disk_size = 0;
1511	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1512
1513	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1514	if (error)
1515		goto error_trans_cancel;
1516
1517	ASSERT(ip->i_df.if_nextents == 0);
1518
1519	error = xfs_trans_commit(tp);
1520	if (error)
1521		goto error_unlock;
1522
1523	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1524	return 0;
1525
1526error_trans_cancel:
1527	xfs_trans_cancel(tp);
1528error_unlock:
1529	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1530	return error;
1531}
1532
1533/*
1534 * xfs_inactive_ifree()
1535 *
1536 * Perform the inode free when an inode is unlinked.
1537 */
1538STATIC int
1539xfs_inactive_ifree(
1540	struct xfs_inode *ip)
1541{
1542	struct xfs_mount	*mp = ip->i_mount;
1543	struct xfs_trans	*tp;
1544	int			error;
1545
1546	/*
1547	 * We try to use a per-AG reservation for any block needed by the finobt
1548	 * tree, but as the finobt feature predates the per-AG reservation
1549	 * support a degraded file system might not have enough space for the
1550	 * reservation at mount time.  In that case try to dip into the reserved
1551	 * pool and pray.
1552	 *
1553	 * Send a warning if the reservation does happen to fail, as the inode
1554	 * now remains allocated and sits on the unlinked list until the fs is
1555	 * repaired.
1556	 */
1557	if (unlikely(mp->m_finobt_nores)) {
1558		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1559				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1560				&tp);
1561	} else {
1562		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1563	}
1564	if (error) {
1565		if (error == -ENOSPC) {
1566			xfs_warn_ratelimited(mp,
1567			"Failed to remove inode(s) from unlinked list. "
1568			"Please free space, unmount and run xfs_repair.");
1569		} else {
1570			ASSERT(xfs_is_shutdown(mp));
1571		}
1572		return error;
1573	}
1574
1575	/*
1576	 * We do not hold the inode locked across the entire rolling transaction
1577	 * here. We only need to hold it for the first transaction that
1578	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
1579	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
1580	 * here breaks the relationship between cluster buffer invalidation and
1581	 * stale inode invalidation on cluster buffer item journal commit
1582	 * completion, and can result in leaving dirty stale inodes hanging
1583	 * around in memory.
1584	 *
1585	 * We have no need for serialising this inode operation against other
1586	 * operations - we freed the inode and hence reallocation is required
1587	 * and that will serialise on reallocating the space the deferops need
1588	 * to free. Hence we can unlock the inode on the first commit of
1589	 * the transaction rather than roll it right through the deferops. This
1590	 * avoids relogging the XFS_ISTALE inode.
1591	 *
1592	 * We check that xfs_ifree() hasn't grown an internal transaction roll
1593	 * by asserting that the inode is still locked when it returns.
1594	 */
1595	xfs_ilock(ip, XFS_ILOCK_EXCL);
1596	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1597
1598	error = xfs_ifree(tp, ip);
1599	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1600	if (error) {
1601		/*
1602		 * If we fail to free the inode, shut down.  The cancel
1603		 * might do that, but we need to make sure.  Otherwise the
1604		 * inode might be lost for a long time or forever.
1605		 */
1606		if (!xfs_is_shutdown(mp)) {
1607			xfs_notice(mp, "%s: xfs_ifree returned error %d",
1608				__func__, error);
1609			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1610		}
1611		xfs_trans_cancel(tp);
1612		return error;
1613	}
1614
1615	/*
1616	 * Credit the quota account(s). The inode is gone.
1617	 */
1618	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1619
1620	return xfs_trans_commit(tp);
1621}
1622
1623/*
1624 * Returns true if we need to update the on-disk metadata before we can free
1625 * the memory used by this inode.  Updates include freeing post-eof
1626 * preallocations; freeing COW staging extents; and marking the inode free in
1627 * the inobt if it is on the unlinked list.
1628 */
1629bool
1630xfs_inode_needs_inactive(
1631	struct xfs_inode	*ip)
1632{
1633	struct xfs_mount	*mp = ip->i_mount;
1634	struct xfs_ifork	*cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
1635
1636	/*
1637	 * If the inode is already free, then there can be nothing
1638	 * to clean up here.
1639	 */
1640	if (VFS_I(ip)->i_mode == 0)
1641		return false;
1642
1643	/*
1644	 * If this is a read-only mount, don't do this (would generate I/O)
1645	 * unless we're in log recovery and cleaning the iunlinked list.
1646	 */
1647	if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
1648		return false;
1649
1650	/* If the log isn't running, push inodes straight to reclaim. */
1651	if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
1652		return false;
1653
1654	/* Metadata inodes require explicit resource cleanup. */
1655	if (xfs_is_metadata_inode(ip))
1656		return false;
1657
1658	/* Want to clean out the cow blocks if there are any. */
1659	if (cow_ifp && cow_ifp->if_bytes > 0)
1660		return true;
1661
1662	/* Unlinked files must be freed. */
1663	if (VFS_I(ip)->i_nlink == 0)
1664		return true;
1665
1666	/*
1667	 * This file isn't being freed, so check if there are post-eof blocks
1668	 * to free.  @force is true because we are evicting an inode from the
1669	 * cache.  Post-eof blocks must be freed, lest we end up with broken
1670	 * free space accounting.
1671	 *
1672	 * Note: don't bother with iolock here since lockdep complains about
1673	 * acquiring it in reclaim context. We have the only reference to the
1674	 * inode at this point anyways.
1675	 */
1676	return xfs_can_free_eofblocks(ip, true);
1677}
1678
1679/*
1680 * xfs_inactive
1681 *
1682 * This is called when the reference count for the vnode
1683 * goes to zero.  If the file has been unlinked, then it must
1684 * now be truncated.  Also, we clear all of the read-ahead state
1685 * kept for the inode here since the file is now closed.
1686 */
1687int
1688xfs_inactive(
1689	xfs_inode_t	*ip)
1690{
1691	struct xfs_mount	*mp;
1692	int			error = 0;
1693	int			truncate = 0;
1694
1695	/*
1696	 * If the inode is already free, then there can be nothing
1697	 * to clean up here.
1698	 */
1699	if (VFS_I(ip)->i_mode == 0) {
1700		ASSERT(ip->i_df.if_broot_bytes == 0);
1701		goto out;
1702	}
1703
1704	mp = ip->i_mount;
1705	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1706
1707	/*
1708	 * If this is a read-only mount, don't do this (would generate I/O)
1709	 * unless we're in log recovery and cleaning the iunlinked list.
1710	 */
1711	if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
1712		goto out;
1713
1714	/* Metadata inodes require explicit resource cleanup. */
1715	if (xfs_is_metadata_inode(ip))
1716		goto out;
1717
1718	/* Try to clean out the cow blocks if there are any. */
1719	if (xfs_inode_has_cow_data(ip))
1720		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1721
1722	if (VFS_I(ip)->i_nlink != 0) {
1723		/*
1724		 * force is true because we are evicting an inode from the
1725		 * cache. Post-eof blocks must be freed, lest we end up with
1726		 * broken free space accounting.
1727		 *
1728		 * Note: don't bother with iolock here since lockdep complains
1729		 * about acquiring it in reclaim context. We have the only
1730		 * reference to the inode at this point anyways.
1731		 */
1732		if (xfs_can_free_eofblocks(ip, true))
1733			error = xfs_free_eofblocks(ip);
1734
1735		goto out;
1736	}
1737
1738	if (S_ISREG(VFS_I(ip)->i_mode) &&
1739	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1740	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1741		truncate = 1;
1742
1743	if (xfs_iflags_test(ip, XFS_IQUOTAUNCHECKED)) {
1744		/*
1745		 * If this inode is being inactivated during a quotacheck and
1746		 * has not yet been scanned by quotacheck, we /must/ remove
1747		 * the dquots from the inode before inactivation changes the
1748		 * block and inode counts.  Most probably this is a result of
1749		 * reloading the incore iunlinked list to purge unrecovered
1750		 * unlinked inodes.
1751		 */
1752		xfs_qm_dqdetach(ip);
1753	} else {
1754		error = xfs_qm_dqattach(ip);
1755		if (error)
1756			goto out;
1757	}
1758
1759	if (S_ISLNK(VFS_I(ip)->i_mode))
1760		error = xfs_inactive_symlink(ip);
1761	else if (truncate)
1762		error = xfs_inactive_truncate(ip);
1763	if (error)
1764		goto out;
1765
1766	/*
1767	 * If there are attributes associated with the file then blow them away
1768	 * now.  The code calls a routine that recursively deconstructs the
1769	 * attribute fork. It also blows away the in-core attribute fork.
1770	 */
1771	if (xfs_inode_has_attr_fork(ip)) {
1772		error = xfs_attr_inactive(ip);
1773		if (error)
1774			goto out;
1775	}
1776
1777	ASSERT(ip->i_forkoff == 0);
1778
1779	/*
1780	 * Free the inode.
1781	 */
1782	error = xfs_inactive_ifree(ip);
1783
1784out:
1785	/*
1786	 * We're done making metadata updates for this inode, so we can release
1787	 * the attached dquots.
1788	 */
1789	xfs_qm_dqdetach(ip);
1790	return error;
1791}
1792
1793/*
1794 * In-Core Unlinked List Lookups
1795 * =============================
1796 *
1797 * Every inode is supposed to be reachable from some other piece of metadata
1798 * with the exception of the root directory.  Inodes with a connection to a
1799 * file descriptor but not linked from anywhere in the on-disk directory tree
1800 * are collectively known as unlinked inodes, though the filesystem itself
1801 * maintains links to these inodes so that on-disk metadata are consistent.
1802 *
1803 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
1804 * header contains a number of buckets that point to an inode, and each inode
1805 * record has a pointer to the next inode in the hash chain.  This
1806 * singly-linked list causes scaling problems in the iunlink remove function
1807 * because we must walk that list to find the inode that points to the inode
1808 * being removed from the unlinked hash bucket list.
1809 *
1810 * Hence we keep an in-memory doubly linked list to link each inode on an
1811 * unlinked list. Because there are 64 unlinked lists per AGI, keeping pointer
1812 * based lists would require having 64 list heads in the perag, one for each
1813 * list. This is expensive in terms of memory (think millions of AGs) and cache
1814 * misses on lookups. Instead, use the fact that inodes on the unlinked list
1815 * must be referenced at the VFS level to keep them on the list and hence we
1816 * have an existence guarantee for inodes on the unlinked list.
1817 *
1818 * Given we have an existence guarantee, we can use lockless inode cache lookups
1819 * to resolve aginos to xfs inodes. This means we only need 8 bytes per inode
1820 * for the doubly linked unlinked list, and we don't need any extra locking to
1821 * keep the list safe as all manipulations are done under the AGI buffer lock.
1822 * Keeping the list up to date does not require memory allocation, just finding
1823 * the XFS inode and updating the next/prev unlinked list aginos.
1824 */
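/*
 * Illustrative sketch (inode letters are hypothetical): for an on-disk
 * bucket chain A -> B -> C, the incore back pointers run the other way:
 *
 *	agi_unlinked[n] -> A -> B -> C -> NULLAGINO	(i_next_unlinked)
 *	     NULLAGINO <- A <- B <- C			(i_prev_unlinked)
 *
 * Removing B then only requires logging A's next pointer so that it skips
 * to C, and setting C->i_prev_unlinked to A's agino in memory; no walk of
 * the on-disk chain is needed.
 */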
1825
1826/*
1827 * Find an inode on the unlinked list.  This does not take a reference to the
1828 * inode because holding the AGI buffer lock guarantees existence and because
1829 * only unlinked, referenced inodes can be on the unlinked inode list.  If we
1830 * don't find the inode in cache, then let the caller handle the situation.
1831 */
1832static struct xfs_inode *
1833xfs_iunlink_lookup(
1834	struct xfs_perag	*pag,
1835	xfs_agino_t		agino)
1836{
1837	struct xfs_inode	*ip;
1838
1839	rcu_read_lock();
1840	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
1841	if (!ip) {
1842		/* Caller can handle inode not being in memory. */
1843		rcu_read_unlock();
1844		return NULL;
1845	}
1846
1847	/*
1848	 * Inode in RCU freeing limbo should not happen.  Warn about this and
1849	 * let the caller handle the failure.
1850	 */
1851	if (WARN_ON_ONCE(!ip->i_ino)) {
1852		rcu_read_unlock();
1853		return NULL;
1854	}
1855	ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
1856	rcu_read_unlock();
1857	return ip;
1858}
1859
1860/*
1861 * Update the prev pointer of the next agino.  Returns -ENOLINK if the inode
1862 * is not in cache.
1863 */
1864static int
1865xfs_iunlink_update_backref(
1866	struct xfs_perag	*pag,
1867	xfs_agino_t		prev_agino,
1868	xfs_agino_t		next_agino)
1869{
1870	struct xfs_inode	*ip;
1871
1872	/* No update necessary if we are at the end of the list. */
1873	if (next_agino == NULLAGINO)
1874		return 0;
1875
1876	ip = xfs_iunlink_lookup(pag, next_agino);
1877	if (!ip)
1878		return -ENOLINK;
1879
1880	ip->i_prev_unlinked = prev_agino;
1881	return 0;
1882}
1883
1884/*
1885 * Point the AGI unlinked bucket at an inode and log the results.  The caller
1886 * is responsible for validating the old value.
1887 */
1888STATIC int
1889xfs_iunlink_update_bucket(
1890	struct xfs_trans	*tp,
1891	struct xfs_perag	*pag,
1892	struct xfs_buf		*agibp,
1893	unsigned int		bucket_index,
1894	xfs_agino_t		new_agino)
1895{
1896	struct xfs_agi		*agi = agibp->b_addr;
1897	xfs_agino_t		old_value;
1898	int			offset;
1899
1900	ASSERT(xfs_verify_agino_or_null(pag, new_agino));
1901
1902	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1903	trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
1904			old_value, new_agino);
1905
1906	/*
1907	 * We should never find the head of the list already set to the value
1908	 * passed in because either we're adding or removing ourselves from the
1909	 * head of the list.
1910	 */
1911	if (old_value == new_agino) {
1912		xfs_buf_mark_corrupt(agibp);
1913		return -EFSCORRUPTED;
1914	}
1915
1916	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
1917	offset = offsetof(struct xfs_agi, agi_unlinked) +
1918			(sizeof(xfs_agino_t) * bucket_index);
1919	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
1920	return 0;
1921}
1922
1923/*
1924 * Load the inode @next_agino into the cache and set its prev_unlinked pointer
1925 * to @prev_agino.  Caller must hold the AGI to synchronize with other changes
1926 * to the unlinked list.
1927 */
1928STATIC int
1929xfs_iunlink_reload_next(
1930	struct xfs_trans	*tp,
1931	struct xfs_buf		*agibp,
1932	xfs_agino_t		prev_agino,
1933	xfs_agino_t		next_agino)
1934{
1935	struct xfs_perag	*pag = agibp->b_pag;
1936	struct xfs_mount	*mp = pag->pag_mount;
1937	struct xfs_inode	*next_ip = NULL;
1938	xfs_ino_t		ino;
1939	int			error;
1940
1941	ASSERT(next_agino != NULLAGINO);
1942
1943#ifdef DEBUG
1944	rcu_read_lock();
1945	next_ip = radix_tree_lookup(&pag->pag_ici_root, next_agino);
1946	ASSERT(next_ip == NULL);
1947	rcu_read_unlock();
1948#endif
1949
1950	xfs_info_ratelimited(mp,
1951 "Found unrecovered unlinked inode 0x%x in AG 0x%x.  Initiating recovery.",
1952			next_agino, pag->pag_agno);
1953
1954	/*
1955	 * Use an untrusted lookup just to be cautious in case the AGI has been
1956	 * corrupted and now points at a free inode.  That shouldn't happen,
1957	 * but we'd rather shut down now since we're already running in a weird
1958	 * situation.
1959	 */
1960	ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, next_agino);
1961	error = xfs_iget(mp, tp, ino, XFS_IGET_UNTRUSTED, 0, &next_ip);
1962	if (error)
1963		return error;
1964
1965	/* If this is not an unlinked inode, something is very wrong. */
1966	if (VFS_I(next_ip)->i_nlink != 0) {
1967		error = -EFSCORRUPTED;
1968		goto rele;
1969	}
1970
1971	next_ip->i_prev_unlinked = prev_agino;
1972	trace_xfs_iunlink_reload_next(next_ip);
1973rele:
1974	ASSERT(!(VFS_I(next_ip)->i_state & I_DONTCACHE));
1975	if (xfs_is_quotacheck_running(mp) && next_ip)
1976		xfs_iflags_set(next_ip, XFS_IQUOTAUNCHECKED);
1977	xfs_irele(next_ip);
1978	return error;
1979}
1980
1981static int
1982xfs_iunlink_insert_inode(
1983	struct xfs_trans	*tp,
1984	struct xfs_perag	*pag,
1985	struct xfs_buf		*agibp,
1986	struct xfs_inode	*ip)
1987{
1988	struct xfs_mount	*mp = tp->t_mountp;
1989	struct xfs_agi		*agi = agibp->b_addr;
1990	xfs_agino_t		next_agino;
1991	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1992	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1993	int			error;
1994
1995	/*
1996	 * Get the index into the agi hash table for the list this inode will
1997	 * go on.  Make sure the pointer isn't garbage and that this inode
1998	 * isn't already on the list.
1999	 */
2000	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2001	if (next_agino == agino ||
2002	    !xfs_verify_agino_or_null(pag, next_agino)) {
2003		xfs_buf_mark_corrupt(agibp);
2004		return -EFSCORRUPTED;
2005	}
2006
2007	/*
2008	 * Update the prev pointer in the next inode to point back to this
2009	 * inode.
2010	 */
2011	error = xfs_iunlink_update_backref(pag, agino, next_agino);
2012	if (error == -ENOLINK)
2013		error = xfs_iunlink_reload_next(tp, agibp, agino, next_agino);
2014	if (error)
2015		return error;
2016
2017	if (next_agino != NULLAGINO) {
2018		/*
2019		 * There is already another inode in the bucket, so point this
2020		 * inode to the current head of the list.
2021		 */
2022		error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
2023		if (error)
2024			return error;
2025		ip->i_next_unlinked = next_agino;
2026	}
2027
2028	/* Point the head of the list to point to this inode. */
2029	ip->i_prev_unlinked = NULLAGINO;
2030	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
2031}
2032
2033/*
2034 * This is called when the inode's link count has gone to 0 or we are creating
2035 * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
2036 *
2037 * We place the on-disk inode on a list in the AGI.  It will be pulled from this
2038 * list when the inode is freed.
2039 */
2040STATIC int
2041xfs_iunlink(
2042	struct xfs_trans	*tp,
2043	struct xfs_inode	*ip)
2044{
2045	struct xfs_mount	*mp = tp->t_mountp;
2046	struct xfs_perag	*pag;
2047	struct xfs_buf		*agibp;
2048	int			error;
2049
2050	ASSERT(VFS_I(ip)->i_nlink == 0);
2051	ASSERT(VFS_I(ip)->i_mode != 0);
2052	trace_xfs_iunlink(ip);
2053
2054	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2055
2056	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2057	error = xfs_read_agi(pag, tp, &agibp);
2058	if (error)
2059		goto out;
2060
2061	error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
2062out:
2063	xfs_perag_put(pag);
2064	return error;
2065}
2066
2067static int
2068xfs_iunlink_remove_inode(
2069	struct xfs_trans	*tp,
2070	struct xfs_perag	*pag,
2071	struct xfs_buf		*agibp,
2072	struct xfs_inode	*ip)
2073{
2074	struct xfs_mount	*mp = tp->t_mountp;
2075	struct xfs_agi		*agi = agibp->b_addr;
2076	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2077	xfs_agino_t		head_agino;
2078	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2079	int			error;
2080
2081	trace_xfs_iunlink_remove(ip);
2082
2083	/*
2084	 * Get the index into the agi hash table for the list this inode is
2085	 * on.  Make sure the head pointer isn't garbage.
2086	 */
2087	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2088	if (!xfs_verify_agino(pag, head_agino)) {
2089		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2090				agi, sizeof(*agi));
2091		return -EFSCORRUPTED;
2092	}
2093
2094	/*
2095	 * Set our inode's next_unlinked pointer to NULL and then return
2096	 * the old pointer value so that we can update whatever was previous
2097	 * to us in the list to point to whatever was next in the list.
2098	 */
2099	error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO);
2100	if (error)
2101		return error;
2102
2103	/*
2104	 * Update the prev pointer in the next inode to point back to previous
2105	 * inode in the chain.
2106	 */
2107	error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
2108			ip->i_next_unlinked);
2109	if (error == -ENOLINK)
2110		error = xfs_iunlink_reload_next(tp, agibp, ip->i_prev_unlinked,
2111				ip->i_next_unlinked);
2112	if (error)
2113		return error;
2114
2115	if (head_agino != agino) {
2116		struct xfs_inode	*prev_ip;
2117
2118		prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
2119		if (!prev_ip)
2120			return -EFSCORRUPTED;
2121
2122		error = xfs_iunlink_log_inode(tp, prev_ip, pag,
2123				ip->i_next_unlinked);
2124		prev_ip->i_next_unlinked = ip->i_next_unlinked;
2125	} else {
2126		/* Point the head of the list to the next unlinked inode. */
2127		error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
2128				ip->i_next_unlinked);
2129	}
2130
2131	ip->i_next_unlinked = NULLAGINO;
2132	ip->i_prev_unlinked = 0;
2133	return error;
2134}
2135
2136/*
2137 * Pull the on-disk inode from the AGI unlinked list.
2138 */
2139STATIC int
2140xfs_iunlink_remove(
2141	struct xfs_trans	*tp,
2142	struct xfs_perag	*pag,
2143	struct xfs_inode	*ip)
2144{
2145	struct xfs_buf		*agibp;
2146	int			error;
2147
2148	trace_xfs_iunlink_remove(ip);
2149
2150	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2151	error = xfs_read_agi(pag, tp, &agibp);
2152	if (error)
2153		return error;
2154
2155	return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
2156}
2157
2158/*
2159 * Look up the inode number specified and if it is not already marked XFS_ISTALE
2160 * mark it stale. We should only find clean inodes in this lookup that aren't
2161 * already stale.
2162 */
2163static void
2164xfs_ifree_mark_inode_stale(
2165	struct xfs_perag	*pag,
2166	struct xfs_inode	*free_ip,
2167	xfs_ino_t		inum)
2168{
2169	struct xfs_mount	*mp = pag->pag_mount;
2170	struct xfs_inode_log_item *iip;
2171	struct xfs_inode	*ip;
2172
2173retry:
2174	rcu_read_lock();
2175	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2176
2177	/* Inode not in memory, nothing to do */
2178	if (!ip) {
2179		rcu_read_unlock();
2180		return;
2181	}
2182
2183	/*
2184	 * Because this is an RCU protected lookup, we could find a recently
2185	 * freed or even reallocated inode during the lookup. We need to check
2186	 * under the i_flags_lock for a valid inode here. Skip it if it is not
2187	 * valid, the wrong inode or stale.
2188	 */
2189	spin_lock(&ip->i_flags_lock);
2190	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2191		goto out_iflags_unlock;
2192
2193	/*
2194	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2195	 * other inodes that we did not find in the list attached to the buffer
2196	 * and are not already marked stale. If we can't lock it, back off and
2197	 * retry.
2198	 */
2199	if (ip != free_ip) {
2200		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2201			spin_unlock(&ip->i_flags_lock);
2202			rcu_read_unlock();
2203			delay(1);
2204			goto retry;
2205		}
2206	}
2207	ip->i_flags |= XFS_ISTALE;
2208
2209	/*
2210	 * If the inode is flushing, it is already attached to the buffer.  All
2211	 * we needed to do here is mark the inode stale so buffer IO completion
2212	 * will remove it from the AIL.
2213	 */
2214	iip = ip->i_itemp;
2215	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
2216		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2217		ASSERT(iip->ili_last_fields);
2218		goto out_iunlock;
2219	}
2220
2221	/*
2222	 * Inodes not attached to the buffer can be released immediately.
2223	 * Everything else has to go through xfs_iflush_abort() on journal
2224	 * commit as the flock synchronises removal of the inode from the
2225	 * cluster buffer against inode reclaim.
2226	 */
2227	if (!iip || list_empty(&iip->ili_item.li_bio_list))
2228		goto out_iunlock;
2229
2230	__xfs_iflags_set(ip, XFS_IFLUSHING);
2231	spin_unlock(&ip->i_flags_lock);
2232	rcu_read_unlock();
2233
2234	/* we have a dirty inode in memory that has not yet been flushed. */
2235	spin_lock(&iip->ili_lock);
2236	iip->ili_last_fields = iip->ili_fields;
2237	iip->ili_fields = 0;
2238	iip->ili_fsync_fields = 0;
2239	spin_unlock(&iip->ili_lock);
2240	ASSERT(iip->ili_last_fields);
2241
2242	if (ip != free_ip)
2243		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2244	return;
2245
2246out_iunlock:
2247	if (ip != free_ip)
2248		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2249out_iflags_unlock:
2250	spin_unlock(&ip->i_flags_lock);
2251	rcu_read_unlock();
2252}
2253
2254/*
2255 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2256 * inodes that are in memory - they all must be marked stale and attached to
2257 * the cluster buffer.
2258 */
2259static int
2260xfs_ifree_cluster(
2261	struct xfs_trans	*tp,
2262	struct xfs_perag	*pag,
2263	struct xfs_inode	*free_ip,
2264	struct xfs_icluster	*xic)
2265{
2266	struct xfs_mount	*mp = free_ip->i_mount;
2267	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
2268	struct xfs_buf		*bp;
2269	xfs_daddr_t		blkno;
2270	xfs_ino_t		inum = xic->first_ino;
2271	int			nbufs;
2272	int			i, j;
2273	int			ioffset;
2274	int			error;
2275
2276	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2277
2278	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2279		/*
2280		 * The allocation bitmap tells us which inodes of the chunk were
2281		 * physically allocated. Skip the cluster if an inode falls into
2282		 * a sparse region.
2283		 */
2284		ioffset = inum - xic->first_ino;
2285		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2286			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2287			continue;
2288		}
2289
2290		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2291					 XFS_INO_TO_AGBNO(mp, inum));
2292
2293		/*
2294		 * We obtain and lock the backing buffer first in the process
2295		 * here to ensure dirty inodes attached to the buffer remain in
2296		 * the flushing state while we mark them stale.
2297		 *
2298		 * If we scan the in-memory inodes first, then buffer IO can
2299		 * complete before we get a lock on it, and hence we may fail
2300		 * to mark all the active inodes on the buffer stale.
2301		 */
2302		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2303				mp->m_bsize * igeo->blocks_per_cluster,
2304				XBF_UNMAPPED, &bp);
2305		if (error)
2306			return error;
2307
2308		/*
2309		 * This buffer may not have been correctly initialised as we
2310		 * didn't read it from disk. That's not important because we are
2311		 * only using it to mark the buffer as stale in the log, and to
2312		 * attach stale cached inodes on it. That means it will never be
2313		 * dispatched for IO. If it is, we want to know about it, and we
2314		 * want it to fail. We can achieve this by adding a write
2315		 * verifier to the buffer.
2316		 */
2317		bp->b_ops = &xfs_inode_buf_ops;
2318
2319		/*
2320		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2321		 * too. This requires lookups, and will skip inodes that we've
2322		 * already marked XFS_ISTALE.
2323		 */
2324		for (i = 0; i < igeo->inodes_per_cluster; i++)
2325			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
2326
2327		xfs_trans_stale_inode_buf(tp, bp);
2328		xfs_trans_binval(tp, bp);
2329	}
2330	return 0;
2331}
2332
2333/*
2334 * This is called to return an inode to the inode free list.  The inode should
2335 * already be truncated to 0 length and have no pages associated with it.  This
2336 * routine also assumes that the inode is already a part of the transaction.
2337 *
2338 * The on-disk copy of the inode will have been added to the list of unlinked
2339 * inodes in the AGI. We need to remove the inode from that list atomically with
2340 * respect to freeing it here.
2341 */
2342int
2343xfs_ifree(
2344	struct xfs_trans	*tp,
2345	struct xfs_inode	*ip)
2346{
2347	struct xfs_mount	*mp = ip->i_mount;
2348	struct xfs_perag	*pag;
2349	struct xfs_icluster	xic = { 0 };
2350	struct xfs_inode_log_item *iip = ip->i_itemp;
2351	int			error;
2352
2353	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2354	ASSERT(VFS_I(ip)->i_nlink == 0);
2355	ASSERT(ip->i_df.if_nextents == 0);
2356	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2357	ASSERT(ip->i_nblocks == 0);
2358
2359	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2360
2361	/*
2362	 * Free the inode first so that we guarantee that the AGI lock is going
2363	 * to be taken before we remove the inode from the unlinked list. This
2364	 * makes the AGI lock -> unlinked list modification order the same as
2365	 * used in O_TMPFILE creation.
2366	 */
2367	error = xfs_difree(tp, pag, ip->i_ino, &xic);
2368	if (error)
2369		goto out;
2370
2371	error = xfs_iunlink_remove(tp, pag, ip);
2372	if (error)
2373		goto out;
2374
2375	/*
2376	 * Free any local-format data sitting around before we reset the
2377	 * data fork to extents format.  Note that the attr fork data has
2378	 * already been freed by xfs_attr_inactive.
2379	 */
2380	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2381		kmem_free(ip->i_df.if_data);
2382		ip->i_df.if_data = NULL;
2383		ip->i_df.if_bytes = 0;
2384	}
2385
2386	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2387	ip->i_diflags = 0;
2388	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
2389	ip->i_forkoff = 0;		/* mark the attr fork not in use */
2390	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2391	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
2392		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2393
2394	/* Don't attempt to replay owner changes for a deleted inode */
2395	spin_lock(&iip->ili_lock);
2396	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2397	spin_unlock(&iip->ili_lock);
2398
2399	/*
2400	 * Bump the generation count so no one will be confused
2401	 * by reincarnations of this inode.
2402	 */
2403	VFS_I(ip)->i_generation++;
2404	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2405
2406	if (xic.deleted)
2407		error = xfs_ifree_cluster(tp, pag, ip, &xic);
2408out:
2409	xfs_perag_put(pag);
2410	return error;
2411}
2412
2413/*
2414 * This is called to unpin an inode.  The caller must have the inode locked
2415 * in at least shared mode so that the buffer cannot be subsequently pinned
2416 * once someone is waiting for it to be unpinned.
2417 */
2418static void
2419xfs_iunpin(
2420	struct xfs_inode	*ip)
2421{
2422	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2423
2424	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2425
2426	/* Give the log a push to start the unpinning I/O */
2427	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2428
2429}
2430
2431static void
2432__xfs_iunpin_wait(
2433	struct xfs_inode	*ip)
2434{
2435	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2436	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2437
2438	xfs_iunpin(ip);
2439
2440	do {
2441		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2442		if (xfs_ipincount(ip))
2443			io_schedule();
2444	} while (xfs_ipincount(ip));
2445	finish_wait(wq, &wait.wq_entry);
2446}
2447
2448void
2449xfs_iunpin_wait(
2450	struct xfs_inode	*ip)
2451{
2452	if (xfs_ipincount(ip))
2453		__xfs_iunpin_wait(ip);
2454}
2455
2456/*
2457 * Removing an inode from the namespace involves removing the directory entry
2458 * and dropping the link count on the inode. Removing the directory entry can
2459 * result in locking an AGF (directory blocks were freed) and removing a link
2460 * count can result in placing the inode on an unlinked list which results in
2461 * locking an AGI.
2462 *
2463 * The big problem here is that we have an ordering constraint on AGF and AGI
2464 * locking - inode allocation locks the AGI, then can allocate a new extent for
2465 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2466 * removes the inode from the unlinked list, requiring that we lock the AGI
2467 * first, and then freeing the inode can result in an inode chunk being freed
2468 * and hence freeing disk space requiring that we lock an AGF.
2469 *
2470 * Hence the ordering that is imposed by other parts of the code is AGI before
2471 * AGF. This means we cannot remove the directory entry before we drop the inode
2472 * reference count and put it on the unlinked list as this results in a lock
2473 * order of AGF then AGI, and this can deadlock against inode allocation and
2474 * freeing. Therefore we must drop the link counts before we remove the
2475 * directory entry.
2476 *
2477 * This is still safe from a transactional point of view - it is not until we
2478 * get to xfs_defer_finish() that we have the possibility of multiple
2479 * transactions in this operation. Hence as long as we remove the directory
2480 * entry and drop the link count in the first transaction of the remove
2481 * operation, there are no transactional constraints on the ordering here.
2482 */
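/*
 * A minimal sketch of the order this imposes on xfs_remove() below, with
 * annotations noting which AG header each step may end up locking:
 *
 *	xfs_droplink(tp, ip);			may lock the AGI
 *	xfs_dir_removename(tp, dp, name, ...);	may lock the AGF
 *
 * which stays consistent with the global AGI-before-AGF ordering.
 */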
2483int
2484xfs_remove(
2485	xfs_inode_t             *dp,
2486	struct xfs_name		*name,
2487	xfs_inode_t		*ip)
2488{
2489	xfs_mount_t		*mp = dp->i_mount;
2490	xfs_trans_t             *tp = NULL;
2491	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2492	int			dontcare;
2493	int                     error = 0;
2494	uint			resblks;
2495
2496	trace_xfs_remove(dp, name);
2497
2498	if (xfs_is_shutdown(mp))
2499		return -EIO;
2500	if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
2501		return -EIO;
2502
2503	error = xfs_qm_dqattach(dp);
2504	if (error)
2505		goto std_return;
2506
2507	error = xfs_qm_dqattach(ip);
2508	if (error)
2509		goto std_return;
2510
2511	/*
2512	 * We try to get the real space reservation first, allowing for
2513	 * directory btree deletion(s) implying possible bmap insert(s).  If we
2514	 * can't get the space reservation then we use 0 instead, and avoid the
2515	 * bmap btree insert(s) in the directory code: if a bmap insert would
2516	 * otherwise be needed, the LAST block is trimmed from the directory instead.
2517	 *
2518	 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
2519	 * the directory code can handle a reservationless update and we don't
2520	 * want to prevent a user from trying to free space by deleting things.
2521	 */
2522	resblks = XFS_REMOVE_SPACE_RES(mp);
2523	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
2524			&tp, &dontcare);
2525	if (error) {
2526		ASSERT(error != -ENOSPC);
2527		goto std_return;
2528	}
2529
2530	/*
2531	 * If we're removing a directory perform some additional validation.
2532	 */
2533	if (is_dir) {
2534		ASSERT(VFS_I(ip)->i_nlink >= 2);
2535		if (VFS_I(ip)->i_nlink != 2) {
2536			error = -ENOTEMPTY;
2537			goto out_trans_cancel;
2538		}
2539		if (!xfs_dir_isempty(ip)) {
2540			error = -ENOTEMPTY;
2541			goto out_trans_cancel;
2542		}
2543
2544		/* Drop the link from ip's "..".  */
2545		error = xfs_droplink(tp, dp);
2546		if (error)
2547			goto out_trans_cancel;
2548
2549		/* Drop the "." link from ip to self.  */
2550		error = xfs_droplink(tp, ip);
2551		if (error)
2552			goto out_trans_cancel;
2553
2554		/*
2555		 * Point the unlinked child directory's ".." entry to the root
2556		 * directory to eliminate back-references to inodes that may
2557		 * get freed before the child directory is closed.  If the fs
2558		 * gets shrunk, this can lead to dirent inode validation errors.
2559		 */
2560		if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
2561			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
2562					tp->t_mountp->m_sb.sb_rootino, 0);
2563			if (error)
2564				goto out_trans_cancel;
2565		}
2566	} else {
2567		/*
2568		 * When removing a non-directory we need to log the parent
2569		 * inode here.  For a directory this is done implicitly
2570		 * by the xfs_droplink call for the ".." entry.
2571		 */
2572		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2573	}
2574	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2575
2576	/* Drop the link from dp to ip. */
2577	error = xfs_droplink(tp, ip);
2578	if (error)
2579		goto out_trans_cancel;
2580
2581	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2582	if (error) {
2583		ASSERT(error != -ENOENT);
2584		goto out_trans_cancel;
2585	}
2586
2587	/*
2588	 * If this is a synchronous mount, make sure that the
2589	 * remove transaction goes to disk before returning to
2590	 * the user.
2591	 */
2592	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
2593		xfs_trans_set_sync(tp);
2594
2595	error = xfs_trans_commit(tp);
2596	if (error)
2597		goto std_return;
2598
2599	if (is_dir && xfs_inode_is_filestream(ip))
2600		xfs_filestream_deassociate(ip);
2601
2602	return 0;
2603
2604 out_trans_cancel:
2605	xfs_trans_cancel(tp);
2606 std_return:
2607	return error;
2608}
2609
2610/*
2611 * Enter all inodes for a rename transaction into a sorted array.
2612 */
2613#define __XFS_SORT_INODES	5
2614STATIC void
2615xfs_sort_for_rename(
2616	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
2617	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
2618	struct xfs_inode	*ip1,	/* in: inode of old entry */
2619	struct xfs_inode	*ip2,	/* in: inode of new entry */
2620	struct xfs_inode	*wip,	/* in: whiteout inode */
2621	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
2622	int			*num_inodes)  /* in/out: inodes in array */
2623{
2624	int			i, j;
2625
2626	ASSERT(*num_inodes == __XFS_SORT_INODES);
2627	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2628
2629	/*
2630	 * i_tab contains a list of pointers to inodes.  We initialize
2631	 * the table here & we'll sort it.  We will then use it to
2632	 * order the acquisition of the inode locks.
2633	 *
2634	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2635	 */
2636	i = 0;
2637	i_tab[i++] = dp1;
2638	i_tab[i++] = dp2;
2639	i_tab[i++] = ip1;
2640	if (ip2)
2641		i_tab[i++] = ip2;
2642	if (wip)
2643		i_tab[i++] = wip;
2644	*num_inodes = i;
2645
2646	/*
2647	 * Sort the elements via bubble sort.  (Remember, there are at
2648	 * most 5 elements to sort, so this is adequate.)
2649	 */
2650	for (i = 0; i < *num_inodes; i++) {
2651		for (j = 1; j < *num_inodes; j++) {
2652			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2653				struct xfs_inode *temp = i_tab[j];
2654				i_tab[j] = i_tab[j-1];
2655				i_tab[j-1] = temp;
2656			}
2657		}
2658	}
2659}
2660
2661static int
2662xfs_finish_rename(
2663	struct xfs_trans	*tp)
2664{
2665	/*
2666	 * If this is a synchronous mount, make sure that the rename transaction
2667	 * goes to disk before returning to the user.
2668	 */
2669	if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
2670		xfs_trans_set_sync(tp);
2671
2672	return xfs_trans_commit(tp);
2673}
2674
2675/*
2676 * xfs_cross_rename()
2677 *
2678 * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
2679 */
2680STATIC int
2681xfs_cross_rename(
2682	struct xfs_trans	*tp,
2683	struct xfs_inode	*dp1,
2684	struct xfs_name		*name1,
2685	struct xfs_inode	*ip1,
2686	struct xfs_inode	*dp2,
2687	struct xfs_name		*name2,
2688	struct xfs_inode	*ip2,
2689	int			spaceres)
2690{
2691	int		error = 0;
2692	int		ip1_flags = 0;
2693	int		ip2_flags = 0;
2694	int		dp2_flags = 0;
2695
2696	/* Swap inode number for dirent in first parent */
2697	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2698	if (error)
2699		goto out_trans_abort;
2700
2701	/* Swap inode number for dirent in second parent */
2702	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2703	if (error)
2704		goto out_trans_abort;
2705
2706	/*
2707	 * If we're renaming one or more directories across different parents,
2708	 * update the respective ".." entries (and link counts) to match the new
2709	 * parents.
2710	 */
2711	if (dp1 != dp2) {
2712		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2713
2714		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2715			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2716						dp1->i_ino, spaceres);
2717			if (error)
2718				goto out_trans_abort;
2719
2720			/* transfer ip2 ".." reference to dp1 */
2721			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2722				error = xfs_droplink(tp, dp2);
2723				if (error)
2724					goto out_trans_abort;
2725				xfs_bumplink(tp, dp1);
2726			}
2727
2728			/*
2729			 * Although ip1 isn't changed here, userspace needs
2730			 * to be notified about the change, so that applications
2731			 * relying on it (like backup tools) will properly
2732			 * notice the change.
2733			 */
2734			ip1_flags |= XFS_ICHGTIME_CHG;
2735			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2736		}
2737
2738		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2739			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2740						dp2->i_ino, spaceres);
2741			if (error)
2742				goto out_trans_abort;
2743
2744			/* transfer ip1 ".." reference to dp2 */
2745			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2746				error = xfs_droplink(tp, dp1);
2747				if (error)
2748					goto out_trans_abort;
2749				xfs_bumplink(tp, dp2);
2750			}
2751
2752			/*
2753			 * Although ip2 isn't changed here, userspace needs
2754			 * to be notified about the change, so that applications
2755			 * relying on it (like backup tools) will properly
2756			 * notice the change.
2757			 */
2758			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2759			ip2_flags |= XFS_ICHGTIME_CHG;
2760		}
2761	}
2762
2763	if (ip1_flags) {
2764		xfs_trans_ichgtime(tp, ip1, ip1_flags);
2765		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2766	}
2767	if (ip2_flags) {
2768		xfs_trans_ichgtime(tp, ip2, ip2_flags);
2769		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2770	}
2771	if (dp2_flags) {
2772		xfs_trans_ichgtime(tp, dp2, dp2_flags);
2773		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2774	}
2775	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2776	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2777	return xfs_finish_rename(tp);
2778
2779out_trans_abort:
2780	xfs_trans_cancel(tp);
2781	return error;
2782}
2783
2784/*
2785 * xfs_rename_alloc_whiteout()
2786 *
2787 * Return a referenced, unlinked, unlocked inode that can be used as a
2788 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
2789 * crash between allocating the inode and linking it into the rename transaction
2790 * recovery will free the inode and we won't leak it.
2791 */
2792static int
2793xfs_rename_alloc_whiteout(
2794	struct mnt_idmap	*idmap,
2795	struct xfs_name		*src_name,
2796	struct xfs_inode	*dp,
2797	struct xfs_inode	**wip)
2798{
2799	struct xfs_inode	*tmpfile;
2800	struct qstr		name;
2801	int			error;
2802
2803	error = xfs_create_tmpfile(idmap, dp, S_IFCHR | WHITEOUT_MODE,
2804				   &tmpfile);
2805	if (error)
2806		return error;
2807
2808	name.name = src_name->name;
2809	name.len = src_name->len;
2810	error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
2811	if (error) {
2812		xfs_finish_inode_setup(tmpfile);
2813		xfs_irele(tmpfile);
2814		return error;
2815	}
2816
2817	/*
2818	 * Prepare the tmpfile inode as if it were created through the VFS.
2819	 * Complete the inode setup and flag it as linkable.  nlink is already
2820	 * zero, so we can skip the drop_nlink.
2821	 */
2822	xfs_setup_iops(tmpfile);
2823	xfs_finish_inode_setup(tmpfile);
2824	VFS_I(tmpfile)->i_state |= I_LINKABLE;
2825
2826	*wip = tmpfile;
2827	return 0;
2828}
2829
2830/*
2831 * xfs_rename
2832 */
2833int
2834xfs_rename(
2835	struct mnt_idmap	*idmap,
2836	struct xfs_inode	*src_dp,
2837	struct xfs_name		*src_name,
2838	struct xfs_inode	*src_ip,
2839	struct xfs_inode	*target_dp,
2840	struct xfs_name		*target_name,
2841	struct xfs_inode	*target_ip,
2842	unsigned int		flags)
2843{
2844	struct xfs_mount	*mp = src_dp->i_mount;
2845	struct xfs_trans	*tp;
2846	struct xfs_inode	*wip = NULL;		/* whiteout inode */
2847	struct xfs_inode	*inodes[__XFS_SORT_INODES];
2848	int			i;
2849	int			num_inodes = __XFS_SORT_INODES;
2850	bool			new_parent = (src_dp != target_dp);
2851	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
2852	int			spaceres;
2853	bool			retried = false;
2854	int			error, nospace_error = 0;
2855
2856	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2857
2858	if ((flags & RENAME_EXCHANGE) && !target_ip)
2859		return -EINVAL;
2860
2861	/*
2862	 * If we are doing a whiteout operation, allocate the whiteout inode
2863	 * we will be placing at the target and ensure the type is set
2864	 * appropriately.
2865	 */
2866	if (flags & RENAME_WHITEOUT) {
2867		error = xfs_rename_alloc_whiteout(idmap, src_name,
2868						  target_dp, &wip);
2869		if (error)
2870			return error;
2871
2872		/* setup target dirent info as whiteout */
2873		src_name->type = XFS_DIR3_FT_CHRDEV;
2874	}
2875
2876	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
2877				inodes, &num_inodes);
2878
2879retry:
2880	nospace_error = 0;
2881	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2882	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
2883	if (error == -ENOSPC) {
2884		nospace_error = error;
2885		spaceres = 0;
2886		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2887				&tp);
2888	}
2889	if (error)
2890		goto out_release_wip;
2891
2892	/*
2893	 * Attach the dquots to the inodes
2894	 */
2895	error = xfs_qm_vop_rename_dqattach(inodes);
2896	if (error)
2897		goto out_trans_cancel;
2898
2899	/*
2900	 * Lock all the participating inodes. Depending upon whether
2901	 * the target_name exists in the target directory, and
2902	 * whether the target directory is the same as the source
2903	 * directory, we can lock from 2 to 5 inodes.
2904	 */
2905	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2906
2907	/*
2908	 * Join all the inodes to the transaction. From this point on,
2909	 * we can rely on either trans_commit or trans_cancel to unlock
2910	 * them.
2911	 */
2912	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
2913	if (new_parent)
2914		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
2915	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2916	if (target_ip)
2917		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
2918	if (wip)
2919		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
2920
2921	/*
2922	 * If we are using project inheritance, we only allow renames
2923	 * into our tree when the project IDs are the same; else the
2924	 * tree quota mechanism would be circumvented.
2925	 */
2926	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
2927		     target_dp->i_projid != src_ip->i_projid)) {
2928		error = -EXDEV;
2929		goto out_trans_cancel;
2930	}
2931
2932	/* RENAME_EXCHANGE is unique from here on. */
2933	if (flags & RENAME_EXCHANGE)
2934		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
2935					target_dp, target_name, target_ip,
2936					spaceres);
2937
2938	/*
2939	 * Try to reserve quota to handle an expansion of the target directory.
2940	 * We'll allow the rename to continue in reservationless mode if we hit
2941	 * a space usage constraint.  If we trigger reservationless mode, save
2942	 * the errno if there isn't any free space in the target directory.
2943	 */
2944	if (spaceres != 0) {
2945		error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
2946				0, false);
2947		if (error == -EDQUOT || error == -ENOSPC) {
2948			if (!retried) {
2949				xfs_trans_cancel(tp);
2950				xfs_blockgc_free_quota(target_dp, 0);
2951				retried = true;
2952				goto retry;
2953			}
2954
2955			nospace_error = error;
2956			spaceres = 0;
2957			error = 0;
2958		}
2959		if (error)
2960			goto out_trans_cancel;
2961	}
2962
2963	/*
2964	 * Check for expected errors before we dirty the transaction
2965	 * so we can return an error without a transaction abort.
2966	 */
2967	if (target_ip == NULL) {
2968		/*
2969		 * If there's no space reservation, check the entry will
2970		 * fit before actually inserting it.
2971		 */
2972		if (!spaceres) {
2973			error = xfs_dir_canenter(tp, target_dp, target_name);
2974			if (error)
2975				goto out_trans_cancel;
2976		}
2977	} else {
2978		/*
2979		 * If the target exists and it's a directory, check whether
2980		 * it can be destroyed.
2981		 */
2982		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
2983		    (!xfs_dir_isempty(target_ip) ||
2984		     (VFS_I(target_ip)->i_nlink > 2))) {
2985			error = -EEXIST;
2986			goto out_trans_cancel;
2987		}
2988	}
2989
2990	/*
2991	 * Lock the AGI buffers we need to handle bumping the nlink of the
2992	 * whiteout inode off the unlinked list and to handle dropping the
2993	 * nlink of the target inode.  Per locking order rules, do this in
2994	 * increasing AG order and before directory block allocation tries to
2995	 * grab AGFs because we grab AGIs before AGFs.
2996	 *
2997	 * The (vfs) caller must ensure that if src is a directory then
2998	 * target_ip is either null or an empty directory.
2999	 */
3000	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
3001		if (inodes[i] == wip ||
3002		    (inodes[i] == target_ip &&
3003		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
3004			struct xfs_perag	*pag;
3005			struct xfs_buf		*bp;
3006
3007			pag = xfs_perag_get(mp,
3008					XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
3009			error = xfs_read_agi(pag, tp, &bp);
3010			xfs_perag_put(pag);
3011			if (error)
3012				goto out_trans_cancel;
3013		}
3014	}
3015
3016	/*
3017	 * Directory entry creation below may acquire the AGF. Remove
3018	 * the whiteout from the unlinked list first to preserve correct
3019	 * AGI/AGF locking order. This dirties the transaction so failures
3020	 * after this point will abort and log recovery will clean up the
3021	 * mess.
3022	 *
3023	 * For whiteouts, we need to bump the link count on the whiteout
3024	 * inode. After this point we have a real link, so clear the tmpfile
3025	 * state flag from the inode so it doesn't accidentally get misused
3026	 * in future.
3027	 */
3028	if (wip) {
3029		struct xfs_perag	*pag;
3030
3031		ASSERT(VFS_I(wip)->i_nlink == 0);
3032
3033		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
3034		error = xfs_iunlink_remove(tp, pag, wip);
3035		xfs_perag_put(pag);
3036		if (error)
3037			goto out_trans_cancel;
3038
3039		xfs_bumplink(tp, wip);
3040		VFS_I(wip)->i_state &= ~I_LINKABLE;
3041	}
3042
3043	/*
3044	 * Set up the target.
3045	 */
3046	if (target_ip == NULL) {
3047		/*
3048		 * If target does not exist and the rename crosses
3049		 * directories, adjust the target directory link count
3050		 * to account for the ".." reference from the new entry.
3051		 */
3052		error = xfs_dir_createname(tp, target_dp, target_name,
3053					   src_ip->i_ino, spaceres);
3054		if (error)
3055			goto out_trans_cancel;
3056
3057		xfs_trans_ichgtime(tp, target_dp,
3058					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3059
3060		if (new_parent && src_is_directory) {
3061			xfs_bumplink(tp, target_dp);
3062		}
3063	} else { /* target_ip != NULL */
3064		/*
3065		 * Link the source inode under the target name.
3066		 * If the source inode is a directory and we are moving
3067		 * it across directories, its ".." entry will be
3068		 * inconsistent until we replace that down below.
3069		 *
3070		 * In case there is already an entry with the same
3071		 * name at the destination directory, remove it first.
3072		 */
3073		error = xfs_dir_replace(tp, target_dp, target_name,
3074					src_ip->i_ino, spaceres);
3075		if (error)
3076			goto out_trans_cancel;
3077
3078		xfs_trans_ichgtime(tp, target_dp,
3079					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3080
3081		/*
3082		 * Decrement the link count on the target since the target
3083		 * dir no longer points to it.
3084		 */
3085		error = xfs_droplink(tp, target_ip);
3086		if (error)
3087			goto out_trans_cancel;
3088
3089		if (src_is_directory) {
3090			/*
3091			 * Drop the link from the old "." entry.
3092			 */
3093			error = xfs_droplink(tp, target_ip);
3094			if (error)
3095				goto out_trans_cancel;
3096		}
3097	} /* target_ip != NULL */
3098
3099	/*
3100	 * Remove the source.
3101	 */
3102	if (new_parent && src_is_directory) {
3103		/*
3104		 * Rewrite the ".." entry to point to the new
3105		 * directory.
3106		 */
3107		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3108					target_dp->i_ino, spaceres);
3109		ASSERT(error != -EEXIST);
3110		if (error)
3111			goto out_trans_cancel;
3112	}
3113
3114	/*
3115	 * We always want to hit the ctime on the source inode.
3116	 *
3117	 * This isn't strictly required by the standards since the source
3118	 * inode isn't really being changed, but old unix file systems did
3119	 * it and some incremental backup programs won't work without it.
3120	 */
3121	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3122	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3123
3124	/*
3125	 * Adjust the link count on src_dp.  This is necessary when
3126	 * renaming a directory, either within one parent when
3127	 * the target existed, or across two parent directories.
3128	 */
3129	if (src_is_directory && (new_parent || target_ip != NULL)) {
3130
3131		/*
3132		 * Decrement link count on src_directory since the
3133		 * entry that's moved no longer points to it.
3134		 */
3135		error = xfs_droplink(tp, src_dp);
3136		if (error)
3137			goto out_trans_cancel;
3138	}
3139
3140	/*
3141	 * For whiteouts, we only need to update the source dirent with the
3142	 * inode number of the whiteout inode rather than removing it
3143	 * altogether.
3144	 */
3145	if (wip)
3146		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3147					spaceres);
3148	else
3149		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3150					   spaceres);
3151
3152	if (error)
3153		goto out_trans_cancel;
3154
3155	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3156	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3157	if (new_parent)
3158		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3159
3160	error = xfs_finish_rename(tp);
3161	if (wip)
3162		xfs_irele(wip);
3163	return error;
3164
3165out_trans_cancel:
3166	xfs_trans_cancel(tp);
3167out_release_wip:
3168	if (wip)
3169		xfs_irele(wip);
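	/*
	 * (Illustrative note, not part of the original source:) if we
	 * failed with -ENOSPC and an earlier reservation attempt
	 * recorded a more specific out-of-space error, report that
	 * instead so the caller sees the root cause.
	 */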
3170	if (error == -ENOSPC && nospace_error)
3171		error = nospace_error;
3172	return error;
3173}
3174
3175static int
3176xfs_iflush(
3177	struct xfs_inode	*ip,
3178	struct xfs_buf		*bp)
3179{
3180	struct xfs_inode_log_item *iip = ip->i_itemp;
3181	struct xfs_dinode	*dip;
3182	struct xfs_mount	*mp = ip->i_mount;
3183	int			error;
3184
3185	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3186	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3187	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3188	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3189	ASSERT(iip->ili_item.li_buf == bp);
3190
3191	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3192
3193	/*
3194	 * We don't flush the inode if any of the following checks fail, but we
3195	 * do still update the log item and attach to the backing buffer as if
3196	 * the flush happened. This is a formality to facilitate predictable
3197	 * error handling as the caller will shut down and fail the buffer.
3198	 */
3199	error = -EFSCORRUPTED;
3200	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3201			       mp, XFS_ERRTAG_IFLUSH_1)) {
3202		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3203			"%s: Bad inode %llu magic number 0x%x, ptr "PTR_FMT,
3204			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3205		goto flush_out;
3206	}
3207	if (S_ISREG(VFS_I(ip)->i_mode)) {
3208		if (XFS_TEST_ERROR(
3209		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3210		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3211		    mp, XFS_ERRTAG_IFLUSH_3)) {
3212			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3213				"%s: Bad regular inode %llu, ptr "PTR_FMT,
3214				__func__, ip->i_ino, ip);
3215			goto flush_out;
3216		}
3217	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3218		if (XFS_TEST_ERROR(
3219		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3220		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3221		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3222		    mp, XFS_ERRTAG_IFLUSH_4)) {
3223			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3224				"%s: Bad directory inode %llu, ptr "PTR_FMT,
3225				__func__, ip->i_ino, ip);
3226			goto flush_out;
3227		}
3228	}
3229	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
3230				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3231		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3232			"%s: detected corrupt incore inode %llu, "
3233			"total extents = %llu nblocks = %lld, ptr "PTR_FMT,
3234			__func__, ip->i_ino,
3235			ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
3236			ip->i_nblocks, ip);
3237		goto flush_out;
3238	}
3239	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
3240				mp, XFS_ERRTAG_IFLUSH_6)) {
3241		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3242			"%s: bad inode %llu, forkoff 0x%x, ptr "PTR_FMT,
3243			__func__, ip->i_ino, ip->i_forkoff, ip);
3244		goto flush_out;
3245	}
3246
3247	/*
3248	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3249	 * count for correct sequencing.  We bump the flush iteration count so
3250	 * we can detect flushes which postdate a log record during recovery.
3251	 * This is redundant as we now log every change and hence this can't
3252	 * happen, but we still need to do it to ensure backwards compatibility
3253	 * with old kernels that predate logging all inode changes.
3254	 */
3255	if (!xfs_has_v3inodes(mp))
3256		ip->i_flushiter++;
3257
3258	/*
3259	 * If there are inline format data / attr forks attached to this inode,
3260	 * make sure they are not corrupt.
3261	 */
3262	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3263	    xfs_ifork_verify_local_data(ip))
3264		goto flush_out;
3265	if (xfs_inode_has_attr_fork(ip) &&
3266	    ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
3267	    xfs_ifork_verify_local_attr(ip))
3268		goto flush_out;
3269
3270	/*
3271	 * Copy the dirty parts of the inode into the on-disk inode.  We always
3272	 * copy out the core of the inode, because if the inode is dirty at all
3273	 * the core must be.
3274	 */
3275	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3276
3277	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3278	if (!xfs_has_v3inodes(mp)) {
3279		if (ip->i_flushiter == DI_MAX_FLUSH)
3280			ip->i_flushiter = 0;
3281	}
3282
3283	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3284	if (xfs_inode_has_attr_fork(ip))
3285		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3286
3287	/*
3288	 * We've recorded everything logged in the inode, so we'd like to clear
3289	 * the ili_fields bits so we don't log and flush things unnecessarily.
3290	 * However, we can't stop logging all this information until the data
3291	 * we've copied into the disk buffer is written to disk.  If we did we
3292	 * might overwrite the copy of the inode in the log with all the data
3293	 * after re-logging only part of it, and in the face of a crash we
3294	 * wouldn't have all the data we need to recover.
3295	 *
3296	 * What we do is move the bits to the ili_last_fields field.  When
3297	 * logging the inode, these bits are moved back to the ili_fields field.
3298	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3299	 * we know that the information those bits represent is permanently on
3300	 * disk.  As long as the flush completes before the inode is logged
3301	 * again, then both ili_fields and ili_last_fields will be cleared.
3302	 */
3303	error = 0;
3304flush_out:
3305	spin_lock(&iip->ili_lock);
3306	iip->ili_last_fields = iip->ili_fields;
3307	iip->ili_fields = 0;
3308	iip->ili_fsync_fields = 0;
3309	spin_unlock(&iip->ili_lock);
3310
3311	/*
3312	 * Store the current LSN of the inode so that we can tell whether the
3313	 * item has moved in the AIL from xfs_buf_inode_iodone().
3314	 */
3315	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3316				&iip->ili_item.li_lsn);
3317
3318	/* generate the checksum. */
3319	xfs_dinode_calc_crc(mp, dip);
3320	return error;
3321}
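/*
 * Illustrative summary of the dirty-bit handoff described above (not part
 * of the original source):
 *
 *	xfs_trans_log_inode()	 sets bits in ili_fields
 *	xfs_iflush()		 moves them into ili_last_fields
 *	xfs_buf_inode_iodone()	 clears ili_last_fields once the buffer
 *				 write is on stable storage
 *	relogging before iodone	 moves the bits back into ili_fields
 */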
3322
3323/*
3324 * Non-blocking flush of dirty inode metadata into the backing buffer.
3325 *
3326 * The caller must have a reference to the inode and hold the cluster buffer
3327 * locked. The function will walk all the inodes attached to the cluster
3328 * buffer that it can lock without blocking, and flush them to the buffer.
3329 *
3330 * On successful flushing of at least one inode, the caller must write out the
3331 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3332 * the caller needs to release the buffer. On failure, the filesystem will be
3333 * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
3334 * will be returned.
3335 */
3336int
3337xfs_iflush_cluster(
3338	struct xfs_buf		*bp)
3339{
3340	struct xfs_mount	*mp = bp->b_mount;
3341	struct xfs_log_item	*lip, *n;
3342	struct xfs_inode	*ip;
3343	struct xfs_inode_log_item *iip;
3344	int			clcount = 0;
3345	int			error = 0;
3346
3347	/*
3348	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3349	 * will remove itself from the list.
3350	 */
3351	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3352		iip = (struct xfs_inode_log_item *)lip;
3353		ip = iip->ili_inode;
3354
3355		/*
3356		 * Quick and dirty check to avoid locks if possible.
3357		 */
3358		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
3359			continue;
3360		if (xfs_ipincount(ip))
3361			continue;
3362
3363		/*
3364		 * The inode is still attached to the buffer, which means it is
3365		 * dirty but reclaim might try to grab it. Check carefully for
3366		 * that, and grab the ilock while still holding the i_flags_lock
3367		 * to guarantee reclaim will not be able to reclaim this inode
3368		 * once we drop the i_flags_lock.
3369		 */
3370		spin_lock(&ip->i_flags_lock);
3371		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3372		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
3373			spin_unlock(&ip->i_flags_lock);
3374			continue;
3375		}
3376
3377		/*
3378		 * ILOCK will pin the inode against reclaim and prevent
3379		 * concurrent transactions modifying the inode while we are
3380		 * flushing the inode. If we get the lock, set the flushing
3381		 * state before we drop the i_flags_lock.
3382		 */
3383		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3384			spin_unlock(&ip->i_flags_lock);
3385			continue;
3386		}
3387		__xfs_iflags_set(ip, XFS_IFLUSHING);
3388		spin_unlock(&ip->i_flags_lock);
3389
3390		/*
3391		 * Abort flushing this inode if we are shut down because the
3392		 * inode may not currently be in the AIL. This can occur when
3393	 * a log I/O failure unpins the inode without inserting it into the
3394		 * AIL, leaving a dirty/unpinned inode attached to the buffer
3395		 * that otherwise looks like it should be flushed.
3396		 */
3397		if (xlog_is_shutdown(mp->m_log)) {
3398			xfs_iunpin_wait(ip);
3399			xfs_iflush_abort(ip);
3400			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3401			error = -EIO;
3402			continue;
3403		}
3404
3405		/* don't block waiting on a log force to unpin dirty inodes */
3406		if (xfs_ipincount(ip)) {
3407			xfs_iflags_clear(ip, XFS_IFLUSHING);
3408			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3409			continue;
3410		}
3411
3412		if (!xfs_inode_clean(ip))
3413			error = xfs_iflush(ip, bp);
3414		else
3415			xfs_iflags_clear(ip, XFS_IFLUSHING);
3416		xfs_iunlock(ip, XFS_ILOCK_SHARED);
3417		if (error)
3418			break;
3419		clcount++;
3420	}
3421
3422	if (error) {
3423		/*
3424		 * Shutdown first so we kill the log before we release this
3425		 * buffer. If it is an INODE_ALLOC buffer and pins the tail
3426		 * of the log, failing it before the _log_ is shut down can
3427		 * result in the log tail being moved forward in the journal
3428		 * on disk because log writes can still be taking place. Hence
3429		 * unpinning the tail will allow the ICREATE intent to be
3430	 * removed from the log and recovery will fail with uninitialised
3431		 * inode cluster buffers.
3432		 */
3433		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3434		bp->b_flags |= XBF_ASYNC;
3435		xfs_buf_ioend_fail(bp);
3436		return error;
3437	}
3438
3439	if (!clcount)
3440		return -EAGAIN;
3441
3442	XFS_STATS_INC(mp, xs_icluster_flushcnt);
3443	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3444	return 0;
3445
3446}
3447
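/*
 * Illustrative caller sketch, not part of the original source.  It assumes
 * the usual buffer helpers xfs_bwrite()/xfs_buf_relse(); in the real kernel
 * this is driven from the inode item push path.
 */
static int example_flush_inode_cluster(struct xfs_buf *bp)
{
	int	error;

	error = xfs_iflush_cluster(bp);
	if (error == -EAGAIN) {
		/* Nothing was flushed; just release the buffer. */
		xfs_buf_relse(bp);
		return 0;
	}
	if (error) {
		/* Filesystem is shut down; buffer already released. */
		return error;
	}

	/* At least one inode was flushed - write the buffer back. */
	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	return error;
}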
3448/* Release an inode. */
3449void
3450xfs_irele(
3451	struct xfs_inode	*ip)
3452{
3453	trace_xfs_irele(ip, _RET_IP_);
3454	iput(VFS_I(ip));
3455}
3456
3457/*
3458 * Ensure all committed transactions touching the inode are written to the log.
3459 */
3460int
3461xfs_log_force_inode(
3462	struct xfs_inode	*ip)
3463{
3464	xfs_csn_t		seq = 0;
3465
3466	xfs_ilock(ip, XFS_ILOCK_SHARED);
3467	if (xfs_ipincount(ip))
3468		seq = ip->i_itemp->ili_commit_seq;
3469	xfs_iunlock(ip, XFS_ILOCK_SHARED);
3470
3471	if (!seq)
3472		return 0;
3473	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
3474}
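/*
 * Illustrative fsync-style use (an assumption, not from this file):
 *
 *	error = xfs_log_force_inode(ip);
 *
 * i.e. force everything committed against this inode to stable storage
 * before reporting success to userspace.  If the inode is not pinned,
 * nothing needs forcing and the call returns 0 immediately.
 */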
3475
3476/*
3477 * Grab the exclusive iolock for a data copy from src to dest, making sure to
3478 * abide by the vfs locking order (lowest pointer value goes first) and break the
3479 * layout leases before proceeding.  The loop is needed because we cannot call
3480 * the blocking break_layout() with the iolocks held, and therefore have to
3481 * back out both locks.
3482 */
3483static int
3484xfs_iolock_two_inodes_and_break_layout(
3485	struct inode		*src,
3486	struct inode		*dest)
3487{
3488	int			error;
3489
3490	if (src > dest)
3491		swap(src, dest);
3492
3493retry:
3494	/* Wait to break both inodes' layouts before we start locking. */
3495	error = break_layout(src, true);
3496	if (error)
3497		return error;
3498	if (src != dest) {
3499		error = break_layout(dest, true);
3500		if (error)
3501			return error;
3502	}
3503
3504	/* Lock one inode and make sure nobody got in and leased it. */
3505	inode_lock(src);
3506	error = break_layout(src, false);
3507	if (error) {
3508		inode_unlock(src);
3509		if (error == -EWOULDBLOCK)
3510			goto retry;
3511		return error;
3512	}
3513
3514	if (src == dest)
3515		return 0;
3516
3517	/* Lock the other inode and make sure nobody got in and leased it. */
3518	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3519	error = break_layout(dest, false);
3520	if (error) {
3521		inode_unlock(src);
3522		inode_unlock(dest);
3523		if (error == -EWOULDBLOCK)
3524			goto retry;
3525		return error;
3526	}
3527
3528	return 0;
3529}
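/*
 * (Illustrative recap, not part of the original source:) the protocol
 * above is: break both leases with no locks held, take the iolocks in
 * address order, re-check each lease non-blocking, and on -EWOULDBLOCK
 * drop every lock taken so far and restart from the top.
 */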
3530
3531static int
3532xfs_mmaplock_two_inodes_and_break_dax_layout(
3533	struct xfs_inode	*ip1,
3534	struct xfs_inode	*ip2)
3535{
3536	int			error;
3537	bool			retry;
3538	struct page		*page;
3539
3540	if (ip1->i_ino > ip2->i_ino)
3541		swap(ip1, ip2);
3542
3543again:
3544	retry = false;
3545	/* Lock the first inode */
3546	xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3547	error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
3548	if (error || retry) {
3549		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3550		if (error == 0 && retry)
3551			goto again;
3552		return error;
3553	}
3554
3555	if (ip1 == ip2)
3556		return 0;
3557
3558	/* Nested lock the second inode */
3559	xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
3560	/*
3561	 * We cannot use xfs_break_dax_layouts() directly here because it may
3562	 * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
3563	 * for this nested lock case.
3564	 */
3565	page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
3566	if (page && page_ref_count(page) != 1) {
3567		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3568		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3569		goto again;
3570	}
3571
3572	return 0;
3573}
3574
3575/*
3576 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3577 * mmap activity.
3578 */
3579int
3580xfs_ilock2_io_mmap(
3581	struct xfs_inode	*ip1,
3582	struct xfs_inode	*ip2)
3583{
3584	int			ret;
3585
3586	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3587	if (ret)
3588		return ret;
3589
3590	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
3591		ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
3592		if (ret) {
3593			inode_unlock(VFS_I(ip2));
3594			if (ip1 != ip2)
3595				inode_unlock(VFS_I(ip1));
3596			return ret;
3597		}
3598	} else
3599		filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
3600					    VFS_I(ip2)->i_mapping);
3601
3602	return 0;
3603}
3604
3605/* Unlock both inodes to allow IO and mmap activity. */
3606void
3607xfs_iunlock2_io_mmap(
3608	struct xfs_inode	*ip1,
3609	struct xfs_inode	*ip2)
3610{
3611	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
3612		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3613		if (ip1 != ip2)
3614			xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3615	} else
3616		filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
3617					      VFS_I(ip2)->i_mapping);
3618
3619	inode_unlock(VFS_I(ip2));
3620	if (ip1 != ip2)
3621		inode_unlock(VFS_I(ip1));
3622}
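/*
 * Illustrative pairing of the two helpers above (hypothetical caller, not
 * from the original source): quiesce file I/O and mmap activity on both
 * inodes around an operation that acts on them together.
 */
static int example_with_both_inodes_locked(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			error;

	error = xfs_ilock2_io_mmap(ip1, ip2);
	if (error)
		return error;

	/* ... operate on both inodes with I/O and page faults excluded ... */

	xfs_iunlock2_io_mmap(ip1, ip2);
	return 0;
}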
3623
3624/* Drop the MMAPLOCK and the IOLOCK after a remap completes. */
3625void
3626xfs_iunlock2_remapping(
3627	struct xfs_inode	*ip1,
3628	struct xfs_inode	*ip2)
3629{
3630	xfs_iflags_clear(ip1, XFS_IREMAPPING);
3631
3632	if (ip1 != ip2)
3633		xfs_iunlock(ip1, XFS_MMAPLOCK_SHARED);
3634	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3635
3636	if (ip1 != ip2)
3637		inode_unlock_shared(VFS_I(ip1));
3638	inode_unlock(VFS_I(ip2));
3639}
3640
3641/*
3642 * Reload the incore inode list for this inode.  Caller should ensure that
3643 * the link count cannot change, either by taking ILOCK_SHARED or otherwise
3644 * preventing other threads from executing.
3645 */
3646int
3647xfs_inode_reload_unlinked_bucket(
3648	struct xfs_trans	*tp,
3649	struct xfs_inode	*ip)
3650{
3651	struct xfs_mount	*mp = tp->t_mountp;
3652	struct xfs_buf		*agibp;
3653	struct xfs_agi		*agi;
3654	struct xfs_perag	*pag;
3655	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
3656	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
3657	xfs_agino_t		prev_agino, next_agino;
3658	unsigned int		bucket;
3659	bool			foundit = false;
3660	int			error;
3661
3662	/* Grab the first inode in the list */
3663	pag = xfs_perag_get(mp, agno);
3664	error = xfs_ialloc_read_agi(pag, tp, &agibp);
3665	xfs_perag_put(pag);
3666	if (error)
3667		return error;
3668
3669	/*
3670	 * We've taken ILOCK_SHARED and the AGI buffer lock to stabilize the
3671	 * incore unlinked list pointers for this inode.  Check once more to
3672	 * see if we raced with anyone else to reload the unlinked list.
3673	 */
3674	if (!xfs_inode_unlinked_incomplete(ip)) {
3675		foundit = true;
3676		goto out_agibp;
3677	}
3678
3679	bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
3680	agi = agibp->b_addr;
3681
3682	trace_xfs_inode_reload_unlinked_bucket(ip);
3683
3684	xfs_info_ratelimited(mp,
3685 "Found unrecovered unlinked inode 0x%x in AG 0x%x.  Initiating list recovery.",
3686			agino, agno);
3687
3688	prev_agino = NULLAGINO;
3689	next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3690	while (next_agino != NULLAGINO) {
3691		struct xfs_inode	*next_ip = NULL;
3692
3693		/* Found this caller's inode, set its backlink. */
3694		if (next_agino == agino) {
3695			next_ip = ip;
3696			next_ip->i_prev_unlinked = prev_agino;
3697			foundit = true;
3698			goto next_inode;
3699		}
3700
3701		/* Try in-memory lookup first. */
3702		next_ip = xfs_iunlink_lookup(pag, next_agino);
3703		if (next_ip)
3704			goto next_inode;
3705
3706		/* Inode not in memory, try reloading it. */
3707		error = xfs_iunlink_reload_next(tp, agibp, prev_agino,
3708				next_agino);
3709		if (error)
3710			break;
3711
3712		/* Grab the reloaded inode. */
3713		next_ip = xfs_iunlink_lookup(pag, next_agino);
3714		if (!next_ip) {
3715			/* No incore inode at all?  We reloaded it... */
3716			ASSERT(next_ip != NULL);
3717			error = -EFSCORRUPTED;
3718			break;
3719		}
3720
3721next_inode:
3722		prev_agino = next_agino;
3723		next_agino = next_ip->i_next_unlinked;
3724	}
3725
3726out_agibp:
3727	xfs_trans_brelse(tp, agibp);
3728	/* Should have found this inode somewhere in the iunlinked bucket. */
3729	if (!error && !foundit)
3730		error = -EFSCORRUPTED;
3731	return error;
3732}
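/*
 * Illustrative picture of the walk above (not part of the original
 * source): each AGI bucket heads a singly linked on-disk list of unlinked
 * inodes, and reloading rebuilds the incore back-pointers:
 *
 *	agi_unlinked[bucket] -> A -> B -> C -> NULLAGINO
 *	i_prev_unlinked:   NULLAGINO  A    B
 */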
3733
3734/* Decide if this inode is missing its unlinked list and reload it. */
3735int
3736xfs_inode_reload_unlinked(
3737	struct xfs_inode	*ip)
3738{
3739	struct xfs_trans	*tp;
3740	int			error;
3741
3742	error = xfs_trans_alloc_empty(ip->i_mount, &tp);
3743	if (error)
3744		return error;
3745
3746	xfs_ilock(ip, XFS_ILOCK_SHARED);
3747	if (xfs_inode_unlinked_incomplete(ip))
3748		error = xfs_inode_reload_unlinked_bucket(tp, ip);
3749	xfs_iunlock(ip, XFS_ILOCK_SHARED);
3750	xfs_trans_cancel(tp);
3751
3752	return error;
3753}
3754
3755/* Has this inode fork been zapped by repair? */
3756bool
3757xfs_ifork_zapped(
3758	const struct xfs_inode	*ip,
3759	int			whichfork)
3760{
3761	unsigned int		datamask = 0;
3762
3763	switch (whichfork) {
3764	case XFS_DATA_FORK:
3765		switch (ip->i_vnode.i_mode & S_IFMT) {
3766		case S_IFDIR:
3767			datamask = XFS_SICK_INO_DIR_ZAPPED;
3768			break;
3769		case S_IFLNK:
3770			datamask = XFS_SICK_INO_SYMLINK_ZAPPED;
3771			break;
3772		}
3773		return ip->i_sick & (XFS_SICK_INO_BMBTD_ZAPPED | datamask);
3774	case XFS_ATTR_FORK:
3775		return ip->i_sick & XFS_SICK_INO_BMBTA_ZAPPED;
3776	default:
3777		return false;
3778	}
3779}
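/*
 * Hypothetical caller sketch, not part of the original source: a reader
 * that cannot cope with a fork zapped (zeroed out) by online repair bails
 * out instead of trusting its contents.  The -EIO choice is an assumption
 * for illustration.
 */
static int example_require_intact_data_fork(struct xfs_inode *ip)
{
	if (xfs_ifork_zapped(ip, XFS_DATA_FORK))
		return -EIO;
	return 0;
}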