v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include <linux/iversion.h>
   7
   8#include "xfs.h"
   9#include "xfs_fs.h"
  10#include "xfs_shared.h"
  11#include "xfs_format.h"
  12#include "xfs_log_format.h"
  13#include "xfs_trans_resv.h"
  14#include "xfs_mount.h"
  15#include "xfs_defer.h"
  16#include "xfs_inode.h"
  17#include "xfs_dir2.h"
  18#include "xfs_attr.h"
  19#include "xfs_trans_space.h"
  20#include "xfs_trans.h"
  21#include "xfs_buf_item.h"
  22#include "xfs_inode_item.h"
  23#include "xfs_ialloc.h"
  24#include "xfs_bmap.h"
  25#include "xfs_bmap_util.h"
  26#include "xfs_errortag.h"
  27#include "xfs_error.h"
  28#include "xfs_quota.h"
  29#include "xfs_filestream.h"
  30#include "xfs_trace.h"
  31#include "xfs_icache.h"
  32#include "xfs_symlink.h"
  33#include "xfs_trans_priv.h"
  34#include "xfs_log.h"
  35#include "xfs_bmap_btree.h"
  36#include "xfs_reflink.h"
  37#include "xfs_ag.h"
  38
  39kmem_zone_t *xfs_inode_zone;
  40
  41/*
  42 * Used in xfs_itruncate_extents().  This is the maximum number of extents
  43 * freed from a file in a single transaction.
  44 */
  45#define	XFS_ITRUNC_MAX_EXTENTS	2
  46
  47STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
  48STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
  49	struct xfs_inode *);
  50
  51/*
  52 * Helper function to extract the extent size hint from an inode
  53 */
  54xfs_extlen_t
  55xfs_get_extsz_hint(
  56	struct xfs_inode	*ip)
  57{
  58	/*
  59	 * No point in aligning allocations if we need to COW to actually
  60	 * write to them.
  61	 */
  62	if (xfs_is_always_cow_inode(ip))
  63		return 0;
  64	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
  65		return ip->i_extsize;
  66	if (XFS_IS_REALTIME_INODE(ip))
  67		return ip->i_mount->m_sb.sb_rextsize;
  68	return 0;
  69}
  70
  71/*
  72 * Helper function to extract CoW extent size hint from inode.
  73 * Between the extent size hint and the CoW extent size hint, we
  74 * return the greater of the two.  If the value is zero (automatic),
  75 * use the default size.
  76 */
  77xfs_extlen_t
  78xfs_get_cowextsz_hint(
  79	struct xfs_inode	*ip)
  80{
  81	xfs_extlen_t		a, b;
  82
  83	a = 0;
  84	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
  85		a = ip->i_cowextsize;
  86	b = xfs_get_extsz_hint(ip);
  87
  88	a = max(a, b);
  89	if (a == 0)
  90		return XFS_DEFAULT_COWEXTSZ_HINT;
  91	return a;
  92}
  93
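/*
 * Worked example (illustrative): for an inode with XFS_DIFLAG2_COWEXTSIZE
 * set and i_cowextsize = 8 blocks, whose regular extent size hint works
 * out to 16 blocks, the CoW hint is max(8, 16) = 16.  If neither hint is
 * set, a == b == 0 and XFS_DEFAULT_COWEXTSZ_HINT is returned.
 */
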
  94/*
  95 * These two are wrapper routines around the xfs_ilock() routine used to
  96 * centralize some grungy code.  They are used in places that wish to lock the
  97 * inode solely for reading the extents.  The reason these places can't just
  98 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
  99 * bringing in of the extents from disk for a file in b-tree format.  If the
 100 * inode is in b-tree format, then we need to lock the inode exclusively until
 101 * the extents are read in.  Locking it exclusively all the time would limit
 102 * our parallelism unnecessarily, though.  What we do instead is check to see
 103 * if the extents have been read in yet, and only lock the inode exclusively
 104 * if they have not.
 105 *
 106 * The functions return a value which should be given to the corresponding
 107 * xfs_iunlock() call.
 108 */
 109uint
 110xfs_ilock_data_map_shared(
 111	struct xfs_inode	*ip)
 112{
 113	uint			lock_mode = XFS_ILOCK_SHARED;
 114
 115	if (xfs_need_iread_extents(&ip->i_df))
 116		lock_mode = XFS_ILOCK_EXCL;
 117	xfs_ilock(ip, lock_mode);
 118	return lock_mode;
 119}
 120
 121uint
 122xfs_ilock_attr_map_shared(
 123	struct xfs_inode	*ip)
 124{
 125	uint			lock_mode = XFS_ILOCK_SHARED;
 126
 127	if (ip->i_afp && xfs_need_iread_extents(ip->i_afp))
 128		lock_mode = XFS_ILOCK_EXCL;
 129	xfs_ilock(ip, lock_mode);
 130	return lock_mode;
 131}
 132
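/*
 * Illustrative usage sketch (assumed caller, not part of this file): take
 * whatever mode the helper picked and hand that same mode back to
 * xfs_iunlock():
 *
 *	uint lock_mode = xfs_ilock_data_map_shared(ip);
 *
 *	(walk the data fork extent list)
 *	xfs_iunlock(ip, lock_mode);
 */
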
 133/*
 134 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 135 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 136 * various combinations of the locks to be obtained.
 137 *
 138 * The 3 locks should always be ordered so that the IO lock is obtained first,
 139 * the mmap lock second and the ilock last in order to prevent deadlock.
 140 *
 141 * Basic locking order:
 142 *
 143 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
 144 *
 145 * mmap_lock locking order:
 146 *
 147 * i_rwsem -> page lock -> mmap_lock
 148 * mmap_lock -> i_mmap_lock -> page_lock
 149 *
 150 * The difference in mmap_lock locking order means that we cannot hold the
 151 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 152 * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
 153 * in get_user_pages() to map the user pages into the kernel address space for
 154 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 155 * page faults already hold the mmap_lock.
 156 *
 157 * Hence to serialise fully against both syscall and mmap based IO, we need to
 158 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 159 * taken in places where we need to invalidate the page cache in a race
 160 * free manner (e.g. truncate, hole punch and other extent manipulation
 161 * functions).
 162 */
 163void
 164xfs_ilock(
 165	xfs_inode_t		*ip,
 166	uint			lock_flags)
 167{
 168	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
 169
 170	/*
 171	 * You can't set both SHARED and EXCL for the same lock,
 172	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
 173	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
 174	 */
 175	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 176	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 177	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 178	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 179	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 180	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 181	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 182
 183	if (lock_flags & XFS_IOLOCK_EXCL) {
 184		down_write_nested(&VFS_I(ip)->i_rwsem,
 185				  XFS_IOLOCK_DEP(lock_flags));
 186	} else if (lock_flags & XFS_IOLOCK_SHARED) {
 187		down_read_nested(&VFS_I(ip)->i_rwsem,
 188				 XFS_IOLOCK_DEP(lock_flags));
 189	}
 190
 191	if (lock_flags & XFS_MMAPLOCK_EXCL)
 192		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
 193	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 194		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
 195
 196	if (lock_flags & XFS_ILOCK_EXCL)
 197		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
 198	else if (lock_flags & XFS_ILOCK_SHARED)
 199		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
 200}
 201
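/*
 * Illustrative sketch (assumed caller, not part of this file): an extent
 * manipulation path such as truncate or hole punch serialises against both
 * syscall and mmap based IO by taking the first two locks together in the
 * documented order, taking the ilock per transaction further in:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	(invalidate the page cache, manipulate extents under XFS_ILOCK_EXCL)
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 */
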
 202/*
 203 * This is just like xfs_ilock(), except that the caller
 204 * is guaranteed not to sleep.  It returns 1 if it gets
 205 * the requested locks and 0 otherwise.  If the IO lock is
 206 * obtained but the inode lock cannot be, then the IO lock
 207 * is dropped before returning.
 208 *
 209 * ip -- the inode being locked
 210 * lock_flags -- this parameter indicates which of the inode's locks are
 211 *       to be locked.  See the comment for xfs_ilock() for a list
 212 *	 of valid values.
 213 */
 214int
 215xfs_ilock_nowait(
 216	xfs_inode_t		*ip,
 217	uint			lock_flags)
 218{
 219	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
 220
 221	/*
 222	 * You can't set both SHARED and EXCL for the same lock,
 223	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
 224	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
 225	 */
 226	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 227	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 228	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 229	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 230	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 231	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 232	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 233
 234	if (lock_flags & XFS_IOLOCK_EXCL) {
 235		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
 236			goto out;
 237	} else if (lock_flags & XFS_IOLOCK_SHARED) {
 238		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
 239			goto out;
 240	}
 241
 242	if (lock_flags & XFS_MMAPLOCK_EXCL) {
 243		if (!mrtryupdate(&ip->i_mmaplock))
 244			goto out_undo_iolock;
 245	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
 246		if (!mrtryaccess(&ip->i_mmaplock))
 247			goto out_undo_iolock;
 248	}
 249
 250	if (lock_flags & XFS_ILOCK_EXCL) {
 251		if (!mrtryupdate(&ip->i_lock))
 252			goto out_undo_mmaplock;
 253	} else if (lock_flags & XFS_ILOCK_SHARED) {
 254		if (!mrtryaccess(&ip->i_lock))
 255			goto out_undo_mmaplock;
 256	}
 257	return 1;
 258
 259out_undo_mmaplock:
 260	if (lock_flags & XFS_MMAPLOCK_EXCL)
 261		mrunlock_excl(&ip->i_mmaplock);
 262	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 263		mrunlock_shared(&ip->i_mmaplock);
 264out_undo_iolock:
 265	if (lock_flags & XFS_IOLOCK_EXCL)
 266		up_write(&VFS_I(ip)->i_rwsem);
 267	else if (lock_flags & XFS_IOLOCK_SHARED)
 268		up_read(&VFS_I(ip)->i_rwsem);
 269out:
 270	return 0;
 271}
 272
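/*
 * Illustrative sketch (assumed caller, not part of this file): a
 * non-blocking IO path backs off with -EAGAIN rather than sleeping on a
 * contended lock:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
 *		return -EAGAIN;
 *	(do the IO)
 *	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 */
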
 273/*
 274 * xfs_iunlock() is used to drop the inode locks acquired with
 275 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 276 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 277 * that we know which locks to drop.
 278 *
 279 * ip -- the inode being unlocked
 280 * lock_flags -- this parameter indicates which of the inode's locks are
 281 *       to be unlocked.  See the comment for xfs_ilock() for a list
 282 *	 of valid values for this parameter.
 283 *
 284 */
 285void
 286xfs_iunlock(
 287	xfs_inode_t		*ip,
 288	uint			lock_flags)
 289{
 290	/*
 291	 * You can't set both SHARED and EXCL for the same lock,
 292	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
 293	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
 294	 */
 295	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 296	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 297	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 298	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 299	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 300	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 301	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 302	ASSERT(lock_flags != 0);
 303
 304	if (lock_flags & XFS_IOLOCK_EXCL)
 305		up_write(&VFS_I(ip)->i_rwsem);
 306	else if (lock_flags & XFS_IOLOCK_SHARED)
 307		up_read(&VFS_I(ip)->i_rwsem);
 308
 309	if (lock_flags & XFS_MMAPLOCK_EXCL)
 310		mrunlock_excl(&ip->i_mmaplock);
 311	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 312		mrunlock_shared(&ip->i_mmaplock);
 313
 314	if (lock_flags & XFS_ILOCK_EXCL)
 315		mrunlock_excl(&ip->i_lock);
 316	else if (lock_flags & XFS_ILOCK_SHARED)
 317		mrunlock_shared(&ip->i_lock);
 318
 319	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
 320}
 321
 322/*
 323 * Give up write locks.  The i/o lock cannot be held nested
 324 * if it is being demoted.
 325 */
 326void
 327xfs_ilock_demote(
 328	xfs_inode_t		*ip,
 329	uint			lock_flags)
 330{
 331	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
 332	ASSERT((lock_flags &
 333		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
 334
 335	if (lock_flags & XFS_ILOCK_EXCL)
 336		mrdemote(&ip->i_lock);
 337	if (lock_flags & XFS_MMAPLOCK_EXCL)
 338		mrdemote(&ip->i_mmaplock);
 339	if (lock_flags & XFS_IOLOCK_EXCL)
 340		downgrade_write(&VFS_I(ip)->i_rwsem);
 341
 342	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
 343}
 344
 345#if defined(DEBUG) || defined(XFS_WARN)
 346int
 347xfs_isilocked(
 348	xfs_inode_t		*ip,
 349	uint			lock_flags)
 350{
 351	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
 352		if (!(lock_flags & XFS_ILOCK_SHARED))
 353			return !!ip->i_lock.mr_writer;
 354		return rwsem_is_locked(&ip->i_lock.mr_lock);
 355	}
 356
 357	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
 358		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
 359			return !!ip->i_mmaplock.mr_writer;
 360		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
 361	}
 362
 363	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
 364		if (!(lock_flags & XFS_IOLOCK_SHARED))
 365			return !debug_locks ||
 366				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
 367		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
 368	}
 369
 370	ASSERT(0);
 371	return 0;
 372}
 373#endif
 374
 375/*
 376 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 377 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 378 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 379 * errors and warnings.
 380 */
 381#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
 382static bool
 383xfs_lockdep_subclass_ok(
 384	int subclass)
 385{
 386	return subclass < MAX_LOCKDEP_SUBCLASSES;
 387}
 388#else
 389#define xfs_lockdep_subclass_ok(subclass)	(true)
 390#endif
 391
 392/*
 393 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 394 * value. This can be called for any type of inode lock combination, including
 395 * parent locking. Care must be taken to ensure we don't overrun the subclass
 396 * storage fields in the class mask we build.
 397 */
 398static inline int
 399xfs_lock_inumorder(int lock_mode, int subclass)
 400{
 401	int	class = 0;
 402
 403	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
 404			      XFS_ILOCK_RTSUM)));
 405	ASSERT(xfs_lockdep_subclass_ok(subclass));
 406
 407	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
 408		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
 409		class += subclass << XFS_IOLOCK_SHIFT;
 410	}
 411
 412	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
 413		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
 414		class += subclass << XFS_MMAPLOCK_SHIFT;
 415	}
 416
 417	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
 418		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
 419		class += subclass << XFS_ILOCK_SHIFT;
 420	}
 421
 422	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
 423}
 424
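/*
 * Worked example (illustrative): xfs_lock_inumorder(XFS_ILOCK_EXCL, 2)
 * returns XFS_ILOCK_EXCL | (2 << XFS_ILOCK_SHIFT), i.e. the same lock mode
 * with subclass 2 encoded in the ILOCK subclass field, so lockdep sees a
 * distinct class for the third inode in a locked set.
 */
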
 425/*
 426 * The following routine will lock n inodes in exclusive mode.  We assume the
 427 * caller calls us with the inodes in i_ino order.
 428 *
 429 * We need to detect deadlock where an inode that we lock is in the AIL and we
 430 * start waiting for another inode that is locked by a thread in a long running
 431 * transaction (such as truncate). This can result in deadlock since the long
 432 * running trans might need to wait for the inode we just locked in order to
 433 * push the tail and free space in the log.
 434 *
 435 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 436 * the iolock, the mmaplock or the ilock. If we
 437 * lock more than one at a time, lockdep will report false positives saying we
 438 * have violated locking orders.
 439 */
 440static void
 441xfs_lock_inodes(
 442	struct xfs_inode	**ips,
 443	int			inodes,
 444	uint			lock_mode)
 445{
 446	int			attempts = 0, i, j, try_lock;
 447	struct xfs_log_item	*lp;
 448
 449	/*
 450	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
 451	 * support an arbitrary depth of locking here, but absolute limits on
 452	 * inodes depend on the type of locking and the limits placed by
 453	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
 454	 * the asserts.
 455	 */
 456	ASSERT(ips && inodes >= 2 && inodes <= 5);
 457	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
 458			    XFS_ILOCK_EXCL));
 459	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
 460			      XFS_ILOCK_SHARED)));
 461	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
 462		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
 463	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
 464		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
 465
 466	if (lock_mode & XFS_IOLOCK_EXCL) {
 467		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
 468	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
 469		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
 470
 471	try_lock = 0;
 472	i = 0;
 473again:
 474	for (; i < inodes; i++) {
 475		ASSERT(ips[i]);
 476
 477		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
 478			continue;
 479
 480		/*
 481		 * If try_lock is not set yet, make sure all locked inodes are
 482		 * not in the AIL.  If any are, set try_lock to be used later.
 483		 */
 484		if (!try_lock) {
 485			for (j = (i - 1); j >= 0 && !try_lock; j--) {
 486				lp = &ips[j]->i_itemp->ili_item;
 487				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
 488					try_lock++;
 489			}
 490		}
 491
 492		/*
 493		 * If any of the previous locks we have locked is in the AIL,
 494		 * we must TRY to get the second and subsequent locks. If
 495		 * we can't get any, we must release all we have
 496		 * and try again.
 497		 */
 498		if (!try_lock) {
 499			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
 500			continue;
 501		}
 502
 503		/* try_lock means we have an inode locked that is in the AIL. */
 504		ASSERT(i != 0);
 505		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
 506			continue;
 507
 508		/*
 509		 * Unlock all previous guys and try again.  xfs_iunlock will try
 510		 * to push the tail if the inode is in the AIL.
 511		 */
 512		attempts++;
 513		for (j = i - 1; j >= 0; j--) {
 514			/*
 515			 * Check to see if we've already unlocked this one.  Not
 516			 * the first one going back, and the inode ptr is the
 517			 * same.
 518			 */
 519			if (j != (i - 1) && ips[j] == ips[j + 1])
 520				continue;
 521
 522			xfs_iunlock(ips[j], lock_mode);
 523		}
 524
 525		if ((attempts % 5) == 0) {
 526			delay(1); /* Don't just spin the CPU */
 527		}
 528		i = 0;
 529		try_lock = 0;
 530		goto again;
 531	}
 532}
 533
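/*
 * Illustrative sketch (assumed caller, not part of this file): a
 * rename-style operation sorts its inodes into ascending i_ino order,
 * duplicates adjacent, then locks them all with one lock type:
 *
 *	(sort ips[0..num_inodes-1] by i_ino, duplicates adjacent)
 *	xfs_lock_inodes(ips, num_inodes, XFS_ILOCK_EXCL);
 */
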
 534/*
 535 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 536 * the mmaplock or the ilock, but not more than one type at a time. If we lock
 537 * more than one at a time, lockdep will report false positives saying we have
 538 * violated locking orders.  The iolock must be double-locked separately since
 539 * we use i_rwsem for that.  We now support taking one lock EXCL and the other
 540 * SHARED.
 541 */
 542void
 543xfs_lock_two_inodes(
 544	struct xfs_inode	*ip0,
 545	uint			ip0_mode,
 546	struct xfs_inode	*ip1,
 547	uint			ip1_mode)
 548{
 549	struct xfs_inode	*temp;
 550	uint			mode_temp;
 551	int			attempts = 0;
 552	struct xfs_log_item	*lp;
 553
 554	ASSERT(hweight32(ip0_mode) == 1);
 555	ASSERT(hweight32(ip1_mode) == 1);
 556	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
 557	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
 558	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 559	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 560	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 561	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 562	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 563	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 564	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 565	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 566
 567	ASSERT(ip0->i_ino != ip1->i_ino);
 568
 569	if (ip0->i_ino > ip1->i_ino) {
 570		temp = ip0;
 571		ip0 = ip1;
 572		ip1 = temp;
 573		mode_temp = ip0_mode;
 574		ip0_mode = ip1_mode;
 575		ip1_mode = mode_temp;
 576	}
 577
 578 again:
 579	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
 580
 581	/*
 582	 * If the first lock we have locked is in the AIL, we must TRY to get
 583	 * the second lock. If we can't get it, we must release the first one
 584	 * and try again.
 585	 */
 586	lp = &ip0->i_itemp->ili_item;
 587	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
 588		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
 589			xfs_iunlock(ip0, ip0_mode);
 590			if ((++attempts % 5) == 0)
 591				delay(1); /* Don't just spin the CPU */
 592			goto again;
 593		}
 594	} else {
 595		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
 596	}
 597}
 598
 599uint
 600xfs_ip2xflags(
 601	struct xfs_inode	*ip)
 602{
 603	uint			flags = 0;
 604
 605	if (ip->i_diflags & XFS_DIFLAG_ANY) {
 606		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
 607			flags |= FS_XFLAG_REALTIME;
 608		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
 609			flags |= FS_XFLAG_PREALLOC;
 610		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
 611			flags |= FS_XFLAG_IMMUTABLE;
 612		if (ip->i_diflags & XFS_DIFLAG_APPEND)
 613			flags |= FS_XFLAG_APPEND;
 614		if (ip->i_diflags & XFS_DIFLAG_SYNC)
 615			flags |= FS_XFLAG_SYNC;
 616		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
 617			flags |= FS_XFLAG_NOATIME;
 618		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
 619			flags |= FS_XFLAG_NODUMP;
 620		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
 621			flags |= FS_XFLAG_RTINHERIT;
 622		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
 623			flags |= FS_XFLAG_PROJINHERIT;
 624		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
 625			flags |= FS_XFLAG_NOSYMLINKS;
 626		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
 627			flags |= FS_XFLAG_EXTSIZE;
 628		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
 629			flags |= FS_XFLAG_EXTSZINHERIT;
 630		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
 631			flags |= FS_XFLAG_NODEFRAG;
 632		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
 633			flags |= FS_XFLAG_FILESTREAM;
 634	}
 635
 636	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
 637		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
 638			flags |= FS_XFLAG_DAX;
 639		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
 640			flags |= FS_XFLAG_COWEXTSIZE;
 641	}
 642
 643	if (XFS_IFORK_Q(ip))
 644		flags |= FS_XFLAG_HASATTR;
 645	return flags;
 646}
 647
 648/*
 649 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 650 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 651 * ci_name->name will point to the actual name (caller must free) or
 652 * will be set to NULL if an exact match is found.
 653 */
 654int
 655xfs_lookup(
 656	xfs_inode_t		*dp,
 657	struct xfs_name		*name,
 658	xfs_inode_t		**ipp,
 659	struct xfs_name		*ci_name)
 660{
 661	xfs_ino_t		inum;
 662	int			error;
 663
 664	trace_xfs_lookup(dp, name);
 665
 666	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 667		return -EIO;
 668
 669	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
 670	if (error)
 671		goto out_unlock;
 672
 673	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
 674	if (error)
 675		goto out_free_name;
 676
 677	return 0;
 678
 679out_free_name:
 680	if (ci_name)
 681		kmem_free(ci_name->name);
 682out_unlock:
 683	*ipp = NULL;
 684	return error;
 685}
 686
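/*
 * Illustrative sketch (assumed caller, not part of this file): a caller
 * requesting case-insensitive matching owns the returned name on success
 * and must free it:
 *
 *	struct xfs_inode *ip;
 *	struct xfs_name ci_name = { };
 *
 *	error = xfs_lookup(dp, name, &ip, &ci_name);
 *	if (!error && ci_name.name)
 *		kmem_free(ci_name.name);
 */
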
 687/* Propagate di_flags from a parent inode to a child inode. */
 688static void
 689xfs_inode_inherit_flags(
 690	struct xfs_inode	*ip,
 691	const struct xfs_inode	*pip)
 692{
 693	unsigned int		di_flags = 0;
 694	xfs_failaddr_t		failaddr;
 695	umode_t			mode = VFS_I(ip)->i_mode;
 696
 697	if (S_ISDIR(mode)) {
 698		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
 699			di_flags |= XFS_DIFLAG_RTINHERIT;
 700		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
 701			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
 702			ip->i_extsize = pip->i_extsize;
 703		}
 704		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
 705			di_flags |= XFS_DIFLAG_PROJINHERIT;
 706	} else if (S_ISREG(mode)) {
 707		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
 708		    xfs_sb_version_hasrealtime(&ip->i_mount->m_sb))
 709			di_flags |= XFS_DIFLAG_REALTIME;
 710		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
 711			di_flags |= XFS_DIFLAG_EXTSIZE;
 712			ip->i_extsize = pip->i_extsize;
 713		}
 714	}
 715	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
 716	    xfs_inherit_noatime)
 717		di_flags |= XFS_DIFLAG_NOATIME;
 718	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
 719	    xfs_inherit_nodump)
 720		di_flags |= XFS_DIFLAG_NODUMP;
 721	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
 722	    xfs_inherit_sync)
 723		di_flags |= XFS_DIFLAG_SYNC;
 724	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
 725	    xfs_inherit_nosymlinks)
 726		di_flags |= XFS_DIFLAG_NOSYMLINKS;
 727	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
 728	    xfs_inherit_nodefrag)
 729		di_flags |= XFS_DIFLAG_NODEFRAG;
 730	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
 731		di_flags |= XFS_DIFLAG_FILESTREAM;
 732
 733	ip->i_diflags |= di_flags;
 734
 735	/*
 736	 * Inode verifiers on older kernels only check that the extent size
 737	 * hint is an integer multiple of the rt extent size on realtime files.
 738	 * They did not check the hint alignment on a directory with both
 739	 * rtinherit and extszinherit flags set.  If the misaligned hint is
 740	 * propagated from a directory into a new realtime file, new file
 741	 * allocations will fail due to math errors in the rt allocator and/or
 742	 * trip the verifiers.  Validate the hint settings in the new file so
 743	 * that we don't let broken hints propagate.
 744	 */
 745	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
 746			VFS_I(ip)->i_mode, ip->i_diflags);
 747	if (failaddr) {
 748		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
 749				   XFS_DIFLAG_EXTSZINHERIT);
 750		ip->i_extsize = 0;
 751	}
 752}
 753
 754/* Propagate di_flags2 from a parent inode to a child inode. */
 755static void
 756xfs_inode_inherit_flags2(
 757	struct xfs_inode	*ip,
 758	const struct xfs_inode	*pip)
 759{
 760	xfs_failaddr_t		failaddr;
 761
 762	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
 763		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
 764		ip->i_cowextsize = pip->i_cowextsize;
 765	}
 766	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
 767		ip->i_diflags2 |= XFS_DIFLAG2_DAX;
 768
 769	/* Don't let invalid cowextsize hints propagate. */
 770	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
 771			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
 772	if (failaddr) {
 773		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
 774		ip->i_cowextsize = 0;
 775	}
 776}
 777
 778/*
 779 * Initialise a newly allocated inode and return the in-core inode to the
 780 * caller locked exclusively.
 781 */
 782int
 783xfs_init_new_inode(
 784	struct user_namespace	*mnt_userns,
 785	struct xfs_trans	*tp,
 786	struct xfs_inode	*pip,
 787	xfs_ino_t		ino,
 788	umode_t			mode,
 789	xfs_nlink_t		nlink,
 790	dev_t			rdev,
 791	prid_t			prid,
 792	bool			init_xattrs,
 793	struct xfs_inode	**ipp)
 794{
 795	struct inode		*dir = pip ? VFS_I(pip) : NULL;
 796	struct xfs_mount	*mp = tp->t_mountp;
 797	struct xfs_inode	*ip;
 798	unsigned int		flags;
 799	int			error;
 800	struct timespec64	tv;
 801	struct inode		*inode;
 802
 803	/*
 804	 * Protect against obviously corrupt allocation btree records. Later
 805	 * xfs_iget checks will catch re-allocation of other active in-memory
 806	 * and on-disk inodes. If we don't catch reallocating the parent inode
 807	 * here we will deadlock in xfs_iget() so we have to do these checks
 808	 * first.
 809	 */
 810	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
 811		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
 812		return -EFSCORRUPTED;
 813	}
 814
 815	/*
 816	 * Get the in-core inode with the lock held exclusively to prevent
 817	 * others from looking at it until we're done.
 818	 */
 819	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
 820	if (error)
 821		return error;
 822
 823	ASSERT(ip != NULL);
 824	inode = VFS_I(ip);
 825	set_nlink(inode, nlink);
 826	inode->i_rdev = rdev;
 827	ip->i_projid = prid;
 828
 829	if (dir && !(dir->i_mode & S_ISGID) &&
 830	    (mp->m_flags & XFS_MOUNT_GRPID)) {
 831		inode_fsuid_set(inode, mnt_userns);
 832		inode->i_gid = dir->i_gid;
 833		inode->i_mode = mode;
 834	} else {
 835		inode_init_owner(mnt_userns, inode, dir, mode);
 836	}
 837
 838	/*
 839	 * If the group ID of the new file does not match the effective group
 840	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
 841	 * (and only if the irix_sgid_inherit compatibility variable is set).
 842	 */
 843	if (irix_sgid_inherit &&
 844	    (inode->i_mode & S_ISGID) &&
 845	    !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
 846		inode->i_mode &= ~S_ISGID;
 847
 848	ip->i_disk_size = 0;
 849	ip->i_df.if_nextents = 0;
 850	ASSERT(ip->i_nblocks == 0);
 851
 852	tv = current_time(inode);
 853	inode->i_mtime = tv;
 854	inode->i_atime = tv;
 855	inode->i_ctime = tv;
 856
 857	ip->i_extsize = 0;
 858	ip->i_diflags = 0;
 859
 860	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
 861		inode_set_iversion(inode, 1);
 862		ip->i_cowextsize = 0;
 863		ip->i_crtime = tv;
 864	}
 865
 866	flags = XFS_ILOG_CORE;
 867	switch (mode & S_IFMT) {
 868	case S_IFIFO:
 869	case S_IFCHR:
 870	case S_IFBLK:
 871	case S_IFSOCK:
 872		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
 873		flags |= XFS_ILOG_DEV;
 874		break;
 875	case S_IFREG:
 876	case S_IFDIR:
 877		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
 878			xfs_inode_inherit_flags(ip, pip);
 879		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
 880			xfs_inode_inherit_flags2(ip, pip);
 881		fallthrough;
 882	case S_IFLNK:
 883		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
 884		ip->i_df.if_bytes = 0;
 885		ip->i_df.if_u1.if_root = NULL;
 886		break;
 887	default:
 888		ASSERT(0);
 889	}
 890
 891	/*
 892	 * If we need to create attributes immediately after allocating the
 893	 * inode, initialise an empty attribute fork right now. We use the
 894	 * default fork offset for attributes here as we don't know exactly what
 895	 * size or how many attributes we might be adding. We can do this
 896	 * safely here because we know the data fork is completely empty and
 897	 * this saves us from needing to run a separate transaction to set the
 898	 * fork offset in the immediate future.
 899	 */
 900	if (init_xattrs && xfs_sb_version_hasattr(&mp->m_sb)) {
 901		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
 902		ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
 903	}
 904
 905	/*
 906	 * Log the new values stuffed into the inode.
 907	 */
 908	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 909	xfs_trans_log_inode(tp, ip, flags);
 910
 911	/* now that we have an i_mode we can setup the inode structure */
 912	xfs_setup_inode(ip);
 913
 914	*ipp = ip;
 915	return 0;
 916}
 917
 918/*
 919 * Decrement the link count on an inode & log the change.  If this causes the
 920 * link count to go to zero, move the inode to the AGI unlinked list so that it can
 921 * be freed when the last active reference goes away via xfs_inactive().
 922 */
 923static int			/* error */
 924xfs_droplink(
 925	xfs_trans_t *tp,
 926	xfs_inode_t *ip)
 927{
 928	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 929
 930	drop_nlink(VFS_I(ip));
 931	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 932
 933	if (VFS_I(ip)->i_nlink)
 934		return 0;
 935
 936	return xfs_iunlink(tp, ip);
 937}
 938
 939/*
 940 * Increment the link count on an inode & log the change.
 941 */
 942static void
 943xfs_bumplink(
 944	xfs_trans_t *tp,
 945	xfs_inode_t *ip)
 946{
 947	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 948
 949	inc_nlink(VFS_I(ip));
 950	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 951}
 952
 953int
 954xfs_create(
 955	struct user_namespace	*mnt_userns,
 956	xfs_inode_t		*dp,
 957	struct xfs_name		*name,
 958	umode_t			mode,
 959	dev_t			rdev,
 960	bool			init_xattrs,
 961	xfs_inode_t		**ipp)
 962{
 963	int			is_dir = S_ISDIR(mode);
 964	struct xfs_mount	*mp = dp->i_mount;
 965	struct xfs_inode	*ip = NULL;
 966	struct xfs_trans	*tp = NULL;
 967	int			error;
 968	bool                    unlock_dp_on_error = false;
 969	prid_t			prid;
 970	struct xfs_dquot	*udqp = NULL;
 971	struct xfs_dquot	*gdqp = NULL;
 972	struct xfs_dquot	*pdqp = NULL;
 973	struct xfs_trans_res	*tres;
 974	uint			resblks;
 975	xfs_ino_t		ino;
 976
 977	trace_xfs_create(dp, name);
 978
 979	if (XFS_FORCED_SHUTDOWN(mp))
 980		return -EIO;
 981
 982	prid = xfs_get_initial_prid(dp);
 983
 984	/*
 985	 * Make sure that we have allocated dquot(s) on disk.
 986	 */
 987	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns),
 988			mapped_fsgid(mnt_userns), prid,
 989			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 990			&udqp, &gdqp, &pdqp);
 991	if (error)
 992		return error;
 993
 994	if (is_dir) {
 995		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
 996		tres = &M_RES(mp)->tr_mkdir;
 997	} else {
 998		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
 999		tres = &M_RES(mp)->tr_create;
1000	}
1001
1002	/*
1003	 * Initially assume that the file does not exist and
1004	 * reserve the resources for that case.  If that is not
1005	 * the case we'll drop the one we have and get a more
1006	 * appropriate transaction later.
1007	 */
1008	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1009			&tp);
1010	if (error == -ENOSPC) {
1011		/* flush outstanding delalloc blocks and retry */
1012		xfs_flush_inodes(mp);
1013		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
1014				resblks, &tp);
1015	}
1016	if (error)
1017		goto out_release_dquots;
1018
1019	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1020	unlock_dp_on_error = true;
1021
1022	error = xfs_iext_count_may_overflow(dp, XFS_DATA_FORK,
1023			XFS_IEXT_DIR_MANIP_CNT(mp));
1024	if (error)
1025		goto out_trans_cancel;
1026
1027	/*
1028	 * A newly created regular or special file just has one directory
1029	 * entry pointing to it, but a directory also has the "." entry
1030	 * pointing to itself.
1031	 */
1032	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1033	if (!error)
1034		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
1035				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
1036	if (error)
1037		goto out_trans_cancel;
1038
1039	/*
1040	 * Now we join the directory inode to the transaction.  We do not do it
1041	 * earlier because xfs_dialloc might commit the previous transaction
1042	 * (and release all the locks).  An error from here on will result in
1043	 * the transaction cancel unlocking dp so don't do it explicitly in the
1044	 * error path.
1045	 */
1046	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1047	unlock_dp_on_error = false;
1048
1049	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1050					resblks - XFS_IALLOC_SPACE_RES(mp));
1051	if (error) {
1052		ASSERT(error != -ENOSPC);
1053		goto out_trans_cancel;
1054	}
1055	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1056	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1057
1058	if (is_dir) {
1059		error = xfs_dir_init(tp, ip, dp);
1060		if (error)
1061			goto out_trans_cancel;
1062
1063		xfs_bumplink(tp, dp);
1064	}
1065
1066	/*
1067	 * If this is a synchronous mount, make sure that the
1068	 * create transaction goes to disk before returning to
1069	 * the user.
1070	 */
1071	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1072		xfs_trans_set_sync(tp);
1073
1074	/*
1075	 * Attach the dquot(s) to the inodes and modify them incore.
1076	 * The IDs of the inode couldn't have changed since the new
1077	 * inode has been locked ever since it was created.
1078	 */
1079	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1080
1081	error = xfs_trans_commit(tp);
1082	if (error)
1083		goto out_release_inode;
1084
1085	xfs_qm_dqrele(udqp);
1086	xfs_qm_dqrele(gdqp);
1087	xfs_qm_dqrele(pdqp);
1088
1089	*ipp = ip;
1090	return 0;
1091
1092 out_trans_cancel:
1093	xfs_trans_cancel(tp);
1094 out_release_inode:
1095	/*
1096	 * Wait until after the current transaction is aborted to finish the
1097	 * setup of the inode and release the inode.  This prevents recursive
1098	 * transactions and deadlocks from xfs_inactive.
1099	 */
1100	if (ip) {
1101		xfs_finish_inode_setup(ip);
1102		xfs_irele(ip);
1103	}
1104 out_release_dquots:
1105	xfs_qm_dqrele(udqp);
1106	xfs_qm_dqrele(gdqp);
1107	xfs_qm_dqrele(pdqp);
1108
1109	if (unlock_dp_on_error)
1110		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1111	return error;
1112}
1113
1114int
1115xfs_create_tmpfile(
1116	struct user_namespace	*mnt_userns,
1117	struct xfs_inode	*dp,
1118	umode_t			mode,
1119	struct xfs_inode	**ipp)
1120{
1121	struct xfs_mount	*mp = dp->i_mount;
1122	struct xfs_inode	*ip = NULL;
1123	struct xfs_trans	*tp = NULL;
1124	int			error;
1125	prid_t                  prid;
1126	struct xfs_dquot	*udqp = NULL;
1127	struct xfs_dquot	*gdqp = NULL;
1128	struct xfs_dquot	*pdqp = NULL;
1129	struct xfs_trans_res	*tres;
1130	uint			resblks;
1131	xfs_ino_t		ino;
1132
1133	if (XFS_FORCED_SHUTDOWN(mp))
1134		return -EIO;
1135
1136	prid = xfs_get_initial_prid(dp);
1137
1138	/*
1139	 * Make sure that we have allocated dquot(s) on disk.
1140	 */
1141	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns),
1142			mapped_fsgid(mnt_userns), prid,
1143			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1144			&udqp, &gdqp, &pdqp);
1145	if (error)
1146		return error;
1147
1148	resblks = XFS_IALLOC_SPACE_RES(mp);
1149	tres = &M_RES(mp)->tr_create_tmpfile;
1150
1151	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1152			&tp);
1153	if (error)
1154		goto out_release_dquots;
1155
1156	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1157	if (!error)
1158		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
1159				0, 0, prid, false, &ip);
1160	if (error)
1161		goto out_trans_cancel;
1162
1163	if (mp->m_flags & XFS_MOUNT_WSYNC)
1164		xfs_trans_set_sync(tp);
1165
1166	/*
1167	 * Attach the dquot(s) to the inodes and modify them incore.
1168	 * The IDs of the inode couldn't have changed since the new
1169	 * inode has been locked ever since it was created.
1170	 */
1171	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1172
1173	error = xfs_iunlink(tp, ip);
1174	if (error)
1175		goto out_trans_cancel;
1176
1177	error = xfs_trans_commit(tp);
1178	if (error)
1179		goto out_release_inode;
1180
1181	xfs_qm_dqrele(udqp);
1182	xfs_qm_dqrele(gdqp);
1183	xfs_qm_dqrele(pdqp);
1184
1185	*ipp = ip;
1186	return 0;
1187
1188 out_trans_cancel:
1189	xfs_trans_cancel(tp);
1190 out_release_inode:
1191	/*
1192	 * Wait until after the current transaction is aborted to finish the
1193	 * setup of the inode and release the inode.  This prevents recursive
1194	 * transactions and deadlocks from xfs_inactive.
1195	 */
1196	if (ip) {
1197		xfs_finish_inode_setup(ip);
1198		xfs_irele(ip);
1199	}
1200 out_release_dquots:
1201	xfs_qm_dqrele(udqp);
1202	xfs_qm_dqrele(gdqp);
1203	xfs_qm_dqrele(pdqp);
1204
1205	return error;
1206}
1207
1208int
1209xfs_link(
1210	xfs_inode_t		*tdp,
1211	xfs_inode_t		*sip,
1212	struct xfs_name		*target_name)
1213{
1214	xfs_mount_t		*mp = tdp->i_mount;
1215	xfs_trans_t		*tp;
1216	int			error;
1217	int			resblks;
1218
1219	trace_xfs_link(tdp, target_name);
1220
1221	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1222
1223	if (XFS_FORCED_SHUTDOWN(mp))
1224		return -EIO;
1225
1226	error = xfs_qm_dqattach(sip);
1227	if (error)
1228		goto std_return;
1229
1230	error = xfs_qm_dqattach(tdp);
1231	if (error)
1232		goto std_return;
1233
1234	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1235	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
1236	if (error == -ENOSPC) {
1237		resblks = 0;
1238		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
1239	}
1240	if (error)
1241		goto std_return;
1242
1243	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1244
1245	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1246	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1247
1248	error = xfs_iext_count_may_overflow(tdp, XFS_DATA_FORK,
1249			XFS_IEXT_DIR_MANIP_CNT(mp));
1250	if (error)
1251		goto error_return;
1252
1253	/*
1254	 * If we are using project inheritance, we only allow hard link
1255	 * creation in our tree when the project IDs are the same; else
1256	 * the tree quota mechanism could be circumvented.
1257	 */
1258	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
1259		     tdp->i_projid != sip->i_projid)) {
1260		error = -EXDEV;
1261		goto error_return;
1262	}
1263
1264	if (!resblks) {
1265		error = xfs_dir_canenter(tp, tdp, target_name);
1266		if (error)
1267			goto error_return;
1268	}
1269
1270	/*
1271	 * Handle initial link state of O_TMPFILE inode
1272	 */
1273	if (VFS_I(sip)->i_nlink == 0) {
1274		struct xfs_perag	*pag;
1275
1276		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
1277		error = xfs_iunlink_remove(tp, pag, sip);
1278		xfs_perag_put(pag);
1279		if (error)
1280			goto error_return;
1281	}
1282
1283	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1284				   resblks);
1285	if (error)
1286		goto error_return;
1287	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1288	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1289
1290	xfs_bumplink(tp, sip);
1291
1292	/*
1293	 * If this is a synchronous mount, make sure that the
1294	 * link transaction goes to disk before returning to
1295	 * the user.
1296	 */
1297	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1298		xfs_trans_set_sync(tp);
1299
1300	return xfs_trans_commit(tp);
1301
1302 error_return:
1303	xfs_trans_cancel(tp);
1304 std_return:
1305	return error;
1306}
1307
1308/* Clear the reflink flag and the cowblocks tag if possible. */
1309static void
1310xfs_itruncate_clear_reflink_flags(
1311	struct xfs_inode	*ip)
1312{
1313	struct xfs_ifork	*dfork;
1314	struct xfs_ifork	*cfork;
1315
1316	if (!xfs_is_reflink_inode(ip))
1317		return;
1318	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1319	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1320	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1321		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1322	if (cfork->if_bytes == 0)
1323		xfs_inode_clear_cowblocks_tag(ip);
1324}
1325
1326/*
1327 * Free up the underlying blocks past new_size.  The new size must be smaller
1328 * than the current size.  This routine can be used for both the attribute and
1329 * data forks, and does not modify the inode size, which is left to the caller.
1330 *
1331 * The transaction passed to this routine must have made a permanent log
1332 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1333 * given transaction and start new ones, so make sure everything involved in
1334 * the transaction is tidy before calling here.  A (possibly new) transaction
1335 * will be returned to the caller to be committed.  The incoming transaction must
1336 * already include the inode, and both inode locks must be held exclusively.
1337 * The inode must also be "held" within the transaction.  On return the inode
1338 * will be "held" within the returned transaction.  This routine does NOT
1339 * require any disk space to be reserved for it within the transaction.
1340 *
1341 * If we get an error, we must return with the inode locked and linked into the
1342 * current transaction. This keeps things simple for the higher level code,
1343 * because it always knows that the inode is locked and held in the transaction
1344 * that returns to it whether errors occur or not.  We don't mark the inode
1345 * dirty on error so that transactions can be easily aborted if possible.
1346 */
1347int
1348xfs_itruncate_extents_flags(
1349	struct xfs_trans	**tpp,
1350	struct xfs_inode	*ip,
1351	int			whichfork,
1352	xfs_fsize_t		new_size,
1353	int			flags)
1354{
1355	struct xfs_mount	*mp = ip->i_mount;
1356	struct xfs_trans	*tp = *tpp;
1357	xfs_fileoff_t		first_unmap_block;
1358	xfs_filblks_t		unmap_len;
1359	int			error = 0;
1360
1361	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1362	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1363	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1364	ASSERT(new_size <= XFS_ISIZE(ip));
1365	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1366	ASSERT(ip->i_itemp != NULL);
1367	ASSERT(ip->i_itemp->ili_lock_flags == 0);
1368	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1369
1370	trace_xfs_itruncate_extents_start(ip, new_size);
1371
1372	flags |= xfs_bmapi_aflag(whichfork);
1373
1374	/*
1375	 * Since it is possible for space to become allocated beyond
1376	 * the end of the file (in a crash where the space is allocated
1377	 * but the inode size is not yet updated), simply remove any
1378	 * blocks which show up between the new EOF and the maximum
1379	 * possible file size.
1380	 *
1381	 * We have to free all the blocks to the bmbt maximum offset, even if
1382	 * the page cache can't scale that far.
1383	 */
1384	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1385	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
1386		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
1387		return 0;
1388	}
1389
1390	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
1391	while (unmap_len > 0) {
1392		ASSERT(tp->t_firstblock == NULLFSBLOCK);
1393		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
1394				flags, XFS_ITRUNC_MAX_EXTENTS);
1395		if (error)
1396			goto out;
1397
1398		/* free the just unmapped extents */
1399		error = xfs_defer_finish(&tp);
1400		if (error)
1401			goto out;
1402	}
1403
1404	if (whichfork == XFS_DATA_FORK) {
1405		/* Remove all pending CoW reservations. */
1406		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1407				first_unmap_block, XFS_MAX_FILEOFF, true);
1408		if (error)
1409			goto out;
1410
1411		xfs_itruncate_clear_reflink_flags(ip);
1412	}
1413
1414	/*
1415	 * Always re-log the inode so that our permanent transaction can keep
1416	 * on rolling it forward in the log.
1417	 */
1418	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1419
1420	trace_xfs_itruncate_extents_end(ip, new_size);
1421
1422out:
1423	*tpp = tp;
1424	return error;
1425}
1426
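/*
 * Illustrative sketch (assumed caller, error handling omitted) of the
 * calling convention documented above; compare xfs_inactive_truncate()
 * below, which follows the same pattern:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */
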
1427int
1428xfs_release(
1429	xfs_inode_t	*ip)
1430{
1431	xfs_mount_t	*mp = ip->i_mount;
1432	int		error = 0;
1433
1434	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1435		return 0;
1436
1437	/* If this is a read-only mount, don't do this (would generate I/O) */
1438	if (mp->m_flags & XFS_MOUNT_RDONLY)
1439		return 0;
1440
1441	if (!XFS_FORCED_SHUTDOWN(mp)) {
1442		int truncated;
1443
1444		/*
1445		 * If we previously truncated this file and removed old data
1446		 * in the process, we want to initiate "early" writeout on
1447		 * the last close.  This is an attempt to combat the notorious
1448		 * NULL files problem which is particularly noticeable from a
1449		 * truncate down, buffered (re-)write (delalloc), followed by
1450		 * a crash.  What we are effectively doing here is
1451		 * significantly reducing the time window where we'd otherwise
1452		 * be exposed to that problem.
1453		 */
1454		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1455		if (truncated) {
1456			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1457			if (ip->i_delayed_blks > 0) {
1458				error = filemap_flush(VFS_I(ip)->i_mapping);
1459				if (error)
1460					return error;
1461			}
1462		}
1463	}
1464
1465	if (VFS_I(ip)->i_nlink == 0)
1466		return 0;
1467
1468	/*
1469	 * If we can't get the iolock just skip truncating the blocks past EOF
1470	 * because we could deadlock with the mmap_lock otherwise. We'll get
1471	 * another chance to drop them once the last reference to the inode is
1472	 * dropped, so we'll never leak blocks permanently.
1473	 */
1474	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
1475		return 0;
1476
1477	if (xfs_can_free_eofblocks(ip, false)) {
1478		/*
1479		 * If the inode is being opened, written and closed
1480		 * frequently and we have delayed allocation blocks outstanding
1481		 * (e.g. streaming writes from the NFS server), truncating the
1482		 * blocks past EOF will cause fragmentation to occur.
1483		 *
1484		 * In this case don't do the truncation, but we have to be
1485		 * careful how we detect this case. Blocks beyond EOF show up as
1486		 * i_delayed_blks even when the inode is clean, so we need to
1487		 * truncate them away first before checking for a dirty release.
1488		 * Hence on the first dirty close we will still remove the
1489		 * speculative allocation, but after that we will leave it in
1490		 * place.
1491		 */
1492		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1493			goto out_unlock;
1494
1495		error = xfs_free_eofblocks(ip);
1496		if (error)
1497			goto out_unlock;
1498
1499		/* delalloc blocks after truncation means it really is dirty */
1500		if (ip->i_delayed_blks)
1501			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1502	}
1503
1504out_unlock:
1505	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1506	return error;
1507}
1508
1509/*
1510 * xfs_inactive_truncate
1511 *
1512 * Called to perform a truncate when an inode becomes unlinked.
1513 */
1514STATIC int
1515xfs_inactive_truncate(
1516	struct xfs_inode *ip)
1517{
1518	struct xfs_mount	*mp = ip->i_mount;
1519	struct xfs_trans	*tp;
1520	int			error;
1521
1522	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1523	if (error) {
1524		ASSERT(XFS_FORCED_SHUTDOWN(mp));
1525		return error;
1526	}
1527	xfs_ilock(ip, XFS_ILOCK_EXCL);
1528	xfs_trans_ijoin(tp, ip, 0);
1529
1530	/*
1531	 * Log the inode size first to prevent stale data exposure in the event
1532	 * of a system crash before the truncate completes. See the related
1533	 * comment in xfs_vn_setattr_size() for details.
1534	 */
1535	ip->i_disk_size = 0;
1536	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1537
1538	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1539	if (error)
1540		goto error_trans_cancel;
1541
1542	ASSERT(ip->i_df.if_nextents == 0);
1543
1544	error = xfs_trans_commit(tp);
1545	if (error)
1546		goto error_unlock;
1547
1548	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1549	return 0;
1550
1551error_trans_cancel:
1552	xfs_trans_cancel(tp);
1553error_unlock:
1554	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1555	return error;
1556}
1557
1558/*
1559 * xfs_inactive_ifree()
1560 *
1561 * Perform the inode free when an inode is unlinked.
1562 */
1563STATIC int
1564xfs_inactive_ifree(
1565	struct xfs_inode *ip)
1566{
1567	struct xfs_mount	*mp = ip->i_mount;
1568	struct xfs_trans	*tp;
1569	int			error;
1570
1571	/*
1572	 * We try to use a per-AG reservation for any block needed by the finobt
1573	 * tree, but as the finobt feature predates the per-AG reservation
1574	 * support a degraded file system might not have enough space for the
1575	 * reservation at mount time.  In that case try to dip into the reserved
1576	 * pool and pray.
1577	 *
1578	 * Send a warning if the reservation does happen to fail, as the inode
1579	 * now remains allocated and sits on the unlinked list until the fs is
1580	 * repaired.
1581	 */
1582	if (unlikely(mp->m_finobt_nores)) {
1583		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1584				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1585				&tp);
1586	} else {
1587		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1588	}
1589	if (error) {
1590		if (error == -ENOSPC) {
1591			xfs_warn_ratelimited(mp,
1592			"Failed to remove inode(s) from unlinked list. "
1593			"Please free space, unmount and run xfs_repair.");
1594		} else {
1595			ASSERT(XFS_FORCED_SHUTDOWN(mp));
1596		}
1597		return error;
1598	}
1599
1600	/*
1601	 * We do not hold the inode locked across the entire rolling transaction
1602	 * here. We only need to hold it for the first transaction that
1603	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
1604	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
1605	 * here breaks the relationship between cluster buffer invalidation and
1606	 * stale inode invalidation on cluster buffer item journal commit
1607	 * completion, and can result in leaving dirty stale inodes hanging
1608	 * around in memory.
1609	 *
1610	 * We have no need for serialising this inode operation against other
1611	 * operations - we freed the inode and hence reallocation is required
1612	 * and that will serialise on reallocating the space the deferops need
1613	 * to free. Hence we can unlock the inode on the first commit of
1614	 * the transaction rather than roll it right through the deferops. This
1615	 * avoids relogging the XFS_ISTALE inode.
1616	 *
1617	 * We check that xfs_ifree() hasn't grown an internal transaction roll
1618	 * by asserting that the inode is still locked when it returns.
1619	 */
1620	xfs_ilock(ip, XFS_ILOCK_EXCL);
1621	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1622
1623	error = xfs_ifree(tp, ip);
1624	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1625	if (error) {
1626		/*
1627		 * might do that; we need to make sure.  Otherwise the
1628		 * might do that, we need to make sure.  Otherwise the
1629		 * inode might be lost for a long time or forever.
1630		 */
1631		if (!XFS_FORCED_SHUTDOWN(mp)) {
1632			xfs_notice(mp, "%s: xfs_ifree returned error %d",
1633				__func__, error);
1634			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1635		}
1636		xfs_trans_cancel(tp);
1637		return error;
1638	}
1639
1640	/*
1641	 * Credit the quota account(s). The inode is gone.
1642	 */
1643	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1644
1645	/*
1646	 * Just ignore errors at this point.  There is nothing we can do except
1647	 * to try to keep going. Make sure it's not a silent error.
1648	 */
1649	error = xfs_trans_commit(tp);
1650	if (error)
1651		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1652			__func__, error);
1653
1654	return 0;
1655}
1656
1657/*
1658 * xfs_inactive
1659 *
1660 * This is called when the reference count for the vnode
1661 * goes to zero.  If the file has been unlinked, then it must
1662 * now be truncated.  Also, we clear all of the read-ahead state
1663 * kept for the inode here since the file is now closed.
1664 */
1665void
1666xfs_inactive(
1667	xfs_inode_t	*ip)
1668{
1669	struct xfs_mount	*mp;
1670	int			error;
1671	int			truncate = 0;
1672
1673	/*
1674	 * If the inode is already free, then there can be nothing
1675	 * to clean up here.
1676	 */
1677	if (VFS_I(ip)->i_mode == 0) {
1678		ASSERT(ip->i_df.if_broot_bytes == 0);
1679		goto out;
1680	}
1681
1682	mp = ip->i_mount;
1683	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1684
1685	/* If this is a read-only mount, don't do this (would generate I/O) */
1686	if (mp->m_flags & XFS_MOUNT_RDONLY)
1687		goto out;
1688
1689	/* Metadata inodes require explicit resource cleanup. */
1690	if (xfs_is_metadata_inode(ip))
1691		goto out;
1692
1693	/* Try to clean out the cow blocks if there are any. */
1694	if (xfs_inode_has_cow_data(ip))
1695		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1696
1697	if (VFS_I(ip)->i_nlink != 0) {
1698		/*
1699		 * force is true because we are evicting an inode from the
1700		 * cache. Post-eof blocks must be freed, lest we end up with
1701		 * broken free space accounting.
1702		 *
1703		 * Note: don't bother with iolock here since lockdep complains
1704		 * about acquiring it in reclaim context. We have the only
1705		 * reference to the inode at this point anyways.
1706		 */
1707		if (xfs_can_free_eofblocks(ip, true))
1708			xfs_free_eofblocks(ip);
1709
1710		goto out;
1711	}
1712
1713	if (S_ISREG(VFS_I(ip)->i_mode) &&
1714	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1715	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1716		truncate = 1;
1717
1718	error = xfs_qm_dqattach(ip);
1719	if (error)
1720		goto out;
1721
1722	if (S_ISLNK(VFS_I(ip)->i_mode))
1723		error = xfs_inactive_symlink(ip);
1724	else if (truncate)
1725		error = xfs_inactive_truncate(ip);
1726	if (error)
1727		goto out;
1728
1729	/*
1730	 * If there are attributes associated with the file then blow them away
1731	 * now.  The code calls a routine that recursively deconstructs the
1732	 * attribute fork. It also blows away the in-core attribute fork.
1733	 */
1734	if (XFS_IFORK_Q(ip)) {
1735		error = xfs_attr_inactive(ip);
1736		if (error)
1737			goto out;
1738	}
1739
1740	ASSERT(!ip->i_afp);
1741	ASSERT(ip->i_forkoff == 0);
1742
1743	/*
1744	 * Free the inode.
1745	 */
1746	xfs_inactive_ifree(ip);
1747
1748out:
1749	/*
1750	 * We're done making metadata updates for this inode, so we can release
1751	 * the attached dquots.
1752	 */
1753	xfs_qm_dqdetach(ip);
1754}
1755
1756/*
1757 * In-Core Unlinked List Lookups
1758 * =============================
1759 *
1760 * Every inode is supposed to be reachable from some other piece of metadata
1761 * with the exception of the root directory.  Inodes with a connection to a
1762 * file descriptor but not linked from anywhere in the on-disk directory tree
1763 * are collectively known as unlinked inodes, though the filesystem itself
1764 * maintains links to these inodes so that on-disk metadata are consistent.
1765 *
1766 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
1767 * header contains a number of buckets that point to an inode, and each inode
1768 * record has a pointer to the next inode in the hash chain.  This
1769 * singly-linked list causes scaling problems in the iunlink remove function
1770 * because we must walk that list to find the inode that points to the inode
1771 * being removed from the unlinked hash bucket list.
1772 *
1773 * What if we modelled the unlinked list as a collection of records capturing
1774 * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
1775 * have a fast way to look up unlinked list predecessors, which avoids the
1776 * slow list walk.  That's exactly what we do here (in-core) with a per-AG
1777 * rhashtable.
1778 *
1779 * Because this is a backref cache, we ignore operational failures since the
1780 * iunlink code can fall back to the slow bucket walk.  The only errors that
1781 * should bubble out are for obviously incorrect situations.
1782 *
1783 * All users of the backref cache MUST hold the AGI buffer lock to serialize
1784 * access or have otherwise provided for concurrency control.
1785 */
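/*
 * Illustrative example (not from the original source): suppose an AGI
 * bucket heads the chain A -> B -> C, i.e. A.next_unlinked == B and
 * B.next_unlinked == C.  The cache then holds two records keyed on the
 * "Y" side of each relation:
 *
 *	{ iu_agino = A, iu_next_unlinked = B }	(key: B)
 *	{ iu_agino = B, iu_next_unlinked = C }	(key: C)
 *
 * Removing C from the chain looks up key C and finds its predecessor B
 * directly, instead of walking the bucket list from A.
 */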
1786
1787/* Capture a "X.next_unlinked = Y" relationship. */
1788struct xfs_iunlink {
1789	struct rhash_head	iu_rhash_head;
1790	xfs_agino_t		iu_agino;		/* X */
1791	xfs_agino_t		iu_next_unlinked;	/* Y */
1792};
1793
1794/* Unlinked list predecessor lookup hashtable construction */
1795static int
1796xfs_iunlink_obj_cmpfn(
1797	struct rhashtable_compare_arg	*arg,
1798	const void			*obj)
1799{
1800	const xfs_agino_t		*key = arg->key;
1801	const struct xfs_iunlink	*iu = obj;
1802
1803	if (iu->iu_next_unlinked != *key)
1804		return 1;
1805	return 0;
1806}
1807
1808static const struct rhashtable_params xfs_iunlink_hash_params = {
1809	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
1810	.key_len		= sizeof(xfs_agino_t),
1811	.key_offset		= offsetof(struct xfs_iunlink,
1812					   iu_next_unlinked),
1813	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
1814	.automatic_shrinking	= true,
1815	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
1816};
1817
1818/*
1819 * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
1820 * relation is found.
1821 */
1822static xfs_agino_t
1823xfs_iunlink_lookup_backref(
1824	struct xfs_perag	*pag,
1825	xfs_agino_t		agino)
1826{
1827	struct xfs_iunlink	*iu;
1828
1829	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
1830			xfs_iunlink_hash_params);
1831	return iu ? iu->iu_agino : NULLAGINO;
1832}
1833
1834/*
1835 * Take ownership of an iunlink cache entry and insert it into the hash table.
1836 * If successful, the entry will be owned by the cache; if not, it is freed.
1837 * Either way, the caller does not own @iu after this call.
1838 */
1839static int
1840xfs_iunlink_insert_backref(
1841	struct xfs_perag	*pag,
1842	struct xfs_iunlink	*iu)
1843{
1844	int			error;
1845
1846	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
1847			&iu->iu_rhash_head, xfs_iunlink_hash_params);
1848	/*
1849	 * Fail loudly if there already was an entry because that's a sign of
1850	 * corruption of in-memory data.  Also fail loudly if we see an error
1851	 * code we didn't anticipate from the rhashtable code.  Currently we
1852	 * only anticipate ENOMEM.
1853	 */
1854	if (error) {
1855		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
1856		kmem_free(iu);
1857	}
1858	/*
1859	 * Absorb any runtime errors that aren't a result of corruption because
1860	 * this is a cache and we can always fall back to bucket list scanning.
1861	 */
1862	if (error != 0 && error != -EEXIST)
1863		error = 0;
1864	return error;
1865}
1866
1867/* Remember that @prev_agino.next_unlinked = @this_agino. */
1868static int
1869xfs_iunlink_add_backref(
1870	struct xfs_perag	*pag,
1871	xfs_agino_t		prev_agino,
1872	xfs_agino_t		this_agino)
1873{
1874	struct xfs_iunlink	*iu;
1875
1876	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
1877		return 0;
1878
1879	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
1880	iu->iu_agino = prev_agino;
1881	iu->iu_next_unlinked = this_agino;
1882
1883	return xfs_iunlink_insert_backref(pag, iu);
1884}
1885
1886/*
1887 * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
1888 * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
1889 * wasn't any such entry then we don't bother.
1890 */
1891static int
1892xfs_iunlink_change_backref(
1893	struct xfs_perag	*pag,
1894	xfs_agino_t		agino,
1895	xfs_agino_t		next_unlinked)
1896{
1897	struct xfs_iunlink	*iu;
1898	int			error;
1899
1900	/* Look up the old entry; if there wasn't one then exit. */
1901	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
1902			xfs_iunlink_hash_params);
1903	if (!iu)
1904		return 0;
1905
1906	/*
1907	 * Remove the entry.  This shouldn't ever return an error, but if we
1908	 * couldn't remove the old entry we don't want to add it again to the
1909	 * hash table, and if the entry disappeared on us then someone's
1910	 * violated the locking rules and we need to fail loudly.  Either way
1911	 * we cannot remove the inode because internal state is or would have
1912	 * been corrupt.
1913	 */
1914	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
1915			&iu->iu_rhash_head, xfs_iunlink_hash_params);
1916	if (error)
1917		return error;
1918
1919	/* If there is no new next entry, just free our item and return. */
1920	if (next_unlinked == NULLAGINO) {
1921		kmem_free(iu);
1922		return 0;
1923	}
1924
1925	/* Update the entry and re-add it to the hash table. */
1926	iu->iu_next_unlinked = next_unlinked;
1927	return xfs_iunlink_insert_backref(pag, iu);
1928}
1929
1930/* Set up the in-core predecessor structures. */
1931int
1932xfs_iunlink_init(
1933	struct xfs_perag	*pag)
1934{
1935	return rhashtable_init(&pag->pagi_unlinked_hash,
1936			&xfs_iunlink_hash_params);
1937}
1938
1939/* Free the in-core predecessor structures. */
1940static void
1941xfs_iunlink_free_item(
1942	void			*ptr,
1943	void			*arg)
1944{
1945	struct xfs_iunlink	*iu = ptr;
1946	bool			*freed_anything = arg;
1947
1948	*freed_anything = true;
1949	kmem_free(iu);
1950}
1951
1952void
1953xfs_iunlink_destroy(
1954	struct xfs_perag	*pag)
1955{
1956	bool			freed_anything = false;
1957
1958	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
1959			xfs_iunlink_free_item, &freed_anything);
1960
1961	ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
1962}
1963
1964/*
1965 * Point the AGI unlinked bucket at an inode and log the results.  The caller
1966 * is responsible for validating the old value.
1967 */
1968STATIC int
1969xfs_iunlink_update_bucket(
1970	struct xfs_trans	*tp,
1971	struct xfs_perag	*pag,
1972	struct xfs_buf		*agibp,
1973	unsigned int		bucket_index,
1974	xfs_agino_t		new_agino)
1975{
1976	struct xfs_agi		*agi = agibp->b_addr;
1977	xfs_agino_t		old_value;
1978	int			offset;
1979
1980	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, pag->pag_agno, new_agino));
1981
1982	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1983	trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
1984			old_value, new_agino);
1985
1986	/*
1987	 * We should never find the head of the list already set to the value
1988	 * passed in because either we're adding or removing ourselves from the
1989	 * head of the list.
1990	 */
1991	if (old_value == new_agino) {
1992		xfs_buf_mark_corrupt(agibp);
1993		return -EFSCORRUPTED;
1994	}
1995
1996	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
1997	offset = offsetof(struct xfs_agi, agi_unlinked) +
1998			(sizeof(xfs_agino_t) * bucket_index);
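	/*
	 * Worked example (illustrative): xfs_agino_t is 4 bytes, so for
	 * bucket_index == 3 this logs only the 4-byte range starting 12
	 * bytes into agi_unlinked -- i.e. just the one bucket slot that
	 * changed.
	 */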
1999	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
2000	return 0;
2001}
2002
2003/* Set an on-disk inode's next_unlinked pointer. */
2004STATIC void
2005xfs_iunlink_update_dinode(
2006	struct xfs_trans	*tp,
2007	struct xfs_perag	*pag,
2008	xfs_agino_t		agino,
2009	struct xfs_buf		*ibp,
2010	struct xfs_dinode	*dip,
2011	struct xfs_imap		*imap,
2012	xfs_agino_t		next_agino)
2013{
2014	struct xfs_mount	*mp = tp->t_mountp;
2015	int			offset;
2016
2017	ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
2018
2019	trace_xfs_iunlink_update_dinode(mp, pag->pag_agno, agino,
2020			be32_to_cpu(dip->di_next_unlinked), next_agino);
2021
2022	dip->di_next_unlinked = cpu_to_be32(next_agino);
2023	offset = imap->im_boffset +
2024			offsetof(struct xfs_dinode, di_next_unlinked);
2025
2026	/* need to recalc the inode CRC if appropriate */
2027	xfs_dinode_calc_crc(mp, dip);
2028	xfs_trans_inode_buf(tp, ibp);
2029	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2030}
2031
2032/* Set an in-core inode's unlinked pointer and return the old value. */
2033STATIC int
2034xfs_iunlink_update_inode(
2035	struct xfs_trans	*tp,
2036	struct xfs_inode	*ip,
2037	struct xfs_perag	*pag,
2038	xfs_agino_t		next_agino,
2039	xfs_agino_t		*old_next_agino)
2040{
2041	struct xfs_mount	*mp = tp->t_mountp;
2042	struct xfs_dinode	*dip;
2043	struct xfs_buf		*ibp;
2044	xfs_agino_t		old_value;
2045	int			error;
2046
2047	ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
2048
2049	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
2050	if (error)
2051		return error;
2052	dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
2053
2054	/* Make sure the old pointer isn't garbage. */
2055	old_value = be32_to_cpu(dip->di_next_unlinked);
2056	if (!xfs_verify_agino_or_null(mp, pag->pag_agno, old_value)) {
2057		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
2058				sizeof(*dip), __this_address);
2059		error = -EFSCORRUPTED;
2060		goto out;
2061	}
2062
2063	/*
2064	 * Since we're updating a linked list, we should never find that the
2065	 * current pointer is the same as the new value, unless we're
2066	 * terminating the list.
2067	 */
2068	*old_next_agino = old_value;
2069	if (old_value == next_agino) {
2070		if (next_agino != NULLAGINO) {
2071			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
2072					dip, sizeof(*dip), __this_address);
2073			error = -EFSCORRUPTED;
2074		}
2075		goto out;
2076	}
2077
2078	/* Ok, update the new pointer. */
2079	xfs_iunlink_update_dinode(tp, pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
2080			ibp, dip, &ip->i_imap, next_agino);
2081	return 0;
2082out:
2083	xfs_trans_brelse(tp, ibp);
2084	return error;
2085}
2086
2087/*
2088 * This is called when the inode's link count has gone to 0 or we are creating
2089 * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
2090 *
2091 * We place the on-disk inode on a list in the AGI.  It will be pulled from this
2092 * list when the inode is freed.
2093 */
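/*
 * Illustrative notes (not from the original source): the bucket is chosen
 * below by agino % XFS_AGI_UNLINKED_BUCKETS, so with the usual 64 buckets
 * an agino of 131 lands in bucket 3.  A typical O_TMPFILE lifecycle is:
 *
 *	open(O_TMPFILE)	-> xfs_iunlink() puts the inode on an AGI bucket
 *	linkat()	-> xfs_iunlink_remove() takes it back off the list
 *	crash		-> log recovery frees anything still on the lists
 */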
2094STATIC int
2095xfs_iunlink(
2096	struct xfs_trans	*tp,
2097	struct xfs_inode	*ip)
2098{
2099	struct xfs_mount	*mp = tp->t_mountp;
2100	struct xfs_perag	*pag;
2101	struct xfs_agi		*agi;
2102	struct xfs_buf		*agibp;
2103	xfs_agino_t		next_agino;
2104	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2105	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2106	int			error;
2107
2108	ASSERT(VFS_I(ip)->i_nlink == 0);
2109	ASSERT(VFS_I(ip)->i_mode != 0);
2110	trace_xfs_iunlink(ip);
2111
2112	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2113
2114	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2115	error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
2116	if (error)
2117		goto out;
2118	agi = agibp->b_addr;
2119
2120	/*
2121	 * Get the index into the agi hash table for the list this inode will
2122	 * go on.  Make sure the pointer isn't garbage and that this inode
2123	 * isn't already on the list.
2124	 */
2125	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2126	if (next_agino == agino ||
2127	    !xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)) {
2128		xfs_buf_mark_corrupt(agibp);
2129		error = -EFSCORRUPTED;
2130		goto out;
2131	}
2132
2133	if (next_agino != NULLAGINO) {
2134		xfs_agino_t		old_agino;
2135
2136		/*
2137		 * There is already another inode in the bucket, so point this
2138		 * inode to the current head of the list.
2139		 */
2140		error = xfs_iunlink_update_inode(tp, ip, pag, next_agino,
2141				&old_agino);
2142		if (error)
2143			goto out;
2144		ASSERT(old_agino == NULLAGINO);
2145
2146		/*
2147		 * agino has been unlinked, add a backref from the next inode
2148		 * back to agino.
2149		 */
2150		error = xfs_iunlink_add_backref(pag, agino, next_agino);
2151		if (error)
2152			goto out;
2153	}
2154
2155	/* Point the head of the list at this inode. */
2156	error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
2157out:
2158	xfs_perag_put(pag);
2159	return error;
2160}
2161
2162/* Return the imap, dinode pointer, and buffer for an inode. */
2163STATIC int
2164xfs_iunlink_map_ino(
2165	struct xfs_trans	*tp,
2166	xfs_agnumber_t		agno,
2167	xfs_agino_t		agino,
2168	struct xfs_imap		*imap,
2169	struct xfs_dinode	**dipp,
2170	struct xfs_buf		**bpp)
2171{
2172	struct xfs_mount	*mp = tp->t_mountp;
2173	int			error;
2174
2175	imap->im_blkno = 0;
2176	error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
2177	if (error) {
2178		xfs_warn(mp, "%s: xfs_imap returned error %d.",
2179				__func__, error);
2180		return error;
2181	}
2182
2183	error = xfs_imap_to_bp(mp, tp, imap, bpp);
2184	if (error) {
2185		xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2186				__func__, error);
2187		return error;
2188	}
2189
2190	*dipp = xfs_buf_offset(*bpp, imap->im_boffset);
2191	return 0;
2192}
2193
2194/*
2195 * Walk the unlinked chain from @head_agino until we find the inode that
2196 * points to @target_agino.  Return the inode number, map, dinode pointer,
2197 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
2198 *
2199 * @tp, @pag, @head_agino, and @target_agino are input parameters.
2200 * @agino, @imap, @dipp, and @bpp are all output parameters.
2201 *
2202 * Do not call this function if @target_agino is the head of the list.
2203 */
2204STATIC int
2205xfs_iunlink_map_prev(
2206	struct xfs_trans	*tp,
2207	struct xfs_perag	*pag,
2208	xfs_agino_t		head_agino,
2209	xfs_agino_t		target_agino,
2210	xfs_agino_t		*agino,
2211	struct xfs_imap		*imap,
2212	struct xfs_dinode	**dipp,
2213	struct xfs_buf		**bpp)
2214{
2215	struct xfs_mount	*mp = tp->t_mountp;
2216	xfs_agino_t		next_agino;
2217	int			error;
2218
2219	ASSERT(head_agino != target_agino);
2220	*bpp = NULL;
2221
2222	/* See if our backref cache can find it faster. */
2223	*agino = xfs_iunlink_lookup_backref(pag, target_agino);
2224	if (*agino != NULLAGINO) {
2225		error = xfs_iunlink_map_ino(tp, pag->pag_agno, *agino, imap,
2226				dipp, bpp);
2227		if (error)
2228			return error;
2229
2230		if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
2231			return 0;
2232
2233		/*
2234		 * If we get here the cache contents were corrupt, so drop the
2235		 * buffer and fall back to walking the bucket list.
2236		 */
2237		xfs_trans_brelse(tp, *bpp);
2238		*bpp = NULL;
2239		WARN_ON_ONCE(1);
2240	}
2241
2242	trace_xfs_iunlink_map_prev_fallback(mp, pag->pag_agno);
2243
2244	/* Otherwise, walk the entire bucket until we find it. */
2245	next_agino = head_agino;
2246	while (next_agino != target_agino) {
2247		xfs_agino_t	unlinked_agino;
2248
2249		if (*bpp)
2250			xfs_trans_brelse(tp, *bpp);
2251
2252		*agino = next_agino;
2253		error = xfs_iunlink_map_ino(tp, pag->pag_agno, next_agino, imap,
2254				dipp, bpp);
2255		if (error)
2256			return error;
2257
2258		unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2259		/*
2260		 * Make sure this pointer is valid and isn't an obvious
2261		 * infinite loop.
2262		 */
2263		if (!xfs_verify_agino(mp, pag->pag_agno, unlinked_agino) ||
2264		    next_agino == unlinked_agino) {
2265			XFS_CORRUPTION_ERROR(__func__,
2266					XFS_ERRLEVEL_LOW, mp,
2267					*dipp, sizeof(**dipp));
2268			error = -EFSCORRUPTED;
2269			return error;
2270		}
2271		next_agino = unlinked_agino;
2272	}
2273
2274	return 0;
2275}
2276
2277/*
2278 * Pull the on-disk inode from the AGI unlinked list.
2279 */
2280STATIC int
2281xfs_iunlink_remove(
2282	struct xfs_trans	*tp,
2283	struct xfs_perag	*pag,
2284	struct xfs_inode	*ip)
2285{
2286	struct xfs_mount	*mp = tp->t_mountp;
2287	struct xfs_agi		*agi;
2288	struct xfs_buf		*agibp;
2289	struct xfs_buf		*last_ibp;
2290	struct xfs_dinode	*last_dip = NULL;
2291	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2292	xfs_agino_t		next_agino;
2293	xfs_agino_t		head_agino;
2294	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2295	int			error;
2296
2297	trace_xfs_iunlink_remove(ip);
2298
2299	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2300	error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
2301	if (error)
2302		return error;
2303	agi = agibp->b_addr;
2304
2305	/*
2306	 * Get the index into the agi hash table for the list this inode is
2307	 * on.  Make sure the head pointer isn't garbage.
2308	 */
2309	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2310	if (!xfs_verify_agino(mp, pag->pag_agno, head_agino)) {
2311		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2312				agi, sizeof(*agi));
2313		return -EFSCORRUPTED;
2314	}
2315
2316	/*
2317	 * Set our inode's next_unlinked pointer to NULL and then return
2318	 * the old pointer value so that we can update whatever was previous
2319	 * to us in the list to point to whatever was next in the list.
2320	 */
2321	error = xfs_iunlink_update_inode(tp, ip, pag, NULLAGINO, &next_agino);
2322	if (error)
2323		return error;
2324
2325	/*
2326	 * If there was a backref pointing from the next inode back to this
2327	 * one, remove it because we've removed this inode from the list.
2328	 *
2329	 * Later, if this inode was in the middle of the list, we'll update
2330	 * the backref that pointed at this inode to point at our old next.
2331	 */
2332	if (next_agino != NULLAGINO) {
2333		error = xfs_iunlink_change_backref(pag, next_agino, NULLAGINO);
2334		if (error)
2335			return error;
2336	}
2337
2338	if (head_agino != agino) {
2339		struct xfs_imap	imap;
2340		xfs_agino_t	prev_agino;
2341
2342		/* We need to search the list for the inode being freed. */
2343		error = xfs_iunlink_map_prev(tp, pag, head_agino, agino,
2344				&prev_agino, &imap, &last_dip, &last_ibp);
2345		if (error)
2346			return error;
2347
2348		/* Point the previous inode on the list to the next inode. */
2349		xfs_iunlink_update_dinode(tp, pag, prev_agino, last_ibp,
2350				last_dip, &imap, next_agino);
2351
2352		/*
2353		 * Now we deal with the backref for this inode.  If this inode
2354		 * pointed at a real inode, change the backref that pointed to
2355		 * us to point to our old next.  If this inode was the end of
2356		 * the list, delete the backref that pointed to us.  Note that
2357		 * change_backref takes care of deleting the backref if
2358		 * next_agino is NULLAGINO.
2359		 */
2360		return xfs_iunlink_change_backref(agibp->b_pag, agino,
2361				next_agino);
2362	}
2363
2364	/* Point the head of the list to the next unlinked inode. */
2365	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
2366			next_agino);
2367}
2368
2369/*
2370 * Look up the specified inode number and, if it is not already marked
2371 * XFS_ISTALE, mark it stale. We should only find clean inodes in this
2372 * lookup that aren't already stale.
2373 */
2374static void
2375xfs_ifree_mark_inode_stale(
2376	struct xfs_perag	*pag,
2377	struct xfs_inode	*free_ip,
2378	xfs_ino_t		inum)
2379{
2380	struct xfs_mount	*mp = pag->pag_mount;
2381	struct xfs_inode_log_item *iip;
2382	struct xfs_inode	*ip;
2383
2384retry:
2385	rcu_read_lock();
2386	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2387
2388	/* Inode not in memory, nothing to do */
2389	if (!ip) {
2390		rcu_read_unlock();
2391		return;
2392	}
2393
2394	/*
2395	 * Because this is an RCU-protected lookup, we could find a recently
2396	 * freed or even reallocated inode during the lookup. We need to check
2397	 * under the i_flags_lock for a valid inode here. Skip it if it is not
2398	 * valid, the wrong inode or stale.
2399	 */
2400	spin_lock(&ip->i_flags_lock);
2401	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2402		goto out_iflags_unlock;
2403
2404	/*
2405	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2406	 * other inodes that we did not find in the list attached to the buffer
2407	 * and are not already marked stale. If we can't lock it, back off and
2408	 * retry.
2409	 */
2410	if (ip != free_ip) {
2411		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2412			spin_unlock(&ip->i_flags_lock);
2413			rcu_read_unlock();
2414			delay(1);
2415			goto retry;
2416		}
2417	}
2418	ip->i_flags |= XFS_ISTALE;
2419
2420	/*
2421	 * If the inode is flushing, it is already attached to the buffer.  All
2422	 * we need to do here is mark the inode stale so buffer IO completion
2423	 * will remove it from the AIL.
2424	 */
2425	iip = ip->i_itemp;
2426	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
2427		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2428		ASSERT(iip->ili_last_fields);
2429		goto out_iunlock;
2430	}
2431
2432	/*
2433	 * Inodes not attached to the buffer can be released immediately.
2434	 * Everything else has to go through xfs_iflush_abort() on journal
2435	 * commit as the flock synchronises removal of the inode from the
2436	 * cluster buffer against inode reclaim.
2437	 */
2438	if (!iip || list_empty(&iip->ili_item.li_bio_list))
2439		goto out_iunlock;
2440
2441	__xfs_iflags_set(ip, XFS_IFLUSHING);
2442	spin_unlock(&ip->i_flags_lock);
2443	rcu_read_unlock();
2444
2445	/* we have a dirty inode in memory that has not yet been flushed. */
2446	spin_lock(&iip->ili_lock);
2447	iip->ili_last_fields = iip->ili_fields;
2448	iip->ili_fields = 0;
2449	iip->ili_fsync_fields = 0;
2450	spin_unlock(&iip->ili_lock);
2451	ASSERT(iip->ili_last_fields);
2452
2453	if (ip != free_ip)
2454		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2455	return;
2456
2457out_iunlock:
2458	if (ip != free_ip)
2459		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2460out_iflags_unlock:
2461	spin_unlock(&ip->i_flags_lock);
2462	rcu_read_unlock();
2463}
2464
2465/*
2466 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2467 * inodes that are in memory - they all must be marked stale and attached to
2468 * the cluster buffer.
2469 */
2470static int
2471xfs_ifree_cluster(
2472	struct xfs_trans	*tp,
2473	struct xfs_perag	*pag,
2474	struct xfs_inode	*free_ip,
2475	struct xfs_icluster	*xic)
2476{
2477	struct xfs_mount	*mp = free_ip->i_mount;
2478	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
2479	struct xfs_buf		*bp;
2480	xfs_daddr_t		blkno;
2481	xfs_ino_t		inum = xic->first_ino;
2482	int			nbufs;
2483	int			i, j;
2484	int			ioffset;
2485	int			error;
2486
2487	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2488
2489	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2490		/*
2491		 * The allocation bitmap tells us which inodes of the chunk were
2492		 * physically allocated. Skip the cluster if an inode falls into
2493		 * a sparse region.
2494		 */
2495		ioffset = inum - xic->first_ino;
2496		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2497			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2498			continue;
2499		}
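		/*
		 * Example of the check above (illustrative; assumes 32
		 * inodes per cluster): if the chunk's second cluster was
		 * never physically allocated, alloc bits 32..63 are clear,
		 * so ioffset == 32 yields a zero mask test and the whole
		 * cluster buffer is skipped.
		 */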
2500
2501		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2502					 XFS_INO_TO_AGBNO(mp, inum));
2503
2504		/*
2505		 * We obtain and lock the backing buffer first in the process
2506		 * here to ensure dirty inodes attached to the buffer remain in
2507		 * the flushing state while we mark them stale.
2508		 *
2509		 * If we scan the in-memory inodes first, then buffer IO can
2510		 * complete before we get a lock on it, and hence we may fail
2511		 * to mark all the active inodes on the buffer stale.
2512		 */
2513		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2514				mp->m_bsize * igeo->blocks_per_cluster,
2515				XBF_UNMAPPED, &bp);
2516		if (error)
2517			return error;
2518
2519		/*
2520		 * This buffer may not have been correctly initialised as we
2521		 * didn't read it from disk. That's not important because we are
2522		 * only using it to mark the buffer as stale in the log, and to
2523		 * attach stale cached inodes on it. That means it will never be
2524		 * dispatched for IO. If it is, we want to know about it, and we
2525		 * want it to fail. We can achieve this by adding a write
2526		 * verifier to the buffer.
2527		 */
2528		bp->b_ops = &xfs_inode_buf_ops;
2529
2530		/*
2531		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2532		 * too. This requires lookups, and will skip inodes that we've
2533		 * already marked XFS_ISTALE.
2534		 */
2535		for (i = 0; i < igeo->inodes_per_cluster; i++)
2536			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
2537
2538		xfs_trans_stale_inode_buf(tp, bp);
2539		xfs_trans_binval(tp, bp);
2540	}
2541	return 0;
2542}
2543
2544/*
2545 * This is called to return an inode to the inode free list.
2546 * The inode should already be truncated to 0 length and have
2547 * no pages associated with it.  This routine also assumes that
2548 * the inode is already a part of the transaction.
2549 *
2550 * The on-disk copy of the inode will have been added to the list
2551 * of unlinked inodes in the AGI. We need to remove the inode from
2552 * that list atomically with respect to freeing it here.
2553 */
2554int
2555xfs_ifree(
2556	struct xfs_trans	*tp,
2557	struct xfs_inode	*ip)
2558{
2559	struct xfs_mount	*mp = ip->i_mount;
2560	struct xfs_perag	*pag;
2561	struct xfs_icluster	xic = { 0 };
2562	struct xfs_inode_log_item *iip = ip->i_itemp;
2563	int			error;
2564
2565	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2566	ASSERT(VFS_I(ip)->i_nlink == 0);
2567	ASSERT(ip->i_df.if_nextents == 0);
2568	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2569	ASSERT(ip->i_nblocks == 0);
2570
2571	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2572
2573	/*
2574	 * Pull the on-disk inode from the AGI unlinked list.
2575	 */
2576	error = xfs_iunlink_remove(tp, pag, ip);
2577	if (error)
2578		goto out;
2579
2580	error = xfs_difree(tp, pag, ip->i_ino, &xic);
2581	if (error)
2582		goto out;
2583
2584	/*
2585	 * Free any local-format data sitting around before we reset the
2586	 * data fork to extents format.  Note that the attr fork data has
2587	 * already been freed by xfs_attr_inactive.
2588	 */
2589	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2590		kmem_free(ip->i_df.if_u1.if_data);
2591		ip->i_df.if_u1.if_data = NULL;
2592		ip->i_df.if_bytes = 0;
2593	}
2594
2595	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2596	ip->i_diflags = 0;
2597	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
2598	ip->i_forkoff = 0;		/* mark the attr fork not in use */
2599	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2600	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
2601		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2602
2603	/* Don't attempt to replay owner changes for a deleted inode */
2604	spin_lock(&iip->ili_lock);
2605	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2606	spin_unlock(&iip->ili_lock);
2607
2608	/*
2609	 * Bump the generation count so no one will be confused
2610	 * by reincarnations of this inode.
2611	 */
2612	VFS_I(ip)->i_generation++;
2613	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2614
2615	if (xic.deleted)
2616		error = xfs_ifree_cluster(tp, pag, ip, &xic);
2617out:
2618	xfs_perag_put(pag);
2619	return error;
2620}
2621
2622/*
2623 * This is called to unpin an inode.  The caller must have the inode locked
2624 * in at least shared mode so that the buffer cannot be subsequently pinned
2625 * once someone is waiting for it to be unpinned.
2626 */
2627static void
2628xfs_iunpin(
2629	struct xfs_inode	*ip)
2630{
2631	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2632
2633	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2634
2635	/* Give the log a push to start the unpinning I/O */
2636	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2637
2638}
2639
2640static void
2641__xfs_iunpin_wait(
2642	struct xfs_inode	*ip)
2643{
2644	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2645	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2646
2647	xfs_iunpin(ip);
2648
2649	do {
2650		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2651		if (xfs_ipincount(ip))
2652			io_schedule();
2653	} while (xfs_ipincount(ip));
2654	finish_wait(wq, &wait.wq_entry);
2655}
2656
2657void
2658xfs_iunpin_wait(
2659	struct xfs_inode	*ip)
2660{
2661	if (xfs_ipincount(ip))
2662		__xfs_iunpin_wait(ip);
2663}
2664
2665/*
2666 * Removing an inode from the namespace involves removing the directory entry
2667 * and dropping the link count on the inode. Removing the directory entry can
2668 * result in locking an AGF (directory blocks were freed) and removing a link
2669 * count can result in placing the inode on an unlinked list which results in
2670 * locking an AGI.
2671 *
2672 * The big problem here is that we have an ordering constraint on AGF and AGI
2673 * locking - inode allocation locks the AGI, then can allocate a new extent for
2674 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2675 * removes the inode from the unlinked list, requiring that we lock the AGI
2676 * first, and then freeing the inode can result in an inode chunk being freed
2677 * and hence freeing disk space requiring that we lock an AGF.
2678 *
2679 * Hence the ordering that is imposed by other parts of the code is AGI before
2680 * AGF. This means we cannot remove the directory entry before we drop the inode
2681 * reference count and put it on the unlinked list as this results in a lock
2682 * order of AGF then AGI, and this can deadlock against inode allocation and
2683 * freeing. Therefore we must drop the link counts before we remove the
2684 * directory entry.
2685 *
2686 * This is still safe from a transactional point of view - it is not until we
2687 * get to xfs_defer_finish() that we have the possibility of multiple
2688 * transactions in this operation. Hence as long as we remove the directory
2689 * entry and drop the link count in the first transaction of the remove
2690 * operation, there are no transactional constraints on the ordering here.
2691 */
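/*
 * Illustrative deadlock (not from the original source) if the order were
 * reversed: task A allocates an inode (holds the AGI, then wants the AGF
 * for a new chunk) while task B removes a directory entry first (holds
 * the AGF for a freed dir block) and then drops the link count (wants
 * the AGI).  A waits on B's AGF, B waits on A's AGI, and neither can
 * make progress.
 */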
2692int
2693xfs_remove(
2694	xfs_inode_t             *dp,
2695	struct xfs_name		*name,
2696	xfs_inode_t		*ip)
2697{
2698	xfs_mount_t		*mp = dp->i_mount;
2699	xfs_trans_t             *tp = NULL;
2700	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2701	int                     error = 0;
2702	uint			resblks;
2703
2704	trace_xfs_remove(dp, name);
2705
2706	if (XFS_FORCED_SHUTDOWN(mp))
2707		return -EIO;
2708
2709	error = xfs_qm_dqattach(dp);
2710	if (error)
2711		goto std_return;
2712
2713	error = xfs_qm_dqattach(ip);
2714	if (error)
2715		goto std_return;
2716
2717	/*
2718	 * We try to get the real space reservation first,
2719	 * allowing for directory btree deletion(s) implying
2720	 * possible bmap insert(s).  If we can't get the space
2721	 * reservation then we use 0 instead, and avoid the bmap
2722	 * btree insert(s) in the directory code: if a bmap insert
2723	 * tries to happen, the directory code instead trims the
2724	 * LAST block from the directory.
2725	 */
2726	resblks = XFS_REMOVE_SPACE_RES(mp);
2727	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2728	if (error == -ENOSPC) {
2729		resblks = 0;
2730		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2731				&tp);
2732	}
2733	if (error) {
2734		ASSERT(error != -ENOSPC);
2735		goto std_return;
2736	}
2737
2738	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2739
2740	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2741	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2742
2743	/*
2744	 * If we're removing a directory perform some additional validation.
2745	 */
2746	if (is_dir) {
2747		ASSERT(VFS_I(ip)->i_nlink >= 2);
2748		if (VFS_I(ip)->i_nlink != 2) {
2749			error = -ENOTEMPTY;
2750			goto out_trans_cancel;
2751		}
2752		if (!xfs_dir_isempty(ip)) {
2753			error = -ENOTEMPTY;
2754			goto out_trans_cancel;
2755		}
2756
2757		/* Drop the link from ip's "..".  */
2758		error = xfs_droplink(tp, dp);
2759		if (error)
2760			goto out_trans_cancel;
2761
2762		/* Drop the "." link from ip to self.  */
2763		error = xfs_droplink(tp, ip);
2764		if (error)
2765			goto out_trans_cancel;
2766
2767		/*
2768		 * Point the unlinked child directory's ".." entry to the root
2769		 * directory to eliminate back-references to inodes that may
2770		 * get freed before the child directory is closed.  If the fs
2771		 * gets shrunk, this can lead to dirent inode validation errors.
2772		 */
2773		if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
2774			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
2775					tp->t_mountp->m_sb.sb_rootino, 0);
2776			if (error)
2777				return error;
2778		}
2779	} else {
2780		/*
2781		 * When removing a non-directory we need to log the parent
2782		 * inode here.  For a directory this is done implicitly
2783		 * by the xfs_droplink call for the ".." entry.
2784		 */
2785		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2786	}
2787	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2788
2789	/* Drop the link from dp to ip. */
2790	error = xfs_droplink(tp, ip);
2791	if (error)
2792		goto out_trans_cancel;
2793
2794	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2795	if (error) {
2796		ASSERT(error != -ENOENT);
2797		goto out_trans_cancel;
2798	}
2799
2800	/*
2801	 * If this is a synchronous mount, make sure that the
2802	 * remove transaction goes to disk before returning to
2803	 * the user.
2804	 */
2805	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2806		xfs_trans_set_sync(tp);
2807
2808	error = xfs_trans_commit(tp);
2809	if (error)
2810		goto std_return;
2811
2812	if (is_dir && xfs_inode_is_filestream(ip))
2813		xfs_filestream_deassociate(ip);
2814
2815	return 0;
2816
2817 out_trans_cancel:
2818	xfs_trans_cancel(tp);
2819 std_return:
2820	return error;
2821}
2822
2823/*
2824 * Enter all inodes for a rename transaction into a sorted array.
2825 */
2826#define __XFS_SORT_INODES	5
2827STATIC void
2828xfs_sort_for_rename(
2829	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
2830	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
2831	struct xfs_inode	*ip1,	/* in: inode of old entry */
2832	struct xfs_inode	*ip2,	/* in: inode of new entry */
2833	struct xfs_inode	*wip,	/* in: whiteout inode */
2834	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
2835	int			*num_inodes)  /* in/out: inodes in array */
2836{
2837	int			i, j;
2838
2839	ASSERT(*num_inodes == __XFS_SORT_INODES);
2840	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2841
2842	/*
2843	 * i_tab contains a list of pointers to inodes.  We initialize
2844	 * the table here & we'll sort it.  We will then use it to
2845	 * order the acquisition of the inode locks.
2846	 *
2847	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2848	 */
2849	i = 0;
2850	i_tab[i++] = dp1;
2851	i_tab[i++] = dp2;
2852	i_tab[i++] = ip1;
2853	if (ip2)
2854		i_tab[i++] = ip2;
2855	if (wip)
2856		i_tab[i++] = wip;
2857	*num_inodes = i;
2858
2859	/*
2860	 * Sort the elements via bubble sort.  (Remember, there are at
2861	 * most 5 elements to sort, so this is adequate.)
2862	 */
2863	for (i = 0; i < *num_inodes; i++) {
2864		for (j = 1; j < *num_inodes; j++) {
2865			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2866				struct xfs_inode *temp = i_tab[j];
2867				i_tab[j] = i_tab[j-1];
2868				i_tab[j-1] = temp;
2869			}
2870		}
2871	}
2872}
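/*
 * Illustrative example (not from the original source): for a rename with
 * dp1->i_ino == 20, dp2->i_ino == 7 and ip1->i_ino == 12 and no target or
 * whiteout inode, i_tab sorts to { 7, 12, 20 } with num_inodes == 3; this
 * is the ascending order in which xfs_lock_inodes() later takes the
 * ILOCKs, which is what prevents ABBA deadlocks between racing renames.
 */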
2873
2874static int
2875xfs_finish_rename(
2876	struct xfs_trans	*tp)
2877{
2878	/*
2879	 * If this is a synchronous mount, make sure that the rename transaction
2880	 * goes to disk before returning to the user.
2881	 */
2882	if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2883		xfs_trans_set_sync(tp);
2884
2885	return xfs_trans_commit(tp);
2886}
2887
2888/*
2889 * xfs_cross_rename()
2890 *
2891 * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
2892 */
2893STATIC int
2894xfs_cross_rename(
2895	struct xfs_trans	*tp,
2896	struct xfs_inode	*dp1,
2897	struct xfs_name		*name1,
2898	struct xfs_inode	*ip1,
2899	struct xfs_inode	*dp2,
2900	struct xfs_name		*name2,
2901	struct xfs_inode	*ip2,
2902	int			spaceres)
2903{
2904	int		error = 0;
2905	int		ip1_flags = 0;
2906	int		ip2_flags = 0;
2907	int		dp2_flags = 0;
2908
2909	/* Swap inode number for dirent in first parent */
2910	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2911	if (error)
2912		goto out_trans_abort;
2913
2914	/* Swap inode number for dirent in second parent */
2915	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2916	if (error)
2917		goto out_trans_abort;
2918
2919	/*
2920	 * If we're renaming one or more directories across different parents,
2921	 * update the respective ".." entries (and link counts) to match the new
2922	 * parents.
2923	 */
2924	if (dp1 != dp2) {
2925		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2926
2927		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2928			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2929						dp1->i_ino, spaceres);
2930			if (error)
2931				goto out_trans_abort;
2932
2933			/* transfer ip2 ".." reference to dp1 */
2934			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2935				error = xfs_droplink(tp, dp2);
2936				if (error)
2937					goto out_trans_abort;
2938				xfs_bumplink(tp, dp1);
2939			}
2940
2941			/*
2942			 * Although ip1 isn't changed here, userspace needs
2943			 * to be warned about the change, so that applications
2944			 * relying on it (like backup ones) will properly
2945			 * notice the change.
2946			 */
2947			ip1_flags |= XFS_ICHGTIME_CHG;
2948			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2949		}
2950
2951		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2952			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2953						dp2->i_ino, spaceres);
2954			if (error)
2955				goto out_trans_abort;
2956
2957			/* transfer ip1 ".." reference to dp2 */
2958			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2959				error = xfs_droplink(tp, dp1);
2960				if (error)
2961					goto out_trans_abort;
2962				xfs_bumplink(tp, dp2);
2963			}
2964
2965			/*
2966			 * Although ip2 isn't changed here, userspace needs
2967			 * to be warned about the change, so that applications
2968			 * relying on it (like backup ones) will properly
2969			 * notice the change.
2970			 */
2971			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2972			ip2_flags |= XFS_ICHGTIME_CHG;
2973		}
2974	}
2975
2976	if (ip1_flags) {
2977		xfs_trans_ichgtime(tp, ip1, ip1_flags);
2978		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2979	}
2980	if (ip2_flags) {
2981		xfs_trans_ichgtime(tp, ip2, ip2_flags);
2982		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2983	}
2984	if (dp2_flags) {
2985		xfs_trans_ichgtime(tp, dp2, dp2_flags);
2986		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2987	}
2988	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2989	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2990	return xfs_finish_rename(tp);
2991
2992out_trans_abort:
2993	xfs_trans_cancel(tp);
2994	return error;
2995}
2996
2997/*
2998 * xfs_rename_alloc_whiteout()
2999 *
3000 * Return a referenced, unlinked, unlocked inode that can be used as a
3001 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
3002 * crash between allocating the inode and linking it into the rename
3003 * transaction, recovery will free the inode and we won't leak it.
3004 */
3005static int
3006xfs_rename_alloc_whiteout(
3007	struct user_namespace	*mnt_userns,
3008	struct xfs_inode	*dp,
3009	struct xfs_inode	**wip)
3010{
3011	struct xfs_inode	*tmpfile;
3012	int			error;
3013
3014	error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
3015				   &tmpfile);
3016	if (error)
3017		return error;
3018
3019	/*
3020	 * Prepare the tmpfile inode as if it were created through the VFS.
3021	 * Complete the inode setup and flag it as linkable.  nlink is already
3022	 * zero, so we can skip the drop_nlink.
3023	 */
3024	xfs_setup_iops(tmpfile);
3025	xfs_finish_inode_setup(tmpfile);
3026	VFS_I(tmpfile)->i_state |= I_LINKABLE;
3027
3028	*wip = tmpfile;
3029	return 0;
3030}
3031
3032/*
3033 * xfs_rename
3034 */
3035int
3036xfs_rename(
3037	struct user_namespace	*mnt_userns,
3038	struct xfs_inode	*src_dp,
3039	struct xfs_name		*src_name,
3040	struct xfs_inode	*src_ip,
3041	struct xfs_inode	*target_dp,
3042	struct xfs_name		*target_name,
3043	struct xfs_inode	*target_ip,
3044	unsigned int		flags)
3045{
3046	struct xfs_mount	*mp = src_dp->i_mount;
3047	struct xfs_trans	*tp;
3048	struct xfs_inode	*wip = NULL;		/* whiteout inode */
3049	struct xfs_inode	*inodes[__XFS_SORT_INODES];
3050	int			i;
3051	int			num_inodes = __XFS_SORT_INODES;
3052	bool			new_parent = (src_dp != target_dp);
3053	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3054	int			spaceres;
3055	int			error;
3056
3057	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3058
3059	if ((flags & RENAME_EXCHANGE) && !target_ip)
3060		return -EINVAL;
3061
3062	/*
3063	 * If we are doing a whiteout operation, allocate the whiteout inode
3064	 * we will be placing at the target and ensure the type is set
3065	 * appropriately.
3066	 */
3067	if (flags & RENAME_WHITEOUT) {
3068		ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
3069		error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
3070		if (error)
3071			return error;
3072
3073		/* setup target dirent info as whiteout */
3074		src_name->type = XFS_DIR3_FT_CHRDEV;
3075	}
3076
3077	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3078				inodes, &num_inodes);
3079
3080	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3081	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
3082	if (error == -ENOSPC) {
3083		spaceres = 0;
3084		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3085				&tp);
3086	}
3087	if (error)
3088		goto out_release_wip;
3089
3090	/*
3091	 * Attach the dquots to the inodes
3092	 */
3093	error = xfs_qm_vop_rename_dqattach(inodes);
3094	if (error)
3095		goto out_trans_cancel;
3096
3097	/*
3098	 * Lock all the participating inodes. Depending upon whether
3099	 * the target_name exists in the target directory, and
3100	 * whether the target directory is the same as the source
3101	 * directory, we can lock from 2 to 4 inodes.
3102	 */
3103	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3104
3105	/*
3106	 * Join all the inodes to the transaction. From this point on,
3107	 * we can rely on either trans_commit or trans_cancel to unlock
3108	 * them.
3109	 */
3110	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3111	if (new_parent)
3112		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3113	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3114	if (target_ip)
3115		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
3116	if (wip)
3117		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3118
3119	/*
3120	 * If we are using project inheritance, we only allow renames
3121	 * into our tree when the project IDs are the same; else the
3122	 * tree quota mechanism would be circumvented.
3123	 */
3124	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
3125		     target_dp->i_projid != src_ip->i_projid)) {
3126		error = -EXDEV;
3127		goto out_trans_cancel;
3128	}
3129
3130	/* RENAME_EXCHANGE is unique from here on. */
3131	if (flags & RENAME_EXCHANGE)
3132		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3133					target_dp, target_name, target_ip,
3134					spaceres);
3135
3136	/*
3137	 * Check for expected errors before we dirty the transaction
3138	 * so we can return an error without a transaction abort.
3139	 *
3140	 * Extent count overflow check:
3141	 *
3142	 * From the perspective of src_dp, a rename operation is essentially a
3143	 * directory entry remove operation. Hence the only place where we check
3144	 * for extent count overflow for src_dp is in
3145	 * xfs_bmap_del_extent_real(). xfs_bmap_del_extent_real() returns
3146	 * -ENOSPC when it detects a possible extent count overflow and in
3147	 * response, the higher layers of directory handling code do the
3148	 * following:
3149	 * 1. Data/Free blocks: XFS lets these blocks linger until a
3150	 *    future remove operation removes them.
3151	 * 2. Dabtree blocks: XFS swaps the blocks with the last block in the
3152	 *    Leaf space and unmaps the last block.
3153	 *
3154	 * For target_dp, there are two cases depending on whether the
3155	 * destination directory entry exists or not.
3156	 *
3157	 * When destination directory entry does not exist (i.e. target_ip ==
3158	 * NULL), extent count overflow check is performed only when transaction
3159	 * has a non-zero sized space reservation associated with it.  With a
3160	 * zero-sized space reservation, XFS allows a rename operation to
3161	 * continue only when the directory has sufficient free space in its
3162	 * data/leaf/free space blocks to hold the new entry.
3163	 *
3164	 * When destination directory entry exists (i.e. target_ip != NULL), all
3165	 * we need to do is change the inode number associated with the already
3166	 * existing entry. Hence there is no need to perform an extent count
3167	 * overflow check.
3168	 */
3169	if (target_ip == NULL) {
3170		/*
3171		 * If there's no space reservation, check the entry will
3172		 * fit before actually inserting it.
3173		 */
3174		if (!spaceres) {
3175			error = xfs_dir_canenter(tp, target_dp, target_name);
3176			if (error)
3177				goto out_trans_cancel;
3178		} else {
3179			error = xfs_iext_count_may_overflow(target_dp,
3180					XFS_DATA_FORK,
3181					XFS_IEXT_DIR_MANIP_CNT(mp));
3182			if (error)
3183				goto out_trans_cancel;
3184		}
3185	} else {
3186		/*
3187		 * If target exists and it's a directory, check whether
3188		 * it can be destroyed.
3189		 */
3190		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3191		    (!xfs_dir_isempty(target_ip) ||
3192		     (VFS_I(target_ip)->i_nlink > 2))) {
3193			error = -EEXIST;
3194			goto out_trans_cancel;
3195		}
3196	}
3197
3198	/*
3199	 * Lock the AGI buffers we need to handle bumping the nlink of the
3200	 * whiteout inode off the unlinked list and to handle dropping the
3201	 * nlink of the target inode.  Per locking order rules, do this in
3202	 * increasing AG order and before directory block allocation tries to
3203	 * grab AGFs because we grab AGIs before AGFs.
3204	 *
3205	 * The (vfs) caller must ensure that if src is a directory then
3206	 * target_ip is either null or an empty directory.
3207	 */
3208	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
3209		if (inodes[i] == wip ||
3210		    (inodes[i] == target_ip &&
3211		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
3212			struct xfs_buf	*bp;
3213			xfs_agnumber_t	agno;
3214
3215			agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
3216			error = xfs_read_agi(mp, tp, agno, &bp);
3217			if (error)
3218				goto out_trans_cancel;
3219		}
3220	}
3221
3222	/*
3223	 * Directory entry creation below may acquire the AGF. Remove
3224	 * the whiteout from the unlinked list first to preserve correct
3225	 * AGI/AGF locking order. This dirties the transaction so failures
3226	 * after this point will abort and log recovery will clean up the
3227	 * mess.
3228	 *
3229	 * For whiteouts, we need to bump the link count on the whiteout
3230	 * inode. After this point we have a real link, so clear the tmpfile
3231	 * state flag from the inode so it doesn't accidentally get misused
3232	 * in the future.
3233	 */
3234	if (wip) {
3235		struct xfs_perag	*pag;
3236
3237		ASSERT(VFS_I(wip)->i_nlink == 0);
3238
3239		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
3240		error = xfs_iunlink_remove(tp, pag, wip);
3241		xfs_perag_put(pag);
3242		if (error)
3243			goto out_trans_cancel;
3244
3245		xfs_bumplink(tp, wip);
3246		VFS_I(wip)->i_state &= ~I_LINKABLE;
3247	}
3248
3249	/*
3250	 * Set up the target.
3251	 */
3252	if (target_ip == NULL) {
3253		/*
3254		 * If target does not exist and the rename crosses
3255		 * directories, adjust the target directory link count
3256		 * to account for the ".." reference from the new entry.
3257		 */
3258		error = xfs_dir_createname(tp, target_dp, target_name,
3259					   src_ip->i_ino, spaceres);
3260		if (error)
3261			goto out_trans_cancel;
3262
3263		xfs_trans_ichgtime(tp, target_dp,
3264					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3265
3266		if (new_parent && src_is_directory) {
3267			xfs_bumplink(tp, target_dp);
3268		}
3269	} else { /* target_ip != NULL */
3270		/*
3271		 * Link the source inode under the target name.
3272		 * If the source inode is a directory and we are moving
3273		 * it across directories, its ".." entry will be
3274		 * inconsistent until we replace that down below.
3275		 *
3276		 * In case there is already an entry with the same
3277		 * name at the destination directory, remove it first.
3278		 */
3279		error = xfs_dir_replace(tp, target_dp, target_name,
3280					src_ip->i_ino, spaceres);
3281		if (error)
3282			goto out_trans_cancel;
3283
3284		xfs_trans_ichgtime(tp, target_dp,
3285					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3286
3287		/*
3288		 * Decrement the link count on the target since the target
3289		 * dir no longer points to it.
3290		 */
3291		error = xfs_droplink(tp, target_ip);
3292		if (error)
3293			goto out_trans_cancel;
3294
3295		if (src_is_directory) {
3296			/*
3297			 * Drop the link from the old "." entry.
3298			 */
3299			error = xfs_droplink(tp, target_ip);
3300			if (error)
3301				goto out_trans_cancel;
3302		}
3303	} /* target_ip != NULL */
3304
3305	/*
3306	 * Remove the source.
3307	 */
3308	if (new_parent && src_is_directory) {
3309		/*
3310		 * Rewrite the ".." entry to point to the new
3311		 * directory.
3312		 */
3313		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3314					target_dp->i_ino, spaceres);
3315		ASSERT(error != -EEXIST);
3316		if (error)
3317			goto out_trans_cancel;
3318	}
3319
3320	/*
3321	 * We always want to hit the ctime on the source inode.
3322	 *
3323	 * This isn't strictly required by the standards since the source
3324	 * inode isn't really being changed, but old unix file systems did
3325	 * it and some incremental backup programs won't work without it.
3326	 */
3327	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3328	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3329
3330	/*
3331	 * Adjust the link count on src_dp.  This is necessary when
3332	 * renaming a directory, either within one parent when
3333	 * the target existed, or across two parent directories.
3334	 */
3335	if (src_is_directory && (new_parent || target_ip != NULL)) {
3336
3337		/*
3338		 * Decrement link count on src_directory since the
3339		 * entry that's moved no longer points to it.
3340		 */
3341		error = xfs_droplink(tp, src_dp);
3342		if (error)
3343			goto out_trans_cancel;
3344	}
3345
3346	/*
3347	 * For whiteouts, we only need to update the source dirent with the
3348	 * inode number of the whiteout inode rather than removing it
3349	 * altogether.
3350	 */
3351	if (wip) {
3352		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3353					spaceres);
3354	} else {
3355		/*
3356		 * NOTE: We don't need to check for extent count overflow here
3357		 * because the dir remove name code will leave the dir block in
3358		 * place if the extent count would overflow.
3359		 */
3360		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3361					   spaceres);
3362	}
3363
3364	if (error)
3365		goto out_trans_cancel;
3366
3367	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3368	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3369	if (new_parent)
3370		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3371
3372	error = xfs_finish_rename(tp);
3373	if (wip)
3374		xfs_irele(wip);
3375	return error;
3376
3377out_trans_cancel:
3378	xfs_trans_cancel(tp);
3379out_release_wip:
3380	if (wip)
3381		xfs_irele(wip);
3382	return error;
3383}
3384
3385static int
3386xfs_iflush(
3387	struct xfs_inode	*ip,
3388	struct xfs_buf		*bp)
3389{
3390	struct xfs_inode_log_item *iip = ip->i_itemp;
3391	struct xfs_dinode	*dip;
3392	struct xfs_mount	*mp = ip->i_mount;
3393	int			error;
3394
3395	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3396	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3397	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3398	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3399	ASSERT(iip->ili_item.li_buf == bp);
3400
3401	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3402
3403	/*
3404	 * We don't flush the inode if any of the following checks fail, but we
3405	 * do still update the log item and attach to the backing buffer as if
3406	 * the flush happened. This is a formality to facilitate predictable
3407	 * error handling as the caller will shutdown and fail the buffer.
3408	 */
3409	error = -EFSCORRUPTED;
3410	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3411			       mp, XFS_ERRTAG_IFLUSH_1)) {
3412		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3413			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3414			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3415		goto flush_out;
3416	}
3417	if (S_ISREG(VFS_I(ip)->i_mode)) {
3418		if (XFS_TEST_ERROR(
3419		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3420		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3421		    mp, XFS_ERRTAG_IFLUSH_3)) {
3422			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3423				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
3424				__func__, ip->i_ino, ip);
3425			goto flush_out;
3426		}
3427	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3428		if (XFS_TEST_ERROR(
3429		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3430		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3431		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3432		    mp, XFS_ERRTAG_IFLUSH_4)) {
3433			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3434				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
3435				__func__, ip->i_ino, ip);
3436			goto flush_out;
3437		}
3438	}
3439	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
3440				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3441		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3442			"%s: detected corrupt incore inode %Lu, "
3443			"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
3444			__func__, ip->i_ino,
3445			ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
3446			ip->i_nblocks, ip);
3447		goto flush_out;
3448	}
3449	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
3450				mp, XFS_ERRTAG_IFLUSH_6)) {
3451		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3452			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3453			__func__, ip->i_ino, ip->i_forkoff, ip);
3454		goto flush_out;
3455	}
3456
3457	/*
3458	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3459	 * count for correct sequencing.  We bump the flush iteration count so
3460	 * we can detect flushes which postdate a log record during recovery.
3461	 * This is redundant as we now log every change and hence this can't
3462	 * happen, but we still need to do it to ensure backwards compatibility
3463	 * with old kernels that predate logging all inode changes.
3464	 */
3465	if (!xfs_sb_version_has_v3inode(&mp->m_sb))
3466		ip->i_flushiter++;
3467
3468	/*
3469	 * If there are inline format data / attr forks attached to this inode,
3470	 * make sure they are not corrupt.
3471	 */
3472	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3473	    xfs_ifork_verify_local_data(ip))
3474		goto flush_out;
3475	if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
3476	    xfs_ifork_verify_local_attr(ip))
3477		goto flush_out;
3478
3479	/*
3480	 * Copy the dirty parts of the inode into the on-disk inode.  We always
3481	 * copy out the core of the inode, because if the inode is dirty at all
3482	 * the core must be.
3483	 */
3484	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3485
3486	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3487	if (!xfs_sb_version_has_v3inode(&mp->m_sb)) {
3488		if (ip->i_flushiter == DI_MAX_FLUSH)
3489			ip->i_flushiter = 0;
3490	}
3491
3492	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3493	if (XFS_IFORK_Q(ip))
3494		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3495
3496	/*
3497	 * We've recorded everything logged in the inode, so we'd like to clear
3498	 * the ili_fields bits so we don't log and flush things unnecessarily.
3499	 * However, we can't stop logging all this information until the data
3500	 * we've copied into the disk buffer is written to disk.  If we did we
3501	 * might overwrite the copy of the inode in the log with all the data
3502	 * after re-logging only part of it, and in the face of a crash we
3503	 * wouldn't have all the data we need to recover.
3504	 *
3505	 * What we do is move the bits to the ili_last_fields field.  When
3506	 * logging the inode, these bits are moved back to the ili_fields field.
3507	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3508	 * we know that the information those bits represent is permanently on
3509	 * disk.  As long as the flush completes before the inode is logged
3510	 * again, then both ili_fields and ili_last_fields will be cleared.
3511	 */
3512	error = 0;
3513flush_out:
3514	spin_lock(&iip->ili_lock);
3515	iip->ili_last_fields = iip->ili_fields;
3516	iip->ili_fields = 0;
3517	iip->ili_fsync_fields = 0;
3518	spin_unlock(&iip->ili_lock);
3519
3520	/*
3521	 * Store the current LSN of the inode so that we can tell whether the
3522	 * item has moved in the AIL from xfs_buf_inode_iodone().
3523	 */
3524	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3525				&iip->ili_item.li_lsn);
3526
3527	/* generate the checksum. */
3528	xfs_dinode_calc_crc(mp, dip);
3529	return error;
3530}
3531
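/*
 * Illustrative sketch (not xfs code): a userspace analogue of the
 * ili_fields/ili_last_fields handoff described in xfs_iflush() above.
 * Dirty bits are moved, under a lock, from the "still being logged" set to
 * a "claimed by the in-flight flush" set, and only cleared once the flush
 * I/O completes.  The struct and function names below are invented.
 */
#include <pthread.h>

struct dirty_track {
	pthread_mutex_t	lock;
	unsigned int	fields;		/* dirtied since the flush started */
	unsigned int	last_fields;	/* claimed by the in-flight flush */
};

/* Flush start: claim the current dirty set without losing later updates. */
static void flush_start(struct dirty_track *dt)
{
	pthread_mutex_lock(&dt->lock);
	dt->last_fields = dt->fields;
	dt->fields = 0;
	pthread_mutex_unlock(&dt->lock);
}

/* Flush I/O done: the claimed set is durable and can be forgotten. */
static void flush_done(struct dirty_track *dt)
{
	pthread_mutex_lock(&dt->lock);
	dt->last_fields = 0;
	pthread_mutex_unlock(&dt->lock);
}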
3532/*
3533 * Non-blocking flush of dirty inode metadata into the backing buffer.
3534 *
3535 * The caller must have a reference to the inode and hold the cluster buffer
3536	 * locked. The function walks all the inodes attached to the cluster buffer
3537	 * that it can lock without blocking, and flushes them to the cluster buffer.
3538 *
3539 * On successful flushing of at least one inode, the caller must write out the
3540 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3541 * the caller needs to release the buffer. On failure, the filesystem will be
3542 * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
3543 * will be returned.
3544 */
3545int
3546xfs_iflush_cluster(
3547	struct xfs_buf		*bp)
3548{
3549	struct xfs_mount	*mp = bp->b_mount;
3550	struct xfs_log_item	*lip, *n;
3551	struct xfs_inode	*ip;
3552	struct xfs_inode_log_item *iip;
3553	int			clcount = 0;
3554	int			error = 0;
3555
3556	/*
3557	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3558	 * can remove itself from the list.
3559	 */
3560	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3561		iip = (struct xfs_inode_log_item *)lip;
3562		ip = iip->ili_inode;
3563
3564		/*
3565		 * Quick and dirty check to avoid locks if possible.
3566		 */
3567		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
3568			continue;
3569		if (xfs_ipincount(ip))
3570			continue;
3571
3572		/*
3573		 * The inode is still attached to the buffer, which means it is
3574		 * dirty but reclaim might try to grab it. Check carefully for
3575		 * that, and grab the ilock while still holding the i_flags_lock
3576		 * to guarantee reclaim will not be able to reclaim this inode
3577		 * once we drop the i_flags_lock.
3578		 */
3579		spin_lock(&ip->i_flags_lock);
3580		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3581		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
3582			spin_unlock(&ip->i_flags_lock);
3583			continue;
3584		}
3585
3586		/*
3587		 * ILOCK will pin the inode against reclaim and prevent
3588		 * concurrent transactions modifying the inode while we are
3589		 * flushing the inode. If we get the lock, set the flushing
3590		 * state before we drop the i_flags_lock.
3591		 */
3592		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3593			spin_unlock(&ip->i_flags_lock);
3594			continue;
3595		}
3596		__xfs_iflags_set(ip, XFS_IFLUSHING);
3597		spin_unlock(&ip->i_flags_lock);
3598
3599		/*
3600		 * Abort flushing this inode if we are shut down because the
3601		 * inode may not currently be in the AIL. This can occur when
3602		 * log I/O failure unpins the inode without inserting into the
3603		 * AIL, leaving a dirty/unpinned inode attached to the buffer
3604		 * that otherwise looks like it should be flushed.
3605		 */
3606		if (XFS_FORCED_SHUTDOWN(mp)) {
3607			xfs_iunpin_wait(ip);
3608			xfs_iflush_abort(ip);
3609			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3610			error = -EIO;
3611			continue;
3612		}
3613
3614		/* don't block waiting on a log force to unpin dirty inodes */
3615		if (xfs_ipincount(ip)) {
3616			xfs_iflags_clear(ip, XFS_IFLUSHING);
3617			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3618			continue;
3619		}
3620
3621		if (!xfs_inode_clean(ip))
3622			error = xfs_iflush(ip, bp);
3623		else
3624			xfs_iflags_clear(ip, XFS_IFLUSHING);
3625		xfs_iunlock(ip, XFS_ILOCK_SHARED);
3626		if (error)
3627			break;
3628		clcount++;
3629	}
3630
3631	if (error) {
3632		bp->b_flags |= XBF_ASYNC;
3633		xfs_buf_ioend_fail(bp);
3634		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3635		return error;
3636	}
3637
3638	if (!clcount)
3639		return -EAGAIN;
3640
3641	XFS_STATS_INC(mp, xs_icluster_flushcnt);
3642	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3643	return 0;
3644
3645}
3646
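/*
 * Sketch of how a caller is expected to honour the contract documented
 * above (hypothetical, not the actual call site): on success the buffer is
 * written and released, on -EAGAIN it is only released, and on any other
 * error the filesystem is already shut down and the buffer already gone.
 *
 *	error = xfs_iflush_cluster(bp);
 *	if (!error) {
 *		... queue bp for write and release it ...
 *	} else if (error == -EAGAIN) {
 *		xfs_buf_relse(bp);	// nothing was flushed
 *	}
 *	// other errors: buffer already unlocked and released
 */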
3647/* Release an inode. */
3648void
3649xfs_irele(
3650	struct xfs_inode	*ip)
3651{
3652	trace_xfs_irele(ip, _RET_IP_);
3653	iput(VFS_I(ip));
3654}
3655
3656/*
3657	 * Ensure all committed transactions touching the inode are written to the log.
3658 */
3659int
3660xfs_log_force_inode(
3661	struct xfs_inode	*ip)
3662{
3663	xfs_csn_t		seq = 0;
3664
3665	xfs_ilock(ip, XFS_ILOCK_SHARED);
3666	if (xfs_ipincount(ip))
3667		seq = ip->i_itemp->ili_commit_seq;
3668	xfs_iunlock(ip, XFS_ILOCK_SHARED);
3669
3670	if (!seq)
3671		return 0;
3672	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
3673}
3674
3675/*
3676 * Grab the exclusive iolock for a data copy from src to dest, making sure to
3677 * abide vfs locking order (lowest pointer value goes first) and breaking the
3678 * layout leases before proceeding.  The loop is needed because we cannot call
3679 * the blocking break_layout() with the iolocks held, and therefore have to
3680 * back out both locks.
3681 */
3682static int
3683xfs_iolock_two_inodes_and_break_layout(
3684	struct inode		*src,
3685	struct inode		*dest)
3686{
3687	int			error;
3688
3689	if (src > dest)
3690		swap(src, dest);
3691
3692retry:
3693	/* Wait to break both inodes' layouts before we start locking. */
3694	error = break_layout(src, true);
3695	if (error)
3696		return error;
3697	if (src != dest) {
3698		error = break_layout(dest, true);
3699		if (error)
3700			return error;
3701	}
3702
3703	/* Lock one inode and make sure nobody got in and leased it. */
3704	inode_lock(src);
3705	error = break_layout(src, false);
3706	if (error) {
3707		inode_unlock(src);
3708		if (error == -EWOULDBLOCK)
3709			goto retry;
3710		return error;
3711	}
3712
3713	if (src == dest)
3714		return 0;
3715
3716	/* Lock the other inode and make sure nobody got in and leased it. */
3717	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3718	error = break_layout(dest, false);
3719	if (error) {
3720		inode_unlock(src);
3721		inode_unlock(dest);
3722		if (error == -EWOULDBLOCK)
3723			goto retry;
3724		return error;
3725	}
3726
3727	return 0;
3728}
3729
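/*
 * Illustrative sketch (not xfs code) of the "lowest pointer value goes
 * first" rule used by xfs_iolock_two_inodes_and_break_layout() above: any
 * two threads locking the same pair of objects acquire the locks in the
 * same order, so they cannot deadlock against each other.  A minimal
 * pthreads analogue; the struct and function names are invented.
 */
#include <stdint.h>
#include <pthread.h>

struct obj {
	pthread_mutex_t	lock;
};

static void lock_two_objs(struct obj *a, struct obj *b)
{
	if (a == b) {				/* same object: lock once */
		pthread_mutex_lock(&a->lock);
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {	/* lower address first */
		struct obj *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);
}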
3730/*
3731 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3732 * mmap activity.
3733 */
3734int
3735xfs_ilock2_io_mmap(
3736	struct xfs_inode	*ip1,
3737	struct xfs_inode	*ip2)
3738{
3739	int			ret;
3740
3741	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3742	if (ret)
3743		return ret;
3744	if (ip1 == ip2)
3745		xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3746	else
3747		xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
3748				    ip2, XFS_MMAPLOCK_EXCL);
3749	return 0;
3750}
3751
3752/* Unlock both inodes to allow IO and mmap activity. */
3753void
3754xfs_iunlock2_io_mmap(
3755	struct xfs_inode	*ip1,
3756	struct xfs_inode	*ip2)
3757{
3758	bool			same_inode = (ip1 == ip2);
3759
3760	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3761	if (!same_inode)
3762		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3763	inode_unlock(VFS_I(ip2));
3764	if (!same_inode)
3765		inode_unlock(VFS_I(ip1));
3766}
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include <linux/iversion.h>
   7
   8#include "xfs.h"
   9#include "xfs_fs.h"
  10#include "xfs_shared.h"
  11#include "xfs_format.h"
  12#include "xfs_log_format.h"
  13#include "xfs_trans_resv.h"
  14#include "xfs_mount.h"
  15#include "xfs_defer.h"
  16#include "xfs_inode.h"
  17#include "xfs_dir2.h"
  18#include "xfs_attr.h"
  19#include "xfs_trans_space.h"
  20#include "xfs_trans.h"
  21#include "xfs_buf_item.h"
  22#include "xfs_inode_item.h"
  23#include "xfs_iunlink_item.h"
  24#include "xfs_ialloc.h"
  25#include "xfs_bmap.h"
  26#include "xfs_bmap_util.h"
  27#include "xfs_errortag.h"
  28#include "xfs_error.h"
  29#include "xfs_quota.h"
  30#include "xfs_filestream.h"
  31#include "xfs_trace.h"
  32#include "xfs_icache.h"
  33#include "xfs_symlink.h"
  34#include "xfs_trans_priv.h"
  35#include "xfs_log.h"
  36#include "xfs_bmap_btree.h"
  37#include "xfs_reflink.h"
  38#include "xfs_ag.h"
  39#include "xfs_log_priv.h"
  40#include "xfs_health.h"
  41
  42struct kmem_cache *xfs_inode_cache;
  43
  44STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
  45STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
  46	struct xfs_inode *);
  47
  48/*
  49 * helper function to extract extent size hint from inode
  50 */
  51xfs_extlen_t
  52xfs_get_extsz_hint(
  53	struct xfs_inode	*ip)
  54{
  55	/*
  56	 * No point in aligning allocations if we need to COW to actually
  57	 * write to them.
  58	 */
  59	if (xfs_is_always_cow_inode(ip))
  60		return 0;
  61	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
  62		return ip->i_extsize;
  63	if (XFS_IS_REALTIME_INODE(ip))
  64		return ip->i_mount->m_sb.sb_rextsize;
  65	return 0;
  66}
  67
  68/*
  69 * Helper function to extract CoW extent size hint from inode.
  70 * Between the extent size hint and the CoW extent size hint, we
  71 * return the greater of the two.  If the value is zero (automatic),
  72 * use the default size.
  73 */
  74xfs_extlen_t
  75xfs_get_cowextsz_hint(
  76	struct xfs_inode	*ip)
  77{
  78	xfs_extlen_t		a, b;
  79
  80	a = 0;
  81	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
  82		a = ip->i_cowextsize;
  83	b = xfs_get_extsz_hint(ip);
  84
  85	a = max(a, b);
  86	if (a == 0)
  87		return XFS_DEFAULT_COWEXTSZ_HINT;
  88	return a;
  89}
  90
  91/*
  92 * These two are wrapper routines around the xfs_ilock() routine used to
  93 * centralize some grungy code.  They are used in places that wish to lock the
  94 * inode solely for reading the extents.  The reason these places can't just
  95 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards to
  96 * bringing in of the extents from disk for a file in b-tree format.  If the
  97 * inode is in b-tree format, then we need to lock the inode exclusively until
  98 * the extents are read in.  Locking it exclusively all the time would limit
  99 * our parallelism unnecessarily, though.  What we do instead is check to see
 100 * if the extents have been read in yet, and only lock the inode exclusively
 101 * if they have not.
 102 *
 103 * The functions return a value which should be given to the corresponding
 104 * xfs_iunlock() call.
 105 */
 106uint
 107xfs_ilock_data_map_shared(
 108	struct xfs_inode	*ip)
 109{
 110	uint			lock_mode = XFS_ILOCK_SHARED;
 111
 112	if (xfs_need_iread_extents(&ip->i_df))
 113		lock_mode = XFS_ILOCK_EXCL;
 114	xfs_ilock(ip, lock_mode);
 115	return lock_mode;
 116}
 117
 118uint
 119xfs_ilock_attr_map_shared(
 120	struct xfs_inode	*ip)
 121{
 122	uint			lock_mode = XFS_ILOCK_SHARED;
 123
 124	if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
 125		lock_mode = XFS_ILOCK_EXCL;
 126	xfs_ilock(ip, lock_mode);
 127	return lock_mode;
 128}
 129
 130/*
 131 * You can't set both SHARED and EXCL for the same lock,
 132 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
 133 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
 134 * to set in lock_flags.
 135 */
 136static inline void
 137xfs_lock_flags_assert(
 138	uint		lock_flags)
 139{
 140	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 141		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 142	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 143		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 144	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 145		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 146	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 147	ASSERT(lock_flags != 0);
 148}
 149
 150/*
 151 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 152 * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
 153 * various combinations of the locks to be obtained.
 154 *
 155 * The 3 locks should always be ordered so that the IO lock is obtained first,
 156 * the mmap lock second and the ilock last in order to prevent deadlock.
 157 *
 158 * Basic locking order:
 159 *
 160 * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
 161 *
 162 * mmap_lock locking order:
 163 *
 164 * i_rwsem -> page lock -> mmap_lock
 165 * mmap_lock -> invalidate_lock -> page_lock
 166 *
 167 * The difference in mmap_lock locking order means that we cannot hold the
 168 * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
 169 * can fault in pages during copy in/out (for buffered IO) or require the
 170 * mmap_lock in get_user_pages() to map the user pages into the kernel address
 171 * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
 172 * fault because page faults already hold the mmap_lock.
 173 *
 174 * Hence to serialise fully against both syscall and mmap based IO, we need to
 175 * take both the i_rwsem and the invalidate_lock. These locks should *only* be
 176 * both taken in places where we need to invalidate the page cache in a race
 177 * free manner (e.g. truncate, hole punch and other extent manipulation
 178 * functions).
 179 */
 180void
 181xfs_ilock(
 182	xfs_inode_t		*ip,
 183	uint			lock_flags)
 184{
 185	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
 186
 187	xfs_lock_flags_assert(lock_flags);
 188
 189	if (lock_flags & XFS_IOLOCK_EXCL) {
 190		down_write_nested(&VFS_I(ip)->i_rwsem,
 191				  XFS_IOLOCK_DEP(lock_flags));
 192	} else if (lock_flags & XFS_IOLOCK_SHARED) {
 193		down_read_nested(&VFS_I(ip)->i_rwsem,
 194				 XFS_IOLOCK_DEP(lock_flags));
 195	}
 196
 197	if (lock_flags & XFS_MMAPLOCK_EXCL) {
 198		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
 199				  XFS_MMAPLOCK_DEP(lock_flags));
 200	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
 201		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
 202				 XFS_MMAPLOCK_DEP(lock_flags));
 203	}
 204
 205	if (lock_flags & XFS_ILOCK_EXCL)
 206		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
 207	else if (lock_flags & XFS_ILOCK_SHARED)
 208		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
 209}
 210
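/*
 * Typical usage sketch for the ordering rules above (hypothetical caller,
 * not code from this file): lock classes are taken outermost-first and
 * released with the same flags they were taken with.
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);		// i_rwsem first
 *	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);	// then invalidate_lock
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);		// ilock last
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 */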
 211/*
 212 * This is just like xfs_ilock(), except that the caller
 213 * is guaranteed not to sleep.  It returns 1 if it gets
 214 * the requested locks and 0 otherwise.  If the IO lock is
 215 * obtained but the inode lock cannot be, then the IO lock
 216 * is dropped before returning.
 217 *
 218 * ip -- the inode being locked
 219 * lock_flags -- this parameter indicates the inode's locks
 220 *       to be locked.  See the comment for xfs_ilock() for a list
 221 *	 of valid values.
 222 */
 223int
 224xfs_ilock_nowait(
 225	xfs_inode_t		*ip,
 226	uint			lock_flags)
 227{
 228	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
 229
 230	xfs_lock_flags_assert(lock_flags);
 231
 232	if (lock_flags & XFS_IOLOCK_EXCL) {
 233		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
 234			goto out;
 235	} else if (lock_flags & XFS_IOLOCK_SHARED) {
 236		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
 237			goto out;
 238	}
 239
 240	if (lock_flags & XFS_MMAPLOCK_EXCL) {
 241		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
 242			goto out_undo_iolock;
 243	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
 244		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
 245			goto out_undo_iolock;
 246	}
 247
 248	if (lock_flags & XFS_ILOCK_EXCL) {
 249		if (!mrtryupdate(&ip->i_lock))
 250			goto out_undo_mmaplock;
 251	} else if (lock_flags & XFS_ILOCK_SHARED) {
 252		if (!mrtryaccess(&ip->i_lock))
 253			goto out_undo_mmaplock;
 254	}
 255	return 1;
 256
 257out_undo_mmaplock:
 258	if (lock_flags & XFS_MMAPLOCK_EXCL)
 259		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
 260	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 261		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
 262out_undo_iolock:
 263	if (lock_flags & XFS_IOLOCK_EXCL)
 264		up_write(&VFS_I(ip)->i_rwsem);
 265	else if (lock_flags & XFS_IOLOCK_SHARED)
 266		up_read(&VFS_I(ip)->i_rwsem);
 267out:
 268	return 0;
 269}
 270
 271/*
 272 * xfs_iunlock() is used to drop the inode locks acquired with
 273 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 274 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 275 * that we know which locks to drop.
 276 *
 277 * ip -- the inode being unlocked
 278 * lock_flags -- this parameter indicates the inode's locks
 279 *       to be unlocked.  See the comment for xfs_ilock() for a list
 280 *	 of valid values for this parameter.
 281 *
 282 */
 283void
 284xfs_iunlock(
 285	xfs_inode_t		*ip,
 286	uint			lock_flags)
 287{
 288	xfs_lock_flags_assert(lock_flags);
 289
 290	if (lock_flags & XFS_IOLOCK_EXCL)
 291		up_write(&VFS_I(ip)->i_rwsem);
 292	else if (lock_flags & XFS_IOLOCK_SHARED)
 293		up_read(&VFS_I(ip)->i_rwsem);
 294
 295	if (lock_flags & XFS_MMAPLOCK_EXCL)
 296		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
 297	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 298		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
 299
 300	if (lock_flags & XFS_ILOCK_EXCL)
 301		mrunlock_excl(&ip->i_lock);
 302	else if (lock_flags & XFS_ILOCK_SHARED)
 303		mrunlock_shared(&ip->i_lock);
 304
 305	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
 306}
 307
 308/*
 309 * give up write locks.  the i/o lock cannot be held nested
 310 * if it is being demoted.
 311 */
 312void
 313xfs_ilock_demote(
 314	xfs_inode_t		*ip,
 315	uint			lock_flags)
 316{
 317	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
 318	ASSERT((lock_flags &
 319		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
 320
 321	if (lock_flags & XFS_ILOCK_EXCL)
 322		mrdemote(&ip->i_lock);
 323	if (lock_flags & XFS_MMAPLOCK_EXCL)
 324		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
 325	if (lock_flags & XFS_IOLOCK_EXCL)
 326		downgrade_write(&VFS_I(ip)->i_rwsem);
 327
 328	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
 329}
 330
 331#if defined(DEBUG) || defined(XFS_WARN)
 332static inline bool
 333__xfs_rwsem_islocked(
 334	struct rw_semaphore	*rwsem,
 335	bool			shared)
 336{
 337	if (!debug_locks)
 338		return rwsem_is_locked(rwsem);
 339
 340	if (!shared)
 341		return lockdep_is_held_type(rwsem, 0);
 342
 343	/*
 344	 * We are checking that the lock is held at least in shared
 345	 * mode but don't care that it might be held exclusively
 346	 * (i.e. shared | excl). Hence we check if the lock is held
 347	 * in any mode rather than an explicit shared mode.
 348	 */
 349	return lockdep_is_held_type(rwsem, -1);
 350}
 351
 352bool
 353xfs_isilocked(
 354	struct xfs_inode	*ip,
 355	uint			lock_flags)
 356{
 357	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
 358		if (!(lock_flags & XFS_ILOCK_SHARED))
 359			return !!ip->i_lock.mr_writer;
 360		return rwsem_is_locked(&ip->i_lock.mr_lock);
 361	}
 362
 363	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
 364		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
 365				(lock_flags & XFS_MMAPLOCK_SHARED));
 366	}
 367
 368	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
 369		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
 370				(lock_flags & XFS_IOLOCK_SHARED));
 371	}
 372
 373	ASSERT(0);
 374	return false;
 375}
 376#endif
 377
 378/*
 379 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 380 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 381 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 382 * errors and warnings.
 383 */
 384#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
 385static bool
 386xfs_lockdep_subclass_ok(
 387	int subclass)
 388{
 389	return subclass < MAX_LOCKDEP_SUBCLASSES;
 390}
 391#else
 392#define xfs_lockdep_subclass_ok(subclass)	(true)
 393#endif
 394
 395/*
 396 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 397 * value. This can be called for any type of inode lock combination, including
 398 * parent locking. Care must be taken to ensure we don't overrun the subclass
 399 * storage fields in the class mask we build.
 400 */
 401static inline uint
 402xfs_lock_inumorder(
 403	uint	lock_mode,
 404	uint	subclass)
 405{
 406	uint	class = 0;
 407
 408	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
 409			      XFS_ILOCK_RTSUM)));
 410	ASSERT(xfs_lockdep_subclass_ok(subclass));
 411
 412	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
 413		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
 414		class += subclass << XFS_IOLOCK_SHIFT;
 415	}
 416
 417	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
 418		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
 419		class += subclass << XFS_MMAPLOCK_SHIFT;
 420	}
 421
 422	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
 423		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
 424		class += subclass << XFS_ILOCK_SHIFT;
 425	}
 426
 427	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
 428}
 429
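/*
 * Worked example of the subclass packing performed by xfs_lock_inumorder()
 * above, with invented constants (these are not the real XFS shift values):
 * each lock type owns a bit-field in the flags word, and the inode's
 * position in the locking order is shifted into that field so that lockdep
 * sees a distinct subclass per inode.
 */
#include <assert.h>

#define EX_MODE_MASK	0x00ffu		/* hypothetical lock-mode bits */
#define EX_DEP_SHIFT	8		/* hypothetical subclass position */

static unsigned int ex_lock_order(unsigned int mode, unsigned int subclass)
{
	return (mode & EX_MODE_MASK) | (subclass << EX_DEP_SHIFT);
}

int main(void)
{
	unsigned int flags = ex_lock_order(0x1, 2);	/* third inode */

	assert((flags & EX_MODE_MASK) == 0x1);
	assert((flags >> EX_DEP_SHIFT) == 2);
	return 0;
}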
 430/*
 431 * The following routine will lock n inodes in exclusive mode.  We assume the
 432 * caller calls us with the inodes in i_ino order.
 433 *
 434 * We need to detect deadlock where an inode that we lock is in the AIL and we
 435 * start waiting for another inode that is locked by a thread in a long running
 436 * transaction (such as truncate). This can result in deadlock since the long
 437 * running trans might need to wait for the inode we just locked in order to
 438 * push the tail and free space in the log.
 439 *
 440 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 441 * the iolock, the mmaplock or the ilock - but never more than one type at
 442 * once. If we lock more than one type at a time, lockdep will report false
 443 * positives saying we have violated locking orders.
 444 */
 445static void
 446xfs_lock_inodes(
 447	struct xfs_inode	**ips,
 448	int			inodes,
 449	uint			lock_mode)
 450{
 451	int			attempts = 0;
 452	uint			i;
 453	int			j;
 454	bool			try_lock;
 455	struct xfs_log_item	*lp;
 456
 457	/*
 458	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
 459	 * support an arbitrary depth of locking here, but absolute limits on
 460	 * inodes depend on the type of locking and the limits placed by
 461	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
 462	 * the asserts.
 463	 */
 464	ASSERT(ips && inodes >= 2 && inodes <= 5);
 465	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
 466			    XFS_ILOCK_EXCL));
 467	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
 468			      XFS_ILOCK_SHARED)));
 469	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
 470		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
 471	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
 472		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
 473
 474	if (lock_mode & XFS_IOLOCK_EXCL) {
 475		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
 476	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
 477		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
 478
 479again:
 480	try_lock = false;
 481	i = 0;
 482	for (; i < inodes; i++) {
 483		ASSERT(ips[i]);
 484
 485		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
 486			continue;
 487
 488		/*
 489		 * If try_lock is not set yet, make sure all locked inodes are
 490		 * not in the AIL.  If any are, set try_lock to be used later.
 491		 */
 492		if (!try_lock) {
 493			for (j = (i - 1); j >= 0 && !try_lock; j--) {
 494				lp = &ips[j]->i_itemp->ili_item;
 495				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
 496					try_lock = true;
 497			}
 498		}
 499
 500		/*
 501		 * If any of the previous locks we have locked is in the AIL,
 502		 * we must TRY to get the second and subsequent locks. If
 503		 * we can't get any, we must release all we have
 504		 * and try again.
 505		 */
 506		if (!try_lock) {
 507			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
 508			continue;
 509		}
 510
 511		/* try_lock means we have an inode locked that is in the AIL. */
 512		ASSERT(i != 0);
 513		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
 514			continue;
 515
 516		/*
 517		 * Unlock all previous guys and try again.  xfs_iunlock will try
 518		 * to push the tail if the inode is in the AIL.
 519		 */
 520		attempts++;
 521		for (j = i - 1; j >= 0; j--) {
 522			/*
 523			 * Check to see if we've already unlocked this one.  Not
 524			 * the first one going back, and the inode ptr is the
 525			 * same.
 526			 */
 527			if (j != (i - 1) && ips[j] == ips[j + 1])
 528				continue;
 529
 530			xfs_iunlock(ips[j], lock_mode);
 531		}
 532
 533		if ((attempts % 5) == 0) {
 534			delay(1); /* Don't just spin the CPU */
 535		}
 536		goto again;
 537	}
 538}
 539
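/*
 * Illustrative sketch (not xfs code) of the back-off-and-retry pattern used
 * by xfs_lock_inodes() above, simplified so that every lock after the first
 * is a trylock (the real code only switches to trylocks once a held inode
 * is found in the AIL).  On failure, everything held is dropped and the
 * whole sequence restarts.  Assumes n >= 1.
 */
#include <pthread.h>
#include <sched.h>

static void lock_all(pthread_mutex_t **locks, int n)
{
again:
	pthread_mutex_lock(locks[0]);
	for (int i = 1; i < n; i++) {
		if (pthread_mutex_trylock(locks[i]) != 0) {
			/* Drop every lock we hold, then start over. */
			while (--i >= 0)
				pthread_mutex_unlock(locks[i]);
			sched_yield();	/* don't just spin the CPU */
			goto again;
		}
	}
}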
 540/*
 541 * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
 542 * mmaplock must be double-locked separately since we use i_rwsem and
 543 * invalidate_lock for that. We now support taking one lock EXCL and the
 544 * other SHARED.
 545 */
 546void
 547xfs_lock_two_inodes(
 548	struct xfs_inode	*ip0,
 549	uint			ip0_mode,
 550	struct xfs_inode	*ip1,
 551	uint			ip1_mode)
 552{
 553	int			attempts = 0;
 554	struct xfs_log_item	*lp;
 555
 556	ASSERT(hweight32(ip0_mode) == 1);
 557	ASSERT(hweight32(ip1_mode) == 1);
 558	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
 559	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
 560	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
 561	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
 562	ASSERT(ip0->i_ino != ip1->i_ino);
 563
 564	if (ip0->i_ino > ip1->i_ino) {
 565		swap(ip0, ip1);
 566		swap(ip0_mode, ip1_mode);
 567	}
 568
 569 again:
 570	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
 571
 572	/*
 573	 * If the first lock we have locked is in the AIL, we must TRY to get
 574	 * the second lock. If we can't get it, we must release the first one
 575	 * and try again.
 576	 */
 577	lp = &ip0->i_itemp->ili_item;
 578	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
 579		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
 580			xfs_iunlock(ip0, ip0_mode);
 581			if ((++attempts % 5) == 0)
 582				delay(1); /* Don't just spin the CPU */
 583			goto again;
 584		}
 585	} else {
 586		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
 587	}
 588}
 589
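/*
 * Usage sketch (hypothetical caller): two distinct inodes locked EXCL for a
 * rename-style update; the function itself takes them in i_ino order.
 *
 *	xfs_lock_two_inodes(ip1, XFS_ILOCK_EXCL, ip2, XFS_ILOCK_EXCL);
 *	... update both inodes in one transaction ...
 *	xfs_iunlock(ip1, XFS_ILOCK_EXCL);
 *	xfs_iunlock(ip2, XFS_ILOCK_EXCL);
 */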
 590uint
 591xfs_ip2xflags(
 592	struct xfs_inode	*ip)
 593{
 594	uint			flags = 0;
 595
 596	if (ip->i_diflags & XFS_DIFLAG_ANY) {
 597		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
 598			flags |= FS_XFLAG_REALTIME;
 599		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
 600			flags |= FS_XFLAG_PREALLOC;
 601		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
 602			flags |= FS_XFLAG_IMMUTABLE;
 603		if (ip->i_diflags & XFS_DIFLAG_APPEND)
 604			flags |= FS_XFLAG_APPEND;
 605		if (ip->i_diflags & XFS_DIFLAG_SYNC)
 606			flags |= FS_XFLAG_SYNC;
 607		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
 608			flags |= FS_XFLAG_NOATIME;
 609		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
 610			flags |= FS_XFLAG_NODUMP;
 611		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
 612			flags |= FS_XFLAG_RTINHERIT;
 613		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
 614			flags |= FS_XFLAG_PROJINHERIT;
 615		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
 616			flags |= FS_XFLAG_NOSYMLINKS;
 617		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
 618			flags |= FS_XFLAG_EXTSIZE;
 619		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
 620			flags |= FS_XFLAG_EXTSZINHERIT;
 621		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
 622			flags |= FS_XFLAG_NODEFRAG;
 623		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
 624			flags |= FS_XFLAG_FILESTREAM;
 625	}
 626
 627	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
 628		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
 629			flags |= FS_XFLAG_DAX;
 630		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
 631			flags |= FS_XFLAG_COWEXTSIZE;
 632	}
 633
 634	if (xfs_inode_has_attr_fork(ip))
 635		flags |= FS_XFLAG_HASATTR;
 636	return flags;
 637}
 638
 639/*
 640 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 641 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 642 * ci_name->name will point to the actual name (caller must free) or
 643 * will be set to NULL if an exact match is found.
 644 */
 645int
 646xfs_lookup(
 647	struct xfs_inode	*dp,
 648	const struct xfs_name	*name,
 649	struct xfs_inode	**ipp,
 650	struct xfs_name		*ci_name)
 651{
 652	xfs_ino_t		inum;
 653	int			error;
 654
 655	trace_xfs_lookup(dp, name);
 656
 657	if (xfs_is_shutdown(dp->i_mount))
 658		return -EIO;
 659	if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
 660		return -EIO;
 661
 662	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
 663	if (error)
 664		goto out_unlock;
 665
 666	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
 667	if (error)
 668		goto out_free_name;
 669
 670	return 0;
 671
 672out_free_name:
 673	if (ci_name)
 674		kmem_free(ci_name->name);
 675out_unlock:
 676	*ipp = NULL;
 677	return error;
 678}
 679
 680/* Propagate di_flags from a parent inode to a child inode. */
 681static void
 682xfs_inode_inherit_flags(
 683	struct xfs_inode	*ip,
 684	const struct xfs_inode	*pip)
 685{
 686	unsigned int		di_flags = 0;
 687	xfs_failaddr_t		failaddr;
 688	umode_t			mode = VFS_I(ip)->i_mode;
 689
 690	if (S_ISDIR(mode)) {
 691		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
 692			di_flags |= XFS_DIFLAG_RTINHERIT;
 693		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
 694			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
 695			ip->i_extsize = pip->i_extsize;
 696		}
 697		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
 698			di_flags |= XFS_DIFLAG_PROJINHERIT;
 699	} else if (S_ISREG(mode)) {
 700		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
 701		    xfs_has_realtime(ip->i_mount))
 702			di_flags |= XFS_DIFLAG_REALTIME;
 703		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
 704			di_flags |= XFS_DIFLAG_EXTSIZE;
 705			ip->i_extsize = pip->i_extsize;
 706		}
 707	}
 708	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
 709	    xfs_inherit_noatime)
 710		di_flags |= XFS_DIFLAG_NOATIME;
 711	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
 712	    xfs_inherit_nodump)
 713		di_flags |= XFS_DIFLAG_NODUMP;
 714	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
 715	    xfs_inherit_sync)
 716		di_flags |= XFS_DIFLAG_SYNC;
 717	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
 718	    xfs_inherit_nosymlinks)
 719		di_flags |= XFS_DIFLAG_NOSYMLINKS;
 720	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
 721	    xfs_inherit_nodefrag)
 722		di_flags |= XFS_DIFLAG_NODEFRAG;
 723	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
 724		di_flags |= XFS_DIFLAG_FILESTREAM;
 725
 726	ip->i_diflags |= di_flags;
 727
 728	/*
 729	 * Inode verifiers on older kernels only check that the extent size
 730	 * hint is an integer multiple of the rt extent size on realtime files.
 731	 * They did not check the hint alignment on a directory with both
 732	 * rtinherit and extszinherit flags set.  If the misaligned hint is
 733	 * propagated from a directory into a new realtime file, new file
 734	 * allocations will fail due to math errors in the rt allocator and/or
 735	 * trip the verifiers.  Validate the hint settings in the new file so
 736	 * that we don't let broken hints propagate.
 737	 */
 738	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
 739			VFS_I(ip)->i_mode, ip->i_diflags);
 740	if (failaddr) {
 741		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
 742				   XFS_DIFLAG_EXTSZINHERIT);
 743		ip->i_extsize = 0;
 744	}
 745}
 746
 747/* Propagate di_flags2 from a parent inode to a child inode. */
 748static void
 749xfs_inode_inherit_flags2(
 750	struct xfs_inode	*ip,
 751	const struct xfs_inode	*pip)
 752{
 753	xfs_failaddr_t		failaddr;
 754
 755	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
 756		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
 757		ip->i_cowextsize = pip->i_cowextsize;
 758	}
 759	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
 760		ip->i_diflags2 |= XFS_DIFLAG2_DAX;
 761
 762	/* Don't let invalid cowextsize hints propagate. */
 763	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
 764			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
 765	if (failaddr) {
 766		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
 767		ip->i_cowextsize = 0;
 768	}
 769}
 770
 771/*
 772 * Initialise a newly allocated inode and return the in-core inode to the
 773 * caller locked exclusively.
 774 */
 775int
 776xfs_init_new_inode(
 777	struct mnt_idmap	*idmap,
 778	struct xfs_trans	*tp,
 779	struct xfs_inode	*pip,
 780	xfs_ino_t		ino,
 781	umode_t			mode,
 782	xfs_nlink_t		nlink,
 783	dev_t			rdev,
 784	prid_t			prid,
 785	bool			init_xattrs,
 786	struct xfs_inode	**ipp)
 787{
 788	struct inode		*dir = pip ? VFS_I(pip) : NULL;
 789	struct xfs_mount	*mp = tp->t_mountp;
 790	struct xfs_inode	*ip;
 791	unsigned int		flags;
 792	int			error;
 793	struct timespec64	tv;
 794	struct inode		*inode;
 795
 796	/*
 797	 * Protect against obviously corrupt allocation btree records. Later
 798	 * xfs_iget checks will catch re-allocation of other active in-memory
 799	 * and on-disk inodes. If we don't catch reallocating the parent inode
 800	 * here we will deadlock in xfs_iget() so we have to do these checks
 801	 * first.
 802	 */
 803	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
 804		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
 805		return -EFSCORRUPTED;
 806	}
 807
 808	/*
 809	 * Get the in-core inode with the lock held exclusively to prevent
 810	 * others from looking at it until we're done.
 811	 */
 812	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
 813	if (error)
 814		return error;
 815
 816	ASSERT(ip != NULL);
 817	inode = VFS_I(ip);
 818	set_nlink(inode, nlink);
 819	inode->i_rdev = rdev;
 820	ip->i_projid = prid;
 821
 822	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
 823		inode_fsuid_set(inode, idmap);
 824		inode->i_gid = dir->i_gid;
 825		inode->i_mode = mode;
 826	} else {
 827		inode_init_owner(idmap, inode, dir, mode);
 828	}
 829
 830	/*
 831	 * If the group ID of the new file does not match the effective group
 832	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
 833	 * (and only if the irix_sgid_inherit compatibility variable is set).
 834	 */
 835	if (irix_sgid_inherit && (inode->i_mode & S_ISGID) &&
 836	    !vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
 837		inode->i_mode &= ~S_ISGID;
 838
 839	ip->i_disk_size = 0;
 840	ip->i_df.if_nextents = 0;
 841	ASSERT(ip->i_nblocks == 0);
 842
 843	tv = inode_set_ctime_current(inode);
 844	inode_set_mtime_to_ts(inode, tv);
 845	inode_set_atime_to_ts(inode, tv);
 846
 847	ip->i_extsize = 0;
 848	ip->i_diflags = 0;
 849
 850	if (xfs_has_v3inodes(mp)) {
 851		inode_set_iversion(inode, 1);
 852		ip->i_cowextsize = 0;
 853		ip->i_crtime = tv;
 854	}
 855
 856	flags = XFS_ILOG_CORE;
 857	switch (mode & S_IFMT) {
 858	case S_IFIFO:
 859	case S_IFCHR:
 860	case S_IFBLK:
 861	case S_IFSOCK:
 862		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
 863		flags |= XFS_ILOG_DEV;
 864		break;
 865	case S_IFREG:
 866	case S_IFDIR:
 867		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
 868			xfs_inode_inherit_flags(ip, pip);
 869		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
 870			xfs_inode_inherit_flags2(ip, pip);
 871		fallthrough;
 872	case S_IFLNK:
 873		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
 874		ip->i_df.if_bytes = 0;
 875		ip->i_df.if_data = NULL;
 876		break;
 877	default:
 878		ASSERT(0);
 879	}
 880
 881	/*
 882	 * If we need to create attributes immediately after allocating the
 883	 * inode, initialise an empty attribute fork right now. We use the
 884	 * default fork offset for attributes here as we don't know exactly what
 885	 * size or how many attributes we might be adding. We can do this
 886	 * safely here because we know the data fork is completely empty and
 887	 * this saves us from needing to run a separate transaction to set the
 888	 * fork offset in the immediate future.
 889	 */
 890	if (init_xattrs && xfs_has_attr(mp)) {
 891		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
 892		xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
 893	}
 894
 895	/*
 896	 * Log the new values stuffed into the inode.
 897	 */
 898	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 899	xfs_trans_log_inode(tp, ip, flags);
 900
 901	/* now that we have an i_mode we can setup the inode structure */
 902	xfs_setup_inode(ip);
 903
 904	*ipp = ip;
 905	return 0;
 906}
 907
 908/*
 909 * Decrement the link count on an inode & log the change.  If this causes the
 910 * link count to go to zero, move the inode to the AGI unlinked list so that it can
 911 * be freed when the last active reference goes away via xfs_inactive().
 912 */
 913static int			/* error */
 914xfs_droplink(
 915	xfs_trans_t *tp,
 916	xfs_inode_t *ip)
 917{
 918	if (VFS_I(ip)->i_nlink == 0) {
 919		xfs_alert(ip->i_mount,
 920			  "%s: Attempt to drop inode (%llu) with nlink zero.",
 921			  __func__, ip->i_ino);
 922		return -EFSCORRUPTED;
 923	}
 924
 925	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 926
 927	drop_nlink(VFS_I(ip));
 928	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 929
 930	if (VFS_I(ip)->i_nlink)
 931		return 0;
 932
 933	return xfs_iunlink(tp, ip);
 934}
 935
 936/*
 937 * Increment the link count on an inode & log the change.
 938 */
 939static void
 940xfs_bumplink(
 941	xfs_trans_t *tp,
 942	xfs_inode_t *ip)
 943{
 944	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 945
 946	inc_nlink(VFS_I(ip));
 947	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 948}
 949
 950int
 951xfs_create(
 952	struct mnt_idmap	*idmap,
 953	xfs_inode_t		*dp,
 954	struct xfs_name		*name,
 955	umode_t			mode,
 956	dev_t			rdev,
 957	bool			init_xattrs,
 958	xfs_inode_t		**ipp)
 959{
 960	int			is_dir = S_ISDIR(mode);
 961	struct xfs_mount	*mp = dp->i_mount;
 962	struct xfs_inode	*ip = NULL;
 963	struct xfs_trans	*tp = NULL;
 964	int			error;
 965	bool                    unlock_dp_on_error = false;
 966	prid_t			prid;
 967	struct xfs_dquot	*udqp = NULL;
 968	struct xfs_dquot	*gdqp = NULL;
 969	struct xfs_dquot	*pdqp = NULL;
 970	struct xfs_trans_res	*tres;
 971	uint			resblks;
 972	xfs_ino_t		ino;
 973
 974	trace_xfs_create(dp, name);
 975
 976	if (xfs_is_shutdown(mp))
 977		return -EIO;
 978	if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
 979		return -EIO;
 980
 981	prid = xfs_get_initial_prid(dp);
 982
 983	/*
 984	 * Make sure that we have allocated dquot(s) on disk.
 985	 */
 986	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
 987			mapped_fsgid(idmap, &init_user_ns), prid,
 988			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 989			&udqp, &gdqp, &pdqp);
 990	if (error)
 991		return error;
 992
 993	if (is_dir) {
 994		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
 995		tres = &M_RES(mp)->tr_mkdir;
 996	} else {
 997		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
 998		tres = &M_RES(mp)->tr_create;
 999	}
1000
1001	/*
1002	 * Initially assume that the file does not exist and
1003	 * reserve the resources for that case.  If that is not
1004	 * the case we'll drop the one we have and get a more
1005	 * appropriate transaction later.
1006	 */
1007	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1008			&tp);
1009	if (error == -ENOSPC) {
1010		/* flush outstanding delalloc blocks and retry */
1011		xfs_flush_inodes(mp);
1012		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
1013				resblks, &tp);
1014	}
1015	if (error)
1016		goto out_release_dquots;
1017
1018	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1019	unlock_dp_on_error = true;
1020
1021	/*
1022	 * A newly created regular or special file just has one directory
1023	 * entry pointing to them, but a directory also the "." entry
1024	 * pointing to itself.
1025	 */
1026	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1027	if (!error)
1028		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
1029				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
1030	if (error)
1031		goto out_trans_cancel;
1032
1033	/*
1034	 * Now we join the directory inode to the transaction.  We do not do it
1035	 * earlier because xfs_dialloc might commit the previous transaction
1036	 * (and release all the locks).  An error from here on will result in
1037	 * the transaction cancel unlocking dp so don't do it explicitly in the
1038	 * error path.
1039	 */
1040	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1041	unlock_dp_on_error = false;
1042
1043	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1044					resblks - XFS_IALLOC_SPACE_RES(mp));
1045	if (error) {
1046		ASSERT(error != -ENOSPC);
1047		goto out_trans_cancel;
1048	}
1049	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1050	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1051
1052	if (is_dir) {
1053		error = xfs_dir_init(tp, ip, dp);
1054		if (error)
1055			goto out_trans_cancel;
1056
1057		xfs_bumplink(tp, dp);
1058	}
1059
1060	/*
1061	 * If this is a synchronous mount, make sure that the
1062	 * create transaction goes to disk before returning to
1063	 * the user.
1064	 */
1065	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1066		xfs_trans_set_sync(tp);
1067
1068	/*
1069	 * Attach the dquot(s) to the inodes and modify them incore.
1070	 * The ids of the inode couldn't have changed since the new
1071	 * inode has been locked ever since it was created.
1072	 */
1073	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1074
1075	error = xfs_trans_commit(tp);
1076	if (error)
1077		goto out_release_inode;
1078
1079	xfs_qm_dqrele(udqp);
1080	xfs_qm_dqrele(gdqp);
1081	xfs_qm_dqrele(pdqp);
1082
1083	*ipp = ip;
1084	return 0;
1085
1086 out_trans_cancel:
1087	xfs_trans_cancel(tp);
1088 out_release_inode:
1089	/*
1090	 * Wait until after the current transaction is aborted to finish the
1091	 * setup of the inode and release the inode.  This prevents recursive
1092	 * transactions and deadlocks from xfs_inactive.
1093	 */
1094	if (ip) {
1095		xfs_finish_inode_setup(ip);
1096		xfs_irele(ip);
1097	}
1098 out_release_dquots:
1099	xfs_qm_dqrele(udqp);
1100	xfs_qm_dqrele(gdqp);
1101	xfs_qm_dqrele(pdqp);
1102
1103	if (unlock_dp_on_error)
1104		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1105	return error;
1106}
1107
1108int
1109xfs_create_tmpfile(
1110	struct mnt_idmap	*idmap,
1111	struct xfs_inode	*dp,
1112	umode_t			mode,
1113	struct xfs_inode	**ipp)
1114{
1115	struct xfs_mount	*mp = dp->i_mount;
1116	struct xfs_inode	*ip = NULL;
1117	struct xfs_trans	*tp = NULL;
1118	int			error;
1119	prid_t                  prid;
1120	struct xfs_dquot	*udqp = NULL;
1121	struct xfs_dquot	*gdqp = NULL;
1122	struct xfs_dquot	*pdqp = NULL;
1123	struct xfs_trans_res	*tres;
1124	uint			resblks;
1125	xfs_ino_t		ino;
1126
1127	if (xfs_is_shutdown(mp))
1128		return -EIO;
1129
1130	prid = xfs_get_initial_prid(dp);
1131
1132	/*
1133	 * Make sure that we have allocated dquot(s) on disk.
1134	 */
1135	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
1136			mapped_fsgid(idmap, &init_user_ns), prid,
1137			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1138			&udqp, &gdqp, &pdqp);
1139	if (error)
1140		return error;
1141
1142	resblks = XFS_IALLOC_SPACE_RES(mp);
1143	tres = &M_RES(mp)->tr_create_tmpfile;
1144
1145	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1146			&tp);
1147	if (error)
1148		goto out_release_dquots;
1149
1150	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1151	if (!error)
1152		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
1153				0, 0, prid, false, &ip);
1154	if (error)
1155		goto out_trans_cancel;
1156
1157	if (xfs_has_wsync(mp))
1158		xfs_trans_set_sync(tp);
1159
1160	/*
1161	 * Attach the dquot(s) to the inodes and modify them incore.
1162	 * The ids of the inode couldn't have changed since the new
1163	 * inode has been locked ever since it was created.
1164	 */
1165	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1166
1167	error = xfs_iunlink(tp, ip);
1168	if (error)
1169		goto out_trans_cancel;
1170
1171	error = xfs_trans_commit(tp);
1172	if (error)
1173		goto out_release_inode;
1174
1175	xfs_qm_dqrele(udqp);
1176	xfs_qm_dqrele(gdqp);
1177	xfs_qm_dqrele(pdqp);
1178
1179	*ipp = ip;
1180	return 0;
1181
1182 out_trans_cancel:
1183	xfs_trans_cancel(tp);
1184 out_release_inode:
1185	/*
1186	 * Wait until after the current transaction is aborted to finish the
1187	 * setup of the inode and release the inode.  This prevents recursive
1188	 * transactions and deadlocks from xfs_inactive.
1189	 */
1190	if (ip) {
1191		xfs_finish_inode_setup(ip);
1192		xfs_irele(ip);
1193	}
1194 out_release_dquots:
1195	xfs_qm_dqrele(udqp);
1196	xfs_qm_dqrele(gdqp);
1197	xfs_qm_dqrele(pdqp);
1198
1199	return error;
1200}
1201
1202int
1203xfs_link(
1204	xfs_inode_t		*tdp,
1205	xfs_inode_t		*sip,
1206	struct xfs_name		*target_name)
1207{
1208	xfs_mount_t		*mp = tdp->i_mount;
1209	xfs_trans_t		*tp;
1210	int			error, nospace_error = 0;
1211	int			resblks;
1212
1213	trace_xfs_link(tdp, target_name);
1214
1215	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1216
1217	if (xfs_is_shutdown(mp))
1218		return -EIO;
1219	if (xfs_ifork_zapped(tdp, XFS_DATA_FORK))
1220		return -EIO;
1221
1222	error = xfs_qm_dqattach(sip);
1223	if (error)
1224		goto std_return;
1225
1226	error = xfs_qm_dqattach(tdp);
1227	if (error)
1228		goto std_return;
1229
1230	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1231	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
1232			&tp, &nospace_error);
1233	if (error)
1234		goto std_return;
1235
1236	/*
1237	 * If we are using project inheritance, we only allow hard link
1238	 * creation in our tree when the project IDs are the same; else
1239	 * the tree quota mechanism could be circumvented.
1240	 */
1241	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
1242		     tdp->i_projid != sip->i_projid)) {
1243		error = -EXDEV;
1244		goto error_return;
1245	}
1246
1247	if (!resblks) {
1248		error = xfs_dir_canenter(tp, tdp, target_name);
1249		if (error)
1250			goto error_return;
1251	}
1252
1253	/*
1254	 * Handle initial link state of O_TMPFILE inode
1255	 */
1256	if (VFS_I(sip)->i_nlink == 0) {
1257		struct xfs_perag	*pag;
1258
1259		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
1260		error = xfs_iunlink_remove(tp, pag, sip);
1261		xfs_perag_put(pag);
1262		if (error)
1263			goto error_return;
1264	}
1265
1266	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1267				   resblks);
1268	if (error)
1269		goto error_return;
1270	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1271	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1272
1273	xfs_bumplink(tp, sip);
1274
1275	/*
1276	 * If this is a synchronous mount, make sure that the
1277	 * link transaction goes to disk before returning to
1278	 * the user.
1279	 */
1280	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1281		xfs_trans_set_sync(tp);
1282
1283	return xfs_trans_commit(tp);
1284
1285 error_return:
1286	xfs_trans_cancel(tp);
1287 std_return:
1288	if (error == -ENOSPC && nospace_error)
1289		error = nospace_error;
1290	return error;
1291}
1292
1293/* Clear the reflink flag and the cowblocks tag if possible. */
1294static void
1295xfs_itruncate_clear_reflink_flags(
1296	struct xfs_inode	*ip)
1297{
1298	struct xfs_ifork	*dfork;
1299	struct xfs_ifork	*cfork;
1300
1301	if (!xfs_is_reflink_inode(ip))
1302		return;
1303	dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1304	cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
1305	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1306		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1307	if (cfork->if_bytes == 0)
1308		xfs_inode_clear_cowblocks_tag(ip);
1309}
1310
1311/*
1312 * Free up the underlying blocks past new_size.  The new size must be smaller
1313 * than the current size.  This routine can be used both for the attribute and
1314 * data fork, and does not modify the inode size, which is left to the caller.
1315 *
1316 * The transaction passed to this routine must have made a permanent log
1317 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1318 * given transaction and start new ones, so make sure everything involved in
1319 * the transaction is tidy before calling here.  Some transaction will be
1320 * returned to the caller to be committed.  The incoming transaction must
1321 * already include the inode, and both inode locks must be held exclusively.
1322 * The inode must also be "held" within the transaction.  On return the inode
1323 * will be "held" within the returned transaction.  This routine does NOT
1324 * require any disk space to be reserved for it within the transaction.
1325 *
1326 * If we get an error, we must return with the inode locked and linked into the
1327 * current transaction. This keeps things simple for the higher level code,
1328 * because it always knows that the inode is locked and held in the transaction
1329 * that returns to it whether errors occur or not.  We don't mark the inode
1330 * dirty on error so that transactions can be easily aborted if possible.
1331 */
1332int
1333xfs_itruncate_extents_flags(
1334	struct xfs_trans	**tpp,
1335	struct xfs_inode	*ip,
1336	int			whichfork,
1337	xfs_fsize_t		new_size,
1338	int			flags)
1339{
1340	struct xfs_mount	*mp = ip->i_mount;
1341	struct xfs_trans	*tp = *tpp;
1342	xfs_fileoff_t		first_unmap_block;
1343	int			error = 0;
1344
1345	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1346	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1347	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1348	ASSERT(new_size <= XFS_ISIZE(ip));
1349	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1350	ASSERT(ip->i_itemp != NULL);
1351	ASSERT(ip->i_itemp->ili_lock_flags == 0);
1352	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1353
1354	trace_xfs_itruncate_extents_start(ip, new_size);
1355
1356	flags |= xfs_bmapi_aflag(whichfork);
1357
1358	/*
1359	 * Since it is possible for space to become allocated beyond
1360	 * the end of the file (in a crash where the space is allocated
1361	 * but the inode size is not yet updated), simply remove any
1362	 * blocks which show up between the new EOF and the maximum
1363	 * possible file size.
1364	 *
1365	 * We have to free all the blocks to the bmbt maximum offset, even if
1366	 * the page cache can't scale that far.
1367	 */
1368	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1369	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
1370		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
1371		return 0;
1372	}
1373
1374	error = xfs_bunmapi_range(&tp, ip, flags, first_unmap_block,
1375			XFS_MAX_FILEOFF);
1376	if (error)
1377		goto out;
1378
1379	if (whichfork == XFS_DATA_FORK) {
1380		/* Remove all pending CoW reservations. */
1381		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1382				first_unmap_block, XFS_MAX_FILEOFF, true);
1383		if (error)
1384			goto out;
1385
1386		xfs_itruncate_clear_reflink_flags(ip);
1387	}
1388
1389	/*
1390	 * Always re-log the inode so that our permanent transaction can keep
1391	 * on rolling it forward in the log.
1392	 */
1393	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1394
1395	trace_xfs_itruncate_extents_end(ip, new_size);
1396
1397out:
1398	*tpp = tp;
1399	return error;
1400}
1401
1402int
1403xfs_release(
1404	xfs_inode_t	*ip)
1405{
1406	xfs_mount_t	*mp = ip->i_mount;
1407	int		error = 0;
1408
1409	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1410		return 0;
1411
1412	/* If this is a read-only mount, don't do this (would generate I/O) */
1413	if (xfs_is_readonly(mp))
1414		return 0;
1415
1416	if (!xfs_is_shutdown(mp)) {
1417		int truncated;
1418
1419		/*
1420		 * If we previously truncated this file and removed old data
1421		 * in the process, we want to initiate "early" writeout on
1422		 * the last close.  This is an attempt to combat the notorious
1423		 * NULL files problem which is particularly noticeable from a
1424		 * truncate down, buffered (re-)write (delalloc), followed by
1425		 * a crash.  What we are effectively doing here is
1426		 * significantly reducing the time window where we'd otherwise
1427		 * be exposed to that problem.
1428		 */
1429		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1430		if (truncated) {
1431			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1432			if (ip->i_delayed_blks > 0) {
1433				error = filemap_flush(VFS_I(ip)->i_mapping);
1434				if (error)
1435					return error;
1436			}
1437		}
1438	}
1439
1440	if (VFS_I(ip)->i_nlink == 0)
1441		return 0;
1442
1443	/*
1444	 * If we can't get the iolock just skip truncating the blocks past EOF
1445	 * because we could deadlock with the mmap_lock otherwise. We'll get
1446	 * another chance to drop them once the last reference to the inode is
1447	 * dropped, so we'll never leak blocks permanently.
1448	 */
1449	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
1450		return 0;
1451
1452	if (xfs_can_free_eofblocks(ip, false)) {
1453		/*
1454		 * Check if the inode is being opened, written and closed
1455		 * frequently and we have delayed allocation blocks outstanding
1456		 * (e.g. streaming writes from the NFS server), truncating the
1457		 * blocks past EOF will cause fragmentation to occur.
1458		 *
1459		 * In this case don't do the truncation, but we have to be
1460		 * careful how we detect this case. Blocks beyond EOF show up as
1461		 * i_delayed_blks even when the inode is clean, so we need to
1462		 * truncate them away first before checking for a dirty release.
1463		 * Hence on the first dirty close we will still remove the
1464		 * speculative allocation, but after that we will leave it in
1465		 * place.
1466		 */
1467		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1468			goto out_unlock;
1469
1470		error = xfs_free_eofblocks(ip);
1471		if (error)
1472			goto out_unlock;
1473
1474		/* delalloc blocks after truncation means it really is dirty */
1475		if (ip->i_delayed_blks)
1476			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1477	}
1478
1479out_unlock:
1480	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1481	return error;
1482}
1483
1484/*
1485 * xfs_inactive_truncate
1486 *
1487 * Called to perform a truncate when an inode becomes unlinked.
1488 */
1489STATIC int
1490xfs_inactive_truncate(
1491	struct xfs_inode *ip)
1492{
1493	struct xfs_mount	*mp = ip->i_mount;
1494	struct xfs_trans	*tp;
1495	int			error;
1496
1497	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1498	if (error) {
1499		ASSERT(xfs_is_shutdown(mp));
1500		return error;
1501	}
1502	xfs_ilock(ip, XFS_ILOCK_EXCL);
1503	xfs_trans_ijoin(tp, ip, 0);
1504
1505	/*
1506	 * Log the inode size first to prevent stale data exposure in the event
1507	 * of a system crash before the truncate completes. See the related
1508	 * comment in xfs_vn_setattr_size() for details.
1509	 */
1510	ip->i_disk_size = 0;
1511	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1512
1513	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1514	if (error)
1515		goto error_trans_cancel;
1516
1517	ASSERT(ip->i_df.if_nextents == 0);
1518
1519	error = xfs_trans_commit(tp);
1520	if (error)
1521		goto error_unlock;
1522
1523	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1524	return 0;
1525
1526error_trans_cancel:
1527	xfs_trans_cancel(tp);
1528error_unlock:
1529	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1530	return error;
1531}
1532
1533/*
1534 * xfs_inactive_ifree()
1535 *
1536 * Perform the inode free when an inode is unlinked.
1537 */
1538STATIC int
1539xfs_inactive_ifree(
1540	struct xfs_inode *ip)
1541{
1542	struct xfs_mount	*mp = ip->i_mount;
1543	struct xfs_trans	*tp;
1544	int			error;
1545
1546	/*
1547	 * We try to use a per-AG reservation for any block needed by the finobt
1548	 * tree, but as the finobt feature predates the per-AG reservation
1549	 * support a degraded file system might not have enough space for the
1550	 * reservation at mount time.  In that case try to dip into the reserved
1551	 * pool and pray.
1552	 *
1553	 * Send a warning if the reservation does happen to fail, as the inode
1554	 * now remains allocated and sits on the unlinked list until the fs is
1555	 * repaired.
1556	 */
1557	if (unlikely(mp->m_finobt_nores)) {
1558		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1559				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1560				&tp);
1561	} else {
1562		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1563	}
1564	if (error) {
1565		if (error == -ENOSPC) {
1566			xfs_warn_ratelimited(mp,
1567			"Failed to remove inode(s) from unlinked list. "
1568			"Please free space, unmount and run xfs_repair.");
1569		} else {
1570			ASSERT(xfs_is_shutdown(mp));
1571		}
1572		return error;
1573	}
1574
1575	/*
1576	 * We do not hold the inode locked across the entire rolling transaction
1577	 * here. We only need to hold it for the first transaction that
1578	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
1579	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
1580	 * here breaks the relationship between cluster buffer invalidation and
1581	 * stale inode invalidation on cluster buffer item journal commit
1582	 * completion, and can result in leaving dirty stale inodes hanging
1583	 * around in memory.
1584	 *
1585	 * We have no need for serialising this inode operation against other
1586	 * operations - we freed the inode and hence reallocation is required
1587	 * and that will serialise on reallocating the space the deferops need
1588	 * to free. Hence we can unlock the inode on the first commit of
1589	 * the transaction rather than roll it right through the deferops. This
1590	 * avoids relogging the XFS_ISTALE inode.
1591	 *
1592	 * We check that xfs_ifree() hasn't grown an internal transaction roll
1593	 * by asserting that the inode is still locked when it returns.
1594	 */
1595	xfs_ilock(ip, XFS_ILOCK_EXCL);
1596	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1597
1598	error = xfs_ifree(tp, ip);
1599	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1600	if (error) {
1601		/*
1602		 * If we fail to free the inode, shut down.  The cancel
1603		 * might do that, we need to make sure.  Otherwise the
1604		 * inode might be lost for a long time or forever.
1605		 */
1606		if (!xfs_is_shutdown(mp)) {
1607			xfs_notice(mp, "%s: xfs_ifree returned error %d",
1608				__func__, error);
1609			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1610		}
1611		xfs_trans_cancel(tp);
1612		return error;
1613	}
1614
1615	/*
1616	 * Credit the quota account(s). The inode is gone.
1617	 */
1618	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1619
1620	return xfs_trans_commit(tp);
1621}
1622
1623/*
1624 * Returns true if we need to update the on-disk metadata before we can free
1625 * the memory used by this inode.  Updates include freeing post-eof
1626 * preallocations; freeing COW staging extents; and marking the inode free in
1627 * the inobt if it is on the unlinked list.
1628 */
1629bool
1630xfs_inode_needs_inactive(
1631	struct xfs_inode	*ip)
1632{
1633	struct xfs_mount	*mp = ip->i_mount;
1634	struct xfs_ifork	*cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
1635
1636	/*
1637	 * If the inode is already free, then there can be nothing
1638	 * to clean up here.
1639	 */
1640	if (VFS_I(ip)->i_mode == 0)
1641		return false;
1642
1643	/*
1644	 * If this is a read-only mount, don't do this (would generate I/O)
1645	 * unless we're in log recovery and cleaning the iunlinked list.
1646	 */
1647	if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
1648		return false;
1649
1650	/* If the log isn't running, push inodes straight to reclaim. */
1651	if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
1652		return false;
1653
1654	/* Metadata inodes require explicit resource cleanup. */
1655	if (xfs_is_metadata_inode(ip))
1656		return false;
1657
1658	/* Want to clean out the cow blocks if there are any. */
1659	if (cow_ifp && cow_ifp->if_bytes > 0)
1660		return true;
1661
1662	/* Unlinked files must be freed. */
1663	if (VFS_I(ip)->i_nlink == 0)
1664		return true;
1665
1666	/*
1667	 * This file isn't being freed, so check if there are post-eof blocks
1668	 * to free.  @force is true because we are evicting an inode from the
1669	 * cache.  Post-eof blocks must be freed, lest we end up with broken
1670	 * free space accounting.
1671	 *
1672	 * Note: don't bother with iolock here since lockdep complains about
1673	 * acquiring it in reclaim context. We have the only reference to the
1674	 * inode at this point anyway.
1675	 */
1676	return xfs_can_free_eofblocks(ip, true);
1677}
1678
1679/*
1680 * xfs_inactive
1681 *
1682 * This is called when the vnode's reference count
1683 * goes to zero.  If the file has been unlinked, then it must
1684 * now be truncated.  Also, we clear all of the read-ahead state
1685 * kept for the inode here since the file is now closed.
1686 */
1687int
1688xfs_inactive(
1689	xfs_inode_t	*ip)
1690{
1691	struct xfs_mount	*mp;
1692	int			error = 0;
1693	int			truncate = 0;
1694
1695	/*
1696	 * If the inode is already free, then there can be nothing
1697	 * to clean up here.
1698	 */
1699	if (VFS_I(ip)->i_mode == 0) {
1700		ASSERT(ip->i_df.if_broot_bytes == 0);
1701		goto out;
1702	}
1703
1704	mp = ip->i_mount;
1705	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1706
1707	/*
1708	 * If this is a read-only mount, don't do this (would generate I/O)
1709	 * unless we're in log recovery and cleaning the iunlinked list.
1710	 */
1711	if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
1712		goto out;
1713
1714	/* Metadata inodes require explicit resource cleanup. */
1715	if (xfs_is_metadata_inode(ip))
1716		goto out;
1717
1718	/* Try to clean out the cow blocks if there are any. */
1719	if (xfs_inode_has_cow_data(ip))
1720		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1721
1722	if (VFS_I(ip)->i_nlink != 0) {
1723		/*
1724		 * force is true because we are evicting an inode from the
1725		 * cache. Post-eof blocks must be freed, lest we end up with
1726		 * broken free space accounting.
1727		 *
1728		 * Note: don't bother with iolock here since lockdep complains
1729		 * about acquiring it in reclaim context. We have the only
1730		 * reference to the inode at this point anyway.
1731		 */
1732		if (xfs_can_free_eofblocks(ip, true))
1733			error = xfs_free_eofblocks(ip);
1734
1735		goto out;
1736	}
1737
1738	if (S_ISREG(VFS_I(ip)->i_mode) &&
1739	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1740	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1741		truncate = 1;
1742
1743	if (xfs_iflags_test(ip, XFS_IQUOTAUNCHECKED)) {
1744		/*
1745		 * If this inode is being inactivated during a quotacheck and
1746		 * has not yet been scanned by quotacheck, we /must/ remove
1747		 * the dquots from the inode before inactivation changes the
1748		 * block and inode counts.  Most probably this is a result of
1749		 * reloading the incore iunlinked list to purge unrecovered
1750		 * unlinked inodes.
1751		 */
1752		xfs_qm_dqdetach(ip);
1753	} else {
1754		error = xfs_qm_dqattach(ip);
1755		if (error)
1756			goto out;
1757	}
1758
1759	if (S_ISLNK(VFS_I(ip)->i_mode))
1760		error = xfs_inactive_symlink(ip);
1761	else if (truncate)
1762		error = xfs_inactive_truncate(ip);
1763	if (error)
1764		goto out;
1765
1766	/*
1767	 * If there are attributes associated with the file then blow them away
1768	 * now.  The code calls a routine that recursively deconstructs the
1769 * attribute fork. It also blows away the in-core attribute fork.
1770	 */
1771	if (xfs_inode_has_attr_fork(ip)) {
1772		error = xfs_attr_inactive(ip);
1773		if (error)
1774			goto out;
1775	}
1776
1777	ASSERT(ip->i_forkoff == 0);
1778
1779	/*
1780	 * Free the inode.
1781	 */
1782	error = xfs_inactive_ifree(ip);
1783
1784out:
1785	/*
1786	 * We're done making metadata updates for this inode, so we can release
1787	 * the attached dquots.
1788	 */
1789	xfs_qm_dqdetach(ip);
1790	return error;
1791}
1792
1793/*
1794 * In-Core Unlinked List Lookups
1795 * =============================
1796 *
1797 * Every inode is supposed to be reachable from some other piece of metadata
1798 * with the exception of the root directory.  Inodes with a connection to a
1799 * file descriptor but not linked from anywhere in the on-disk directory tree
1800 * are collectively known as unlinked inodes, though the filesystem itself
1801 * maintains links to these inodes so that on-disk metadata are consistent.
1802 *
1803 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
1804 * header contains a number of buckets that point to an inode, and each inode
1805 * record has a pointer to the next inode in the hash chain.  This
1806 * singly-linked list causes scaling problems in the iunlink remove function
1807 * because we must walk that list to find the inode that points to the inode
1808 * being removed from the unlinked hash bucket list.
1809 *
1810 * Hence we keep an in-memory doubly linked list to link each inode on an
1811 * unlinked list. Because there are 64 unlinked lists per AGI, keeping pointer
1812 * based lists would require having 64 list heads in the perag, one for each
1813 * list. This is expensive in terms of memory (think millions of AGs) and cache
1814 * misses on lookups. Instead, use the fact that inodes on the unlinked list
1815 * must be referenced at the VFS level to keep them on the list and hence we
1816 * have an existence guarantee for inodes on the unlinked list.
1817 *
1818 * Given we have an existence guarantee, we can use lockless inode cache lookups
1819 * to resolve aginos to xfs inodes. This means we only need 8 bytes per inode
1820 * for the doubly linked unlinked list, and we don't need any extra locking to
1821 * keep the list safe as all manipulations are done under the AGI buffer lock.
1822 * Keeping the list up to date does not require memory allocation, just finding
1823 * the XFS inode and updating the next/prev unlinked list aginos.
1824 */
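
/*
 * Illustrative sketch, not part of this file: how an AG inode number maps to
 * one of the AGI unlinked buckets described above.  The bucket count mirrors
 * XFS_AGI_UNLINKED_BUCKETS (64) from the on-disk format; the names here are
 * invented for the example.
 */
#include <stdint.h>

#define EX_AGI_UNLINKED_BUCKETS	64	/* mirrors XFS_AGI_UNLINKED_BUCKETS */

/* Hash an AG inode number to its unlinked bucket, as the code below does. */
static unsigned int
ex_unlinked_bucket(uint32_t agino)
{
	return agino % EX_AGI_UNLINKED_BUCKETS;
}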
1825
1826/*
1827 * Find an inode on the unlinked list. This does not take references to the
1828 * inode because we have an existence guarantee: we hold the AGI buffer lock and
1829 * only unlinked, referenced inodes can be on the unlinked inode list.  If we
1830 * don't find the inode in cache, then let the caller handle the situation.
1831 */
1832static struct xfs_inode *
1833xfs_iunlink_lookup(
1834	struct xfs_perag	*pag,
1835	xfs_agino_t		agino)
1836{
1837	struct xfs_inode	*ip;
1838
1839	rcu_read_lock();
1840	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
1841	if (!ip) {
1842		/* Caller can handle inode not being in memory. */
1843		rcu_read_unlock();
1844		return NULL;
1845	}
1846
1847	/*
1848	 * Inode in RCU freeing limbo should not happen.  Warn about this and
1849	 * let the caller handle the failure.
1850	 */
1851	if (WARN_ON_ONCE(!ip->i_ino)) {
1852		rcu_read_unlock();
1853		return NULL;
1854	}
1855	ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
1856	rcu_read_unlock();
1857	return ip;
1858}
1859
1860/*
1861 * Update the prev pointer of the next agino.  Returns -ENOLINK if the inode
1862 * is not in cache.
1863 */
1864static int
1865xfs_iunlink_update_backref(
1866	struct xfs_perag	*pag,
1867	xfs_agino_t		prev_agino,
1868	xfs_agino_t		next_agino)
1869{
1870	struct xfs_inode	*ip;
1871
1872	/* No update necessary if we are at the end of the list. */
1873	if (next_agino == NULLAGINO)
1874		return 0;
1875
1876	ip = xfs_iunlink_lookup(pag, next_agino);
1877	if (!ip)
1878		return -ENOLINK;
1879
1880	ip->i_prev_unlinked = prev_agino;
1881	return 0;
1882}
1883
1884/*
1885 * Point the AGI unlinked bucket at an inode and log the results.  The caller
1886 * is responsible for validating the old value.
1887 */
1888STATIC int
1889xfs_iunlink_update_bucket(
1890	struct xfs_trans	*tp,
1891	struct xfs_perag	*pag,
1892	struct xfs_buf		*agibp,
1893	unsigned int		bucket_index,
1894	xfs_agino_t		new_agino)
1895{
1896	struct xfs_agi		*agi = agibp->b_addr;
1897	xfs_agino_t		old_value;
1898	int			offset;
1899
1900	ASSERT(xfs_verify_agino_or_null(pag, new_agino));
1901
1902	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1903	trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
1904			old_value, new_agino);
1905
1906	/*
1907	 * We should never find the head of the list already set to the value
1908	 * passed in because either we're adding or removing ourselves from the
1909	 * head of the list.
1910	 */
1911	if (old_value == new_agino) {
1912		xfs_buf_mark_corrupt(agibp);
1913		return -EFSCORRUPTED;
1914	}
1915
1916	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
1917	offset = offsetof(struct xfs_agi, agi_unlinked) +
1918			(sizeof(xfs_agino_t) * bucket_index);
1919	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
1920	return 0;
1921}
1922
1923/*
1924 * Load the inode @next_agino into the cache and set its prev_unlinked pointer
1925 * to @prev_agino.  Caller must hold the AGI to synchronize with other changes
1926 * to the unlinked list.
1927 */
1928STATIC int
1929xfs_iunlink_reload_next(
1930	struct xfs_trans	*tp,
1931	struct xfs_buf		*agibp,
1932	xfs_agino_t		prev_agino,
1933	xfs_agino_t		next_agino)
1934{
1935	struct xfs_perag	*pag = agibp->b_pag;
1936	struct xfs_mount	*mp = pag->pag_mount;
1937	struct xfs_inode	*next_ip = NULL;
1938	xfs_ino_t		ino;
1939	int			error;
1940
1941	ASSERT(next_agino != NULLAGINO);
1942
1943#ifdef DEBUG
1944	rcu_read_lock();
1945	next_ip = radix_tree_lookup(&pag->pag_ici_root, next_agino);
1946	ASSERT(next_ip == NULL);
1947	rcu_read_unlock();
1948#endif
1949
1950	xfs_info_ratelimited(mp,
1951 "Found unrecovered unlinked inode 0x%x in AG 0x%x.  Initiating recovery.",
1952			next_agino, pag->pag_agno);
1953
1954	/*
1955	 * Use an untrusted lookup just to be cautious in case the AGI has been
1956	 * corrupted and now points at a free inode.  That shouldn't happen,
1957	 * but we'd rather shut down now since we're already running in a weird
1958	 * situation.
1959	 */
1960	ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, next_agino);
1961	error = xfs_iget(mp, tp, ino, XFS_IGET_UNTRUSTED, 0, &next_ip);
1962	if (error)
1963		return error;
1964
1965	/* If this is not an unlinked inode, something is very wrong. */
1966	if (VFS_I(next_ip)->i_nlink != 0) {
1967		error = -EFSCORRUPTED;
1968		goto rele;
1969	}
1970
1971	next_ip->i_prev_unlinked = prev_agino;
1972	trace_xfs_iunlink_reload_next(next_ip);
1973rele:
1974	ASSERT(!(VFS_I(next_ip)->i_state & I_DONTCACHE));
1975	if (xfs_is_quotacheck_running(mp) && next_ip)
1976		xfs_iflags_set(next_ip, XFS_IQUOTAUNCHECKED);
1977	xfs_irele(next_ip);
1978	return error;
1979}
1980
1981static int
1982xfs_iunlink_insert_inode(
1983	struct xfs_trans	*tp,
1984	struct xfs_perag	*pag,
1985	struct xfs_buf		*agibp,
1986	struct xfs_inode	*ip)
1987{
1988	struct xfs_mount	*mp = tp->t_mountp;
1989	struct xfs_agi		*agi = agibp->b_addr;
1990	xfs_agino_t		next_agino;
1991	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1992	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1993	int			error;
1994
1995	/*
1996	 * Get the index into the agi hash table for the list this inode will
1997	 * go on.  Make sure the pointer isn't garbage and that this inode
1998	 * isn't already on the list.
1999	 */
2000	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2001	if (next_agino == agino ||
2002	    !xfs_verify_agino_or_null(pag, next_agino)) {
2003		xfs_buf_mark_corrupt(agibp);
2004		return -EFSCORRUPTED;
2005	}
2006
2007	/*
2008	 * Update the prev pointer in the next inode to point back to this
2009	 * inode.
2010	 */
2011	error = xfs_iunlink_update_backref(pag, agino, next_agino);
2012	if (error == -ENOLINK)
2013		error = xfs_iunlink_reload_next(tp, agibp, agino, next_agino);
2014	if (error)
2015		return error;
2016
2017	if (next_agino != NULLAGINO) {
2018		/*
2019		 * There is already another inode in the bucket, so point this
2020		 * inode to the current head of the list.
2021		 */
2022		error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
2023		if (error)
2024			return error;
2025		ip->i_next_unlinked = next_agino;
2026	}
2027
2028	/* Point the head of the list to point to this inode. */
2029	ip->i_prev_unlinked = NULLAGINO;
2030	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
2031}
2032
2033/*
2034 * This is called when the inode's link count has gone to 0 or we are creating
2035 * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
2036 *
2037 * We place the on-disk inode on a list in the AGI.  It will be pulled from this
2038 * list when the inode is freed.
2039 */
2040STATIC int
2041xfs_iunlink(
2042	struct xfs_trans	*tp,
2043	struct xfs_inode	*ip)
2044{
2045	struct xfs_mount	*mp = tp->t_mountp;
2046	struct xfs_perag	*pag;
2047	struct xfs_buf		*agibp;
2048	int			error;
2049
2050	ASSERT(VFS_I(ip)->i_nlink == 0);
2051	ASSERT(VFS_I(ip)->i_mode != 0);
2052	trace_xfs_iunlink(ip);
2053
2054	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2055
2056	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2057	error = xfs_read_agi(pag, tp, &agibp);
2058	if (error)
2059		goto out;
2060
2061	error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
2062out:
2063	xfs_perag_put(pag);
2064	return error;
2065}
2066
2067static int
2068xfs_iunlink_remove_inode(
2069	struct xfs_trans	*tp,
2070	struct xfs_perag	*pag,
2071	struct xfs_buf		*agibp,
2072	struct xfs_inode	*ip)
2073{
2074	struct xfs_mount	*mp = tp->t_mountp;
2075	struct xfs_agi		*agi = agibp->b_addr;
2076	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2077	xfs_agino_t		head_agino;
2078	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2079	int			error;
2080
2081	trace_xfs_iunlink_remove(ip);
2082
2083	/*
2084	 * Get the index into the agi hash table for the list this inode is
2085	 * on.  Make sure the head pointer isn't garbage.
2086	 */
2087	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2088	if (!xfs_verify_agino(pag, head_agino)) {
2089		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2090				agi, sizeof(*agi));
2091		return -EFSCORRUPTED;
2092	}
2093
2094	/*
2095	 * Set our inode's next_unlinked pointer to NULL and then return
2096	 * the old pointer value so that we can update whatever was previous
2097	 * to us in the list to point to whatever was next in the list.
2098	 */
2099	error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO);
2100	if (error)
2101		return error;
2102
2103	/*
2104	 * Update the prev pointer in the next inode to point back to the
2105	 * previous inode in the chain.
2106	 */
2107	error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
2108			ip->i_next_unlinked);
2109	if (error == -ENOLINK)
2110		error = xfs_iunlink_reload_next(tp, agibp, ip->i_prev_unlinked,
2111				ip->i_next_unlinked);
2112	if (error)
2113		return error;
2114
2115	if (head_agino != agino) {
2116		struct xfs_inode	*prev_ip;
2117
2118		prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
2119		if (!prev_ip)
2120			return -EFSCORRUPTED;
2121
2122		error = xfs_iunlink_log_inode(tp, prev_ip, pag,
2123				ip->i_next_unlinked);
2124		prev_ip->i_next_unlinked = ip->i_next_unlinked;
2125	} else {
2126		/* Point the head of the list to the next unlinked inode. */
2127		error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
2128				ip->i_next_unlinked);
2129	}
2130
2131	ip->i_next_unlinked = NULLAGINO;
2132	ip->i_prev_unlinked = 0;
2133	return error;
2134}
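
/*
 * Illustrative sketch, not part of this file: the doubly linked bucket
 * operations performed by xfs_iunlink_insert_inode() and
 * xfs_iunlink_remove_inode() above, modelled over a plain array.  All names
 * are invented for the example; EX_NULLAGINO stands in for NULLAGINO, and
 * nodes[] stands in for the per-AG radix tree lookup of in-core inodes.
 */
#include <stdint.h>

#define EX_NULLAGINO	((uint32_t)-1)

struct ex_node {
	uint32_t	next_unlinked;	/* forward pointer, also on disk */
	uint32_t	prev_unlinked;	/* back pointer, in memory only */
};

/* Push @agino onto the head of a bucket, as the insert path does. */
static void
ex_unlinked_push(uint32_t *bucket_head, struct ex_node *nodes, uint32_t agino)
{
	uint32_t old_head = *bucket_head;

	nodes[agino].next_unlinked = old_head;
	nodes[agino].prev_unlinked = EX_NULLAGINO;	/* we are the head */
	if (old_head != EX_NULLAGINO)
		nodes[old_head].prev_unlinked = agino;	/* fix the backref */
	*bucket_head = agino;
}

/*
 * Remove @agino from anywhere in a bucket.  The cached prev pointer makes
 * this O(1); without it we would have to walk the singly linked on-disk
 * chain from the bucket head to find the predecessor.
 */
static void
ex_unlinked_remove(uint32_t *bucket_head, struct ex_node *nodes, uint32_t agino)
{
	uint32_t next = nodes[agino].next_unlinked;
	uint32_t prev = nodes[agino].prev_unlinked;

	if (next != EX_NULLAGINO)
		nodes[next].prev_unlinked = prev;	/* fix the backref */
	if (prev != EX_NULLAGINO)
		nodes[prev].next_unlinked = next;	/* bypass @agino */
	else
		*bucket_head = next;			/* @agino was the head */
	nodes[agino].next_unlinked = EX_NULLAGINO;
	nodes[agino].prev_unlinked = EX_NULLAGINO;
}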
2135
2136/*
2137 * Pull the on-disk inode from the AGI unlinked list.
2138 */
2139STATIC int
2140xfs_iunlink_remove(
2141	struct xfs_trans	*tp,
2142	struct xfs_perag	*pag,
2143	struct xfs_inode	*ip)
2144{
2145	struct xfs_buf		*agibp;
2146	int			error;
2147
2148	trace_xfs_iunlink_remove(ip);
2149
2150	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2151	error = xfs_read_agi(pag, tp, &agibp);
2152	if (error)
2153		return error;
2154
2155	return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
2156}
2157
2158/*
2159 * Look up the inode number specified and if it is not already marked XFS_ISTALE
2160 * mark it stale. We should only find clean inodes in this lookup that aren't
2161 * already stale.
2162 */
2163static void
2164xfs_ifree_mark_inode_stale(
2165	struct xfs_perag	*pag,
2166	struct xfs_inode	*free_ip,
2167	xfs_ino_t		inum)
2168{
2169	struct xfs_mount	*mp = pag->pag_mount;
2170	struct xfs_inode_log_item *iip;
2171	struct xfs_inode	*ip;
2172
2173retry:
2174	rcu_read_lock();
2175	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2176
2177	/* Inode not in memory, nothing to do */
2178	if (!ip) {
2179		rcu_read_unlock();
2180		return;
2181	}
2182
2183	/*
2184	 * Because this is an RCU protected lookup, we could find a recently
2185	 * freed or even reallocated inode during the lookup. We need to check
2186	 * under the i_flags_lock for a valid inode here. Skip it if it is not
2187	 * valid, the wrong inode or stale.
2188	 */
2189	spin_lock(&ip->i_flags_lock);
2190	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2191		goto out_iflags_unlock;
2192
2193	/*
2194	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2195	 * other inodes that we did not find in the list attached to the buffer
2196	 * and are not already marked stale. If we can't lock it, back off and
2197	 * retry.
2198	 */
2199	if (ip != free_ip) {
2200		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2201			spin_unlock(&ip->i_flags_lock);
2202			rcu_read_unlock();
2203			delay(1);
2204			goto retry;
2205		}
2206	}
2207	ip->i_flags |= XFS_ISTALE;
2208
2209	/*
2210	 * If the inode is flushing, it is already attached to the buffer.  All
2211	 * we need to do here is mark the inode stale so buffer IO completion
2212	 * will remove it from the AIL.
2213	 */
2214	iip = ip->i_itemp;
2215	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
2216		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2217		ASSERT(iip->ili_last_fields);
2218		goto out_iunlock;
2219	}
2220
2221	/*
2222	 * Inodes not attached to the buffer can be released immediately.
2223	 * Everything else has to go through xfs_iflush_abort() on journal
2224	 * commit as the flock synchronises removal of the inode from the
2225	 * cluster buffer against inode reclaim.
2226	 */
2227	if (!iip || list_empty(&iip->ili_item.li_bio_list))
2228		goto out_iunlock;
2229
2230	__xfs_iflags_set(ip, XFS_IFLUSHING);
2231	spin_unlock(&ip->i_flags_lock);
2232	rcu_read_unlock();
2233
2234	/* we have a dirty inode in memory that has not yet been flushed. */
2235	spin_lock(&iip->ili_lock);
2236	iip->ili_last_fields = iip->ili_fields;
2237	iip->ili_fields = 0;
2238	iip->ili_fsync_fields = 0;
2239	spin_unlock(&iip->ili_lock);
2240	ASSERT(iip->ili_last_fields);
2241
2242	if (ip != free_ip)
2243		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2244	return;
2245
2246out_iunlock:
2247	if (ip != free_ip)
2248		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2249out_iflags_unlock:
2250	spin_unlock(&ip->i_flags_lock);
2251	rcu_read_unlock();
2252}
2253
2254/*
2255 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2256 * inodes that are in memory - they all must be marked stale and attached to
2257 * the cluster buffer.
2258 */
2259static int
2260xfs_ifree_cluster(
2261	struct xfs_trans	*tp,
2262	struct xfs_perag	*pag,
2263	struct xfs_inode	*free_ip,
2264	struct xfs_icluster	*xic)
2265{
2266	struct xfs_mount	*mp = free_ip->i_mount;
2267	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
2268	struct xfs_buf		*bp;
2269	xfs_daddr_t		blkno;
2270	xfs_ino_t		inum = xic->first_ino;
2271	int			nbufs;
2272	int			i, j;
2273	int			ioffset;
2274	int			error;
2275
2276	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2277
2278	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2279		/*
2280		 * The allocation bitmap tells us which inodes of the chunk were
2281		 * physically allocated. Skip the cluster if an inode falls into
2282		 * a sparse region.
2283		 */
2284		ioffset = inum - xic->first_ino;
2285		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2286			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2287			continue;
2288		}
2289
2290		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2291					 XFS_INO_TO_AGBNO(mp, inum));
2292
2293		/*
2294		 * We obtain and lock the backing buffer first in the process
2295		 * here to ensure dirty inodes attached to the buffer remain in
2296		 * the flushing state while we mark them stale.
2297		 *
2298		 * If we scan the in-memory inodes first, then buffer IO can
2299		 * complete before we get a lock on it, and hence we may fail
2300		 * to mark all the active inodes on the buffer stale.
2301		 */
2302		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2303				mp->m_bsize * igeo->blocks_per_cluster,
2304				XBF_UNMAPPED, &bp);
2305		if (error)
2306			return error;
2307
2308		/*
2309		 * This buffer may not have been correctly initialised as we
2310		 * didn't read it from disk. That's not important because we are
2311		 * only using it to mark the buffer as stale in the log, and to
2312		 * attach stale cached inodes to it. That means it will never be
2313		 * dispatched for IO. If it is, we want to know about it, and we
2314		 * want it to fail. We can achieve this by adding a write
2315		 * verifier to the buffer.
2316		 */
2317		bp->b_ops = &xfs_inode_buf_ops;
2318
2319		/*
2320		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2321		 * too. This requires lookups, and will skip inodes that we've
2322		 * already marked XFS_ISTALE.
2323		 */
2324		for (i = 0; i < igeo->inodes_per_cluster; i++)
2325			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
2326
2327		xfs_trans_stale_inode_buf(tp, bp);
2328		xfs_trans_binval(tp, bp);
2329	}
2330	return 0;
2331}
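
/*
 * Illustrative sketch, not part of this file: the sparse-chunk skip test used
 * in the loop of xfs_ifree_cluster() above, assuming XFS_INOBT_MASK(i) is the
 * single-bit mask ((xfs_inofree_t)1 << i) over the chunk allocation bitmap.
 * The names here are invented for the example.
 */
#include <stdbool.h>
#include <stdint.h>

/* Is the inode at @ioffset within its chunk physically allocated? */
static bool
ex_chunk_inode_allocated(uint64_t alloc_bitmap, unsigned int ioffset)
{
	return (alloc_bitmap >> ioffset) & 1;
}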
2332
2333/*
2334 * This is called to return an inode to the inode free list.  The inode should
2335 * already be truncated to 0 length and have no pages associated with it.  This
2336 * routine also assumes that the inode is already a part of the transaction.
2337 *
2338 * The on-disk copy of the inode will have been added to the list of unlinked
2339 * inodes in the AGI. We need to remove the inode from that list atomically with
2340 * respect to freeing it here.
2341 */
2342int
2343xfs_ifree(
2344	struct xfs_trans	*tp,
2345	struct xfs_inode	*ip)
2346{
2347	struct xfs_mount	*mp = ip->i_mount;
2348	struct xfs_perag	*pag;
2349	struct xfs_icluster	xic = { 0 };
2350	struct xfs_inode_log_item *iip = ip->i_itemp;
2351	int			error;
2352
2353	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2354	ASSERT(VFS_I(ip)->i_nlink == 0);
2355	ASSERT(ip->i_df.if_nextents == 0);
2356	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2357	ASSERT(ip->i_nblocks == 0);
2358
2359	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2360
2361	/*
2362	 * Free the inode first so that we guarantee that the AGI lock is going
2363	 * to be taken before we remove the inode from the unlinked list. This
2364	 * makes the AGI lock -> unlinked list modification order the same as
2365	 * used in O_TMPFILE creation.
2366	 */
2367	error = xfs_difree(tp, pag, ip->i_ino, &xic);
2368	if (error)
2369		goto out;
2370
2371	error = xfs_iunlink_remove(tp, pag, ip);
2372	if (error)
2373		goto out;
2374
2375	/*
2376	 * Free any local-format data sitting around before we reset the
2377	 * data fork to extents format.  Note that the attr fork data has
2378	 * already been freed by xfs_attr_inactive.
2379	 */
2380	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2381		kmem_free(ip->i_df.if_data);
2382		ip->i_df.if_data = NULL;
2383		ip->i_df.if_bytes = 0;
2384	}
2385
2386	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2387	ip->i_diflags = 0;
2388	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
2389	ip->i_forkoff = 0;		/* mark the attr fork not in use */
2390	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2391	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
2392		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2393
2394	/* Don't attempt to replay owner changes for a deleted inode */
2395	spin_lock(&iip->ili_lock);
2396	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2397	spin_unlock(&iip->ili_lock);
2398
2399	/*
2400	 * Bump the generation count so no one will be confused
2401	 * by reincarnations of this inode.
2402	 */
2403	VFS_I(ip)->i_generation++;
2404	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2405
2406	if (xic.deleted)
2407		error = xfs_ifree_cluster(tp, pag, ip, &xic);
2408out:
2409	xfs_perag_put(pag);
2410	return error;
2411}
2412
2413/*
2414 * This is called to unpin an inode.  The caller must have the inode locked
2415 * in at least shared mode so that the buffer cannot be subsequently pinned
2416 * once someone is waiting for it to be unpinned.
2417 */
2418static void
2419xfs_iunpin(
2420	struct xfs_inode	*ip)
2421{
2422	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2423
2424	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2425
2426	/* Give the log a push to start the unpinning I/O */
2427	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2428
2429}
2430
2431static void
2432__xfs_iunpin_wait(
2433	struct xfs_inode	*ip)
2434{
2435	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2436	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2437
2438	xfs_iunpin(ip);
2439
2440	do {
2441		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2442		if (xfs_ipincount(ip))
2443			io_schedule();
2444	} while (xfs_ipincount(ip));
2445	finish_wait(wq, &wait.wq_entry);
2446}
2447
2448void
2449xfs_iunpin_wait(
2450	struct xfs_inode	*ip)
2451{
2452	if (xfs_ipincount(ip))
2453		__xfs_iunpin_wait(ip);
2454}
2455
2456/*
2457 * Removing an inode from the namespace involves removing the directory entry
2458 * and dropping the link count on the inode. Removing the directory entry can
2459 * result in locking an AGF (directory blocks were freed) and removing a link
2460 * count can result in placing the inode on an unlinked list which results in
2461 * locking an AGI.
2462 *
2463 * The big problem here is that we have an ordering constraint on AGF and AGI
2464 * locking - inode allocation locks the AGI, then can allocate a new extent for
2465 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2466 * removes the inode from the unlinked list, requiring that we lock the AGI
2467 * first, and then freeing the inode can result in an inode chunk being freed
2468 * and hence freeing disk space requiring that we lock an AGF.
2469 *
2470 * Hence the ordering that is imposed by other parts of the code is AGI before
2471 * AGF. This means we cannot remove the directory entry before we drop the inode
2472 * reference count and put it on the unlinked list as this results in a lock
2473 * order of AGF then AGI, and this can deadlock against inode allocation and
2474 * freeing. Therefore we must drop the link counts before we remove the
2475 * directory entry.
2476 *
2477 * This is still safe from a transactional point of view - it is not until we
2478 * get to xfs_defer_finish() that we have the possibility of multiple
2479 * transactions in this operation. Hence as long as we remove the directory
2480 * entry and drop the link count in the first transaction of the remove
2481 * operation, there are no transactional constraints on the ordering here.
2482 */
2483int
2484xfs_remove(
2485	xfs_inode_t             *dp,
2486	struct xfs_name		*name,
2487	xfs_inode_t		*ip)
2488{
2489	xfs_mount_t		*mp = dp->i_mount;
2490	xfs_trans_t             *tp = NULL;
2491	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2492	int			dontcare;
2493	int                     error = 0;
2494	uint			resblks;
2495
2496	trace_xfs_remove(dp, name);
2497
2498	if (xfs_is_shutdown(mp))
2499		return -EIO;
2500	if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
2501		return -EIO;
2502
2503	error = xfs_qm_dqattach(dp);
2504	if (error)
2505		goto std_return;
2506
2507	error = xfs_qm_dqattach(ip);
2508	if (error)
2509		goto std_return;
2510
2511	/*
2512	 * We try to get the real space reservation first, allowing for
2513	 * directory btree deletion(s) implying possible bmap insert(s).  If we
2514	 * can't get the space reservation then we use 0 instead, and avoid the
2515	 * bmap btree insert(s) in the directory code by, if the bmap insert
2516	 * tries to happen, instead trimming the LAST block from the directory.
2517	 *
2518	 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
2519	 * the directory code can handle a reservationless update and we don't
2520	 * want to prevent a user from trying to free space by deleting things.
2521	 */
2522	resblks = XFS_REMOVE_SPACE_RES(mp);
2523	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
2524			&tp, &dontcare);
2525	if (error) {
2526		ASSERT(error != -ENOSPC);
2527		goto std_return;
2528	}
2529
2530	/*
2531	 * If we're removing a directory perform some additional validation.
2532	 */
2533	if (is_dir) {
2534		ASSERT(VFS_I(ip)->i_nlink >= 2);
2535		if (VFS_I(ip)->i_nlink != 2) {
2536			error = -ENOTEMPTY;
2537			goto out_trans_cancel;
2538		}
2539		if (!xfs_dir_isempty(ip)) {
2540			error = -ENOTEMPTY;
2541			goto out_trans_cancel;
2542		}
2543
2544		/* Drop the link from ip's "..".  */
2545		error = xfs_droplink(tp, dp);
2546		if (error)
2547			goto out_trans_cancel;
2548
2549		/* Drop the "." link from ip to self.  */
2550		error = xfs_droplink(tp, ip);
2551		if (error)
2552			goto out_trans_cancel;
2553
2554		/*
2555		 * Point the unlinked child directory's ".." entry to the root
2556		 * directory to eliminate back-references to inodes that may
2557		 * get freed before the child directory is closed.  If the fs
2558		 * gets shrunk, this can lead to dirent inode validation errors.
2559		 */
2560		if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
2561			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
2562					tp->t_mountp->m_sb.sb_rootino, 0);
2563			if (error)
2564				goto out_trans_cancel;
2565		}
2566	} else {
2567		/*
2568		 * When removing a non-directory we need to log the parent
2569		 * inode here.  For a directory this is done implicitly
2570		 * by the xfs_droplink call for the ".." entry.
2571		 */
2572		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2573	}
2574	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2575
2576	/* Drop the link from dp to ip. */
2577	error = xfs_droplink(tp, ip);
2578	if (error)
2579		goto out_trans_cancel;
2580
2581	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2582	if (error) {
2583		ASSERT(error != -ENOENT);
2584		goto out_trans_cancel;
2585	}
2586
2587	/*
2588	 * If this is a synchronous mount, make sure that the
2589	 * remove transaction goes to disk before returning to
2590	 * the user.
2591	 */
2592	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
2593		xfs_trans_set_sync(tp);
2594
2595	error = xfs_trans_commit(tp);
2596	if (error)
2597		goto std_return;
2598
2599	if (is_dir && xfs_inode_is_filestream(ip))
2600		xfs_filestream_deassociate(ip);
2601
2602	return 0;
2603
2604 out_trans_cancel:
2605	xfs_trans_cancel(tp);
2606 std_return:
2607	return error;
2608}
2609
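/*
 * Illustrative sketch, not part of this file: a minimal model of the
 * AGI-before-AGF ordering rule described in the comment above xfs_remove().
 * The enum and helper are invented; they only show that remembering the
 * highest lock class taken so far is enough to catch an AGF -> AGI inversion.
 */
#include <assert.h>

enum ex_lock_class { EX_LOCK_NONE = 0, EX_LOCK_AGI = 1, EX_LOCK_AGF = 2 };

struct ex_lock_state { enum ex_lock_class last; };

/* Lock classes must be taken in increasing order: AGI (1) before AGF (2). */
static void
ex_lock(struct ex_lock_state *s, enum ex_lock_class cls)
{
	assert(cls > s->last);	/* taking AGF and then AGI trips this */
	s->last = cls;
}
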
2610/*
2611 * Enter all inodes for a rename transaction into a sorted array.
2612 */
2613#define __XFS_SORT_INODES	5
2614STATIC void
2615xfs_sort_for_rename(
2616	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
2617	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
2618	struct xfs_inode	*ip1,	/* in: inode of old entry */
2619	struct xfs_inode	*ip2,	/* in: inode of new entry */
2620	struct xfs_inode	*wip,	/* in: whiteout inode */
2621	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
2622	int			*num_inodes)  /* in/out: inodes in array */
2623{
2624	int			i, j;
2625
2626	ASSERT(*num_inodes == __XFS_SORT_INODES);
2627	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2628
2629	/*
2630	 * i_tab contains a list of pointers to inodes.  We initialize
2631	 * the table here & we'll sort it.  We will then use it to
2632	 * order the acquisition of the inode locks.
2633	 *
2634	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2635	 */
2636	i = 0;
2637	i_tab[i++] = dp1;
2638	i_tab[i++] = dp2;
2639	i_tab[i++] = ip1;
2640	if (ip2)
2641		i_tab[i++] = ip2;
2642	if (wip)
2643		i_tab[i++] = wip;
2644	*num_inodes = i;
2645
2646	/*
2647	 * Sort the elements via bubble sort.  (Remember, there are at
2648	 * most 5 elements to sort, so this is adequate.)
2649	 */
2650	for (i = 0; i < *num_inodes; i++) {
2651		for (j = 1; j < *num_inodes; j++) {
2652			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2653				struct xfs_inode *temp = i_tab[j];
2654				i_tab[j] = i_tab[j-1];
2655				i_tab[j-1] = temp;
2656			}
2657		}
2658	}
2659}
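
/*
 * Worked example for xfs_sort_for_rename() above: inode numbers 131 (parent),
 * 258 (source), 17 (target) and 64 (whiteout) sort to 17, 64, 131, 258, so
 * every rename takes its inode locks in ascending inode-number order and two
 * concurrent renames over the same inodes cannot ABBA deadlock.
 */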
2660
2661static int
2662xfs_finish_rename(
2663	struct xfs_trans	*tp)
2664{
2665	/*
2666	 * If this is a synchronous mount, make sure that the rename transaction
2667	 * goes to disk before returning to the user.
2668	 */
2669	if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
2670		xfs_trans_set_sync(tp);
2671
2672	return xfs_trans_commit(tp);
2673}
2674
2675/*
2676 * xfs_cross_rename()
2677 *
2678 * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall.
2679 */
2680STATIC int
2681xfs_cross_rename(
2682	struct xfs_trans	*tp,
2683	struct xfs_inode	*dp1,
2684	struct xfs_name		*name1,
2685	struct xfs_inode	*ip1,
2686	struct xfs_inode	*dp2,
2687	struct xfs_name		*name2,
2688	struct xfs_inode	*ip2,
2689	int			spaceres)
2690{
2691	int		error = 0;
2692	int		ip1_flags = 0;
2693	int		ip2_flags = 0;
2694	int		dp2_flags = 0;
2695
2696	/* Swap inode number for dirent in first parent */
2697	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2698	if (error)
2699		goto out_trans_abort;
2700
2701	/* Swap inode number for dirent in second parent */
2702	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2703	if (error)
2704		goto out_trans_abort;
2705
2706	/*
2707	 * If we're renaming one or more directories across different parents,
2708	 * update the respective ".." entries (and link counts) to match the new
2709	 * parents.
2710	 */
2711	if (dp1 != dp2) {
2712		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2713
2714		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2715			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2716						dp1->i_ino, spaceres);
2717			if (error)
2718				goto out_trans_abort;
2719
2720			/* transfer ip2 ".." reference to dp1 */
2721			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2722				error = xfs_droplink(tp, dp2);
2723				if (error)
2724					goto out_trans_abort;
2725				xfs_bumplink(tp, dp1);
2726			}
2727
2728			/*
2729			 * Although ip1 isn't changed here, userspace needs
2730			 * to be warned about the change, so that applications
2731			 * relying on it (like backup ones) will properly
2732			 * notice the change.
2733			 */
2734			ip1_flags |= XFS_ICHGTIME_CHG;
2735			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2736		}
2737
2738		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2739			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2740						dp2->i_ino, spaceres);
2741			if (error)
2742				goto out_trans_abort;
2743
2744			/* transfer ip1 ".." reference to dp2 */
2745			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2746				error = xfs_droplink(tp, dp1);
2747				if (error)
2748					goto out_trans_abort;
2749				xfs_bumplink(tp, dp2);
2750			}
2751
2752			/*
2753			 * Although ip2 isn't changed here, userspace needs
2754			 * to be warned about the change, so that applications
2755			 * relying on it (like backup ones) will properly
2756			 * notice the change.
2757			 */
2758			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2759			ip2_flags |= XFS_ICHGTIME_CHG;
2760		}
2761	}
2762
2763	if (ip1_flags) {
2764		xfs_trans_ichgtime(tp, ip1, ip1_flags);
2765		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2766	}
2767	if (ip2_flags) {
2768		xfs_trans_ichgtime(tp, ip2, ip2_flags);
2769		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2770	}
2771	if (dp2_flags) {
2772		xfs_trans_ichgtime(tp, dp2, dp2_flags);
2773		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2774	}
2775	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2776	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2777	return xfs_finish_rename(tp);
2778
2779out_trans_abort:
2780	xfs_trans_cancel(tp);
2781	return error;
2782}
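
/*
 * Worked example for the exchange above: swapping directory a/d with regular
 * file b/f re-points d's ".." entry from a to b, so b's link count is bumped
 * and a's is dropped, while both parents and both swapped inodes get their
 * timestamps updated for the benefit of userspace.
 */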
2783
2784/*
2785 * xfs_rename_alloc_whiteout()
2786 *
2787 * Return a referenced, unlinked, unlocked inode that can be used as a
2788 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
2789 * crash between allocating the inode and linking it into the rename transaction,
2790 * recovery will free the inode and we won't leak it.
2791 */
2792static int
2793xfs_rename_alloc_whiteout(
2794	struct mnt_idmap	*idmap,
2795	struct xfs_name		*src_name,
2796	struct xfs_inode	*dp,
2797	struct xfs_inode	**wip)
2798{
2799	struct xfs_inode	*tmpfile;
2800	struct qstr		name;
2801	int			error;
2802
2803	error = xfs_create_tmpfile(idmap, dp, S_IFCHR | WHITEOUT_MODE,
2804				   &tmpfile);
2805	if (error)
2806		return error;
2807
2808	name.name = src_name->name;
2809	name.len = src_name->len;
2810	error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
2811	if (error) {
2812		xfs_finish_inode_setup(tmpfile);
2813		xfs_irele(tmpfile);
2814		return error;
2815	}
2816
2817	/*
2818	 * Prepare the tmpfile inode as if it were created through the VFS.
2819	 * Complete the inode setup and flag it as linkable.  nlink is already
2820	 * zero, so we can skip the drop_nlink.
2821	 */
2822	xfs_setup_iops(tmpfile);
2823	xfs_finish_inode_setup(tmpfile);
2824	VFS_I(tmpfile)->i_state |= I_LINKABLE;
2825
2826	*wip = tmpfile;
2827	return 0;
2828}
2829
2830/*
2831 * xfs_rename
2832 */
2833int
2834xfs_rename(
2835	struct mnt_idmap	*idmap,
2836	struct xfs_inode	*src_dp,
2837	struct xfs_name		*src_name,
2838	struct xfs_inode	*src_ip,
2839	struct xfs_inode	*target_dp,
2840	struct xfs_name		*target_name,
2841	struct xfs_inode	*target_ip,
2842	unsigned int		flags)
2843{
2844	struct xfs_mount	*mp = src_dp->i_mount;
2845	struct xfs_trans	*tp;
2846	struct xfs_inode	*wip = NULL;		/* whiteout inode */
2847	struct xfs_inode	*inodes[__XFS_SORT_INODES];
2848	int			i;
2849	int			num_inodes = __XFS_SORT_INODES;
2850	bool			new_parent = (src_dp != target_dp);
2851	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
2852	int			spaceres;
2853	bool			retried = false;
2854	int			error, nospace_error = 0;
2855
2856	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2857
2858	if ((flags & RENAME_EXCHANGE) && !target_ip)
2859		return -EINVAL;
2860
2861	/*
2862	 * If we are doing a whiteout operation, allocate the whiteout inode
2863	 * we will be placing at the target and ensure the type is set
2864	 * appropriately.
2865	 */
2866	if (flags & RENAME_WHITEOUT) {
2867		error = xfs_rename_alloc_whiteout(idmap, src_name,
2868						  target_dp, &wip);
2869		if (error)
2870			return error;
2871
2872		/* setup target dirent info as whiteout */
2873		src_name->type = XFS_DIR3_FT_CHRDEV;
2874	}
2875
2876	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
2877				inodes, &num_inodes);
2878
2879retry:
2880	nospace_error = 0;
2881	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2882	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
2883	if (error == -ENOSPC) {
2884		nospace_error = error;
2885		spaceres = 0;
2886		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2887				&tp);
2888	}
2889	if (error)
2890		goto out_release_wip;
2891
2892	/*
2893	 * Attach the dquots to the inodes
2894	 */
2895	error = xfs_qm_vop_rename_dqattach(inodes);
2896	if (error)
2897		goto out_trans_cancel;
2898
2899	/*
2900	 * Lock all the participating inodes. Depending upon whether
2901	 * the target_name exists in the target directory, and
2902	 * whether the target directory is the same as the source
2903	 * directory, we can lock from 2 to 5 inodes.
2904	 */
2905	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2906
2907	/*
2908	 * Join all the inodes to the transaction. From this point on,
2909	 * we can rely on either trans_commit or trans_cancel to unlock
2910	 * them.
2911	 */
2912	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
2913	if (new_parent)
2914		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
2915	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2916	if (target_ip)
2917		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
2918	if (wip)
2919		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
2920
2921	/*
2922	 * If we are using project inheritance, we only allow renames
2923	 * into our tree when the project IDs are the same; else the
2924	 * tree quota mechanism would be circumvented.
2925	 */
2926	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
2927		     target_dp->i_projid != src_ip->i_projid)) {
2928		error = -EXDEV;
2929		goto out_trans_cancel;
2930	}
2931
2932	/* RENAME_EXCHANGE is unique from here on. */
2933	if (flags & RENAME_EXCHANGE)
2934		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
2935					target_dp, target_name, target_ip,
2936					spaceres);
2937
2938	/*
2939	 * Try to reserve quota to handle an expansion of the target directory.
2940	 * We'll allow the rename to continue in reservationless mode if we hit
2941	 * a space usage constraint.  If we trigger reservationless mode, save
2942	 * the errno if there isn't any free space in the target directory.
2943	 */
2944	if (spaceres != 0) {
2945		error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
2946				0, false);
2947		if (error == -EDQUOT || error == -ENOSPC) {
2948			if (!retried) {
2949				xfs_trans_cancel(tp);
2950				xfs_blockgc_free_quota(target_dp, 0);
2951				retried = true;
2952				goto retry;
2953			}
2954
2955			nospace_error = error;
2956			spaceres = 0;
2957			error = 0;
2958		}
2959		if (error)
2960			goto out_trans_cancel;
2961	}
2962
2963	/*
2964	 * Check for expected errors before we dirty the transaction
2965	 * so we can return an error without a transaction abort.
2966	 */
2967	if (target_ip == NULL) {
2968		/*
2969		 * If there's no space reservation, check the entry will
2970		 * fit before actually inserting it.
2971		 */
2972		if (!spaceres) {
2973			error = xfs_dir_canenter(tp, target_dp, target_name);
2974			if (error)
2975				goto out_trans_cancel;
2976		}
2977	} else {
2978		/*
2979		 * If the target exists and is a directory, check whether it can
2980		 * be destroyed.
2981		 */
2982		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
2983		    (!xfs_dir_isempty(target_ip) ||
2984		     (VFS_I(target_ip)->i_nlink > 2))) {
2985			error = -EEXIST;
2986			goto out_trans_cancel;
2987		}
2988	}
2989
2990	/*
2991	 * Lock the AGI buffers we need to handle bumping the nlink of the
2992	 * whiteout inode off the unlinked list and to handle dropping the
2993	 * nlink of the target inode.  Per locking order rules, do this in
2994	 * increasing AG order and before directory block allocation tries to
2995	 * grab AGFs because we grab AGIs before AGFs.
2996	 *
2997	 * The (vfs) caller must ensure that if src is a directory then
2998	 * target_ip is either null or an empty directory.
2999	 */
3000	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
3001		if (inodes[i] == wip ||
3002		    (inodes[i] == target_ip &&
3003		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
3004			struct xfs_perag	*pag;
3005			struct xfs_buf		*bp;
3006
3007			pag = xfs_perag_get(mp,
3008					XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
3009			error = xfs_read_agi(pag, tp, &bp);
3010			xfs_perag_put(pag);
3011			if (error)
3012				goto out_trans_cancel;
3013		}
3014	}
3015
3016	/*
3017	 * Directory entry creation below may acquire the AGF. Remove
3018	 * the whiteout from the unlinked list first to preserve correct
3019	 * AGI/AGF locking order. This dirties the transaction so failures
3020	 * after this point will abort and log recovery will clean up the
3021	 * mess.
3022	 *
3023	 * For whiteouts, we need to bump the link count on the whiteout
3024	 * inode. After this point we have a real link, so clear the tmpfile
3025	 * state flag from the inode so it doesn't accidentally get misused
3026	 * in future.
3027	 */
3028	if (wip) {
3029		struct xfs_perag	*pag;
3030
3031		ASSERT(VFS_I(wip)->i_nlink == 0);
3032
3033		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
3034		error = xfs_iunlink_remove(tp, pag, wip);
3035		xfs_perag_put(pag);
3036		if (error)
3037			goto out_trans_cancel;
3038
3039		xfs_bumplink(tp, wip);
3040		VFS_I(wip)->i_state &= ~I_LINKABLE;
3041	}
3042
3043	/*
3044	 * Set up the target.
3045	 */
3046	if (target_ip == NULL) {
3047		/*
3048		 * If target does not exist and the rename crosses
3049		 * directories, adjust the target directory link count
3050		 * to account for the ".." reference from the new entry.
3051		 */
3052		error = xfs_dir_createname(tp, target_dp, target_name,
3053					   src_ip->i_ino, spaceres);
3054		if (error)
3055			goto out_trans_cancel;
3056
3057		xfs_trans_ichgtime(tp, target_dp,
3058					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3059
3060		if (new_parent && src_is_directory) {
3061			xfs_bumplink(tp, target_dp);
3062		}
3063	} else { /* target_ip != NULL */
3064		/*
3065		 * Link the source inode under the target name.
3066		 * If the source inode is a directory and we are moving
3067		 * it across directories, its ".." entry will be
3068		 * inconsistent until we replace that down below.
3069		 *
3070		 * In case there is already an entry with the same
3071		 * name at the destination directory, remove it first.
3072		 */
3073		error = xfs_dir_replace(tp, target_dp, target_name,
3074					src_ip->i_ino, spaceres);
3075		if (error)
3076			goto out_trans_cancel;
3077
3078		xfs_trans_ichgtime(tp, target_dp,
3079					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3080
3081		/*
3082		 * Decrement the link count on the target since the target
3083		 * dir no longer points to it.
3084		 */
3085		error = xfs_droplink(tp, target_ip);
3086		if (error)
3087			goto out_trans_cancel;
3088
3089		if (src_is_directory) {
3090			/*
3091			 * Drop the link from the old "." entry.
3092			 */
3093			error = xfs_droplink(tp, target_ip);
3094			if (error)
3095				goto out_trans_cancel;
3096		}
3097	} /* target_ip != NULL */
3098
3099	/*
3100	 * Remove the source.
3101	 */
3102	if (new_parent && src_is_directory) {
3103		/*
3104		 * Rewrite the ".." entry to point to the new
3105		 * directory.
3106		 */
3107		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3108					target_dp->i_ino, spaceres);
3109		ASSERT(error != -EEXIST);
3110		if (error)
3111			goto out_trans_cancel;
3112	}
3113
3114	/*
3115	 * We always want to hit the ctime on the source inode.
3116	 *
3117	 * This isn't strictly required by the standards since the source
3118	 * inode isn't really being changed, but old unix file systems did
3119	 * it and some incremental backup programs won't work without it.
3120	 */
3121	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3122	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3123
3124	/*
3125	 * Adjust the link count on src_dp.  This is necessary when
3126	 * renaming a directory, either within one parent when
3127	 * the target existed, or across two parent directories.
3128	 */
3129	if (src_is_directory && (new_parent || target_ip != NULL)) {
3130
3131		/*
3132		 * Decrement link count on src_directory since the
3133		 * entry that's moved no longer points to it.
3134		 */
3135		error = xfs_droplink(tp, src_dp);
3136		if (error)
3137			goto out_trans_cancel;
3138	}
3139
3140	/*
3141	 * For whiteouts, we only need to update the source dirent with the
3142	 * inode number of the whiteout inode rather than removing it
3143	 * altogether.
3144	 */
3145	if (wip)
3146		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3147					spaceres);
3148	else
3149		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3150					   spaceres);
3151
3152	if (error)
3153		goto out_trans_cancel;
3154
3155	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3156	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3157	if (new_parent)
3158		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3159
3160	error = xfs_finish_rename(tp);
3161	if (wip)
3162		xfs_irele(wip);
3163	return error;
3164
3165out_trans_cancel:
3166	xfs_trans_cancel(tp);
3167out_release_wip:
3168	if (wip)
3169		xfs_irele(wip);
3170	if (error == -ENOSPC && nospace_error)
3171		error = nospace_error;
3172	return error;
3173}
3174
3175static int
3176xfs_iflush(
3177	struct xfs_inode	*ip,
3178	struct xfs_buf		*bp)
3179{
3180	struct xfs_inode_log_item *iip = ip->i_itemp;
3181	struct xfs_dinode	*dip;
3182	struct xfs_mount	*mp = ip->i_mount;
3183	int			error;
3184
3185	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3186	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3187	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3188	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3189	ASSERT(iip->ili_item.li_buf == bp);
3190
3191	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3192
3193	/*
3194	 * We don't flush the inode if any of the following checks fail, but we
3195	 * do still update the log item and attach to the backing buffer as if
3196	 * the flush happened. This is a formality to facilitate predictable
3197	 * error handling as the caller will shutdown and fail the buffer.
3198	 */
3199	error = -EFSCORRUPTED;
3200	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3201			       mp, XFS_ERRTAG_IFLUSH_1)) {
3202		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3203			"%s: Bad inode %llu magic number 0x%x, ptr "PTR_FMT,
3204			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3205		goto flush_out;
3206	}
3207	if (S_ISREG(VFS_I(ip)->i_mode)) {
3208		if (XFS_TEST_ERROR(
3209		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3210		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3211		    mp, XFS_ERRTAG_IFLUSH_3)) {
3212			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3213				"%s: Bad regular inode %llu, ptr "PTR_FMT,
3214				__func__, ip->i_ino, ip);
3215			goto flush_out;
3216		}
3217	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3218		if (XFS_TEST_ERROR(
3219		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3220		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3221		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3222		    mp, XFS_ERRTAG_IFLUSH_4)) {
3223			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3224				"%s: Bad directory inode %llu, ptr "PTR_FMT,
3225				__func__, ip->i_ino, ip);
3226			goto flush_out;
3227		}
3228	}
3229	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
3230				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3231		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3232			"%s: detected corrupt incore inode %llu, "
3233			"total extents = %llu nblocks = %lld, ptr "PTR_FMT,
3234			__func__, ip->i_ino,
3235			ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
3236			ip->i_nblocks, ip);
3237		goto flush_out;
3238	}
3239	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
3240				mp, XFS_ERRTAG_IFLUSH_6)) {
3241		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3242			"%s: bad inode %llu, forkoff 0x%x, ptr "PTR_FMT,
3243			__func__, ip->i_ino, ip->i_forkoff, ip);
3244		goto flush_out;
3245	}
3246
3247	/*
3248	 * Inode item log recovery for v2 inodes are dependent on the flushiter
3249	 * count for correct sequencing.  We bump the flush iteration count so
3250	 * we can detect flushes which postdate a log record during recovery.
3251	 * This is redundant as we now log every change and hence this can't
3252	 * happen, but we still need to do it to ensure backwards compatibility
3253	 * with old kernels that predate logging all inode changes.
3254	 */
3255	if (!xfs_has_v3inodes(mp))
3256		ip->i_flushiter++;
3257
3258	/*
3259	 * If there are inline format data / attr forks attached to this inode,
3260	 * make sure they are not corrupt.
3261	 */
3262	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3263	    xfs_ifork_verify_local_data(ip))
3264		goto flush_out;
3265	if (xfs_inode_has_attr_fork(ip) &&
3266	    ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
3267	    xfs_ifork_verify_local_attr(ip))
3268		goto flush_out;
3269
3270	/*
3271	 * Copy the dirty parts of the inode into the on-disk inode.  We always
3272	 * copy out the core of the inode, because if the inode is dirty at all
3273	 * the core must be.
3274	 */
3275	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3276
3277	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3278	if (!xfs_has_v3inodes(mp)) {
3279		if (ip->i_flushiter == DI_MAX_FLUSH)
3280			ip->i_flushiter = 0;
3281	}
3282
3283	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3284	if (xfs_inode_has_attr_fork(ip))
3285		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3286
3287	/*
3288	 * We've recorded everything logged in the inode, so we'd like to clear
3289	 * the ili_fields bits so we don't log and flush things unnecessarily.
3290	 * However, we can't stop logging all this information until the data
3291	 * we've copied into the disk buffer is written to disk.  If we did we
3292	 * might overwrite the copy of the inode in the log with all the data
3293	 * after re-logging only part of it, and in the face of a crash we
3294	 * wouldn't have all the data we need to recover.
3295	 *
3296	 * What we do is move the bits to the ili_last_fields field.  When
3297	 * logging the inode, these bits are moved back to the ili_fields field.
3298	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3299	 * we know that the information those bits represent is permanently on
3300	 * disk.  As long as the flush completes before the inode is logged
3301	 * again, then both ili_fields and ili_last_fields will be cleared.
3302	 */
3303	error = 0;
3304flush_out:
3305	spin_lock(&iip->ili_lock);
3306	iip->ili_last_fields = iip->ili_fields;
3307	iip->ili_fields = 0;
3308	iip->ili_fsync_fields = 0;
3309	spin_unlock(&iip->ili_lock);
3310
3311	/*
3312	 * Store the current LSN of the inode so that we can tell whether the
3313	 * item has moved in the AIL from xfs_buf_inode_iodone().
3314	 */
3315	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3316				&iip->ili_item.li_lsn);
3317
3318	/* generate the checksum. */
3319	xfs_dinode_calc_crc(mp, dip);
3320	return error;
3321}
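
/*
 * A minimal standalone model of the ili_fields/ili_last_fields handoff
 * described above.  This is an illustrative sketch, not kernel API: the
 * struct and function names are made up, and it assumes flush, logging
 * and flush completion never race on the model instance.
 */
struct ili_model {
	unsigned int	fields;		/* dirtied since the last flush */
	unsigned int	last_fields;	/* covered by an in-flight flush */
};

/* Flush: park the dirty bits so relogging can restart from zero. */
static void model_flush(struct ili_model *m)
{
	m->last_fields = m->fields;
	m->fields = 0;
}

/* Log the inode while a flush is in flight: parked bits rejoin fields. */
static void model_log(struct ili_model *m, unsigned int newbits)
{
	m->fields |= m->last_fields | newbits;
}

/* Flush I/O completion: the parked bits are now stable on disk. */
static void model_iodone(struct ili_model *m)
{
	m->last_fields = 0;
}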
3322
3323/*
3324 * Non-blocking flush of dirty inode metadata into the backing buffer.
3325 *
3326 * The caller must have a reference to the inode and hold the cluster buffer
3327	 * locked. The function will walk all the inodes attached to the cluster
3328	 * buffer that it can find and lock without blocking, and flush them to the buffer.
3329 *
3330 * On successful flushing of at least one inode, the caller must write out the
3331 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3332 * the caller needs to release the buffer. On failure, the filesystem will be
3333	 * shut down, the buffer will have been unlocked and released, and a
3334	 * negative errno (-EIO or -EFSCORRUPTED) will be returned.
3335 */
3336int
3337xfs_iflush_cluster(
3338	struct xfs_buf		*bp)
3339{
3340	struct xfs_mount	*mp = bp->b_mount;
3341	struct xfs_log_item	*lip, *n;
3342	struct xfs_inode	*ip;
3343	struct xfs_inode_log_item *iip;
3344	int			clcount = 0;
3345	int			error = 0;
3346
3347	/*
3348	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3349	 * will remove the log item from the list.
3350	 */
3351	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3352		iip = (struct xfs_inode_log_item *)lip;
3353		ip = iip->ili_inode;
3354
3355		/*
3356		 * Quick and dirty check to avoid locks if possible.
3357		 */
3358		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
3359			continue;
3360		if (xfs_ipincount(ip))
3361			continue;
3362
3363		/*
3364		 * The inode is still attached to the buffer, which means it is
3365		 * dirty but reclaim might try to grab it. Check carefully for
3366		 * that, and grab the ilock while still holding the i_flags_lock
3367		 * to guarantee reclaim will not be able to reclaim this inode
3368		 * once we drop the i_flags_lock.
3369		 */
3370		spin_lock(&ip->i_flags_lock);
3371		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3372		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
3373			spin_unlock(&ip->i_flags_lock);
3374			continue;
3375		}
3376
3377		/*
3378		 * ILOCK will pin the inode against reclaim and prevent
3379		 * concurrent transactions modifying the inode while we are
3380		 * flushing the inode. If we get the lock, set the flushing
3381		 * state before we drop the i_flags_lock.
3382		 */
3383		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3384			spin_unlock(&ip->i_flags_lock);
3385			continue;
3386		}
3387		__xfs_iflags_set(ip, XFS_IFLUSHING);
3388		spin_unlock(&ip->i_flags_lock);
3389
3390		/*
3391		 * Abort flushing this inode if we are shut down because the
3392		 * inode may not currently be in the AIL. This can occur when
3393	 * a log I/O failure unpins the inode without inserting it into the
3394		 * AIL, leaving a dirty/unpinned inode attached to the buffer
3395		 * that otherwise looks like it should be flushed.
3396		 */
3397		if (xlog_is_shutdown(mp->m_log)) {
3398			xfs_iunpin_wait(ip);
3399			xfs_iflush_abort(ip);
3400			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3401			error = -EIO;
3402			continue;
3403		}
3404
3405		/* don't block waiting on a log force to unpin dirty inodes */
3406		if (xfs_ipincount(ip)) {
3407			xfs_iflags_clear(ip, XFS_IFLUSHING);
3408			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3409			continue;
3410		}
3411
3412		if (!xfs_inode_clean(ip))
3413			error = xfs_iflush(ip, bp);
3414		else
3415			xfs_iflags_clear(ip, XFS_IFLUSHING);
3416		xfs_iunlock(ip, XFS_ILOCK_SHARED);
3417		if (error)
3418			break;
3419		clcount++;
3420	}
3421
3422	if (error) {
3423		/*
3424		 * Shutdown first so we kill the log before we release this
3425		 * buffer. If it is an INODE_ALLOC buffer and pins the tail
3426		 * of the log, failing it before the _log_ is shut down can
3427		 * result in the log tail being moved forward in the journal
3428		 * on disk because log writes can still be taking place. Hence
3429		 * unpinning the tail will allow the ICREATE intent to be
3430	 * removed from the log, and recovery will then fail with uninitialised
3431		 * inode cluster buffers.
3432		 */
3433		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3434		bp->b_flags |= XBF_ASYNC;
3435		xfs_buf_ioend_fail(bp);
3436		return error;
3437	}
3438
3439	if (!clcount)
3440		return -EAGAIN;
3441
3442	XFS_STATS_INC(mp, xs_icluster_flushcnt);
3443	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3444	return 0;
3446}
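
/*
 * A minimal caller sketch for the contract documented above; assumed,
 * simplified code rather than the real push path.  "buffer_list" stands
 * in for a caller-owned delwri list, and the bool result of
 * xfs_buf_delwri_queue() is ignored for brevity.
 */
static void
example_push_cluster(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	int			error = xfs_iflush_cluster(bp);

	if (!error) {
		/* flushed something: queue writeback, then drop the buffer */
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
	} else if (error == -EAGAIN) {
		/* nothing flushed: we still own the buffer, so release it */
		xfs_buf_relse(bp);
	}
	/* other errors: fs is shut down and bp was already released */
}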
3447
3448/* Release an inode. */
3449void
3450xfs_irele(
3451	struct xfs_inode	*ip)
3452{
3453	trace_xfs_irele(ip, _RET_IP_);
3454	iput(VFS_I(ip));
3455}
3456
3457/*
3458 * Ensure all committed transactions touching the inode are written to the log.
3459 */
3460int
3461xfs_log_force_inode(
3462	struct xfs_inode	*ip)
3463{
3464	xfs_csn_t		seq = 0;
3465
3466	xfs_ilock(ip, XFS_ILOCK_SHARED);
3467	if (xfs_ipincount(ip))
3468		seq = ip->i_itemp->ili_commit_seq;
3469	xfs_iunlock(ip, XFS_ILOCK_SHARED);
3470
3471	if (!seq)
3472		return 0;
3473	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
3474}
3475
3476/*
3477 * Grab the exclusive iolock for a data copy from src to dest, making sure to
3478 * abide by the VFS locking order (lowest pointer value goes first) and break the
3479 * layout leases before proceeding.  The loop is needed because we cannot call
3480 * the blocking break_layout() with the iolocks held, and therefore have to
3481 * back out both locks.
3482 */
3483static int
3484xfs_iolock_two_inodes_and_break_layout(
3485	struct inode		*src,
3486	struct inode		*dest)
3487{
3488	int			error;
3489
3490	if (src > dest)
3491		swap(src, dest);
3492
3493retry:
3494	/* Wait to break both inodes' layouts before we start locking. */
3495	error = break_layout(src, true);
3496	if (error)
3497		return error;
3498	if (src != dest) {
3499		error = break_layout(dest, true);
3500		if (error)
3501			return error;
3502	}
3503
3504	/* Lock one inode and make sure nobody got in and leased it. */
3505	inode_lock(src);
3506	error = break_layout(src, false);
3507	if (error) {
3508		inode_unlock(src);
3509		if (error == -EWOULDBLOCK)
3510			goto retry;
3511		return error;
3512	}
3513
3514	if (src == dest)
3515		return 0;
3516
3517	/* Lock the other inode and make sure nobody got in and leased it. */
3518	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3519	error = break_layout(dest, false);
3520	if (error) {
3521		inode_unlock(src);
3522		inode_unlock(dest);
3523		if (error == -EWOULDBLOCK)
3524			goto retry;
3525		return error;
3526	}
3527
3528	return 0;
3529}
3530
3531static int
3532xfs_mmaplock_two_inodes_and_break_dax_layout(
3533	struct xfs_inode	*ip1,
3534	struct xfs_inode	*ip2)
3535{
3536	int			error;
3537	bool			retry;
3538	struct page		*page;
3539
3540	if (ip1->i_ino > ip2->i_ino)
3541		swap(ip1, ip2);
3542
3543again:
3544	retry = false;
3545	/* Lock the first inode */
3546	xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3547	error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
3548	if (error || retry) {
3549		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3550		if (error == 0 && retry)
3551			goto again;
3552		return error;
3553	}
3554
3555	if (ip1 == ip2)
3556		return 0;
3557
3558	/* Nested lock the second inode */
3559	xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
3560	/*
3561	 * We cannot use xfs_break_dax_layouts() directly here because it may
3562	 * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
3563	 * for this nested lock case.
3564	 */
3565	page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
3566	if (page && page_ref_count(page) != 1) {
3567		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3568		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3569		goto again;
3570	}
3571
3572	return 0;
3573}
3574
3575/*
3576 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3577 * mmap activity.
3578 */
3579int
3580xfs_ilock2_io_mmap(
3581	struct xfs_inode	*ip1,
3582	struct xfs_inode	*ip2)
3583{
3584	int			ret;
3585
3586	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3587	if (ret)
3588		return ret;
3589
3590	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
3591		ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
3592		if (ret) {
3593			inode_unlock(VFS_I(ip2));
3594			if (ip1 != ip2)
3595				inode_unlock(VFS_I(ip1));
3596			return ret;
3597		}
3598	} else
3599		filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
3600					    VFS_I(ip2)->i_mapping);
3601
3602	return 0;
3603}
3604
3605/* Unlock both inodes to allow IO and mmap activity. */
3606void
3607xfs_iunlock2_io_mmap(
3608	struct xfs_inode	*ip1,
3609	struct xfs_inode	*ip2)
3610{
3611	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
3612		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3613		if (ip1 != ip2)
3614			xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3615	} else
3616		filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
3617					      VFS_I(ip2)->i_mapping);
3618
3619	inode_unlock(VFS_I(ip2));
3620	if (ip1 != ip2)
3621		inode_unlock(VFS_I(ip1));
3622}
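
/*
 * Pairing sketch for the two helpers above (assumed, simplified code):
 * quiesce file I/O and mmap activity on both inodes around a two-inode
 * operation such as a remap.  example_remap() is a made-up name.
 */
static int
example_remap(
	struct xfs_inode	*src,
	struct xfs_inode	*dest)
{
	int			error;

	error = xfs_ilock2_io_mmap(src, dest);
	if (error)
		return error;

	/* ... modify both inodes while user I/O and page faults are held off ... */

	xfs_iunlock2_io_mmap(src, dest);
	return 0;
}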
3623
3624/* Drop the MMAPLOCK and the IOLOCK after a remap completes. */
3625void
3626xfs_iunlock2_remapping(
3627	struct xfs_inode	*ip1,
3628	struct xfs_inode	*ip2)
3629{
3630	xfs_iflags_clear(ip1, XFS_IREMAPPING);
3631
3632	if (ip1 != ip2)
3633		xfs_iunlock(ip1, XFS_MMAPLOCK_SHARED);
3634	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3635
3636	if (ip1 != ip2)
3637		inode_unlock_shared(VFS_I(ip1));
3638	inode_unlock(VFS_I(ip2));
3639}
3640
3641/*
3642 * Reload the incore unlinked list for this inode.  Caller should ensure that
3643 * the link count cannot change, either by taking ILOCK_SHARED or otherwise
3644 * preventing other threads from executing.
3645 */
3646int
3647xfs_inode_reload_unlinked_bucket(
3648	struct xfs_trans	*tp,
3649	struct xfs_inode	*ip)
3650{
3651	struct xfs_mount	*mp = tp->t_mountp;
3652	struct xfs_buf		*agibp;
3653	struct xfs_agi		*agi;
3654	struct xfs_perag	*pag;
3655	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
3656	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
3657	xfs_agino_t		prev_agino, next_agino;
3658	unsigned int		bucket;
3659	bool			foundit = false;
3660	int			error;
3661
3662	/* Grab the first inode in the list */
3663	pag = xfs_perag_get(mp, agno);
3664	error = xfs_ialloc_read_agi(pag, tp, &agibp);
3665	xfs_perag_put(pag);
3666	if (error)
3667		return error;
3668
3669	/*
3670	 * We've taken ILOCK_SHARED and the AGI buffer lock to stabilize the
3671	 * incore unlinked list pointers for this inode.  Check once more to
3672	 * see if we raced with anyone else to reload the unlinked list.
3673	 */
3674	if (!xfs_inode_unlinked_incomplete(ip)) {
3675		foundit = true;
3676		goto out_agibp;
3677	}
3678
3679	bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
3680	agi = agibp->b_addr;
3681
3682	trace_xfs_inode_reload_unlinked_bucket(ip);
3683
3684	xfs_info_ratelimited(mp,
3685 "Found unrecovered unlinked inode 0x%x in AG 0x%x.  Initiating list recovery.",
3686			agino, agno);
3687
3688	prev_agino = NULLAGINO;
3689	next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3690	while (next_agino != NULLAGINO) {
3691		struct xfs_inode	*next_ip = NULL;
3692
3693		/* Found this caller's inode, set its backlink. */
3694		if (next_agino == agino) {
3695			next_ip = ip;
3696			next_ip->i_prev_unlinked = prev_agino;
3697			foundit = true;
3698			goto next_inode;
3699		}
3700
3701		/* Try in-memory lookup first. */
3702		next_ip = xfs_iunlink_lookup(pag, next_agino);
3703		if (next_ip)
3704			goto next_inode;
3705
3706		/* Inode not in memory, try reloading it. */
3707		error = xfs_iunlink_reload_next(tp, agibp, prev_agino,
3708				next_agino);
3709		if (error)
3710			break;
3711
3712		/* Grab the reloaded inode. */
3713		next_ip = xfs_iunlink_lookup(pag, next_agino);
3714		if (!next_ip) {
3715			/* No incore inode at all?  We reloaded it... */
3716			ASSERT(next_ip != NULL);
3717			error = -EFSCORRUPTED;
3718			break;
3719		}
3720
3721next_inode:
3722		prev_agino = next_agino;
3723		next_agino = next_ip->i_next_unlinked;
3724	}
3725
3726out_agibp:
3727	xfs_trans_brelse(tp, agibp);
3728	/* Should have found this inode somewhere in the iunlinked bucket. */
3729	if (!error && !foundit)
3730		error = -EFSCORRUPTED;
3731	return error;
3732}
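
/*
 * Toy model of the backlink reconstruction performed above, assuming a
 * plain array stands in for the incore inodes of one AGI bucket and
 * ignoring the on-demand reload of uncached inodes.  All names here are
 * illustrative, not kernel API; TOY_NULL plays the role of NULLAGINO.
 */
#define TOY_NULL	(~0U)

struct toy_inode {
	unsigned int	next_unlinked;	/* forward link read from disk */
	unsigned int	prev_unlinked;	/* incore backlink to rebuild */
};

/* Walk one bucket chain from @head, setting each inode's backlink. */
static void
toy_rebuild_backlinks(
	struct toy_inode	*inodes,
	unsigned int		head)
{
	unsigned int		prev = TOY_NULL;
	unsigned int		next = head;

	while (next != TOY_NULL) {
		inodes[next].prev_unlinked = prev;
		prev = next;
		next = inodes[next].next_unlinked;
	}
}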
3733
3734/* Decide if this inode is missing its unlinked list and reload it. */
3735int
3736xfs_inode_reload_unlinked(
3737	struct xfs_inode	*ip)
3738{
3739	struct xfs_trans	*tp;
3740	int			error;
3741
3742	error = xfs_trans_alloc_empty(ip->i_mount, &tp);
3743	if (error)
3744		return error;
3745
3746	xfs_ilock(ip, XFS_ILOCK_SHARED);
3747	if (xfs_inode_unlinked_incomplete(ip))
3748		error = xfs_inode_reload_unlinked_bucket(tp, ip);
3749	xfs_iunlock(ip, XFS_ILOCK_SHARED);
3750	xfs_trans_cancel(tp);
3751
3752	return error;
3753}
3754
3755/* Has this inode fork been zapped by repair? */
3756bool
3757xfs_ifork_zapped(
3758	const struct xfs_inode	*ip,
3759	int			whichfork)
3760{
3761	unsigned int		datamask = 0;
3762
3763	switch (whichfork) {
3764	case XFS_DATA_FORK:
3765		switch (ip->i_vnode.i_mode & S_IFMT) {
3766		case S_IFDIR:
3767			datamask = XFS_SICK_INO_DIR_ZAPPED;
3768			break;
3769		case S_IFLNK:
3770			datamask = XFS_SICK_INO_SYMLINK_ZAPPED;
3771			break;
3772		}
3773		return ip->i_sick & (XFS_SICK_INO_BMBTD_ZAPPED | datamask);
3774	case XFS_ATTR_FORK:
3775		return ip->i_sick & XFS_SICK_INO_BMBTA_ZAPPED;
3776	default:
3777		return false;
3778	}
3779}
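
/*
 * Hypothetical caller sketch: refuse to trust fork contents that an
 * online repair pass has zapped.  example_read_dir() is a made-up name
 * and the -EBUSY return is illustrative only.
 */
static int
example_read_dir(
	struct xfs_inode	*dp)
{
	if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
		return -EBUSY;	/* wait for repair to rebuild the fork */

	/* ... safe to walk the directory data fork here ... */
	return 0;
}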