v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include <linux/iversion.h>
   7
   8#include "xfs.h"
   9#include "xfs_fs.h"
  10#include "xfs_shared.h"
  11#include "xfs_format.h"
  12#include "xfs_log_format.h"
  13#include "xfs_trans_resv.h"
  14#include "xfs_mount.h"
  15#include "xfs_defer.h"
  16#include "xfs_inode.h"
  17#include "xfs_dir2.h"
  18#include "xfs_attr.h"
  19#include "xfs_trans_space.h"
  20#include "xfs_trans.h"
  21#include "xfs_buf_item.h"
  22#include "xfs_inode_item.h"
  23#include "xfs_ialloc.h"
  24#include "xfs_bmap.h"
  25#include "xfs_bmap_util.h"
  26#include "xfs_errortag.h"
  27#include "xfs_error.h"
  28#include "xfs_quota.h"
  29#include "xfs_filestream.h"
  30#include "xfs_trace.h"
  31#include "xfs_icache.h"
  32#include "xfs_symlink.h"
  33#include "xfs_trans_priv.h"
  34#include "xfs_log.h"
  35#include "xfs_bmap_btree.h"
  36#include "xfs_reflink.h"
  37#include "xfs_ag.h"
  38
  39kmem_zone_t *xfs_inode_zone;
  40
  41/*
  42 * Used in xfs_itruncate_extents().  This is the maximum number of extents
  43 * freed from a file in a single transaction.
  44 */
  45#define	XFS_ITRUNC_MAX_EXTENTS	2
  46
  47STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
  48STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
  49	struct xfs_inode *);
  50
  51/*
  52 * Helper function to extract the extent size hint from an inode
  53 */
  54xfs_extlen_t
  55xfs_get_extsz_hint(
  56	struct xfs_inode	*ip)
  57{
  58	/*
  59	 * No point in aligning allocations if we need to COW to actually
  60	 * write to them.
  61	 */
  62	if (xfs_is_always_cow_inode(ip))
  63		return 0;
  64	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
  65		return ip->i_extsize;
  66	if (XFS_IS_REALTIME_INODE(ip))
  67		return ip->i_mount->m_sb.sb_rextsize;
  68	return 0;
  69}
  70
  71/*
  72 * Helper function to extract CoW extent size hint from inode.
  73 * Between the extent size hint and the CoW extent size hint, we
  74 * return the greater of the two.  If the value is zero (automatic),
  75 * use the default size.
  76 */
  77xfs_extlen_t
  78xfs_get_cowextsz_hint(
  79	struct xfs_inode	*ip)
  80{
  81	xfs_extlen_t		a, b;
  82
  83	a = 0;
  84	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
  85		a = ip->i_cowextsize;
  86	b = xfs_get_extsz_hint(ip);
  87
  88	a = max(a, b);
  89	if (a == 0)
  90		return XFS_DEFAULT_COWEXTSZ_HINT;
  91	return a;
  92}
  93
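/*
 * Worked example (sketch, not part of the original file): with
 * i_extsize = 16 blocks and i_cowextsize = 32 blocks set on an inode,
 * xfs_get_cowextsz_hint() returns max(32, 16) = 32; with neither hint
 * set it falls back to XFS_DEFAULT_COWEXTSZ_HINT.  A hypothetical
 * caller sizing a CoW reservation might round up to the hint:
 */
static inline xfs_extlen_t
xfs_example_cow_resblks(
	struct xfs_inode	*ip,
	xfs_extlen_t		count_fsb)
{
	/* Round the requested length up to a multiple of the CoW hint. */
	return roundup(count_fsb, xfs_get_cowextsz_hint(ip));
}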
  94/*
  95 * These two are wrapper routines around the xfs_ilock() routine used to
  96 * centralize some grungy code.  They are used in places that wish to lock the
  97 * inode solely for reading the extents.  The reason these places can't just
  98 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards
  99 * the bringing in of the extents from disk for a file in b-tree format.  If the
 100 * inode is in b-tree format, then we need to lock the inode exclusively until
 101 * the extents are read in.  Locking it exclusively all the time would limit
 102 * our parallelism unnecessarily, though.  What we do instead is check to see
 103 * if the extents have been read in yet, and only lock the inode exclusively
 104 * if they have not.
 105 *
 106 * The functions return a value which should be given to the corresponding
 107 * xfs_iunlock() call.
 108 */
 109uint
 110xfs_ilock_data_map_shared(
 111	struct xfs_inode	*ip)
 112{
 113	uint			lock_mode = XFS_ILOCK_SHARED;
 114
 115	if (xfs_need_iread_extents(&ip->i_df))
 116		lock_mode = XFS_ILOCK_EXCL;
 117	xfs_ilock(ip, lock_mode);
 118	return lock_mode;
 119}
 120
 121uint
 122xfs_ilock_attr_map_shared(
 123	struct xfs_inode	*ip)
 124{
 125	uint			lock_mode = XFS_ILOCK_SHARED;
 126
 127	if (ip->i_afp && xfs_need_iread_extents(ip->i_afp))
 128		lock_mode = XFS_ILOCK_EXCL;
 129	xfs_ilock(ip, lock_mode);
 130	return lock_mode;
 131}
 132
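/*
 * Usage sketch (hypothetical caller, not part of this file): whichever
 * mode the wrappers above grant, SHARED or EXCL, must be passed back to
 * xfs_iunlock() unchanged:
 */
static inline xfs_extnum_t
xfs_example_count_data_extents(
	struct xfs_inode	*ip)
{
	uint			lock_mode;
	xfs_extnum_t		nextents;

	lock_mode = xfs_ilock_data_map_shared(ip);
	nextents = ip->i_df.if_nextents;
	xfs_iunlock(ip, lock_mode);
	return nextents;
}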
 133/*
 134 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 135 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 136 * various combinations of the locks to be obtained.
 137 *
 138 * The 3 locks should always be ordered so that the IO lock is obtained first,
 139 * the mmap lock second and the ilock last in order to prevent deadlock.
 140 *
 141 * Basic locking order:
 142 *
 143 * i_rwsem -> i_mmap_lock -> page_lock -> i_lock
 144 *
 145 * mmap_lock locking order:
 146 *
 147 * i_rwsem -> page lock -> mmap_lock
 148 * mmap_lock -> i_mmap_lock -> page_lock
 149 *
 150 * The difference in mmap_lock locking order means that we cannot hold the
 151 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 152 * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
 153 * in get_user_pages() to map the user pages into the kernel address space for
 154 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 155 * page faults already hold the mmap_lock.
 156 *
 157 * Hence to serialise fully against both syscall and mmap based IO, we need to
 158 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 159 * taken in places where we need to invalidate the page cache in a race
 160 * free manner (e.g. truncate, hole punch and other extent manipulation
 161 * functions).
 162 */
 163void
 164xfs_ilock(
 165	xfs_inode_t		*ip,
 166	uint			lock_flags)
 167{
 168	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
 169
 170	/*
 171	 * You can't set both SHARED and EXCL for the same lock,
 172	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
 173	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
 174	 */
 175	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 176	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 177	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 178	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 179	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 180	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 181	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 182
 183	if (lock_flags & XFS_IOLOCK_EXCL) {
 184		down_write_nested(&VFS_I(ip)->i_rwsem,
 185				  XFS_IOLOCK_DEP(lock_flags));
 186	} else if (lock_flags & XFS_IOLOCK_SHARED) {
 187		down_read_nested(&VFS_I(ip)->i_rwsem,
 188				 XFS_IOLOCK_DEP(lock_flags));
 189	}
 190
 191	if (lock_flags & XFS_MMAPLOCK_EXCL)
 192		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
 193	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 194		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
 195
 196	if (lock_flags & XFS_ILOCK_EXCL)
 197		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
 198	else if (lock_flags & XFS_ILOCK_SHARED)
 199		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
 200}
 201
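/*
 * Sketch (hypothetical, not part of this file): per the ordering rules
 * above, an extent-manipulation path that must invalidate the page
 * cache race-free takes the locks outermost first:
 */
static inline void
xfs_example_extent_manip_lock(
	struct xfs_inode	*ip)
{
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
	/* ... flush and invalidate the page cache here ... */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	/* ... modify extents, then drop all three locks ... */
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
}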
 202/*
 203 * This is just like xfs_ilock(), except that the caller
 204 * is guaranteed not to sleep.  It returns 1 if it gets
 205 * the requested locks and 0 otherwise.  If the IO lock is
 206 * obtained but the inode lock cannot be, then the IO lock
 207 * is dropped before returning.
 208 *
 209 * ip -- the inode being locked
 210 * lock_flags -- this parameter indicates which of the inode's locks are
 211 *       to be locked.  See the comment for xfs_ilock() for a list
 212 *	 of valid values.
 213 */
 214int
 215xfs_ilock_nowait(
 216	xfs_inode_t		*ip,
 217	uint			lock_flags)
 218{
 219	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
 220
 221	/*
 222	 * You can't set both SHARED and EXCL for the same lock,
 223	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
 224	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
 225	 */
 226	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 227	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 228	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 229	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 230	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 231	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 232	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 233
 234	if (lock_flags & XFS_IOLOCK_EXCL) {
 235		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
 236			goto out;
 237	} else if (lock_flags & XFS_IOLOCK_SHARED) {
 238		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
 239			goto out;
 240	}
 241
 242	if (lock_flags & XFS_MMAPLOCK_EXCL) {
 243		if (!mrtryupdate(&ip->i_mmaplock))
 244			goto out_undo_iolock;
 245	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
 246		if (!mrtryaccess(&ip->i_mmaplock))
 247			goto out_undo_iolock;
 248	}
 249
 250	if (lock_flags & XFS_ILOCK_EXCL) {
 251		if (!mrtryupdate(&ip->i_lock))
 252			goto out_undo_mmaplock;
 253	} else if (lock_flags & XFS_ILOCK_SHARED) {
 254		if (!mrtryaccess(&ip->i_lock))
 255			goto out_undo_mmaplock;
 256	}
 257	return 1;
 258
 259out_undo_mmaplock:
 260	if (lock_flags & XFS_MMAPLOCK_EXCL)
 261		mrunlock_excl(&ip->i_mmaplock);
 262	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 263		mrunlock_shared(&ip->i_mmaplock);
 264out_undo_iolock:
 265	if (lock_flags & XFS_IOLOCK_EXCL)
 266		up_write(&VFS_I(ip)->i_rwsem);
 267	else if (lock_flags & XFS_IOLOCK_SHARED)
 268		up_read(&VFS_I(ip)->i_rwsem);
 269out:
 270	return 0;
 271}
 272
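/*
 * Usage sketch (hypothetical): callers that must not block treat a 0
 * return as "try again later":
 */
static inline int
xfs_example_trylock(
	struct xfs_inode	*ip)
{
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return -EAGAIN;	/* retry from a context that can sleep */
	/* ... short, non-blocking work under the ilock ... */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}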
 273/*
 274 * xfs_iunlock() is used to drop the inode locks acquired with
 275 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 276 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 277 * that we know which locks to drop.
 278 *
 279 * ip -- the inode being unlocked
 280 * lock_flags -- this parameter indicates which of the inode's locks are
 281 *       to be unlocked.  See the comment for xfs_ilock() for a list
 282 *	 of valid values for this parameter.
 283 *
 284 */
 285void
 286xfs_iunlock(
 287	xfs_inode_t		*ip,
 288	uint			lock_flags)
 289{
 290	/*
 291	 * You can't set both SHARED and EXCL for the same lock,
 292	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
 293	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
 294	 */
 295	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 296	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 297	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 298	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 299	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 300	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 301	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 302	ASSERT(lock_flags != 0);
 303
 304	if (lock_flags & XFS_IOLOCK_EXCL)
 305		up_write(&VFS_I(ip)->i_rwsem);
 306	else if (lock_flags & XFS_IOLOCK_SHARED)
 307		up_read(&VFS_I(ip)->i_rwsem);
 308
 309	if (lock_flags & XFS_MMAPLOCK_EXCL)
 310		mrunlock_excl(&ip->i_mmaplock);
 311	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 312		mrunlock_shared(&ip->i_mmaplock);
 313
 314	if (lock_flags & XFS_ILOCK_EXCL)
 315		mrunlock_excl(&ip->i_lock);
 316	else if (lock_flags & XFS_ILOCK_SHARED)
 317		mrunlock_shared(&ip->i_lock);
 318
 319	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
 320}
 321
 322/*
 323 * Give up write locks.  The i/o lock cannot be held nested
 324 * if it is being demoted.
 325 */
 326void
 327xfs_ilock_demote(
 328	xfs_inode_t		*ip,
 329	uint			lock_flags)
 330{
 331	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
 332	ASSERT((lock_flags &
 333		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
 334
 335	if (lock_flags & XFS_ILOCK_EXCL)
 336		mrdemote(&ip->i_lock);
 337	if (lock_flags & XFS_MMAPLOCK_EXCL)
 338		mrdemote(&ip->i_mmaplock);
 339	if (lock_flags & XFS_IOLOCK_EXCL)
 340		downgrade_write(&VFS_I(ip)->i_rwsem);
 341
 342	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
 343}
 344
 345#if defined(DEBUG) || defined(XFS_WARN)
 346int
 347xfs_isilocked(
 348	xfs_inode_t		*ip,
 349	uint			lock_flags)
 350{
 351	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
 352		if (!(lock_flags & XFS_ILOCK_SHARED))
 353			return !!ip->i_lock.mr_writer;
 354		return rwsem_is_locked(&ip->i_lock.mr_lock);
 355	}
 356
 357	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
 358		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
 359			return !!ip->i_mmaplock.mr_writer;
 360		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
 361	}
 362
 363	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
 364		if (!(lock_flags & XFS_IOLOCK_SHARED))
 365			return !debug_locks ||
 366				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
 367		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
 368	}
 369
 370	ASSERT(0);
 371	return 0;
 372}
 373#endif
 374
 375/*
 376 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 377 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 378 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 379 * errors and warnings.
 380 */
 381#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
 382static bool
 383xfs_lockdep_subclass_ok(
 384	int subclass)
 385{
 386	return subclass < MAX_LOCKDEP_SUBCLASSES;
 387}
 388#else
 389#define xfs_lockdep_subclass_ok(subclass)	(true)
 390#endif
 391
 392/*
 393 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 394 * value. This can be called for any type of inode lock combination, including
 395 * parent locking. Care must be taken to ensure we don't overrun the subclass
 396 * storage fields in the class mask we build.
 397 */
 398static inline int
 399xfs_lock_inumorder(int lock_mode, int subclass)
 400{
 401	int	class = 0;
 402
 403	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
 404			      XFS_ILOCK_RTSUM)));
 405	ASSERT(xfs_lockdep_subclass_ok(subclass));
 406
 407	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
 408		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
 409		class += subclass << XFS_IOLOCK_SHIFT;
 410	}
 411
 412	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
 413		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
 414		class += subclass << XFS_MMAPLOCK_SHIFT;
 415	}
 416
 417	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
 418		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
 419		class += subclass << XFS_ILOCK_SHIFT;
 420	}
 421
 422	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
 423}
 424
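/*
 * Worked example (sketch): locking the third inode of a set with
 * XFS_ILOCK_EXCL passes subclass 2, so this returns
 * XFS_ILOCK_EXCL | (2 << XFS_ILOCK_SHIFT).  Each inode in the set thus
 * carries a distinct lockdep subclass, and the ordered acquisition in
 * xfs_lock_inodes() below does not trigger false lock-order reports.
 */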
 425/*
 426 * The following routine will lock n inodes in exclusive mode.  We assume the
 427 * caller calls us with the inodes in i_ino order.
 428 *
 429 * We need to detect deadlock where an inode that we lock is in the AIL and we
 430 * start waiting for another inode that is locked by a thread in a long running
 431 * transaction (such as truncate). This can result in deadlock since the long
 432 * running trans might need to wait for the inode we just locked in order to
 433 * push the tail and free space in the log.
 434 *
 435 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 436 * the iolock, the mmaplock or the ilock, but not more than one type at a time. If we
 437 * lock more than one at a time, lockdep will report false positives saying we
 438 * have violated locking orders.
 439 */
 440static void
 441xfs_lock_inodes(
 442	struct xfs_inode	**ips,
 443	int			inodes,
 444	uint			lock_mode)
 445{
 446	int			attempts = 0, i, j, try_lock;
 447	struct xfs_log_item	*lp;
 448
 449	/*
 450	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
 451	 * support an arbitrary depth of locking here, but absolute limits on
 452	 * inodes depend on the type of locking and the limits placed by
 453	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
 454	 * the asserts.
 455	 */
 456	ASSERT(ips && inodes >= 2 && inodes <= 5);
 457	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
 458			    XFS_ILOCK_EXCL));
 459	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
 460			      XFS_ILOCK_SHARED)));
 461	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
 462		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
 463	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
 464		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
 465
 466	if (lock_mode & XFS_IOLOCK_EXCL) {
 467		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
 468	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
 469		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
 470
 471	try_lock = 0;
 472	i = 0;
 473again:
 474	for (; i < inodes; i++) {
 475		ASSERT(ips[i]);
 476
 477		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
 478			continue;
 479
 480		/*
 481		 * If try_lock is not set yet, make sure all locked inodes are
 482		 * not in the AIL.  If any are, set try_lock to be used later.
 483		 */
 484		if (!try_lock) {
 485			for (j = (i - 1); j >= 0 && !try_lock; j--) {
 486				lp = &ips[j]->i_itemp->ili_item;
 487				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
 488					try_lock++;
 489			}
 490		}
 491
 492		/*
 493		 * If any of the previous locks we have locked is in the AIL,
 494		 * we must TRY to get the second and subsequent locks. If
 495		 * we can't get any, we must release all we have
 496		 * and try again.
 497		 */
 498		if (!try_lock) {
 499			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
 500			continue;
 501		}
 502
 503		/* try_lock means we have an inode locked that is in the AIL. */
 504		ASSERT(i != 0);
 505		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
 506			continue;
 507
 508		/*
 509		 * Unlock all previous guys and try again.  xfs_iunlock will try
 510		 * to push the tail if the inode is in the AIL.
 511		 */
 512		attempts++;
 513		for (j = i - 1; j >= 0; j--) {
 514			/*
 515			 * Check to see if we've already unlocked this one.  Not
 516			 * the first one going back, and the inode ptr is the
 517			 * same.
 518			 */
 519			if (j != (i - 1) && ips[j] == ips[j + 1])
 520				continue;
 521
 522			xfs_iunlock(ips[j], lock_mode);
 523		}
 524
 525		if ((attempts % 5) == 0) {
 526			delay(1); /* Don't just spin the CPU */
 527		}
 528		i = 0;
 529		try_lock = 0;
 530		goto again;
 531	}
 532}
 533
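/*
 * Usage sketch (hypothetical): a rename-style caller sorts the inodes
 * by i_ino and then locks the whole set in one call:
 *
 *	struct xfs_inode	*ips[4] = { dp1, dp2, ip1, ip2 };
 *
 *	xfs_lock_inodes(ips, 4, XFS_ILOCK_EXCL);	(ips sorted by i_ino)
 */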
 534/*
 535 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 536 * the mmaplock or the ilock, but not more than one type at a time. If we lock
 537 * more than one at a time, lockdep will report false positives saying we have
 538 * violated locking orders.  The iolock must be double-locked separately since
 539 * we use i_rwsem for that.  We now support taking one lock EXCL and the other
 540 * SHARED.
 541 */
 542void
 543xfs_lock_two_inodes(
 544	struct xfs_inode	*ip0,
 545	uint			ip0_mode,
 546	struct xfs_inode	*ip1,
 547	uint			ip1_mode)
 548{
 549	struct xfs_inode	*temp;
 550	uint			mode_temp;
 551	int			attempts = 0;
 552	struct xfs_log_item	*lp;
 553
 554	ASSERT(hweight32(ip0_mode) == 1);
 555	ASSERT(hweight32(ip1_mode) == 1);
 556	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
 557	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
 558	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 559	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 560	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 561	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 562	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 563	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 564	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 565	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 566
 567	ASSERT(ip0->i_ino != ip1->i_ino);
 568
 569	if (ip0->i_ino > ip1->i_ino) {
 570		temp = ip0;
 571		ip0 = ip1;
 572		ip1 = temp;
 573		mode_temp = ip0_mode;
 574		ip0_mode = ip1_mode;
 575		ip1_mode = mode_temp;
 576	}
 577
 578 again:
 579	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
 580
 581	/*
 582	 * If the first lock we have locked is in the AIL, we must TRY to get
 583	 * the second lock. If we can't get it, we must release the first one
 584	 * and try again.
 585	 */
 586	lp = &ip0->i_itemp->ili_item;
 587	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
 588		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
 589			xfs_iunlock(ip0, ip0_mode);
 590			if ((++attempts % 5) == 0)
 591				delay(1); /* Don't just spin the CPU */
 592			goto again;
 593		}
 594	} else {
 595		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
 596	}
 597}
 598
 599uint
 600xfs_ip2xflags(
 601	struct xfs_inode	*ip)
 602{
 603	uint			flags = 0;
 604
 605	if (ip->i_diflags & XFS_DIFLAG_ANY) {
 606		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
 607			flags |= FS_XFLAG_REALTIME;
 608		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
 609			flags |= FS_XFLAG_PREALLOC;
 610		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
 611			flags |= FS_XFLAG_IMMUTABLE;
 612		if (ip->i_diflags & XFS_DIFLAG_APPEND)
 613			flags |= FS_XFLAG_APPEND;
 614		if (ip->i_diflags & XFS_DIFLAG_SYNC)
 615			flags |= FS_XFLAG_SYNC;
 616		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
 617			flags |= FS_XFLAG_NOATIME;
 618		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
 619			flags |= FS_XFLAG_NODUMP;
 620		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
 621			flags |= FS_XFLAG_RTINHERIT;
 622		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
 623			flags |= FS_XFLAG_PROJINHERIT;
 624		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
 625			flags |= FS_XFLAG_NOSYMLINKS;
 626		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
 627			flags |= FS_XFLAG_EXTSIZE;
 628		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
 629			flags |= FS_XFLAG_EXTSZINHERIT;
 630		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
 631			flags |= FS_XFLAG_NODEFRAG;
 632		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
 633			flags |= FS_XFLAG_FILESTREAM;
 634	}
 635
 636	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
 637		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
 638			flags |= FS_XFLAG_DAX;
 639		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
 640			flags |= FS_XFLAG_COWEXTSIZE;
 641	}
 642
 643	if (XFS_IFORK_Q(ip))
 644		flags |= FS_XFLAG_HASATTR;
 645	return flags;
 646}
 647
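/*
 * Sketch (not part of this file): the mask built above is what lands in
 * struct fsxattr for the FS_IOC_FSGETXATTR ioctl, so e.g. a realtime
 * file with an extent size hint reports
 * FS_XFLAG_REALTIME | FS_XFLAG_EXTSIZE.
 */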
 648/*
 649 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 650 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 651 * ci_name->name will point to the actual name (caller must free) or
 652 * will be set to NULL if an exact match is found.
 653 */
 654int
 655xfs_lookup(
 656	xfs_inode_t		*dp,
 657	struct xfs_name		*name,
 658	xfs_inode_t		**ipp,
 659	struct xfs_name		*ci_name)
 660{
 661	xfs_ino_t		inum;
 662	int			error;
 663
 664	trace_xfs_lookup(dp, name);
 665
 666	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 667		return -EIO;
 668
 669	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
 670	if (error)
 671		goto out_unlock;
 672
 673	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
 674	if (error)
 675		goto out_free_name;
 676
 677	return 0;
 678
 679out_free_name:
 680	if (ci_name)
 681		kmem_free(ci_name->name);
 682out_unlock:
 683	*ipp = NULL;
 684	return error;
 685}
 686
 687/* Propagate di_flags from a parent inode to a child inode. */
 688static void
 689xfs_inode_inherit_flags(
 690	struct xfs_inode	*ip,
 691	const struct xfs_inode	*pip)
 692{
 693	unsigned int		di_flags = 0;
 694	xfs_failaddr_t		failaddr;
 695	umode_t			mode = VFS_I(ip)->i_mode;
 696
 697	if (S_ISDIR(mode)) {
 698		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
 699			di_flags |= XFS_DIFLAG_RTINHERIT;
 700		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
 701			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
 702			ip->i_extsize = pip->i_extsize;
 703		}
 704		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
 705			di_flags |= XFS_DIFLAG_PROJINHERIT;
 706	} else if (S_ISREG(mode)) {
 707		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
 708		    xfs_sb_version_hasrealtime(&ip->i_mount->m_sb))
 709			di_flags |= XFS_DIFLAG_REALTIME;
 710		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
 711			di_flags |= XFS_DIFLAG_EXTSIZE;
 712			ip->i_extsize = pip->i_extsize;
 713		}
 714	}
 715	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
 716	    xfs_inherit_noatime)
 717		di_flags |= XFS_DIFLAG_NOATIME;
 718	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
 719	    xfs_inherit_nodump)
 720		di_flags |= XFS_DIFLAG_NODUMP;
 721	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
 722	    xfs_inherit_sync)
 723		di_flags |= XFS_DIFLAG_SYNC;
 724	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
 725	    xfs_inherit_nosymlinks)
 726		di_flags |= XFS_DIFLAG_NOSYMLINKS;
 727	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
 728	    xfs_inherit_nodefrag)
 729		di_flags |= XFS_DIFLAG_NODEFRAG;
 730	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
 731		di_flags |= XFS_DIFLAG_FILESTREAM;
 732
 733	ip->i_diflags |= di_flags;
 734
 735	/*
 736	 * Inode verifiers on older kernels only check that the extent size
 737	 * hint is an integer multiple of the rt extent size on realtime files.
 738	 * They did not check the hint alignment on a directory with both
 739	 * rtinherit and extszinherit flags set.  If the misaligned hint is
 740	 * propagated from a directory into a new realtime file, new file
 741	 * allocations will fail due to math errors in the rt allocator and/or
 742	 * trip the verifiers.  Validate the hint settings in the new file so
 743	 * that we don't let broken hints propagate.
 744	 */
 745	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
 746			VFS_I(ip)->i_mode, ip->i_diflags);
 747	if (failaddr) {
 748		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
 749				   XFS_DIFLAG_EXTSZINHERIT);
 750		ip->i_extsize = 0;
 751	}
 752}
 753
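/*
 * Worked example (sketch): a directory carrying XFS_DIFLAG_RTINHERIT |
 * XFS_DIFLAG_EXTSZINHERIT with i_extsize = 16 yields a new regular file
 * with XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE and i_extsize = 16,
 * unless xfs_inode_validate_extsize() rejects the hint, in which case
 * the hint flags and value are cleared rather than propagated.
 */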
 754/* Propagate di_flags2 from a parent inode to a child inode. */
 755static void
 756xfs_inode_inherit_flags2(
 757	struct xfs_inode	*ip,
 758	const struct xfs_inode	*pip)
 759{
 760	xfs_failaddr_t		failaddr;
 761
 762	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
 763		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
 764		ip->i_cowextsize = pip->i_cowextsize;
 765	}
 766	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
 767		ip->i_diflags2 |= XFS_DIFLAG2_DAX;
 768
 769	/* Don't let invalid cowextsize hints propagate. */
 770	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
 771			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
 772	if (failaddr) {
 773		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
 774		ip->i_cowextsize = 0;
 775	}
 776}
 777
 778/*
 779 * Initialise a newly allocated inode and return the in-core inode to the
 780 * caller locked exclusively.
 781 */
 782int
 783xfs_init_new_inode(
 784	struct user_namespace	*mnt_userns,
 785	struct xfs_trans	*tp,
 786	struct xfs_inode	*pip,
 787	xfs_ino_t		ino,
 788	umode_t			mode,
 789	xfs_nlink_t		nlink,
 790	dev_t			rdev,
 791	prid_t			prid,
 792	bool			init_xattrs,
 793	struct xfs_inode	**ipp)
 794{
 795	struct inode		*dir = pip ? VFS_I(pip) : NULL;
 796	struct xfs_mount	*mp = tp->t_mountp;
 797	struct xfs_inode	*ip;
 798	unsigned int		flags;
 799	int			error;
 800	struct timespec64	tv;
 801	struct inode		*inode;
 802
 803	/*
 804	 * Protect against obviously corrupt allocation btree records. Later
 805	 * xfs_iget checks will catch re-allocation of other active in-memory
 806	 * and on-disk inodes. If we don't catch reallocating the parent inode
 807	 * here we will deadlock in xfs_iget() so we have to do these checks
 808	 * first.
 809	 */
 810	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
 811		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
 812		return -EFSCORRUPTED;
 813	}
 814
 815	/*
 816	 * Get the in-core inode with the lock held exclusively to prevent
 817	 * others from looking at it until we're done.
 818	 */
 819	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
 820	if (error)
 821		return error;
 822
 823	ASSERT(ip != NULL);
 824	inode = VFS_I(ip);
 825	set_nlink(inode, nlink);
 826	inode->i_rdev = rdev;
 827	ip->i_projid = prid;
 828
 829	if (dir && !(dir->i_mode & S_ISGID) &&
 830	    (mp->m_flags & XFS_MOUNT_GRPID)) {
 831		inode_fsuid_set(inode, mnt_userns);
 832		inode->i_gid = dir->i_gid;
 833		inode->i_mode = mode;
 834	} else {
 835		inode_init_owner(mnt_userns, inode, dir, mode);
 836	}
 837
 838	/*
 839	 * If the group ID of the new file does not match the effective group
 840	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
 841	 * (and only if the irix_sgid_inherit compatibility variable is set).
 842	 */
 843	if (irix_sgid_inherit &&
 844	    (inode->i_mode & S_ISGID) &&
 845	    !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
 846		inode->i_mode &= ~S_ISGID;
 847
 848	ip->i_disk_size = 0;
 849	ip->i_df.if_nextents = 0;
 850	ASSERT(ip->i_nblocks == 0);
 851
 852	tv = current_time(inode);
 853	inode->i_mtime = tv;
 854	inode->i_atime = tv;
 855	inode->i_ctime = tv;
 856
 857	ip->i_extsize = 0;
 858	ip->i_diflags = 0;
 859
 860	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
 861		inode_set_iversion(inode, 1);
 862		ip->i_cowextsize = 0;
 863		ip->i_crtime = tv;
 864	}
 865
 866	flags = XFS_ILOG_CORE;
 867	switch (mode & S_IFMT) {
 868	case S_IFIFO:
 869	case S_IFCHR:
 870	case S_IFBLK:
 871	case S_IFSOCK:
 872		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
 873		flags |= XFS_ILOG_DEV;
 874		break;
 875	case S_IFREG:
 876	case S_IFDIR:
 877		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
 878			xfs_inode_inherit_flags(ip, pip);
 879		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
 880			xfs_inode_inherit_flags2(ip, pip);
 881		fallthrough;
 882	case S_IFLNK:
 883		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
 884		ip->i_df.if_bytes = 0;
 885		ip->i_df.if_u1.if_root = NULL;
 886		break;
 887	default:
 888		ASSERT(0);
 889	}
 890
 891	/*
 892	 * If we need to create attributes immediately after allocating the
 893	 * inode, initialise an empty attribute fork right now. We use the
 894	 * default fork offset for attributes here as we don't know exactly what
 895	 * size or how many attributes we might be adding. We can do this
 896	 * safely here because we know the data fork is completely empty and
 897	 * this saves us from needing to run a separate transaction to set the
 898	 * fork offset in the immediate future.
 899	 */
 900	if (init_xattrs && xfs_sb_version_hasattr(&mp->m_sb)) {
 901		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
 902		ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
 903	}
 904
 905	/*
 906	 * Log the new values stuffed into the inode.
 907	 */
 908	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 909	xfs_trans_log_inode(tp, ip, flags);
 910
 911	/* now that we have an i_mode we can setup the inode structure */
 912	xfs_setup_inode(ip);
 913
 914	*ipp = ip;
 915	return 0;
 916}
 917
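/*
 * Usage sketch (mirrors xfs_create() below): callers allocate the
 * on-disk inode first, then initialise the in-core inode in the same
 * transaction:
 *
 *	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
 *	if (!error)
 *		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
 *				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
 */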
 918/*
 919 * Decrement the link count on an inode & log the change.  If this causes the
 920 * link count to go to zero, move the inode to the AGI unlinked list so that it can
 921 * be freed when the last active reference goes away via xfs_inactive().
 922 */
 923static int			/* error */
 924xfs_droplink(
 925	xfs_trans_t *tp,
 926	xfs_inode_t *ip)
 927{
 928	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 929
 930	drop_nlink(VFS_I(ip));
 931	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 932
 933	if (VFS_I(ip)->i_nlink)
 934		return 0;
 935
 936	return xfs_iunlink(tp, ip);
 937}
 938
 939/*
 940 * Increment the link count on an inode & log the change.
 941 */
 942static void
 943xfs_bumplink(
 944	xfs_trans_t *tp,
 945	xfs_inode_t *ip)
 946{
 947	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 948
 949	inc_nlink(VFS_I(ip));
 950	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 951}
 952
 953int
 954xfs_create(
 955	struct user_namespace	*mnt_userns,
 956	xfs_inode_t		*dp,
 957	struct xfs_name		*name,
 958	umode_t			mode,
 959	dev_t			rdev,
 960	bool			init_xattrs,
 961	xfs_inode_t		**ipp)
 962{
 963	int			is_dir = S_ISDIR(mode);
 964	struct xfs_mount	*mp = dp->i_mount;
 965	struct xfs_inode	*ip = NULL;
 966	struct xfs_trans	*tp = NULL;
 967	int			error;
 968	bool                    unlock_dp_on_error = false;
 969	prid_t			prid;
 970	struct xfs_dquot	*udqp = NULL;
 971	struct xfs_dquot	*gdqp = NULL;
 972	struct xfs_dquot	*pdqp = NULL;
 973	struct xfs_trans_res	*tres;
 974	uint			resblks;
 975	xfs_ino_t		ino;
 976
 977	trace_xfs_create(dp, name);
 978
 979	if (XFS_FORCED_SHUTDOWN(mp))
 980		return -EIO;
 981
 982	prid = xfs_get_initial_prid(dp);
 983
 984	/*
 985	 * Make sure that we have allocated dquot(s) on disk.
 986	 */
 987	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns),
 988			mapped_fsgid(mnt_userns), prid,
 989			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 990			&udqp, &gdqp, &pdqp);
 991	if (error)
 992		return error;
 993
 994	if (is_dir) {
 995		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
 996		tres = &M_RES(mp)->tr_mkdir;
 997	} else {
 998		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
 999		tres = &M_RES(mp)->tr_create;
1000	}
1001
1002	/*
1003	 * Initially assume that the file does not exist and
1004	 * reserve the resources for that case.  If that is not
1005	 * the case we'll drop the one we have and get a more
1006	 * appropriate transaction later.
1007	 */
1008	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1009			&tp);
1010	if (error == -ENOSPC) {
1011		/* flush outstanding delalloc blocks and retry */
1012		xfs_flush_inodes(mp);
1013		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
1014				resblks, &tp);
1015	}
1016	if (error)
1017		goto out_release_dquots;
1018
1019	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1020	unlock_dp_on_error = true;
1021
1022	error = xfs_iext_count_may_overflow(dp, XFS_DATA_FORK,
1023			XFS_IEXT_DIR_MANIP_CNT(mp));
1024	if (error)
1025		goto out_trans_cancel;
1026
1027	/*
1028	 * A newly created regular or special file just has one directory
1029	 * entry pointing to it, but a directory also has the "." entry
1030	 * pointing to itself.
1031	 */
1032	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1033	if (!error)
1034		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
1035				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
1036	if (error)
1037		goto out_trans_cancel;
1038
1039	/*
1040	 * Now we join the directory inode to the transaction.  We do not do it
1041	 * earlier because xfs_dialloc might commit the previous transaction
1042	 * (and release all the locks).  An error from here on will result in
1043	 * the transaction cancel unlocking dp so don't do it explicitly in the
1044	 * error path.
1045	 */
1046	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1047	unlock_dp_on_error = false;
1048
1049	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1050					resblks - XFS_IALLOC_SPACE_RES(mp));
1051	if (error) {
1052		ASSERT(error != -ENOSPC);
1053		goto out_trans_cancel;
1054	}
1055	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1056	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1057
1058	if (is_dir) {
1059		error = xfs_dir_init(tp, ip, dp);
1060		if (error)
1061			goto out_trans_cancel;
1062
1063		xfs_bumplink(tp, dp);
1064	}
1065
1066	/*
1067	 * If this is a synchronous mount, make sure that the
1068	 * create transaction goes to disk before returning to
1069	 * the user.
1070	 */
1071	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1072		xfs_trans_set_sync(tp);
1073
1074	/*
1075	 * Attach the dquot(s) to the inodes and modify them incore.
1076	 * The ids of the inode couldn't have changed since the new
1077	 * inode has been locked ever since it was created.
1078	 */
1079	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1080
1081	error = xfs_trans_commit(tp);
1082	if (error)
1083		goto out_release_inode;
1084
1085	xfs_qm_dqrele(udqp);
1086	xfs_qm_dqrele(gdqp);
1087	xfs_qm_dqrele(pdqp);
1088
1089	*ipp = ip;
1090	return 0;
1091
1092 out_trans_cancel:
1093	xfs_trans_cancel(tp);
1094 out_release_inode:
1095	/*
1096	 * Wait until after the current transaction is aborted to finish the
1097	 * setup of the inode and release the inode.  This prevents recursive
1098	 * transactions and deadlocks from xfs_inactive.
1099	 */
1100	if (ip) {
1101		xfs_finish_inode_setup(ip);
1102		xfs_irele(ip);
1103	}
1104 out_release_dquots:
1105	xfs_qm_dqrele(udqp);
1106	xfs_qm_dqrele(gdqp);
1107	xfs_qm_dqrele(pdqp);
1108
1109	if (unlock_dp_on_error)
1110		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1111	return error;
1112}
1113
1114int
1115xfs_create_tmpfile(
1116	struct user_namespace	*mnt_userns,
1117	struct xfs_inode	*dp,
1118	umode_t			mode,
1119	struct xfs_inode	**ipp)
1120{
1121	struct xfs_mount	*mp = dp->i_mount;
1122	struct xfs_inode	*ip = NULL;
1123	struct xfs_trans	*tp = NULL;
1124	int			error;
1125	prid_t                  prid;
1126	struct xfs_dquot	*udqp = NULL;
1127	struct xfs_dquot	*gdqp = NULL;
1128	struct xfs_dquot	*pdqp = NULL;
1129	struct xfs_trans_res	*tres;
1130	uint			resblks;
1131	xfs_ino_t		ino;
1132
1133	if (XFS_FORCED_SHUTDOWN(mp))
1134		return -EIO;
1135
1136	prid = xfs_get_initial_prid(dp);
1137
1138	/*
1139	 * Make sure that we have allocated dquot(s) on disk.
1140	 */
1141	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns),
1142			mapped_fsgid(mnt_userns), prid,
1143			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1144			&udqp, &gdqp, &pdqp);
1145	if (error)
1146		return error;
1147
1148	resblks = XFS_IALLOC_SPACE_RES(mp);
1149	tres = &M_RES(mp)->tr_create_tmpfile;
1150
1151	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1152			&tp);
1153	if (error)
1154		goto out_release_dquots;
1155
1156	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1157	if (!error)
1158		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
1159				0, 0, prid, false, &ip);
1160	if (error)
1161		goto out_trans_cancel;
1162
1163	if (mp->m_flags & XFS_MOUNT_WSYNC)
1164		xfs_trans_set_sync(tp);
1165
1166	/*
1167	 * Attach the dquot(s) to the inodes and modify them incore.
1168	 * The ids of the inode couldn't have changed since the new
1169	 * inode has been locked ever since it was created.
1170	 */
1171	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1172
1173	error = xfs_iunlink(tp, ip);
1174	if (error)
1175		goto out_trans_cancel;
1176
1177	error = xfs_trans_commit(tp);
1178	if (error)
1179		goto out_release_inode;
1180
1181	xfs_qm_dqrele(udqp);
1182	xfs_qm_dqrele(gdqp);
1183	xfs_qm_dqrele(pdqp);
1184
1185	*ipp = ip;
1186	return 0;
1187
1188 out_trans_cancel:
1189	xfs_trans_cancel(tp);
1190 out_release_inode:
1191	/*
1192	 * Wait until after the current transaction is aborted to finish the
1193	 * setup of the inode and release the inode.  This prevents recursive
1194	 * transactions and deadlocks from xfs_inactive.
1195	 */
1196	if (ip) {
1197		xfs_finish_inode_setup(ip);
1198		xfs_irele(ip);
1199	}
1200 out_release_dquots:
1201	xfs_qm_dqrele(udqp);
1202	xfs_qm_dqrele(gdqp);
1203	xfs_qm_dqrele(pdqp);
1204
1205	return error;
1206}
1207
1208int
1209xfs_link(
1210	xfs_inode_t		*tdp,
1211	xfs_inode_t		*sip,
1212	struct xfs_name		*target_name)
1213{
1214	xfs_mount_t		*mp = tdp->i_mount;
1215	xfs_trans_t		*tp;
1216	int			error;
1217	int			resblks;
1218
1219	trace_xfs_link(tdp, target_name);
1220
1221	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1222
1223	if (XFS_FORCED_SHUTDOWN(mp))
1224		return -EIO;
1225
1226	error = xfs_qm_dqattach(sip);
1227	if (error)
1228		goto std_return;
1229
1230	error = xfs_qm_dqattach(tdp);
1231	if (error)
1232		goto std_return;
1233
1234	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1235	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
1236	if (error == -ENOSPC) {
1237		resblks = 0;
1238		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
1239	}
1240	if (error)
1241		goto std_return;
1242
1243	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1244
1245	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1246	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1247
1248	error = xfs_iext_count_may_overflow(tdp, XFS_DATA_FORK,
1249			XFS_IEXT_DIR_MANIP_CNT(mp));
1250	if (error)
1251		goto error_return;
1252
1253	/*
1254	 * If we are using project inheritance, we only allow hard link
1255	 * creation in our tree when the project IDs are the same; else
1256	 * the tree quota mechanism could be circumvented.
1257	 */
1258	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
1259		     tdp->i_projid != sip->i_projid)) {
1260		error = -EXDEV;
1261		goto error_return;
1262	}
1263
1264	if (!resblks) {
1265		error = xfs_dir_canenter(tp, tdp, target_name);
1266		if (error)
1267			goto error_return;
1268	}
1269
1270	/*
1271	 * Handle initial link state of O_TMPFILE inode
1272	 */
1273	if (VFS_I(sip)->i_nlink == 0) {
1274		struct xfs_perag	*pag;
1275
1276		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
1277		error = xfs_iunlink_remove(tp, pag, sip);
1278		xfs_perag_put(pag);
1279		if (error)
1280			goto error_return;
1281	}
1282
1283	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1284				   resblks);
1285	if (error)
1286		goto error_return;
1287	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1288	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1289
1290	xfs_bumplink(tp, sip);
1291
1292	/*
1293	 * If this is a synchronous mount, make sure that the
1294	 * link transaction goes to disk before returning to
1295	 * the user.
1296	 */
1297	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1298		xfs_trans_set_sync(tp);
1299
1300	return xfs_trans_commit(tp);
1301
1302 error_return:
1303	xfs_trans_cancel(tp);
1304 std_return:
1305	return error;
1306}
1307
1308/* Clear the reflink flag and the cowblocks tag if possible. */
1309static void
1310xfs_itruncate_clear_reflink_flags(
1311	struct xfs_inode	*ip)
1312{
1313	struct xfs_ifork	*dfork;
1314	struct xfs_ifork	*cfork;
1315
1316	if (!xfs_is_reflink_inode(ip))
1317		return;
1318	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1319	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1320	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1321		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1322	if (cfork->if_bytes == 0)
1323		xfs_inode_clear_cowblocks_tag(ip);
1324}
1325
1326/*
1327 * Free up the underlying blocks past new_size.  The new size must be smaller
1328 * than the current size.  This routine can be used both for the attribute and
1329 * data fork, and does not modify the inode size, which is left to the caller.
1330 *
1331 * The transaction passed to this routine must have made a permanent log
1332 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1333 * given transaction and start new ones, so make sure everything involved in
1334 * the transaction is tidy before calling here.  Some transaction will be
1335 * returned to the caller to be committed.  The incoming transaction must
1336 * already include the inode, and both inode locks must be held exclusively.
1337 * The inode must also be "held" within the transaction.  On return the inode
1338 * will be "held" within the returned transaction.  This routine does NOT
1339 * require any disk space to be reserved for it within the transaction.
1340 *
1341 * If we get an error, we must return with the inode locked and linked into the
1342 * current transaction. This keeps things simple for the higher level code,
1343 * because it always knows that the inode is locked and held in the transaction
1344 * that returns to it whether errors occur or not.  We don't mark the inode
1345 * dirty on error so that transactions can be easily aborted if possible.
1346 */
1347int
1348xfs_itruncate_extents_flags(
1349	struct xfs_trans	**tpp,
1350	struct xfs_inode	*ip,
1351	int			whichfork,
1352	xfs_fsize_t		new_size,
1353	int			flags)
1354{
1355	struct xfs_mount	*mp = ip->i_mount;
1356	struct xfs_trans	*tp = *tpp;
1357	xfs_fileoff_t		first_unmap_block;
1358	xfs_filblks_t		unmap_len;
1359	int			error = 0;
1360
1361	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1362	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1363	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1364	ASSERT(new_size <= XFS_ISIZE(ip));
1365	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1366	ASSERT(ip->i_itemp != NULL);
1367	ASSERT(ip->i_itemp->ili_lock_flags == 0);
1368	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1369
1370	trace_xfs_itruncate_extents_start(ip, new_size);
1371
1372	flags |= xfs_bmapi_aflag(whichfork);
1373
1374	/*
1375	 * Since it is possible for space to become allocated beyond
1376	 * the end of the file (in a crash where the space is allocated
1377	 * but the inode size is not yet updated), simply remove any
1378	 * blocks which show up between the new EOF and the maximum
1379	 * possible file size.
1380	 *
1381	 * We have to free all the blocks to the bmbt maximum offset, even if
1382	 * the page cache can't scale that far.
1383	 */
1384	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1385	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
1386		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
1387		return 0;
1388	}
1389
1390	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
1391	while (unmap_len > 0) {
1392		ASSERT(tp->t_firstblock == NULLFSBLOCK);
1393		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
1394				flags, XFS_ITRUNC_MAX_EXTENTS);
1395		if (error)
1396			goto out;
1397
1398		/* free the just unmapped extents */
1399		error = xfs_defer_finish(&tp);
1400		if (error)
1401			goto out;
1402	}
1403
1404	if (whichfork == XFS_DATA_FORK) {
1405		/* Remove all pending CoW reservations. */
1406		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1407				first_unmap_block, XFS_MAX_FILEOFF, true);
1408		if (error)
1409			goto out;
1410
1411		xfs_itruncate_clear_reflink_flags(ip);
1412	}
1413
1414	/*
1415	 * Always re-log the inode so that our permanent transaction can keep
1416	 * on rolling it forward in the log.
1417	 */
1418	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1419
1420	trace_xfs_itruncate_extents_end(ip, new_size);
1421
1422out:
1423	*tpp = tp;
1424	return error;
1425}
1426
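/*
 * Caller sketch (mirrors xfs_inactive_truncate() below): the inode
 * stays joined to *tpp across internal transaction rolls, so the caller
 * commits whatever transaction comes back:
 *
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
 *	if (error)
 *		goto error_trans_cancel;
 *	error = xfs_trans_commit(tp);
 */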
1427int
1428xfs_release(
1429	xfs_inode_t	*ip)
1430{
1431	xfs_mount_t	*mp = ip->i_mount;
1432	int		error = 0;
1433
1434	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1435		return 0;
1436
1437	/* If this is a read-only mount, don't do this (would generate I/O) */
1438	if (mp->m_flags & XFS_MOUNT_RDONLY)
1439		return 0;
1440
1441	if (!XFS_FORCED_SHUTDOWN(mp)) {
1442		int truncated;
1443
1444		/*
1445		 * If we previously truncated this file and removed old data
1446		 * in the process, we want to initiate "early" writeout on
1447		 * the last close.  This is an attempt to combat the notorious
1448		 * NULL files problem which is particularly noticeable from a
1449		 * truncate down, buffered (re-)write (delalloc), followed by
1450		 * a crash.  What we are effectively doing here is
1451		 * significantly reducing the time window where we'd otherwise
1452		 * be exposed to that problem.
1453		 */
1454		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1455		if (truncated) {
1456			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1457			if (ip->i_delayed_blks > 0) {
1458				error = filemap_flush(VFS_I(ip)->i_mapping);
1459				if (error)
1460					return error;
1461			}
1462		}
1463	}
1464
1465	if (VFS_I(ip)->i_nlink == 0)
1466		return 0;
1467
1468	/*
1469	 * If we can't get the iolock just skip truncating the blocks past EOF
1470	 * because we could deadlock with the mmap_lock otherwise. We'll get
1471	 * another chance to drop them once the last reference to the inode is
1472	 * dropped, so we'll never leak blocks permanently.
1473	 */
1474	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
1475		return 0;
1476
1477	if (xfs_can_free_eofblocks(ip, false)) {
1478		/*
1479		 * If the inode is being opened, written and closed
1480		 * frequently and we have delayed allocation blocks outstanding
1481		 * (e.g. streaming writes from the NFS server), truncating the
1482		 * blocks past EOF will cause fragmentation to occur.
1483		 *
1484		 * In this case don't do the truncation, but we have to be
1485		 * careful how we detect this case. Blocks beyond EOF show up as
1486		 * i_delayed_blks even when the inode is clean, so we need to
1487		 * truncate them away first before checking for a dirty release.
1488		 * Hence on the first dirty close we will still remove the
1489		 * speculative allocation, but after that we will leave it in
1490		 * place.
1491		 */
1492		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1493			goto out_unlock;
1494
1495		error = xfs_free_eofblocks(ip);
1496		if (error)
1497			goto out_unlock;
1498
1499		/* delalloc blocks after truncation means it really is dirty */
1500		if (ip->i_delayed_blks)
1501			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1502	}
1503
1504out_unlock:
1505	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1506	return error;
1507}
1508
1509/*
1510 * xfs_inactive_truncate
1511 *
1512 * Called to perform a truncate when an inode becomes unlinked.
1513 */
1514STATIC int
1515xfs_inactive_truncate(
1516	struct xfs_inode *ip)
1517{
1518	struct xfs_mount	*mp = ip->i_mount;
1519	struct xfs_trans	*tp;
1520	int			error;
1521
1522	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1523	if (error) {
1524		ASSERT(XFS_FORCED_SHUTDOWN(mp));
1525		return error;
1526	}
1527	xfs_ilock(ip, XFS_ILOCK_EXCL);
1528	xfs_trans_ijoin(tp, ip, 0);
1529
1530	/*
1531	 * Log the inode size first to prevent stale data exposure in the event
1532	 * of a system crash before the truncate completes. See the related
1533	 * comment in xfs_vn_setattr_size() for details.
1534	 */
1535	ip->i_disk_size = 0;
1536	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1537
1538	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1539	if (error)
1540		goto error_trans_cancel;
1541
1542	ASSERT(ip->i_df.if_nextents == 0);
1543
1544	error = xfs_trans_commit(tp);
1545	if (error)
1546		goto error_unlock;
1547
1548	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1549	return 0;
1550
1551error_trans_cancel:
1552	xfs_trans_cancel(tp);
1553error_unlock:
1554	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1555	return error;
1556}
1557
1558/*
1559 * xfs_inactive_ifree()
1560 *
1561 * Perform the inode free when an inode is unlinked.
1562 */
1563STATIC int
1564xfs_inactive_ifree(
1565	struct xfs_inode *ip)
1566{
1567	struct xfs_mount	*mp = ip->i_mount;
1568	struct xfs_trans	*tp;
1569	int			error;
1570
1571	/*
1572	 * We try to use a per-AG reservation for any block needed by the finobt
1573	 * tree, but as the finobt feature predates the per-AG reservation
1574 * support, a degraded file system might not have enough space for the
1575	 * reservation at mount time.  In that case try to dip into the reserved
1576	 * pool and pray.
1577	 *
1578	 * Send a warning if the reservation does happen to fail, as the inode
1579	 * now remains allocated and sits on the unlinked list until the fs is
1580	 * repaired.
1581	 */
1582	if (unlikely(mp->m_finobt_nores)) {
1583		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1584				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1585				&tp);
1586	} else {
1587		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1588	}
1589	if (error) {
1590		if (error == -ENOSPC) {
1591			xfs_warn_ratelimited(mp,
1592			"Failed to remove inode(s) from unlinked list. "
1593			"Please free space, unmount and run xfs_repair.");
1594		} else {
1595			ASSERT(XFS_FORCED_SHUTDOWN(mp));
1596		}
1597		return error;
1598	}
1599
1600	/*
1601	 * We do not hold the inode locked across the entire rolling transaction
1602	 * here. We only need to hold it for the first transaction that
1603	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
1604	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
1605	 * here breaks the relationship between cluster buffer invalidation and
1606	 * stale inode invalidation on cluster buffer item journal commit
1607	 * completion, and can result in leaving dirty stale inodes hanging
1608	 * around in memory.
1609	 *
1610	 * We have no need for serialising this inode operation against other
1611	 * operations - we freed the inode and hence reallocation is required
1612	 * and that will serialise on reallocating the space the deferops need
1613	 * to free. Hence we can unlock the inode on the first commit of
1614	 * the transaction rather than roll it right through the deferops. This
1615	 * avoids relogging the XFS_ISTALE inode.
1616	 *
1617	 * We check that xfs_ifree() hasn't grown an internal transaction roll
1618	 * by asserting that the inode is still locked when it returns.
1619	 */
1620	xfs_ilock(ip, XFS_ILOCK_EXCL);
1621	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1622
1623	error = xfs_ifree(tp, ip);
1624	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1625	if (error) {
1626		/*
1627		 * If we fail to free the inode, shut down.  The cancel
1628		 * might do that, we need to make sure.  Otherwise the
1629		 * inode might be lost for a long time or forever.
1630		 */
1631		if (!XFS_FORCED_SHUTDOWN(mp)) {
1632			xfs_notice(mp, "%s: xfs_ifree returned error %d",
1633				__func__, error);
1634			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1635		}
1636		xfs_trans_cancel(tp);
1637		return error;
1638	}
1639
1640	/*
1641	 * Credit the quota account(s). The inode is gone.
1642	 */
1643	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1644
1645	/*
1646	 * Just ignore errors at this point.  There is nothing we can do except
1647	 * to try to keep going. Make sure it's not a silent error.
1648	 */
1649	error = xfs_trans_commit(tp);
1650	if (error)
1651		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1652			__func__, error);
1653
1654	return 0;
1655}
1656
1657/*
1658 * xfs_inactive
1659 *
1660 * This is called when the reference count for the vnode
1661 * goes to zero.  If the file has been unlinked, then it must
1662 * now be truncated.  Also, we clear all of the read-ahead state
1663 * kept for the inode here since the file is now closed.
1664 */
1665void
1666xfs_inactive(
1667	xfs_inode_t	*ip)
1668{
1669	struct xfs_mount	*mp;
1670	int			error;
1671	int			truncate = 0;
1672
1673	/*
1674	 * If the inode is already free, then there can be nothing
1675	 * to clean up here.
1676	 */
1677	if (VFS_I(ip)->i_mode == 0) {
1678		ASSERT(ip->i_df.if_broot_bytes == 0);
1679		goto out;
1680	}
1681
1682	mp = ip->i_mount;
1683	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1684
1685	/* If this is a read-only mount, don't do this (would generate I/O) */
1686	if (mp->m_flags & XFS_MOUNT_RDONLY)
1687		goto out;
1688
1689	/* Metadata inodes require explicit resource cleanup. */
1690	if (xfs_is_metadata_inode(ip))
1691		goto out;
1692
1693	/* Try to clean out the cow blocks if there are any. */
1694	if (xfs_inode_has_cow_data(ip))
1695		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1696
1697	if (VFS_I(ip)->i_nlink != 0) {
1698		/*
1699		 * force is true because we are evicting an inode from the
1700		 * cache. Post-eof blocks must be freed, lest we end up with
1701		 * broken free space accounting.
1702		 *
1703		 * Note: don't bother with iolock here since lockdep complains
1704		 * about acquiring it in reclaim context. We have the only
1705		 * reference to the inode at this point anyways.
1706		 */
1707		if (xfs_can_free_eofblocks(ip, true))
1708			xfs_free_eofblocks(ip);
1709
1710		goto out;
1711	}
1712
1713	if (S_ISREG(VFS_I(ip)->i_mode) &&
1714	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1715	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1716		truncate = 1;
1717
1718	error = xfs_qm_dqattach(ip);
1719	if (error)
1720		goto out;
1721
1722	if (S_ISLNK(VFS_I(ip)->i_mode))
1723		error = xfs_inactive_symlink(ip);
1724	else if (truncate)
1725		error = xfs_inactive_truncate(ip);
1726	if (error)
1727		goto out;
1728
1729	/*
1730	 * If there are attributes associated with the file then blow them away
1731	 * now.  The code calls a routine that recursively deconstructs the
1732	 * attribute fork. It also blows away the in-core attribute fork.
1733	 */
1734	if (XFS_IFORK_Q(ip)) {
1735		error = xfs_attr_inactive(ip);
1736		if (error)
1737			goto out;
1738	}
1739
1740	ASSERT(!ip->i_afp);
1741	ASSERT(ip->i_forkoff == 0);
1742
1743	/*
1744	 * Free the inode.
1745	 */
1746	xfs_inactive_ifree(ip);
1747
1748out:
1749	/*
1750	 * We're done making metadata updates for this inode, so we can release
1751	 * the attached dquots.
1752	 */
1753	xfs_qm_dqdetach(ip);
1754}
1755
1756/*
1757 * In-Core Unlinked List Lookups
1758 * =============================
1759 *
1760 * Every inode is supposed to be reachable from some other piece of metadata
1761 * with the exception of the root directory.  Inodes with a connection to a
1762 * file descriptor but not linked from anywhere in the on-disk directory tree
1763 * are collectively known as unlinked inodes, though the filesystem itself
1764 * maintains links to these inodes so that on-disk metadata are consistent.
1765 *
1766 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
1767 * header contains a number of buckets that point to an inode, and each inode
1768 * record has a pointer to the next inode in the hash chain.  This
1769 * singly-linked list causes scaling problems in the iunlink remove function
1770 * because we must walk that list to find the inode that points to the inode
1771 * being removed from the unlinked hash bucket list.
1772 *
1773 * What if we modelled the unlinked list as a collection of records capturing
1774 * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
1775 * have a fast way to look up unlinked list predecessors, which avoids the
1776 * slow list walk.  That's exactly what we do here (in-core) with a per-AG
1777 * rhashtable.
1778 *
1779 * Because this is a backref cache, we ignore operational failures since the
1780 * iunlink code can fall back to the slow bucket walk.  The only errors that
1781 * should bubble out are for obviously incorrect situations.
1782 *
1783 * All users of the backref cache MUST hold the AGI buffer lock to serialize
1784 * access or have otherwise provided for concurrency control.
1785 */
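
/*
 * A minimal standalone sketch of the backref idea above (illustrative
 * only, not kernel code; all numbers are hypothetical).  For an unlinked
 * chain bucket -> 10 -> 42 -> 7 we cache one record per
 * "X.next_unlinked = Y" edge, keyed on Y, so the predecessor of any
 * inode is a single lookup instead of a walk from the bucket head.
 */
#if 0	/* never compiled; extract to build as a userspace demo */
#include <stdio.h>

struct backref { unsigned int agino, next_unlinked; };	/* X, Y */

/* Chain bucket -> 10 -> 42 -> 7 yields the edges (10,42) and (42,7). */
static struct backref cache[] = { { 10, 42 }, { 42, 7 } };

/* Stand-in for the rhashtable lookup: find X with X.next_unlinked == y. */
static int predecessor(unsigned int y)
{
	unsigned int i;

	for (i = 0; i < 2; i++)
		if (cache[i].next_unlinked == y)
			return cache[i].agino;
	return -1;	/* NULLAGINO in the real code */
}

int main(void)
{
	printf("prev(7) = %d\n", predecessor(7));	/* prints 42 */
	return 0;
}
#endif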
1786
1787/* Capture a "X.next_unlinked = Y" relationship. */
1788struct xfs_iunlink {
1789	struct rhash_head	iu_rhash_head;
1790	xfs_agino_t		iu_agino;		/* X */
1791	xfs_agino_t		iu_next_unlinked;	/* Y */
1792};
1793
1794/* Unlinked list predecessor lookup hashtable construction */
1795static int
1796xfs_iunlink_obj_cmpfn(
1797	struct rhashtable_compare_arg	*arg,
1798	const void			*obj)
1799{
1800	const xfs_agino_t		*key = arg->key;
1801	const struct xfs_iunlink	*iu = obj;
1802
1803	if (iu->iu_next_unlinked != *key)
1804		return 1;
1805	return 0;
1806}
1807
1808static const struct rhashtable_params xfs_iunlink_hash_params = {
1809	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
1810	.key_len		= sizeof(xfs_agino_t),
1811	.key_offset		= offsetof(struct xfs_iunlink,
1812					   iu_next_unlinked),
1813	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
1814	.automatic_shrinking	= true,
1815	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
1816};
1817
1818/*
1819 * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
1820 * relation is found.
1821 */
1822static xfs_agino_t
1823xfs_iunlink_lookup_backref(
1824	struct xfs_perag	*pag,
1825	xfs_agino_t		agino)
1826{
1827	struct xfs_iunlink	*iu;
1828
1829	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
1830			xfs_iunlink_hash_params);
1831	return iu ? iu->iu_agino : NULLAGINO;
1832}
1833
1834/*
1835 * Take ownership of an iunlink cache entry and insert it into the hash table.
1836 * If successful, the entry will be owned by the cache; if not, it is freed.
1837 * Either way, the caller does not own @iu after this call.
1838 */
1839static int
1840xfs_iunlink_insert_backref(
1841	struct xfs_perag	*pag,
1842	struct xfs_iunlink	*iu)
1843{
1844	int			error;
1845
1846	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
1847			&iu->iu_rhash_head, xfs_iunlink_hash_params);
1848	/*
1849	 * Fail loudly if there already was an entry because that's a sign of
1850	 * corruption of in-memory data.  Also fail loudly if we see an error
1851	 * code we didn't anticipate from the rhashtable code.  Currently we
1852	 * only anticipate ENOMEM.
1853	 */
1854	if (error) {
1855		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
1856		kmem_free(iu);
1857	}
1858	/*
1859	 * Absorb any runtime errors that aren't a result of corruption because
1860	 * this is a cache and we can always fall back to bucket list scanning.
1861	 */
1862	if (error != 0 && error != -EEXIST)
1863		error = 0;
1864	return error;
1865}
1866
1867/* Remember that @prev_agino.next_unlinked = @this_agino. */
1868static int
1869xfs_iunlink_add_backref(
1870	struct xfs_perag	*pag,
1871	xfs_agino_t		prev_agino,
1872	xfs_agino_t		this_agino)
1873{
1874	struct xfs_iunlink	*iu;
1875
1876	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
1877		return 0;
1878
1879	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
1880	iu->iu_agino = prev_agino;
1881	iu->iu_next_unlinked = this_agino;
1882
1883	return xfs_iunlink_insert_backref(pag, iu);
1884}
1885
1886/*
1887 * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
1888 * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
1889 * wasn't any such entry then we don't bother.
1890 */
1891static int
1892xfs_iunlink_change_backref(
1893	struct xfs_perag	*pag,
1894	xfs_agino_t		agino,
1895	xfs_agino_t		next_unlinked)
1896{
1897	struct xfs_iunlink	*iu;
1898	int			error;
1899
1900	/* Look up the old entry; if there wasn't one then exit. */
1901	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
1902			xfs_iunlink_hash_params);
1903	if (!iu)
1904		return 0;
1905
1906	/*
1907	 * Remove the entry.  This shouldn't ever return an error, but if we
1908	 * couldn't remove the old entry we don't want to add it again to the
1909	 * hash table, and if the entry disappeared on us then someone's
1910	 * violated the locking rules and we need to fail loudly.  Either way
1911	 * we cannot remove the inode because internal state is or would have
1912	 * been corrupt.
1913	 */
1914	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
1915			&iu->iu_rhash_head, xfs_iunlink_hash_params);
1916	if (error)
1917		return error;
1918
1919	/* If there is no new next entry just free our item and return. */
1920	if (next_unlinked == NULLAGINO) {
1921		kmem_free(iu);
1922		return 0;
1923	}
1924
1925	/* Update the entry and re-add it to the hash table. */
1926	iu->iu_next_unlinked = next_unlinked;
1927	return xfs_iunlink_insert_backref(pag, iu);
1928}
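
/*
 * Note that the rhashtable key is the iu_next_unlinked field itself, so
 * an entry cannot be re-keyed in place -- hence the remove / update /
 * re-insert sequence above whenever a relation changes from
 * "X.next = old" to "X.next = new".
 */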
1929
1930/* Set up the in-core predecessor structures. */
1931int
1932xfs_iunlink_init(
1933	struct xfs_perag	*pag)
1934{
1935	return rhashtable_init(&pag->pagi_unlinked_hash,
1936			&xfs_iunlink_hash_params);
1937}
1938
1939/* Free the in-core predecessor structures. */
1940static void
1941xfs_iunlink_free_item(
1942	void			*ptr,
1943	void			*arg)
1944{
1945	struct xfs_iunlink	*iu = ptr;
1946	bool			*freed_anything = arg;
1947
1948	*freed_anything = true;
1949	kmem_free(iu);
1950}
1951
1952void
1953xfs_iunlink_destroy(
1954	struct xfs_perag	*pag)
1955{
1956	bool			freed_anything = false;
1957
1958	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
1959			xfs_iunlink_free_item, &freed_anything);
1960
1961	ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
1962}
1963
1964/*
1965 * Point the AGI unlinked bucket at an inode and log the results.  The caller
1966 * is responsible for validating the old value.
1967 */
1968STATIC int
1969xfs_iunlink_update_bucket(
1970	struct xfs_trans	*tp,
1971	struct xfs_perag	*pag,
1972	struct xfs_buf		*agibp,
1973	unsigned int		bucket_index,
1974	xfs_agino_t		new_agino)
1975{
1976	struct xfs_agi		*agi = agibp->b_addr;
1977	xfs_agino_t		old_value;
1978	int			offset;
1979
1980	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, pag->pag_agno, new_agino));
1981
1982	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1983	trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
1984			old_value, new_agino);
1985
1986	/*
1987	 * We should never find the head of the list already set to the value
1988	 * passed in because either we're adding or removing ourselves from the
1989	 * head of the list.
1990	 */
1991	if (old_value == new_agino) {
1992		xfs_buf_mark_corrupt(agibp);
1993		return -EFSCORRUPTED;
1994	}
1995
1996	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
1997	offset = offsetof(struct xfs_agi, agi_unlinked) +
1998			(sizeof(xfs_agino_t) * bucket_index);
1999	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
2000	return 0;
2001}
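
/*
 * A worked example of the logging range above, assuming 4-byte
 * xfs_agino_t entries: for bucket_index 5, offset is
 * offsetof(struct xfs_agi, agi_unlinked) + 5 * 4, and the logged byte
 * range [offset, offset + 3] covers exactly that one bucket slot.
 */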
2002
2003/* Set an on-disk inode's next_unlinked pointer. */
2004STATIC void
2005xfs_iunlink_update_dinode(
2006	struct xfs_trans	*tp,
2007	struct xfs_perag	*pag,
2008	xfs_agino_t		agino,
2009	struct xfs_buf		*ibp,
2010	struct xfs_dinode	*dip,
2011	struct xfs_imap		*imap,
2012	xfs_agino_t		next_agino)
2013{
2014	struct xfs_mount	*mp = tp->t_mountp;
2015	int			offset;
2016
2017	ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
2018
2019	trace_xfs_iunlink_update_dinode(mp, pag->pag_agno, agino,
2020			be32_to_cpu(dip->di_next_unlinked), next_agino);
2021
2022	dip->di_next_unlinked = cpu_to_be32(next_agino);
2023	offset = imap->im_boffset +
2024			offsetof(struct xfs_dinode, di_next_unlinked);
2025
2026	/* need to recalc the inode CRC if appropriate */
2027	xfs_dinode_calc_crc(mp, dip);
2028	xfs_trans_inode_buf(tp, ibp);
2029	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2030}
2031
2032/* Set an in-core inode's unlinked pointer and return the old value. */
2033STATIC int
2034xfs_iunlink_update_inode(
2035	struct xfs_trans	*tp,
2036	struct xfs_inode	*ip,
2037	struct xfs_perag	*pag,
2038	xfs_agino_t		next_agino,
2039	xfs_agino_t		*old_next_agino)
2040{
2041	struct xfs_mount	*mp = tp->t_mountp;
2042	struct xfs_dinode	*dip;
2043	struct xfs_buf		*ibp;
2044	xfs_agino_t		old_value;
2045	int			error;
2046
2047	ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
2048
2049	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
2050	if (error)
2051		return error;
2052	dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
2053
2054	/* Make sure the old pointer isn't garbage. */
2055	old_value = be32_to_cpu(dip->di_next_unlinked);
2056	if (!xfs_verify_agino_or_null(mp, pag->pag_agno, old_value)) {
2057		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
2058				sizeof(*dip), __this_address);
2059		error = -EFSCORRUPTED;
2060		goto out;
2061	}
2062
2063	/*
2064	 * Since we're updating a linked list, we should never find that the
2065	 * current pointer is the same as the new value, unless we're
2066	 * terminating the list.
2067	 */
2068	*old_next_agino = old_value;
2069	if (old_value == next_agino) {
2070		if (next_agino != NULLAGINO) {
2071			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
2072					dip, sizeof(*dip), __this_address);
2073			error = -EFSCORRUPTED;
2074		}
2075		goto out;
2076	}
2077
2078	/* Ok, update the new pointer. */
2079	xfs_iunlink_update_dinode(tp, pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
2080			ibp, dip, &ip->i_imap, next_agino);
2081	return 0;
2082out:
2083	xfs_trans_brelse(tp, ibp);
2084	return error;
2085}
2086
2087/*
2088 * This is called when the inode's link count has gone to 0 or we are creating
2089 * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
2090 *
2091 * We place the on-disk inode on a list in the AGI.  It will be pulled from this
2092 * list when the inode is freed.
2093 */
2094STATIC int
2095xfs_iunlink(
2096	struct xfs_trans	*tp,
2097	struct xfs_inode	*ip)
2098{
2099	struct xfs_mount	*mp = tp->t_mountp;
2100	struct xfs_perag	*pag;
2101	struct xfs_agi		*agi;
2102	struct xfs_buf		*agibp;
2103	xfs_agino_t		next_agino;
2104	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2105	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2106	int			error;
2107
2108	ASSERT(VFS_I(ip)->i_nlink == 0);
2109	ASSERT(VFS_I(ip)->i_mode != 0);
2110	trace_xfs_iunlink(ip);
2111
2112	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2113
2114	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2115	error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
2116	if (error)
2117		goto out;
2118	agi = agibp->b_addr;
2119
2120	/*
2121	 * Get the index into the agi hash table for the list this inode will
2122	 * go on.  Make sure the pointer isn't garbage and that this inode
2123	 * isn't already on the list.
2124	 */
2125	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2126	if (next_agino == agino ||
2127	    !xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)) {
2128		xfs_buf_mark_corrupt(agibp);
2129		error = -EFSCORRUPTED;
2130		goto out;
2131	}
2132
2133	if (next_agino != NULLAGINO) {
2134		xfs_agino_t		old_agino;
2135
2136		/*
2137		 * There is already another inode in the bucket, so point this
2138		 * inode to the current head of the list.
2139		 */
2140		error = xfs_iunlink_update_inode(tp, ip, pag, next_agino,
2141				&old_agino);
2142		if (error)
2143			goto out;
2144		ASSERT(old_agino == NULLAGINO);
2145
2146		/*
2147		 * agino has been unlinked, add a backref from the next inode
2148		 * back to agino.
2149		 */
2150		error = xfs_iunlink_add_backref(pag, agino, next_agino);
2151		if (error)
2152			goto out;
2153	}
2154
2155	/* Point the head of the list to point to this inode. */
2156	error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
2157out:
2158	xfs_perag_put(pag);
2159	return error;
2160}
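
/*
 * A worked example of the insertion above, with hypothetical inode
 * numbers: if agino 133 hashes to bucket 5 (133 % 64, assuming the usual
 * XFS_AGI_UNLINKED_BUCKETS of 64) and that bucket currently heads the
 * chain 69 -> 5, the steps are:
 *
 *	1. 133.next_unlinked = 69   (xfs_iunlink_update_inode)
 *	2. cache the edge (133, 69) (xfs_iunlink_add_backref)
 *	3. bucket[5] = 133          (xfs_iunlink_update_bucket)
 *
 * leaving the chain bucket[5] -> 133 -> 69 -> 5.
 */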
2161
2162/* Return the imap, dinode pointer, and buffer for an inode. */
2163STATIC int
2164xfs_iunlink_map_ino(
2165	struct xfs_trans	*tp,
2166	xfs_agnumber_t		agno,
2167	xfs_agino_t		agino,
2168	struct xfs_imap		*imap,
2169	struct xfs_dinode	**dipp,
2170	struct xfs_buf		**bpp)
2171{
2172	struct xfs_mount	*mp = tp->t_mountp;
2173	int			error;
2174
2175	imap->im_blkno = 0;
2176	error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
2177	if (error) {
2178		xfs_warn(mp, "%s: xfs_imap returned error %d.",
2179				__func__, error);
2180		return error;
2181	}
2182
2183	error = xfs_imap_to_bp(mp, tp, imap, bpp);
2184	if (error) {
2185		xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2186				__func__, error);
2187		return error;
2188	}
2189
2190	*dipp = xfs_buf_offset(*bpp, imap->im_boffset);
2191	return 0;
2192}
2193
2194/*
2195 * Walk the unlinked chain from @head_agino until we find the inode that
2196 * points to @target_agino.  Return the inode number, map, dinode pointer,
2197 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
2198 *
2199 * @tp, @pag, @head_agino, and @target_agino are input parameters.
2200 * @agino, @imap, @dipp, and @bpp are all output parameters.
2201 *
2202 * Do not call this function if @target_agino is the head of the list.
2203 */
2204STATIC int
2205xfs_iunlink_map_prev(
2206	struct xfs_trans	*tp,
2207	struct xfs_perag	*pag,
2208	xfs_agino_t		head_agino,
2209	xfs_agino_t		target_agino,
2210	xfs_agino_t		*agino,
2211	struct xfs_imap		*imap,
2212	struct xfs_dinode	**dipp,
2213	struct xfs_buf		**bpp)
2214{
2215	struct xfs_mount	*mp = tp->t_mountp;
2216	xfs_agino_t		next_agino;
2217	int			error;
2218
2219	ASSERT(head_agino != target_agino);
2220	*bpp = NULL;
2221
2222	/* See if our backref cache can find it faster. */
2223	*agino = xfs_iunlink_lookup_backref(pag, target_agino);
2224	if (*agino != NULLAGINO) {
2225		error = xfs_iunlink_map_ino(tp, pag->pag_agno, *agino, imap,
2226				dipp, bpp);
2227		if (error)
2228			return error;
2229
2230		if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
2231			return 0;
2232
2233		/*
2234		 * If we get here the cache contents were corrupt, so drop the
2235		 * buffer and fall back to walking the bucket list.
2236		 */
2237		xfs_trans_brelse(tp, *bpp);
2238		*bpp = NULL;
2239		WARN_ON_ONCE(1);
2240	}
2241
2242	trace_xfs_iunlink_map_prev_fallback(mp, pag->pag_agno);
2243
2244	/* Otherwise, walk the entire bucket until we find it. */
2245	next_agino = head_agino;
2246	while (next_agino != target_agino) {
2247		xfs_agino_t	unlinked_agino;
2248
2249		if (*bpp)
2250			xfs_trans_brelse(tp, *bpp);
2251
2252		*agino = next_agino;
2253		error = xfs_iunlink_map_ino(tp, pag->pag_agno, next_agino, imap,
2254				dipp, bpp);
2255		if (error)
2256			return error;
2257
2258		unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2259		/*
2260		 * Make sure this pointer is valid and isn't an obvious
2261		 * infinite loop.
2262		 */
2263		if (!xfs_verify_agino(mp, pag->pag_agno, unlinked_agino) ||
2264		    next_agino == unlinked_agino) {
2265			XFS_CORRUPTION_ERROR(__func__,
2266					XFS_ERRLEVEL_LOW, mp,
2267					*dipp, sizeof(**dipp));
2268			error = -EFSCORRUPTED;
2269			return error;
2270		}
2271		next_agino = unlinked_agino;
2272	}
2273
2274	return 0;
2275}
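
/*
 * The fallback above is the O(n) path the backref cache exists to avoid:
 * it reads the cluster buffer of every inode from @head_agino forward
 * until one's di_next_unlinked equals @target_agino, releasing each
 * buffer as it advances and returning with the predecessor's buffer
 * still held.
 */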
2276
2277/*
2278 * Pull the on-disk inode from the AGI unlinked list.
2279 */
2280STATIC int
2281xfs_iunlink_remove(
2282	struct xfs_trans	*tp,
2283	struct xfs_perag	*pag,
2284	struct xfs_inode	*ip)
2285{
2286	struct xfs_mount	*mp = tp->t_mountp;
2287	struct xfs_agi		*agi;
2288	struct xfs_buf		*agibp;
2289	struct xfs_buf		*last_ibp;
2290	struct xfs_dinode	*last_dip = NULL;
2291	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2292	xfs_agino_t		next_agino;
2293	xfs_agino_t		head_agino;
2294	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2295	int			error;
2296
2297	trace_xfs_iunlink_remove(ip);
2298
2299	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2300	error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
2301	if (error)
2302		return error;
2303	agi = agibp->b_addr;
2304
2305	/*
2306	 * Get the index into the agi hash table for the list this inode will
2307	 * go on.  Make sure the head pointer isn't garbage.
2308	 */
2309	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2310	if (!xfs_verify_agino(mp, pag->pag_agno, head_agino)) {
2311		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2312				agi, sizeof(*agi));
2313		return -EFSCORRUPTED;
2314	}
2315
2316	/*
2317	 * Set our inode's next_unlinked pointer to NULL and then return
2318	 * the old pointer value so that we can update whatever was previous
2319	 * to us in the list to point to whatever was next in the list.
2320	 */
2321	error = xfs_iunlink_update_inode(tp, ip, pag, NULLAGINO, &next_agino);
2322	if (error)
2323		return error;
2324
2325	/*
2326	 * If there was a backref pointing from the next inode back to this
2327	 * one, remove it because we've removed this inode from the list.
2328	 *
2329	 * Later, if this inode was in the middle of the list we'll update
2330	 * this inode's backref to point from the next inode.
2331	 */
2332	if (next_agino != NULLAGINO) {
2333		error = xfs_iunlink_change_backref(pag, next_agino, NULLAGINO);
2334		if (error)
2335			return error;
2336	}
2337
2338	if (head_agino != agino) {
2339		struct xfs_imap	imap;
2340		xfs_agino_t	prev_agino;
2341
2342		/* We need to search the list for the inode being freed. */
2343		error = xfs_iunlink_map_prev(tp, pag, head_agino, agino,
2344				&prev_agino, &imap, &last_dip, &last_ibp);
2345		if (error)
2346			return error;
2347
2348		/* Point the previous inode on the list to the next inode. */
2349		xfs_iunlink_update_dinode(tp, pag, prev_agino, last_ibp,
2350				last_dip, &imap, next_agino);
2351
2352		/*
2353		 * Now we deal with the backref for this inode.  If this inode
2354		 * pointed at a real inode, change the backref that pointed to
2355		 * us to point to our old next.  If this inode was the end of
2356		 * the list, delete the backref that pointed to us.  Note that
2357		 * change_backref takes care of deleting the backref if
2358		 * next_agino is NULLAGINO.
2359		 */
2360		return xfs_iunlink_change_backref(agibp->b_pag, agino,
2361				next_agino);
2362	}
2363
2364	/* Point the head of the list to the next unlinked inode. */
2365	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
2366			next_agino);
2367}
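
/*
 * A worked example of the two removal cases above, continuing the
 * hypothetical chain bucket[5] -> 133 -> 69 -> 5:
 *
 *	- removing the head (133): the cached edge (133, 69) is dropped
 *	  and bucket[5] is repointed at 69;
 *	- removing a middle entry (69): the cached edge (69, 5) is
 *	  dropped, xfs_iunlink_map_prev finds 133, 133.next_unlinked is
 *	  rewritten to 5, and the cached edge (133, 69) is re-keyed to
 *	  (133, 5).
 */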
2368
2369/*
2370 * Look up the specified inode number and, if it is not already marked
2371 * XFS_ISTALE, mark it stale. We should only find clean inodes in this
2372 * lookup that aren't already stale.
2373 */
2374static void
2375xfs_ifree_mark_inode_stale(
2376	struct xfs_perag	*pag,
2377	struct xfs_inode	*free_ip,
2378	xfs_ino_t		inum)
2379{
2380	struct xfs_mount	*mp = pag->pag_mount;
2381	struct xfs_inode_log_item *iip;
2382	struct xfs_inode	*ip;
2383
2384retry:
2385	rcu_read_lock();
2386	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2387
2388	/* Inode not in memory, nothing to do */
2389	if (!ip) {
2390		rcu_read_unlock();
2391		return;
2392	}
2393
2394	/*
2395	 * Because this is an RCU-protected lookup, we could find a recently
2396	 * freed or even reallocated inode during the lookup. We need to check
2397	 * under the i_flags_lock for a valid inode here. Skip it if it is not
2398	 * valid, is the wrong inode, or is stale.
2399	 */
2400	spin_lock(&ip->i_flags_lock);
2401	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2402		goto out_iflags_unlock;
2403
2404	/*
2405	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2406	 * other inodes that we did not find in the list attached to the buffer
2407	 * and are not already marked stale. If we can't lock it, back off and
2408	 * retry.
2409	 */
2410	if (ip != free_ip) {
2411		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2412			spin_unlock(&ip->i_flags_lock);
2413			rcu_read_unlock();
2414			delay(1);
2415			goto retry;
2416		}
2417	}
2418	ip->i_flags |= XFS_ISTALE;
2419
2420	/*
2421	 * If the inode is flushing, it is already attached to the buffer.  All
2422	 * we needed to do here is mark the inode stale so buffer IO completion
2423	 * will remove it from the AIL.
2424	 */
2425	iip = ip->i_itemp;
2426	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
2427		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2428		ASSERT(iip->ili_last_fields);
2429		goto out_iunlock;
2430	}
2431
2432	/*
2433	 * Inodes not attached to the buffer can be released immediately.
2434	 * Everything else has to go through xfs_iflush_abort() on journal
2435	 * commit as the flock synchronises removal of the inode from the
2436	 * cluster buffer against inode reclaim.
2437	 */
2438	if (!iip || list_empty(&iip->ili_item.li_bio_list))
2439		goto out_iunlock;
2440
2441	__xfs_iflags_set(ip, XFS_IFLUSHING);
2442	spin_unlock(&ip->i_flags_lock);
2443	rcu_read_unlock();
2444
2445	/* we have a dirty inode in memory that has not yet been flushed. */
2446	spin_lock(&iip->ili_lock);
2447	iip->ili_last_fields = iip->ili_fields;
2448	iip->ili_fields = 0;
2449	iip->ili_fsync_fields = 0;
2450	spin_unlock(&iip->ili_lock);
2451	ASSERT(iip->ili_last_fields);
2452
2453	if (ip != free_ip)
2454		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2455	return;
2456
2457out_iunlock:
2458	if (ip != free_ip)
2459		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2460out_iflags_unlock:
2461	spin_unlock(&ip->i_flags_lock);
2462	rcu_read_unlock();
2463}
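
/*
 * The lookup above follows the usual XFS RCU pattern: find the inode
 * under rcu_read_lock(), then revalidate i_ino and the flags under
 * i_flags_lock before trusting it, since the slot may have been freed
 * and reused between the radix tree lookup and the check.
 */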
2464
2465/*
2466 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2467 * inodes that are in memory - they all must be marked stale and attached to
2468 * the cluster buffer.
2469 */
2470static int
2471xfs_ifree_cluster(
2472	struct xfs_trans	*tp,
2473	struct xfs_perag	*pag,
2474	struct xfs_inode	*free_ip,
2475	struct xfs_icluster	*xic)
2476{
2477	struct xfs_mount	*mp = free_ip->i_mount;
2478	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
2479	struct xfs_buf		*bp;
2480	xfs_daddr_t		blkno;
2481	xfs_ino_t		inum = xic->first_ino;
2482	int			nbufs;
2483	int			i, j;
2484	int			ioffset;
2485	int			error;
2486
2487	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2488
2489	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2490		/*
2491		 * The allocation bitmap tells us which inodes of the chunk were
2492		 * physically allocated. Skip the cluster if an inode falls into
2493		 * a sparse region.
2494		 */
2495		ioffset = inum - xic->first_ino;
2496		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2497			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2498			continue;
2499		}
2500
2501		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2502					 XFS_INO_TO_AGBNO(mp, inum));
2503
2504		/*
2505		 * We obtain and lock the backing buffer first in the process
2506		 * here to ensure dirty inodes attached to the buffer remain in
2507		 * the flushing state while we mark them stale.
2508		 *
2509		 * If we scan the in-memory inodes first, then buffer IO can
2510		 * complete before we get a lock on it, and hence we may fail
2511		 * to mark all the active inodes on the buffer stale.
2512		 */
2513		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2514				mp->m_bsize * igeo->blocks_per_cluster,
2515				XBF_UNMAPPED, &bp);
2516		if (error)
2517			return error;
2518
2519		/*
2520		 * This buffer may not have been correctly initialised as we
2521		 * didn't read it from disk. That's not important because we are
2522		 * only using it to mark the buffer as stale in the log, and to
2523		 * attach stale cached inodes on it. That means it will never be
2524		 * dispatched for IO. If it is, we want to know about it, and we
2525		 * want it to fail. We can achieve this by adding a write
2526		 * verifier to the buffer.
2527		 */
2528		bp->b_ops = &xfs_inode_buf_ops;
2529
2530		/*
2531		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2532		 * too. This requires lookups, and will skip inodes that we've
2533		 * already marked XFS_ISTALE.
2534		 */
2535		for (i = 0; i < igeo->inodes_per_cluster; i++)
2536			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
2537
2538		xfs_trans_stale_inode_buf(tp, bp);
2539		xfs_trans_binval(tp, bp);
2540	}
2541	return 0;
2542}
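
/*
 * A worked example of the sparse-region skip above, under hypothetical
 * geometry (64-inode chunk, 32 inodes per cluster, only the second
 * cluster allocated): the j == 0 pass sees ioffset 0 with its xic->alloc
 * bit clear and continues, while the j == 1 pass (ioffset 32) stales
 * whichever of that cluster's 32 inodes are in memory.
 */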
2543
2544/*
2545 * This is called to return an inode to the inode free list.
2546 * The inode should already be truncated to 0 length and have
2547 * no pages associated with it.  This routine also assumes that
2548 * the inode is already a part of the transaction.
2549 *
2550 * The on-disk copy of the inode will have been added to the list
2551 * of unlinked inodes in the AGI. We need to remove the inode from
2552 * that list atomically with respect to freeing it here.
2553 */
2554int
2555xfs_ifree(
2556	struct xfs_trans	*tp,
2557	struct xfs_inode	*ip)
2558{
2559	struct xfs_mount	*mp = ip->i_mount;
2560	struct xfs_perag	*pag;
2561	struct xfs_icluster	xic = { 0 };
2562	struct xfs_inode_log_item *iip = ip->i_itemp;
2563	int			error;
2564
2565	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2566	ASSERT(VFS_I(ip)->i_nlink == 0);
2567	ASSERT(ip->i_df.if_nextents == 0);
2568	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2569	ASSERT(ip->i_nblocks == 0);
2570
2571	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2572
2573	/*
2574	 * Pull the on-disk inode from the AGI unlinked list.
2575	 */
2576	error = xfs_iunlink_remove(tp, pag, ip);
2577	if (error)
2578		goto out;
2579
2580	error = xfs_difree(tp, pag, ip->i_ino, &xic);
2581	if (error)
2582		goto out;
2583
2584	/*
2585	 * Free any local-format data sitting around before we reset the
2586	 * data fork to extents format.  Note that the attr fork data has
2587	 * already been freed by xfs_attr_inactive.
2588	 */
2589	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2590		kmem_free(ip->i_df.if_u1.if_data);
2591		ip->i_df.if_u1.if_data = NULL;
2592		ip->i_df.if_bytes = 0;
2593	}
2594
2595	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2596	ip->i_diflags = 0;
2597	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
2598	ip->i_forkoff = 0;		/* mark the attr fork not in use */
2599	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2600	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
2601		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2602
2603	/* Don't attempt to replay owner changes for a deleted inode */
2604	spin_lock(&iip->ili_lock);
2605	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2606	spin_unlock(&iip->ili_lock);
2607
2608	/*
2609	 * Bump the generation count so no one will be confused
2610	 * by reincarnations of this inode.
2611	 */
2612	VFS_I(ip)->i_generation++;
2613	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2614
2615	if (xic.deleted)
2616		error = xfs_ifree_cluster(tp, pag, ip, &xic);
2617out:
2618	xfs_perag_put(pag);
2619	return error;
2620}
2621
2622/*
2623 * This is called to unpin an inode.  The caller must have the inode locked
2624 * in at least shared mode so that the buffer cannot be subsequently pinned
2625 * once someone is waiting for it to be unpinned.
2626 */
2627static void
2628xfs_iunpin(
2629	struct xfs_inode	*ip)
2630{
2631	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2632
2633	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2634
2635	/* Give the log a push to start the unpinning I/O */
2636	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2637
2638}
2639
2640static void
2641__xfs_iunpin_wait(
2642	struct xfs_inode	*ip)
2643{
2644	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2645	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2646
2647	xfs_iunpin(ip);
2648
2649	do {
2650		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2651		if (xfs_ipincount(ip))
2652			io_schedule();
2653	} while (xfs_ipincount(ip));
2654	finish_wait(wq, &wait.wq_entry);
2655}
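
/*
 * This is the standard wait-on-bit pattern: kick the log to start the
 * unpin I/O, then sleep on the XFS_IPINNED waitqueue, rechecking the pin
 * count after every wakeup until it reaches zero.
 */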
2656
2657void
2658xfs_iunpin_wait(
2659	struct xfs_inode	*ip)
2660{
2661	if (xfs_ipincount(ip))
2662		__xfs_iunpin_wait(ip);
2663}
2664
2665/*
2666 * Removing an inode from the namespace involves removing the directory entry
2667 * and dropping the link count on the inode. Removing the directory entry can
2668 * result in locking an AGF (directory blocks were freed) and removing a link
2669 * count can result in placing the inode on an unlinked list which results in
2670 * locking an AGI.
2671 *
2672 * The big problem here is that we have an ordering constraint on AGF and AGI
2673 * locking - inode allocation locks the AGI, then can allocate a new extent for
2674 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2675 * removes the inode from the unlinked list, requiring that we lock the AGI
2676 * first, and then freeing the inode can result in an inode chunk being freed
2677 * and hence freeing disk space requiring that we lock an AGF.
2678 *
2679 * Hence the ordering that is imposed by other parts of the code is AGI before
2680 * AGF. This means we cannot remove the directory entry before we drop the inode
2681 * reference count and put it on the unlinked list as this results in a lock
2682 * order of AGF then AGI, and this can deadlock against inode allocation and
2683 * freeing. Therefore we must drop the link counts before we remove the
2684 * directory entry.
2685 *
2686 * This is still safe from a transactional point of view - it is not until we
2687 * get to xfs_defer_finish() that we have the possibility of multiple
2688 * transactions in this operation. Hence as long as we remove the directory
2689 * entry and drop the link count in the first transaction of the remove
2690 * operation, there are no transactional constraints on the ordering here.
2691 */
2692int
2693xfs_remove(
2694	xfs_inode_t             *dp,
2695	struct xfs_name		*name,
2696	xfs_inode_t		*ip)
2697{
2698	xfs_mount_t		*mp = dp->i_mount;
2699	xfs_trans_t             *tp = NULL;
2700	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2701	int                     error = 0;
2702	uint			resblks;
2703
2704	trace_xfs_remove(dp, name);
2705
2706	if (XFS_FORCED_SHUTDOWN(mp))
2707		return -EIO;
2708
2709	error = xfs_qm_dqattach(dp);
2710	if (error)
2711		goto std_return;
2712
2713	error = xfs_qm_dqattach(ip);
2714	if (error)
2715		goto std_return;
2716
2717	/*
2718	 * We try to get the real space reservation first,
2719	 * allowing for directory btree deletion(s) implying
2720	 * possible bmap insert(s).  If we can't get the space
2721	 * reservation then we use 0 instead, and avoid the bmap
2722	 * btree insert(s) in the directory code by, if the bmap
2723	 * insert tries to happen, instead trimming the LAST
2724	 * block from the directory.
2725	 */
2726	resblks = XFS_REMOVE_SPACE_RES(mp);
2727	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2728	if (error == -ENOSPC) {
2729		resblks = 0;
2730		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2731				&tp);
2732	}
2733	if (error) {
2734		ASSERT(error != -ENOSPC);
2735		goto std_return;
2736	}
2737
2738	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2739
2740	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2741	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2742
2743	/*
2744	 * If we're removing a directory perform some additional validation.
2745	 */
2746	if (is_dir) {
2747		ASSERT(VFS_I(ip)->i_nlink >= 2);
2748		if (VFS_I(ip)->i_nlink != 2) {
2749			error = -ENOTEMPTY;
2750			goto out_trans_cancel;
2751		}
2752		if (!xfs_dir_isempty(ip)) {
2753			error = -ENOTEMPTY;
2754			goto out_trans_cancel;
2755		}
2756
2757		/* Drop the link from ip's "..".  */
2758		error = xfs_droplink(tp, dp);
2759		if (error)
2760			goto out_trans_cancel;
2761
2762		/* Drop the "." link from ip to self.  */
2763		error = xfs_droplink(tp, ip);
2764		if (error)
2765			goto out_trans_cancel;
2766
2767		/*
2768		 * Point the unlinked child directory's ".." entry to the root
2769		 * directory to eliminate back-references to inodes that may
2770		 * get freed before the child directory is closed.  If the fs
2771		 * gets shrunk, this can lead to dirent inode validation errors.
2772		 */
2773		if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
2774			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
2775					tp->t_mountp->m_sb.sb_rootino, 0);
2776			if (error)
2777				return error;
2778		}
2779	} else {
2780		/*
2781		 * When removing a non-directory we need to log the parent
2782		 * inode here.  For a directory this is done implicitly
2783		 * by the xfs_droplink call for the ".." entry.
2784		 */
2785		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2786	}
2787	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2788
2789	/* Drop the link from dp to ip. */
2790	error = xfs_droplink(tp, ip);
2791	if (error)
2792		goto out_trans_cancel;
2793
2794	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2795	if (error) {
2796		ASSERT(error != -ENOENT);
2797		goto out_trans_cancel;
2798	}
2799
2800	/*
2801	 * If this is a synchronous mount, make sure that the
2802	 * remove transaction goes to disk before returning to
2803	 * the user.
2804	 */
2805	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2806		xfs_trans_set_sync(tp);
2807
2808	error = xfs_trans_commit(tp);
2809	if (error)
2810		goto std_return;
2811
2812	if (is_dir && xfs_inode_is_filestream(ip))
2813		xfs_filestream_deassociate(ip);
2814
2815	return 0;
2816
2817 out_trans_cancel:
2818	xfs_trans_cancel(tp);
2819 std_return:
2820	return error;
2821}
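
/*
 * An illustration of the ordering constraint described above xfs_remove:
 * removing the directory entry first could lock the AGF (freed dir
 * blocks), and dropping the link count afterwards would then lock the
 * AGI, giving AGF -> AGI.  Run concurrently with inode allocation, which
 * locks AGI -> AGF, that is a classic ABBA deadlock; dropping the link
 * count first keeps every path at AGI -> AGF.
 */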
2822
2823/*
2824 * Enter all inodes for a rename transaction into a sorted array.
2825 */
2826#define __XFS_SORT_INODES	5
2827STATIC void
2828xfs_sort_for_rename(
2829	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
2830	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
2831	struct xfs_inode	*ip1,	/* in: inode of old entry */
2832	struct xfs_inode	*ip2,	/* in: inode of new entry */
2833	struct xfs_inode	*wip,	/* in: whiteout inode */
2834	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
2835	int			*num_inodes)  /* in/out: inodes in array */
2836{
2837	int			i, j;
2838
2839	ASSERT(*num_inodes == __XFS_SORT_INODES);
2840	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2841
2842	/*
2843	 * i_tab contains a list of pointers to inodes.  We initialize
2844	 * the table here & we'll sort it.  We will then use it to
2845	 * order the acquisition of the inode locks.
2846	 *
2847	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2848	 */
2849	i = 0;
2850	i_tab[i++] = dp1;
2851	i_tab[i++] = dp2;
2852	i_tab[i++] = ip1;
2853	if (ip2)
2854		i_tab[i++] = ip2;
2855	if (wip)
2856		i_tab[i++] = wip;
2857	*num_inodes = i;
2858
2859	/*
2860	 * Sort the elements via bubble sort.  (Remember, there are at
2861	 * most 5 elements to sort, so this is adequate.)
2862	 */
2863	for (i = 0; i < *num_inodes; i++) {
2864		for (j = 1; j < *num_inodes; j++) {
2865			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2866				struct xfs_inode *temp = i_tab[j];
2867				i_tab[j] = i_tab[j-1];
2868				i_tab[j-1] = temp;
2869			}
2870		}
2871	}
2872}
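
/*
 * A standalone sketch of the sort above (illustrative only, not kernel
 * code; inode numbers are hypothetical).  Duplicates -- here dp1 == dp2
 * -- end up adjacent, so lock ordering code can take each lock once in
 * strictly ascending order.
 */
#if 0	/* never compiled; extract to build as a userspace demo */
#include <stdio.h>

int main(void)
{
	unsigned long ino[] = { 57, 57, 12, 3, 90 };	/* dp1 dp2 ip1 ip2 wip */
	int n = 5, i, j;

	for (i = 0; i < n; i++)		/* same bubble sort shape as above */
		for (j = 1; j < n; j++)
			if (ino[j] < ino[j - 1]) {
				unsigned long t = ino[j];

				ino[j] = ino[j - 1];
				ino[j - 1] = t;
			}

	for (i = 0; i < n; i++)
		printf("%lu ", ino[i]);	/* prints: 3 12 57 57 90 */
	printf("\n");
	return 0;
}
#endif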
2873
2874static int
2875xfs_finish_rename(
2876	struct xfs_trans	*tp)
2877{
2878	/*
2879	 * If this is a synchronous mount, make sure that the rename transaction
2880	 * goes to disk before returning to the user.
2881	 */
2882	if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2883		xfs_trans_set_sync(tp);
2884
2885	return xfs_trans_commit(tp);
2886}
2887
2888/*
2889 * xfs_cross_rename()
2890 *
2891 * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
2892 */
2893STATIC int
2894xfs_cross_rename(
2895	struct xfs_trans	*tp,
2896	struct xfs_inode	*dp1,
2897	struct xfs_name		*name1,
2898	struct xfs_inode	*ip1,
2899	struct xfs_inode	*dp2,
2900	struct xfs_name		*name2,
2901	struct xfs_inode	*ip2,
2902	int			spaceres)
2903{
2904	int		error = 0;
2905	int		ip1_flags = 0;
2906	int		ip2_flags = 0;
2907	int		dp2_flags = 0;
2908
2909	/* Swap inode number for dirent in first parent */
2910	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2911	if (error)
2912		goto out_trans_abort;
2913
2914	/* Swap inode number for dirent in second parent */
2915	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2916	if (error)
2917		goto out_trans_abort;
2918
2919	/*
2920	 * If we're renaming one or more directories across different parents,
2921	 * update the respective ".." entries (and link counts) to match the new
2922	 * parents.
2923	 */
2924	if (dp1 != dp2) {
2925		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2926
2927		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2928			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2929						dp1->i_ino, spaceres);
2930			if (error)
2931				goto out_trans_abort;
2932
2933			/* transfer ip2 ".." reference to dp1 */
2934			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2935				error = xfs_droplink(tp, dp2);
2936				if (error)
2937					goto out_trans_abort;
2938				xfs_bumplink(tp, dp1);
2939			}
2940
2941			/*
2942			 * Although ip1 isn't changed here, userspace needs
2943			 * to be warned about the change, so that applications
2944			 * relying on it (like backup tools) will properly
2945			 * notice the change.
2946			 */
2947			ip1_flags |= XFS_ICHGTIME_CHG;
2948			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2949		}
2950
2951		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2952			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2953						dp2->i_ino, spaceres);
2954			if (error)
2955				goto out_trans_abort;
2956
2957			/* transfer ip1 ".." reference to dp2 */
2958			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2959				error = xfs_droplink(tp, dp1);
2960				if (error)
2961					goto out_trans_abort;
2962				xfs_bumplink(tp, dp2);
2963			}
2964
2965			/*
2966			 * Although ip2 isn't changed here, userspace needs
2967			 * to be warned about the change, so that applications
2968			 * relying on it (like backup tools) will properly
2969			 * notice the change.
2970			 */
2971			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2972			ip2_flags |= XFS_ICHGTIME_CHG;
2973		}
2974	}
2975
2976	if (ip1_flags) {
2977		xfs_trans_ichgtime(tp, ip1, ip1_flags);
2978		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2979	}
2980	if (ip2_flags) {
2981		xfs_trans_ichgtime(tp, ip2, ip2_flags);
2982		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2983	}
2984	if (dp2_flags) {
2985		xfs_trans_ichgtime(tp, dp2, dp2_flags);
2986		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2987	}
2988	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2989	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2990	return xfs_finish_rename(tp);
2991
2992out_trans_abort:
2993	xfs_trans_cancel(tp);
2994	return error;
2995}
2996
2997/*
2998 * xfs_rename_alloc_whiteout()
2999 *
3000 * Return a referenced, unlinked, unlocked inode that can be used as a
3001 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
3002 * crash between allocating the inode and linking it into the rename
3003 * transaction, recovery will free the inode and we won't leak it.
3004 */
3005static int
3006xfs_rename_alloc_whiteout(
3007	struct user_namespace	*mnt_userns,
3008	struct xfs_inode	*dp,
3009	struct xfs_inode	**wip)
3010{
3011	struct xfs_inode	*tmpfile;
3012	int			error;
3013
3014	error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
3015				   &tmpfile);
3016	if (error)
3017		return error;
3018
3019	/*
3020	 * Prepare the tmpfile inode as if it were created through the VFS.
3021	 * Complete the inode setup and flag it as linkable.  nlink is already
3022	 * zero, so we can skip the drop_nlink.
3023	 */
3024	xfs_setup_iops(tmpfile);
3025	xfs_finish_inode_setup(tmpfile);
3026	VFS_I(tmpfile)->i_state |= I_LINKABLE;
3027
3028	*wip = tmpfile;
3029	return 0;
3030}
3031
3032/*
3033 * xfs_rename
3034 */
3035int
3036xfs_rename(
3037	struct user_namespace	*mnt_userns,
3038	struct xfs_inode	*src_dp,
3039	struct xfs_name		*src_name,
3040	struct xfs_inode	*src_ip,
3041	struct xfs_inode	*target_dp,
3042	struct xfs_name		*target_name,
3043	struct xfs_inode	*target_ip,
3044	unsigned int		flags)
3045{
3046	struct xfs_mount	*mp = src_dp->i_mount;
3047	struct xfs_trans	*tp;
3048	struct xfs_inode	*wip = NULL;		/* whiteout inode */
3049	struct xfs_inode	*inodes[__XFS_SORT_INODES];
3050	int			i;
3051	int			num_inodes = __XFS_SORT_INODES;
3052	bool			new_parent = (src_dp != target_dp);
3053	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3054	int			spaceres;
3055	int			error;
3056
3057	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3058
3059	if ((flags & RENAME_EXCHANGE) && !target_ip)
3060		return -EINVAL;
3061
3062	/*
3063	 * If we are doing a whiteout operation, allocate the whiteout inode
3064	 * we will be placing at the target and ensure the type is set
3065	 * appropriately.
3066	 */
3067	if (flags & RENAME_WHITEOUT) {
3068		ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
3069		error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
3070		if (error)
3071			return error;
3072
3073		/* setup target dirent info as whiteout */
3074		src_name->type = XFS_DIR3_FT_CHRDEV;
3075	}
3076
3077	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3078				inodes, &num_inodes);
3079
3080	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3081	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
3082	if (error == -ENOSPC) {
3083		spaceres = 0;
3084		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3085				&tp);
3086	}
3087	if (error)
3088		goto out_release_wip;
3089
3090	/*
3091	 * Attach the dquots to the inodes
3092	 */
3093	error = xfs_qm_vop_rename_dqattach(inodes);
3094	if (error)
3095		goto out_trans_cancel;
3096
3097	/*
3098	 * Lock all the participating inodes. Depending upon whether
3099	 * the target_name exists in the target directory, and
3100	 * whether the target directory is the same as the source
3101	 * directory, we can lock from 2 to 4 inodes.
3102	 */
3103	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3104
3105	/*
3106	 * Join all the inodes to the transaction. From this point on,
3107	 * we can rely on either trans_commit or trans_cancel to unlock
3108	 * them.
3109	 */
3110	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3111	if (new_parent)
3112		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3113	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3114	if (target_ip)
3115		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
3116	if (wip)
3117		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3118
3119	/*
3120	 * If we are using project inheritance, we only allow renames
3121	 * into our tree when the project IDs are the same; else the
3122	 * tree quota mechanism would be circumvented.
3123	 */
3124	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
3125		     target_dp->i_projid != src_ip->i_projid)) {
3126		error = -EXDEV;
3127		goto out_trans_cancel;
3128	}
3129
3130	/* RENAME_EXCHANGE is unique from here on. */
3131	if (flags & RENAME_EXCHANGE)
3132		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3133					target_dp, target_name, target_ip,
3134					spaceres);
3135
3136	/*
3137	 * Check for expected errors before we dirty the transaction
3138	 * so we can return an error without a transaction abort.
3139	 *
3140	 * Extent count overflow check:
3141	 *
3142	 * From the perspective of src_dp, a rename operation is essentially a
3143	 * directory entry remove operation. Hence the only place where we check
3144	 * for extent count overflow for src_dp is in
3145	 * xfs_bmap_del_extent_real(). xfs_bmap_del_extent_real() returns
3146	 * -ENOSPC when it detects a possible extent count overflow and in
3147	 * response, the higher layers of directory handling code do the
3148	 * following:
3149	 * 1. Data/Free blocks: XFS lets these blocks linger until a
3150	 *    future remove operation removes them.
3151	 * 2. Dabtree blocks: XFS swaps the blocks with the last block in the
3152	 *    Leaf space and unmaps the last block.
3153	 *
3154	 * For target_dp, there are two cases depending on whether the
3155	 * destination directory entry exists or not.
3156	 *
3157	 * When destination directory entry does not exist (i.e. target_ip ==
3158	 * NULL), extent count overflow check is performed only when transaction
3159	 * has a non-zero sized space reservation associated with it.  With a
3160	 * zero-sized space reservation, XFS allows a rename operation to
3161	 * continue only when the directory has sufficient free space in its
3162	 * data/leaf/free space blocks to hold the new entry.
3163	 *
3164	 * When destination directory entry exists (i.e. target_ip != NULL), all
3165	 * we need to do is change the inode number associated with the already
3166	 * existing entry. Hence there is no need to perform an extent count
3167	 * overflow check.
3168	 */
3169	if (target_ip == NULL) {
3170		/*
3171		 * If there's no space reservation, check the entry will
3172		 * fit before actually inserting it.
3173		 */
3174		if (!spaceres) {
3175			error = xfs_dir_canenter(tp, target_dp, target_name);
3176			if (error)
3177				goto out_trans_cancel;
3178		} else {
3179			error = xfs_iext_count_may_overflow(target_dp,
3180					XFS_DATA_FORK,
3181					XFS_IEXT_DIR_MANIP_CNT(mp));
3182			if (error)
3183				goto out_trans_cancel;
3184		}
3185	} else {
3186		/*
3187		 * If target exists and it's a directory, check whether
3188		 * it can be destroyed.
3189		 */
3190		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3191		    (!xfs_dir_isempty(target_ip) ||
3192		     (VFS_I(target_ip)->i_nlink > 2))) {
3193			error = -EEXIST;
3194			goto out_trans_cancel;
3195		}
3196	}
3197
3198	/*
3199	 * Lock the AGI buffers we need to handle bumping the nlink of the
3200	 * whiteout inode off the unlinked list and to handle dropping the
3201	 * nlink of the target inode.  Per locking order rules, do this in
3202	 * increasing AG order and before directory block allocation tries to
3203	 * grab AGFs because we grab AGIs before AGFs.
3204	 *
3205	 * The (vfs) caller must ensure that if src is a directory then
3206	 * target_ip is either null or an empty directory.
3207	 */
3208	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
3209		if (inodes[i] == wip ||
3210		    (inodes[i] == target_ip &&
3211		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
3212			struct xfs_buf	*bp;
3213			xfs_agnumber_t	agno;
3214
3215			agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
3216			error = xfs_read_agi(mp, tp, agno, &bp);
3217			if (error)
3218				goto out_trans_cancel;
3219		}
3220	}
3221
3222	/*
3223	 * Directory entry creation below may acquire the AGF. Remove
3224	 * the whiteout from the unlinked list first to preserve correct
3225	 * AGI/AGF locking order. This dirties the transaction so failures
3226	 * after this point will abort and log recovery will clean up the
3227	 * mess.
3228	 *
3229	 * For whiteouts, we need to bump the link count on the whiteout
3230	 * inode. After this point we have a real link, so clear the tmpfile
3231	 * state flag from the inode so it doesn't accidentally get misused
3232	 * in future.
3233	 */
3234	if (wip) {
3235		struct xfs_perag	*pag;
3236
3237		ASSERT(VFS_I(wip)->i_nlink == 0);
3238
3239		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
3240		error = xfs_iunlink_remove(tp, pag, wip);
3241		xfs_perag_put(pag);
3242		if (error)
3243			goto out_trans_cancel;
3244
3245		xfs_bumplink(tp, wip);
3246		VFS_I(wip)->i_state &= ~I_LINKABLE;
3247	}
3248
3249	/*
3250	 * Set up the target.
3251	 */
3252	if (target_ip == NULL) {
3253		/*
3254		 * If target does not exist and the rename crosses
3255		 * directories, adjust the target directory link count
3256		 * to account for the ".." reference from the new entry.
3257		 */
3258		error = xfs_dir_createname(tp, target_dp, target_name,
3259					   src_ip->i_ino, spaceres);
3260		if (error)
3261			goto out_trans_cancel;
3262
3263		xfs_trans_ichgtime(tp, target_dp,
3264					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3265
3266		if (new_parent && src_is_directory) {
3267			xfs_bumplink(tp, target_dp);
3268		}
3269	} else { /* target_ip != NULL */
3270		/*
3271		 * Link the source inode under the target name.
3272		 * If the source inode is a directory and we are moving
3273		 * it across directories, its ".." entry will be
3274		 * inconsistent until we replace that down below.
3275		 *
3276		 * In case there is already an entry with the same
3277		 * name at the destination directory, remove it first.
3278		 */
3279		error = xfs_dir_replace(tp, target_dp, target_name,
3280					src_ip->i_ino, spaceres);
3281		if (error)
3282			goto out_trans_cancel;
3283
3284		xfs_trans_ichgtime(tp, target_dp,
3285					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3286
3287		/*
3288		 * Decrement the link count on the target since the target
3289		 * dir no longer points to it.
3290		 */
3291		error = xfs_droplink(tp, target_ip);
3292		if (error)
3293			goto out_trans_cancel;
3294
3295		if (src_is_directory) {
3296			/*
3297			 * Drop the link from the old "." entry.
3298			 */
3299			error = xfs_droplink(tp, target_ip);
3300			if (error)
3301				goto out_trans_cancel;
3302		}
3303	} /* target_ip != NULL */
3304
3305	/*
3306	 * Remove the source.
3307	 */
3308	if (new_parent && src_is_directory) {
3309		/*
3310		 * Rewrite the ".." entry to point to the new
3311		 * directory.
3312		 */
3313		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3314					target_dp->i_ino, spaceres);
3315		ASSERT(error != -EEXIST);
3316		if (error)
3317			goto out_trans_cancel;
3318	}
3319
3320	/*
3321	 * We always want to hit the ctime on the source inode.
3322	 *
3323	 * This isn't strictly required by the standards since the source
3324	 * inode isn't really being changed, but old unix file systems did
3325	 * it and some incremental backup programs won't work without it.
3326	 */
3327	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3328	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3329
3330	/*
3331	 * Adjust the link count on src_dp.  This is necessary when
3332	 * renaming a directory, either within one parent when
3333	 * the target existed, or across two parent directories.
3334	 */
3335	if (src_is_directory && (new_parent || target_ip != NULL)) {
3336
3337		/*
3338		 * Decrement link count on src_directory since the
3339		 * entry that's moved no longer points to it.
3340		 */
3341		error = xfs_droplink(tp, src_dp);
3342		if (error)
3343			goto out_trans_cancel;
3344	}
3345
3346	/*
3347	 * For whiteouts, we only need to update the source dirent with the
3348	 * inode number of the whiteout inode rather than removing it
3349	 * altogether.
3350	 */
3351	if (wip) {
3352		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3353					spaceres);
3354	} else {
3355		/*
3356		 * NOTE: We don't need to check for extent count overflow here
3357		 * because the dir remove name code will leave the dir block in
3358		 * place if the extent count would overflow.
3359		 */
3360		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3361					   spaceres);
3362	}
3363
3364	if (error)
3365		goto out_trans_cancel;
3366
3367	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3368	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3369	if (new_parent)
3370		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3371
3372	error = xfs_finish_rename(tp);
3373	if (wip)
3374		xfs_irele(wip);
3375	return error;
3376
3377out_trans_cancel:
3378	xfs_trans_cancel(tp);
3379out_release_wip:
3380	if (wip)
3381		xfs_irele(wip);
3382	return error;
3383}
3384
3385static int
3386xfs_iflush(
3387	struct xfs_inode	*ip,
3388	struct xfs_buf		*bp)
3389{
3390	struct xfs_inode_log_item *iip = ip->i_itemp;
3391	struct xfs_dinode	*dip;
3392	struct xfs_mount	*mp = ip->i_mount;
3393	int			error;
3394
3395	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3396	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3397	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3398	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3399	ASSERT(iip->ili_item.li_buf == bp);
3400
3401	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3402
3403	/*
3404	 * We don't flush the inode if any of the following checks fail, but we
3405	 * do still update the log item and attach to the backing buffer as if
3406	 * the flush happened. This is a formality to facilitate predictable
3407	 * error handling as the caller will shutdown and fail the buffer.
3408	 */
3409	error = -EFSCORRUPTED;
3410	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3411			       mp, XFS_ERRTAG_IFLUSH_1)) {
3412		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3413			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3414			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3415		goto flush_out;
3416	}
3417	if (S_ISREG(VFS_I(ip)->i_mode)) {
3418		if (XFS_TEST_ERROR(
3419		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3420		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3421		    mp, XFS_ERRTAG_IFLUSH_3)) {
3422			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3423				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
3424				__func__, ip->i_ino, ip);
3425			goto flush_out;
3426		}
3427	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3428		if (XFS_TEST_ERROR(
3429		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3430		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3431		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3432		    mp, XFS_ERRTAG_IFLUSH_4)) {
3433			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3434				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
3435				__func__, ip->i_ino, ip);
3436			goto flush_out;
3437		}
3438	}
3439	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
3440				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3441		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3442			"%s: detected corrupt incore inode %Lu, "
3443			"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
3444			__func__, ip->i_ino,
3445			ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
3446			ip->i_nblocks, ip);
3447		goto flush_out;
3448	}
3449	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
3450				mp, XFS_ERRTAG_IFLUSH_6)) {
3451		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3452			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3453			__func__, ip->i_ino, ip->i_forkoff, ip);
3454		goto flush_out;
3455	}
3456
3457	/*
3458	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3459	 * count for correct sequencing.  We bump the flush iteration count so
3460	 * we can detect flushes which postdate a log record during recovery.
3461	 * This is redundant as we now log every change and hence this can't
3462	 * happen, but we still need to do it to ensure backwards compatibility
3463	 * with old kernels that predate logging all inode changes.
3464	 */
3465	if (!xfs_sb_version_has_v3inode(&mp->m_sb))
3466		ip->i_flushiter++;
3467
3468	/*
3469	 * If there are inline format data / attr forks attached to this inode,
3470	 * make sure they are not corrupt.
3471	 */
3472	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3473	    xfs_ifork_verify_local_data(ip))
3474		goto flush_out;
3475	if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
3476	    xfs_ifork_verify_local_attr(ip))
3477		goto flush_out;
3478
3479	/*
3480	 * Copy the dirty parts of the inode into the on-disk inode.  We always
3481	 * copy out the core of the inode, because if the inode is dirty at all
3482	 * the core must be.
3483	 */
3484	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3485
3486	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3487	if (!xfs_sb_version_has_v3inode(&mp->m_sb)) {
3488		if (ip->i_flushiter == DI_MAX_FLUSH)
3489			ip->i_flushiter = 0;
3490	}
3491
3492	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3493	if (XFS_IFORK_Q(ip))
3494		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3495
3496	/*
3497	 * We've recorded everything logged in the inode, so we'd like to clear
3498	 * the ili_fields bits so we don't log and flush things unnecessarily.
3499	 * However, we can't stop logging all this information until the data
3500	 * we've copied into the disk buffer is written to disk.  If we did we
3501	 * might overwrite the copy of the inode in the log with all the data
3502	 * after re-logging only part of it, and in the face of a crash we
3503	 * wouldn't have all the data we need to recover.
3504	 *
3505	 * What we do is move the bits to the ili_last_fields field.  When
3506	 * logging the inode, these bits are moved back to the ili_fields field.
3507	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3508	 * we know that the information those bits represent is permanently on
3509	 * disk.  As long as the flush completes before the inode is logged
3510	 * again, then both ili_fields and ili_last_fields will be cleared.
3511	 */
3512	error = 0;
3513flush_out:
3514	spin_lock(&iip->ili_lock);
3515	iip->ili_last_fields = iip->ili_fields;
3516	iip->ili_fields = 0;
3517	iip->ili_fsync_fields = 0;
3518	spin_unlock(&iip->ili_lock);
3519
3520	/*
3521	 * Store the current LSN of the inode so that we can tell whether the
3522	 * item has moved in the AIL from xfs_buf_inode_iodone().
3523	 */
3524	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3525				&iip->ili_item.li_lsn);
3526
3527	/* generate the checksum. */
3528	xfs_dinode_calc_crc(mp, dip);
3529	return error;
3530}
3531
3532/*
3533 * Non-blocking flush of dirty inode metadata into the backing buffer.
3534 *
3535 * The caller must have a reference to the inode and hold the cluster buffer
3536 * locked. The function will walk across all the inodes on the cluster buffer it
3537 * can find and lock without blocking, and flush them to the cluster buffer.
3538 *
3539 * On successful flushing of at least one inode, the caller must write out the
3540 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3541 * the caller needs to release the buffer. On failure, the filesystem will be
3542 * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
3543 * will be returned.
3544 */
3545int
3546xfs_iflush_cluster(
3547	struct xfs_buf		*bp)
3548{
3549	struct xfs_mount	*mp = bp->b_mount;
3550	struct xfs_log_item	*lip, *n;
3551	struct xfs_inode	*ip;
3552	struct xfs_inode_log_item *iip;
3553	int			clcount = 0;
3554	int			error = 0;
3555
3556	/*
3557	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3558	 * can remove itself from the list.
3559	 */
3560	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3561		iip = (struct xfs_inode_log_item *)lip;
3562		ip = iip->ili_inode;
3563
3564		/*
3565		 * Quick and dirty check to avoid locks if possible.
3566		 */
3567		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
3568			continue;
3569		if (xfs_ipincount(ip))
3570			continue;
3571
3572		/*
3573		 * The inode is still attached to the buffer, which means it is
3574		 * dirty but reclaim might try to grab it. Check carefully for
3575		 * that, and grab the ilock while still holding the i_flags_lock
3576		 * to guarantee reclaim will not be able to reclaim this inode
3577		 * once we drop the i_flags_lock.
3578		 */
3579		spin_lock(&ip->i_flags_lock);
3580		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3581		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
3582			spin_unlock(&ip->i_flags_lock);
3583			continue;
3584		}
3585
3586		/*
3587		 * ILOCK will pin the inode against reclaim and prevent
3588		 * concurrent transactions modifying the inode while we are
3589		 * flushing the inode. If we get the lock, set the flushing
3590		 * state before we drop the i_flags_lock.
3591		 */
3592		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3593			spin_unlock(&ip->i_flags_lock);
3594			continue;
3595		}
3596		__xfs_iflags_set(ip, XFS_IFLUSHING);
3597		spin_unlock(&ip->i_flags_lock);
3598
3599		/*
3600		 * Abort flushing this inode if we are shut down because the
3601		 * inode may not currently be in the AIL. This can occur when
3602		 * log I/O failure unpins the inode without inserting into the
3603		 * AIL, leaving a dirty/unpinned inode attached to the buffer
3604		 * that otherwise looks like it should be flushed.
3605		 */
3606		if (XFS_FORCED_SHUTDOWN(mp)) {
3607			xfs_iunpin_wait(ip);
3608			xfs_iflush_abort(ip);
3609			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3610			error = -EIO;
3611			continue;
3612		}
3613
3614		/* don't block waiting on a log force to unpin dirty inodes */
3615		if (xfs_ipincount(ip)) {
3616			xfs_iflags_clear(ip, XFS_IFLUSHING);
3617			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3618			continue;
3619		}
3620
3621		if (!xfs_inode_clean(ip))
3622			error = xfs_iflush(ip, bp);
3623		else
3624			xfs_iflags_clear(ip, XFS_IFLUSHING);
3625		xfs_iunlock(ip, XFS_ILOCK_SHARED);
3626		if (error)
3627			break;
3628		clcount++;
3629	}
3630
3631	if (error) {
3632		bp->b_flags |= XBF_ASYNC;
3633		xfs_buf_ioend_fail(bp);
3634		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3635		return error;
3636	}
3637
3638	if (!clcount)
3639		return -EAGAIN;
3640
3641	XFS_STATS_INC(mp, xs_icluster_flushcnt);
3642	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3643	return 0;
3644
3645}
3646
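/*
 * Editor's note: a minimal sketch of the caller contract described in the
 * comment above, assuming a locked cluster buffer "bp" whose inodes are
 * already referenced. The function name and the delwri list are
 * illustrative; this is not the actual call site.
 */
static void example_push_inode_cluster(struct xfs_buf *bp,
				       struct list_head *buffer_list)
{
	int	error = xfs_iflush_cluster(bp);

	if (!error) {
		/* At least one inode flushed: queue the write-back. */
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
	} else if (error == -EAGAIN) {
		/* Nothing flushed: the buffer is still ours to release. */
		xfs_buf_relse(bp);
	}
	/*
	 * Any other error: xfs_iflush_cluster() has already shut the
	 * filesystem down and released the buffer, so there is nothing
	 * left to undo here.
	 */
}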
3647/* Release an inode. */
3648void
3649xfs_irele(
3650	struct xfs_inode	*ip)
3651{
3652	trace_xfs_irele(ip, _RET_IP_);
3653	iput(VFS_I(ip));
3654}
3655
3656/*
3657 * Ensure all committed transactions touching the inode are written to the log.
3658 */
3659int
3660xfs_log_force_inode(
3661	struct xfs_inode	*ip)
3662{
3663	xfs_csn_t		seq = 0;
3664
3665	xfs_ilock(ip, XFS_ILOCK_SHARED);
3666	if (xfs_ipincount(ip))
3667		seq = ip->i_itemp->ili_commit_seq;
3668	xfs_iunlock(ip, XFS_ILOCK_SHARED);
3669
3670	if (!seq)
3671		return 0;
3672	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
3673}
3674
3675/*
3676 * Grab the exclusive iolock for a data copy from src to dest, making sure to
3677 * abide by the VFS locking order (lowest pointer value goes first) and break the
3678 * layout leases before proceeding.  The loop is needed because we cannot call
3679 * the blocking break_layout() with the iolocks held, and therefore have to
3680 * back out both locks.
3681 */
3682static int
3683xfs_iolock_two_inodes_and_break_layout(
3684	struct inode		*src,
3685	struct inode		*dest)
3686{
3687	int			error;
3688
3689	if (src > dest)
3690		swap(src, dest);
3691
3692retry:
3693	/* Wait to break both inodes' layouts before we start locking. */
3694	error = break_layout(src, true);
3695	if (error)
3696		return error;
3697	if (src != dest) {
3698		error = break_layout(dest, true);
3699		if (error)
3700			return error;
3701	}
3702
3703	/* Lock one inode and make sure nobody got in and leased it. */
3704	inode_lock(src);
3705	error = break_layout(src, false);
3706	if (error) {
3707		inode_unlock(src);
3708		if (error == -EWOULDBLOCK)
3709			goto retry;
3710		return error;
3711	}
3712
3713	if (src == dest)
3714		return 0;
3715
3716	/* Lock the other inode and make sure nobody got in and leased it. */
3717	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3718	error = break_layout(dest, false);
3719	if (error) {
3720		inode_unlock(src);
3721		inode_unlock(dest);
3722		if (error == -EWOULDBLOCK)
3723			goto retry;
3724		return error;
3725	}
3726
3727	return 0;
3728}
3729
3730/*
3731 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3732 * mmap activity.
3733 */
3734int
3735xfs_ilock2_io_mmap(
3736	struct xfs_inode	*ip1,
3737	struct xfs_inode	*ip2)
3738{
3739	int			ret;
3740
3741	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3742	if (ret)
3743		return ret;
3744	if (ip1 == ip2)
3745		xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3746	else
3747		xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
3748				    ip2, XFS_MMAPLOCK_EXCL);
3749	return 0;
3750}
3751
3752/* Unlock both inodes to allow IO and mmap activity. */
3753void
3754xfs_iunlock2_io_mmap(
3755	struct xfs_inode	*ip1,
3756	struct xfs_inode	*ip2)
3757{
3758	bool			same_inode = (ip1 == ip2);
3759
3760	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3761	if (!same_inode)
3762		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3763	inode_unlock(VFS_I(ip2));
3764	if (!same_inode)
3765		inode_unlock(VFS_I(ip1));
3766}
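/*
 * Editor's note: an illustrative pairing of the two helpers above, roughly
 * the shape of a file-remap setup path. The function name is hypothetical
 * and the remap work itself is elided.
 */
static int example_remap_file_prep(struct xfs_inode *src,
				   struct xfs_inode *dest)
{
	int	error;

	/* Breaks layout leases and takes i_rwsem plus MMAPLOCK on both. */
	error = xfs_ilock2_io_mmap(src, dest);
	if (error)
		return error;

	/* ... no new I/O or page faults can start; do the remap work ... */

	xfs_iunlock2_io_mmap(src, dest);
	return 0;
}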
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include <linux/iversion.h>
   7
   8#include "xfs.h"
   9#include "xfs_fs.h"
  10#include "xfs_shared.h"
  11#include "xfs_format.h"
  12#include "xfs_log_format.h"
  13#include "xfs_trans_resv.h"
  14#include "xfs_sb.h"
  15#include "xfs_mount.h"
  16#include "xfs_defer.h"
  17#include "xfs_inode.h"
  18#include "xfs_dir2.h"
  19#include "xfs_attr.h"
  20#include "xfs_trans_space.h"
  21#include "xfs_trans.h"
  22#include "xfs_buf_item.h"
  23#include "xfs_inode_item.h"
  24#include "xfs_ialloc.h"
  25#include "xfs_bmap.h"
  26#include "xfs_bmap_util.h"
  27#include "xfs_errortag.h"
  28#include "xfs_error.h"
  29#include "xfs_quota.h"
  30#include "xfs_filestream.h"
  31#include "xfs_trace.h"
  32#include "xfs_icache.h"
  33#include "xfs_symlink.h"
  34#include "xfs_trans_priv.h"
  35#include "xfs_log.h"
  36#include "xfs_bmap_btree.h"
  37#include "xfs_reflink.h"
  38
  39kmem_zone_t *xfs_inode_zone;
  40
  41/*
  42 * Used in xfs_itruncate_extents().  This is the maximum number of extents
  43 * freed from a file in a single transaction.
  44 */
  45#define	XFS_ITRUNC_MAX_EXTENTS	2
  46
  47STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
  48STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
  49
  50/*
  51 * helper function to extract extent size hint from inode
  52 */
  53xfs_extlen_t
  54xfs_get_extsz_hint(
  55	struct xfs_inode	*ip)
  56{
  57	/*
  58	 * No point in aligning allocations if we need to COW to actually
  59	 * write to them.
  60	 */
  61	if (xfs_is_always_cow_inode(ip))
  62		return 0;
  63	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
  64		return ip->i_d.di_extsize;
  65	if (XFS_IS_REALTIME_INODE(ip))
  66		return ip->i_mount->m_sb.sb_rextsize;
  67	return 0;
  68}
  69
  70/*
  71 * Helper function to extract CoW extent size hint from inode.
  72 * Between the extent size hint and the CoW extent size hint, we
  73 * return the greater of the two.  If the value is zero (automatic),
  74 * use the default size.
  75 */
  76xfs_extlen_t
  77xfs_get_cowextsz_hint(
  78	struct xfs_inode	*ip)
  79{
  80	xfs_extlen_t		a, b;
  81
  82	a = 0;
  83	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
  84		a = ip->i_d.di_cowextsize;
  85	b = xfs_get_extsz_hint(ip);
  86
  87	a = max(a, b);
  88	if (a == 0)
  89		return XFS_DEFAULT_COWEXTSZ_HINT;
  90	return a;
  91}
  92
  93/*
  94 * These two are wrapper routines around the xfs_ilock() routine used to
  95 * centralize some grungy code.  They are used in places that wish to lock the
  96 * inode solely for reading the extents.  The reason these places can't just
  97 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
  98 * bringing in of the extents from disk for a file in b-tree format.  If the
  99 * inode is in b-tree format, then we need to lock the inode exclusively until
 100 * the extents are read in.  Locking it exclusively all the time would limit
 101 * our parallelism unnecessarily, though.  What we do instead is check to see
 102 * if the extents have been read in yet, and only lock the inode exclusively
 103 * if they have not.
 104 *
 105 * The functions return a value which should be given to the corresponding
 106 * xfs_iunlock() call.
 107 */
 108uint
 109xfs_ilock_data_map_shared(
 110	struct xfs_inode	*ip)
 111{
 112	uint			lock_mode = XFS_ILOCK_SHARED;
 113
 114	if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE &&
 115	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
 116		lock_mode = XFS_ILOCK_EXCL;
 117	xfs_ilock(ip, lock_mode);
 118	return lock_mode;
 119}
 120
 121uint
 122xfs_ilock_attr_map_shared(
 123	struct xfs_inode	*ip)
 124{
 125	uint			lock_mode = XFS_ILOCK_SHARED;
 126
 127	if (ip->i_afp &&
 128	    ip->i_afp->if_format == XFS_DINODE_FMT_BTREE &&
 129	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
 130		lock_mode = XFS_ILOCK_EXCL;
 131	xfs_ilock(ip, lock_mode);
 132	return lock_mode;
 133}
 134
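/*
 * Editor's note: a sketch of how the two wrappers above are meant to be
 * used - the returned lock mode must be handed back to xfs_iunlock().
 * The reader function is hypothetical.
 */
static void example_read_extent_list(struct xfs_inode *ip)
{
	uint	lock_mode = xfs_ilock_data_map_shared(ip);

	/* ... read the in-core extent list of ip->i_df here ... */

	xfs_iunlock(ip, lock_mode);
}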
 135/*
 136 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 137 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 138 * various combinations of the locks to be obtained.
 139 *
 140 * The 3 locks should always be ordered so that the IO lock is obtained first,
 141 * the mmap lock second and the ilock last in order to prevent deadlock.
 142 *
 143 * Basic locking order:
 144 *
 145 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
 146 *
 147 * mmap_lock locking order:
 148 *
 149 * i_rwsem -> page lock -> mmap_lock
 150 * mmap_lock -> i_mmap_lock -> page_lock
 151 *
 152 * The difference in mmap_lock locking order means that we cannot hold the
 153 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 154 * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
 155 * in get_user_pages() to map the user pages into the kernel address space for
 156 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 157 * page faults already hold the mmap_lock.
 158 *
 159 * Hence to serialise fully against both syscall and mmap based IO, we need to
 160 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 161 * taken in places where we need to invalidate the page cache in a race
 162 * free manner (e.g. truncate, hole punch and other extent manipulation
 163 * functions).
 164 */
 165void
 166xfs_ilock(
 167	xfs_inode_t		*ip,
 168	uint			lock_flags)
 169{
 170	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
 171
 172	/*
 173	 * You can't set both SHARED and EXCL for the same lock,
 174	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
 175	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
 176	 */
 177	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 178	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 179	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 180	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 181	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 182	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 183	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 184
 185	if (lock_flags & XFS_IOLOCK_EXCL) {
 186		down_write_nested(&VFS_I(ip)->i_rwsem,
 187				  XFS_IOLOCK_DEP(lock_flags));
 188	} else if (lock_flags & XFS_IOLOCK_SHARED) {
 189		down_read_nested(&VFS_I(ip)->i_rwsem,
 190				 XFS_IOLOCK_DEP(lock_flags));
 191	}
 192
 193	if (lock_flags & XFS_MMAPLOCK_EXCL)
 194		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
 195	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 196		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
 197
 198	if (lock_flags & XFS_ILOCK_EXCL)
 199		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
 200	else if (lock_flags & XFS_ILOCK_SHARED)
 201		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
 202}
 203
 204/*
 205 * This is just like xfs_ilock(), except that the caller
 206 * is guaranteed not to sleep.  It returns 1 if it gets
 207 * the requested locks and 0 otherwise.  If the IO lock is
 208 * obtained but the inode lock cannot be, then the IO lock
 209 * is dropped before returning.
 210 *
 211 * ip -- the inode being locked
 212 * lock_flags -- this parameter indicates the inode's locks
 213 *       to be locked.  See the comment for xfs_ilock() for a list
 214 *	 of valid values.
 215 */
 216int
 217xfs_ilock_nowait(
 218	xfs_inode_t		*ip,
 219	uint			lock_flags)
 220{
 221	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
 222
 223	/*
 224	 * You can't set both SHARED and EXCL for the same lock,
 225	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
 226	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
 227	 */
 228	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 229	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 230	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 231	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 232	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 233	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 234	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 235
 236	if (lock_flags & XFS_IOLOCK_EXCL) {
 237		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
 238			goto out;
 239	} else if (lock_flags & XFS_IOLOCK_SHARED) {
 240		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
 241			goto out;
 242	}
 243
 244	if (lock_flags & XFS_MMAPLOCK_EXCL) {
 245		if (!mrtryupdate(&ip->i_mmaplock))
 246			goto out_undo_iolock;
 247	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
 248		if (!mrtryaccess(&ip->i_mmaplock))
 249			goto out_undo_iolock;
 250	}
 251
 252	if (lock_flags & XFS_ILOCK_EXCL) {
 253		if (!mrtryupdate(&ip->i_lock))
 254			goto out_undo_mmaplock;
 255	} else if (lock_flags & XFS_ILOCK_SHARED) {
 256		if (!mrtryaccess(&ip->i_lock))
 257			goto out_undo_mmaplock;
 258	}
 259	return 1;
 260
 261out_undo_mmaplock:
 262	if (lock_flags & XFS_MMAPLOCK_EXCL)
 263		mrunlock_excl(&ip->i_mmaplock);
 264	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 265		mrunlock_shared(&ip->i_mmaplock);
 266out_undo_iolock:
 267	if (lock_flags & XFS_IOLOCK_EXCL)
 268		up_write(&VFS_I(ip)->i_rwsem);
 269	else if (lock_flags & XFS_IOLOCK_SHARED)
 270		up_read(&VFS_I(ip)->i_rwsem);
 271out:
 272	return 0;
 273}
 274
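/*
 * Editor's note: a sketch of the non-blocking pattern xfs_ilock_nowait()
 * exists for - callers that must not sleep on the lock (reclaim-style
 * scans, for instance) try it and simply skip or retry on failure. The
 * function name is hypothetical.
 */
static int example_try_inode_work(struct xfs_inode *ip)
{
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
		return -EAGAIN;		/* caller skips and retries later */

	/* ... work that only needs a shared ilock ... */

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return 0;
}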
 275/*
 276 * xfs_iunlock() is used to drop the inode locks acquired with
 277 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 278 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 279 * that we know which locks to drop.
 280 *
 281 * ip -- the inode being unlocked
 282 * lock_flags -- this parameter indicates the inode's locks
 283 *       to be unlocked.  See the comment for xfs_ilock() for a list
 284 *	 of valid values for this parameter.
 285 *
 286 */
 287void
 288xfs_iunlock(
 289	xfs_inode_t		*ip,
 290	uint			lock_flags)
 291{
 292	/*
 293	 * You can't set both SHARED and EXCL for the same lock,
 294	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
 295	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
 296	 */
 297	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
 298	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 299	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
 300	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 301	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 302	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 303	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 304	ASSERT(lock_flags != 0);
 305
 306	if (lock_flags & XFS_IOLOCK_EXCL)
 307		up_write(&VFS_I(ip)->i_rwsem);
 308	else if (lock_flags & XFS_IOLOCK_SHARED)
 309		up_read(&VFS_I(ip)->i_rwsem);
 310
 311	if (lock_flags & XFS_MMAPLOCK_EXCL)
 312		mrunlock_excl(&ip->i_mmaplock);
 313	else if (lock_flags & XFS_MMAPLOCK_SHARED)
 314		mrunlock_shared(&ip->i_mmaplock);
 315
 316	if (lock_flags & XFS_ILOCK_EXCL)
 317		mrunlock_excl(&ip->i_lock);
 318	else if (lock_flags & XFS_ILOCK_SHARED)
 319		mrunlock_shared(&ip->i_lock);
 320
 321	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
 322}
 323
 324/*
 324 * Give up write locks.  The I/O lock cannot be held nested
 326 * if it is being demoted.
 327 */
 328void
 329xfs_ilock_demote(
 330	xfs_inode_t		*ip,
 331	uint			lock_flags)
 332{
 333	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
 334	ASSERT((lock_flags &
 335		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
 336
 337	if (lock_flags & XFS_ILOCK_EXCL)
 338		mrdemote(&ip->i_lock);
 339	if (lock_flags & XFS_MMAPLOCK_EXCL)
 340		mrdemote(&ip->i_mmaplock);
 341	if (lock_flags & XFS_IOLOCK_EXCL)
 342		downgrade_write(&VFS_I(ip)->i_rwsem);
 343
 344	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
 345}
 346
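/*
 * Editor's note: a sketch of the downgrade pattern xfs_ilock_demote()
 * serves - take the iolock exclusive for setup, then demote to shared
 * for the rest of the I/O path. Hypothetical function name.
 */
static void example_demote_iolock(struct xfs_inode *ip)
{
	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	/* ... work that needs exclusive access, e.g. extending the file ... */

	xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);

	/* ... continue under XFS_IOLOCK_SHARED, letting readers back in ... */

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
}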
 347#if defined(DEBUG) || defined(XFS_WARN)
 348int
 349xfs_isilocked(
 350	xfs_inode_t		*ip,
 351	uint			lock_flags)
 352{
 353	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
 354		if (!(lock_flags & XFS_ILOCK_SHARED))
 355			return !!ip->i_lock.mr_writer;
 356		return rwsem_is_locked(&ip->i_lock.mr_lock);
 357	}
 358
 359	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
 360		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
 361			return !!ip->i_mmaplock.mr_writer;
 362		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
 363	}
 364
 365	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
 366		if (!(lock_flags & XFS_IOLOCK_SHARED))
 367			return !debug_locks ||
 368				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
 369		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
 370	}
 371
 372	ASSERT(0);
 373	return 0;
 374}
 375#endif
 376
 377/*
 378 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 379 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 380 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 381 * errors and warnings.
 382 */
 383#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
 384static bool
 385xfs_lockdep_subclass_ok(
 386	int subclass)
 387{
 388	return subclass < MAX_LOCKDEP_SUBCLASSES;
 389}
 390#else
 391#define xfs_lockdep_subclass_ok(subclass)	(true)
 392#endif
 393
 394/*
 395 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 396 * value. This can be called for any type of inode lock combination, including
 397 * parent locking. Care must be taken to ensure we don't overrun the subclass
 398 * storage fields in the class mask we build.
 399 */
 400static inline int
 401xfs_lock_inumorder(int lock_mode, int subclass)
 402{
 403	int	class = 0;
 404
 405	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
 406			      XFS_ILOCK_RTSUM)));
 407	ASSERT(xfs_lockdep_subclass_ok(subclass));
 408
 409	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
 410		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
 411		class += subclass << XFS_IOLOCK_SHIFT;
 412	}
 413
 414	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
 415		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
 416		class += subclass << XFS_MMAPLOCK_SHIFT;
 417	}
 418
 419	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
 420		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
 421		class += subclass << XFS_ILOCK_SHIFT;
 422	}
 423
 424	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
 425}
 426
 427/*
 428 * The following routine will lock n inodes in exclusive mode.  We assume the
 429 * caller calls us with the inodes in i_ino order.
 430 *
 431 * We need to detect deadlock where an inode that we lock is in the AIL and we
 432 * start waiting for another inode that is locked by a thread in a long running
 433 * transaction (such as truncate). This can result in deadlock since the long
 434 * running trans might need to wait for the inode we just locked in order to
 435 * push the tail and free space in the log.
 436 *
 437 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 438 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 439 * lock more than one at a time, lockdep will report false positives saying we
 440 * have violated locking orders.
 441 */
 442static void
 443xfs_lock_inodes(
 444	struct xfs_inode	**ips,
 445	int			inodes,
 446	uint			lock_mode)
 447{
 448	int			attempts = 0, i, j, try_lock;
 449	struct xfs_log_item	*lp;
 450
 451	/*
 452	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
 453	 * support an arbitrary depth of locking here, but absolute limits on
 454	 * inodes depend on the type of locking and the limits placed by
 455	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
 456	 * the asserts.
 457	 */
 458	ASSERT(ips && inodes >= 2 && inodes <= 5);
 459	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
 460			    XFS_ILOCK_EXCL));
 461	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
 462			      XFS_ILOCK_SHARED)));
 463	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
 464		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
 465	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
 466		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
 467
 468	if (lock_mode & XFS_IOLOCK_EXCL) {
 469		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
 470	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
 471		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
 472
 473	try_lock = 0;
 474	i = 0;
 475again:
 476	for (; i < inodes; i++) {
 477		ASSERT(ips[i]);
 478
 479		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
 480			continue;
 481
 482		/*
 483		 * If try_lock is not set yet, make sure all locked inodes are
 484		 * not in the AIL.  If any are, set try_lock to be used later.
 485		 */
 486		if (!try_lock) {
 487			for (j = (i - 1); j >= 0 && !try_lock; j--) {
 488				lp = &ips[j]->i_itemp->ili_item;
 489				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
 490					try_lock++;
 491			}
 492		}
 493
 494		/*
 495		 * If any of the previous locks we have locked is in the AIL,
 496		 * we must TRY to get the second and subsequent locks. If
 497		 * we can't get any, we must release all we have
 498		 * and try again.
 499		 */
 500		if (!try_lock) {
 501			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
 502			continue;
 503		}
 504
 505		/* try_lock means we have an inode locked that is in the AIL. */
 506		ASSERT(i != 0);
 507		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
 508			continue;
 509
 510		/*
 511		 * Unlock all previous guys and try again.  xfs_iunlock will try
 512		 * to push the tail if the inode is in the AIL.
 513		 */
 514		attempts++;
 515		for (j = i - 1; j >= 0; j--) {
 516			/*
 517			 * Check to see if we've already unlocked this one.  Not
 518			 * the first one going back, and the inode ptr is the
 519			 * same.
 520			 */
 521			if (j != (i - 1) && ips[j] == ips[j + 1])
 522				continue;
 523
 524			xfs_iunlock(ips[j], lock_mode);
 525		}
 526
 527		if ((attempts % 5) == 0) {
 528			delay(1); /* Don't just spin the CPU */
 529		}
 530		i = 0;
 531		try_lock = 0;
 532		goto again;
 533	}
 534}
 535
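/*
 * Editor's note: a sketch of the typical xfs_lock_inodes() call - a
 * rename-style caller gathers up to four inodes, sorts them by inode
 * number with duplicates adjacent, and locks the lot. The sort helper is
 * elided and the function name hypothetical.
 */
static void example_lock_for_rename(struct xfs_inode **ips, int count)
{
	/* ips[] must already be sorted by i_ino as assumed above. */
	xfs_lock_inodes(ips, count, XFS_ILOCK_EXCL);
}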
 536/*
 537 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 538 * the mmaplock or the ilock, but not more than one type at a time. If we lock
 539 * more than one at a time, lockdep will report false positives saying we have
 540 * violated locking orders.  The iolock must be double-locked separately since
 541 * we use i_rwsem for that.  We now support taking one lock EXCL and the other
 542 * SHARED.
 543 */
 544void
 545xfs_lock_two_inodes(
 546	struct xfs_inode	*ip0,
 547	uint			ip0_mode,
 548	struct xfs_inode	*ip1,
 549	uint			ip1_mode)
 550{
 551	struct xfs_inode	*temp;
 552	uint			mode_temp;
 553	int			attempts = 0;
 554	struct xfs_log_item	*lp;
 555
 556	ASSERT(hweight32(ip0_mode) == 1);
 557	ASSERT(hweight32(ip1_mode) == 1);
 558	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
 559	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
 560	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 561	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 562	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 563	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 564	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 565	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 566	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
 567	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 568
 569	ASSERT(ip0->i_ino != ip1->i_ino);
 570
 571	if (ip0->i_ino > ip1->i_ino) {
 572		temp = ip0;
 573		ip0 = ip1;
 574		ip1 = temp;
 575		mode_temp = ip0_mode;
 576		ip0_mode = ip1_mode;
 577		ip1_mode = mode_temp;
 578	}
 579
 580 again:
 581	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
 582
 583	/*
 584	 * If the first lock we have locked is in the AIL, we must TRY to get
 585	 * the second lock. If we can't get it, we must release the first one
 586	 * and try again.
 587	 */
 588	lp = &ip0->i_itemp->ili_item;
 589	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
 590		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
 591			xfs_iunlock(ip0, ip0_mode);
 592			if ((++attempts % 5) == 0)
 593				delay(1); /* Don't just spin the CPU */
 594			goto again;
 595		}
 596	} else {
 597		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
 598	}
 599}
 600
 601void
 602__xfs_iflock(
 603	struct xfs_inode	*ip)
 604{
 605	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
 606	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
 607
 608	do {
 609		prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
 610		if (xfs_isiflocked(ip))
 611			io_schedule();
 612	} while (!xfs_iflock_nowait(ip));
 613
 614	finish_wait(wq, &wait.wq_entry);
 615}
 616
 617STATIC uint
 618_xfs_dic2xflags(
 619	uint16_t		di_flags,
 620	uint64_t		di_flags2,
 621	bool			has_attr)
 622{
 623	uint			flags = 0;
 624
 625	if (di_flags & XFS_DIFLAG_ANY) {
 626		if (di_flags & XFS_DIFLAG_REALTIME)
 627			flags |= FS_XFLAG_REALTIME;
 628		if (di_flags & XFS_DIFLAG_PREALLOC)
 629			flags |= FS_XFLAG_PREALLOC;
 630		if (di_flags & XFS_DIFLAG_IMMUTABLE)
 631			flags |= FS_XFLAG_IMMUTABLE;
 632		if (di_flags & XFS_DIFLAG_APPEND)
 633			flags |= FS_XFLAG_APPEND;
 634		if (di_flags & XFS_DIFLAG_SYNC)
 635			flags |= FS_XFLAG_SYNC;
 636		if (di_flags & XFS_DIFLAG_NOATIME)
 637			flags |= FS_XFLAG_NOATIME;
 638		if (di_flags & XFS_DIFLAG_NODUMP)
 639			flags |= FS_XFLAG_NODUMP;
 640		if (di_flags & XFS_DIFLAG_RTINHERIT)
 641			flags |= FS_XFLAG_RTINHERIT;
 642		if (di_flags & XFS_DIFLAG_PROJINHERIT)
 643			flags |= FS_XFLAG_PROJINHERIT;
 644		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
 645			flags |= FS_XFLAG_NOSYMLINKS;
 646		if (di_flags & XFS_DIFLAG_EXTSIZE)
 647			flags |= FS_XFLAG_EXTSIZE;
 648		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
 649			flags |= FS_XFLAG_EXTSZINHERIT;
 650		if (di_flags & XFS_DIFLAG_NODEFRAG)
 651			flags |= FS_XFLAG_NODEFRAG;
 652		if (di_flags & XFS_DIFLAG_FILESTREAM)
 653			flags |= FS_XFLAG_FILESTREAM;
 654	}
 655
 656	if (di_flags2 & XFS_DIFLAG2_ANY) {
 657		if (di_flags2 & XFS_DIFLAG2_DAX)
 658			flags |= FS_XFLAG_DAX;
 659		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
 660			flags |= FS_XFLAG_COWEXTSIZE;
 661	}
 662
 663	if (has_attr)
 664		flags |= FS_XFLAG_HASATTR;
 665
 666	return flags;
 667}
 668
 669uint
 670xfs_ip2xflags(
 671	struct xfs_inode	*ip)
 672{
 673	struct xfs_icdinode	*dic = &ip->i_d;
 674
 675	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
 676}
 677
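/*
 * Editor's note: the FS_XFLAG_* bits assembled above are what userspace
 * sees through the FS_IOC_FSGETXATTR ioctl. A self-contained userspace
 * demo for illustration - not kernel code and not part of this file:
 */
#if 0	/* userspace demo, excluded from the kernel build */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	struct fsxattr	fsx;
	int		fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, FS_IOC_FSGETXATTR, &fsx) < 0)
		return 1;
	/* fsx_xflags carries the same FS_XFLAG_* bits built above. */
	printf("xflags 0x%x realtime=%d append=%d\n", fsx.fsx_xflags,
	       !!(fsx.fsx_xflags & FS_XFLAG_REALTIME),
	       !!(fsx.fsx_xflags & FS_XFLAG_APPEND));
	close(fd);
	return 0;
}
#endif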
 678/*
 679 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 680 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 681 * ci_name->name will point to the actual name (caller must free) or
 682 * will be set to NULL if an exact match is found.
 683 */
 684int
 685xfs_lookup(
 686	xfs_inode_t		*dp,
 687	struct xfs_name		*name,
 688	xfs_inode_t		**ipp,
 689	struct xfs_name		*ci_name)
 690{
 691	xfs_ino_t		inum;
 692	int			error;
 693
 694	trace_xfs_lookup(dp, name);
 695
 696	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 697		return -EIO;
 698
 699	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
 700	if (error)
 701		goto out_unlock;
 702
 703	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
 704	if (error)
 705		goto out_free_name;
 706
 707	return 0;
 708
 709out_free_name:
 710	if (ci_name)
 711		kmem_free(ci_name->name);
 712out_unlock:
 713	*ipp = NULL;
 714	return error;
 715}
 716
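/*
 * Editor's note: a sketch of the ci_name contract documented above - on a
 * case-insensitive match the caller owns ci_name.name and must free it;
 * on an exact match it is set to NULL. Hypothetical wrapper, for
 * illustration only.
 */
static int example_ci_lookup(struct xfs_inode *dp, struct xfs_name *name,
			     struct xfs_inode **ipp)
{
	struct xfs_name	ci_name;
	int		error;

	error = xfs_lookup(dp, name, ipp, &ci_name);
	if (error)
		return error;
	if (ci_name.name) {
		/* ... hand the on-disk spelling back to the caller ... */
		kmem_free(ci_name.name);
	}
	return 0;
}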
 717/*
 718 * Allocate an inode on disk and return a copy of its in-core version.
 719 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 720 * appropriately within the inode.  The uid and gid for the inode are
 721 * set according to the contents of the given cred structure.
 722 *
 723 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 724 * has a free inode available, call xfs_iget() to obtain the in-core
 725 * version of the allocated inode.  Finally, fill in the inode and
 726 * log its initial contents.  In this case, ialloc_context would be
 727 * set to NULL.
 728 *
 729 * If xfs_dialloc() does not have an available inode, it will replenish
 730 * its supply by doing an allocation. Since we can only do one
 731 * allocation within a transaction without deadlocks, we must commit
 732 * the current transaction before returning the inode itself.
 733 * In this case, therefore, we will set ialloc_context and return.
 734 * The caller should then commit the current transaction, start a new
 735 * transaction, and call xfs_ialloc() again to actually get the inode.
 736 *
 737 * To ensure that some other process does not grab the inode that
 738 * was allocated during the first call to xfs_ialloc(), this routine
 739 * also returns the [locked] bp pointing to the head of the freelist
 740 * as ialloc_context.  The caller should hold this buffer across
 741 * the commit and pass it back into this routine on the second call.
 742 *
 743 * If we are allocating quota inodes, we do not have a parent inode
 744 * to attach to or associate with (i.e. pip == NULL) because they
 745 * are not linked into the directory structure - they are attached
 746 * directly to the superblock - and so have no parent.
 747 */
 748static int
 749xfs_ialloc(
 750	xfs_trans_t	*tp,
 751	xfs_inode_t	*pip,
 752	umode_t		mode,
 753	xfs_nlink_t	nlink,
 754	dev_t		rdev,
 755	prid_t		prid,
 756	xfs_buf_t	**ialloc_context,
 757	xfs_inode_t	**ipp)
 758{
 759	struct xfs_mount *mp = tp->t_mountp;
 760	xfs_ino_t	ino;
 761	xfs_inode_t	*ip;
 762	uint		flags;
 763	int		error;
 764	struct timespec64 tv;
 765	struct inode	*inode;
 766
 767	/*
 768	 * Call the space management code to pick
 769	 * the on-disk inode to be allocated.
 770	 */
 771	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
 772			    ialloc_context, &ino);
 773	if (error)
 774		return error;
 775	if (*ialloc_context || ino == NULLFSINO) {
 776		*ipp = NULL;
 777		return 0;
 778	}
 779	ASSERT(*ialloc_context == NULL);
 780
 781	/*
 782	 * Protect against obviously corrupt allocation btree records. Later
 783	 * xfs_iget checks will catch re-allocation of other active in-memory
 784	 * and on-disk inodes. If we don't catch reallocating the parent inode
 785	 * here we will deadlock in xfs_iget() so we have to do these checks
 786	 * first.
 787	 */
 788	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
 789		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
 790		return -EFSCORRUPTED;
 791	}
 792
 793	/*
 794	 * Get the in-core inode with the lock held exclusively.
 795	 * This is because we're setting fields here we need
 796	 * to prevent others from looking at until we're done.
 797	 */
 798	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
 799			 XFS_ILOCK_EXCL, &ip);
 800	if (error)
 801		return error;
 802	ASSERT(ip != NULL);
 803	inode = VFS_I(ip);
 804	inode->i_mode = mode;
 805	set_nlink(inode, nlink);
 806	inode->i_uid = current_fsuid();
 807	inode->i_rdev = rdev;
 808	ip->i_d.di_projid = prid;
 809
 810	if (pip && XFS_INHERIT_GID(pip)) {
 811		inode->i_gid = VFS_I(pip)->i_gid;
 812		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
 813			inode->i_mode |= S_ISGID;
 814	} else {
 815		inode->i_gid = current_fsgid();
 816	}
 817
 818	/*
 819	 * If the group ID of the new file does not match the effective group
 820	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
 821	 * (and only if the irix_sgid_inherit compatibility variable is set).
 822	 */
 823	if (irix_sgid_inherit &&
 824	    (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid))
 825		inode->i_mode &= ~S_ISGID;
 826
 827	ip->i_d.di_size = 0;
 828	ip->i_df.if_nextents = 0;
 829	ASSERT(ip->i_d.di_nblocks == 0);
 830
 831	tv = current_time(inode);
 832	inode->i_mtime = tv;
 833	inode->i_atime = tv;
 834	inode->i_ctime = tv;
 835
 836	ip->i_d.di_extsize = 0;
 837	ip->i_d.di_dmevmask = 0;
 838	ip->i_d.di_dmstate = 0;
 839	ip->i_d.di_flags = 0;
 840
 841	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
 842		inode_set_iversion(inode, 1);
 843		ip->i_d.di_flags2 = 0;
 844		ip->i_d.di_cowextsize = 0;
 845		ip->i_d.di_crtime = tv;
 846	}
 847
 848	flags = XFS_ILOG_CORE;
 849	switch (mode & S_IFMT) {
 850	case S_IFIFO:
 851	case S_IFCHR:
 852	case S_IFBLK:
 853	case S_IFSOCK:
 854		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
 855		ip->i_df.if_flags = 0;
 856		flags |= XFS_ILOG_DEV;
 857		break;
 858	case S_IFREG:
 859	case S_IFDIR:
 860		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
 861			uint		di_flags = 0;
 862
 863			if (S_ISDIR(mode)) {
 864				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
 865					di_flags |= XFS_DIFLAG_RTINHERIT;
 866				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
 867					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
 868					ip->i_d.di_extsize = pip->i_d.di_extsize;
 869				}
 870				if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
 871					di_flags |= XFS_DIFLAG_PROJINHERIT;
 872			} else if (S_ISREG(mode)) {
 873				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
 874					di_flags |= XFS_DIFLAG_REALTIME;
 875				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
 876					di_flags |= XFS_DIFLAG_EXTSIZE;
 877					ip->i_d.di_extsize = pip->i_d.di_extsize;
 878				}
 879			}
 880			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
 881			    xfs_inherit_noatime)
 882				di_flags |= XFS_DIFLAG_NOATIME;
 883			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
 884			    xfs_inherit_nodump)
 885				di_flags |= XFS_DIFLAG_NODUMP;
 886			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
 887			    xfs_inherit_sync)
 888				di_flags |= XFS_DIFLAG_SYNC;
 889			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
 890			    xfs_inherit_nosymlinks)
 891				di_flags |= XFS_DIFLAG_NOSYMLINKS;
 892			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
 893			    xfs_inherit_nodefrag)
 894				di_flags |= XFS_DIFLAG_NODEFRAG;
 895			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
 896				di_flags |= XFS_DIFLAG_FILESTREAM;
 897
 898			ip->i_d.di_flags |= di_flags;
 899		}
 900		if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY)) {
 901			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
 902				ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
 903				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
 904			}
 905			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
 906				ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
 907		}
 908		/* FALLTHROUGH */
 909	case S_IFLNK:
 910		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
 911		ip->i_df.if_flags = XFS_IFEXTENTS;
 912		ip->i_df.if_bytes = 0;
 913		ip->i_df.if_u1.if_root = NULL;
 914		break;
 915	default:
 916		ASSERT(0);
 917	}
 918
 919	/*
 920	 * Log the new values stuffed into the inode.
 921	 */
 922	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 923	xfs_trans_log_inode(tp, ip, flags);
 924
 925	/* now that we have an i_mode we can setup the inode structure */
 926	xfs_setup_inode(ip);
 927
 928	*ipp = ip;
 929	return 0;
 930}
 931
 932/*
 933 * Allocates a new inode from disk and returns a pointer to the
 934 * incore copy. This routine will internally commit the current
 935 * transaction and allocate a new one if the Space Manager needed
 936 * to do an allocation to replenish the inode free-list.
 937 *
 938 * This routine is designed to be called from xfs_create and
 939 * xfs_create_dir.
 940 *
 941 */
 942int
 943xfs_dir_ialloc(
 944	xfs_trans_t	**tpp,		/* input: current transaction;
 945					   output: may be a new transaction. */
 946	xfs_inode_t	*dp,		/* directory within which to allocate
 947					   the inode. */
 948	umode_t		mode,
 949	xfs_nlink_t	nlink,
 950	dev_t		rdev,
 951	prid_t		prid,		/* project id */
 952	xfs_inode_t	**ipp)		/* pointer to inode; it will be
 953					   locked. */
 954{
 955	xfs_trans_t	*tp;
 956	xfs_inode_t	*ip;
 957	xfs_buf_t	*ialloc_context = NULL;
 958	int		code;
 959	void		*dqinfo;
 960	uint		tflags;
 961
 962	tp = *tpp;
 963	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 964
 965	/*
 966	 * xfs_ialloc will return a pointer to an incore inode if
 967	 * the Space Manager has an available inode on the free
 968	 * list. Otherwise, it will do an allocation and replenish
 969	 * the freelist.  Since we can only do one allocation per
 970	 * transaction without deadlocks, we will need to commit the
 971	 * current transaction and start a new one.  We will then
 972	 * need to call xfs_ialloc again to get the inode.
 973	 *
 974	 * If xfs_ialloc did an allocation to replenish the freelist,
 975	 * it returns the bp containing the head of the freelist as
 976	 * ialloc_context. We will hold a lock on it across the
 977	 * transaction commit so that no other process can steal
 978	 * the inode(s) that we've just allocated.
 979	 */
 980	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
 981			&ip);
 982
 983	/*
 984	 * Return an error if we were unable to allocate a new inode.
 985	 * This should only happen if we run out of space on disk or
 986	 * encounter a disk error.
 987	 */
 988	if (code) {
 989		*ipp = NULL;
 990		return code;
 991	}
 992	if (!ialloc_context && !ip) {
 993		*ipp = NULL;
 994		return -ENOSPC;
 995	}
 996
 997	/*
 998	 * If the AGI buffer is non-NULL, then we were unable to get an
 999	 * inode in one operation.  We need to commit the current
1000	 * transaction and call xfs_ialloc() again.  It is guaranteed
1001	 * to succeed the second time.
1002	 */
1003	if (ialloc_context) {
1004		/*
1005		 * Normally, xfs_trans_commit releases all the locks.
1006		 * We call bhold to hang on to the ialloc_context across
1007		 * the commit.  Holding this buffer prevents any other
1008		 * processes from doing any allocations in this
1009		 * allocation group.
1010		 */
1011		xfs_trans_bhold(tp, ialloc_context);
1012
1013		/*
1014		 * We want the quota changes to be associated with the next
1015		 * transaction, NOT this one. So, detach the dqinfo from this
1016		 * and attach it to the next transaction.
1017		 */
1018		dqinfo = NULL;
1019		tflags = 0;
1020		if (tp->t_dqinfo) {
1021			dqinfo = (void *)tp->t_dqinfo;
1022			tp->t_dqinfo = NULL;
1023			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
1024			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
1025		}
1026
1027		code = xfs_trans_roll(&tp);
1028
1029		/*
1030		 * Re-attach the quota info that we detached from the previous transaction.
1031		 */
1032		if (dqinfo) {
1033			tp->t_dqinfo = dqinfo;
1034			tp->t_flags |= tflags;
1035		}
1036
1037		if (code) {
1038			xfs_buf_relse(ialloc_context);
1039			*tpp = tp;
1040			*ipp = NULL;
1041			return code;
1042		}
1043		xfs_trans_bjoin(tp, ialloc_context);
1044
1045		/*
1046		 * Call ialloc again. Since we've locked out all
1047		 * other allocations in this allocation group,
1048		 * this call should always succeed.
1049		 */
1050		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
1051				  &ialloc_context, &ip);
1052
1053		/*
1054		 * If we get an error at this point, return to the caller
1055		 * so that the current transaction can be aborted.
1056		 */
1057		if (code) {
1058			*tpp = tp;
1059			*ipp = NULL;
1060			return code;
1061		}
1062		ASSERT(!ialloc_context && ip);
1063
1064	}
1065
1066	*ipp = ip;
1067	*tpp = tp;
1068
1069	return 0;
1070}
1071
1072/*
1073 * Decrement the link count on an inode & log the change.  If this causes the
1074 * link count to go to zero, move the inode to AGI unlinked list so that it can
1075 * be freed when the last active reference goes away via xfs_inactive().
1076 */
1077static int			/* error */
1078xfs_droplink(
1079	xfs_trans_t *tp,
1080	xfs_inode_t *ip)
1081{
1082	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1083
1084	drop_nlink(VFS_I(ip));
1085	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1086
1087	if (VFS_I(ip)->i_nlink)
1088		return 0;
1089
1090	return xfs_iunlink(tp, ip);
1091}
1092
1093/*
1094 * Increment the link count on an inode & log the change.
1095 */
1096static void
1097xfs_bumplink(
1098	xfs_trans_t *tp,
1099	xfs_inode_t *ip)
1100{
1101	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1102
1103	inc_nlink(VFS_I(ip));
1104	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1105}
1106
1107int
1108xfs_create(
1109	xfs_inode_t		*dp,
1110	struct xfs_name		*name,
1111	umode_t			mode,
1112	dev_t			rdev,
1113	xfs_inode_t		**ipp)
1114{
1115	int			is_dir = S_ISDIR(mode);
1116	struct xfs_mount	*mp = dp->i_mount;
1117	struct xfs_inode	*ip = NULL;
1118	struct xfs_trans	*tp = NULL;
1119	int			error;
1120	bool                    unlock_dp_on_error = false;
1121	prid_t			prid;
1122	struct xfs_dquot	*udqp = NULL;
1123	struct xfs_dquot	*gdqp = NULL;
1124	struct xfs_dquot	*pdqp = NULL;
1125	struct xfs_trans_res	*tres;
1126	uint			resblks;
1127
1128	trace_xfs_create(dp, name);
1129
1130	if (XFS_FORCED_SHUTDOWN(mp))
1131		return -EIO;
1132
1133	prid = xfs_get_initial_prid(dp);
1134
1135	/*
1136	 * Make sure that we have allocated dquot(s) on disk.
1137	 */
1138	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
1139					XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1140					&udqp, &gdqp, &pdqp);
1141	if (error)
1142		return error;
1143
1144	if (is_dir) {
1145		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1146		tres = &M_RES(mp)->tr_mkdir;
1147	} else {
1148		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1149		tres = &M_RES(mp)->tr_create;
1150	}
1151
1152	/*
1153	 * Initially assume that the file does not exist and
1154	 * reserve the resources for that case.  If that is not
1155	 * the case we'll drop the one we have and get a more
1156	 * appropriate transaction later.
1157	 */
1158	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1159	if (error == -ENOSPC) {
1160		/* flush outstanding delalloc blocks and retry */
1161		xfs_flush_inodes(mp);
1162		error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1163	}
1164	if (error)
1165		goto out_release_inode;
1166
1167	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1168	unlock_dp_on_error = true;
1169
1170	/*
1171	 * Reserve disk quota and the inode.
1172	 */
1173	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1174						pdqp, resblks, 1, 0);
1175	if (error)
1176		goto out_trans_cancel;
1177
1178	/*
1179	 * A newly created regular or special file just has one directory
1180	 * entry pointing to it, but a directory also has the "." entry
1181	 * pointing to itself.
1182	 */
1183	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
1184	if (error)
1185		goto out_trans_cancel;
1186
1187	/*
1188	 * Now we join the directory inode to the transaction.  We do not do it
1189	 * earlier because xfs_dir_ialloc might commit the previous transaction
1190	 * (and release all the locks).  An error from here on will result in
1191	 * the transaction cancel unlocking dp so don't do it explicitly in the
1192	 * error path.
1193	 */
1194	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1195	unlock_dp_on_error = false;
1196
1197	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1198					resblks - XFS_IALLOC_SPACE_RES(mp));
1199	if (error) {
1200		ASSERT(error != -ENOSPC);
1201		goto out_trans_cancel;
1202	}
1203	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1204	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1205
1206	if (is_dir) {
1207		error = xfs_dir_init(tp, ip, dp);
1208		if (error)
1209			goto out_trans_cancel;
1210
1211		xfs_bumplink(tp, dp);
1212	}
1213
1214	/*
1215	 * If this is a synchronous mount, make sure that the
1216	 * create transaction goes to disk before returning to
1217	 * the user.
1218	 */
1219	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1220		xfs_trans_set_sync(tp);
1221
1222	/*
1223	 * Attach the dquot(s) to the inodes and modify them incore.
1224	 * The IDs of the inode couldn't have changed since the new
1225	 * inode has been locked ever since it was created.
1226	 */
1227	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1228
1229	error = xfs_trans_commit(tp);
1230	if (error)
1231		goto out_release_inode;
1232
1233	xfs_qm_dqrele(udqp);
1234	xfs_qm_dqrele(gdqp);
1235	xfs_qm_dqrele(pdqp);
1236
1237	*ipp = ip;
1238	return 0;
1239
1240 out_trans_cancel:
1241	xfs_trans_cancel(tp);
1242 out_release_inode:
1243	/*
1244	 * Wait until after the current transaction is aborted to finish the
1245	 * setup of the inode and release the inode.  This prevents recursive
1246	 * transactions and deadlocks from xfs_inactive.
1247	 */
1248	if (ip) {
1249		xfs_finish_inode_setup(ip);
1250		xfs_irele(ip);
1251	}
1252
1253	xfs_qm_dqrele(udqp);
1254	xfs_qm_dqrele(gdqp);
1255	xfs_qm_dqrele(pdqp);
1256
1257	if (unlock_dp_on_error)
1258		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1259	return error;
1260}
1261
1262int
1263xfs_create_tmpfile(
1264	struct xfs_inode	*dp,
1265	umode_t			mode,
1266	struct xfs_inode	**ipp)
1267{
1268	struct xfs_mount	*mp = dp->i_mount;
1269	struct xfs_inode	*ip = NULL;
1270	struct xfs_trans	*tp = NULL;
1271	int			error;
1272	prid_t                  prid;
1273	struct xfs_dquot	*udqp = NULL;
1274	struct xfs_dquot	*gdqp = NULL;
1275	struct xfs_dquot	*pdqp = NULL;
1276	struct xfs_trans_res	*tres;
1277	uint			resblks;
1278
1279	if (XFS_FORCED_SHUTDOWN(mp))
1280		return -EIO;
1281
1282	prid = xfs_get_initial_prid(dp);
1283
1284	/*
1285	 * Make sure that we have allocated dquot(s) on disk.
1286	 */
1287	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
1288				XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1289				&udqp, &gdqp, &pdqp);
1290	if (error)
1291		return error;
1292
1293	resblks = XFS_IALLOC_SPACE_RES(mp);
1294	tres = &M_RES(mp)->tr_create_tmpfile;
1295
1296	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1297	if (error)
1298		goto out_release_inode;
1299
1300	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1301						pdqp, resblks, 1, 0);
1302	if (error)
1303		goto out_trans_cancel;
1304
1305	error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
1306	if (error)
1307		goto out_trans_cancel;
1308
1309	if (mp->m_flags & XFS_MOUNT_WSYNC)
1310		xfs_trans_set_sync(tp);
1311
1312	/*
1313	 * Attach the dquot(s) to the inodes and modify them incore.
1314	 * The IDs of the inode couldn't have changed since the new
1315	 * inode has been locked ever since it was created.
1316	 */
1317	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1318
1319	error = xfs_iunlink(tp, ip);
1320	if (error)
1321		goto out_trans_cancel;
1322
1323	error = xfs_trans_commit(tp);
1324	if (error)
1325		goto out_release_inode;
1326
1327	xfs_qm_dqrele(udqp);
1328	xfs_qm_dqrele(gdqp);
1329	xfs_qm_dqrele(pdqp);
1330
1331	*ipp = ip;
1332	return 0;
1333
1334 out_trans_cancel:
1335	xfs_trans_cancel(tp);
1336 out_release_inode:
1337	/*
1338	 * Wait until after the current transaction is aborted to finish the
1339	 * setup of the inode and release the inode.  This prevents recursive
1340	 * transactions and deadlocks from xfs_inactive.
1341	 */
1342	if (ip) {
1343		xfs_finish_inode_setup(ip);
1344		xfs_irele(ip);
1345	}
1346
1347	xfs_qm_dqrele(udqp);
1348	xfs_qm_dqrele(gdqp);
1349	xfs_qm_dqrele(pdqp);
1350
1351	return error;
1352}
1353
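/*
 * Editor's note: xfs_create_tmpfile() above and the O_TMPFILE link state
 * handling in xfs_link() below are what back this userspace pattern -
 * create an unlinked temporary file, then give it a name with linkat().
 * A runnable userspace demo, not kernel code:
 */
#if 0	/* userspace demo, excluded from the kernel build */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char	path[64];
	int	fd = open("/tmp", O_TMPFILE | O_WRONLY, 0600);

	if (fd < 0)
		return 1;
	if (write(fd, "data", 4) != 4)
		return 1;
	/* Materialize the unlinked inode; in XFS this ends in xfs_link(). */
	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	linkat(AT_FDCWD, path, AT_FDCWD, "/tmp/now-visible",
	       AT_SYMLINK_FOLLOW);
	return close(fd);
}
#endif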
1354int
1355xfs_link(
1356	xfs_inode_t		*tdp,
1357	xfs_inode_t		*sip,
1358	struct xfs_name		*target_name)
1359{
1360	xfs_mount_t		*mp = tdp->i_mount;
1361	xfs_trans_t		*tp;
1362	int			error;
1363	int			resblks;
1364
1365	trace_xfs_link(tdp, target_name);
1366
1367	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1368
1369	if (XFS_FORCED_SHUTDOWN(mp))
1370		return -EIO;
1371
1372	error = xfs_qm_dqattach(sip);
1373	if (error)
1374		goto std_return;
1375
1376	error = xfs_qm_dqattach(tdp);
1377	if (error)
1378		goto std_return;
1379
1380	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1381	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
1382	if (error == -ENOSPC) {
1383		resblks = 0;
1384		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
1385	}
1386	if (error)
1387		goto std_return;
1388
1389	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1390
1391	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1392	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1393
1394	/*
1395	 * If we are using project inheritance, we only allow hard link
1396	 * creation in our tree when the project IDs are the same; else
1397	 * the tree quota mechanism could be circumvented.
1398	 */
1399	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1400		     tdp->i_d.di_projid != sip->i_d.di_projid)) {
1401		error = -EXDEV;
1402		goto error_return;
1403	}
1404
1405	if (!resblks) {
1406		error = xfs_dir_canenter(tp, tdp, target_name);
1407		if (error)
1408			goto error_return;
1409	}
1410
1411	/*
1412	 * Handle initial link state of O_TMPFILE inode
1413	 */
1414	if (VFS_I(sip)->i_nlink == 0) {
1415		error = xfs_iunlink_remove(tp, sip);
1416		if (error)
1417			goto error_return;
1418	}
1419
1420	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1421				   resblks);
1422	if (error)
1423		goto error_return;
1424	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1425	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1426
1427	xfs_bumplink(tp, sip);
1428
1429	/*
1430	 * If this is a synchronous mount, make sure that the
1431	 * link transaction goes to disk before returning to
1432	 * the user.
1433	 */
1434	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1435		xfs_trans_set_sync(tp);
1436
1437	return xfs_trans_commit(tp);
1438
1439 error_return:
1440	xfs_trans_cancel(tp);
1441 std_return:
1442	return error;
1443}
1444
1445/* Clear the reflink flag and the cowblocks tag if possible. */
1446static void
1447xfs_itruncate_clear_reflink_flags(
1448	struct xfs_inode	*ip)
1449{
1450	struct xfs_ifork	*dfork;
1451	struct xfs_ifork	*cfork;
1452
1453	if (!xfs_is_reflink_inode(ip))
1454		return;
1455	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1456	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1457	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1458		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1459	if (cfork->if_bytes == 0)
1460		xfs_inode_clear_cowblocks_tag(ip);
1461}
1462
1463/*
1464 * Free up the underlying blocks past new_size.  The new size must be smaller
1465 * than the current size.  This routine can be used both for the attribute and
1466 * data fork, and does not modify the inode size, which is left to the caller.
1467 *
1468 * The transaction passed to this routine must have made a permanent log
1469 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1470 * given transaction and start new ones, so make sure everything involved in
1471 * the transaction is tidy before calling here.  Some transaction will be
1472 * returned to the caller to be committed.  The incoming transaction must
1473 * already include the inode, and both inode locks must be held exclusively.
1474 * The inode must also be "held" within the transaction.  On return the inode
1475 * will be "held" within the returned transaction.  This routine does NOT
1476 * require any disk space to be reserved for it within the transaction.
1477 *
1478 * If we get an error, we must return with the inode locked and linked into the
1479 * current transaction. This keeps things simple for the higher level code,
1480 * because it always knows that the inode is locked and held in the transaction
1481 * that returns to it whether errors occur or not.  We don't mark the inode
1482 * dirty on error so that transactions can be easily aborted if possible.
1483 */
1484int
1485xfs_itruncate_extents_flags(
1486	struct xfs_trans	**tpp,
1487	struct xfs_inode	*ip,
1488	int			whichfork,
1489	xfs_fsize_t		new_size,
1490	int			flags)
1491{
1492	struct xfs_mount	*mp = ip->i_mount;
1493	struct xfs_trans	*tp = *tpp;
1494	xfs_fileoff_t		first_unmap_block;
1495	xfs_filblks_t		unmap_len;
1496	int			error = 0;
1497
1498	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1499	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1500	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1501	ASSERT(new_size <= XFS_ISIZE(ip));
1502	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1503	ASSERT(ip->i_itemp != NULL);
1504	ASSERT(ip->i_itemp->ili_lock_flags == 0);
1505	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1506
1507	trace_xfs_itruncate_extents_start(ip, new_size);
1508
1509	flags |= xfs_bmapi_aflag(whichfork);
1510
1511	/*
1512	 * Since it is possible for space to become allocated beyond
1513	 * the end of the file (in a crash where the space is allocated
1514	 * but the inode size is not yet updated), simply remove any
1515	 * blocks which show up between the new EOF and the maximum
1516	 * possible file size.
1517	 *
1518	 * We have to free all the blocks to the bmbt maximum offset, even if
1519	 * the page cache can't scale that far.
1520	 */
1521	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1522	if (first_unmap_block >= XFS_MAX_FILEOFF) {
1523		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
1524		return 0;
1525	}
1526
1527	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
1528	while (unmap_len > 0) {
1529		ASSERT(tp->t_firstblock == NULLFSBLOCK);
1530		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
1531				flags, XFS_ITRUNC_MAX_EXTENTS);
1532		if (error)
1533			goto out;
1534
1535		/*
1536		 * Duplicate the transaction that has the permanent
1537		 * reservation and commit the old transaction.
1538		 */
1539		error = xfs_defer_finish(&tp);
1540		if (error)
1541			goto out;
1542
1543		error = xfs_trans_roll_inode(&tp, ip);
1544		if (error)
1545			goto out;
1546	}
1547
1548	if (whichfork == XFS_DATA_FORK) {
1549		/* Remove all pending CoW reservations. */
1550		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1551				first_unmap_block, XFS_MAX_FILEOFF, true);
1552		if (error)
1553			goto out;
1554
1555		xfs_itruncate_clear_reflink_flags(ip);
1556	}
1557
1558	/*
1559	 * Always re-log the inode so that our permanent transaction can keep
1560	 * on rolling it forward in the log.
1561	 */
1562	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1563
1564	trace_xfs_itruncate_extents_end(ip, new_size);
1565
1566out:
1567	*tpp = tp;
1568	return error;
1569}
1570
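/*
 * Hedged illustration (a standalone userspace sketch, not kernel code) of
 * the rolling loop in xfs_itruncate_extents_flags() above: unmap at most
 * XFS_ITRUNC_MAX_EXTENTS extents per transaction, finish the deferred
 * work, roll to a fresh transaction, repeat.  This bounds the log
 * reservation any single commit can require.  All names below are
 * invented for the model.
 */
#include <stdio.h>

#define MODEL_MAX_EXTENTS_PER_TX	2	/* cf. XFS_ITRUNC_MAX_EXTENTS */

int main(void)
{
	int extents_left = 7;			/* extents past new EOF */

	while (extents_left > 0) {
		int n = extents_left < MODEL_MAX_EXTENTS_PER_TX ?
				extents_left : MODEL_MAX_EXTENTS_PER_TX;

		extents_left -= n;		/* cf. __xfs_bunmapi() */
		printf("tx: unmapped %d extent(s), rolling\n", n);
	}					/* cf. defer_finish + roll */
	return 0;
}
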
1571int
1572xfs_release(
1573	xfs_inode_t	*ip)
1574{
1575	xfs_mount_t	*mp = ip->i_mount;
1576	int		error;
1577
1578	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1579		return 0;
1580
1581	/* If this is a read-only mount, don't do this (would generate I/O) */
1582	if (mp->m_flags & XFS_MOUNT_RDONLY)
1583		return 0;
1584
1585	if (!XFS_FORCED_SHUTDOWN(mp)) {
1586		int truncated;
1587
1588		/*
1589		 * If we previously truncated this file and removed old data
1590		 * in the process, we want to initiate "early" writeout on
1591		 * the last close.  This is an attempt to combat the notorious
1592		 * NULL files problem which is particularly noticeable from a
1593		 * truncate down, buffered (re-)write (delalloc), followed by
1594		 * a crash.  What we are effectively doing here is
1595		 * significantly reducing the time window where we'd otherwise
1596		 * be exposed to that problem.
1597		 */
1598		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1599		if (truncated) {
1600			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1601			if (ip->i_delayed_blks > 0) {
1602				error = filemap_flush(VFS_I(ip)->i_mapping);
1603				if (error)
1604					return error;
1605			}
1606		}
1607	}
1608
1609	if (VFS_I(ip)->i_nlink == 0)
1610		return 0;
1611
1612	if (xfs_can_free_eofblocks(ip, false)) {
1613
1614		/*
1615		 * If the inode is being opened, written and closed
1616		 * frequently and we have delayed allocation blocks outstanding
1617		 * (e.g. streaming writes from the NFS server), truncating the
1618		 * blocks past EOF will cause fragmentation to occur.
1619		 *
1620		 * In this case don't do the truncation, but we have to be
1621		 * careful how we detect this case. Blocks beyond EOF show up as
1622		 * i_delayed_blks even when the inode is clean, so we need to
1623		 * truncate them away first before checking for a dirty release.
1624		 * Hence on the first dirty close we will still remove the
1625		 * speculative allocation, but after that we will leave it in
1626		 * place.
1627		 */
1628		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1629			return 0;
1630		/*
1631		 * If we can't get the iolock just skip truncating the blocks
1632		 * past EOF because we could deadlock with the mmap_lock
1633		 * otherwise. We'll get another chance to drop them once the
1634		 * last reference to the inode is dropped, so we'll never leak
1635		 * blocks permanently.
1636		 */
1637		if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1638			error = xfs_free_eofblocks(ip);
1639			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1640			if (error)
1641				return error;
1642		}
1643
1644		/* delalloc blocks after truncation means it really is dirty */
1645		if (ip->i_delayed_blks)
1646			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1647	}
1648	return 0;
1649}
1650
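/*
 * Hedged model (standalone userspace sketch, not kernel code) of the
 * "dirty release" heuristic described in xfs_release() above: the first
 * close after a truncate frees the speculative post-EOF preallocation;
 * if delalloc blocks remain afterwards the inode is flagged, and later
 * closes leave the preallocation alone so that open/write/close
 * workloads do not fragment their files.
 */
#include <stdbool.h>
#include <stdio.h>

static bool model_dirty_release;	/* cf. XFS_IDIRTY_RELEASE */

static void
model_release(int delayed_blks)
{
	if (model_dirty_release) {
		puts("keep preallocation");
		return;
	}
	puts("free post-EOF blocks");
	if (delayed_blks > 0)
		model_dirty_release = true;	/* back off from now on */
}

int main(void)
{
	model_release(4);	/* first close: trims, then sets the flag */
	model_release(4);	/* later closes: leave it in place */
	return 0;
}
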
1651/*
1652 * xfs_inactive_truncate
1653 *
1654 * Called to perform a truncate when an inode becomes unlinked.
1655 */
1656STATIC int
1657xfs_inactive_truncate(
1658	struct xfs_inode *ip)
1659{
1660	struct xfs_mount	*mp = ip->i_mount;
1661	struct xfs_trans	*tp;
1662	int			error;
1663
1664	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1665	if (error) {
1666		ASSERT(XFS_FORCED_SHUTDOWN(mp));
1667		return error;
1668	}
1669	xfs_ilock(ip, XFS_ILOCK_EXCL);
1670	xfs_trans_ijoin(tp, ip, 0);
1671
1672	/*
1673	 * Log the inode size first to prevent stale data exposure in the event
1674	 * of a system crash before the truncate completes. See the related
1675	 * comment in xfs_vn_setattr_size() for details.
1676	 */
1677	ip->i_d.di_size = 0;
1678	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1679
1680	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1681	if (error)
1682		goto error_trans_cancel;
1683
1684	ASSERT(ip->i_df.if_nextents == 0);
1685
1686	error = xfs_trans_commit(tp);
1687	if (error)
1688		goto error_unlock;
1689
1690	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1691	return 0;
1692
1693error_trans_cancel:
1694	xfs_trans_cancel(tp);
1695error_unlock:
1696	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1697	return error;
1698}
1699
1700/*
1701 * xfs_inactive_ifree()
1702 *
1703 * Perform the inode free when an inode is unlinked.
1704 */
1705STATIC int
1706xfs_inactive_ifree(
1707	struct xfs_inode *ip)
1708{
1709	struct xfs_mount	*mp = ip->i_mount;
1710	struct xfs_trans	*tp;
1711	int			error;
1712
1713	/*
1714	 * We try to use a per-AG reservation for any block needed by the finobt
1715	 * tree, but as the finobt feature predates the per-AG reservation
1716	 * support a degraded file system might not have enough space for the
1717	 * reservation at mount time.  In that case try to dip into the reserved
1718	 * pool and pray.
1719	 *
1720	 * Send a warning if the reservation does happen to fail, as the inode
1721	 * now remains allocated and sits on the unlinked list until the fs is
1722	 * repaired.
1723	 */
1724	if (unlikely(mp->m_finobt_nores)) {
1725		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1726				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1727				&tp);
1728	} else {
1729		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1730	}
1731	if (error) {
1732		if (error == -ENOSPC) {
1733			xfs_warn_ratelimited(mp,
1734			"Failed to remove inode(s) from unlinked list. "
1735			"Please free space, unmount and run xfs_repair.");
1736		} else {
1737			ASSERT(XFS_FORCED_SHUTDOWN(mp));
1738		}
1739		return error;
1740	}
1741
1742	/*
1743	 * We do not hold the inode locked across the entire rolling transaction
1744	 * here. We only need to hold it for the first transaction that
1745	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
1746	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
1747	 * here breaks the relationship between cluster buffer invalidation and
1748	 * stale inode invalidation on cluster buffer item journal commit
1749	 * completion, and can result in leaving dirty stale inodes hanging
1750	 * around in memory.
1751	 *
1752	 * We have no need for serialising this inode operation against other
1753	 * operations - we freed the inode and hence reallocation is required
1754	 * and that will serialise on reallocating the space the deferops need
1755	 * to free. Hence we can unlock the inode on the first commit of
1756	 * the transaction rather than roll it right through the deferops. This
1757	 * avoids relogging the XFS_ISTALE inode.
1758	 *
1759	 * We check that xfs_ifree() hasn't grown an internal transaction roll
1760	 * by asserting that the inode is still locked when it returns.
1761	 */
1762	xfs_ilock(ip, XFS_ILOCK_EXCL);
1763	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1764
1765	error = xfs_ifree(tp, ip);
1766	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1767	if (error) {
1768		/*
1769		 * If we fail to free the inode, shut down.  The cancel
1770		 * might do that, we need to make sure.  Otherwise the
1771		 * inode might be lost for a long time or forever.
1772		 */
1773		if (!XFS_FORCED_SHUTDOWN(mp)) {
1774			xfs_notice(mp, "%s: xfs_ifree returned error %d",
1775				__func__, error);
1776			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1777		}
1778		xfs_trans_cancel(tp);
1779		return error;
1780	}
1781
1782	/*
1783	 * Credit the quota account(s). The inode is gone.
1784	 */
1785	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1786
1787	/*
1788	 * Just ignore errors at this point.  There is nothing we can do except
1789	 * to try to keep going. Make sure it's not a silent error.
1790	 */
1791	error = xfs_trans_commit(tp);
1792	if (error)
1793		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1794			__func__, error);
1795
1796	return 0;
1797}
1798
1799/*
1800 * xfs_inactive
1801 *
1802 * This is called when the reference count for the vnode
1803 * goes to zero.  If the file has been unlinked, then it must
1804 * now be truncated.  Also, we clear all of the read-ahead state
1805 * kept for the inode here since the file is now closed.
1806 */
1807void
1808xfs_inactive(
1809	xfs_inode_t	*ip)
1810{
1811	struct xfs_mount	*mp;
1812	int			error;
1813	int			truncate = 0;
1814
1815	/*
1816	 * If the inode is already free, then there can be nothing
1817	 * to clean up here.
1818	 */
1819	if (VFS_I(ip)->i_mode == 0) {
1820		ASSERT(ip->i_df.if_broot_bytes == 0);
1821		return;
1822	}
1823
1824	mp = ip->i_mount;
1825	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1826
1827	/* If this is a read-only mount, don't do this (would generate I/O) */
1828	if (mp->m_flags & XFS_MOUNT_RDONLY)
1829		return;
1830
1831	/* Try to clean out the cow blocks if there are any. */
1832	if (xfs_inode_has_cow_data(ip))
1833		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1834
1835	if (VFS_I(ip)->i_nlink != 0) {
1836		/*
1837		 * force is true because we are evicting an inode from the
1838		 * cache. Post-eof blocks must be freed, lest we end up with
1839		 * broken free space accounting.
1840		 *
1841		 * Note: don't bother with iolock here since lockdep complains
1842		 * about acquiring it in reclaim context. We have the only
1843		 * reference to the inode at this point anyway.
1844		 */
1845		if (xfs_can_free_eofblocks(ip, true))
1846			xfs_free_eofblocks(ip);
1847
1848		return;
1849	}
1850
1851	if (S_ISREG(VFS_I(ip)->i_mode) &&
1852	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1853	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1854		truncate = 1;
1855
1856	error = xfs_qm_dqattach(ip);
1857	if (error)
1858		return;
1859
1860	if (S_ISLNK(VFS_I(ip)->i_mode))
1861		error = xfs_inactive_symlink(ip);
1862	else if (truncate)
1863		error = xfs_inactive_truncate(ip);
1864	if (error)
1865		return;
1866
1867	/*
1868	 * If there are attributes associated with the file then blow them away
1869	 * now.  The code calls a routine that recursively deconstructs the
1870	 * attribute fork. It also blows away the in-core attribute fork.
1871	 */
1872	if (XFS_IFORK_Q(ip)) {
1873		error = xfs_attr_inactive(ip);
1874		if (error)
1875			return;
1876	}
1877
1878	ASSERT(!ip->i_afp);
1879	ASSERT(ip->i_d.di_forkoff == 0);
1880
1881	/*
1882	 * Free the inode.
1883	 */
1884	error = xfs_inactive_ifree(ip);
1885	if (error)
1886		return;
1887
1888	/*
1889	 * Release the dquots held by inode, if any.
1890	 */
1891	xfs_qm_dqdetach(ip);
1892}
1893
1894/*
1895 * In-Core Unlinked List Lookups
1896 * =============================
1897 *
1898 * Every inode is supposed to be reachable from some other piece of metadata
1899 * with the exception of the root directory.  Inodes with a connection to a
1900 * file descriptor but not linked from anywhere in the on-disk directory tree
1901 * are collectively known as unlinked inodes, though the filesystem itself
1902 * maintains links to these inodes so that on-disk metadata are consistent.
1903 *
1904 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
1905 * header contains a number of buckets that point to an inode, and each inode
1906 * record has a pointer to the next inode in the hash chain.  This
1907 * singly-linked list causes scaling problems in the iunlink remove function
1908 * because we must walk that list to find the inode that points to the inode
1909 * being removed from the unlinked hash bucket list.
1910 *
1911 * What if we modelled the unlinked list as a collection of records capturing
1912 * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
1913 * have a fast way to look up unlinked list predecessors, which avoids the
1914 * slow list walk.  That's exactly what we do here (in-core) with a per-AG
1915 * rhashtable.
1916 *
1917 * Because this is a backref cache, we ignore operational failures since the
1918 * iunlink code can fall back to the slow bucket walk.  The only errors that
1919 * should bubble out are for obviously incorrect situations.
1920 *
1921 * All users of the backref cache MUST hold the AGI buffer lock to serialize
1922 * access or have otherwise provided for concurrency control.
1923 */
1924
1925/* Capture a "X.next_unlinked = Y" relationship. */
1926struct xfs_iunlink {
1927	struct rhash_head	iu_rhash_head;
1928	xfs_agino_t		iu_agino;		/* X */
1929	xfs_agino_t		iu_next_unlinked;	/* Y */
1930};
1931
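/*
 * Hedged illustration (a standalone userspace sketch, not kernel code)
 * of the backref cache idea described above: records of the form
 * "X.next_unlinked = Y" are indexed by Y, so the predecessor of any
 * inode can be found with one lookup instead of walking the bucket
 * list.  The real cache is the per-AG rhashtable; the tiny linear table
 * below is an invented stand-in.
 */
#include <stdio.h>

struct model_backref { unsigned int agino, next_unlinked; };	/* X, Y */

static unsigned int
model_lookup_predecessor(const struct model_backref *tab, int n,
			 unsigned int y)
{
	for (int i = 0; i < n; i++)
		if (tab[i].next_unlinked == y)
			return tab[i].agino;	/* X where X.next = Y */
	return (unsigned int)-1;		/* no predecessor found */
}

int main(void)
{
	/* Unlinked chain: bucket -> 7 -> 12 -> 3 */
	struct model_backref tab[] = { { 7, 12 }, { 12, 3 } };

	/* Who points at inode 3?  Inode 12, found without a list walk. */
	printf("prev(3) = %u\n", model_lookup_predecessor(tab, 2, 3));
	return 0;
}
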
1932/* Unlinked list predecessor lookup hashtable construction */
1933static int
1934xfs_iunlink_obj_cmpfn(
1935	struct rhashtable_compare_arg	*arg,
1936	const void			*obj)
1937{
1938	const xfs_agino_t		*key = arg->key;
1939	const struct xfs_iunlink	*iu = obj;
1940
1941	if (iu->iu_next_unlinked != *key)
1942		return 1;
1943	return 0;
1944}
1945
1946static const struct rhashtable_params xfs_iunlink_hash_params = {
1947	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
1948	.key_len		= sizeof(xfs_agino_t),
1949	.key_offset		= offsetof(struct xfs_iunlink,
1950					   iu_next_unlinked),
1951	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
1952	.automatic_shrinking	= true,
1953	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
1954};
1955
1956/*
1957 * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
1958 * relation is found.
1959 */
1960static xfs_agino_t
1961xfs_iunlink_lookup_backref(
1962	struct xfs_perag	*pag,
1963	xfs_agino_t		agino)
1964{
1965	struct xfs_iunlink	*iu;
1966
1967	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
1968			xfs_iunlink_hash_params);
1969	return iu ? iu->iu_agino : NULLAGINO;
1970}
1971
1972/*
1973 * Take ownership of an iunlink cache entry and insert it into the hash table.
1974 * If successful, the entry will be owned by the cache; if not, it is freed.
1975 * Either way, the caller does not own @iu after this call.
1976 */
1977static int
1978xfs_iunlink_insert_backref(
1979	struct xfs_perag	*pag,
1980	struct xfs_iunlink	*iu)
1981{
1982	int			error;
1983
1984	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
1985			&iu->iu_rhash_head, xfs_iunlink_hash_params);
1986	/*
1987	 * Fail loudly if there already was an entry because that's a sign of
1988	 * corruption of in-memory data.  Also fail loudly if we see an error
1989	 * code we didn't anticipate from the rhashtable code.  Currently we
1990	 * only anticipate ENOMEM.
1991	 */
1992	if (error) {
1993		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
1994		kmem_free(iu);
1995	}
1996	/*
1997	 * Absorb any runtime errors that aren't a result of corruption because
1998	 * this is a cache and we can always fall back to bucket list scanning.
1999	 */
2000	if (error != 0 && error != -EEXIST)
2001		error = 0;
2002	return error;
2003}
2004
2005/* Remember that @prev_agino.next_unlinked = @this_agino. */
2006static int
2007xfs_iunlink_add_backref(
2008	struct xfs_perag	*pag,
2009	xfs_agino_t		prev_agino,
2010	xfs_agino_t		this_agino)
2011{
2012	struct xfs_iunlink	*iu;
2013
2014	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
2015		return 0;
2016
2017	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
2018	iu->iu_agino = prev_agino;
2019	iu->iu_next_unlinked = this_agino;
2020
2021	return xfs_iunlink_insert_backref(pag, iu);
2022}
2023
2024/*
2025 * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
2026 * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
2027 * wasn't any such entry then we don't bother.
2028 */
2029static int
2030xfs_iunlink_change_backref(
2031	struct xfs_perag	*pag,
2032	xfs_agino_t		agino,
2033	xfs_agino_t		next_unlinked)
2034{
2035	struct xfs_iunlink	*iu;
2036	int			error;
2037
2038	/* Look up the old entry; if there wasn't one then exit. */
2039	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
2040			xfs_iunlink_hash_params);
2041	if (!iu)
2042		return 0;
2043
2044	/*
2045	 * Remove the entry.  This shouldn't ever return an error, but if we
2046	 * couldn't remove the old entry we don't want to add it again to the
2047	 * hash table, and if the entry disappeared on us then someone's
2048	 * violated the locking rules and we need to fail loudly.  Either way
2049	 * we cannot remove the inode because internal state is or would have
2050	 * been corrupt.
2051	 */
2052	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
2053			&iu->iu_rhash_head, xfs_iunlink_hash_params);
2054	if (error)
2055		return error;
2056
2057	/* If there is no new next entry just free our item and return. */
2058	if (next_unlinked == NULLAGINO) {
2059		kmem_free(iu);
2060		return 0;
2061	}
2062
2063	/* Update the entry and re-add it to the hash table. */
2064	iu->iu_next_unlinked = next_unlinked;
2065	return xfs_iunlink_insert_backref(pag, iu);
2066}
2067
2068/* Set up the in-core predecessor structures. */
2069int
2070xfs_iunlink_init(
2071	struct xfs_perag	*pag)
2072{
2073	return rhashtable_init(&pag->pagi_unlinked_hash,
2074			&xfs_iunlink_hash_params);
2075}
2076
2077/* Free the in-core predecessor structures. */
2078static void
2079xfs_iunlink_free_item(
2080	void			*ptr,
2081	void			*arg)
2082{
2083	struct xfs_iunlink	*iu = ptr;
2084	bool			*freed_anything = arg;
2085
2086	*freed_anything = true;
2087	kmem_free(iu);
2088}
2089
2090void
2091xfs_iunlink_destroy(
2092	struct xfs_perag	*pag)
2093{
2094	bool			freed_anything = false;
2095
2096	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
2097			xfs_iunlink_free_item, &freed_anything);
2098
2099	ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
2100}
2101
2102/*
2103 * Point the AGI unlinked bucket at an inode and log the results.  The caller
2104 * is responsible for validating the old value.
2105 */
2106STATIC int
2107xfs_iunlink_update_bucket(
2108	struct xfs_trans	*tp,
2109	xfs_agnumber_t		agno,
2110	struct xfs_buf		*agibp,
2111	unsigned int		bucket_index,
2112	xfs_agino_t		new_agino)
2113{
2114	struct xfs_agi		*agi = agibp->b_addr;
2115	xfs_agino_t		old_value;
2116	int			offset;
2117
2118	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
2119
2120	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2121	trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
2122			old_value, new_agino);
2123
2124	/*
2125	 * We should never find the head of the list already set to the value
2126	 * passed in because either we're adding or removing ourselves from the
2127	 * head of the list.
2128	 */
2129	if (old_value == new_agino) {
2130		xfs_buf_mark_corrupt(agibp);
2131		return -EFSCORRUPTED;
2132	}
2133
2134	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
2135	offset = offsetof(struct xfs_agi, agi_unlinked) +
2136			(sizeof(xfs_agino_t) * bucket_index);
2137	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
2138	return 0;
2139}
2140
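/*
 * Hedged illustration (standalone userspace sketch, not kernel code) of
 * the byte-range computation used by xfs_iunlink_update_bucket() above:
 * offsetof() plus an array index identifies exactly which bytes of the
 * AGI changed, so only that one bucket slot has to be logged.  The
 * struct layout below is invented for the model.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct model_agi {
	uint32_t	magic;
	uint32_t	other_fields[13];
	uint32_t	unlinked[64];		/* cf. agi_unlinked[] */
};

int main(void)
{
	int bucket_index = 5;
	size_t first = offsetof(struct model_agi, unlinked) +
			sizeof(uint32_t) * bucket_index;
	size_t last = first + sizeof(uint32_t) - 1;

	/* Log exactly four bytes: the one slot that changed. */
	printf("log bytes %zu..%zu\n", first, last);
	return 0;
}
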
2141/* Set an on-disk inode's next_unlinked pointer. */
2142STATIC void
2143xfs_iunlink_update_dinode(
2144	struct xfs_trans	*tp,
2145	xfs_agnumber_t		agno,
2146	xfs_agino_t		agino,
2147	struct xfs_buf		*ibp,
2148	struct xfs_dinode	*dip,
2149	struct xfs_imap		*imap,
2150	xfs_agino_t		next_agino)
2151{
2152	struct xfs_mount	*mp = tp->t_mountp;
2153	int			offset;
2154
2155	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2156
2157	trace_xfs_iunlink_update_dinode(mp, agno, agino,
2158			be32_to_cpu(dip->di_next_unlinked), next_agino);
2159
2160	dip->di_next_unlinked = cpu_to_be32(next_agino);
2161	offset = imap->im_boffset +
2162			offsetof(struct xfs_dinode, di_next_unlinked);
2163
2164	/* need to recalc the inode CRC if appropriate */
2165	xfs_dinode_calc_crc(mp, dip);
2166	xfs_trans_inode_buf(tp, ibp);
2167	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2168}
2169
2170/* Set an in-core inode's unlinked pointer and return the old value. */
2171STATIC int
2172xfs_iunlink_update_inode(
2173	struct xfs_trans	*tp,
2174	struct xfs_inode	*ip,
2175	xfs_agnumber_t		agno,
2176	xfs_agino_t		next_agino,
2177	xfs_agino_t		*old_next_agino)
2178{
2179	struct xfs_mount	*mp = tp->t_mountp;
2180	struct xfs_dinode	*dip;
2181	struct xfs_buf		*ibp;
2182	xfs_agino_t		old_value;
2183	int			error;
2184
2185	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2186
2187	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0);
2188	if (error)
2189		return error;
2190
2191	/* Make sure the old pointer isn't garbage. */
2192	old_value = be32_to_cpu(dip->di_next_unlinked);
2193	if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
2194		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
2195				sizeof(*dip), __this_address);
2196		error = -EFSCORRUPTED;
2197		goto out;
2198	}
2199
2200	/*
2201	 * Since we're updating a linked list, we should never find that the
2202	 * current pointer is the same as the new value, unless we're
2203	 * terminating the list.
2204	 */
2205	*old_next_agino = old_value;
2206	if (old_value == next_agino) {
2207		if (next_agino != NULLAGINO) {
2208			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
2209					dip, sizeof(*dip), __this_address);
2210			error = -EFSCORRUPTED;
2211		}
2212		goto out;
2213	}
2214
2215	/* Ok, update the new pointer. */
2216	xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
2217			ibp, dip, &ip->i_imap, next_agino);
2218	return 0;
2219out:
2220	xfs_trans_brelse(tp, ibp);
2221	return error;
2222}
2223
2224/*
2225 * This is called when the inode's link count has gone to 0 or we are creating
2226 * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
2227 *
2228 * We place the on-disk inode on a list in the AGI.  It will be pulled from this
2229 * list when the inode is freed.
2230 */
2231STATIC int
2232xfs_iunlink(
2233	struct xfs_trans	*tp,
2234	struct xfs_inode	*ip)
2235{
2236	struct xfs_mount	*mp = tp->t_mountp;
2237	struct xfs_agi		*agi;
2238	struct xfs_buf		*agibp;
2239	xfs_agino_t		next_agino;
2240	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2241	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2242	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2243	int			error;
2244
2245	ASSERT(VFS_I(ip)->i_nlink == 0);
2246	ASSERT(VFS_I(ip)->i_mode != 0);
2247	trace_xfs_iunlink(ip);
2248
2249	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2250	error = xfs_read_agi(mp, tp, agno, &agibp);
2251	if (error)
2252		return error;
2253	agi = agibp->b_addr;
2254
2255	/*
2256	 * Get the index into the agi hash table for the list this inode will
2257	 * go on.  Make sure the pointer isn't garbage and that this inode
2258	 * isn't already on the list.
2259	 */
2260	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2261	if (next_agino == agino ||
2262	    !xfs_verify_agino_or_null(mp, agno, next_agino)) {
2263		xfs_buf_mark_corrupt(agibp);
2264		return -EFSCORRUPTED;
2265	}
2266
2267	if (next_agino != NULLAGINO) {
2268		xfs_agino_t		old_agino;
2269
2270		/*
2271		 * There is already another inode in the bucket, so point this
2272		 * inode to the current head of the list.
2273		 */
2274		error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
2275				&old_agino);
2276		if (error)
2277			return error;
2278		ASSERT(old_agino == NULLAGINO);
2279
2280		/*
2281		 * agino has been unlinked, add a backref from the next inode
2282		 * back to agino.
2283		 */
2284		error = xfs_iunlink_add_backref(agibp->b_pag, agino, next_agino);
2285		if (error)
2286			return error;
2287	}
2288
2289	/* Point the head of the list at this inode. */
2290	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
2291}
2292
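/*
 * Hedged illustration (standalone userspace model, not kernel code) of
 * the head insertion performed by xfs_iunlink() above: hash the inode to
 * a bucket, point the inode at the old bucket head, point the bucket at
 * the inode.  The on-disk AGI buckets and per-inode next_unlinked
 * pointers are modelled as plain arrays; MODEL_NULL stands in for
 * NULLAGINO.
 */
#include <assert.h>

#define MODEL_BUCKETS	64		/* cf. XFS_AGI_UNLINKED_BUCKETS */
#define MODEL_NULL	((unsigned int)-1)

static unsigned int bucket[MODEL_BUCKETS];	/* AGI bucket heads */
static unsigned int next_ptr[256];		/* per-inode next_unlinked */

static void
model_iunlink(unsigned int agino)
{
	unsigned int b = agino % MODEL_BUCKETS;

	next_ptr[agino] = bucket[b];	/* point new head at old head */
	bucket[b] = agino;		/* bucket now points at us */
}

int main(void)
{
	for (int i = 0; i < MODEL_BUCKETS; i++)
		bucket[i] = MODEL_NULL;

	model_iunlink(7);		/* bucket 7: 7 */
	model_iunlink(71);		/* bucket 7: 71 -> 7 */
	assert(bucket[7] == 71);
	assert(next_ptr[71] == 7);
	assert(next_ptr[7] == MODEL_NULL);
	return 0;
}
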
2293/* Return the imap, dinode pointer, and buffer for an inode. */
2294STATIC int
2295xfs_iunlink_map_ino(
2296	struct xfs_trans	*tp,
2297	xfs_agnumber_t		agno,
2298	xfs_agino_t		agino,
2299	struct xfs_imap		*imap,
2300	struct xfs_dinode	**dipp,
2301	struct xfs_buf		**bpp)
2302{
2303	struct xfs_mount	*mp = tp->t_mountp;
2304	int			error;
2305
2306	imap->im_blkno = 0;
2307	error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
2308	if (error) {
2309		xfs_warn(mp, "%s: xfs_imap returned error %d.",
2310				__func__, error);
2311		return error;
2312	}
2313
2314	error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0);
2315	if (error) {
2316		xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2317				__func__, error);
2318		return error;
2319	}
2320
2321	return 0;
2322}
2323
2324/*
2325 * Walk the unlinked chain from @head_agino until we find the inode that
2326 * points to @target_agino.  Return the inode number, map, dinode pointer,
2327 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
2328 *
2329 * @tp, @pag, @head_agino, and @target_agino are input parameters.
2330 * @agino, @imap, @dipp, and @bpp are all output parameters.
2331 *
2332 * Do not call this function if @target_agino is the head of the list.
2333 */
2334STATIC int
2335xfs_iunlink_map_prev(
2336	struct xfs_trans	*tp,
2337	xfs_agnumber_t		agno,
2338	xfs_agino_t		head_agino,
2339	xfs_agino_t		target_agino,
2340	xfs_agino_t		*agino,
2341	struct xfs_imap		*imap,
2342	struct xfs_dinode	**dipp,
2343	struct xfs_buf		**bpp,
2344	struct xfs_perag	*pag)
2345{
2346	struct xfs_mount	*mp = tp->t_mountp;
2347	xfs_agino_t		next_agino;
2348	int			error;
2349
2350	ASSERT(head_agino != target_agino);
2351	*bpp = NULL;
2352
2353	/* See if our backref cache can find it faster. */
2354	*agino = xfs_iunlink_lookup_backref(pag, target_agino);
2355	if (*agino != NULLAGINO) {
2356		error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
2357		if (error)
2358			return error;
2359
2360		if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
2361			return 0;
2362
2363		/*
2364		 * If we get here the cache contents were corrupt, so drop the
2365		 * buffer and fall back to walking the bucket list.
2366		 */
2367		xfs_trans_brelse(tp, *bpp);
2368		*bpp = NULL;
2369		WARN_ON_ONCE(1);
2370	}
2371
2372	trace_xfs_iunlink_map_prev_fallback(mp, agno);
2373
2374	/* Otherwise, walk the entire bucket until we find it. */
2375	next_agino = head_agino;
2376	while (next_agino != target_agino) {
2377		xfs_agino_t	unlinked_agino;
2378
2379		if (*bpp)
2380			xfs_trans_brelse(tp, *bpp);
2381
2382		*agino = next_agino;
2383		error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
2384				bpp);
2385		if (error)
2386			return error;
2387
2388		unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2389		/*
2390		 * Make sure this pointer is valid and isn't an obvious
2391		 * infinite loop.
2392		 */
2393		if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
2394		    next_agino == unlinked_agino) {
2395			XFS_CORRUPTION_ERROR(__func__,
2396					XFS_ERRLEVEL_LOW, mp,
2397					*dipp, sizeof(**dipp));
2398			error = -EFSCORRUPTED;
2399			return error;
2400		}
2401		next_agino = unlinked_agino;
2402	}
2403
2404	return 0;
2405}
2406
2407/*
2408 * Pull the on-disk inode from the AGI unlinked list.
2409 */
2410STATIC int
2411xfs_iunlink_remove(
2412	struct xfs_trans	*tp,
2413	struct xfs_inode	*ip)
2414{
2415	struct xfs_mount	*mp = tp->t_mountp;
2416	struct xfs_agi		*agi;
2417	struct xfs_buf		*agibp;
2418	struct xfs_buf		*last_ibp;
2419	struct xfs_dinode	*last_dip = NULL;
2420	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2421	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2422	xfs_agino_t		next_agino;
2423	xfs_agino_t		head_agino;
2424	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2425	int			error;
2426
2427	trace_xfs_iunlink_remove(ip);
2428
2429	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2430	error = xfs_read_agi(mp, tp, agno, &agibp);
2431	if (error)
2432		return error;
2433	agi = agibp->b_addr;
2434
2435	/*
2436	 * Get the index into the agi hash table for the list this inode is
2437	 * on.  Make sure the head pointer isn't garbage.
2438	 */
2439	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2440	if (!xfs_verify_agino(mp, agno, head_agino)) {
2441		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2442				agi, sizeof(*agi));
2443		return -EFSCORRUPTED;
2444	}
2445
2446	/*
2447	 * Set our inode's next_unlinked pointer to NULL and then return
2448	 * the old pointer value so that we can update whatever was previous
2449	 * to us in the list to point to whatever was next in the list.
2450	 */
2451	error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
2452	if (error)
2453		return error;
2454
2455	/*
2456	 * If there was a backref pointing from the next inode back to this
2457	 * one, remove it because we've removed this inode from the list.
2458	 *
2459	 * Later, if this inode was in the middle of the list we'll update
2460	 * this inode's backref to point from the next inode.
2461	 */
2462	if (next_agino != NULLAGINO) {
2463		error = xfs_iunlink_change_backref(agibp->b_pag, next_agino,
2464				NULLAGINO);
2465		if (error)
2466			return error;
2467	}
2468
2469	if (head_agino != agino) {
2470		struct xfs_imap	imap;
2471		xfs_agino_t	prev_agino;
2472
2473		/* We need to search the list for the inode being freed. */
2474		error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
2475				&prev_agino, &imap, &last_dip, &last_ibp,
2476				agibp->b_pag);
2477		if (error)
2478			return error;
2479
2480		/* Point the previous inode on the list to the next inode. */
2481		xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
2482				last_dip, &imap, next_agino);
2483
2484		/*
2485		 * Now we deal with the backref for this inode.  If this inode
2486		 * pointed at a real inode, change the backref that pointed to
2487		 * us to point to our old next.  If this inode was the end of
2488		 * the list, delete the backref that pointed to us.  Note that
2489		 * change_backref takes care of deleting the backref if
2490		 * next_agino is NULLAGINO.
2491		 */
2492		return xfs_iunlink_change_backref(agibp->b_pag, agino,
2493				next_agino);
2494	}
2495
2496	/* Point the head of the list to the next unlinked inode. */
2497	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
2498			next_agino);
2499}
2500
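/*
 * Companion to the model after xfs_iunlink() above (illustrative only,
 * reusing the same invented arrays): removing @agino mirrors the two
 * cases in xfs_iunlink_remove().  Either we are the bucket head and the
 * bucket is pointed past us, or a predecessor is found (the kernel
 * consults the backref cache first) and spliced around us.
 */
static void
model_iunlink_remove(unsigned int agino)
{
	unsigned int b = agino % MODEL_BUCKETS;

	if (bucket[b] == agino) {
		/* Head of the list: point the bucket at our successor. */
		bucket[b] = next_ptr[agino];
	} else {
		/* Middle or tail: walk to our predecessor, splice us out. */
		unsigned int prev = bucket[b];

		while (next_ptr[prev] != agino)
			prev = next_ptr[prev];
		next_ptr[prev] = next_ptr[agino];
	}
	next_ptr[agino] = MODEL_NULL;
}
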
2501/*
2502 * Look up the inode number specified and if it is not already marked XFS_ISTALE
2503 * mark it stale. We should only find clean inodes in this lookup that aren't
2504 * already stale.
2505 */
2506static void
2507xfs_ifree_mark_inode_stale(
2508	struct xfs_buf		*bp,
2509	struct xfs_inode	*free_ip,
2510	xfs_ino_t		inum)
2511{
2512	struct xfs_mount	*mp = bp->b_mount;
2513	struct xfs_perag	*pag = bp->b_pag;
2514	struct xfs_inode_log_item *iip;
2515	struct xfs_inode	*ip;
2516
2517retry:
2518	rcu_read_lock();
2519	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2520
2521	/* Inode not in memory, nothing to do */
2522	if (!ip) {
2523		rcu_read_unlock();
2524		return;
2525	}
2526
2527	/*
2528	 * Because this is an RCU-protected lookup, we could find a recently
2529	 * freed or even reallocated inode during the lookup. We need to check
2530	 * under the i_flags_lock for a valid inode here. Skip it if it is not
2531	 * valid, the wrong inode or stale.
2532	 */
2533	spin_lock(&ip->i_flags_lock);
2534	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE)) {
2535		spin_unlock(&ip->i_flags_lock);
2536		rcu_read_unlock();
2537		return;
2538	}
2539
2540	/*
2541	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2542	 * other inodes that we did not find in the list attached to the buffer
2543	 * and are not already marked stale. If we can't lock it, back off and
2544	 * retry.
2545	 */
2546	if (ip != free_ip) {
2547		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2548			spin_unlock(&ip->i_flags_lock);
2549			rcu_read_unlock();
2550			delay(1);
2551			goto retry;
2552		}
2553	}
2554	ip->i_flags |= XFS_ISTALE;
2555	spin_unlock(&ip->i_flags_lock);
2556	rcu_read_unlock();
2557
2558	/*
2559	 * If we can't get the flush lock, the inode is already attached.  All
2560	 * we needed to do here is mark the inode stale so buffer IO completion
2561	 * will remove it from the AIL.
2562	 */
2563	iip = ip->i_itemp;
2564	if (!xfs_iflock_nowait(ip)) {
2565		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2566		ASSERT(iip->ili_last_fields);
2567		goto out_iunlock;
2568	}
2569
2570	/*
2571	 * Inodes not attached to the buffer can be released immediately.
2572	 * Everything else has to go through xfs_iflush_abort() on journal
2573	 * commit as the flock synchronises removal of the inode from the
2574	 * cluster buffer against inode reclaim.
2575	 */
2576	if (!iip || list_empty(&iip->ili_item.li_bio_list)) {
2577		xfs_ifunlock(ip);
2578		goto out_iunlock;
2579	}
2580
2581	/* we have a dirty inode in memory that has not yet been flushed. */
2582	spin_lock(&iip->ili_lock);
2583	iip->ili_last_fields = iip->ili_fields;
2584	iip->ili_fields = 0;
2585	iip->ili_fsync_fields = 0;
2586	spin_unlock(&iip->ili_lock);
2587	ASSERT(iip->ili_last_fields);
2588
2589out_iunlock:
2590	if (ip != free_ip)
2591		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2592}
2593
2594/*
2595 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2596 * inodes that are in memory - they all must be marked stale and attached to
2597 * the cluster buffer.
2598 */
2599STATIC int
2600xfs_ifree_cluster(
2601	struct xfs_inode	*free_ip,
2602	struct xfs_trans	*tp,
2603	struct xfs_icluster	*xic)
2604{
2605	struct xfs_mount	*mp = free_ip->i_mount;
2606	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
2607	struct xfs_buf		*bp;
2608	xfs_daddr_t		blkno;
2609	xfs_ino_t		inum = xic->first_ino;
2610	int			nbufs;
2611	int			i, j;
2612	int			ioffset;
2613	int			error;
2614
2615	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2616
2617	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2618		/*
2619		 * The allocation bitmap tells us which inodes of the chunk were
2620		 * physically allocated. Skip the cluster if an inode falls into
2621		 * a sparse region.
2622		 */
2623		ioffset = inum - xic->first_ino;
2624		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2625			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2626			continue;
2627		}
2628
2629		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2630					 XFS_INO_TO_AGBNO(mp, inum));
2631
2632		/*
2633		 * We obtain and lock the backing buffer first in the process
2634		 * here, as we have to ensure that any dirty inode that we
2635		 * can't get the flush lock on is attached to the buffer.
2636		 * If we scan the in-memory inodes first, then buffer IO can
2637		 * complete before we get a lock on it, and hence we may fail
2638		 * to mark all the active inodes on the buffer stale.
2639		 */
2640		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2641				mp->m_bsize * igeo->blocks_per_cluster,
2642				XBF_UNMAPPED, &bp);
2643		if (error)
2644			return error;
2645
2646		/*
2647		 * This buffer may not have been correctly initialised as we
2648		 * didn't read it from disk. That's not important because we are
2649		 * only using it to mark the buffer as stale in the log, and to
2650		 * attach stale cached inodes on it. That means it will never be
2651		 * dispatched for IO. If it is, we want to know about it, and we
2652		 * want it to fail. We can achieve this by adding a write
2653		 * verifier to the buffer.
2654		 */
2655		bp->b_ops = &xfs_inode_buf_ops;
2656
2657		/*
2658		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2659		 * too. This requires lookups, and will skip inodes that we've
2660		 * already marked XFS_ISTALE.
2661		 */
2662		for (i = 0; i < igeo->inodes_per_cluster; i++)
2663			xfs_ifree_mark_inode_stale(bp, free_ip, inum + i);
2664
2665		xfs_trans_stale_inode_buf(tp, bp);
2666		xfs_trans_binval(tp, bp);
2667	}
2668	return 0;
2669}
2670
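/*
 * Hedged illustration (standalone userspace sketch, not kernel code) of
 * the sparse-cluster test in xfs_ifree_cluster() above: xic->alloc is a
 * bitmap with one bit per inode in the chunk, and a whole cluster is
 * skipped when the bit for its first inode is clear.  Sizes and names
 * below are invented for the model.
 */
#include <assert.h>
#include <stdint.h>

#define MODEL_INODES_PER_CLUSTER	32

static int
model_cluster_allocated(uint64_t alloc_bitmap, int ioffset)
{
	assert(ioffset % MODEL_INODES_PER_CLUSTER == 0);
	return (int)((alloc_bitmap >> ioffset) & 1);	/* cf. XFS_INOBT_MASK */
}

int main(void)
{
	uint64_t alloc = 0x00000000ffffffffULL;	/* only 1st cluster present */

	assert(model_cluster_allocated(alloc, 0) == 1);
	assert(model_cluster_allocated(alloc, 32) == 0);	/* sparse */
	return 0;
}
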
2671/*
2672 * This is called to return an inode to the inode free list.
2673 * The inode should already be truncated to 0 length and have
2674 * no pages associated with it.  This routine also assumes that
2675 * the inode is already a part of the transaction.
2676 *
2677 * The on-disk copy of the inode will have been added to the list
2678 * of unlinked inodes in the AGI. We need to remove the inode from
2679 * that list atomically with respect to freeing it here.
2680 */
2681int
2682xfs_ifree(
2683	struct xfs_trans	*tp,
2684	struct xfs_inode	*ip)
2685{
2686	int			error;
2687	struct xfs_icluster	xic = { 0 };
2688	struct xfs_inode_log_item *iip = ip->i_itemp;
2689
2690	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2691	ASSERT(VFS_I(ip)->i_nlink == 0);
2692	ASSERT(ip->i_df.if_nextents == 0);
2693	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2694	ASSERT(ip->i_d.di_nblocks == 0);
2695
2696	/*
2697	 * Pull the on-disk inode from the AGI unlinked list.
2698	 */
2699	error = xfs_iunlink_remove(tp, ip);
2700	if (error)
2701		return error;
2702
2703	error = xfs_difree(tp, ip->i_ino, &xic);
2704	if (error)
2705		return error;
2706
2707	/*
2708	 * Free any local-format data sitting around before we reset the
2709	 * data fork to extents format.  Note that the attr fork data has
2710	 * already been freed by xfs_attr_inactive.
2711	 */
2712	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2713		kmem_free(ip->i_df.if_u1.if_data);
2714		ip->i_df.if_u1.if_data = NULL;
2715		ip->i_df.if_bytes = 0;
2716	}
2717
2718	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2719	ip->i_d.di_flags = 0;
2720	ip->i_d.di_flags2 = 0;
2721	ip->i_d.di_dmevmask = 0;
2722	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
2723	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2724
2725	/* Don't attempt to replay owner changes for a deleted inode */
2726	spin_lock(&iip->ili_lock);
2727	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2728	spin_unlock(&iip->ili_lock);
2729
2730	/*
2731	 * Bump the generation count so no one will be confused
2732	 * by reincarnations of this inode.
2733	 */
2734	VFS_I(ip)->i_generation++;
2735	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2736
2737	if (xic.deleted)
2738		error = xfs_ifree_cluster(ip, tp, &xic);
2739
2740	return error;
2741}
2742
2743/*
2744 * This is called to unpin an inode.  The caller must have the inode locked
2745 * in at least shared mode so that the buffer cannot be subsequently pinned
2746 * once someone is waiting for it to be unpinned.
2747 */
2748static void
2749xfs_iunpin(
2750	struct xfs_inode	*ip)
2751{
2752	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2753
2754	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2755
2756	/* Give the log a push to start the unpinning I/O */
2757	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
2758
2759}
2760
2761static void
2762__xfs_iunpin_wait(
2763	struct xfs_inode	*ip)
2764{
2765	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2766	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2767
2768	xfs_iunpin(ip);
2769
2770	do {
2771		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2772		if (xfs_ipincount(ip))
2773			io_schedule();
2774	} while (xfs_ipincount(ip));
2775	finish_wait(wq, &wait.wq_entry);
2776}
2777
2778void
2779xfs_iunpin_wait(
2780	struct xfs_inode	*ip)
2781{
2782	if (xfs_ipincount(ip))
2783		__xfs_iunpin_wait(ip);
2784}
2785
2786/*
2787 * Removing an inode from the namespace involves removing the directory entry
2788 * and dropping the link count on the inode. Removing the directory entry can
2789 * result in locking an AGF (directory blocks were freed) and removing a link
2790 * count can result in placing the inode on an unlinked list which results in
2791 * locking an AGI.
2792 *
2793 * The big problem here is that we have an ordering constraint on AGF and AGI
2794 * locking - inode allocation locks the AGI, then can allocate a new extent for
2795 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2796 * removes the inode from the unlinked list, requiring that we lock the AGI
2797 * first, and then freeing the inode can result in an inode chunk being freed
2798 * and hence freeing disk space requiring that we lock an AGF.
2799 *
2800 * Hence the ordering that is imposed by other parts of the code is AGI before
2801 * AGF. This means we cannot remove the directory entry before we drop the inode
2802 * reference count and put it on the unlinked list as this results in a lock
2803 * order of AGF then AGI, and this can deadlock against inode allocation and
2804 * freeing. Therefore we must drop the link counts before we remove the
2805 * directory entry.
2806 *
2807 * This is still safe from a transactional point of view - it is not until we
2808 * get to xfs_defer_finish() that we have the possibility of multiple
2809 * transactions in this operation. Hence as long as we remove the directory
2810 * entry and drop the link count in the first transaction of the remove
2811 * operation, there are no transactional constraints on the ordering here.
2812 */
2813int
2814xfs_remove(
2815	xfs_inode_t             *dp,
2816	struct xfs_name		*name,
2817	xfs_inode_t		*ip)
2818{
2819	xfs_mount_t		*mp = dp->i_mount;
2820	xfs_trans_t             *tp = NULL;
2821	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2822	int                     error = 0;
2823	uint			resblks;
2824
2825	trace_xfs_remove(dp, name);
2826
2827	if (XFS_FORCED_SHUTDOWN(mp))
2828		return -EIO;
2829
2830	error = xfs_qm_dqattach(dp);
2831	if (error)
2832		goto std_return;
2833
2834	error = xfs_qm_dqattach(ip);
2835	if (error)
2836		goto std_return;
2837
2838	/*
2839	 * We try to get the real space reservation first,
2840	 * allowing for directory btree deletion(s) implying
2841	 * possible bmap insert(s).  If we can't get the space
2842	 * reservation then we use 0 instead, and avoid the bmap
2843	 * btree insert(s) in the directory code by trimming the
2844	 * LAST block from the directory if a bmap insert tries
2845	 * to happen.
2846	 */
2847	resblks = XFS_REMOVE_SPACE_RES(mp);
2848	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2849	if (error == -ENOSPC) {
2850		resblks = 0;
2851		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2852				&tp);
2853	}
2854	if (error) {
2855		ASSERT(error != -ENOSPC);
2856		goto std_return;
2857	}
2858
2859	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2860
2861	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2862	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2863
2864	/*
2865	 * If we're removing a directory perform some additional validation.
2866	 */
2867	if (is_dir) {
2868		ASSERT(VFS_I(ip)->i_nlink >= 2);
2869		if (VFS_I(ip)->i_nlink != 2) {
2870			error = -ENOTEMPTY;
2871			goto out_trans_cancel;
2872		}
2873		if (!xfs_dir_isempty(ip)) {
2874			error = -ENOTEMPTY;
2875			goto out_trans_cancel;
2876		}
2877
2878		/* Drop the link from ip's "..".  */
2879		error = xfs_droplink(tp, dp);
2880		if (error)
2881			goto out_trans_cancel;
2882
2883		/* Drop the "." link from ip to self.  */
2884		error = xfs_droplink(tp, ip);
2885		if (error)
2886			goto out_trans_cancel;
2887	} else {
2888		/*
2889		 * When removing a non-directory we need to log the parent
2890		 * inode here.  For a directory this is done implicitly
2891		 * by the xfs_droplink call for the ".." entry.
2892		 */
2893		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2894	}
2895	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2896
2897	/* Drop the link from dp to ip. */
2898	error = xfs_droplink(tp, ip);
2899	if (error)
2900		goto out_trans_cancel;
2901
2902	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2903	if (error) {
2904		ASSERT(error != -ENOENT);
2905		goto out_trans_cancel;
2906	}
2907
2908	/*
2909	 * If this is a synchronous mount, make sure that the
2910	 * remove transaction goes to disk before returning to
2911	 * the user.
2912	 */
2913	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2914		xfs_trans_set_sync(tp);
2915
2916	error = xfs_trans_commit(tp);
2917	if (error)
2918		goto std_return;
2919
2920	if (is_dir && xfs_inode_is_filestream(ip))
2921		xfs_filestream_deassociate(ip);
2922
2923	return 0;
2924
2925 out_trans_cancel:
2926	xfs_trans_cancel(tp);
2927 std_return:
2928	return error;
2929}
2930
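/*
 * Hedged illustration (standalone userspace sketch using pthreads, not
 * kernel code) of the AGI -> AGF ordering argument above: as long as
 * every path that needs both locks takes the AGI before the AGF, no
 * ABBA cycle can form.  xfs_remove() preserves this by dropping link
 * counts (AGI) before removing the directory entry (possibly AGF).
 */
#include <pthread.h>

static pthread_mutex_t model_agi = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t model_agf = PTHREAD_MUTEX_INITIALIZER;

static void model_inode_free(void)
{
	pthread_mutex_lock(&model_agi);		/* unlinked list removal */
	pthread_mutex_lock(&model_agf);		/* inode chunk / space freeing */
	pthread_mutex_unlock(&model_agf);
	pthread_mutex_unlock(&model_agi);
}

static void model_inode_alloc(void)
{
	pthread_mutex_lock(&model_agi);		/* inode allocation */
	pthread_mutex_lock(&model_agf);		/* new extent for inode chunks */
	pthread_mutex_unlock(&model_agf);
	pthread_mutex_unlock(&model_agi);
}

int main(void)
{
	model_inode_free();
	model_inode_alloc();
	return 0;
}
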
2931/*
2932 * Enter all inodes for a rename transaction into a sorted array.
2933 */
2934#define __XFS_SORT_INODES	5
2935STATIC void
2936xfs_sort_for_rename(
2937	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
2938	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
2939	struct xfs_inode	*ip1,	/* in: inode of old entry */
2940	struct xfs_inode	*ip2,	/* in: inode of new entry */
2941	struct xfs_inode	*wip,	/* in: whiteout inode */
2942	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
2943	int			*num_inodes)  /* in/out: inodes in array */
2944{
2945	int			i, j;
2946
2947	ASSERT(*num_inodes == __XFS_SORT_INODES);
2948	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2949
2950	/*
2951	 * i_tab contains a list of pointers to inodes.  We initialize
2952	 * the table here & we'll sort it.  We will then use it to
2953	 * order the acquisition of the inode locks.
2954	 *
2955	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2956	 */
2957	i = 0;
2958	i_tab[i++] = dp1;
2959	i_tab[i++] = dp2;
2960	i_tab[i++] = ip1;
2961	if (ip2)
2962		i_tab[i++] = ip2;
2963	if (wip)
2964		i_tab[i++] = wip;
2965	*num_inodes = i;
2966
2967	/*
2968	 * Sort the elements via bubble sort.  (Remember, there are at
2969	 * most 5 elements to sort, so this is adequate.)
2970	 */
2971	for (i = 0; i < *num_inodes; i++) {
2972		for (j = 1; j < *num_inodes; j++) {
2973			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2974				struct xfs_inode *temp = i_tab[j];
2975				i_tab[j] = i_tab[j-1];
2976				i_tab[j-1] = temp;
2977			}
2978		}
2979	}
2980}
2981
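/*
 * Hedged illustration (standalone userspace sketch, not kernel code):
 * the same "sort by inode number, then lock in ascending order"
 * discipline as xfs_sort_for_rename() above.  A global acquisition
 * order is what prevents ABBA deadlocks between two renames touching
 * the same inodes; duplicates in the table are harmless because the
 * locking code skips repeated entries.
 */
#include <stdio.h>

struct model_inode { unsigned long long i_ino; };

static void
model_sort_for_rename(struct model_inode **tab, int n)
{
	/* Bubble sort, as above: n is at most 5. */
	for (int i = 0; i < n; i++)
		for (int j = 1; j < n; j++)
			if (tab[j]->i_ino < tab[j - 1]->i_ino) {
				struct model_inode *tmp = tab[j];

				tab[j] = tab[j - 1];
				tab[j - 1] = tmp;
			}
}

int main(void)
{
	struct model_inode a = { 42 }, b = { 7 }, c = { 19 };
	struct model_inode *tab[] = { &a, &b, &c };

	model_sort_for_rename(tab, 3);
	for (int i = 0; i < 3; i++)
		printf("%llu\n", tab[i]->i_ino);	/* 7 19 42 */
	return 0;
}
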
2982static int
2983xfs_finish_rename(
2984	struct xfs_trans	*tp)
2985{
2986	/*
2987	 * If this is a synchronous mount, make sure that the rename transaction
2988	 * goes to disk before returning to the user.
2989	 */
2990	if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2991		xfs_trans_set_sync(tp);
2992
2993	return xfs_trans_commit(tp);
2994}
2995
2996/*
2997 * xfs_cross_rename()
2998 *
2999 * Responsible for handling the RENAME_EXCHANGE flag in renameat2()
3000 */
3001STATIC int
3002xfs_cross_rename(
3003	struct xfs_trans	*tp,
3004	struct xfs_inode	*dp1,
3005	struct xfs_name		*name1,
3006	struct xfs_inode	*ip1,
3007	struct xfs_inode	*dp2,
3008	struct xfs_name		*name2,
3009	struct xfs_inode	*ip2,
3010	int			spaceres)
3011{
3012	int		error = 0;
3013	int		ip1_flags = 0;
3014	int		ip2_flags = 0;
3015	int		dp2_flags = 0;
3016
3017	/* Swap inode number for dirent in first parent */
3018	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
3019	if (error)
3020		goto out_trans_abort;
3021
3022	/* Swap inode number for dirent in second parent */
3023	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
3024	if (error)
3025		goto out_trans_abort;
3026
3027	/*
3028	 * If we're renaming one or more directories across different parents,
3029	 * update the respective ".." entries (and link counts) to match the new
3030	 * parents.
3031	 */
3032	if (dp1 != dp2) {
3033		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3034
3035		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
3036			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
3037						dp1->i_ino, spaceres);
3038			if (error)
3039				goto out_trans_abort;
3040
3041			/* transfer ip2 ".." reference to dp1 */
3042			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
3043				error = xfs_droplink(tp, dp2);
3044				if (error)
3045					goto out_trans_abort;
3046				xfs_bumplink(tp, dp1);
3047			}
3048
3049			/*
3050			 * Although ip1 isn't changed here, userspace needs
3051			 * to be warned about the change, so that applications
3052			 * relying on it (like backup tools) will properly
3053			 * pick up the change.
3054			 */
3055			ip1_flags |= XFS_ICHGTIME_CHG;
3056			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3057		}
3058
3059		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
3060			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
3061						dp2->i_ino, spaceres);
3062			if (error)
3063				goto out_trans_abort;
3064
3065			/* transfer ip1 ".." reference to dp2 */
3066			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
3067				error = xfs_droplink(tp, dp1);
3068				if (error)
3069					goto out_trans_abort;
3070				xfs_bumplink(tp, dp2);
3071			}
3072
3073			/*
3074			 * Although ip2 isn't changed here, userspace needs
3075			 * to be warned about the change, so that applications
3076			 * relying on it (like backup tools) will properly
3077			 * pick up the change.
3078			 */
3079			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3080			ip2_flags |= XFS_ICHGTIME_CHG;
3081		}
3082	}
3083
3084	if (ip1_flags) {
3085		xfs_trans_ichgtime(tp, ip1, ip1_flags);
3086		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
3087	}
3088	if (ip2_flags) {
3089		xfs_trans_ichgtime(tp, ip2, ip2_flags);
3090		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
3091	}
3092	if (dp2_flags) {
3093		xfs_trans_ichgtime(tp, dp2, dp2_flags);
3094		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
3095	}
3096	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3097	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
3098	return xfs_finish_rename(tp);
3099
3100out_trans_abort:
3101	xfs_trans_cancel(tp);
3102	return error;
3103}
3104
3105/*
3106 * xfs_rename_alloc_whiteout()
3107 *
3108 * Return a referenced, unlinked, unlocked inode that can be used as a
3109 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
3110 * crash between allocating the inode and linking it into the rename transaction,
3111 * recovery will free the inode and we won't leak it.
3112 */
3113static int
3114xfs_rename_alloc_whiteout(
3115	struct xfs_inode	*dp,
3116	struct xfs_inode	**wip)
3117{
3118	struct xfs_inode	*tmpfile;
3119	int			error;
3120
3121	error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
3122	if (error)
3123		return error;
3124
3125	/*
3126	 * Prepare the tmpfile inode as if it were created through the VFS.
3127	 * Complete the inode setup and flag it as linkable.  nlink is already
3128	 * zero, so we can skip the drop_nlink.
3129	 */
3130	xfs_setup_iops(tmpfile);
3131	xfs_finish_inode_setup(tmpfile);
3132	VFS_I(tmpfile)->i_state |= I_LINKABLE;
3133
3134	*wip = tmpfile;
3135	return 0;
3136}
3137
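/*
 * Hedged userspace example (not kernel code) of what the whiteout is
 * ultimately for: renameat2() with RENAME_WHITEOUT moves the source and
 * leaves a 0:0 character device in its place.  Assumes glibc 2.28+ for
 * the renameat2() wrapper and RENAME_* flags, and a filesystem that
 * supports whiteouts; "src" and "dst" are invented names.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	if (renameat2(AT_FDCWD, "src", AT_FDCWD, "dst", RENAME_WHITEOUT))
		perror("renameat2");
	return 0;
}
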
3138/*
3139 * xfs_rename
3140 */
3141int
3142xfs_rename(
3143	struct xfs_inode	*src_dp,
3144	struct xfs_name		*src_name,
3145	struct xfs_inode	*src_ip,
3146	struct xfs_inode	*target_dp,
3147	struct xfs_name		*target_name,
3148	struct xfs_inode	*target_ip,
3149	unsigned int		flags)
3150{
3151	struct xfs_mount	*mp = src_dp->i_mount;
3152	struct xfs_trans	*tp;
3153	struct xfs_inode	*wip = NULL;		/* whiteout inode */
3154	struct xfs_inode	*inodes[__XFS_SORT_INODES];
3155	struct xfs_buf		*agibp;
3156	int			num_inodes = __XFS_SORT_INODES;
3157	bool			new_parent = (src_dp != target_dp);
3158	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3159	int			spaceres;
3160	int			error;
3161
3162	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3163
3164	if ((flags & RENAME_EXCHANGE) && !target_ip)
3165		return -EINVAL;
3166
3167	/*
3168	 * If we are doing a whiteout operation, allocate the whiteout inode
3169	 * we will be placing at the target and ensure the type is set
3170	 * appropriately.
3171	 */
3172	if (flags & RENAME_WHITEOUT) {
3173		ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
3174		error = xfs_rename_alloc_whiteout(target_dp, &wip);
3175		if (error)
3176			return error;
3177
3178		/* setup target dirent info as whiteout */
3179		src_name->type = XFS_DIR3_FT_CHRDEV;
3180	}
3181
3182	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3183				inodes, &num_inodes);
3184
3185	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3186	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
3187	if (error == -ENOSPC) {
3188		spaceres = 0;
3189		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3190				&tp);
3191	}
3192	if (error)
3193		goto out_release_wip;
3194
3195	/*
3196	 * Attach the dquots to the inodes
3197	 */
3198	error = xfs_qm_vop_rename_dqattach(inodes);
3199	if (error)
3200		goto out_trans_cancel;
3201
3202	/*
3203	 * Lock all the participating inodes. Depending upon whether
3204	 * the target_name exists in the target directory, and
3205	 * whether the target directory is the same as the source
3206	 * directory, we can lock from 2 to 4 inodes.
3207	 */
3208	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3209
3210	/*
3211	 * Join all the inodes to the transaction. From this point on,
3212	 * we can rely on either trans_commit or trans_cancel to unlock
3213	 * them.
3214	 */
3215	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3216	if (new_parent)
3217		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3218	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3219	if (target_ip)
3220		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
3221	if (wip)
3222		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3223
3224	/*
3225	 * If we are using project inheritance, we only allow renames
3226	 * into our tree when the project IDs are the same; else the
3227	 * tree quota mechanism would be circumvented.
3228	 */
3229	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3230		     target_dp->i_d.di_projid != src_ip->i_d.di_projid)) {
3231		error = -EXDEV;
3232		goto out_trans_cancel;
3233	}
3234
3235	/* RENAME_EXCHANGE is unique from here on. */
3236	if (flags & RENAME_EXCHANGE)
3237		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3238					target_dp, target_name, target_ip,
3239					spaceres);
3240
3241	/*
3242	 * Check for expected errors before we dirty the transaction
3243	 * so we can return an error without a transaction abort.
3244	 */
3245	if (target_ip == NULL) {
3246		/*
3247		 * If there's no space reservation, check the entry will
3248		 * fit before actually inserting it.
3249		 */
3250		if (!spaceres) {
3251			error = xfs_dir_canenter(tp, target_dp, target_name);
3252			if (error)
3253				goto out_trans_cancel;
3254		}
3255	} else {
3256		/*
3257		 * If target exists and it's a directory, check whether
3258		 * it can be destroyed.
3259		 */
3260		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3261		    (!xfs_dir_isempty(target_ip) ||
3262		     (VFS_I(target_ip)->i_nlink > 2))) {
3263			error = -EEXIST;
3264			goto out_trans_cancel;
3265		}
3266	}
3267
3268	/*
 
3269	 * Directory entry creation below may acquire the AGF. Remove
3270	 * the whiteout from the unlinked list first to preserve correct
3271	 * AGI/AGF locking order. This dirties the transaction so failures
3272	 * after this point will abort and log recovery will clean up the
3273	 * mess.
3274	 *
3275	 * For whiteouts, we need to bump the link count on the whiteout
3276	 * inode. After this point we have a real link, so clear the tmpfile
3277	 * state flag from the inode so it doesn't accidentally get misused
3278	 * in future.
3279	 */
3280	if (wip) {
3281		ASSERT(VFS_I(wip)->i_nlink == 0);
3282		error = xfs_iunlink_remove(tp, wip);
3283		if (error)
3284			goto out_trans_cancel;
3285
3286		xfs_bumplink(tp, wip);
3287		VFS_I(wip)->i_state &= ~I_LINKABLE;
3288	}
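	/*
	 * The whiteout inode was created as a tmpfile, i.e. with a zero
	 * link count and parked on the AGI unlinked list. Removing it from
	 * that list and bumping its link count from 0 to 1 above is what
	 * turns it into a normal, linkable inode.
	 */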
3289
3290	/*
3291	 * Set up the target.
3292	 */
3293	if (target_ip == NULL) {
3294		/*
3295		 * If target does not exist and the rename crosses
3296		 * directories, adjust the target directory link count
3297		 * to account for the ".." reference from the new entry.
3298		 */
3299		error = xfs_dir_createname(tp, target_dp, target_name,
3300					   src_ip->i_ino, spaceres);
3301		if (error)
3302			goto out_trans_cancel;
3303
3304		xfs_trans_ichgtime(tp, target_dp,
3305					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3306
3307		if (new_parent && src_is_directory) {
3308			xfs_bumplink(tp, target_dp);
3309		}
3310	} else { /* target_ip != NULL */
3311		/*
3312		 * Link the source inode under the target name.
3313		 * If the source inode is a directory and we are moving
3314		 * it across directories, its ".." entry will be
3315		 * inconsistent until we replace that down below.
3316		 *
3317		 * In case there is already an entry with the same
3318		 * name at the destination directory, replace that entry.
3319		 */
3320
3321		/*
3322		 * Check whether the replace operation will need to allocate
3323		 * blocks.  This happens when the shortform directory lacks
3324		 * space and we have to convert it to a block format directory.
3325		 * When more blocks are necessary, we must lock the AGI first
3326		 * to preserve locking order (AGI -> AGF).
3327		 */
3328		if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) {
3329			error = xfs_read_agi(mp, tp,
3330					XFS_INO_TO_AGNO(mp, target_ip->i_ino),
3331					&agibp);
3332			if (error)
3333				goto out_trans_cancel;
3334		}
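		/*
		 * The AGI is needed because dropping target_ip's last link
		 * below may put it on the AGI unlinked list; taking the AGI
		 * buffer now, before directory block allocation can lock the
		 * AGF, keeps the AGI -> AGF ordering intact.
		 */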
3335
3336		error = xfs_dir_replace(tp, target_dp, target_name,
3337					src_ip->i_ino, spaceres);
3338		if (error)
3339			goto out_trans_cancel;
3340
3341		xfs_trans_ichgtime(tp, target_dp,
3342					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3343
3344		/*
3345		 * Decrement the link count on the target since the target
3346		 * dir no longer points to it.
3347		 */
3348		error = xfs_droplink(tp, target_ip);
3349		if (error)
3350			goto out_trans_cancel;
3351
3352		if (src_is_directory) {
3353			/*
3354			 * Drop the link from the old "." entry.
3355			 */
3356			error = xfs_droplink(tp, target_ip);
3357			if (error)
3358				goto out_trans_cancel;
3359		}
3360	} /* target_ip != NULL */
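	/*
	 * Link count example for the replace case: renaming directory
	 * "a/src" over an existing empty directory "b/tgt" drops tgt's link
	 * count twice, once for b's entry that now points at src and once
	 * for tgt's own "." entry, taking it to zero so the inode can be
	 * freed once the last reference is dropped.
	 */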
3361
3362	/*
3363	 * Remove the source.
3364	 */
3365	if (new_parent && src_is_directory) {
3366		/*
3367		 * Rewrite the ".." entry to point to the new
3368		 * directory.
3369		 */
3370		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3371					target_dp->i_ino, spaceres);
3372		ASSERT(error != -EEXIST);
3373		if (error)
3374			goto out_trans_cancel;
3375	}
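	/*
	 * From this point the moved directory's ".." entry counts against
	 * target_dp rather than src_dp, which is why src_dp's link count
	 * is dropped below.
	 */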
3376
3377	/*
3378	 * We always want to hit the ctime on the source inode.
3379	 *
3380	 * This isn't strictly required by the standards since the source
3381	 * inode isn't really being changed, but old unix file systems did
3382	 * it and some incremental backup programs won't work without it.
3383	 */
3384	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3385	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3386
3387	/*
3388	 * Adjust the link count on src_dp.  This is necessary when
3389	 * renaming a directory, either within one parent when
3390	 * the target existed, or across two parent directories.
3391	 */
3392	if (src_is_directory && (new_parent || target_ip != NULL)) {
3393
3394		/*
3395		 * Decrement link count on src_directory since the
3396		 * entry that's moved no longer points to it.
3397		 */
3398		error = xfs_droplink(tp, src_dp);
3399		if (error)
3400			goto out_trans_cancel;
3401	}
3402
3403	/*
3404	 * For whiteouts, we only need to update the source dirent with the
3405	 * inode number of the whiteout inode rather than removing it
3406	 * altogether.
3407	 */
3408	if (wip) {
3409		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3410					spaceres);
3411	} else
3412		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3413					   spaceres);
3414	if (error)
3415		goto out_trans_cancel;
3416
3417	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3418	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3419	if (new_parent)
3420		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3421
3422	error = xfs_finish_rename(tp);
3423	if (wip)
3424		xfs_irele(wip);
3425	return error;
3426
3427out_trans_cancel:
3428	xfs_trans_cancel(tp);
3429out_release_wip:
3430	if (wip)
3431		xfs_irele(wip);
3432	return error;
3433}
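/*
 * A note on xfs_rename() error handling: once the transaction has been
 * dirtied (starting with the whiteout unlinked list removal), cancelling it
 * shuts the filesystem down and leaves log recovery to undo the partial
 * rename.  That is why all expected errors are checked before the first
 * dirtying operation.
 */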
3434
3435static int
3436xfs_iflush(
3437	struct xfs_inode	*ip,
3438	struct xfs_buf		*bp)
3439{
3440	struct xfs_inode_log_item *iip = ip->i_itemp;
3441	struct xfs_dinode	*dip;
3442	struct xfs_mount	*mp = ip->i_mount;
3443	int			error;
3444
3445	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3446	ASSERT(xfs_isiflocked(ip));
3447	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3448	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3449	ASSERT(iip->ili_item.li_buf == bp);
3450
3451	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3452
3453	/*
3454	 * We don't flush the inode if any of the following checks fail, but we
3455	 * do still update the log item and attach to the backing buffer as if
3456	 * the flush happened. This is a formality to facilitate predictable
3457	 * error handling as the caller will shut down and fail the buffer.
3458	 */
3459	error = -EFSCORRUPTED;
3460	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3461			       mp, XFS_ERRTAG_IFLUSH_1)) {
3462		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3463			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3464			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3465		goto flush_out;
3466	}
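	/*
	 * Each XFS_TEST_ERROR() check fires either when the corruption
	 * predicate is true or when the named XFS_ERRTAG_IFLUSH_* error
	 * injection tag has been armed, so these failure paths can be
	 * exercised on demand during testing.
	 */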
3467	if (S_ISREG(VFS_I(ip)->i_mode)) {
3468		if (XFS_TEST_ERROR(
3469		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3470		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3471		    mp, XFS_ERRTAG_IFLUSH_3)) {
3472			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3473				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
3474				__func__, ip->i_ino, ip);
3475			goto flush_out;
3476		}
3477	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3478		if (XFS_TEST_ERROR(
3479		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3480		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3481		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3482		    mp, XFS_ERRTAG_IFLUSH_4)) {
3483			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3484				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
3485				__func__, ip->i_ino, ip);
3486			goto flush_out;
3487		}
3488	}
3489	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
3490				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3491		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3492			"%s: detected corrupt incore inode %Lu, "
3493			"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
3494			__func__, ip->i_ino,
3495			ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
3496			ip->i_nblocks, ip);
3497		goto flush_out;
3498	}
3499	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
3500				mp, XFS_ERRTAG_IFLUSH_6)) {
3501		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3502			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3503			__func__, ip->i_ino, ip->i_forkoff, ip);
3504		goto flush_out;
3505	}
3506
3507	/*
3508	 * Inode item log recovery for v2 inodes is dependent on the
3509	 * di_flushiter count for correct sequencing. We bump the flush
3510	 * iteration count so we can detect flushes which postdate a log record
3511	 * during recovery. This is redundant as we now log every change and
3512	 * hence this can't happen, but we still need to do it to ensure
3513	 * backwards compatibility with old kernels that predate logging all
3514	 * inode changes.
3515	 */
3516	if (!xfs_sb_version_has_v3inode(&mp->m_sb))
3517		ip->i_flushiter++;
3518
3519	/*
3520	 * If there are inline format data / attr forks attached to this inode,
3521	 * make sure they are not corrupt.
3522	 */
3523	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3524	    xfs_ifork_verify_local_data(ip))
3525		goto flush_out;
3526	if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
3527	    xfs_ifork_verify_local_attr(ip))
3528		goto flush_out;
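	/*
	 * Local format means the fork's data lives entirely inside the
	 * inode literal area (e.g. a shortform directory or a short symlink
	 * target), so it can be verified in place before being copied out.
	 */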
3529
3530	/*
3531	 * Copy the dirty parts of the inode into the on-disk inode.  We always
3532	 * copy out the core of the inode, because if the inode is dirty at all
3533	 * the core must be.
3534	 */
3535	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3536
3537	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3538	if (ip->i_flushiter == DI_MAX_FLUSH)
3539		ip->i_flushiter = 0;
3540
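	/*
	 * The on-disk copy just written may carry DI_MAX_FLUSH, but the
	 * incore counter wraps to zero so the log never does; recovery
	 * special-cases an on-disk DI_MAX_FLUSH against a logged zero as
	 * a wrap rather than a stale log record.
	 */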
3541	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3542	if (XFS_IFORK_Q(ip))
3543		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3544
3545	/*
3546	 * We've recorded everything logged in the inode, so we'd like to clear
3547	 * the ili_fields bits so we don't log and flush things unnecessarily.
3548	 * However, we can't stop logging all this information until the data
3549	 * we've copied into the disk buffer is written to disk.  If we did we
3550	 * might overwrite the copy of the inode in the log with all the data
3551	 * after re-logging only part of it, and in the face of a crash we
3552	 * wouldn't have all the data we need to recover.
3553	 *
3554	 * What we do is move the bits to the ili_last_fields field.  When
3555	 * logging the inode, these bits are moved back to the ili_fields field.
3556	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
3557	 * know that the information those bits represent is permanently on
3558	 * disk.  As long as the flush completes before the inode is logged
3559	 * again, then both ili_fields and ili_last_fields will be cleared.
3560	 */
3561	error = 0;
3562flush_out:
3563	spin_lock(&iip->ili_lock);
3564	iip->ili_last_fields = iip->ili_fields;
3565	iip->ili_fields = 0;
3566	iip->ili_fsync_fields = 0;
3567	spin_unlock(&iip->ili_lock);
3568
3569	/*
3570	 * Store the current LSN of the inode so that we can tell whether the
3571	 * item has moved in the AIL from xfs_iflush_done().
3572	 */
3573	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3574				&iip->ili_item.li_lsn);
3575
3576	/* generate the checksum. */
3577	xfs_dinode_calc_crc(mp, dip);
3578	return error;
3579}
3580
3581/*
3582 * Non-blocking flush of dirty inode metadata into the backing buffer.
3583 *
3584 * The caller must have a reference to the inode and hold the cluster buffer
3585 * locked. The function will walk across all the inodes on the cluster buffer
3586 * that it can find and lock without blocking, and flush them to the buffer.
3587 *
3588 * On successful flushing of at least one inode, the caller must write out the
3589 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3590 * the caller needs to release the buffer. On failure, the filesystem will be
3591 * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
3592 * will be returned.
3593 */
3594int
3595xfs_iflush_cluster(
3596	struct xfs_buf		*bp)
3597{
3598	struct xfs_mount	*mp = bp->b_mount;
3599	struct xfs_log_item	*lip, *n;
3600	struct xfs_inode	*ip;
3601	struct xfs_inode_log_item *iip;
3602	int			clcount = 0;
3603	int			error = 0;
3604
3605	/*
3606	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3607	 * can remove itself from the list.
3608	 */
3609	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3610		iip = (struct xfs_inode_log_item *)lip;
3611		ip = iip->ili_inode;
3612
3613		/*
3614		 * Quick and dirty check to avoid locks if possible.
3615		 */
3616		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLOCK))
3617			continue;
3618		if (xfs_ipincount(ip))
3619			continue;
3620
3621		/*
3622		 * The inode is still attached to the buffer, which means it is
3623		 * dirty but reclaim might try to grab it. Check carefully for
3624		 * that, and grab the ilock while still holding the i_flags_lock
3625		 * to guarantee reclaim will not be able to reclaim this inode
3626		 * once we drop the i_flags_lock.
3627		 */
3628		spin_lock(&ip->i_flags_lock);
3629		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3630		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLOCK)) {
3631			spin_unlock(&ip->i_flags_lock);
3632			continue;
3633		}
3634
3635		/*
3636		 * ILOCK will pin the inode against reclaim and prevent
3637		 * concurrent transactions modifying the inode while we are
3638		 * flushing the inode.
3639		 */
3640		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3641			spin_unlock(&ip->i_flags_lock);
3642			continue;
3643		}
3644		spin_unlock(&ip->i_flags_lock);
3645
3646		/*
3647		 * Skip inodes that are already flush locked as they have
3648		 * already been written to the buffer.
3649		 */
3650		if (!xfs_iflock_nowait(ip)) {
3651			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3652			continue;
3653		}
3654
3655		/*
3656		 * Abort flushing this inode if we are shut down because the
3657		 * inode may not currently be in the AIL. This can occur when
3658		 * a log I/O failure unpins the inode without inserting it into
3659		 * the AIL, leaving a dirty/unpinned inode attached to the buffer
3660		 * that otherwise looks like it should be flushed.
3661		 */
3662		if (XFS_FORCED_SHUTDOWN(mp)) {
3663			xfs_iunpin_wait(ip);
3664			/* xfs_iflush_abort() drops the flush lock */
3665			xfs_iflush_abort(ip);
3666			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3667			error = -EIO;
3668			continue;
3669		}
3670
3671		/* don't block waiting on a log force to unpin dirty inodes */
3672		if (xfs_ipincount(ip)) {
3673			xfs_ifunlock(ip);
3674			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3675			continue;
3676		}
3677
3678		if (!xfs_inode_clean(ip))
3679			error = xfs_iflush(ip, bp);
3680		else
3681			xfs_ifunlock(ip);
3682		xfs_iunlock(ip, XFS_ILOCK_SHARED);
3683		if (error)
3684			break;
3685		clcount++;
3686	}
3687
3688	if (error) {
3689		bp->b_flags |= XBF_ASYNC;
3690		xfs_buf_ioend_fail(bp);
3691		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3692		return error;
3693	}
3694
3695	if (!clcount)
3696		return -EAGAIN;
3697
3698	XFS_STATS_INC(mp, xs_icluster_flushcnt);
3699	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3700	return 0;
3701
3702}
3703
3704/* Release an inode. */
3705void
3706xfs_irele(
3707	struct xfs_inode	*ip)
3708{
3709	trace_xfs_irele(ip, _RET_IP_);
3710	iput(VFS_I(ip));
3711}
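/*
 * xfs_irele() is the XFS-side counterpart of iput(): it records the caller
 * via the trace point and drops the VFS reference; the final iput() is what
 * eventually drives the inode through eviction and xfs_inactive().
 */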
3712
3713/*
3714 * Ensure all committed transactions touching the inode are written to the log.
3715 */
3716int
3717xfs_log_force_inode(
3718	struct xfs_inode	*ip)
3719{
3720	xfs_lsn_t		lsn = 0;
3721
3722	xfs_ilock(ip, XFS_ILOCK_SHARED);
3723	if (xfs_ipincount(ip))
3724		lsn = ip->i_itemp->ili_last_lsn;
3725	xfs_iunlock(ip, XFS_ILOCK_SHARED);
3726
3727	if (!lsn)
3728		return 0;
3729	return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL);
3730}
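/*
 * A pinned inode means some committed change is still only in the in-memory
 * log, so callers such as the NFS commit_metadata export hook use this to
 * force the log up to the last LSN that touched the inode; a zero LSN means
 * nothing is pinned and no log force is needed.
 */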
3731
3732/*
3733 * Grab the exclusive iolock for a data copy from src to dest, making sure to
3734 * abide by vfs locking order (lowest pointer value goes first) and breaking the
3735 * layout leases before proceeding.  The loop is needed because we cannot call
3736 * the blocking break_layout() with the iolocks held, and therefore have to
3737 * back out both locks.
3738 */
3739static int
3740xfs_iolock_two_inodes_and_break_layout(
3741	struct inode		*src,
3742	struct inode		*dest)
3743{
3744	int			error;
3745
3746	if (src > dest)
3747		swap(src, dest);
3748
3749retry:
3750	/* Wait to break both inodes' layouts before we start locking. */
3751	error = break_layout(src, true);
3752	if (error)
3753		return error;
3754	if (src != dest) {
3755		error = break_layout(dest, true);
3756		if (error)
3757			return error;
3758	}
3759
3760	/* Lock one inode and make sure nobody got in and leased it. */
3761	inode_lock(src);
3762	error = break_layout(src, false);
3763	if (error) {
3764		inode_unlock(src);
3765		if (error == -EWOULDBLOCK)
3766			goto retry;
3767		return error;
3768	}
3769
3770	if (src == dest)
3771		return 0;
3772
3773	/* Lock the other inode and make sure nobody got in and leased it. */
3774	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3775	error = break_layout(dest, false);
3776	if (error) {
3777		inode_unlock(src);
3778		inode_unlock(dest);
3779		if (error == -EWOULDBLOCK)
3780			goto retry;
3781		return error;
3782	}
3783
3784	return 0;
3785}
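/*
 * Ordering the two locks by pointer value mirrors lock_two_nondirectories():
 * every task locking the same pair agrees on which inode comes first, so the
 * pair can never ABBA deadlock, and I_MUTEX_NONDIR2 gives lockdep a distinct
 * nesting class for the second lock instead of a false recursion report.
 */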
3786
3787/*
3788 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3789 * mmap activity.
3790 */
3791int
3792xfs_ilock2_io_mmap(
3793	struct xfs_inode	*ip1,
3794	struct xfs_inode	*ip2)
3795{
3796	int			ret;
3797
3798	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3799	if (ret)
3800		return ret;
3801	if (ip1 == ip2)
3802		xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3803	else
3804		xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
3805				    ip2, XFS_MMAPLOCK_EXCL);
3806	return 0;
3807}
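/*
 * The resulting lock order is IOLOCK (the VFS inode_lock taken by the helper
 * above) and then MMAPLOCK, matching the rest of XFS: syscall I/O takes the
 * IOLOCK first while page faults take only the MMAPLOCK, so this order can
 * never invert against the fault path.
 */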
3808
3809/* Unlock both inodes to allow IO and mmap activity. */
3810void
3811xfs_iunlock2_io_mmap(
3812	struct xfs_inode	*ip1,
3813	struct xfs_inode	*ip2)
3814{
3815	bool			same_inode = (ip1 == ip2);
3816
3817	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3818	if (!same_inode)
3819		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3820	inode_unlock(VFS_I(ip2));
3821	if (!same_inode)
3822		inode_unlock(VFS_I(ip1));
3823}