v4.17
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_format.h"
  21#include "xfs_log_format.h"
  22#include "xfs_trans_resv.h"
  23#include "xfs_sb.h"
  24#include "xfs_mount.h"
  25#include "xfs_inode.h"
  26#include "xfs_error.h"
  27#include "xfs_trans.h"
  28#include "xfs_trans_priv.h"
  29#include "xfs_inode_item.h"
  30#include "xfs_quota.h"
  31#include "xfs_trace.h"
  32#include "xfs_icache.h"
  33#include "xfs_bmap_util.h"
  34#include "xfs_dquot_item.h"
  35#include "xfs_dquot.h"
  36#include "xfs_reflink.h"
  37
  38#include <linux/kthread.h>
  39#include <linux/freezer.h>
  40#include <linux/iversion.h>
  41
  42/*
  43 * Allocate and initialise an xfs_inode.
  44 */
  45struct xfs_inode *
  46xfs_inode_alloc(
  47	struct xfs_mount	*mp,
  48	xfs_ino_t		ino)
  49{
  50	struct xfs_inode	*ip;
  51
  52	/*
  53	 * if this didn't occur in transactions, we could use
  54	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
  55	 * code up to do this anyway.
  56	 */
  57	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
  58	if (!ip)
  59		return NULL;
  60	if (inode_init_always(mp->m_super, VFS_I(ip))) {
  61		kmem_zone_free(xfs_inode_zone, ip);
  62		return NULL;
  63	}
  64
  65	/* VFS doesn't initialise i_mode! */
  66	VFS_I(ip)->i_mode = 0;
  67
  68	XFS_STATS_INC(mp, vn_active);
  69	ASSERT(atomic_read(&ip->i_pincount) == 0);
  70	ASSERT(!xfs_isiflocked(ip));
  71	ASSERT(ip->i_ino == 0);
  72
  73	/* initialise the xfs inode */
  74	ip->i_ino = ino;
  75	ip->i_mount = mp;
  76	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
  77	ip->i_afp = NULL;
  78	ip->i_cowfp = NULL;
  79	ip->i_cnextents = 0;
  80	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
  81	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
  82	ip->i_flags = 0;
  83	ip->i_delayed_blks = 0;
  84	memset(&ip->i_d, 0, sizeof(ip->i_d));
  85
  86	return ip;
  87}
  88
  89STATIC void
  90xfs_inode_free_callback(
  91	struct rcu_head		*head)
  92{
  93	struct inode		*inode = container_of(head, struct inode, i_rcu);
  94	struct xfs_inode	*ip = XFS_I(inode);
  95
  96	switch (VFS_I(ip)->i_mode & S_IFMT) {
  97	case S_IFREG:
  98	case S_IFDIR:
  99	case S_IFLNK:
 100		xfs_idestroy_fork(ip, XFS_DATA_FORK);
 101		break;
 102	}
 103
 104	if (ip->i_afp)
 105		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
 106	if (ip->i_cowfp)
 107		xfs_idestroy_fork(ip, XFS_COW_FORK);
 108
 109	if (ip->i_itemp) {
 110		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
 111		xfs_inode_item_destroy(ip);
 112		ip->i_itemp = NULL;
 113	}
 114
 115	kmem_zone_free(xfs_inode_zone, ip);
 116}
 117
 118static void
 119__xfs_inode_free(
 120	struct xfs_inode	*ip)
 121{
 122	/* asserts to verify all state is correct here */
 123	ASSERT(atomic_read(&ip->i_pincount) == 0);
 124	XFS_STATS_DEC(ip->i_mount, vn_active);
 125
 126	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 127}
 128
 129void
 130xfs_inode_free(
 131	struct xfs_inode	*ip)
 132{
 133	ASSERT(!xfs_isiflocked(ip));
 134
 135	/*
 136	 * Because we use RCU freeing we need to ensure the inode always
 137	 * appears to be reclaimed with an invalid inode number when in the
 138	 * free state. The ip->i_flags_lock provides the barrier against lookup
 139	 * races.
 140	 */
 141	spin_lock(&ip->i_flags_lock);
 142	ip->i_flags = XFS_IRECLAIM;
 143	ip->i_ino = 0;
 144	spin_unlock(&ip->i_flags_lock);
 145
 146	__xfs_inode_free(ip);
 147}
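/*
 * Editorial illustration (not part of xfs_icache.c): the lookup/free
 * race that zeroing ip->i_ino under ip->i_flags_lock closes.  An RCU
 * lookup can still dereference this inode after xfs_inode_free():
 *
 *	lookup (xfs_iget_cache_hit)	free (xfs_inode_free)
 *	---------------------------	---------------------------
 *	rcu_read_lock()
 *	ip = radix_tree_lookup()
 *					spin_lock(&ip->i_flags_lock)
 *					ip->i_flags = XFS_IRECLAIM
 *					ip->i_ino = 0
 *					spin_unlock(&ip->i_flags_lock)
 *					call_rcu(...)
 *	spin_lock(&ip->i_flags_lock)
 *	sees ip->i_ino != ino: -EAGAIN
 *	spin_unlock(&ip->i_flags_lock)
 *	rcu_read_unlock()
 *					grace period expires, memory freed
 *
 * The lookup revalidates i_ino under the same lock, so it retries
 * rather than operating on a freed or reallocated inode.
 */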
 148
 149/*
 150 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 151 * isn't a reclaim pass already in progress. By default it runs every 5s based
 152 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 153 * tunable, but that can be done if this method proves to be ineffective or too
 154 * aggressive.
 155 */
 156static void
 157xfs_reclaim_work_queue(
 158	struct xfs_mount        *mp)
 159{
 160
 161	rcu_read_lock();
 162	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
 163		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
 164			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
 165	}
 166	rcu_read_unlock();
 167}
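/*
 * Editorial note (not in the original source): with the default
 * xfs_syncd_centisecs of 3000 (the 30s periodic sync interval,
 * expressed in centiseconds), the delay computed above works out to
 *
 *	3000 / 6 * 10 = 5000 ms = 5 seconds
 *
 * which is where the "every 5s" figure in the comment above comes from.
 */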
 168
 169/*
 170 * This is a fast pass over the inode cache to try to get reclaim moving on as
 171 * many inodes as possible in a short period of time. It kicks itself every few
 172 * seconds, as well as being kicked by the inode cache shrinker when memory
 173 * goes low. It scans as quickly as possible avoiding locked inodes or those
 174 * already being flushed, and once done schedules a future pass.
 175 */
 176void
 177xfs_reclaim_worker(
 178	struct work_struct *work)
 179{
 180	struct xfs_mount *mp = container_of(to_delayed_work(work),
 181					struct xfs_mount, m_reclaim_work);
 182
 183	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
 184	xfs_reclaim_work_queue(mp);
 185}
 186
 187static void
 188xfs_perag_set_reclaim_tag(
 189	struct xfs_perag	*pag)
 190{
 191	struct xfs_mount	*mp = pag->pag_mount;
 192
 193	lockdep_assert_held(&pag->pag_ici_lock);
 194	if (pag->pag_ici_reclaimable++)
 195		return;
 196
 197	/* propagate the reclaim tag up into the perag radix tree */
 198	spin_lock(&mp->m_perag_lock);
 199	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
 200			   XFS_ICI_RECLAIM_TAG);
 201	spin_unlock(&mp->m_perag_lock);
 202
 203	/* schedule periodic background inode reclaim */
 204	xfs_reclaim_work_queue(mp);
 205
 206	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
 207}
 208
 209static void
 210xfs_perag_clear_reclaim_tag(
 211	struct xfs_perag	*pag)
 212{
 213	struct xfs_mount	*mp = pag->pag_mount;
 214
 215	lockdep_assert_held(&pag->pag_ici_lock);
 216	if (--pag->pag_ici_reclaimable)
 217		return;
 218
 219	/* clear the reclaim tag from the perag radix tree */
 220	spin_lock(&mp->m_perag_lock);
 221	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
 222			     XFS_ICI_RECLAIM_TAG);
 223	spin_unlock(&mp->m_perag_lock);
 224	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
 225}
 226
 227
 228/*
 229 * We set the inode flag atomically with the radix tree tag.
 230 * Once we get tag lookups on the radix tree, this inode flag
 231 * can go away.
 232 */
 233void
 234xfs_inode_set_reclaim_tag(
 235	struct xfs_inode	*ip)
 236{
 237	struct xfs_mount	*mp = ip->i_mount;
 238	struct xfs_perag	*pag;
 239
 240	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
 241	spin_lock(&pag->pag_ici_lock);
 242	spin_lock(&ip->i_flags_lock);
 243
 244	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
 245			   XFS_ICI_RECLAIM_TAG);
 246	xfs_perag_set_reclaim_tag(pag);
 247	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
 248
 249	spin_unlock(&ip->i_flags_lock);
 250	spin_unlock(&pag->pag_ici_lock);
 251	xfs_perag_put(pag);
 252}
 253
 254STATIC void
 255xfs_inode_clear_reclaim_tag(
 256	struct xfs_perag	*pag,
 257	xfs_ino_t		ino)
 258{
 259	radix_tree_tag_clear(&pag->pag_ici_root,
 260			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
 261			     XFS_ICI_RECLAIM_TAG);
 262	xfs_perag_clear_reclaim_tag(pag);
 263}
 264
 265static void
 266xfs_inew_wait(
 267	struct xfs_inode	*ip)
 268{
 269	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
 270	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
 271
 272	do {
 273		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
 274		if (!xfs_iflags_test(ip, XFS_INEW))
 275			break;
 276		schedule();
 277	} while (true);
 278	finish_wait(wq, &wait.wq_entry);
 279}
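/*
 * Editorial sketch (hypothetical, user-space): the prepare_to_wait /
 * schedule / finish_wait loop above, rebuilt with a pthread condition
 * variable.  All names here (flag_lock, FLAG_INEW, inew_wait) are
 * invented for illustration; they are not kernel or XFS API.
 */
#if 0	/* editorial illustration, not part of xfs_icache.c */
#include <pthread.h>

#define FLAG_INEW	(1UL << 0)

static pthread_mutex_t flag_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  flag_cond = PTHREAD_COND_INITIALIZER;
static unsigned long   flags;		/* protected by flag_lock */

/* waiter side, analogous to xfs_inew_wait() above */
static void inew_wait(void)
{
	pthread_mutex_lock(&flag_lock);
	while (flags & FLAG_INEW)	/* re-check after every wakeup */
		pthread_cond_wait(&flag_cond, &flag_lock);
	pthread_mutex_unlock(&flag_lock);
}

/* waker side, analogous to clearing XFS_INEW and calling wake_up_bit() */
static void inew_clear_and_wake(void)
{
	pthread_mutex_lock(&flag_lock);
	flags &= ~FLAG_INEW;
	pthread_mutex_unlock(&flag_lock);
	pthread_cond_broadcast(&flag_cond);
}
#endif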
 280
 281/*
 282 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 283 * part of the structure. This is made more complex by the fact we store
 284 * information about the on-disk values in the VFS inode and so we can't just
 285 * overwrite the values unconditionally. Hence we save the parameters we
 286 * need to retain across reinitialisation, and rewrite them into the VFS inode
 287 * after reinitialisation even if it fails.
 288 */
 289static int
 290xfs_reinit_inode(
 291	struct xfs_mount	*mp,
 292	struct inode		*inode)
 293{
 294	int		error;
 295	uint32_t	nlink = inode->i_nlink;
 296	uint32_t	generation = inode->i_generation;
 297	uint64_t	version = inode_peek_iversion(inode);
 298	umode_t		mode = inode->i_mode;
 299	dev_t		dev = inode->i_rdev;
 300
 301	error = inode_init_always(mp->m_super, inode);
 302
 303	set_nlink(inode, nlink);
 304	inode->i_generation = generation;
 305	inode_set_iversion_queried(inode, version);
 306	inode->i_mode = mode;
 307	inode->i_rdev = dev;
 308	return error;
 309}
 310
 311/*
 312 * Check the validity of the inode we just found in the cache
 313 */
 314static int
 315xfs_iget_cache_hit(
 316	struct xfs_perag	*pag,
 317	struct xfs_inode	*ip,
 318	xfs_ino_t		ino,
 319	int			flags,
 320	int			lock_flags) __releases(RCU)
 321{
 322	struct inode		*inode = VFS_I(ip);
 323	struct xfs_mount	*mp = ip->i_mount;
 324	int			error;
 325
 326	/*
 327	 * check for re-use of an inode within an RCU grace period due to the
 328	 * radix tree nodes not being updated yet. We monitor for this by
 329	 * setting the inode number to zero before freeing the inode structure.
 330	 * If the inode has been reallocated and set up, then the inode number
 331	 * will not match, so check for that, too.
 332	 */
 333	spin_lock(&ip->i_flags_lock);
 334	if (ip->i_ino != ino) {
 335		trace_xfs_iget_skip(ip);
 336		XFS_STATS_INC(mp, xs_ig_frecycle);
 337		error = -EAGAIN;
 338		goto out_error;
 339	}
 340
 341
 342	/*
 343	 * If we are racing with another cache hit that is currently
 344	 * instantiating this inode or currently recycling it out of
 345	 * reclaimable state, wait for the initialisation to complete
 346	 * before continuing.
 347	 *
 348	 * XXX(hch): eventually we should do something equivalent to
 349	 *	     wait_on_inode to wait for these flags to be cleared
 350	 *	     instead of polling for it.
 351	 */
 352	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
 353		trace_xfs_iget_skip(ip);
 354		XFS_STATS_INC(mp, xs_ig_frecycle);
 355		error = -EAGAIN;
 356		goto out_error;
 357	}
 358
 359	/*
 360	 * If lookup is racing with unlink return an error immediately.
 361	 */
 362	if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
 363		error = -ENOENT;
 364		goto out_error;
 365	}
 366
 367	/*
 368	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
 369	 * Need to carefully get it back into useable state.
 370	 */
 371	if (ip->i_flags & XFS_IRECLAIMABLE) {
 372		trace_xfs_iget_reclaim(ip);
 373
 374		if (flags & XFS_IGET_INCORE) {
 375			error = -EAGAIN;
 376			goto out_error;
 377		}
 378
 379		/*
 380		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
 381		 * from stomping over us while we recycle the inode.  We can't
 382		 * clear the radix tree reclaimable tag yet as it requires
 383		 * pag_ici_lock to be held exclusive.
 384		 */
 385		ip->i_flags |= XFS_IRECLAIM;
 386
 387		spin_unlock(&ip->i_flags_lock);
 388		rcu_read_unlock();
 389
 390		error = xfs_reinit_inode(mp, inode);
 391		if (error) {
 392			bool wake;
 393			/*
 394			 * Re-initializing the inode failed, and we are in deep
 395			 * trouble.  Try to re-add it to the reclaim list.
 396			 */
 397			rcu_read_lock();
 398			spin_lock(&ip->i_flags_lock);
 399			wake = !!__xfs_iflags_test(ip, XFS_INEW);
 400			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
 401			if (wake)
 402				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
 403			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
 404			trace_xfs_iget_reclaim_fail(ip);
 405			goto out_error;
 406		}
 407
 408		spin_lock(&pag->pag_ici_lock);
 409		spin_lock(&ip->i_flags_lock);
 410
 411		/*
 412		 * Clear the per-lifetime state in the inode as we are now
 413		 * effectively a new inode and need to return to the initial
 414		 * state before reuse occurs.
 415		 */
 416		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
 417		ip->i_flags |= XFS_INEW;
 418		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
 419		inode->i_state = I_NEW;
 420
 421		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 422		init_rwsem(&inode->i_rwsem);
 423
 424		spin_unlock(&ip->i_flags_lock);
 425		spin_unlock(&pag->pag_ici_lock);
 426	} else {
 427		/* If the VFS inode is being torn down, pause and try again. */
 428		if (!igrab(inode)) {
 429			trace_xfs_iget_skip(ip);
 430			error = -EAGAIN;
 431			goto out_error;
 432		}
 433
 434		/* We've got a live one. */
 435		spin_unlock(&ip->i_flags_lock);
 436		rcu_read_unlock();
 437		trace_xfs_iget_hit(ip);
 438	}
 439
 440	if (lock_flags != 0)
 441		xfs_ilock(ip, lock_flags);
 442
 443	if (!(flags & XFS_IGET_INCORE))
 444		xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
 445	XFS_STATS_INC(mp, xs_ig_found);
 446
 447	return 0;
 448
 449out_error:
 450	spin_unlock(&ip->i_flags_lock);
 451	rcu_read_unlock();
 452	return error;
 453}
 454
 455
 456static int
 457xfs_iget_cache_miss(
 458	struct xfs_mount	*mp,
 459	struct xfs_perag	*pag,
 460	xfs_trans_t		*tp,
 461	xfs_ino_t		ino,
 462	struct xfs_inode	**ipp,
 463	int			flags,
 464	int			lock_flags)
 465{
 466	struct xfs_inode	*ip;
 467	int			error;
 468	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
 469	int			iflags;
 470
 471	ip = xfs_inode_alloc(mp, ino);
 472	if (!ip)
 473		return -ENOMEM;
 474
 475	error = xfs_iread(mp, tp, ip, flags);
 476	if (error)
 477		goto out_destroy;
 478
 479	if (!xfs_inode_verify_forks(ip)) {
 480		error = -EFSCORRUPTED;
 481		goto out_destroy;
 482	}
 483
 484	trace_xfs_iget_miss(ip);
 485
 486
 487	/*
 488	 * If we are allocating a new inode, then check what was returned is
 489	 * actually a free, empty inode. If we are not allocating an inode,
 490	 * then check that we didn't find a free inode.
 491	 */
 492	if (flags & XFS_IGET_CREATE) {
 493		if (VFS_I(ip)->i_mode != 0) {
 494			xfs_warn(mp,
 495"Corruption detected! Free inode 0x%llx not marked free on disk",
 496				ino);
 497			error = -EFSCORRUPTED;
 498			goto out_destroy;
 499		}
 500		if (ip->i_d.di_nblocks != 0) {
 501			xfs_warn(mp,
 502"Corruption detected! Free inode 0x%llx has blocks allocated!",
 503				ino);
 504			error = -EFSCORRUPTED;
 505			goto out_destroy;
 506		}
 507	} else if (VFS_I(ip)->i_mode == 0) {
 508		error = -ENOENT;
 509		goto out_destroy;
 510	}
 511
 512	/*
 513	 * Preload the radix tree so we can insert safely under the
 514	 * write spinlock. Note that we cannot sleep inside the preload
 515	 * region. Since we can be called from transaction context, don't
 516	 * recurse into the file system.
 517	 */
 518	if (radix_tree_preload(GFP_NOFS)) {
 519		error = -EAGAIN;
 520		goto out_destroy;
 521	}
 522
 523	/*
 524	 * Because the inode hasn't been added to the radix-tree yet it can't
 525	 * be found by another thread, so we can do the non-sleeping lock here.
 526	 */
 527	if (lock_flags) {
 528		if (!xfs_ilock_nowait(ip, lock_flags))
 529			BUG();
 530	}
 531
 532	/*
 533	 * These values must be set before inserting the inode into the radix
 534	 * tree as the moment it is inserted a concurrent lookup (allowed by the
 535	 * RCU locking mechanism) can find it and that lookup must see that this
 536	 * is an inode currently under construction (i.e. that XFS_INEW is set).
 537	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
 538	 * memory barrier that ensures this detection works correctly at lookup
 539	 * time.
 540	 */
 541	iflags = XFS_INEW;
 542	if (flags & XFS_IGET_DONTCACHE)
 543		iflags |= XFS_IDONTCACHE;
 544	ip->i_udquot = NULL;
 545	ip->i_gdquot = NULL;
 546	ip->i_pdquot = NULL;
 547	xfs_iflags_set(ip, iflags);
 548
 549	/* insert the new inode */
 550	spin_lock(&pag->pag_ici_lock);
 551	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
 552	if (unlikely(error)) {
 553		WARN_ON(error != -EEXIST);
 554		XFS_STATS_INC(mp, xs_ig_dup);
 555		error = -EAGAIN;
 556		goto out_preload_end;
 557	}
 558	spin_unlock(&pag->pag_ici_lock);
 559	radix_tree_preload_end();
 560
 561	*ipp = ip;
 562	return 0;
 563
 564out_preload_end:
 565	spin_unlock(&pag->pag_ici_lock);
 566	radix_tree_preload_end();
 567	if (lock_flags)
 568		xfs_iunlock(ip, lock_flags);
 569out_destroy:
 570	__destroy_inode(VFS_I(ip));
 571	xfs_inode_free(ip);
 572	return error;
 573}
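/*
 * Editorial sketch (hypothetical): the publication ordering that the
 * comment above describes, reduced to C11 atomics.  In the kernel the
 * ip->i_flags_lock provides the barrier; here a release store on the
 * shared pointer plays that role.  Names are invented for illustration.
 */
#if 0	/* editorial illustration, not part of xfs_icache.c */
#include <stdatomic.h>
#include <stddef.h>

#define OBJ_NEW		0x1

struct obj {
	int	flags;			/* written before publication */
};

static _Atomic(struct obj *) slot;	/* stands in for the radix tree */

static void publish(struct obj *o)
{
	o->flags = OBJ_NEW;		/* set "under construction" first */
	/* release store orders the flag write before the pointer write */
	atomic_store_explicit(&slot, o, memory_order_release);
}

static int lookup_sees_new(void)
{
	/* acquire load pairs with the release store in publish() */
	struct obj *o = atomic_load_explicit(&slot, memory_order_acquire);

	return o ? (o->flags & OBJ_NEW) : 0;
}
#endif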
 574
 575/*
 576 * Look up an inode by number in the given file system.
 577 * The inode is looked up in the cache held in each AG.
 578 * If the inode is found in the cache, initialise the vfs inode
 579 * if necessary.
 580 *
 581 * If it is not in core, read it in from the file system's device,
 582 * add it to the cache and initialise the vfs inode.
 583 *
 584 * The inode is locked according to the value of the lock_flags parameter.
 585 * This flag parameter indicates how and if the inode's IO lock and inode lock
 586 * should be taken.
 587 *
 588 * mp -- the mount point structure for the current file system.  It points
 589 *       to the inode hash table.
 590 * tp -- a pointer to the current transaction if there is one.  This is
 591 *       simply passed through to the xfs_iread() call.
 592 * ino -- the number of the inode desired.  This is the unique identifier
 593 *        within the file system for the inode being requested.
 594 * lock_flags -- flags indicating how to lock the inode.  See the comment
 595 *		 for xfs_ilock() for a list of valid values.
 596 */
 597int
 598xfs_iget(
 599	xfs_mount_t	*mp,
 600	xfs_trans_t	*tp,
 601	xfs_ino_t	ino,
 602	uint		flags,
 603	uint		lock_flags,
 604	xfs_inode_t	**ipp)
 605{
 606	xfs_inode_t	*ip;
 607	int		error;
 608	xfs_perag_t	*pag;
 609	xfs_agino_t	agino;
 610
 611	/*
 612	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
 613	 * doesn't get freed while it's being referenced during a
 614	 * radix tree traversal here.  It assumes this function
 615	 * acquires only the ILOCK (and therefore it has no need to
 616	 * involve the IOLOCK in this synchronization).
 617	 */
 618	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
 619
 620	/* reject inode numbers outside existing AGs */
 621	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
 622		return -EINVAL;
 623
 624	XFS_STATS_INC(mp, xs_ig_attempts);
 625
 626	/* get the perag structure and ensure that it's inode capable */
 627	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
 628	agino = XFS_INO_TO_AGINO(mp, ino);
 629
 630again:
 631	error = 0;
 632	rcu_read_lock();
 633	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 634
 635	if (ip) {
 636		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
 637		if (error)
 638			goto out_error_or_again;
 639	} else {
 640		rcu_read_unlock();
 641		if (flags & XFS_IGET_INCORE) {
 642			error = -ENODATA;
 643			goto out_error_or_again;
 644		}
 645		XFS_STATS_INC(mp, xs_ig_missed);
 646
 647		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
 648							flags, lock_flags);
 649		if (error)
 650			goto out_error_or_again;
 651	}
 652	xfs_perag_put(pag);
 653
 654	*ipp = ip;
 655
 656	/*
 657	 * If we have a real type for an on-disk inode, we can setup the inode
 658	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
 659	 */
 660	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
 661		xfs_setup_existing_inode(ip);
 662	return 0;
 663
 664out_error_or_again:
 665	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
 666		delay(1);
 667		goto again;
 668	}
 669	xfs_perag_put(pag);
 670	return error;
 671}
 672
 673/*
 674 * "Is this a cached inode that's also allocated?"
 675 *
 676 * Look up an inode by number in the given file system.  If the inode is
 677 * in cache and isn't in purgatory, return 1 if the inode is allocated
 678 * and 0 if it is not.  For all other cases (not in cache, being torn
 679 * down, etc.), return a negative error code.
 680 *
 681 * The caller has to prevent inode allocation and freeing activity,
 682 * presumably by locking the AGI buffer.   This is to ensure that an
 683 * inode cannot transition from allocated to freed until the caller is
 684 * ready to allow that.  If the inode is in an intermediate state (new,
 685 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 686 * inode is not in the cache, -ENOENT will be returned.  The caller must
 687 * deal with these scenarios appropriately.
 688 *
 689 * This is a specialized use case for the online scrubber; if you're
 690 * reading this, you probably want xfs_iget.
 691 */
 692int
 693xfs_icache_inode_is_allocated(
 694	struct xfs_mount	*mp,
 695	struct xfs_trans	*tp,
 696	xfs_ino_t		ino,
 697	bool			*inuse)
 698{
 699	struct xfs_inode	*ip;
 700	int			error;
 701
 702	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
 703	if (error)
 704		return error;
 705
 706	*inuse = !!(VFS_I(ip)->i_mode);
 707	IRELE(ip);
 708	return 0;
 709}
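/*
 * Editorial usage sketch (hypothetical caller): driving
 * xfs_icache_inode_is_allocated() while honouring the -EAGAIN/-ENOENT
 * contract described above.  The caller is assumed to already hold the
 * AGI buffer locked, as the comment requires; check_ino_allocated() is
 * an invented name.
 */
#if 0	/* editorial illustration, not part of xfs_icache.c */
static int check_ino_allocated(struct xfs_mount *mp, xfs_ino_t ino)
{
	bool	inuse;
	int	error;

	error = xfs_icache_inode_is_allocated(mp, NULL, ino, &inuse);
	if (error == -EAGAIN)
		return 0;	/* inode in an intermediate state; retry later */
	if (error == -ENOENT)
		return 0;	/* not cached; fall back to the on-disk check */
	if (error)
		return error;
	return inuse ? 1 : 0;
}
#endif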
 710
 711/*
 712 * The inode lookup is done in batches to keep the amount of lock traffic and
 713 * radix tree lookups to a minimum. The batch size is a trade off between
 714 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 715 * be too greedy.
 716 */
 717#define XFS_LOOKUP_BATCH	32
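/*
 * Editorial note: each batch is an on-stack array of inode pointers
 * (struct xfs_inode *batch[XFS_LOOKUP_BATCH] in the walkers below), so
 * on a 64-bit kernel one gang lookup costs 32 * 8 = 256 bytes of stack.
 * That is the "trade off between lookup reduction and stack usage" the
 * comment above refers to.
 */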
 718
 719STATIC int
 720xfs_inode_ag_walk_grab(
 721	struct xfs_inode	*ip,
 722	int			flags)
 723{
 724	struct inode		*inode = VFS_I(ip);
 725	bool			newinos = !!(flags & XFS_AGITER_INEW_WAIT);
 726
 727	ASSERT(rcu_read_lock_held());
 728
 729	/*
 730	 * check for stale RCU freed inode
 731	 *
 732	 * If the inode has been reallocated, it doesn't matter if it's not in
 733	 * the AG we are walking - we are walking for writeback, so if it
 734	 * passes all the "valid inode" checks and is dirty, then we'll write
 735	 * it back anyway.  If it has been reallocated and is still being
 736	 * initialised, the XFS_INEW check below will catch it.
 737	 */
 738	spin_lock(&ip->i_flags_lock);
 739	if (!ip->i_ino)
 740		goto out_unlock_noent;
 741
 742	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
 743	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
 744	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
 745		goto out_unlock_noent;
 746	spin_unlock(&ip->i_flags_lock);
 747
 748	/* nothing to sync during shutdown */
 749	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 750		return -EFSCORRUPTED;
 751
 752	/* If we can't grab the inode, it must be on its way to reclaim. */
 753	if (!igrab(inode))
 754		return -ENOENT;
 755
 756	/* inode is valid */
 757	return 0;
 758
 759out_unlock_noent:
 760	spin_unlock(&ip->i_flags_lock);
 761	return -ENOENT;
 762}
 763
 764STATIC int
 765xfs_inode_ag_walk(
 766	struct xfs_mount	*mp,
 767	struct xfs_perag	*pag,
 768	int			(*execute)(struct xfs_inode *ip, int flags,
 769					   void *args),
 770	int			flags,
 771	void			*args,
 772	int			tag,
 773	int			iter_flags)
 774{
 775	uint32_t		first_index;
 776	int			last_error = 0;
 777	int			skipped;
 778	int			done;
 779	int			nr_found;
 780
 781restart:
 782	done = 0;
 783	skipped = 0;
 784	first_index = 0;
 785	nr_found = 0;
 786	do {
 787		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
 788		int		error = 0;
 789		int		i;
 790
 791		rcu_read_lock();
 792
 793		if (tag == -1)
 794			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
 795					(void **)batch, first_index,
 796					XFS_LOOKUP_BATCH);
 797		else
 798			nr_found = radix_tree_gang_lookup_tag(
 799					&pag->pag_ici_root,
 800					(void **) batch, first_index,
 801					XFS_LOOKUP_BATCH, tag);
 802
 803		if (!nr_found) {
 804			rcu_read_unlock();
 805			break;
 806		}
 807
 808		/*
 809		 * Grab the inodes before we drop the lock. if we found
 810		 * nothing, nr == 0 and the loop will be skipped.
 811		 */
 812		for (i = 0; i < nr_found; i++) {
 813			struct xfs_inode *ip = batch[i];
 814
 815			if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
 816				batch[i] = NULL;
 817
 818			/*
 819			 * Update the index for the next lookup. Catch
 820			 * overflows into the next AG range which can occur if
 821			 * we have inodes in the last block of the AG and we
 822			 * are currently pointing to the last inode.
 823			 *
 824			 * Because we may see inodes that are from the wrong AG
 825			 * due to RCU freeing and reallocation, only update the
 826			 * index if it lies in this AG. It was a race that led
 827			 * us to see this inode, so another lookup from the
 828			 * same index will not find it again.
 829			 */
 830			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
 831				continue;
 832			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
 833			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
 834				done = 1;
 835		}
 836
 837		/* unlock now we've grabbed the inodes. */
 838		rcu_read_unlock();
 839
 840		for (i = 0; i < nr_found; i++) {
 841			if (!batch[i])
 842				continue;
 843			if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
 844			    xfs_iflags_test(batch[i], XFS_INEW))
 845				xfs_inew_wait(batch[i]);
 846			error = execute(batch[i], flags, args);
 847			IRELE(batch[i]);
 848			if (error == -EAGAIN) {
 849				skipped++;
 850				continue;
 851			}
 852			if (error && last_error != -EFSCORRUPTED)
 853				last_error = error;
 854		}
 855
 856		/* bail out if the filesystem is corrupted.  */
 857		if (error == -EFSCORRUPTED)
 858			break;
 859
 860		cond_resched();
 861
 862	} while (nr_found && !done);
 863
 864	if (skipped) {
 865		delay(1);
 866		goto restart;
 867	}
 868	return last_error;
 869}
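/*
 * Editorial example (hypothetical numbers): the overflow catch above in
 * concrete terms.  first_index is a 32-bit per-AG inode number, so if
 * the last inode grabbed has agino 0xffffffff then
 *
 *	first_index = 0xffffffff + 1 = 0	(wraps to zero)
 *
 * which is less than the old agino; done is set and the walk terminates
 * instead of rescanning the AG from index 0.
 */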
 870
 871/*
 872 * Background scanning to trim post-EOF preallocated space. This is queued
 873 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 874 */
 875void
 876xfs_queue_eofblocks(
 877	struct xfs_mount *mp)
 878{
 879	rcu_read_lock();
 880	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
 881		queue_delayed_work(mp->m_eofblocks_workqueue,
 882				   &mp->m_eofblocks_work,
 883				   msecs_to_jiffies(xfs_eofb_secs * 1000));
 884	rcu_read_unlock();
 885}
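/*
 * Editorial note: xfs_eofb_secs carries the 'speculative_prealloc_lifetime'
 * tunable named in the comment above; with its default of 300 seconds the
 * queue delay above is msecs_to_jiffies(300 * 1000), i.e. the 5 minute
 * (5m) cadence.
 */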
 886
 887void
 888xfs_eofblocks_worker(
 889	struct work_struct *work)
 890{
 891	struct xfs_mount *mp = container_of(to_delayed_work(work),
 892				struct xfs_mount, m_eofblocks_work);
 893	xfs_icache_free_eofblocks(mp, NULL);
 894	xfs_queue_eofblocks(mp);
 895}
 896
 897/*
 898 * Background scanning to trim preallocated CoW space. This is queued
 899 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
 900 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
 901 */
 902void
 903xfs_queue_cowblocks(
 904	struct xfs_mount *mp)
 905{
 906	rcu_read_lock();
 907	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
 908		queue_delayed_work(mp->m_eofblocks_workqueue,
 909				   &mp->m_cowblocks_work,
 910				   msecs_to_jiffies(xfs_cowb_secs * 1000));
 911	rcu_read_unlock();
 912}
 913
 914void
 915xfs_cowblocks_worker(
 916	struct work_struct *work)
 917{
 918	struct xfs_mount *mp = container_of(to_delayed_work(work),
 919				struct xfs_mount, m_cowblocks_work);
 920	xfs_icache_free_cowblocks(mp, NULL);
 921	xfs_queue_cowblocks(mp);
 922}
 923
 924int
 925xfs_inode_ag_iterator_flags(
 926	struct xfs_mount	*mp,
 927	int			(*execute)(struct xfs_inode *ip, int flags,
 928					   void *args),
 929	int			flags,
 930	void			*args,
 931	int			iter_flags)
 932{
 933	struct xfs_perag	*pag;
 934	int			error = 0;
 935	int			last_error = 0;
 936	xfs_agnumber_t		ag;
 937
 938	ag = 0;
 939	while ((pag = xfs_perag_get(mp, ag))) {
 940		ag = pag->pag_agno + 1;
 941		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
 942					  iter_flags);
 943		xfs_perag_put(pag);
 944		if (error) {
 945			last_error = error;
 946			if (error == -EFSCORRUPTED)
 947				break;
 948		}
 949	}
 950	return last_error;
 951}
 952
 953int
 954xfs_inode_ag_iterator(
 955	struct xfs_mount	*mp,
 956	int			(*execute)(struct xfs_inode *ip, int flags,
 957					   void *args),
 958	int			flags,
 959	void			*args)
 960{
 961	return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
 962}
 963
 964int
 965xfs_inode_ag_iterator_tag(
 966	struct xfs_mount	*mp,
 967	int			(*execute)(struct xfs_inode *ip, int flags,
 968					   void *args),
 969	int			flags,
 970	void			*args,
 971	int			tag)
 972{
 973	struct xfs_perag	*pag;
 974	int			error = 0;
 975	int			last_error = 0;
 976	xfs_agnumber_t		ag;
 977
 978	ag = 0;
 979	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
 980		ag = pag->pag_agno + 1;
 981		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
 982					  0);
 983		xfs_perag_put(pag);
 984		if (error) {
 985			last_error = error;
 986			if (error == -EFSCORRUPTED)
 987				break;
 988		}
 989	}
 990	return last_error;
 991}
 992
 993/*
 994 * Grab the inode for reclaim exclusively.
 995 * Return 0 if we grabbed it, non-zero otherwise.
 996 */
 997STATIC int
 998xfs_reclaim_inode_grab(
 999	struct xfs_inode	*ip,
1000	int			flags)
1001{
1002	ASSERT(rcu_read_lock_held());
1003
1004	/* quick check for stale RCU freed inode */
1005	if (!ip->i_ino)
1006		return 1;
1007
1008	/*
1009	 * If we are asked for non-blocking operation, do unlocked checks to
1010	 * see if the inode already is being flushed or in reclaim to avoid
1011	 * lock traffic.
1012	 */
1013	if ((flags & SYNC_TRYLOCK) &&
1014	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
1015		return 1;
1016
1017	/*
1018	 * The radix tree lock here protects a thread in xfs_iget from racing
1019	 * with us starting reclaim on the inode.  Once we have the
1020	 * XFS_IRECLAIM flag set it will not touch us.
1021	 *
1022	 * Due to RCU lookup, we may find inodes that have been freed and only
1023	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
1024	 * aren't candidates for reclaim at all, so we must check that
1025	 * XFS_IRECLAIMABLE is set before proceeding to reclaim.
1026	 */
1027	spin_lock(&ip->i_flags_lock);
1028	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
1029	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
1030		/* not a reclaim candidate. */
1031		spin_unlock(&ip->i_flags_lock);
1032		return 1;
1033	}
1034	__xfs_iflags_set(ip, XFS_IRECLAIM);
1035	spin_unlock(&ip->i_flags_lock);
1036	return 0;
1037}
1038
1039/*
1040 * Inodes in different states need to be treated differently. The following
1041 * table lists the inode states and the reclaim actions necessary:
1042 *
1043 *	inode state	     iflush ret		required action
1044 *      ---------------      ----------         ---------------
1045 *	bad			-		reclaim
1046 *	shutdown		EIO		unpin and reclaim
1047 *	clean, unpinned		0		reclaim
1048 *	stale, unpinned		0		reclaim
1049 *	clean, pinned(*)	0		requeue
1050 *	stale, pinned		EAGAIN		requeue
1051 *	dirty, async		-		requeue
1052 *	dirty, sync		0		reclaim
1053 *
1054 * (*) dgc: I don't think the clean, pinned state is possible but it gets
1055 * handled anyway given the order of checks implemented.
1056 *
1057 * Also, because we get the flush lock first, we know that any inode that has
1058 * been flushed delwri has had the flush completed by the time we check that
1059 * the inode is clean.
1060 *
1061 * Note that because the inode is flushed delayed write by AIL pushing, the
1062 * flush lock may already be held here and waiting on it can result in very
1063 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
1064 * the caller should push the AIL first before trying to reclaim inodes to
1065 * minimise the amount of time spent waiting.  For background reclaim, we only
1066 * bother to reclaim clean inodes anyway.
1067 *
1068 * Hence the order of actions after gaining the locks should be:
1069 *	bad		=> reclaim
1070 *	shutdown	=> unpin and reclaim
1071 *	pinned, async	=> requeue
1072 *	pinned, sync	=> unpin
1073 *	stale		=> reclaim
1074 *	clean		=> reclaim
1075 *	dirty, async	=> requeue
1076 *	dirty, sync	=> flush, wait and reclaim
1077 */
1078STATIC int
1079xfs_reclaim_inode(
1080	struct xfs_inode	*ip,
1081	struct xfs_perag	*pag,
1082	int			sync_mode)
1083{
1084	struct xfs_buf		*bp = NULL;
1085	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
1086	int			error;
1087
1088restart:
1089	error = 0;
1090	xfs_ilock(ip, XFS_ILOCK_EXCL);
1091	if (!xfs_iflock_nowait(ip)) {
1092		if (!(sync_mode & SYNC_WAIT))
1093			goto out;
1094		xfs_iflock(ip);
1095	}
1096
1097	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1098		xfs_iunpin_wait(ip);
1099		/* xfs_iflush_abort() drops the flush lock */
1100		xfs_iflush_abort(ip, false);
1101		goto reclaim;
1102	}
1103	if (xfs_ipincount(ip)) {
1104		if (!(sync_mode & SYNC_WAIT))
1105			goto out_ifunlock;
1106		xfs_iunpin_wait(ip);
1107	}
1108	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
1109		xfs_ifunlock(ip);
1110		goto reclaim;
1111	}
1112
1113	/*
1114	 * Never flush out dirty data during non-blocking reclaim, as it would
1115	 * just contend with AIL pushing trying to do the same job.
1116	 */
1117	if (!(sync_mode & SYNC_WAIT))
1118		goto out_ifunlock;
1119
1120	/*
1121	 * Now we have an inode that needs flushing.
1122	 *
1123	 * Note that xfs_iflush will never block on the inode buffer lock, as
1124	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
1125	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
1126	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
1127	 * result in an ABBA deadlock with xfs_ifree_cluster().
1128	 *
1129	 * As xfs_ifree_cluster() must gather all inodes that are active in the
1130	 * cache to mark them stale, if we hit this case we don't actually want
1131	 * to do IO here - we want the inode marked stale so we can simply
1132	 * reclaim it.  Hence if we get an EAGAIN error here,  just unlock the
1133	 * inode, back off and try again.  Hopefully the next pass through will
1134	 * see the stale flag set on the inode.
1135	 */
1136	error = xfs_iflush(ip, &bp);
1137	if (error == -EAGAIN) {
1138		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1139		/* backoff longer than in xfs_ifree_cluster */
1140		delay(2);
1141		goto restart;
1142	}
1143
1144	if (!error) {
1145		error = xfs_bwrite(bp);
1146		xfs_buf_relse(bp);
1147	}
1148
1149reclaim:
1150	ASSERT(!xfs_isiflocked(ip));
1151
1152	/*
1153	 * Because we use RCU freeing we need to ensure the inode always appears
1154	 * to be reclaimed with an invalid inode number when in the free state.
1155	 * We do this as early as possible under the ILOCK so that
1156	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
1157	 * detect races with us here. By doing this, we guarantee that once
1158	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
1159	 * it will see either a valid inode that will serialise correctly, or it
1160	 * will see an invalid inode that it can skip.
1161	 */
1162	spin_lock(&ip->i_flags_lock);
1163	ip->i_flags = XFS_IRECLAIM;
1164	ip->i_ino = 0;
1165	spin_unlock(&ip->i_flags_lock);
1166
1167	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1168
1169	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1170	/*
1171	 * Remove the inode from the per-AG radix tree.
1172	 *
1173	 * Because radix_tree_delete won't complain even if the item was never
1174	 * added to the tree assert that it's been there before to catch
1175	 * problems with the inode life time early on.
1176	 */
1177	spin_lock(&pag->pag_ici_lock);
1178	if (!radix_tree_delete(&pag->pag_ici_root,
1179				XFS_INO_TO_AGINO(ip->i_mount, ino)))
1180		ASSERT(0);
1181	xfs_perag_clear_reclaim_tag(pag);
1182	spin_unlock(&pag->pag_ici_lock);
1183
1184	/*
1185	 * Here we do an (almost) spurious inode lock in order to coordinate
1186	 * with inode cache radix tree lookups.  This is because the lookup
1187	 * can reference the inodes in the cache without taking references.
1188	 *
1189	 * We make that OK here by ensuring that we wait until the inode is
1190	 * unlocked after the lookup before we go ahead and free it.
1191	 */
1192	xfs_ilock(ip, XFS_ILOCK_EXCL);
1193	xfs_qm_dqdetach(ip);
1194	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1195
1196	__xfs_inode_free(ip);
1197	return error;
1198
1199out_ifunlock:
1200	xfs_ifunlock(ip);
1201out:
1202	xfs_iflags_clear(ip, XFS_IRECLAIM);
1203	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1204	/*
1205	 * We could return -EAGAIN here to make reclaim rescan the inode tree in
1206	 * a short while. However, this just burns CPU time scanning the tree
1207	 * waiting for IO to complete and the reclaim work never goes back to
1208	 * the idle state. Instead, return 0 to let the next scheduled
1209	 * background reclaim attempt to reclaim the inode again.
1210	 */
1211	return 0;
1212}
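/*
 * Editorial illustration: the ABBA deadlock described in the comment
 * above, drawn out.  "buf" is the inode cluster buffer lock, "ilock"
 * is the inode lock:
 *
 *	xfs_ifree_cluster()		xfs_reclaim_inode()
 *	-------------------		-------------------
 *	lock(buf)			lock(ilock)
 *	lock(ilock)   <- blocks		lock(buf)   <- would block
 *
 * Because xfs_iflush() only ever trylocks the buffer and returns
 * -EAGAIN on failure, the reclaim side backs off and retries instead of
 * completing the cycle.
 */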
1213
1214/*
1215 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
1216 * corrupted, we still want to try to reclaim all the inodes. If we don't,
1217 * then a shutdown during a filesystem unmount reclaim walk would leak all the
1218 * unreclaimed inodes.
1219 */
1220STATIC int
1221xfs_reclaim_inodes_ag(
1222	struct xfs_mount	*mp,
1223	int			flags,
1224	int			*nr_to_scan)
1225{
1226	struct xfs_perag	*pag;
1227	int			error = 0;
1228	int			last_error = 0;
1229	xfs_agnumber_t		ag;
1230	int			trylock = flags & SYNC_TRYLOCK;
1231	int			skipped;
1232
1233restart:
1234	ag = 0;
1235	skipped = 0;
1236	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1237		unsigned long	first_index = 0;
1238		int		done = 0;
1239		int		nr_found = 0;
1240
1241		ag = pag->pag_agno + 1;
1242
1243		if (trylock) {
1244			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
1245				skipped++;
1246				xfs_perag_put(pag);
1247				continue;
1248			}
1249			first_index = pag->pag_ici_reclaim_cursor;
1250		} else
1251			mutex_lock(&pag->pag_ici_reclaim_lock);
1252
1253		do {
1254			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1255			int	i;
1256
1257			rcu_read_lock();
1258			nr_found = radix_tree_gang_lookup_tag(
1259					&pag->pag_ici_root,
1260					(void **)batch, first_index,
1261					XFS_LOOKUP_BATCH,
1262					XFS_ICI_RECLAIM_TAG);
1263			if (!nr_found) {
1264				done = 1;
1265				rcu_read_unlock();
1266				break;
1267			}
1268
1269			/*
1270			 * Grab the inodes before we drop the lock. if we found
1271			 * nothing, nr == 0 and the loop will be skipped.
1272			 */
1273			for (i = 0; i < nr_found; i++) {
1274				struct xfs_inode *ip = batch[i];
1275
1276				if (done || xfs_reclaim_inode_grab(ip, flags))
1277					batch[i] = NULL;
1278
1279				/*
1280				 * Update the index for the next lookup. Catch
1281				 * overflows into the next AG range which can
1282				 * occur if we have inodes in the last block of
1283				 * the AG and we are currently pointing to the
1284				 * last inode.
1285				 *
1286				 * Because we may see inodes that are from the
1287				 * wrong AG due to RCU freeing and
1288				 * reallocation, only update the index if it
1289				 * lies in this AG. It was a race that led us
1290				 * to see this inode, so another lookup from
1291				 * the same index will not find it again.
1292				 */
1293				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1294								pag->pag_agno)
1295					continue;
1296				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1297				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1298					done = 1;
1299			}
1300
1301			/* unlock now we've grabbed the inodes. */
1302			rcu_read_unlock();
1303
1304			for (i = 0; i < nr_found; i++) {
1305				if (!batch[i])
1306					continue;
1307				error = xfs_reclaim_inode(batch[i], pag, flags);
1308				if (error && last_error != -EFSCORRUPTED)
1309					last_error = error;
1310			}
1311
1312			*nr_to_scan -= XFS_LOOKUP_BATCH;
1313
1314			cond_resched();
1315
1316		} while (nr_found && !done && *nr_to_scan > 0);
1317
1318		if (trylock && !done)
1319			pag->pag_ici_reclaim_cursor = first_index;
1320		else
1321			pag->pag_ici_reclaim_cursor = 0;
1322		mutex_unlock(&pag->pag_ici_reclaim_lock);
1323		xfs_perag_put(pag);
1324	}
1325
1326	/*
1327	 * If we skipped any AG, and we still have scan count remaining, do
1328	 * another pass this time using blocking reclaim semantics (i.e
1329	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
1330	 * ensures that when we get more reclaimers than AGs we block rather
1331	 * than spin trying to execute reclaim.
1332	 */
1333	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
1334		trylock = 0;
1335		goto restart;
1336	}
1337	return last_error;
1338}
1339
1340int
1341xfs_reclaim_inodes(
1342	xfs_mount_t	*mp,
1343	int		mode)
1344{
1345	int		nr_to_scan = INT_MAX;
1346
1347	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
1348}
1349
1350/*
1351 * Scan a certain number of inodes for reclaim.
1352 *
1353 * When called we make sure that there is a background (fast) inode reclaim in
1354 * progress, while we throttle the speed of reclaim by doing synchronous
1355 * reclaim of inodes. That means if we come across dirty inodes, we wait for
1356 * them to be cleaned, which we hope will not be very long due to the
1357 * background walker having already kicked the IO off on those dirty inodes.
1358 */
1359long
1360xfs_reclaim_inodes_nr(
1361	struct xfs_mount	*mp,
1362	int			nr_to_scan)
1363{
1364	/* kick background reclaimer and push the AIL */
1365	xfs_reclaim_work_queue(mp);
1366	xfs_ail_push_all(mp->m_ail);
1367
1368	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
1369}
1370
1371/*
1372 * Return the number of reclaimable inodes in the filesystem for
1373 * the shrinker to determine how much to reclaim.
1374 */
1375int
1376xfs_reclaim_inodes_count(
1377	struct xfs_mount	*mp)
1378{
1379	struct xfs_perag	*pag;
1380	xfs_agnumber_t		ag = 0;
1381	int			reclaimable = 0;
1382
1383	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1384		ag = pag->pag_agno + 1;
1385		reclaimable += pag->pag_ici_reclaimable;
1386		xfs_perag_put(pag);
1387	}
1388	return reclaimable;
1389}
1390
1391STATIC int
1392xfs_inode_match_id(
1393	struct xfs_inode	*ip,
1394	struct xfs_eofblocks	*eofb)
1395{
1396	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1397	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1398		return 0;
1399
1400	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1401	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1402		return 0;
1403
1404	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1405	    xfs_get_projid(ip) != eofb->eof_prid)
1406		return 0;
1407
1408	return 1;
1409}
1410
1411/*
1412 * A union-based inode filtering algorithm. Process the inode if any of the
1413 * criteria match. This is for global/internal scans only.
1414 */
1415STATIC int
1416xfs_inode_match_id_union(
1417	struct xfs_inode	*ip,
1418	struct xfs_eofblocks	*eofb)
1419{
1420	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1421	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1422		return 1;
1423
1424	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1425	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1426		return 1;
1427
1428	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1429	    xfs_get_projid(ip) == eofb->eof_prid)
1430		return 1;
1431
1432	return 0;
1433}
1434
1435STATIC int
1436xfs_inode_free_eofblocks(
1437	struct xfs_inode	*ip,
1438	int			flags,
1439	void			*args)
1440{
1441	int ret = 0;
1442	struct xfs_eofblocks *eofb = args;
1443	int match;
1444
1445	if (!xfs_can_free_eofblocks(ip, false)) {
1446		/* inode could be preallocated or append-only */
1447		trace_xfs_inode_free_eofblocks_invalid(ip);
1448		xfs_inode_clear_eofblocks_tag(ip);
1449		return 0;
1450	}
1451
1452	/*
1453	 * If the mapping is dirty the operation can block and wait for some
1454	 * time. Unless we are waiting, skip it.
1455	 */
1456	if (!(flags & SYNC_WAIT) &&
1457	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1458		return 0;
1459
1460	if (eofb) {
1461		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1462			match = xfs_inode_match_id_union(ip, eofb);
1463		else
1464			match = xfs_inode_match_id(ip, eofb);
1465		if (!match)
1466			return 0;
1467
1468		/* skip the inode if the file size is too small */
1469		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1470		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
1471			return 0;
1472	}
1473
1474	/*
1475	 * If the caller is waiting, return -EAGAIN to keep the background
1476	 * scanner moving and revisit the inode in a subsequent pass.
1477	 */
1478	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1479		if (flags & SYNC_WAIT)
1480			ret = -EAGAIN;
1481		return ret;
1482	}
1483	ret = xfs_free_eofblocks(ip);
1484	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1485
1486	return ret;
1487}
1488
1489static int
1490__xfs_icache_free_eofblocks(
1491	struct xfs_mount	*mp,
1492	struct xfs_eofblocks	*eofb,
1493	int			(*execute)(struct xfs_inode *ip, int flags,
1494					   void *args),
1495	int			tag)
1496{
1497	int flags = SYNC_TRYLOCK;
1498
1499	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
1500		flags = SYNC_WAIT;
1501
1502	return xfs_inode_ag_iterator_tag(mp, execute, flags,
1503					 eofb, tag);
1504}
1505
1506int
1507xfs_icache_free_eofblocks(
1508	struct xfs_mount	*mp,
1509	struct xfs_eofblocks	*eofb)
1510{
1511	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
1512			XFS_ICI_EOFBLOCKS_TAG);
1513}
1514
1515/*
1516 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
1517 * multiple quotas, we don't know exactly which quota caused an allocation
1518 * failure. We make a best effort by including each quota under low free space
1519 * conditions (less than 1% free space) in the scan.
1520 */
1521static int
1522__xfs_inode_free_quota_eofblocks(
1523	struct xfs_inode	*ip,
1524	int			(*execute)(struct xfs_mount *mp,
1525					   struct xfs_eofblocks	*eofb))
1526{
1527	int scan = 0;
1528	struct xfs_eofblocks eofb = {0};
1529	struct xfs_dquot *dq;
1530
1531	/*
1532	 * Run a sync scan to increase effectiveness and use the union filter to
1533	 * cover all applicable quotas in a single scan.
1534	 */
1535	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
1536
1537	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
1538		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
1539		if (dq && xfs_dquot_lowsp(dq)) {
1540			eofb.eof_uid = VFS_I(ip)->i_uid;
1541			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1542			scan = 1;
1543		}
1544	}
1545
1546	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
1547		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
1548		if (dq && xfs_dquot_lowsp(dq)) {
1549			eofb.eof_gid = VFS_I(ip)->i_gid;
1550			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1551			scan = 1;
1552		}
1553	}
1554
1555	if (scan)
1556		execute(ip->i_mount, &eofb);
1557
1558	return scan;
1559}
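/*
 * Editorial example (hypothetical numbers): per the comment above, a
 * quota is included when it is under low free space conditions, i.e.
 * xfs_dquot_lowsp() reports it below its low-space threshold (roughly
 * less than 1% free).  For instance, with a user quota block hard limit
 * of 1,000,000 blocks and fewer than about 10,000 blocks left, the UID
 * filter is armed and a sync, union-filtered eofblocks scan is run.
 */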
1560
1561int
1562xfs_inode_free_quota_eofblocks(
1563	struct xfs_inode *ip)
1564{
1565	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
1566}
1567
1568static inline unsigned long
1569xfs_iflag_for_tag(
1570	int		tag)
1571{
1572	switch (tag) {
1573	case XFS_ICI_EOFBLOCKS_TAG:
1574		return XFS_IEOFBLOCKS;
1575	case XFS_ICI_COWBLOCKS_TAG:
1576		return XFS_ICOWBLOCKS;
1577	default:
1578		ASSERT(0);
1579		return 0;
1580	}
1581}
1582
1583static void
1584__xfs_inode_set_blocks_tag(
1585	xfs_inode_t	*ip,
1586	void		(*execute)(struct xfs_mount *mp),
1587	void		(*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1588				  int error, unsigned long caller_ip),
1589	int		tag)
1590{
1591	struct xfs_mount *mp = ip->i_mount;
1592	struct xfs_perag *pag;
1593	int tagged;
1594
1595	/*
1596	 * Don't bother locking the AG and looking up in the radix trees
1597	 * if we already know that we have the tag set.
1598	 */
1599	if (ip->i_flags & xfs_iflag_for_tag(tag))
1600		return;
1601	spin_lock(&ip->i_flags_lock);
1602	ip->i_flags |= xfs_iflag_for_tag(tag);
1603	spin_unlock(&ip->i_flags_lock);
1604
1605	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1606	spin_lock(&pag->pag_ici_lock);
1607
1608	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
1609	radix_tree_tag_set(&pag->pag_ici_root,
1610			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1611	if (!tagged) {
1612		/* propagate the eofblocks tag up into the perag radix tree */
1613		spin_lock(&ip->i_mount->m_perag_lock);
1614		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1615				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1616				   tag);
1617		spin_unlock(&ip->i_mount->m_perag_lock);
1618
1619		/* kick off background trimming */
1620		execute(ip->i_mount);
1621
1622		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1623	}
1624
1625	spin_unlock(&pag->pag_ici_lock);
1626	xfs_perag_put(pag);
1627}
1628
1629void
1630xfs_inode_set_eofblocks_tag(
1631	xfs_inode_t	*ip)
1632{
1633	trace_xfs_inode_set_eofblocks_tag(ip);
1634	return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
1635			trace_xfs_perag_set_eofblocks,
1636			XFS_ICI_EOFBLOCKS_TAG);
1637}
1638
1639static void
1640__xfs_inode_clear_blocks_tag(
1641	xfs_inode_t	*ip,
1642	void		(*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1643				    int error, unsigned long caller_ip),
1644	int		tag)
1645{
1646	struct xfs_mount *mp = ip->i_mount;
1647	struct xfs_perag *pag;
1648
1649	spin_lock(&ip->i_flags_lock);
1650	ip->i_flags &= ~xfs_iflag_for_tag(tag);
1651	spin_unlock(&ip->i_flags_lock);
1652
1653	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1654	spin_lock(&pag->pag_ici_lock);
1655
1656	radix_tree_tag_clear(&pag->pag_ici_root,
1657			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1658	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
1659		/* clear the eofblocks tag from the perag radix tree */
1660		spin_lock(&ip->i_mount->m_perag_lock);
1661		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1662				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1663				     tag);
1664		spin_unlock(&ip->i_mount->m_perag_lock);
1665		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1666	}
1667
1668	spin_unlock(&pag->pag_ici_lock);
1669	xfs_perag_put(pag);
1670}
1671
1672void
1673xfs_inode_clear_eofblocks_tag(
1674	xfs_inode_t	*ip)
1675{
1676	trace_xfs_inode_clear_eofblocks_tag(ip);
1677	return __xfs_inode_clear_blocks_tag(ip,
1678			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
1679}
1680
1681/*
1682 * Set ourselves up to free CoW blocks from this file.  If it's already clean
1683 * then we can bail out quickly, but otherwise we must back off if the file
1684 * is undergoing some kind of write.
1685 */
1686static bool
1687xfs_prep_free_cowblocks(
1688	struct xfs_inode	*ip,
1689	struct xfs_ifork	*ifp)
1690{
1691	/*
1692	 * Just clear the tag if we have an empty cow fork or none at all. It's
1693	 * possible the inode was fully unshared since it was originally tagged.
1694	 */
1695	if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
1696		trace_xfs_inode_free_cowblocks_invalid(ip);
1697		xfs_inode_clear_cowblocks_tag(ip);
1698		return false;
1699	}
1700
1701	/*
1702	 * If the mapping is dirty or under writeback we cannot touch the
1703	 * CoW fork.  Leave it alone if we're in the midst of a directio.
1704	 */
1705	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1706	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1707	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1708	    atomic_read(&VFS_I(ip)->i_dio_count))
1709		return false;
1710
1711	return true;
1712}
1713
1714/*
1715 * Automatic CoW Reservation Freeing
1716 *
1717 * These functions automatically garbage collect leftover CoW reservations
1718 * that were made on behalf of a cowextsize hint when we start to run out
1719 * of quota or when the reservations sit around for too long.  If the file
1720 * has dirty pages or is undergoing writeback, its CoW reservations will
1721 * be retained.
1722 *
1723 * The actual garbage collection piggybacks off the same code that runs
1724 * the speculative EOF preallocation garbage collector.
1725 */
1726STATIC int
1727xfs_inode_free_cowblocks(
1728	struct xfs_inode	*ip,
1729	int			flags,
1730	void			*args)
1731{
1732	struct xfs_eofblocks	*eofb = args;
1733	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1734	int			match;
1735	int			ret = 0;
1736
1737	if (!xfs_prep_free_cowblocks(ip, ifp))
1738		return 0;
1739
1740	if (eofb) {
1741		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1742			match = xfs_inode_match_id_union(ip, eofb);
1743		else
1744			match = xfs_inode_match_id(ip, eofb);
1745		if (!match)
1746			return 0;
1747
1748		/* skip the inode if the file size is too small */
1749		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1750		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
1751			return 0;
1752	}
1753
1754	/* Free the CoW blocks */
1755	xfs_ilock(ip, XFS_IOLOCK_EXCL);
1756	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1757
1758	/*
1759	 * Check again, nobody else should be able to dirty blocks or change
1760	 * the reflink iflag now that we have the first two locks held.
1761	 */
1762	if (xfs_prep_free_cowblocks(ip, ifp))
1763		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1764
1765	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
1766	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1767
1768	return ret;
1769}
1770
1771int
1772xfs_icache_free_cowblocks(
1773	struct xfs_mount	*mp,
1774	struct xfs_eofblocks	*eofb)
1775{
1776	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
1777			XFS_ICI_COWBLOCKS_TAG);
1778}
1779
1780int
1781xfs_inode_free_quota_cowblocks(
1782	struct xfs_inode *ip)
1783{
1784	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
1785}
1786
1787void
1788xfs_inode_set_cowblocks_tag(
1789	xfs_inode_t	*ip)
1790{
1791	trace_xfs_inode_set_cowblocks_tag(ip);
1792	return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
1793			trace_xfs_perag_set_cowblocks,
1794			XFS_ICI_COWBLOCKS_TAG);
1795}
1796
1797void
1798xfs_inode_clear_cowblocks_tag(
1799	xfs_inode_t	*ip)
1800{
1801	trace_xfs_inode_clear_cowblocks_tag(ip);
1802	return __xfs_inode_clear_blocks_tag(ip,
1803			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
1804}
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_mount.h"
  13#include "xfs_inode.h"
  14#include "xfs_trans.h"
  15#include "xfs_trans_priv.h"
  16#include "xfs_inode_item.h"
  17#include "xfs_quota.h"
  18#include "xfs_trace.h"
  19#include "xfs_icache.h"
  20#include "xfs_bmap_util.h"
  21#include "xfs_dquot_item.h"
  22#include "xfs_dquot.h"
  23#include "xfs_reflink.h"
  24#include "xfs_ialloc.h"
  25#include "xfs_ag.h"
  26#include "xfs_log_priv.h"
  27
  28#include <linux/iversion.h>
  29
  30/* Radix tree tags for incore inode tree. */
  31
  32/* inode is to be reclaimed */
  33#define XFS_ICI_RECLAIM_TAG	0
  34/* Inode has speculative preallocations (posteof or cow) to clean. */
  35#define XFS_ICI_BLOCKGC_TAG	1
  36
  37/*
  38 * The goal for walking incore inodes.  These can correspond with incore inode
  39 * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
  40 */
  41enum xfs_icwalk_goal {
  42	/* Goals directly associated with tagged inodes. */
  43	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
  44	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
  45};
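/*
 * For example, because the goal values equal the tag values, xfs_icwalk_ag()
 * below can feed the goal straight into the tagged radix tree lookup:
 *
 *	nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
 *			(void **) batch, first_index,
 *			XFS_LOOKUP_BATCH, goal);
 */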
  46
  47static int xfs_icwalk(struct xfs_mount *mp,
  48		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
  49static int xfs_icwalk_ag(struct xfs_perag *pag,
  50		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
  51
  52/*
  53 * Private inode cache walk flags for struct xfs_icwalk.  Must not
  54 * coincide with XFS_ICWALK_FLAGS_VALID.
  55 */
  56
  57/* Stop scanning after icw_scan_limit inodes. */
  58#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)
  59
  60#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
  61#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */
  62
  63#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
  64					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
  65					 XFS_ICWALK_FLAG_UNION)
  66
  67/*
  68 * Allocate and initialise an xfs_inode.
  69 */
  70struct xfs_inode *
  71xfs_inode_alloc(
  72	struct xfs_mount	*mp,
  73	xfs_ino_t		ino)
  74{
  75	struct xfs_inode	*ip;
  76
  77	/*
  78	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
  79	 * and return NULL here on ENOMEM.
  80	 */
  81	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);
  82
  83	if (inode_init_always(mp->m_super, VFS_I(ip))) {
  84		kmem_cache_free(xfs_inode_cache, ip);
  85		return NULL;
  86	}
  87
  88	/* VFS doesn't initialise i_mode or i_state! */
  89	VFS_I(ip)->i_mode = 0;
  90	VFS_I(ip)->i_state = 0;
  91	mapping_set_large_folios(VFS_I(ip)->i_mapping);
  92
  93	XFS_STATS_INC(mp, vn_active);
  94	ASSERT(atomic_read(&ip->i_pincount) == 0);
  95	ASSERT(ip->i_ino == 0);
  96
  97	/* initialise the xfs inode */
  98	ip->i_ino = ino;
  99	ip->i_mount = mp;
 100	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
 101	ip->i_cowfp = NULL;
 102	memset(&ip->i_af, 0, sizeof(ip->i_af));
 103	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
 104	memset(&ip->i_df, 0, sizeof(ip->i_df));
 105	ip->i_flags = 0;
 106	ip->i_delayed_blks = 0;
 107	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
 108	ip->i_nblocks = 0;
 109	ip->i_forkoff = 0;
 110	ip->i_sick = 0;
 111	ip->i_checked = 0;
 112	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
 113	INIT_LIST_HEAD(&ip->i_ioend_list);
 114	spin_lock_init(&ip->i_ioend_lock);
 115	ip->i_next_unlinked = NULLAGINO;
 116	ip->i_prev_unlinked = 0;
 117
 118	return ip;
 119}
 120
 121STATIC void
 122xfs_inode_free_callback(
 123	struct rcu_head		*head)
 124{
 125	struct inode		*inode = container_of(head, struct inode, i_rcu);
 126	struct xfs_inode	*ip = XFS_I(inode);
 127
 128	switch (VFS_I(ip)->i_mode & S_IFMT) {
 129	case S_IFREG:
 130	case S_IFDIR:
 131	case S_IFLNK:
 132		xfs_idestroy_fork(&ip->i_df);
 133		break;
 134	}
 135
 136	xfs_ifork_zap_attr(ip);
 137
 138	if (ip->i_cowfp) {
 139		xfs_idestroy_fork(ip->i_cowfp);
 140		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
 141	}
 142	if (ip->i_itemp) {
 143		ASSERT(!test_bit(XFS_LI_IN_AIL,
 144				 &ip->i_itemp->ili_item.li_flags));
 145		xfs_inode_item_destroy(ip);
 146		ip->i_itemp = NULL;
 147	}
 148
 149	kmem_cache_free(xfs_inode_cache, ip);
 150}
 151
 152static void
 153__xfs_inode_free(
 154	struct xfs_inode	*ip)
 155{
 156	/* asserts to verify all state is correct here */
 157	ASSERT(atomic_read(&ip->i_pincount) == 0);
 158	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
 159	XFS_STATS_DEC(ip->i_mount, vn_active);
 160
 161	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 162}
 163
 164void
 165xfs_inode_free(
 166	struct xfs_inode	*ip)
 167{
 168	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
 169
 170	/*
 171	 * Because we use RCU freeing we need to ensure the inode always
 172	 * appears to be reclaimed with an invalid inode number when in the
 173	 * free state. The ip->i_flags_lock provides the barrier against lookup
 174	 * races.
 175	 */
 176	spin_lock(&ip->i_flags_lock);
 177	ip->i_flags = XFS_IRECLAIM;
 178	ip->i_ino = 0;
 179	spin_unlock(&ip->i_flags_lock);
 180
 181	__xfs_inode_free(ip);
 182}
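/*
 * The check that pairs with the zeroed inode number above sits on the
 * lookup side, in xfs_iget_cache_hit():
 *
 *	spin_lock(&ip->i_flags_lock);
 *	if (ip->i_ino != ino)
 *		goto out_skip;
 */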
 183
 184/*
 185 * Queue background inode reclaim work if there are reclaimable inodes and there
 186 * isn't reclaim work already scheduled or in progress.
 187 */
 188static void
 189xfs_reclaim_work_queue(
 190	struct xfs_mount        *mp)
 191{
 192
 193	rcu_read_lock();
 194	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
 195		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
 196			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
 197	}
 198	rcu_read_unlock();
 199}
 200
 201/*
 202 * Background scanning to trim preallocated space. This is queued based on the
 203 * 'speculative_prealloc_lifetime' tunable (5m by default).
 204 */
 205static inline void
 206xfs_blockgc_queue(
 207	struct xfs_perag	*pag)
 208{
 209	struct xfs_mount	*mp = pag->pag_mount;
 210
 211	if (!xfs_is_blockgc_enabled(mp))
 212		return;
 213
 214	rcu_read_lock();
 215	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
 216		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
 217				   &pag->pag_blockgc_work,
 218				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
 219	rcu_read_unlock();
 220}
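/*
 * With the default 'speculative_prealloc_lifetime' of five minutes noted
 * above, the requeue delay works out to msecs_to_jiffies(300 * 1000).
 */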
 221
 222/* Set a tag on both the AG incore inode tree and the AG radix tree. */
 223static void
 224xfs_perag_set_inode_tag(
 225	struct xfs_perag	*pag,
 226	xfs_agino_t		agino,
 227	unsigned int		tag)
 228{
 229	struct xfs_mount	*mp = pag->pag_mount;
 230	bool			was_tagged;
 231
 232	lockdep_assert_held(&pag->pag_ici_lock);
 233
 234	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
 235	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);
 236
 237	if (tag == XFS_ICI_RECLAIM_TAG)
 238		pag->pag_ici_reclaimable++;
 239
 240	if (was_tagged)
 241		return;
 242
 243	/* propagate the tag up into the perag radix tree */
 244	spin_lock(&mp->m_perag_lock);
 245	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
 246	spin_unlock(&mp->m_perag_lock);
 247
 248	/* start background work */
 249	switch (tag) {
 250	case XFS_ICI_RECLAIM_TAG:
 251		xfs_reclaim_work_queue(mp);
 252		break;
 253	case XFS_ICI_BLOCKGC_TAG:
 254		xfs_blockgc_queue(pag);
 255		break;
 256	}
 257
 258	trace_xfs_perag_set_inode_tag(pag, _RET_IP_);
 259}
 260
 261/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
 262static void
 263xfs_perag_clear_inode_tag(
 264	struct xfs_perag	*pag,
 265	xfs_agino_t		agino,
 266	unsigned int		tag)
 267{
 268	struct xfs_mount	*mp = pag->pag_mount;
 269
 270	lockdep_assert_held(&pag->pag_ici_lock);
 271
 272	/*
 273	 * Reclaim can signal (with a null agino) that it cleared its own tag
 274	 * by removing the inode from the radix tree.
 275	 */
 276	if (agino != NULLAGINO)
 277		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
 278	else
 279		ASSERT(tag == XFS_ICI_RECLAIM_TAG);
 280
 281	if (tag == XFS_ICI_RECLAIM_TAG)
 282		pag->pag_ici_reclaimable--;
 283
 284	if (radix_tree_tagged(&pag->pag_ici_root, tag))
 285		return;
 286
 287	/* clear the tag from the perag radix tree */
 288	spin_lock(&mp->m_perag_lock);
 289	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
 290	spin_unlock(&mp->m_perag_lock);
 291
 292	trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
 293}
 294
 295/*
 296 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 297 * part of the structure. This is made more complex by the fact we store
 298 * information about the on-disk values in the VFS inode and so we can't just
 299 * overwrite the values unconditionally. Hence we save the parameters we
 300 * need to retain across reinitialisation, and rewrite them into the VFS inode
 301 * after reinitialisation even if it fails.
 302 */
 303static int
 304xfs_reinit_inode(
 305	struct xfs_mount	*mp,
 306	struct inode		*inode)
 307{
 308	int			error;
 309	uint32_t		nlink = inode->i_nlink;
 310	uint32_t		generation = inode->i_generation;
 311	uint64_t		version = inode_peek_iversion(inode);
 312	umode_t			mode = inode->i_mode;
 313	dev_t			dev = inode->i_rdev;
 314	kuid_t			uid = inode->i_uid;
 315	kgid_t			gid = inode->i_gid;
 316
 317	error = inode_init_always(mp->m_super, inode);
 318
 319	set_nlink(inode, nlink);
 320	inode->i_generation = generation;
 321	inode_set_iversion_queried(inode, version);
 322	inode->i_mode = mode;
 323	inode->i_rdev = dev;
 324	inode->i_uid = uid;
 325	inode->i_gid = gid;
 326	mapping_set_large_folios(inode->i_mapping);
 327	return error;
 328}
 329
 330/*
 331 * Carefully nudge an inode whose VFS state has been torn down back into a
 332 * usable state.  Drops the i_flags_lock and the rcu read lock.
 333 */
 334static int
 335xfs_iget_recycle(
 336	struct xfs_perag	*pag,
 337	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
 338{
 339	struct xfs_mount	*mp = ip->i_mount;
 340	struct inode		*inode = VFS_I(ip);
 341	int			error;
 342
 343	trace_xfs_iget_recycle(ip);
 344
 345	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 346		return -EAGAIN;
 347
 348	/*
 349	 * We need to make it look like the inode is being reclaimed to prevent
 350	 * the actual reclaim workers from stomping over us while we recycle
 351	 * the inode.  We can't clear the radix tree tag yet as it requires
 352	 * pag_ici_lock to be held exclusive.
 353	 */
 354	ip->i_flags |= XFS_IRECLAIM;
 355
 356	spin_unlock(&ip->i_flags_lock);
 357	rcu_read_unlock();
 358
 359	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 360	error = xfs_reinit_inode(mp, inode);
 361	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 362	if (error) {
 363		/*
 364		 * Re-initializing the inode failed, and we are in deep
 365		 * trouble.  Try to re-add it to the reclaim list.
 366		 */
 367		rcu_read_lock();
 368		spin_lock(&ip->i_flags_lock);
 369		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
 370		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
 371		spin_unlock(&ip->i_flags_lock);
 372		rcu_read_unlock();
 373
 374		trace_xfs_iget_recycle_fail(ip);
 375		return error;
 376	}
 377
 378	spin_lock(&pag->pag_ici_lock);
 379	spin_lock(&ip->i_flags_lock);
 380
 381	/*
 382	 * Clear the per-lifetime state in the inode as we are now effectively
 383	 * a new inode and need to return to the initial state before reuse
 384	 * occurs.
 385	 */
 386	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
 387	ip->i_flags |= XFS_INEW;
 388	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
 389			XFS_ICI_RECLAIM_TAG);
 390	inode->i_state = I_NEW;
 391	spin_unlock(&ip->i_flags_lock);
 392	spin_unlock(&pag->pag_ici_lock);
 393
 394	return 0;
 395}
 396
 397/*
 398 * If we are allocating a new inode, then check that what was returned is
 399 * actually a free, empty inode. If we are not allocating an inode,
 400 * then check that we didn't find a free inode.
 401 *
 402 * Returns:
 403 *	0		if the inode free state matches the lookup context
 404 *	-ENOENT		if the inode is free and we are not allocating
 405 *	-EFSCORRUPTED	if there is any state mismatch at all
 406 */
 407static int
 408xfs_iget_check_free_state(
 409	struct xfs_inode	*ip,
 410	int			flags)
 411{
 412	if (flags & XFS_IGET_CREATE) {
 413		/* should be a free inode */
 414		if (VFS_I(ip)->i_mode != 0) {
 415			xfs_warn(ip->i_mount,
 416"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
 417				ip->i_ino, VFS_I(ip)->i_mode);
 418			return -EFSCORRUPTED;
 419		}
 420
 421		if (ip->i_nblocks != 0) {
 422			xfs_warn(ip->i_mount,
 423"Corruption detected! Free inode 0x%llx has blocks allocated!",
 424				ip->i_ino);
 425			return -EFSCORRUPTED;
 426		}
 427		return 0;
 428	}
 429
 430	/* should be an allocated inode */
 431	if (VFS_I(ip)->i_mode == 0)
 432		return -ENOENT;
 433
 434	return 0;
 435}
 436
 437/* Make all pending inactivation work start immediately. */
 438static bool
 439xfs_inodegc_queue_all(
 440	struct xfs_mount	*mp)
 441{
 442	struct xfs_inodegc	*gc;
 443	int			cpu;
 444	bool			ret = false;
 445
 446	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
 447		gc = per_cpu_ptr(mp->m_inodegc, cpu);
 448		if (!llist_empty(&gc->list)) {
 449			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
 450			ret = true;
 451		}
 452	}
 453
 454	return ret;
 455}
 456
 457/* Wait for all queued work and collect errors */
 458static int
 459xfs_inodegc_wait_all(
 460	struct xfs_mount	*mp)
 461{
 462	int			cpu;
 463	int			error = 0;
 464
 465	flush_workqueue(mp->m_inodegc_wq);
 466	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
 467		struct xfs_inodegc	*gc;
 468
 469		gc = per_cpu_ptr(mp->m_inodegc, cpu);
 470		if (gc->error && !error)
 471			error = gc->error;
 472		gc->error = 0;
 473	}
 474
 475	return error;
 476}
 477
 478/*
 479 * Check the validity of the inode we just found in the cache
 480 */
 481static int
 482xfs_iget_cache_hit(
 483	struct xfs_perag	*pag,
 484	struct xfs_inode	*ip,
 485	xfs_ino_t		ino,
 486	int			flags,
 487	int			lock_flags) __releases(RCU)
 488{
 489	struct inode		*inode = VFS_I(ip);
 490	struct xfs_mount	*mp = ip->i_mount;
 491	int			error;
 492
 493	/*
 494	 * check for re-use of an inode within an RCU grace period due to the
 495	 * radix tree nodes not being updated yet. We monitor for this by
 496	 * setting the inode number to zero before freeing the inode structure.
 497	 * If the inode has been reallocated and set up, then the inode number
 498	 * will not match, so check for that, too.
 499	 */
 500	spin_lock(&ip->i_flags_lock);
 501	if (ip->i_ino != ino)
 502		goto out_skip;
 503
 504	/*
 505	 * If we are racing with another cache hit that is currently
 506	 * instantiating this inode or currently recycling it out of
 507	 * reclaimable state, wait for the initialisation to complete
 508	 * before continuing.
 509	 *
 510	 * If we're racing with the inactivation worker we also want to wait.
 511	 * If we're creating a new file, it's possible that the worker
 512	 * previously marked the inode as free on disk but hasn't finished
 513	 * updating the incore state yet.  The AGI buffer will be dirty and
 514	 * locked to the icreate transaction, so a synchronous push of the
 515	 * inodegc workers would result in deadlock.  For a regular iget, the
 516	 * worker is running already, so we might as well wait.
 517	 *
 518	 * XXX(hch): eventually we should do something equivalent to
 519	 *	     wait_on_inode to wait for these flags to be cleared
 520	 *	     instead of polling for it.
 521	 */
 522	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
 523		goto out_skip;
 524
 525	if (ip->i_flags & XFS_NEED_INACTIVE) {
 526		/* Unlinked inodes cannot be re-grabbed. */
 527		if (VFS_I(ip)->i_nlink == 0) {
 528			error = -ENOENT;
 529			goto out_error;
 530		}
 531		goto out_inodegc_flush;
 532	}
 533
 534	/*
 535	 * Check the inode free state is valid. This also detects lookup
 536	 * racing with unlinks.
 537	 */
 538	error = xfs_iget_check_free_state(ip, flags);
 539	if (error)
 540		goto out_error;
 541
 542	/* Skip inodes that have no vfs state. */
 543	if ((flags & XFS_IGET_INCORE) &&
 544	    (ip->i_flags & XFS_IRECLAIMABLE))
 545		goto out_skip;
 546
 547	/* The inode fits the selection criteria; process it. */
 548	if (ip->i_flags & XFS_IRECLAIMABLE) {
 549		/* Drops i_flags_lock and RCU read lock. */
 550		error = xfs_iget_recycle(pag, ip);
 551		if (error == -EAGAIN)
 552			goto out_skip;
 553		if (error)
 554			return error;
 555	} else {
 556		/* If the VFS inode is being torn down, pause and try again. */
 557		if (!igrab(inode))
 558			goto out_skip;
 559
 560		/* We've got a live one. */
 561		spin_unlock(&ip->i_flags_lock);
 562		rcu_read_unlock();
 563		trace_xfs_iget_hit(ip);
 564	}
 565
 566	if (lock_flags != 0)
 567		xfs_ilock(ip, lock_flags);
 568
 569	if (!(flags & XFS_IGET_INCORE))
 570		xfs_iflags_clear(ip, XFS_ISTALE);
 571	XFS_STATS_INC(mp, xs_ig_found);
 572
 573	return 0;
 574
 575out_skip:
 576	trace_xfs_iget_skip(ip);
 577	XFS_STATS_INC(mp, xs_ig_frecycle);
 578	error = -EAGAIN;
 579out_error:
 580	spin_unlock(&ip->i_flags_lock);
 581	rcu_read_unlock();
 582	return error;
 583
 584out_inodegc_flush:
 585	spin_unlock(&ip->i_flags_lock);
 586	rcu_read_unlock();
 587	/*
 588	 * Do not wait for the workers, because the caller could hold an AGI
 589	 * buffer lock.  We're just going to sleep in a loop anyway.
 590	 */
 591	if (xfs_is_inodegc_enabled(mp))
 592		xfs_inodegc_queue_all(mp);
 593	return -EAGAIN;
 594}
 595
 596static int
 597xfs_iget_cache_miss(
 598	struct xfs_mount	*mp,
 599	struct xfs_perag	*pag,
 600	xfs_trans_t		*tp,
 601	xfs_ino_t		ino,
 602	struct xfs_inode	**ipp,
 603	int			flags,
 604	int			lock_flags)
 605{
 606	struct xfs_inode	*ip;
 607	int			error;
 608	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
 609	int			iflags;
 610
 611	ip = xfs_inode_alloc(mp, ino);
 612	if (!ip)
 613		return -ENOMEM;
 614
 615	error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
 616	if (error)
 617		goto out_destroy;
 618
 619	/*
 620	 * For version 5 superblocks, if we are initialising a new inode and we
 621	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
 622	 * simply build the new inode core with a random generation number.
 623	 *
 624	 * For version 4 (and older) superblocks, log recovery is dependent on
 625	 * the i_flushiter field being initialised from the current on-disk
 626	 * value and hence we must also read the inode off disk even when
 627	 * initializing new inodes.
 628	 */
 629	if (xfs_has_v3inodes(mp) &&
 630	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
 631		VFS_I(ip)->i_generation = get_random_u32();
 632	} else {
 633		struct xfs_buf		*bp;
 634
 635		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
 636		if (error)
 637			goto out_destroy;
 638
 639		error = xfs_inode_from_disk(ip,
 640				xfs_buf_offset(bp, ip->i_imap.im_boffset));
 641		if (!error)
 642			xfs_buf_set_ref(bp, XFS_INO_REF);
 643		xfs_trans_brelse(tp, bp);
 644
 645		if (error)
 646			goto out_destroy;
 647	}
 648
 649	trace_xfs_iget_miss(ip);
 650
 651	/*
 652	 * Check the inode free state is valid. This also detects lookup
 653	 * racing with unlinks.
 654	 */
 655	error = xfs_iget_check_free_state(ip, flags);
 656	if (error)
 657		goto out_destroy;
 658
 659	/*
 660	 * Preload the radix tree so we can insert safely under the
 661	 * write spinlock. Note that we cannot sleep inside the preload
 662	 * region. Since we can be called from transaction context, don't
 663	 * recurse into the file system.
 664	 */
 665	if (radix_tree_preload(GFP_NOFS)) {
 666		error = -EAGAIN;
 667		goto out_destroy;
 668	}
 669
 670	/*
 671	 * Because the inode hasn't been added to the radix-tree yet it can't
 672	 * be found by another thread, so we can do the non-sleeping lock here.
 673	 */
 674	if (lock_flags) {
 675		if (!xfs_ilock_nowait(ip, lock_flags))
 676			BUG();
 677	}
 678
 679	/*
 680	 * These values must be set before inserting the inode into the radix
 681	 * tree as the moment it is inserted a concurrent lookup (allowed by the
 682	 * RCU locking mechanism) can find it and that lookup must see that this
 683	 * is an inode currently under construction (i.e. that XFS_INEW is set).
 684	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
 685	 * memory barrier that ensures this detection works correctly at lookup
 686	 * time.
 687	 */
 688	iflags = XFS_INEW;
 689	if (flags & XFS_IGET_DONTCACHE)
 690		d_mark_dontcache(VFS_I(ip));
 691	ip->i_udquot = NULL;
 692	ip->i_gdquot = NULL;
 693	ip->i_pdquot = NULL;
 694	xfs_iflags_set(ip, iflags);
 695
 696	/* insert the new inode */
 697	spin_lock(&pag->pag_ici_lock);
 698	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
 699	if (unlikely(error)) {
 700		WARN_ON(error != -EEXIST);
 701		XFS_STATS_INC(mp, xs_ig_dup);
 702		error = -EAGAIN;
 703		goto out_preload_end;
 704	}
 705	spin_unlock(&pag->pag_ici_lock);
 706	radix_tree_preload_end();
 707
 708	*ipp = ip;
 709	return 0;
 710
 711out_preload_end:
 712	spin_unlock(&pag->pag_ici_lock);
 713	radix_tree_preload_end();
 714	if (lock_flags)
 715		xfs_iunlock(ip, lock_flags);
 716out_destroy:
 717	__destroy_inode(VFS_I(ip));
 718	xfs_inode_free(ip);
 719	return error;
 720}
 721
 722/*
 723 * Look up an inode by number in the given file system.  The inode is looked up
 724 * in the cache held in each AG.  If the inode is found in the cache, initialise
 725 * the vfs inode if necessary.
 726 *
 727 * If it is not in core, read it in from the file system's device, add it to the
 728 * cache and initialise the vfs inode.
 729 *
 730 * The inode is locked according to the value of the lock_flags parameter.
 731 * Inode lookup is only done during metadata operations and not as part of the
 732 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 733 */
 734int
 735xfs_iget(
 736	struct xfs_mount	*mp,
 737	struct xfs_trans	*tp,
 738	xfs_ino_t		ino,
 739	uint			flags,
 740	uint			lock_flags,
 741	struct xfs_inode	**ipp)
 742{
 743	struct xfs_inode	*ip;
 744	struct xfs_perag	*pag;
 745	xfs_agino_t		agino;
 746	int			error;
 747
 748	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
 749
 750	/* reject inode numbers outside existing AGs */
 751	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
 752		return -EINVAL;
 753
 754	XFS_STATS_INC(mp, xs_ig_attempts);
 755
 756	/* get the perag structure and ensure that it's inode capable */
 757	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
 758	agino = XFS_INO_TO_AGINO(mp, ino);
 759
 760again:
 761	error = 0;
 762	rcu_read_lock();
 763	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 764
 765	if (ip) {
 766		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
 767		if (error)
 768			goto out_error_or_again;
 769	} else {
 770		rcu_read_unlock();
 771		if (flags & XFS_IGET_INCORE) {
 772			error = -ENODATA;
 773			goto out_error_or_again;
 774		}
 775		XFS_STATS_INC(mp, xs_ig_missed);
 776
 777		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
 778							flags, lock_flags);
 779		if (error)
 780			goto out_error_or_again;
 781	}
 782	xfs_perag_put(pag);
 783
 784	*ipp = ip;
 785
 786	/*
 787	 * If we have a real type for an on-disk inode, we can setup the inode
 788	 * now.	 If it's a new inode being created, xfs_init_new_inode will
 789	 * handle it.
 790	 */
 791	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
 792		xfs_setup_existing_inode(ip);
 793	return 0;
 794
 795out_error_or_again:
 796	if (!(flags & (XFS_IGET_INCORE | XFS_IGET_NORETRY)) &&
 797	    error == -EAGAIN) {
 798		delay(1);
 799		goto again;
 800	}
 801	xfs_perag_put(pag);
 802	return error;
 803}
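/*
 * Illustrative sketch only (this helper is not part of the kernel source): a
 * typical metadata-path caller takes the ILOCK through xfs_iget() and drops
 * both the lock and the inode reference when finished.
 */
static inline int
xfs_iget_example(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	/* ... operate on the inode under XFS_ILOCK_EXCL ... */

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_irele(ip);
	return 0;
}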
 804
 805/*
 806 * Grab the inode for reclaim exclusively.
 807 *
 808 * We have found this inode via a lookup under RCU, so the inode may have
 809 * already been freed, or it may be in the process of being recycled by
 810 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 811 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 812 * will not be set. Hence we need to check for both these flag conditions to
 813 * avoid inodes that are no longer reclaim candidates.
 814 *
 815 * Note: checking for other state flags here, under the i_flags_lock or not, is
 816 * racy and should be avoided. Those races should be resolved only after we have
 817 * ensured that we are able to reclaim this inode and the world can see that we
 818 * are going to reclaim it.
 819 *
 820 * Return true if we grabbed it, false otherwise.
 821 */
 822static bool
 823xfs_reclaim_igrab(
 824	struct xfs_inode	*ip,
 825	struct xfs_icwalk	*icw)
 826{
 827	ASSERT(rcu_read_lock_held());
 828
 829	spin_lock(&ip->i_flags_lock);
 830	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
 831	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
 832		/* not a reclaim candidate. */
 833		spin_unlock(&ip->i_flags_lock);
 834		return false;
 835	}
 836
 837	/* Don't reclaim a sick inode unless the caller asked for it. */
 838	if (ip->i_sick &&
 839	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
 840		spin_unlock(&ip->i_flags_lock);
 841		return false;
 842	}
 843
 844	__xfs_iflags_set(ip, XFS_IRECLAIM);
 845	spin_unlock(&ip->i_flags_lock);
 846	return true;
 847}
 848
 849/*
 850 * Inode reclaim is non-blocking, so the default action if progress cannot be
 851 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 852 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
 853 * blocking anymore and hence we can block on the inode until we are
 854 * able to reclaim it.
 855 *
 856 * We do no IO here - if callers require inodes to be cleaned they must push the
 857 * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
 858 * done in the background in a non-blocking manner, and enables memory reclaim
 859 * to make progress without blocking.
 860 */
 861static void
 862xfs_reclaim_inode(
 863	struct xfs_inode	*ip,
 864	struct xfs_perag	*pag)
 865{
 866	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
 867
 868	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 869		goto out;
 870	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
 871		goto out_iunlock;
 872
 873	/*
 874	 * Check for log shutdown because aborting the inode can move the log
 875	 * tail and corrupt in memory state. This is fine if the log is shut
 876	 * down, but if the log is still active and only the mount is shut down
 877	 * then the in-memory log tail movement caused by the abort can be
 878	 * incorrectly propagated to disk.
 879	 */
 880	if (xlog_is_shutdown(ip->i_mount->m_log)) {
 881		xfs_iunpin_wait(ip);
 882		xfs_iflush_shutdown_abort(ip);
 883		goto reclaim;
 884	}
 885	if (xfs_ipincount(ip))
 886		goto out_clear_flush;
 887	if (!xfs_inode_clean(ip))
 888		goto out_clear_flush;
 889
 890	xfs_iflags_clear(ip, XFS_IFLUSHING);
 891reclaim:
 892	trace_xfs_inode_reclaiming(ip);
 893
 894	/*
 895	 * Because we use RCU freeing we need to ensure the inode always appears
 896	 * to be reclaimed with an invalid inode number when in the free state.
 897	 * We do this as early as possible under the ILOCK so that
 898	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
 899	 * detect races with us here. By doing this, we guarantee that once
 900	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
 901	 * it will see either a valid inode that will serialise correctly, or it
 902	 * will see an invalid inode that it can skip.
 903	 */
 904	spin_lock(&ip->i_flags_lock);
 905	ip->i_flags = XFS_IRECLAIM;
 906	ip->i_ino = 0;
 907	ip->i_sick = 0;
 908	ip->i_checked = 0;
 909	spin_unlock(&ip->i_flags_lock);
 910
 911	ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
 912	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 913
 914	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
 915	/*
 916	 * Remove the inode from the per-AG radix tree.
 917	 *
 918	 * Because radix_tree_delete won't complain even if the item was never
 919	 * added to the tree, assert that it's been there before to catch
 920	 * problems with the inode lifetime early on.
 921	 */
 922	spin_lock(&pag->pag_ici_lock);
 923	if (!radix_tree_delete(&pag->pag_ici_root,
 924				XFS_INO_TO_AGINO(ip->i_mount, ino)))
 925		ASSERT(0);
 926	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
 927	spin_unlock(&pag->pag_ici_lock);
 928
 929	/*
 930	 * Here we do an (almost) spurious inode lock in order to coordinate
 931	 * with inode cache radix tree lookups.  This is because the lookup
 932	 * can reference the inodes in the cache without taking references.
 933	 *
 934	 * We make that OK here by ensuring that we wait until the inode is
 935	 * unlocked after the lookup before we go ahead and free it.
 936	 */
 937	xfs_ilock(ip, XFS_ILOCK_EXCL);
 938	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
 939	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 940	ASSERT(xfs_inode_clean(ip));
 941
 942	__xfs_inode_free(ip);
 943	return;
 944
 945out_clear_flush:
 946	xfs_iflags_clear(ip, XFS_IFLUSHING);
 947out_iunlock:
 948	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 949out:
 950	xfs_iflags_clear(ip, XFS_IRECLAIM);
 951}
 952
 953/* Reclaim sick inodes if we're unmounting or the fs went down. */
 954static inline bool
 955xfs_want_reclaim_sick(
 956	struct xfs_mount	*mp)
 957{
 958	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
 959	       xfs_is_shutdown(mp);
 960}
 961
 962void
 963xfs_reclaim_inodes(
 964	struct xfs_mount	*mp)
 965{
 966	struct xfs_icwalk	icw = {
 967		.icw_flags	= 0,
 968	};
 969
 970	if (xfs_want_reclaim_sick(mp))
 971		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
 972
 973	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
 974		xfs_ail_push_all_sync(mp->m_ail);
 975		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
 976	}
 977}
 978
 979/*
 980 * The shrinker infrastructure determines how many inodes we should scan for
 981 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 982 * push the AIL here. We also want to proactively free up memory if we can to
 983 * minimise the amount of work memory reclaim has to do so we kick the
 984 * background reclaim if it isn't already scheduled.
 985 */
 986long
 987xfs_reclaim_inodes_nr(
 988	struct xfs_mount	*mp,
 989	unsigned long		nr_to_scan)
 990{
 991	struct xfs_icwalk	icw = {
 992		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
 993		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
 994	};
 995
 996	if (xfs_want_reclaim_sick(mp))
 997		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
 998
 999	/* kick background reclaimer and push the AIL */
1000	xfs_reclaim_work_queue(mp);
1001	xfs_ail_push_all(mp->m_ail);
1002
1003	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
1004	return 0;
1005}
1006
1007/*
1008 * Return the number of reclaimable inodes in the filesystem for
1009 * the shrinker to determine how much to reclaim.
1010 */
1011long
1012xfs_reclaim_inodes_count(
1013	struct xfs_mount	*mp)
1014{
1015	struct xfs_perag	*pag;
1016	xfs_agnumber_t		ag = 0;
1017	long			reclaimable = 0;
1018
1019	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1020		ag = pag->pag_agno + 1;
1021		reclaimable += pag->pag_ici_reclaimable;
1022		xfs_perag_put(pag);
1023	}
1024	return reclaimable;
1025}
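/*
 * Rough sketch (hypothetical names; the real hooks live in xfs_super.c) of
 * how a superblock shrinker pair can drive the two functions above: report
 * the reclaimable count from ->nr_cached_objects and feed the scan count
 * into ->free_cached_objects.
 *
 *	static long
 *	example_nr_cached_objects(struct super_block *sb,
 *			struct shrink_control *sc)
 *	{
 *		return xfs_reclaim_inodes_count(XFS_M(sb));
 *	}
 *
 *	static long
 *	example_free_cached_objects(struct super_block *sb,
 *			struct shrink_control *sc)
 *	{
 *		return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
 *	}
 */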
1026
1027STATIC bool
1028xfs_icwalk_match_id(
1029	struct xfs_inode	*ip,
1030	struct xfs_icwalk	*icw)
1031{
1032	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1033	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1034		return false;
1035
1036	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1037	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1038		return false;
1039
1040	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1041	    ip->i_projid != icw->icw_prid)
1042		return false;
1043
1044	return true;
1045}
1046
1047/*
1048 * A union-based inode filtering algorithm. Process the inode if any of the
1049 * criteria match. This is for global/internal scans only.
1050 */
1051STATIC bool
1052xfs_icwalk_match_id_union(
1053	struct xfs_inode	*ip,
1054	struct xfs_icwalk	*icw)
1055{
1056	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1057	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1058		return true;
1059
1060	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1061	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1062		return true;
1063
1064	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1065	    ip->i_projid == icw->icw_prid)
1066		return true;
1067
1068	return false;
1069}
1070
1071/*
1072 * Is this inode @ip eligible for eof/cow block reclamation, given some
1073 * filtering parameters @icw?  The inode is eligible if @icw is null or
1074 * if the predicate functions match.
1075 */
1076static bool
1077xfs_icwalk_match(
1078	struct xfs_inode	*ip,
1079	struct xfs_icwalk	*icw)
1080{
1081	bool			match;
1082
1083	if (!icw)
1084		return true;
1085
1086	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
1087		match = xfs_icwalk_match_id_union(ip, icw);
1088	else
1089		match = xfs_icwalk_match_id(ip, icw);
1090	if (!match)
1091		return false;
1092
1093	/* skip the inode if the file size is too small */
1094	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
1095	    XFS_ISIZE(ip) < icw->icw_min_file_size)
1096		return false;
1097
1098	return true;
1099}
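/*
 * Example with hypothetical ids: a filter built as
 *
 *	struct xfs_icwalk	icw = {
 *		.icw_flags	= XFS_ICWALK_FLAG_UNION |
 *				  XFS_ICWALK_FLAG_UID | XFS_ICWALK_FLAG_GID,
 *		.icw_uid	= make_kuid(&init_user_ns, 1000),
 *		.icw_gid	= make_kgid(&init_user_ns, 100),
 *	};
 *
 * selects inodes owned by uid 1000 *or* gid 100; without
 * XFS_ICWALK_FLAG_UNION, both criteria would have to match.
 */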
1100
1101/*
1102 * This is a fast pass over the inode cache to try to get reclaim moving on as
1103 * many inodes as possible in a short period of time. It kicks itself every few
1104 * seconds, as well as being kicked by the inode cache shrinker when memory
1105 * goes low.
1106 */
1107void
1108xfs_reclaim_worker(
1109	struct work_struct *work)
1110{
1111	struct xfs_mount *mp = container_of(to_delayed_work(work),
1112					struct xfs_mount, m_reclaim_work);
1113
1114	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
1115	xfs_reclaim_work_queue(mp);
1116}
1117
1118STATIC int
1119xfs_inode_free_eofblocks(
1120	struct xfs_inode	*ip,
1121	struct xfs_icwalk	*icw,
1122	unsigned int		*lockflags)
1123{
1124	bool			wait;
1125
1126	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1127
1128	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1129		return 0;
1130
1131	/*
1132	 * If the mapping is dirty the operation can block and wait for some
1133	 * time. Unless we are waiting, skip it.
1134	 */
1135	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1136		return 0;
1137
1138	if (!xfs_icwalk_match(ip, icw))
1139		return 0;
1140
1141	/*
1142	 * If the caller is waiting, return -EAGAIN to keep the background
1143	 * scanner moving and revisit the inode in a subsequent pass.
1144	 */
1145	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1146		if (wait)
1147			return -EAGAIN;
1148		return 0;
1149	}
1150	*lockflags |= XFS_IOLOCK_EXCL;
1151
1152	if (xfs_can_free_eofblocks(ip, false))
1153		return xfs_free_eofblocks(ip);
1154
1155	/* inode could be preallocated or append-only */
1156	trace_xfs_inode_free_eofblocks_invalid(ip);
1157	xfs_inode_clear_eofblocks_tag(ip);
1158	return 0;
1159}
1160
1161static void
1162xfs_blockgc_set_iflag(
1163	struct xfs_inode	*ip,
1164	unsigned long		iflag)
1165{
1166	struct xfs_mount	*mp = ip->i_mount;
1167	struct xfs_perag	*pag;
1168
1169	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1170
1171	/*
1172	 * Don't bother locking the AG and looking up in the radix trees
1173	 * if we already know that we have the tag set.
1174	 */
1175	if (ip->i_flags & iflag)
1176		return;
1177	spin_lock(&ip->i_flags_lock);
1178	ip->i_flags |= iflag;
1179	spin_unlock(&ip->i_flags_lock);
1180
1181	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1182	spin_lock(&pag->pag_ici_lock);
1183
1184	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1185			XFS_ICI_BLOCKGC_TAG);
1186
1187	spin_unlock(&pag->pag_ici_lock);
1188	xfs_perag_put(pag);
1189}
1190
1191void
1192xfs_inode_set_eofblocks_tag(
1193	xfs_inode_t	*ip)
1194{
1195	trace_xfs_inode_set_eofblocks_tag(ip);
1196	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
1197}
1198
1199static void
1200xfs_blockgc_clear_iflag(
1201	struct xfs_inode	*ip,
1202	unsigned long		iflag)
1203{
1204	struct xfs_mount	*mp = ip->i_mount;
1205	struct xfs_perag	*pag;
1206	bool			clear_tag;
1207
1208	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1209
1210	spin_lock(&ip->i_flags_lock);
1211	ip->i_flags &= ~iflag;
1212	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
1213	spin_unlock(&ip->i_flags_lock);
1214
1215	if (!clear_tag)
1216		return;
1217
1218	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1219	spin_lock(&pag->pag_ici_lock);
1220
1221	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1222			XFS_ICI_BLOCKGC_TAG);
1223
1224	spin_unlock(&pag->pag_ici_lock);
1225	xfs_perag_put(pag);
1226}
1227
1228void
1229xfs_inode_clear_eofblocks_tag(
1230	xfs_inode_t	*ip)
1231{
1232	trace_xfs_inode_clear_eofblocks_tag(ip);
1233	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
1234}
1235
1236/*
1237 * Set ourselves up to free CoW blocks from this file.  If it's already clean
1238 * then we can bail out quickly, but otherwise we must back off if the file
1239 * is undergoing some kind of write.
1240 */
1241static bool
1242xfs_prep_free_cowblocks(
1243	struct xfs_inode	*ip)
1244{
1245	/*
1246	 * Just clear the tag if we have an empty cow fork or none at all. It's
1247	 * possible the inode was fully unshared since it was originally tagged.
1248	 */
1249	if (!xfs_inode_has_cow_data(ip)) {
1250		trace_xfs_inode_free_cowblocks_invalid(ip);
1251		xfs_inode_clear_cowblocks_tag(ip);
1252		return false;
1253	}
1254
1255	/*
1256	 * If the mapping is dirty or under writeback we cannot touch the
1257	 * CoW fork.  Leave it alone if we're in the midst of a directio.
1258	 */
1259	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1260	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1261	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1262	    atomic_read(&VFS_I(ip)->i_dio_count))
1263		return false;
1264
1265	return true;
1266}
1267
1268/*
1269 * Automatic CoW Reservation Freeing
1270 *
1271 * These functions automatically garbage collect leftover CoW reservations
1272 * that were made on behalf of a cowextsize hint when we start to run out
1273 * of quota or when the reservations sit around for too long.  If the file
1274 * has dirty pages or is undergoing writeback, its CoW reservations will
1275 * be retained.
1276 *
1277 * The actual garbage collection piggybacks off the same code that runs
1278 * the speculative EOF preallocation garbage collector.
1279 */
1280STATIC int
1281xfs_inode_free_cowblocks(
1282	struct xfs_inode	*ip,
1283	struct xfs_icwalk	*icw,
1284	unsigned int		*lockflags)
1285{
1286	bool			wait;
1287	int			ret = 0;
1288
1289	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1290
1291	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1292		return 0;
1293
1294	if (!xfs_prep_free_cowblocks(ip))
1295		return 0;
1296
1297	if (!xfs_icwalk_match(ip, icw))
1298		return 0;
1299
1300	/*
1301	 * If the caller is waiting, return -EAGAIN to keep the background
1302	 * scanner moving and revisit the inode in a subsequent pass.
1303	 */
1304	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
1305	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1306		if (wait)
1307			return -EAGAIN;
1308		return 0;
1309	}
1310	*lockflags |= XFS_IOLOCK_EXCL;
1311
1312	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1313		if (wait)
1314			return -EAGAIN;
1315		return 0;
1316	}
1317	*lockflags |= XFS_MMAPLOCK_EXCL;
1318
1319	/*
1320	 * Check again, nobody else should be able to dirty blocks or change
1321	 * the reflink iflag now that we have the first two locks held.
1322	 */
1323	if (xfs_prep_free_cowblocks(ip))
1324		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1325	return ret;
1326}
1327
1328void
1329xfs_inode_set_cowblocks_tag(
1330	xfs_inode_t	*ip)
1331{
1332	trace_xfs_inode_set_cowblocks_tag(ip);
1333	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
1334}
1335
1336void
1337xfs_inode_clear_cowblocks_tag(
1338	xfs_inode_t	*ip)
1339{
1340	trace_xfs_inode_clear_cowblocks_tag(ip);
1341	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
1342}
1343
1344/* Disable post-EOF and CoW block auto-reclamation. */
1345void
1346xfs_blockgc_stop(
1347	struct xfs_mount	*mp)
1348{
1349	struct xfs_perag	*pag;
1350	xfs_agnumber_t		agno;
1351
1352	if (!xfs_clear_blockgc_enabled(mp))
1353		return;
1354
1355	for_each_perag(mp, agno, pag)
1356		cancel_delayed_work_sync(&pag->pag_blockgc_work);
1357	trace_xfs_blockgc_stop(mp, __return_address);
1358}
1359
1360/* Enable post-EOF and CoW block auto-reclamation. */
1361void
1362xfs_blockgc_start(
1363	struct xfs_mount	*mp)
1364{
1365	struct xfs_perag	*pag;
1366	xfs_agnumber_t		agno;
1367
1368	if (xfs_set_blockgc_enabled(mp))
1369		return;
1370
1371	trace_xfs_blockgc_start(mp, __return_address);
1372	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1373		xfs_blockgc_queue(pag);
1374}
1375
1376/* Don't try to run block gc on an inode that's in any of these states. */
1377#define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
1378					 XFS_NEED_INACTIVE | \
1379					 XFS_INACTIVATING | \
1380					 XFS_IRECLAIMABLE | \
1381					 XFS_IRECLAIM)
1382/*
1383 * Decide if the given @ip is eligible for garbage collection of speculative
1384 * preallocations, and grab it if so.  Returns true if it's ready to go or
1385 * false if we should just ignore it.
1386 */
1387static bool
1388xfs_blockgc_igrab(
1389	struct xfs_inode	*ip)
1390{
1391	struct inode		*inode = VFS_I(ip);
1392
1393	ASSERT(rcu_read_lock_held());
1394
1395	/* Check for stale RCU freed inode */
1396	spin_lock(&ip->i_flags_lock);
1397	if (!ip->i_ino)
1398		goto out_unlock_noent;
1399
1400	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
1401		goto out_unlock_noent;
1402	spin_unlock(&ip->i_flags_lock);
1403
1404	/* nothing to sync during shutdown */
1405	if (xfs_is_shutdown(ip->i_mount))
1406		return false;
1407
1408	/* If we can't grab the inode, it must be on its way to reclaim. */
1409	if (!igrab(inode))
1410		return false;
1411
1412	/* inode is valid */
1413	return true;
1414
1415out_unlock_noent:
1416	spin_unlock(&ip->i_flags_lock);
1417	return false;
1418}
1419
1420/* Scan one incore inode for block preallocations that we can remove. */
1421static int
1422xfs_blockgc_scan_inode(
1423	struct xfs_inode	*ip,
1424	struct xfs_icwalk	*icw)
1425{
1426	unsigned int		lockflags = 0;
1427	int			error;
1428
1429	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
1430	if (error)
1431		goto unlock;
1432
1433	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
1434unlock:
1435	if (lockflags)
1436		xfs_iunlock(ip, lockflags);
1437	xfs_irele(ip);
1438	return error;
1439}
1440
1441/* Background worker that trims preallocated space. */
1442void
1443xfs_blockgc_worker(
1444	struct work_struct	*work)
1445{
1446	struct xfs_perag	*pag = container_of(to_delayed_work(work),
1447					struct xfs_perag, pag_blockgc_work);
1448	struct xfs_mount	*mp = pag->pag_mount;
1449	int			error;
1450
1451	trace_xfs_blockgc_worker(mp, __return_address);
1452
1453	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
1454	if (error)
1455		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1456				pag->pag_agno, error);
1457	xfs_blockgc_queue(pag);
1458}
1459
1460/*
1461 * Try to free space in the filesystem by purging inactive inodes, eofblocks
1462 * and cowblocks.
1463 */
1464int
1465xfs_blockgc_free_space(
1466	struct xfs_mount	*mp,
1467	struct xfs_icwalk	*icw)
1468{
1469	int			error;
1470
1471	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
1472
1473	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
1474	if (error)
1475		return error;
1476
1477	return xfs_inodegc_flush(mp);
1478}
1479
1480/*
1481 * Reclaim all the free space that we can by scheduling the background blockgc
1482 * and inodegc workers immediately and waiting for them all to clear.
1483 */
1484int
1485xfs_blockgc_flush_all(
1486	struct xfs_mount	*mp)
1487{
1488	struct xfs_perag	*pag;
1489	xfs_agnumber_t		agno;
1490
1491	trace_xfs_blockgc_flush_all(mp, __return_address);
1492
1493	/*
1494	 * For each blockgc worker, move its queue time up to now.  If it
1495	 * wasn't queued, it will not be requeued.  Then flush whatever's
1496	 * left.
1497	 */
1498	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1499		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
1500				&pag->pag_blockgc_work, 0);
1501
1502	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1503		flush_delayed_work(&pag->pag_blockgc_work);
1504
1505	return xfs_inodegc_flush(mp);
1506}
1507
1508/*
1509 * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
1510 * quota caused an allocation failure, so we make a best effort by including
1511 * each quota under low free space conditions (less than 1% free space) in the
1512 * scan.
1513 *
1514 * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
1515 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
1516 * MMAPLOCK.
1517 */
1518int
1519xfs_blockgc_free_dquots(
1520	struct xfs_mount	*mp,
1521	struct xfs_dquot	*udqp,
1522	struct xfs_dquot	*gdqp,
1523	struct xfs_dquot	*pdqp,
1524	unsigned int		iwalk_flags)
1525{
1526	struct xfs_icwalk	icw = {0};
1527	bool			do_work = false;
1528
1529	if (!udqp && !gdqp && !pdqp)
1530		return 0;
1531
1532	/*
1533	 * Run a scan to free blocks using the union filter to cover all
1534	 * applicable quotas in a single scan.
1535	 */
1536	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
1537
1538	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
1539		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1540		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
1541		do_work = true;
1542	}
1543
1544	if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1545		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1546		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
1547		do_work = true;
1548	}
1549
1550	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
1551		icw.icw_prid = pdqp->q_id;
1552		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
1553		do_work = true;
1554	}
1555
1556	if (!do_work)
1557		return 0;
1558
1559	return xfs_blockgc_free_space(mp, &icw);
1560}
1561
1562/* Run cow/eofblocks scans on the quotas attached to the inode. */
1563int
1564xfs_blockgc_free_quota(
1565	struct xfs_inode	*ip,
1566	unsigned int		iwalk_flags)
1567{
1568	return xfs_blockgc_free_dquots(ip->i_mount,
1569			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1570			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
1571			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
1572}
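/*
 * Sketch of the intended caller pattern (hypothetical; the real retry loops
 * live in the transaction allocation paths): on a quota-related allocation
 * failure, free speculative preallocations and try once more.
 *
 *	error = do_allocating_operation(ip);
 *	if (error == -EDQUOT || error == -ENOSPC) {
 *		error = xfs_blockgc_free_quota(ip, 0);
 *		if (!error)
 *			error = do_allocating_operation(ip);
 *	}
 */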
1573
1574/* XFS Inode Cache Walking Code */
1575
1576/*
1577 * The inode lookup is done in batches to keep the amount of lock traffic and
1578 * radix tree lookups to a minimum. The batch size is a trade off between
1579 * lookup reduction and stack usage. This is in the reclaim path, so we can't
1580 * be too greedy.
1581 */
1582#define XFS_LOOKUP_BATCH	32
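/*
 * At this batch size the on-stack batch array in xfs_icwalk_ag() costs
 * 32 pointers, i.e. 256 bytes on a 64-bit kernel, per walk.
 */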
1583
1584
1585/*
1586 * Decide if we want to grab this inode in anticipation of doing work towards
1587 * the goal.
1588 */
1589static inline bool
1590xfs_icwalk_igrab(
1591	enum xfs_icwalk_goal	goal,
1592	struct xfs_inode	*ip,
1593	struct xfs_icwalk	*icw)
1594{
1595	switch (goal) {
1596	case XFS_ICWALK_BLOCKGC:
1597		return xfs_blockgc_igrab(ip);
1598	case XFS_ICWALK_RECLAIM:
1599		return xfs_reclaim_igrab(ip, icw);
1600	default:
1601		return false;
1602	}
1603}
1604
1605/*
1606 * Process an inode.  Each processing function must handle any state changes
1607 * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
1608 */
1609static inline int
1610xfs_icwalk_process_inode(
1611	enum xfs_icwalk_goal	goal,
1612	struct xfs_inode	*ip,
1613	struct xfs_perag	*pag,
1614	struct xfs_icwalk	*icw)
1615{
1616	int			error = 0;
1617
1618	switch (goal) {
1619	case XFS_ICWALK_BLOCKGC:
1620		error = xfs_blockgc_scan_inode(ip, icw);
1621		break;
1622	case XFS_ICWALK_RECLAIM:
1623		xfs_reclaim_inode(ip, pag);
1624		break;
1625	}
1626	return error;
1627}
1628
1629/*
1630 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1631 * process them in some manner.
1632 */
1633static int
1634xfs_icwalk_ag(
1635	struct xfs_perag	*pag,
1636	enum xfs_icwalk_goal	goal,
1637	struct xfs_icwalk	*icw)
1638{
1639	struct xfs_mount	*mp = pag->pag_mount;
1640	uint32_t		first_index;
1641	int			last_error = 0;
1642	int			skipped;
1643	bool			done;
1644	int			nr_found;
1645
1646restart:
1647	done = false;
1648	skipped = 0;
1649	if (goal == XFS_ICWALK_RECLAIM)
1650		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1651	else
1652		first_index = 0;
1653	nr_found = 0;
1654	do {
1655		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1656		int		error = 0;
1657		int		i;
1658
1659		rcu_read_lock();
1660
1661		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1662				(void **) batch, first_index,
1663				XFS_LOOKUP_BATCH, goal);
1664		if (!nr_found) {
1665			done = true;
1666			rcu_read_unlock();
1667			break;
1668		}
1669
1670		/*
1671		 * Grab the inodes before we drop the lock. If we found
1672		 * nothing, nr_found == 0 and the loop will be skipped.
1673		 */
1674		for (i = 0; i < nr_found; i++) {
1675			struct xfs_inode *ip = batch[i];
1676
1677			if (done || !xfs_icwalk_igrab(goal, ip, icw))
1678				batch[i] = NULL;
1679
1680			/*
1681			 * Update the index for the next lookup. Catch
1682			 * overflows into the next AG range which can occur if
1683			 * we have inodes in the last block of the AG and we
1684			 * are currently pointing to the last inode.
1685			 *
1686			 * Because we may see inodes that are from the wrong AG
1687			 * due to RCU freeing and reallocation, only update the
1688			 * index if it lies in this AG. It was a race that led
1689			 * us to see this inode, so another lookup from the
1690			 * same index will not find it again.
1691			 */
1692			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1693				continue;
1694			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1695			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1696				done = true;
1697		}
1698
1699		/* unlock now we've grabbed the inodes. */
1700		rcu_read_unlock();
1701
1702		for (i = 0; i < nr_found; i++) {
1703			if (!batch[i])
1704				continue;
1705			error = xfs_icwalk_process_inode(goal, batch[i], pag,
1706					icw);
1707			if (error == -EAGAIN) {
1708				skipped++;
1709				continue;
1710			}
1711			if (error && last_error != -EFSCORRUPTED)
1712				last_error = error;
1713		}
1714
1715		/* bail out if the filesystem is corrupted.  */
1716		if (error == -EFSCORRUPTED)
1717			break;
1718
1719		cond_resched();
1720
1721		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
1722			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
1723			if (icw->icw_scan_limit <= 0)
1724				break;
1725		}
1726	} while (nr_found && !done);
1727
1728	if (goal == XFS_ICWALK_RECLAIM) {
1729		if (done)
1730			first_index = 0;
1731		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1732	}
1733
1734	if (skipped) {
1735		delay(1);
1736		goto restart;
1737	}
1738	return last_error;
1739}
1740
1741/* Walk all incore inodes to achieve a given goal. */
1742static int
1743xfs_icwalk(
1744	struct xfs_mount	*mp,
1745	enum xfs_icwalk_goal	goal,
1746	struct xfs_icwalk	*icw)
1747{
1748	struct xfs_perag	*pag;
1749	int			error = 0;
1750	int			last_error = 0;
1751	xfs_agnumber_t		agno;
1752
1753	for_each_perag_tag(mp, agno, pag, goal) {
1754		error = xfs_icwalk_ag(pag, goal, icw);
1755		if (error) {
1756			last_error = error;
1757			if (error == -EFSCORRUPTED) {
1758				xfs_perag_rele(pag);
1759				break;
1760			}
1761		}
1762	}
1763	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
1764	return last_error;
1765}
1766
1767#ifdef DEBUG
1768static void
1769xfs_check_delalloc(
1770	struct xfs_inode	*ip,
1771	int			whichfork)
1772{
1773	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1774	struct xfs_bmbt_irec	got;
1775	struct xfs_iext_cursor	icur;
1776
1777	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1778		return;
1779	do {
1780		if (isnullstartblock(got.br_startblock)) {
1781			xfs_warn(ip->i_mount,
1782	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
1783				ip->i_ino,
1784				whichfork == XFS_DATA_FORK ? "data" : "cow",
1785				got.br_startoff, got.br_blockcount);
1786		}
1787	} while (xfs_iext_next_extent(ifp, &icur, &got));
1788}
1789#else
1790#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
1791#endif
1792
1793/* Schedule the inode for reclaim. */
1794static void
1795xfs_inodegc_set_reclaimable(
1796	struct xfs_inode	*ip)
1797{
1798	struct xfs_mount	*mp = ip->i_mount;
1799	struct xfs_perag	*pag;
1800
1801	if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
1802		xfs_check_delalloc(ip, XFS_DATA_FORK);
1803		xfs_check_delalloc(ip, XFS_COW_FORK);
1804		ASSERT(0);
1805	}
1806
1807	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1808	spin_lock(&pag->pag_ici_lock);
1809	spin_lock(&ip->i_flags_lock);
1810
1811	trace_xfs_inode_set_reclaimable(ip);
1812	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1813	ip->i_flags |= XFS_IRECLAIMABLE;
1814	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1815			XFS_ICI_RECLAIM_TAG);
1816
1817	spin_unlock(&ip->i_flags_lock);
1818	spin_unlock(&pag->pag_ici_lock);
1819	xfs_perag_put(pag);
1820}
1821
1822/*
1823 * Free all speculative preallocations and possibly even the inode itself.
1824 * This is the last chance to make changes to an otherwise unreferenced file
1825 * before incore reclamation happens.
1826 */
1827static int
1828xfs_inodegc_inactivate(
1829	struct xfs_inode	*ip)
1830{
1831	int			error;
1832
1833	trace_xfs_inode_inactivating(ip);
1834	error = xfs_inactive(ip);
1835	xfs_inodegc_set_reclaimable(ip);
1836	return error;
1837
1838}
1839
1840void
1841xfs_inodegc_worker(
1842	struct work_struct	*work)
1843{
1844	struct xfs_inodegc	*gc = container_of(to_delayed_work(work),
1845						struct xfs_inodegc, work);
1846	struct llist_node	*node = llist_del_all(&gc->list);
1847	struct xfs_inode	*ip, *n;
1848	struct xfs_mount	*mp = gc->mp;
1849	unsigned int		nofs_flag;
1850
1851	/*
1852	 * Clear the cpu mask bit and ensure that we have seen the latest
1853	 * update of the gc structure associated with this CPU. This matches
1854	 * with the release semantics used when setting the cpumask bit in
1855	 * xfs_inodegc_queue.
1856	 */
1857	cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask);
1858	smp_mb__after_atomic();
1859
1860	WRITE_ONCE(gc->items, 0);
1861
1862	if (!node)
1863		return;
1864
1865	/*
1866	 * We can allocate memory here while doing writeback on behalf of
1867	 * memory reclaim.  To avoid memory allocation deadlocks set the
1868	 * task-wide nofs context for the following operations.
1869	 */
1870	nofs_flag = memalloc_nofs_save();
1871
1872	ip = llist_entry(node, struct xfs_inode, i_gclist);
1873	trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits));
1874
1875	WRITE_ONCE(gc->shrinker_hits, 0);
1876	llist_for_each_entry_safe(ip, n, node, i_gclist) {
1877		int	error;
1878
1879		xfs_iflags_set(ip, XFS_INACTIVATING);
1880		error = xfs_inodegc_inactivate(ip);
1881		if (error && !gc->error)
1882			gc->error = error;
1883	}
1884
1885	memalloc_nofs_restore(nofs_flag);
1886}
1887
1888/*
1889 * Expedite all pending inodegc work to run immediately. This does not wait for
1890 * completion of the work.
1891 */
1892void
1893xfs_inodegc_push(
1894	struct xfs_mount	*mp)
1895{
1896	if (!xfs_is_inodegc_enabled(mp))
1897		return;
1898	trace_xfs_inodegc_push(mp, __return_address);
1899	xfs_inodegc_queue_all(mp);
1900}
1901
1902/*
1903 * Force all currently queued inode inactivation work to run immediately and
1904 * wait for the work to finish.
1905 */
1906int
1907xfs_inodegc_flush(
1908	struct xfs_mount	*mp)
1909{
1910	xfs_inodegc_push(mp);
1911	trace_xfs_inodegc_flush(mp, __return_address);
1912	return xfs_inodegc_wait_all(mp);
1913}
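
/*
 * Hypothetical usage sketch (not upstream code) contrasting the two
 * entry points above:
 */
#if 0	/* illustration only */
static void example_statfs_hint(struct xfs_mount *mp)
{
	/* Nudge queued inactivations along so counters settle; don't block. */
	xfs_inodegc_push(mp);
}

static int example_quiesce(struct xfs_mount *mp)
{
	/* Run everything queued so far and wait, returning the first error. */
	return xfs_inodegc_flush(mp);
}
#endif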
1914
1915/*
1916 * Disable the inode inactivation background workers and wait for them
1917 * to stop.  Caller must hold sb->s_umount to coordinate changes in the
1918 * inodegc_enabled state.
1919 */
1920void
1921xfs_inodegc_stop(
1922	struct xfs_mount	*mp)
1923{
1924	bool			rerun;
1925
1926	if (!xfs_clear_inodegc_enabled(mp))
1927		return;
1928
1929	/*
1930	 * Drain all pending inodegc work, including inodes that could be
1931	 * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
1932	 * threads that sample the inodegc state just prior to us clearing it.
1933	 * The inodegc flag state prevents new threads from queuing more
1934	 * inodes, so we queue pending work items and flush the workqueue until
1935	 * all inodegc lists are empty.  IOWs, we cannot use drain_workqueue
1936	 * here because it does not allow other unserialized mechanisms to
1937	 * reschedule inodegc work while this draining is in progress.
1938	 */
1939	xfs_inodegc_queue_all(mp);
1940	do {
1941		flush_workqueue(mp->m_inodegc_wq);
1942		rerun = xfs_inodegc_queue_all(mp);
1943	} while (rerun);
1944
1945	trace_xfs_inodegc_stop(mp, __return_address);
1946}
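
/*
 * Concretely: queueing work onto a workqueue that drain_workqueue() is
 * draining would trip the __WQ_DRAINING warning in __queue_work() and
 * the request would be dropped, stranding those inodes.
 */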
1947
1948/*
1949 * Enable the inode inactivation background workers and schedule deferred inode
1950 * inactivation work if there is any.  Caller must hold sb->s_umount to
1951 * coordinate changes in the inodegc_enabled state.
1952 */
1953void
1954xfs_inodegc_start(
1955	struct xfs_mount	*mp)
1956{
1957	if (xfs_set_inodegc_enabled(mp))
1958		return;
1959
1960	trace_xfs_inodegc_start(mp, __return_address);
1961	xfs_inodegc_queue_all(mp);
1962}
1963
1964#ifdef CONFIG_XFS_RT
1965static inline bool
1966xfs_inodegc_want_queue_rt_file(
1967	struct xfs_inode	*ip)
1968{
1969	struct xfs_mount	*mp = ip->i_mount;
1970
1971	if (!XFS_IS_REALTIME_INODE(ip))
1972		return false;
1973
1974	if (__percpu_counter_compare(&mp->m_frextents,
1975				mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
1976				XFS_FDBLOCKS_BATCH) < 0)
1977		return true;
1978
1979	return false;
1980}
1981#else
1982# define xfs_inodegc_want_queue_rt_file(ip)	(false)
1983#endif /* CONFIG_XFS_RT */
1984
1985/*
1986 * Schedule the inactivation worker when:
1987 *
1988 *  - We've accumulated more than one inode cluster buffer's worth of inodes.
1989 *  - There is less than 5% free space left.
1990 *  - Any of the quotas for this inode are near an enforcement limit.
1991 */
1992static inline bool
1993xfs_inodegc_want_queue_work(
1994	struct xfs_inode	*ip,
1995	unsigned int		items)
1996{
1997	struct xfs_mount	*mp = ip->i_mount;
1998
1999	if (items > mp->m_ino_geo.inodes_per_cluster)
2000		return true;
2001
2002	if (__percpu_counter_compare(&mp->m_fdblocks,
2003				mp->m_low_space[XFS_LOWSP_5_PCNT],
2004				XFS_FDBLOCKS_BATCH) < 0)
2005		return true;
2006
2007	if (xfs_inodegc_want_queue_rt_file(ip))
2008		return true;
2009
2010	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
2011		return true;
2012
2013	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
2014		return true;
2015
2016	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
2017		return true;
2018
2019	return false;
2020}
2021
2022/*
2023 * Upper bound on the number of inodes that can sit on each per-cpu queue
2024 * awaiting inactivation at any given time, to avoid monopolizing the workqueue.
2025 */
2026#define XFS_INODEGC_MAX_BACKLOG		(4 * XFS_INODES_PER_CHUNK)
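
/*
 * For reference: XFS_INODES_PER_CHUNK is 64, so this caps each per-cpu
 * queue at 256 inodes before xfs_inodegc_want_flush_work() starts
 * throttling the frontend.
 */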
2027
2028/*
2029 * Make the frontend wait for inactivations when:
2030 *
2031 *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
2032 *  - The queue depth exceeds the maximum allowable percpu backlog.
2033 *
2034 * Note: If the current thread is running a transaction, we don't ever want to
2035 * wait for other transactions because that could introduce a deadlock.
2036 */
2037static inline bool
2038xfs_inodegc_want_flush_work(
2039	struct xfs_inode	*ip,
2040	unsigned int		items,
2041	unsigned int		shrinker_hits)
2042{
2043	if (current->journal_info)
2044		return false;
2045
2046	if (shrinker_hits > 0)
2047		return true;
2048
2049	if (items > XFS_INODEGC_MAX_BACKLOG)
2050		return true;
2051
2052	return false;
2053}
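
/*
 * Note: xfs_trans_context_set() stashes the running transaction in
 * current->journal_info, which is what the check above keys off.
 */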
2054
2055/*
2056 * Queue a background inactivation worker if there are inodes that need to be
2057 * inactivated and higher level xfs code hasn't disabled the background
2058 * workers.
2059 */
2060static void
2061xfs_inodegc_queue(
2062	struct xfs_inode	*ip)
2063{
2064	struct xfs_mount	*mp = ip->i_mount;
2065	struct xfs_inodegc	*gc;
2066	int			items;
2067	unsigned int		shrinker_hits;
2068	unsigned int		cpu_nr;
2069	unsigned long		queue_delay = 1;
2070
2071	trace_xfs_inode_set_need_inactive(ip);
2072	spin_lock(&ip->i_flags_lock);
2073	ip->i_flags |= XFS_NEED_INACTIVE;
2074	spin_unlock(&ip->i_flags_lock);
2075
2076	cpu_nr = get_cpu();
2077	gc = this_cpu_ptr(mp->m_inodegc);
2078	llist_add(&ip->i_gclist, &gc->list);
2079	items = READ_ONCE(gc->items);
2080	WRITE_ONCE(gc->items, items + 1);
2081	shrinker_hits = READ_ONCE(gc->shrinker_hits);
2082
2083	/*
2084	 * Ensure the list add is always seen by anyone who finds the cpumask
2085	 * bit set. This effectively gives the cpumask bit set operation
2086	 * release ordering semantics.
2087	 */
2088	smp_mb__before_atomic();
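	/* Test first to avoid dirtying the shared cacheline if already set. */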
2089	if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask))
2090		cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask);
2091
2092	/*
2093	 * We queue the work while holding the current CPU so that the work
2094	 * is scheduled to run on this CPU.
2095	 */
2096	if (!xfs_is_inodegc_enabled(mp)) {
2097		put_cpu();
2098		return;
2099	}
2100
2101	if (xfs_inodegc_want_queue_work(ip, items))
2102		queue_delay = 0;
2103
2104	trace_xfs_inodegc_queue(mp, __return_address);
2105	mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
2106			queue_delay);
2107	put_cpu();
2108
2109	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
2110		trace_xfs_inodegc_throttle(mp, __return_address);
2111		flush_delayed_work(&gc->work);
2112	}
2113}
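
/*
 * Illustration (not upstream code) of the barrier pairing between the
 * producer above and the consumer in xfs_inodegc_worker():
 *
 *	xfs_inodegc_queue (producer)	xfs_inodegc_worker (consumer)
 *	llist_add(&ip->i_gclist, ...);	cpumask_clear_cpu(gc->cpu, ...);
 *	smp_mb__before_atomic();	smp_mb__after_atomic();
 *	cpumask_test_and_set_cpu(...);	node = llist_del_all(&gc->list);
 *
 * A consumer that observes the cpumask bit set is guaranteed to observe
 * the llist_add() that preceded it; a producer whose llist_add() the
 * worker missed will observe the cleared bit, set it again and requeue
 * the work, so no inode can be stranded on an unscheduled list.
 */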
2114
2115/*
2116 * We set the inode flag atomically with the radix tree tag.  Once we get tag
2117 * lookups on the radix tree, this inode flag can go away.
2118 *
2119 * We always use background reclaim here because even if the inode is clean, it
2120 * still may be under IO and hence we have to wait for IO completion to occur
2121 * before we can reclaim the inode. The background reclaim path handles this
2122 * more efficiently than we can here, so simply let background reclaim tear down
2123 * all inodes.
2124 */
2125void
2126xfs_inode_mark_reclaimable(
2127	struct xfs_inode	*ip)
2128{
2129	struct xfs_mount	*mp = ip->i_mount;
2130	bool			need_inactive;
2131
2132	XFS_STATS_INC(mp, vn_reclaim);
2133
2134	/*
2135	 * We should never get here with any of the reclaim flags already set.
2136	 */
2137	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2138
2139	need_inactive = xfs_inode_needs_inactive(ip);
2140	if (need_inactive) {
2141		xfs_inodegc_queue(ip);
2142		return;
2143	}
2144
2145	/* Going straight to reclaim, so drop the dquots. */
2146	xfs_qm_dqdetach(ip);
2147	xfs_inodegc_set_reclaimable(ip);
2148}
2149
2150/*
2151 * Register a phony shrinker so that we can run background inodegc sooner when
2152 * there's memory pressure.  Inactivation does not itself free any memory but
2153 * it does make inodes reclaimable, which eventually frees memory.
2154 *
2155 * The count function, seek value, and batch value are crafted to trigger the
2156 * scan function during the second round of scanning.  Hopefully this means
2157 * that we reclaimed enough memory that initiating metadata transactions won't
2158 * make things worse.
2159 */
2160#define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
2161#define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
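
/*
 * Worked example, assuming the current do_shrink_slab() arithmetic: with
 * DEF_PRIORITY == 12 the count callback returns 4096, and a shrinker with
 * ->seeks == 0 is asked to scan freeable / 2 == 2048 objects per pass.
 * That is one short of the batch size (2049), so the first pass only
 * defers the work; the second pass brings the deferred total to 4096,
 * crosses the batch threshold and invokes the scan function.
 */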
2162
2163static unsigned long
2164xfs_inodegc_shrinker_count(
2165	struct shrinker		*shrink,
2166	struct shrink_control	*sc)
2167{
2168	struct xfs_mount	*mp = shrink->private_data;
2169	struct xfs_inodegc	*gc;
2170	int			cpu;
2171
2172	if (!xfs_is_inodegc_enabled(mp))
2173		return 0;
2174
2175	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
2176		gc = per_cpu_ptr(mp->m_inodegc, cpu);
2177		if (!llist_empty(&gc->list))
2178			return XFS_INODEGC_SHRINKER_COUNT;
2179	}
2180
2181	return 0;
2182}
2183
2184static unsigned long
2185xfs_inodegc_shrinker_scan(
2186	struct shrinker		*shrink,
2187	struct shrink_control	*sc)
2188{
2189	struct xfs_mount	*mp = shrink->private_data;
2190	struct xfs_inodegc	*gc;
2191	int			cpu;
2192	bool			no_items = true;
2193
2194	if (!xfs_is_inodegc_enabled(mp))
2195		return SHRINK_STOP;
2196
2197	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
2198
2199	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
2200		gc = per_cpu_ptr(mp->m_inodegc, cpu);
2201		if (!llist_empty(&gc->list)) {
2202			unsigned int	h = READ_ONCE(gc->shrinker_hits);
2203
2204			WRITE_ONCE(gc->shrinker_hits, h + 1);
2205			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
2206			no_items = false;
2207		}
2208	}
2209
2210	/*
2211	 * If there are no inodes to inactivate, we don't want the shrinker
2212	 * to think there's deferred work to call us back about.
2213	 */
2214	if (no_items)
2215		return LONG_MAX;
2216
2217	return SHRINK_STOP;
2218}
2219
2220/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
2221int
2222xfs_inodegc_register_shrinker(
2223	struct xfs_mount	*mp)
2224{
2225	mp->m_inodegc_shrinker = shrinker_alloc(SHRINKER_NONSLAB,
2226						"xfs-inodegc:%s",
2227						mp->m_super->s_id);
2228	if (!mp->m_inodegc_shrinker)
2229		return -ENOMEM;
2230
2231	mp->m_inodegc_shrinker->count_objects = xfs_inodegc_shrinker_count;
2232	mp->m_inodegc_shrinker->scan_objects = xfs_inodegc_shrinker_scan;
2233	mp->m_inodegc_shrinker->seeks = 0;
2234	mp->m_inodegc_shrinker->batch = XFS_INODEGC_SHRINKER_BATCH;
2235	mp->m_inodegc_shrinker->private_data = mp;
2236
2237	shrinker_register(mp->m_inodegc_shrinker);
2238
2239	return 0;
2240}
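
/*
 * Note (not shown here): per the shrinker_alloc()/shrinker_register()
 * API, teardown is expected to release this shrinker with
 * shrinker_free(mp->m_inodegc_shrinker).
 */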