   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_mount.h"
  13#include "xfs_inode.h"
  14#include "xfs_trans.h"
  15#include "xfs_trans_priv.h"
  16#include "xfs_inode_item.h"
  17#include "xfs_quota.h"
  18#include "xfs_trace.h"
  19#include "xfs_icache.h"
  20#include "xfs_bmap_util.h"
  21#include "xfs_dquot_item.h"
  22#include "xfs_dquot.h"
  23#include "xfs_reflink.h"
  24#include "xfs_ialloc.h"
  25#include "xfs_ag.h"
  26#include "xfs_log_priv.h"
  27
  28#include <linux/iversion.h>
  29
  30/* Radix tree tags for incore inode tree. */
  31
  32/* inode is to be reclaimed */
  33#define XFS_ICI_RECLAIM_TAG	0
  34/* Inode has speculative preallocations (posteof or cow) to clean. */
  35#define XFS_ICI_BLOCKGC_TAG	1
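/*
 * Note that these tags are set on inodes in each per-AG radix tree and are
 * also propagated up into the per-mount perag radix tree (see
 * xfs_perag_set_inode_tag() below), so the background workers can cheaply
 * find the AGs that actually have reclaim or blockgc work pending.
 */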
  36
  37/*
  38 * The goal for walking incore inodes.  These can correspond with incore inode
  39 * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
  40 */
  41enum xfs_icwalk_goal {
  42	/* Goals directly associated with tagged inodes. */
  43	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
  44	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
  45};
  46
  47static int xfs_icwalk(struct xfs_mount *mp,
  48		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
  49static int xfs_icwalk_ag(struct xfs_perag *pag,
  50		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
  51
  52/*
  53 * Private inode cache walk flags for struct xfs_icwalk.  Must not
  54 * coincide with XFS_ICWALK_FLAGS_VALID.
  55 */
  56
  57/* Stop scanning after icw_scan_limit inodes. */
  58#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)
  59
  60#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
  61#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */
  62
  63#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
  64					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
  65					 XFS_ICWALK_FLAG_UNION)
  66
  67/*
  68 * Allocate and initialise an xfs_inode.
  69 */
  70struct xfs_inode *
  71xfs_inode_alloc(
  72	struct xfs_mount	*mp,
  73	xfs_ino_t		ino)
  74{
  75	struct xfs_inode	*ip;
  76
  77	/*
  78	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
   79	 * and return NULL here on ENOMEM.
   80	 */
  81	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);
   82
   83	if (inode_init_always(mp->m_super, VFS_I(ip))) {
  84		kmem_cache_free(xfs_inode_cache, ip);
  85		return NULL;
  86	}
  87
  88	/* VFS doesn't initialise i_mode or i_state! */
  89	VFS_I(ip)->i_mode = 0;
  90	VFS_I(ip)->i_state = 0;
  91	mapping_set_large_folios(VFS_I(ip)->i_mapping);
  92
  93	XFS_STATS_INC(mp, vn_active);
   94	ASSERT(atomic_read(&ip->i_pincount) == 0);
   95	ASSERT(ip->i_ino == 0);
   96
   97	/* initialise the xfs inode */
  98	ip->i_ino = ino;
  99	ip->i_mount = mp;
 100	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
 101	ip->i_cowfp = NULL;
 102	memset(&ip->i_af, 0, sizeof(ip->i_af));
 103	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
 104	memset(&ip->i_df, 0, sizeof(ip->i_df));
 105	ip->i_flags = 0;
 106	ip->i_delayed_blks = 0;
 107	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
 108	ip->i_nblocks = 0;
 109	ip->i_forkoff = 0;
 110	ip->i_sick = 0;
 111	ip->i_checked = 0;
 112	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
 113	INIT_LIST_HEAD(&ip->i_ioend_list);
 114	spin_lock_init(&ip->i_ioend_lock);
 115	ip->i_next_unlinked = NULLAGINO;
 116	ip->i_prev_unlinked = NULLAGINO;
 117
 118	return ip;
 119}
 120
 121STATIC void
 122xfs_inode_free_callback(
 123	struct rcu_head		*head)
 124{
 125	struct inode		*inode = container_of(head, struct inode, i_rcu);
 126	struct xfs_inode	*ip = XFS_I(inode);
  127
  128	switch (VFS_I(ip)->i_mode & S_IFMT) {
 129	case S_IFREG:
 130	case S_IFDIR:
 131	case S_IFLNK:
 132		xfs_idestroy_fork(&ip->i_df);
 133		break;
 134	}
 135
  136	xfs_ifork_zap_attr(ip);
  137
 138	if (ip->i_cowfp) {
 139		xfs_idestroy_fork(ip->i_cowfp);
 140		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
 141	}
 142	if (ip->i_itemp) {
 143		ASSERT(!test_bit(XFS_LI_IN_AIL,
 144				 &ip->i_itemp->ili_item.li_flags));
 145		xfs_inode_item_destroy(ip);
 146		ip->i_itemp = NULL;
 147	}
 148
 149	kmem_cache_free(xfs_inode_cache, ip);
 150}
 151
 152static void
 153__xfs_inode_free(
 154	struct xfs_inode	*ip)
 155{
 156	/* asserts to verify all state is correct here */
 157	ASSERT(atomic_read(&ip->i_pincount) == 0);
 158	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
 159	XFS_STATS_DEC(ip->i_mount, vn_active);
 160
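	/*
	 * Defer the actual free to an RCU grace period so that lockless
	 * radix tree lookups that may still hold a pointer to this inode
	 * never touch freed memory.
	 */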
 161	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 162}
 163
 164void
 165xfs_inode_free(
 166	struct xfs_inode	*ip)
 167{
 168	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
 169
 170	/*
 171	 * Because we use RCU freeing we need to ensure the inode always
 172	 * appears to be reclaimed with an invalid inode number when in the
 173	 * free state. The ip->i_flags_lock provides the barrier against lookup
 174	 * races.
 175	 */
 176	spin_lock(&ip->i_flags_lock);
 177	ip->i_flags = XFS_IRECLAIM;
 178	ip->i_ino = 0;
 179	spin_unlock(&ip->i_flags_lock);
 180
 181	__xfs_inode_free(ip);
 182}
 183
 184/*
 185 * Queue background inode reclaim work if there are reclaimable inodes and there
 186 * isn't reclaim work already scheduled or in progress.
 187 */
 188static void
 189xfs_reclaim_work_queue(
 190	struct xfs_mount        *mp)
 191{
 192
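	/*
	 * The requeue delay below is one sixth of the xfssyncd period:
	 * xfs_syncd_centisecs is in centiseconds, so the multiply by 10
	 * converts it to milliseconds.
	 */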
 193	rcu_read_lock();
 194	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
 195		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
 196			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
 197	}
 198	rcu_read_unlock();
 199}
 200
 201/*
 202 * Background scanning to trim preallocated space. This is queued based on the
 203 * 'speculative_prealloc_lifetime' tunable (5m by default).
 204 */
 205static inline void
 206xfs_blockgc_queue(
 207	struct xfs_perag	*pag)
 208{
 209	struct xfs_mount	*mp = pag->pag_mount;
 210
 211	if (!xfs_is_blockgc_enabled(mp))
 212		return;
 213
 214	rcu_read_lock();
 215	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
 216		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
 217				   &pag->pag_blockgc_work,
 218				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
 219	rcu_read_unlock();
 220}
 221
 222/* Set a tag on both the AG incore inode tree and the AG radix tree. */
 223static void
 224xfs_perag_set_inode_tag(
 225	struct xfs_perag	*pag,
 226	xfs_agino_t		agino,
 227	unsigned int		tag)
 228{
 229	struct xfs_mount	*mp = pag->pag_mount;
 230	bool			was_tagged;
 231
 232	lockdep_assert_held(&pag->pag_ici_lock);
 233
 234	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
 235	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);
 236
 237	if (tag == XFS_ICI_RECLAIM_TAG)
 238		pag->pag_ici_reclaimable++;
 239
 240	if (was_tagged)
 241		return;
 242
 243	/* propagate the tag up into the perag radix tree */
 244	spin_lock(&mp->m_perag_lock);
 245	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
 246	spin_unlock(&mp->m_perag_lock);
 247
 248	/* start background work */
 249	switch (tag) {
 250	case XFS_ICI_RECLAIM_TAG:
 251		xfs_reclaim_work_queue(mp);
 252		break;
 253	case XFS_ICI_BLOCKGC_TAG:
 254		xfs_blockgc_queue(pag);
 255		break;
 256	}
 257
 258	trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
 259}
 260
 261/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
 262static void
 263xfs_perag_clear_inode_tag(
 264	struct xfs_perag	*pag,
 265	xfs_agino_t		agino,
 266	unsigned int		tag)
 267{
 268	struct xfs_mount	*mp = pag->pag_mount;
 269
 270	lockdep_assert_held(&pag->pag_ici_lock);
 271
 272	/*
 273	 * Reclaim can signal (with a null agino) that it cleared its own tag
 274	 * by removing the inode from the radix tree.
 275	 */
 276	if (agino != NULLAGINO)
 277		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
 278	else
 279		ASSERT(tag == XFS_ICI_RECLAIM_TAG);
 280
 281	if (tag == XFS_ICI_RECLAIM_TAG)
 282		pag->pag_ici_reclaimable--;
 283
 284	if (radix_tree_tagged(&pag->pag_ici_root, tag))
 285		return;
 286
 287	/* clear the tag from the perag radix tree */
 288	spin_lock(&mp->m_perag_lock);
 289	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
 290	spin_unlock(&mp->m_perag_lock);
 291
 292	trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
 293}
 294
 295/*
 296 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 297 * part of the structure. This is made more complex by the fact we store
 298 * information about the on-disk values in the VFS inode and so we can't just
 299 * overwrite the values unconditionally. Hence we save the parameters we
 300 * need to retain across reinitialisation, and rewrite them into the VFS inode
 301 * after reinitialisation even if it fails.
 302 */
 303static int
 304xfs_reinit_inode(
 305	struct xfs_mount	*mp,
 306	struct inode		*inode)
 307{
 308	int			error;
 309	uint32_t		nlink = inode->i_nlink;
 310	uint32_t		generation = inode->i_generation;
 311	uint64_t		version = inode_peek_iversion(inode);
 312	umode_t			mode = inode->i_mode;
 313	dev_t			dev = inode->i_rdev;
 314	kuid_t			uid = inode->i_uid;
 315	kgid_t			gid = inode->i_gid;
 316
 317	error = inode_init_always(mp->m_super, inode);
 318
 319	set_nlink(inode, nlink);
 320	inode->i_generation = generation;
 321	inode_set_iversion_queried(inode, version);
 322	inode->i_mode = mode;
 323	inode->i_rdev = dev;
 324	inode->i_uid = uid;
 325	inode->i_gid = gid;
 326	mapping_set_large_folios(inode->i_mapping);
 327	return error;
 328}
 329
 330/*
 331 * Carefully nudge an inode whose VFS state has been torn down back into a
 332 * usable state.  Drops the i_flags_lock and the rcu read lock.
 333 */
 334static int
 335xfs_iget_recycle(
 336	struct xfs_perag	*pag,
 337	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
 338{
 339	struct xfs_mount	*mp = ip->i_mount;
 340	struct inode		*inode = VFS_I(ip);
 341	int			error;
 342
 343	trace_xfs_iget_recycle(ip);
 344
 345	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 346		return -EAGAIN;
 347
 348	/*
 349	 * We need to make it look like the inode is being reclaimed to prevent
 350	 * the actual reclaim workers from stomping over us while we recycle
 351	 * the inode.  We can't clear the radix tree tag yet as it requires
 352	 * pag_ici_lock to be held exclusive.
 353	 */
 354	ip->i_flags |= XFS_IRECLAIM;
 355
 356	spin_unlock(&ip->i_flags_lock);
 357	rcu_read_unlock();
 358
 359	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 360	error = xfs_reinit_inode(mp, inode);
 361	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 362	if (error) {
 363		/*
 364		 * Re-initializing the inode failed, and we are in deep
 365		 * trouble.  Try to re-add it to the reclaim list.
 366		 */
 367		rcu_read_lock();
 368		spin_lock(&ip->i_flags_lock);
 369		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
 370		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
 371		spin_unlock(&ip->i_flags_lock);
 372		rcu_read_unlock();
 373
 374		trace_xfs_iget_recycle_fail(ip);
 375		return error;
 376	}
 377
 378	spin_lock(&pag->pag_ici_lock);
 379	spin_lock(&ip->i_flags_lock);
 380
 381	/*
 382	 * Clear the per-lifetime state in the inode as we are now effectively
 383	 * a new inode and need to return to the initial state before reuse
 384	 * occurs.
 385	 */
 386	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
 387	ip->i_flags |= XFS_INEW;
 388	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
 389			XFS_ICI_RECLAIM_TAG);
 390	inode->i_state = I_NEW;
 391	spin_unlock(&ip->i_flags_lock);
 392	spin_unlock(&pag->pag_ici_lock);
 393
 394	return 0;
 395}
 396
 397/*
  398 * If we are allocating a new inode, then check that what was returned is
 399 * actually a free, empty inode. If we are not allocating an inode,
 400 * then check we didn't find a free inode.
 401 *
 402 * Returns:
 403 *	0		if the inode free state matches the lookup context
 404 *	-ENOENT		if the inode is free and we are not allocating
 405 *	-EFSCORRUPTED	if there is any state mismatch at all
 406 */
 407static int
 408xfs_iget_check_free_state(
 409	struct xfs_inode	*ip,
 410	int			flags)
 411{
 412	if (flags & XFS_IGET_CREATE) {
 413		/* should be a free inode */
 414		if (VFS_I(ip)->i_mode != 0) {
 415			xfs_warn(ip->i_mount,
 416"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
 417				ip->i_ino, VFS_I(ip)->i_mode);
 418			return -EFSCORRUPTED;
 419		}
 420
 421		if (ip->i_nblocks != 0) {
 422			xfs_warn(ip->i_mount,
 423"Corruption detected! Free inode 0x%llx has blocks allocated!",
 424				ip->i_ino);
 425			return -EFSCORRUPTED;
 426		}
 427		return 0;
 428	}
 429
 430	/* should be an allocated inode */
 431	if (VFS_I(ip)->i_mode == 0)
 432		return -ENOENT;
 433
 434	return 0;
 435}
 436
 437/* Make all pending inactivation work start immediately. */
 438static void
 439xfs_inodegc_queue_all(
 440	struct xfs_mount	*mp)
 441{
 442	struct xfs_inodegc	*gc;
 443	int			cpu;
 444
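	/* A zero delay pulls any already-queued work forward so it runs now. */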
 445	for_each_online_cpu(cpu) {
 446		gc = per_cpu_ptr(mp->m_inodegc, cpu);
 447		if (!llist_empty(&gc->list))
 448			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
 449	}
 450}
 451
 452/*
  453 * Check the validity of the inode we just found in the cache
 454 */
 455static int
 456xfs_iget_cache_hit(
 457	struct xfs_perag	*pag,
 458	struct xfs_inode	*ip,
 459	xfs_ino_t		ino,
 460	int			flags,
 461	int			lock_flags) __releases(RCU)
 462{
 463	struct inode		*inode = VFS_I(ip);
 464	struct xfs_mount	*mp = ip->i_mount;
 465	int			error;
 466
 467	/*
 468	 * check for re-use of an inode within an RCU grace period due to the
 469	 * radix tree nodes not being updated yet. We monitor for this by
 470	 * setting the inode number to zero before freeing the inode structure.
 471	 * If the inode has been reallocated and set up, then the inode number
 472	 * will not match, so check for that, too.
 473	 */
 474	spin_lock(&ip->i_flags_lock);
 475	if (ip->i_ino != ino)
  476		goto out_skip;
  477
 478	/*
 479	 * If we are racing with another cache hit that is currently
 480	 * instantiating this inode or currently recycling it out of
 481	 * reclaimable state, wait for the initialisation to complete
 482	 * before continuing.
 483	 *
 484	 * If we're racing with the inactivation worker we also want to wait.
 485	 * If we're creating a new file, it's possible that the worker
 486	 * previously marked the inode as free on disk but hasn't finished
 487	 * updating the incore state yet.  The AGI buffer will be dirty and
 488	 * locked to the icreate transaction, so a synchronous push of the
 489	 * inodegc workers would result in deadlock.  For a regular iget, the
 490	 * worker is running already, so we might as well wait.
 491	 *
 492	 * XXX(hch): eventually we should do something equivalent to
 493	 *	     wait_on_inode to wait for these flags to be cleared
 494	 *	     instead of polling for it.
 495	 */
 496	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
 497		goto out_skip;
 498
 499	if (ip->i_flags & XFS_NEED_INACTIVE) {
 500		/* Unlinked inodes cannot be re-grabbed. */
 501		if (VFS_I(ip)->i_nlink == 0) {
 502			error = -ENOENT;
 503			goto out_error;
 504		}
 505		goto out_inodegc_flush;
 506	}
 507
 508	/*
 509	 * Check the inode free state is valid. This also detects lookup
 510	 * racing with unlinks.
 511	 */
 512	error = xfs_iget_check_free_state(ip, flags);
 513	if (error)
  514		goto out_error;
  515
 516	/* Skip inodes that have no vfs state. */
 517	if ((flags & XFS_IGET_INCORE) &&
 518	    (ip->i_flags & XFS_IRECLAIMABLE))
 519		goto out_skip;
 520
 521	/* The inode fits the selection criteria; process it. */
 522	if (ip->i_flags & XFS_IRECLAIMABLE) {
 523		/* Drops i_flags_lock and RCU read lock. */
 524		error = xfs_iget_recycle(pag, ip);
 525		if (error == -EAGAIN)
 526			goto out_skip;
 527		if (error)
  528			return error;
  529	} else {
 530		/* If the VFS inode is being torn down, pause and try again. */
 531		if (!igrab(inode))
  532			goto out_skip;
  533
 534		/* We've got a live one. */
 535		spin_unlock(&ip->i_flags_lock);
 536		rcu_read_unlock();
 537		trace_xfs_iget_hit(ip);
 538	}
 539
 540	if (lock_flags != 0)
 541		xfs_ilock(ip, lock_flags);
 542
 543	if (!(flags & XFS_IGET_INCORE))
 544		xfs_iflags_clear(ip, XFS_ISTALE);
 545	XFS_STATS_INC(mp, xs_ig_found);
 546
 547	return 0;
 548
 549out_skip:
 550	trace_xfs_iget_skip(ip);
 551	XFS_STATS_INC(mp, xs_ig_frecycle);
 552	error = -EAGAIN;
 553out_error:
 554	spin_unlock(&ip->i_flags_lock);
 555	rcu_read_unlock();
 556	return error;
 557
 558out_inodegc_flush:
 559	spin_unlock(&ip->i_flags_lock);
 560	rcu_read_unlock();
 561	/*
 562	 * Do not wait for the workers, because the caller could hold an AGI
 563	 * buffer lock.  We're just going to sleep in a loop anyway.
 564	 */
 565	if (xfs_is_inodegc_enabled(mp))
 566		xfs_inodegc_queue_all(mp);
 567	return -EAGAIN;
 568}
  569
  570static int
 571xfs_iget_cache_miss(
 572	struct xfs_mount	*mp,
 573	struct xfs_perag	*pag,
 574	xfs_trans_t		*tp,
 575	xfs_ino_t		ino,
 576	struct xfs_inode	**ipp,
 577	int			flags,
 578	int			lock_flags)
 579{
 580	struct xfs_inode	*ip;
 581	int			error;
 582	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
 583	int			iflags;
 584
 585	ip = xfs_inode_alloc(mp, ino);
 586	if (!ip)
 587		return -ENOMEM;
 588
 589	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
 590	if (error)
 591		goto out_destroy;
 592
 593	/*
 594	 * For version 5 superblocks, if we are initialising a new inode and we
 595	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
 596	 * simply build the new inode core with a random generation number.
 597	 *
 598	 * For version 4 (and older) superblocks, log recovery is dependent on
 599	 * the i_flushiter field being initialised from the current on-disk
 600	 * value and hence we must also read the inode off disk even when
 601	 * initializing new inodes.
 602	 */
 603	if (xfs_has_v3inodes(mp) &&
 604	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
 605		VFS_I(ip)->i_generation = get_random_u32();
 606	} else {
 607		struct xfs_buf		*bp;
 608
 609		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
 610		if (error)
 611			goto out_destroy;
 612
 613		error = xfs_inode_from_disk(ip,
 614				xfs_buf_offset(bp, ip->i_imap.im_boffset));
 615		if (!error)
 616			xfs_buf_set_ref(bp, XFS_INO_REF);
 617		xfs_trans_brelse(tp, bp);
 618
 619		if (error)
 620			goto out_destroy;
 621	}
 622
 623	trace_xfs_iget_miss(ip);
 624
 625	/*
 626	 * Check the inode free state is valid. This also detects lookup
 627	 * racing with unlinks.
 628	 */
 629	error = xfs_iget_check_free_state(ip, flags);
 630	if (error)
  631		goto out_destroy;
  632
 633	/*
 634	 * Preload the radix tree so we can insert safely under the
 635	 * write spinlock. Note that we cannot sleep inside the preload
 636	 * region. Since we can be called from transaction context, don't
 637	 * recurse into the file system.
 638	 */
 639	if (radix_tree_preload(GFP_NOFS)) {
 640		error = -EAGAIN;
 641		goto out_destroy;
 642	}
 643
 644	/*
 645	 * Because the inode hasn't been added to the radix-tree yet it can't
 646	 * be found by another thread, so we can do the non-sleeping lock here.
 647	 */
 648	if (lock_flags) {
 649		if (!xfs_ilock_nowait(ip, lock_flags))
 650			BUG();
 651	}
 652
 653	/*
 654	 * These values must be set before inserting the inode into the radix
 655	 * tree as the moment it is inserted a concurrent lookup (allowed by the
 656	 * RCU locking mechanism) can find it and that lookup must see that this
 657	 * is an inode currently under construction (i.e. that XFS_INEW is set).
 658	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
 659	 * memory barrier that ensures this detection works correctly at lookup
 660	 * time.
 661	 */
 662	iflags = XFS_INEW;
 663	if (flags & XFS_IGET_DONTCACHE)
 664		d_mark_dontcache(VFS_I(ip));
 665	ip->i_udquot = NULL;
 666	ip->i_gdquot = NULL;
 667	ip->i_pdquot = NULL;
 668	xfs_iflags_set(ip, iflags);
 669
 670	/* insert the new inode */
 671	spin_lock(&pag->pag_ici_lock);
 672	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
 673	if (unlikely(error)) {
 674		WARN_ON(error != -EEXIST);
 675		XFS_STATS_INC(mp, xs_ig_dup);
 676		error = -EAGAIN;
 677		goto out_preload_end;
 678	}
 679	spin_unlock(&pag->pag_ici_lock);
 680	radix_tree_preload_end();
 681
 682	*ipp = ip;
 683	return 0;
 684
 685out_preload_end:
 686	spin_unlock(&pag->pag_ici_lock);
 687	radix_tree_preload_end();
 688	if (lock_flags)
 689		xfs_iunlock(ip, lock_flags);
 690out_destroy:
 691	__destroy_inode(VFS_I(ip));
 692	xfs_inode_free(ip);
 693	return error;
 694}
 695
 696/*
 697 * Look up an inode by number in the given file system.  The inode is looked up
 698 * in the cache held in each AG.  If the inode is found in the cache, initialise
  699 * the vfs inode if necessary.
  700 *
 701 * If it is not in core, read it in from the file system's device, add it to the
 702 * cache and initialise the vfs inode.
 703 *
 704 * The inode is locked according to the value of the lock_flags parameter.
 705 * Inode lookup is only done during metadata operations and not as part of the
  706 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
  707 */
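/*
 * Illustrative sketch of a typical caller (not taken from this file): a
 * metadata operation that needs an inode locked exclusively might do
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);
 *
 * where the reference taken by xfs_iget() is dropped with xfs_irele() once
 * the caller is done with the inode.
 */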
 708int
 709xfs_iget(
 710	struct xfs_mount	*mp,
 711	struct xfs_trans	*tp,
 712	xfs_ino_t		ino,
 713	uint			flags,
 714	uint			lock_flags,
 715	struct xfs_inode	**ipp)
 716{
 717	struct xfs_inode	*ip;
 718	struct xfs_perag	*pag;
 719	xfs_agino_t		agino;
 720	int			error;
  721
  722	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
 723
 724	/* reject inode numbers outside existing AGs */
 725	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
 726		return -EINVAL;
 727
 728	XFS_STATS_INC(mp, xs_ig_attempts);
 729
 730	/* get the perag structure and ensure that it's inode capable */
 731	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
 732	agino = XFS_INO_TO_AGINO(mp, ino);
 733
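	/*
	 * The "again" loop retries the lookup after a short delay whenever we
	 * race with reclaim or recycling and get -EAGAIN back (see
	 * out_error_or_again below).
	 */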
 734again:
 735	error = 0;
 736	rcu_read_lock();
 737	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 738
 739	if (ip) {
 740		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
 741		if (error)
 742			goto out_error_or_again;
 743	} else {
 744		rcu_read_unlock();
 745		if (flags & XFS_IGET_INCORE) {
 746			error = -ENODATA;
 747			goto out_error_or_again;
 748		}
 749		XFS_STATS_INC(mp, xs_ig_missed);
 750
 751		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
 752							flags, lock_flags);
 753		if (error)
 754			goto out_error_or_again;
 755	}
 756	xfs_perag_put(pag);
 757
 758	*ipp = ip;
 759
 760	/*
  761	 * If we have a real type for an on-disk inode, we can set up the inode
 762	 * now.	 If it's a new inode being created, xfs_init_new_inode will
 763	 * handle it.
 764	 */
 765	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
 766		xfs_setup_existing_inode(ip);
 767	return 0;
 768
 769out_error_or_again:
 770	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
 771		delay(1);
 772		goto again;
 773	}
 774	xfs_perag_put(pag);
 775	return error;
 776}
 777
 778/*
 779 * "Is this a cached inode that's also allocated?"
 780 *
 781 * Look up an inode by number in the given file system.  If the inode is
  782 * in cache and isn't in purgatory, set *inuse to true if the inode is
  783 * allocated and to false if it is not.  For all other cases (not in
  784 * cache, being torn down, etc.), return a negative error code.
 785 *
 786 * The caller has to prevent inode allocation and freeing activity,
 787 * presumably by locking the AGI buffer.   This is to ensure that an
 788 * inode cannot transition from allocated to freed until the caller is
 789 * ready to allow that.  If the inode is in an intermediate state (new,
 790 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 791 * inode is not in the cache, -ENOENT will be returned.  The caller must
 792 * deal with these scenarios appropriately.
 793 *
 794 * This is a specialized use case for the online scrubber; if you're
 795 * reading this, you probably want xfs_iget.
 796 */
 797int
 798xfs_icache_inode_is_allocated(
 799	struct xfs_mount	*mp,
 800	struct xfs_trans	*tp,
 801	xfs_ino_t		ino,
 802	bool			*inuse)
 803{
 804	struct xfs_inode	*ip;
 805	int			error;
 806
 807	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
 808	if (error)
 809		return error;
 810
 811	*inuse = !!(VFS_I(ip)->i_mode);
 812	xfs_irele(ip);
 813	return 0;
 814}
 815
 816/*
 817 * Grab the inode for reclaim exclusively.
 818 *
 819 * We have found this inode via a lookup under RCU, so the inode may have
 820 * already been freed, or it may be in the process of being recycled by
 821 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 822 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 823 * will not be set. Hence we need to check for both these flag conditions to
 824 * avoid inodes that are no longer reclaim candidates.
 825 *
 826 * Note: checking for other state flags here, under the i_flags_lock or not, is
 827 * racy and should be avoided. Those races should be resolved only after we have
 828 * ensured that we are able to reclaim this inode and the world can see that we
 829 * are going to reclaim it.
 830 *
 831 * Return true if we grabbed it, false otherwise.
 832 */
 833static bool
 834xfs_reclaim_igrab(
 835	struct xfs_inode	*ip,
 836	struct xfs_icwalk	*icw)
 837{
 838	ASSERT(rcu_read_lock_held());
 839
 840	spin_lock(&ip->i_flags_lock);
 841	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
 842	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
 843		/* not a reclaim candidate. */
 844		spin_unlock(&ip->i_flags_lock);
 845		return false;
 846	}
 847
 848	/* Don't reclaim a sick inode unless the caller asked for it. */
 849	if (ip->i_sick &&
 850	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
 851		spin_unlock(&ip->i_flags_lock);
 852		return false;
 853	}
 854
 855	__xfs_iflags_set(ip, XFS_IRECLAIM);
 856	spin_unlock(&ip->i_flags_lock);
 857	return true;
 858}
 859
 860/*
 861 * Inode reclaim is non-blocking, so the default action if progress cannot be
 862 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 863 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
  864 * blocking anymore and hence we can wait for the inode in order to
  865 * reclaim it.
 866 *
 867 * We do no IO here - if callers require inodes to be cleaned they must push the
 868 * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
 869 * done in the background in a non-blocking manner, and enables memory reclaim
 870 * to make progress without blocking.
 871 */
 872static void
 873xfs_reclaim_inode(
 874	struct xfs_inode	*ip,
 875	struct xfs_perag	*pag)
 876{
 877	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
 878
 879	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 880		goto out;
 881	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
 882		goto out_iunlock;
 883
 884	/*
 885	 * Check for log shutdown because aborting the inode can move the log
  886	 * tail and corrupt in-memory state. This is fine if the log is shut
 887	 * down, but if the log is still active and only the mount is shut down
 888	 * then the in-memory log tail movement caused by the abort can be
 889	 * incorrectly propagated to disk.
 890	 */
 891	if (xlog_is_shutdown(ip->i_mount->m_log)) {
 892		xfs_iunpin_wait(ip);
 893		xfs_iflush_shutdown_abort(ip);
 894		goto reclaim;
 895	}
 896	if (xfs_ipincount(ip))
 897		goto out_clear_flush;
 898	if (!xfs_inode_clean(ip))
 899		goto out_clear_flush;
 900
 901	xfs_iflags_clear(ip, XFS_IFLUSHING);
 902reclaim:
 903	trace_xfs_inode_reclaiming(ip);
 904
 905	/*
 906	 * Because we use RCU freeing we need to ensure the inode always appears
 907	 * to be reclaimed with an invalid inode number when in the free state.
 908	 * We do this as early as possible under the ILOCK so that
 909	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
 910	 * detect races with us here. By doing this, we guarantee that once
 911	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
 912	 * it will see either a valid inode that will serialise correctly, or it
 913	 * will see an invalid inode that it can skip.
 914	 */
 915	spin_lock(&ip->i_flags_lock);
 916	ip->i_flags = XFS_IRECLAIM;
 917	ip->i_ino = 0;
 918	ip->i_sick = 0;
 919	ip->i_checked = 0;
 920	spin_unlock(&ip->i_flags_lock);
 921
 922	ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
 923	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 924
 925	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
 926	/*
 927	 * Remove the inode from the per-AG radix tree.
 928	 *
 929	 * Because radix_tree_delete won't complain even if the item was never
  930	 * added to the tree, assert that it's been there before to catch
  931	 * problems with the inode lifetime early on.
 932	 */
 933	spin_lock(&pag->pag_ici_lock);
 934	if (!radix_tree_delete(&pag->pag_ici_root,
 935				XFS_INO_TO_AGINO(ip->i_mount, ino)))
 936		ASSERT(0);
 937	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
 938	spin_unlock(&pag->pag_ici_lock);
 939
 940	/*
 941	 * Here we do an (almost) spurious inode lock in order to coordinate
 942	 * with inode cache radix tree lookups.  This is because the lookup
 943	 * can reference the inodes in the cache without taking references.
 944	 *
 945	 * We make that OK here by ensuring that we wait until the inode is
 946	 * unlocked after the lookup before we go ahead and free it.
 947	 */
 948	xfs_ilock(ip, XFS_ILOCK_EXCL);
 949	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
 950	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 951	ASSERT(xfs_inode_clean(ip));
 952
 953	__xfs_inode_free(ip);
 954	return;
 955
 956out_clear_flush:
 957	xfs_iflags_clear(ip, XFS_IFLUSHING);
 958out_iunlock:
 959	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 960out:
 961	xfs_iflags_clear(ip, XFS_IRECLAIM);
 962}
 963
 964/* Reclaim sick inodes if we're unmounting or the fs went down. */
 965static inline bool
 966xfs_want_reclaim_sick(
 967	struct xfs_mount	*mp)
 968{
 969	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
 970	       xfs_is_shutdown(mp);
 971}
 972
 973void
 974xfs_reclaim_inodes(
 975	struct xfs_mount	*mp)
 976{
 977	struct xfs_icwalk	icw = {
 978		.icw_flags	= 0,
 979	};
 980
 981	if (xfs_want_reclaim_sick(mp))
 982		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
 983
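	/*
	 * Loop until no AG is tagged for reclaim: push the AIL so dirty
	 * inodes get written back, then reclaim everything that is now clean.
	 */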
 984	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
 985		xfs_ail_push_all_sync(mp->m_ail);
 986		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
 987	}
 988}
 989
 990/*
 991 * The shrinker infrastructure determines how many inodes we should scan for
 992 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 993 * push the AIL here. We also want to proactively free up memory if we can to
 994 * minimise the amount of work memory reclaim has to do so we kick the
 995 * background reclaim if it isn't already scheduled.
 996 */
 997long
 998xfs_reclaim_inodes_nr(
 999	struct xfs_mount	*mp,
1000	unsigned long		nr_to_scan)
1001{
1002	struct xfs_icwalk	icw = {
1003		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
1004		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
1005	};
1006
1007	if (xfs_want_reclaim_sick(mp))
1008		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
1009
1010	/* kick background reclaimer and push the AIL */
1011	xfs_reclaim_work_queue(mp);
1012	xfs_ail_push_all(mp->m_ail);
1013
1014	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
1015	return 0;
1016}
1017
1018/*
1019 * Return the number of reclaimable inodes in the filesystem for
1020 * the shrinker to determine how much to reclaim.
1021 */
1022long
1023xfs_reclaim_inodes_count(
1024	struct xfs_mount	*mp)
1025{
1026	struct xfs_perag	*pag;
1027	xfs_agnumber_t		ag = 0;
1028	long			reclaimable = 0;
1029
1030	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1031		ag = pag->pag_agno + 1;
1032		reclaimable += pag->pag_ici_reclaimable;
1033		xfs_perag_put(pag);
1034	}
1035	return reclaimable;
1036}
1037
1038STATIC bool
1039xfs_icwalk_match_id(
1040	struct xfs_inode	*ip,
1041	struct xfs_icwalk	*icw)
1042{
1043	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1044	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1045		return false;
1046
1047	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1048	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1049		return false;
1050
1051	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1052	    ip->i_projid != icw->icw_prid)
1053		return false;
1054
1055	return true;
1056}
1057
1058/*
1059 * A union-based inode filtering algorithm. Process the inode if any of the
1060 * criteria match. This is for global/internal scans only.
1061 */
1062STATIC bool
1063xfs_icwalk_match_id_union(
1064	struct xfs_inode	*ip,
1065	struct xfs_icwalk	*icw)
1066{
1067	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1068	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1069		return true;
1070
1071	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1072	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1073		return true;
1074
1075	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1076	    ip->i_projid == icw->icw_prid)
1077		return true;
1078
1079	return false;
1080}
1081
1082/*
1083 * Is this inode @ip eligible for eof/cow block reclamation, given some
1084 * filtering parameters @icw?  The inode is eligible if @icw is null or
1085 * if the predicate functions match.
1086 */
1087static bool
1088xfs_icwalk_match(
1089	struct xfs_inode	*ip,
1090	struct xfs_icwalk	*icw)
1091{
1092	bool			match;
1093
1094	if (!icw)
1095		return true;
1096
1097	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
1098		match = xfs_icwalk_match_id_union(ip, icw);
1099	else
1100		match = xfs_icwalk_match_id(ip, icw);
1101	if (!match)
1102		return false;
1103
1104	/* skip the inode if the file size is too small */
1105	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
1106	    XFS_ISIZE(ip) < icw->icw_min_file_size)
1107		return false;
1108
1109	return true;
1110}
1111
1112/*
1113 * This is a fast pass over the inode cache to try to get reclaim moving on as
1114 * many inodes as possible in a short period of time. It kicks itself every few
1115 * seconds, as well as being kicked by the inode cache shrinker when memory
1116 * goes low.
1117 */
1118void
1119xfs_reclaim_worker(
1120	struct work_struct *work)
1121{
1122	struct xfs_mount *mp = container_of(to_delayed_work(work),
1123					struct xfs_mount, m_reclaim_work);
1124
1125	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
1126	xfs_reclaim_work_queue(mp);
1127}
1128
1129STATIC int
1130xfs_inode_free_eofblocks(
1131	struct xfs_inode	*ip,
1132	struct xfs_icwalk	*icw,
1133	unsigned int		*lockflags)
1134{
1135	bool			wait;
1136
1137	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1138
1139	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1140		return 0;
1141
1142	/*
1143	 * If the mapping is dirty the operation can block and wait for some
1144	 * time. Unless we are waiting, skip it.
1145	 */
1146	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1147		return 0;
1148
1149	if (!xfs_icwalk_match(ip, icw))
1150		return 0;
1151
1152	/*
1153	 * If the caller is waiting, return -EAGAIN to keep the background
1154	 * scanner moving and revisit the inode in a subsequent pass.
1155	 */
1156	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1157		if (wait)
1158			return -EAGAIN;
1159		return 0;
1160	}
1161	*lockflags |= XFS_IOLOCK_EXCL;
1162
1163	if (xfs_can_free_eofblocks(ip, false))
1164		return xfs_free_eofblocks(ip);
1165
1166	/* inode could be preallocated or append-only */
1167	trace_xfs_inode_free_eofblocks_invalid(ip);
1168	xfs_inode_clear_eofblocks_tag(ip);
1169	return 0;
1170}
1171
1172static void
1173xfs_blockgc_set_iflag(
1174	struct xfs_inode	*ip,
1175	unsigned long		iflag)
1176{
1177	struct xfs_mount	*mp = ip->i_mount;
1178	struct xfs_perag	*pag;
1179
1180	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1181
1182	/*
1183	 * Don't bother locking the AG and looking up in the radix trees
1184	 * if we already know that we have the tag set.
1185	 */
1186	if (ip->i_flags & iflag)
1187		return;
1188	spin_lock(&ip->i_flags_lock);
1189	ip->i_flags |= iflag;
1190	spin_unlock(&ip->i_flags_lock);
1191
1192	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1193	spin_lock(&pag->pag_ici_lock);
1194
1195	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1196			XFS_ICI_BLOCKGC_TAG);
1197
1198	spin_unlock(&pag->pag_ici_lock);
1199	xfs_perag_put(pag);
1200}
1201
1202void
1203xfs_inode_set_eofblocks_tag(
1204	xfs_inode_t	*ip)
1205{
1206	trace_xfs_inode_set_eofblocks_tag(ip);
1207	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
1208}
1209
1210static void
1211xfs_blockgc_clear_iflag(
1212	struct xfs_inode	*ip,
1213	unsigned long		iflag)
1214{
1215	struct xfs_mount	*mp = ip->i_mount;
1216	struct xfs_perag	*pag;
1217	bool			clear_tag;
1218
1219	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1220
1221	spin_lock(&ip->i_flags_lock);
1222	ip->i_flags &= ~iflag;
1223	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
1224	spin_unlock(&ip->i_flags_lock);
1225
1226	if (!clear_tag)
1227		return;
1228
1229	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1230	spin_lock(&pag->pag_ici_lock);
1231
1232	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1233			XFS_ICI_BLOCKGC_TAG);
1234
1235	spin_unlock(&pag->pag_ici_lock);
1236	xfs_perag_put(pag);
1237}
1238
1239void
1240xfs_inode_clear_eofblocks_tag(
1241	xfs_inode_t	*ip)
1242{
1243	trace_xfs_inode_clear_eofblocks_tag(ip);
1244	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
1245}
1246
1247/*
1248 * Set ourselves up to free CoW blocks from this file.  If it's already clean
1249 * then we can bail out quickly, but otherwise we must back off if the file
1250 * is undergoing some kind of write.
1251 */
1252static bool
1253xfs_prep_free_cowblocks(
1254	struct xfs_inode	*ip)
1255{
1256	/*
1257	 * Just clear the tag if we have an empty cow fork or none at all. It's
1258	 * possible the inode was fully unshared since it was originally tagged.
1259	 */
1260	if (!xfs_inode_has_cow_data(ip)) {
1261		trace_xfs_inode_free_cowblocks_invalid(ip);
1262		xfs_inode_clear_cowblocks_tag(ip);
1263		return false;
1264	}
1265
1266	/*
1267	 * If the mapping is dirty or under writeback we cannot touch the
1268	 * CoW fork.  Leave it alone if we're in the midst of a directio.
1269	 */
1270	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1271	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1272	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1273	    atomic_read(&VFS_I(ip)->i_dio_count))
1274		return false;
1275
1276	return true;
1277}
1278
1279/*
1280 * Automatic CoW Reservation Freeing
1281 *
1282 * These functions automatically garbage collect leftover CoW reservations
1283 * that were made on behalf of a cowextsize hint when we start to run out
1284 * of quota or when the reservations sit around for too long.  If the file
1285 * has dirty pages or is undergoing writeback, its CoW reservations will
1286 * be retained.
1287 *
1288 * The actual garbage collection piggybacks off the same code that runs
1289 * the speculative EOF preallocation garbage collector.
1290 */
1291STATIC int
1292xfs_inode_free_cowblocks(
1293	struct xfs_inode	*ip,
1294	struct xfs_icwalk	*icw,
1295	unsigned int		*lockflags)
1296{
1297	bool			wait;
1298	int			ret = 0;
1299
1300	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1301
1302	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1303		return 0;
1304
1305	if (!xfs_prep_free_cowblocks(ip))
1306		return 0;
1307
1308	if (!xfs_icwalk_match(ip, icw))
1309		return 0;
1310
1311	/*
1312	 * If the caller is waiting, return -EAGAIN to keep the background
1313	 * scanner moving and revisit the inode in a subsequent pass.
1314	 */
1315	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
1316	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1317		if (wait)
1318			return -EAGAIN;
1319		return 0;
1320	}
1321	*lockflags |= XFS_IOLOCK_EXCL;
1322
1323	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1324		if (wait)
1325			return -EAGAIN;
1326		return 0;
1327	}
1328	*lockflags |= XFS_MMAPLOCK_EXCL;
1329
1330	/*
1331	 * Check again, nobody else should be able to dirty blocks or change
1332	 * the reflink iflag now that we have the first two locks held.
1333	 */
1334	if (xfs_prep_free_cowblocks(ip))
1335		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1336	return ret;
1337}
1338
1339void
1340xfs_inode_set_cowblocks_tag(
1341	xfs_inode_t	*ip)
1342{
1343	trace_xfs_inode_set_cowblocks_tag(ip);
1344	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
1345}
1346
1347void
1348xfs_inode_clear_cowblocks_tag(
1349	xfs_inode_t	*ip)
1350{
1351	trace_xfs_inode_clear_cowblocks_tag(ip);
1352	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
1353}
1354
1355/* Disable post-EOF and CoW block auto-reclamation. */
1356void
1357xfs_blockgc_stop(
1358	struct xfs_mount	*mp)
1359{
1360	struct xfs_perag	*pag;
1361	xfs_agnumber_t		agno;
1362
1363	if (!xfs_clear_blockgc_enabled(mp))
1364		return;
1365
1366	for_each_perag(mp, agno, pag)
1367		cancel_delayed_work_sync(&pag->pag_blockgc_work);
1368	trace_xfs_blockgc_stop(mp, __return_address);
1369}
1370
1371/* Enable post-EOF and CoW block auto-reclamation. */
1372void
1373xfs_blockgc_start(
1374	struct xfs_mount	*mp)
1375{
1376	struct xfs_perag	*pag;
1377	xfs_agnumber_t		agno;
1378
1379	if (xfs_set_blockgc_enabled(mp))
1380		return;
1381
1382	trace_xfs_blockgc_start(mp, __return_address);
1383	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1384		xfs_blockgc_queue(pag);
1385}
1386
1387/* Don't try to run block gc on an inode that's in any of these states. */
1388#define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
1389					 XFS_NEED_INACTIVE | \
1390					 XFS_INACTIVATING | \
1391					 XFS_IRECLAIMABLE | \
1392					 XFS_IRECLAIM)
1393/*
1394 * Decide if the given @ip is eligible for garbage collection of speculative
1395 * preallocations, and grab it if so.  Returns true if it's ready to go or
1396 * false if we should just ignore it.
1397 */
1398static bool
1399xfs_blockgc_igrab(
1400	struct xfs_inode	*ip)
1401{
1402	struct inode		*inode = VFS_I(ip);
1403
1404	ASSERT(rcu_read_lock_held());
1405
 1406	/* Check for stale RCU freed inode */
 1407	spin_lock(&ip->i_flags_lock);
1408	if (!ip->i_ino)
1409		goto out_unlock_noent;
1410
 1411	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
 1412		goto out_unlock_noent;
1413	spin_unlock(&ip->i_flags_lock);
1414
1415	/* nothing to sync during shutdown */
1416	if (xfs_is_shutdown(ip->i_mount))
1417		return false;
1418
 1419	/* If we can't grab the inode, it must be on its way to reclaim. */
1420	if (!igrab(inode))
1421		return false;
1422
1423	/* inode is valid */
1424	return true;
1425
1426out_unlock_noent:
1427	spin_unlock(&ip->i_flags_lock);
1428	return false;
1429}
1430
1431/* Scan one incore inode for block preallocations that we can remove. */
1432static int
1433xfs_blockgc_scan_inode(
1434	struct xfs_inode	*ip,
1435	struct xfs_icwalk	*icw)
1436{
1437	unsigned int		lockflags = 0;
1438	int			error;
1439
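	/*
	 * The eofblocks and cowblocks helpers record whatever IOLOCK/MMAPLOCK
	 * state they took in @lockflags so it can all be dropped in one place
	 * below, even when the first helper returns an error.
	 */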
1440	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
1441	if (error)
1442		goto unlock;
1443
1444	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
1445unlock:
1446	if (lockflags)
1447		xfs_iunlock(ip, lockflags);
1448	xfs_irele(ip);
1449	return error;
1450}
1451
1452/* Background worker that trims preallocated space. */
1453void
1454xfs_blockgc_worker(
1455	struct work_struct	*work)
1456{
1457	struct xfs_perag	*pag = container_of(to_delayed_work(work),
1458					struct xfs_perag, pag_blockgc_work);
1459	struct xfs_mount	*mp = pag->pag_mount;
1460	int			error;
1461
1462	trace_xfs_blockgc_worker(mp, __return_address);
1463
1464	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
1465	if (error)
1466		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1467				pag->pag_agno, error);
1468	xfs_blockgc_queue(pag);
1469}
1470
1471/*
1472 * Try to free space in the filesystem by purging inactive inodes, eofblocks
1473 * and cowblocks.
1474 */
1475int
1476xfs_blockgc_free_space(
1477	struct xfs_mount	*mp,
1478	struct xfs_icwalk	*icw)
1479{
1480	int			error;
1481
1482	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
1483
1484	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
1485	if (error)
1486		return error;
1487
1488	xfs_inodegc_flush(mp);
1489	return 0;
1490}
1491
1492/*
1493 * Reclaim all the free space that we can by scheduling the background blockgc
1494 * and inodegc workers immediately and waiting for them all to clear.
1495 */
1496void
1497xfs_blockgc_flush_all(
1498	struct xfs_mount	*mp)
1499{
1500	struct xfs_perag	*pag;
1501	xfs_agnumber_t		agno;
1502
1503	trace_xfs_blockgc_flush_all(mp, __return_address);
1504
1505	/*
1506	 * For each blockgc worker, move its queue time up to now.  If it
1507	 * wasn't queued, it will not be requeued.  Then flush whatever's
1508	 * left.
1509	 */
1510	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1511		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
1512				&pag->pag_blockgc_work, 0);
1513
1514	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1515		flush_delayed_work(&pag->pag_blockgc_work);
1516
1517	xfs_inodegc_flush(mp);
1518}
1519
1520/*
1521 * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
1522 * quota caused an allocation failure, so we make a best effort by including
1523 * each quota under low free space conditions (less than 1% free space) in the
1524 * scan.
1525 *
1526 * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
1527 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
1528 * MMAPLOCK.
1529 */
1530int
1531xfs_blockgc_free_dquots(
1532	struct xfs_mount	*mp,
1533	struct xfs_dquot	*udqp,
1534	struct xfs_dquot	*gdqp,
1535	struct xfs_dquot	*pdqp,
1536	unsigned int		iwalk_flags)
1537{
1538	struct xfs_icwalk	icw = {0};
1539	bool			do_work = false;
1540
1541	if (!udqp && !gdqp && !pdqp)
1542		return 0;
1543
1544	/*
1545	 * Run a scan to free blocks using the union filter to cover all
1546	 * applicable quotas in a single scan.
1547	 */
1548	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
1549
1550	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
1551		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1552		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
1553		do_work = true;
1554	}
1555
 1556	if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1557		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1558		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
1559		do_work = true;
1560	}
1561
1562	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
1563		icw.icw_prid = pdqp->q_id;
1564		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
1565		do_work = true;
1566	}
1567
1568	if (!do_work)
1569		return 0;
1570
1571	return xfs_blockgc_free_space(mp, &icw);
1572}
1573
1574/* Run cow/eofblocks scans on the quotas attached to the inode. */
1575int
1576xfs_blockgc_free_quota(
1577	struct xfs_inode	*ip,
1578	unsigned int		iwalk_flags)
1579{
1580	return xfs_blockgc_free_dquots(ip->i_mount,
1581			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1582			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
1583			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
1584}
1585
1586/* XFS Inode Cache Walking Code */
1587
1588/*
1589 * The inode lookup is done in batches to keep the amount of lock traffic and
1590 * radix tree lookups to a minimum. The batch size is a trade off between
1591 * lookup reduction and stack usage. This is in the reclaim path, so we can't
1592 * be too greedy.
1593 */
1594#define XFS_LOOKUP_BATCH	32
1595
1596
1597/*
1598 * Decide if we want to grab this inode in anticipation of doing work towards
1599 * the goal.
1600 */
1601static inline bool
1602xfs_icwalk_igrab(
1603	enum xfs_icwalk_goal	goal,
1604	struct xfs_inode	*ip,
1605	struct xfs_icwalk	*icw)
1606{
1607	switch (goal) {
1608	case XFS_ICWALK_BLOCKGC:
1609		return xfs_blockgc_igrab(ip);
1610	case XFS_ICWALK_RECLAIM:
1611		return xfs_reclaim_igrab(ip, icw);
1612	default:
1613		return false;
1614	}
1615}
1616
1617/*
1618 * Process an inode.  Each processing function must handle any state changes
1619 * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
1620 */
1621static inline int
1622xfs_icwalk_process_inode(
1623	enum xfs_icwalk_goal	goal,
1624	struct xfs_inode	*ip,
1625	struct xfs_perag	*pag,
1626	struct xfs_icwalk	*icw)
1627{
1628	int			error = 0;
1629
1630	switch (goal) {
1631	case XFS_ICWALK_BLOCKGC:
1632		error = xfs_blockgc_scan_inode(ip, icw);
1633		break;
1634	case XFS_ICWALK_RECLAIM:
1635		xfs_reclaim_inode(ip, pag);
1636		break;
1637	}
1638	return error;
1639}
1640
1641/*
1642 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1643 * process them in some manner.
1644 */
1645static int
1646xfs_icwalk_ag(
1647	struct xfs_perag	*pag,
1648	enum xfs_icwalk_goal	goal,
1649	struct xfs_icwalk	*icw)
1650{
1651	struct xfs_mount	*mp = pag->pag_mount;
1652	uint32_t		first_index;
1653	int			last_error = 0;
1654	int			skipped;
1655	bool			done;
1656	int			nr_found;
1657
1658restart:
1659	done = false;
1660	skipped = 0;
1661	if (goal == XFS_ICWALK_RECLAIM)
1662		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1663	else
1664		first_index = 0;
1665	nr_found = 0;
1666	do {
1667		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1668		int		error = 0;
1669		int		i;
1670
1671		rcu_read_lock();
1672
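		/*
		 * @goal doubles as the radix tree tag here; the
		 * xfs_icwalk_goal values are deliberately defined to match
		 * the XFS_ICI_*_TAG values.
		 */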
1673		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1674				(void **) batch, first_index,
 1675				XFS_LOOKUP_BATCH, goal);
 1676		if (!nr_found) {
1677			done = true;
1678			rcu_read_unlock();
1679			break;
1680		}
1681
1682		/*
 1683		 * Grab the inodes before we drop the lock. If we found
1684		 * nothing, nr == 0 and the loop will be skipped.
1685		 */
1686		for (i = 0; i < nr_found; i++) {
1687			struct xfs_inode *ip = batch[i];
1688
1689			if (done || !xfs_icwalk_igrab(goal, ip, icw))
1690				batch[i] = NULL;
1691
1692			/*
1693			 * Update the index for the next lookup. Catch
1694			 * overflows into the next AG range which can occur if
1695			 * we have inodes in the last block of the AG and we
1696			 * are currently pointing to the last inode.
1697			 *
1698			 * Because we may see inodes that are from the wrong AG
1699			 * due to RCU freeing and reallocation, only update the
1700			 * index if it lies in this AG. It was a race that lead
 1701			 * index if it lies in this AG. It was a race that led
1702			 * same index will not find it again.
1703			 */
1704			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1705				continue;
1706			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1707			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1708				done = true;
1709		}
1710
1711		/* unlock now we've grabbed the inodes. */
1712		rcu_read_unlock();
1713
1714		for (i = 0; i < nr_found; i++) {
1715			if (!batch[i])
1716				continue;
1717			error = xfs_icwalk_process_inode(goal, batch[i], pag,
1718					icw);
1719			if (error == -EAGAIN) {
1720				skipped++;
1721				continue;
1722			}
1723			if (error && last_error != -EFSCORRUPTED)
1724				last_error = error;
1725		}
1726
1727		/* bail out if the filesystem is corrupted.  */
1728		if (error == -EFSCORRUPTED)
1729			break;
1730
1731		cond_resched();
1732
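		/*
		 * The scan limit is charged a full batch even if some of the
		 * slots in the batch were skipped or could not be grabbed.
		 */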
1733		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
1734			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
1735			if (icw->icw_scan_limit <= 0)
1736				break;
1737		}
1738	} while (nr_found && !done);
1739
1740	if (goal == XFS_ICWALK_RECLAIM) {
1741		if (done)
1742			first_index = 0;
1743		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1744	}
1745
1746	if (skipped) {
1747		delay(1);
1748		goto restart;
1749	}
1750	return last_error;
1751}
1752
1753/* Walk all incore inodes to achieve a given goal. */
1754static int
 1755xfs_icwalk(
 1756	struct xfs_mount	*mp,
 1757	enum xfs_icwalk_goal	goal,
 1758	struct xfs_icwalk	*icw)
 1759{
1760	struct xfs_perag	*pag;
1761	int			error = 0;
1762	int			last_error = 0;
1763	xfs_agnumber_t		agno;
1764
1765	for_each_perag_tag(mp, agno, pag, goal) {
 1766		error = xfs_icwalk_ag(pag, goal, icw);
 1767		if (error) {
1768			last_error = error;
1769			if (error == -EFSCORRUPTED) {
1770				xfs_perag_put(pag);
1771				break;
1772			}
1773		}
1774	}
1775	return last_error;
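	/*
	 * Never reached at runtime: BUILD_BUG_ON() is a pure compile-time
	 * assertion, so it still verifies that the private walk flags do not
	 * overlap the externally visible ones.
	 */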
1776	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
1777}
1778
1779#ifdef DEBUG
1780static void
1781xfs_check_delalloc(
1782	struct xfs_inode	*ip,
 1783	int			whichfork)
 1784{
1785	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1786	struct xfs_bmbt_irec	got;
 1787	struct xfs_iext_cursor	icur;
 1788
1789	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1790		return;
1791	do {
1792		if (isnullstartblock(got.br_startblock)) {
1793			xfs_warn(ip->i_mount,
1794	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
1795				ip->i_ino,
1796				whichfork == XFS_DATA_FORK ? "data" : "cow",
1797				got.br_startoff, got.br_blockcount);
1798		}
 1799	} while (xfs_iext_next_extent(ifp, &icur, &got));
 1800}
1801#else
1802#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
1803#endif
1804
 1805/* Schedule the inode for reclaim. */
 1806static void
1807xfs_inodegc_set_reclaimable(
1808	struct xfs_inode	*ip)
1809{
1810	struct xfs_mount	*mp = ip->i_mount;
1811	struct xfs_perag	*pag;
1812
1813	if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
1814		xfs_check_delalloc(ip, XFS_DATA_FORK);
1815		xfs_check_delalloc(ip, XFS_COW_FORK);
1816		ASSERT(0);
1817	}
1818
1819	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1820	spin_lock(&pag->pag_ici_lock);
1821	spin_lock(&ip->i_flags_lock);
1822
1823	trace_xfs_inode_set_reclaimable(ip);
1824	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1825	ip->i_flags |= XFS_IRECLAIMABLE;
1826	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1827			XFS_ICI_RECLAIM_TAG);
1828
1829	spin_unlock(&ip->i_flags_lock);
1830	spin_unlock(&pag->pag_ici_lock);
1831	xfs_perag_put(pag);
1832}
1833
1834/*
1835 * Free all speculative preallocations and possibly even the inode itself.
1836 * This is the last chance to make changes to an otherwise unreferenced file
 1837 * before incore reclamation happens.
 1838 */
1839static void
1840xfs_inodegc_inactivate(
1841	struct xfs_inode	*ip)
1842{
1843	trace_xfs_inode_inactivating(ip);
1844	xfs_inactive(ip);
1845	xfs_inodegc_set_reclaimable(ip);
1846}
1847
1848void
1849xfs_inodegc_worker(
1850	struct work_struct	*work)
1851{
1852	struct xfs_inodegc	*gc = container_of(to_delayed_work(work),
1853						struct xfs_inodegc, work);
1854	struct llist_node	*node = llist_del_all(&gc->list);
1855	struct xfs_inode	*ip, *n;
1856	unsigned int		nofs_flag;
1857
 1858	WRITE_ONCE(gc->items, 0);
 1859
 1860	if (!node)
 1861		return;
 1862
1863	/*
1864	 * We can allocate memory here while doing writeback on behalf of
1865	 * memory reclaim.  To avoid memory allocation deadlocks set the
1866	 * task-wide nofs context for the following operations.
1867	 */
 1868	nofs_flag = memalloc_nofs_save();
 1869
1870	ip = llist_entry(node, struct xfs_inode, i_gclist);
1871	trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
1872
1873	WRITE_ONCE(gc->shrinker_hits, 0);
1874	llist_for_each_entry_safe(ip, n, node, i_gclist) {
1875		xfs_iflags_set(ip, XFS_INACTIVATING);
1876		xfs_inodegc_inactivate(ip);
1877	}
1878
1879	memalloc_nofs_restore(nofs_flag);
1880}
1881
1882/*
1883 * Expedite all pending inodegc work to run immediately. This does not wait for
 1884 * completion of the work.
 1885 */
1886void
1887xfs_inodegc_push(
 1888	struct xfs_mount	*mp)
 1889{
1890	if (!xfs_is_inodegc_enabled(mp))
1891		return;
1892	trace_xfs_inodegc_push(mp, __return_address);
 1893	xfs_inodegc_queue_all(mp);
 1894}
1895
1896/*
1897 * Force all currently queued inode inactivation work to run immediately and
1898 * wait for the work to finish.
1899 */
1900void
1901xfs_inodegc_flush(
1902	struct xfs_mount	*mp)
1903{
1904	xfs_inodegc_push(mp);
1905	trace_xfs_inodegc_flush(mp, __return_address);
1906	flush_workqueue(mp->m_inodegc_wq);
1907}
1908
1909/*
1910 * Flush all the pending work and then disable the inode inactivation background
1911 * workers and wait for them to stop.
1912 */
1913void
1914xfs_inodegc_stop(
1915	struct xfs_mount	*mp)
1916{
1917	if (!xfs_clear_inodegc_enabled(mp))
1918		return;
1919
1920	xfs_inodegc_queue_all(mp);
1921	drain_workqueue(mp->m_inodegc_wq);
 
1922
1923	trace_xfs_inodegc_stop(mp, __return_address);
1924}
1925
1926/*
1927 * Enable the inode inactivation background workers and schedule deferred inode
1928 * inactivation work if there is any.
1929 */
1930void
1931xfs_inodegc_start(
1932	struct xfs_mount	*mp)
1933{
1934	if (xfs_set_inodegc_enabled(mp))
1935		return;
1936
1937	trace_xfs_inodegc_start(mp, __return_address);
1938	xfs_inodegc_queue_all(mp);
1939}
1940
1941#ifdef CONFIG_XFS_RT
1942static inline bool
1943xfs_inodegc_want_queue_rt_file(
1944	struct xfs_inode	*ip)
1945{
1946	struct xfs_mount	*mp = ip->i_mount;
1947
1948	if (!XFS_IS_REALTIME_INODE(ip))
1949		return false;
1950
1951	if (__percpu_counter_compare(&mp->m_frextents,
1952				mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
1953				XFS_FDBLOCKS_BATCH) < 0)
1954		return true;
1955
1956	return false;
1957}
1958#else
1959# define xfs_inodegc_want_queue_rt_file(ip)	(false)
1960#endif /* CONFIG_XFS_RT */
1961
1962/*
1963 * Schedule the inactivation worker when:
1964 *
1965 *  - We've accumulated more than one inode cluster buffer's worth of inodes.
1966 *  - There is less than 5% free space left.
1967 *  - Any of the quotas for this inode are near an enforcement limit.
1968 */
1969static inline bool
1970xfs_inodegc_want_queue_work(
1971	struct xfs_inode	*ip,
1972	unsigned int		items)
1973{
1974	struct xfs_mount	*mp = ip->i_mount;
1975
1976	if (items > mp->m_ino_geo.inodes_per_cluster)
1977		return true;
1978
1979	if (__percpu_counter_compare(&mp->m_fdblocks,
1980				mp->m_low_space[XFS_LOWSP_5_PCNT],
1981				XFS_FDBLOCKS_BATCH) < 0)
1982		return true;
1983
1984	if (xfs_inodegc_want_queue_rt_file(ip))
1985		return true;
1986
1987	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
1988		return true;
1989
1990	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
1991		return true;
1992
1993	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
1994		return true;
1995
1996	return false;
1997}
1998
1999/*
2000 * Upper bound on the number of inodes in each AG that can be queued for
2001 * inactivation at any given time, to avoid monopolizing the workqueue.
2002 */
2003#define XFS_INODEGC_MAX_BACKLOG		(4 * XFS_INODES_PER_CHUNK)
2004
2005/*
2006 * Make the frontend wait for inactivations when:
2007 *
2008 *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
2009 *  - The queue depth exceeds the maximum allowable percpu backlog.
2010 *
2011 * Note: If the current thread is running a transaction, we don't ever want to
2012 * wait for other transactions because that could introduce a deadlock.
2013 */
2014static inline bool
2015xfs_inodegc_want_flush_work(
2016	struct xfs_inode	*ip,
2017	unsigned int		items,
2018	unsigned int		shrinker_hits)
2019{
2020	if (current->journal_info)
2021		return false;
 
2022
2023	if (shrinker_hits > 0)
2024		return true;
 
2025
2026	if (items > XFS_INODEGC_MAX_BACKLOG)
2027		return true;
 
2028
2029	return false;
2030}
2031
2032/*
2033 * Queue a background inactivation worker if there are inodes that need to be
2034 * inactivated and higher level xfs code hasn't disabled the background
2035 * workers.
2036 */
2037static void
2038xfs_inodegc_queue(
2039	struct xfs_inode	*ip)
2040{
2041	struct xfs_mount	*mp = ip->i_mount;
2042	struct xfs_inodegc	*gc;
2043	int			items;
2044	unsigned int		shrinker_hits;
2045	unsigned long		queue_delay = 1;
2046
2047	trace_xfs_inode_set_need_inactive(ip);
2048	spin_lock(&ip->i_flags_lock);
2049	ip->i_flags |= XFS_NEED_INACTIVE;
2050	spin_unlock(&ip->i_flags_lock);
2051
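	/*
	 * Add the inode to this CPU's lockless inodegc queue and bump the
	 * queue depth; get_cpu_ptr() keeps us pinned to this CPU until the
	 * matching put_cpu_ptr() below.
	 */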
2052	gc = get_cpu_ptr(mp->m_inodegc);
2053	llist_add(&ip->i_gclist, &gc->list);
2054	items = READ_ONCE(gc->items);
2055	WRITE_ONCE(gc->items, items + 1);
2056	shrinker_hits = READ_ONCE(gc->shrinker_hits);
2057
2058	/*
2059	 * We queue the work while holding the current CPU so that the work
2060	 * is scheduled to run on this CPU.
2061	 */
2062	if (!xfs_is_inodegc_enabled(mp)) {
2063		put_cpu_ptr(gc);
2064		return;
2065	}
2066
2067	if (xfs_inodegc_want_queue_work(ip, items))
2068		queue_delay = 0;
2069
2070	trace_xfs_inodegc_queue(mp, __return_address);
2071	mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
2072	put_cpu_ptr(gc);
2073
2074	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
2075		trace_xfs_inodegc_throttle(mp, __return_address);
2076		flush_delayed_work(&gc->work);
2077	}
2078}
2079
2080/*
 2081 * Fold the dead CPU's inodegc queue into the current CPU's queue.
2082 */
2083void
2084xfs_inodegc_cpu_dead(
2085	struct xfs_mount	*mp,
2086	unsigned int		dead_cpu)
2087{
2088	struct xfs_inodegc	*dead_gc, *gc;
2089	struct llist_node	*first, *last;
2090	unsigned int		count = 0;
2091
2092	dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
2093	cancel_delayed_work_sync(&dead_gc->work);
2094
2095	if (llist_empty(&dead_gc->list))
2096		return;
2097
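	/* Walk the dead CPU's list to find its tail and count the entries. */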
2098	first = dead_gc->list.first;
2099	last = first;
2100	while (last->next) {
2101		last = last->next;
2102		count++;
2103	}
2104	dead_gc->list.first = NULL;
2105	dead_gc->items = 0;
2106
2107	/* Add pending work to current CPU */
2108	gc = get_cpu_ptr(mp->m_inodegc);
2109	llist_add_batch(first, last, &gc->list);
2110	count += READ_ONCE(gc->items);
2111	WRITE_ONCE(gc->items, count);
2112
2113	if (xfs_is_inodegc_enabled(mp)) {
2114		trace_xfs_inodegc_queue(mp, __return_address);
2115		mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
2116	}
2117	put_cpu_ptr(gc);
2118}
2119
2120/*
2121 * We set the inode flag atomically with the radix tree tag.  Once we get tag
2122 * lookups on the radix tree, this inode flag can go away.
2123 *
2124 * We always use background reclaim here because even if the inode is clean, it
 2125 * still may be under IO and hence we have to wait for IO completion to occur
2126 * before we can reclaim the inode. The background reclaim path handles this
2127 * more efficiently than we can here, so simply let background reclaim tear down
2128 * all inodes.
2129 */
2130void
2131xfs_inode_mark_reclaimable(
2132	struct xfs_inode	*ip)
2133{
2134	struct xfs_mount	*mp = ip->i_mount;
2135	bool			need_inactive;
2136
2137	XFS_STATS_INC(mp, vn_reclaim);
2138
2139	/*
2140	 * We should never get here with any of the reclaim flags already set.
2141	 */
2142	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2143
2144	need_inactive = xfs_inode_needs_inactive(ip);
2145	if (need_inactive) {
2146		xfs_inodegc_queue(ip);
2147		return;
2148	}
2149
2150	/* Going straight to reclaim, so drop the dquots. */
2151	xfs_qm_dqdetach(ip);
2152	xfs_inodegc_set_reclaimable(ip);
2153}
2154
2155/*
2156 * Register a phony shrinker so that we can run background inodegc sooner when
2157 * there's memory pressure.  Inactivation does not itself free any memory but
2158 * it does make inodes reclaimable, which eventually frees memory.
2159 *
2160 * The count function, seek value, and batch value are crafted to trigger the
2161 * scan function during the second round of scanning.  Hopefully this means
2162 * that we reclaimed enough memory that initiating metadata transactions won't
2163 * make things worse.
2164 */
2165#define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
2166#define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
2167
2168static unsigned long
2169xfs_inodegc_shrinker_count(
2170	struct shrinker		*shrink,
2171	struct shrink_control	*sc)
2172{
2173	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
2174						   m_inodegc_shrinker);
2175	struct xfs_inodegc	*gc;
2176	int			cpu;
2177
2178	if (!xfs_is_inodegc_enabled(mp))
2179		return 0;
 
2180
2181	for_each_online_cpu(cpu) {
2182		gc = per_cpu_ptr(mp->m_inodegc, cpu);
2183		if (!llist_empty(&gc->list))
2184			return XFS_INODEGC_SHRINKER_COUNT;
2185	}
2186
2187	return 0;
2188}
2189
2190static unsigned long
2191xfs_inodegc_shrinker_scan(
2192	struct shrinker		*shrink,
2193	struct shrink_control	*sc)
2194{
2195	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
2196						   m_inodegc_shrinker);
2197	struct xfs_inodegc	*gc;
2198	int			cpu;
2199	bool			no_items = true;
2200
2201	if (!xfs_is_inodegc_enabled(mp))
2202		return SHRINK_STOP;
2203
2204	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
2205
2206	for_each_online_cpu(cpu) {
2207		gc = per_cpu_ptr(mp->m_inodegc, cpu);
2208		if (!llist_empty(&gc->list)) {
2209			unsigned int	h = READ_ONCE(gc->shrinker_hits);
2210
2211			WRITE_ONCE(gc->shrinker_hits, h + 1);
2212			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
2213			no_items = false;
2214		}
2215	}
2216
2217	/*
2218	 * If there are no inodes to inactivate, we don't want the shrinker
2219	 * to think there's deferred work to call us back about.
2220	 */
2221	if (no_items)
2222		return LONG_MAX;
2223
2224	return SHRINK_STOP;
2225}
2226
2227/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
2228int
2229xfs_inodegc_register_shrinker(
2230	struct xfs_mount	*mp)
2231{
2232	struct shrinker		*shrink = &mp->m_inodegc_shrinker;
 
2233
2234	shrink->count_objects = xfs_inodegc_shrinker_count;
2235	shrink->scan_objects = xfs_inodegc_shrinker_scan;
2236	shrink->seeks = 0;
2237	shrink->flags = SHRINKER_NONSLAB;
2238	shrink->batch = XFS_INODEGC_SHRINKER_BATCH;
2239
2240	return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id);
 
2241}
v4.6
 
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
 
  20#include "xfs_format.h"
  21#include "xfs_log_format.h"
  22#include "xfs_trans_resv.h"
  23#include "xfs_sb.h"
  24#include "xfs_mount.h"
  25#include "xfs_inode.h"
  26#include "xfs_error.h"
  27#include "xfs_trans.h"
  28#include "xfs_trans_priv.h"
  29#include "xfs_inode_item.h"
  30#include "xfs_quota.h"
  31#include "xfs_trace.h"
  32#include "xfs_icache.h"
  33#include "xfs_bmap_util.h"
  34#include "xfs_dquot_item.h"
  35#include "xfs_dquot.h"
  36
  37#include <linux/kthread.h>
  38#include <linux/freezer.h>
  39
  40STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
  41				struct xfs_perag *pag, struct xfs_inode *ip);
  42
  43/*
  44 * Allocate and initialise an xfs_inode.
  45 */
  46struct xfs_inode *
  47xfs_inode_alloc(
  48	struct xfs_mount	*mp,
  49	xfs_ino_t		ino)
  50{
  51	struct xfs_inode	*ip;
  52
  53	/*
  54	 * if this didn't occur in transactions, we could use
  55	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
  56	 * code up to do this anyway.
  57	 */
  58	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
  59	if (!ip)
  60		return NULL;
  61	if (inode_init_always(mp->m_super, VFS_I(ip))) {
  62		kmem_zone_free(xfs_inode_zone, ip);
  63		return NULL;
  64	}
  65
  66	/* VFS doesn't initialise i_mode! */
  67	VFS_I(ip)->i_mode = 0;
  68
  69	XFS_STATS_INC(mp, vn_active);
  70	ASSERT(atomic_read(&ip->i_pincount) == 0);
  71	ASSERT(!spin_is_locked(&ip->i_flags_lock));
  72	ASSERT(!xfs_isiflocked(ip));
  73	ASSERT(ip->i_ino == 0);
  74
  75	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
  76
  77	/* initialise the xfs inode */
  78	ip->i_ino = ino;
  79	ip->i_mount = mp;
  80	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
  81	ip->i_afp = NULL;
  82	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
  83	ip->i_flags = 0;
  84	ip->i_delayed_blks = 0;
  85	memset(&ip->i_d, 0, sizeof(ip->i_d));
  86
  87	return ip;
  88}
  89
  90STATIC void
  91xfs_inode_free_callback(
  92	struct rcu_head		*head)
  93{
  94	struct inode		*inode = container_of(head, struct inode, i_rcu);
  95	struct xfs_inode	*ip = XFS_I(inode);
  96
  97	kmem_zone_free(xfs_inode_zone, ip);
  98}
  99
 100void
 101xfs_inode_free(
 102	struct xfs_inode	*ip)
 103{
 104	switch (VFS_I(ip)->i_mode & S_IFMT) {
 105	case S_IFREG:
 106	case S_IFDIR:
 107	case S_IFLNK:
 108		xfs_idestroy_fork(ip, XFS_DATA_FORK);
 109		break;
 110	}
 111
 112	if (ip->i_afp)
 113		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
 114
 115	if (ip->i_itemp) {
 116		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
 
 117		xfs_inode_item_destroy(ip);
 118		ip->i_itemp = NULL;
 119	}
 120
 121	/*
 122	 * Because we use RCU freeing we need to ensure the inode always
 123	 * appears to be reclaimed with an invalid inode number when in the
 124	 * free state. The ip->i_flags_lock provides the barrier against lookup
 125	 * races.
 126	 */
 127	spin_lock(&ip->i_flags_lock);
 128	ip->i_flags = XFS_IRECLAIM;
 129	ip->i_ino = 0;
 130	spin_unlock(&ip->i_flags_lock);
 131
 132	/* asserts to verify all state is correct here */
 133	ASSERT(atomic_read(&ip->i_pincount) == 0);
 134	ASSERT(!xfs_isiflocked(ip));
 135	XFS_STATS_DEC(ip->i_mount, vn_active);
 136
 137	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 138}
 139
 140/*
 141 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 142 * part of the structure. This is made more complex by the fact we store
 143 * information about the on-disk values in the VFS inode and so we can't just
 144 * overwrite the values unconditionally. Hence we save the parameters we
 145 * need to retain across reinitialisation, and rewrite them into the VFS inode
 146 * after reinitialisation even if it fails.
 147 */
 148static int
 149xfs_reinit_inode(
 150	struct xfs_mount	*mp,
 151	struct inode		*inode)
 152{
 153	int		error;
 154	uint32_t	nlink = inode->i_nlink;
 155	uint32_t	generation = inode->i_generation;
 156	uint64_t	version = inode->i_version;
 157	umode_t		mode = inode->i_mode;
 158
 159	error = inode_init_always(mp->m_super, inode);
 160
 161	set_nlink(inode, nlink);
 162	inode->i_generation = generation;
 163	inode->i_version = version;
 164	inode->i_mode = mode;
 165	return error;
 166}
 167
 168/*
  169 * Check the validity of the inode we just found in the cache
 170 */
 171static int
 172xfs_iget_cache_hit(
 173	struct xfs_perag	*pag,
 174	struct xfs_inode	*ip,
 175	xfs_ino_t		ino,
 176	int			flags,
 177	int			lock_flags) __releases(RCU)
 178{
 179	struct inode		*inode = VFS_I(ip);
 180	struct xfs_mount	*mp = ip->i_mount;
 181	int			error;
 182
 183	/*
 184	 * check for re-use of an inode within an RCU grace period due to the
 185	 * radix tree nodes not being updated yet. We monitor for this by
 186	 * setting the inode number to zero before freeing the inode structure.
 187	 * If the inode has been reallocated and set up, then the inode number
 188	 * will not match, so check for that, too.
 189	 */
 190	spin_lock(&ip->i_flags_lock);
 191	if (ip->i_ino != ino) {
 192		trace_xfs_iget_skip(ip);
 193		XFS_STATS_INC(mp, xs_ig_frecycle);
 194		error = -EAGAIN;
 195		goto out_error;
 196	}
 197
 198
 199	/*
 200	 * If we are racing with another cache hit that is currently
 201	 * instantiating this inode or currently recycling it out of
  202	 * reclaimable state, wait for the initialisation to complete
 203	 * before continuing.
 204	 *
 205	 * XXX(hch): eventually we should do something equivalent to
 206	 *	     wait_on_inode to wait for these flags to be cleared
 207	 *	     instead of polling for it.
 208	 */
 209	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
 210		trace_xfs_iget_skip(ip);
 211		XFS_STATS_INC(mp, xs_ig_frecycle);
 212		error = -EAGAIN;
 213		goto out_error;
 214	}
 215
 216	/*
 217	 * If lookup is racing with unlink return an error immediately.
 
 218	 */
 219	if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
 220		error = -ENOENT;
 221		goto out_error;
 222	}
 223
 224	/*
 225	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
  226	 * Need to carefully get it back into usable state.
 227	 */
 228	if (ip->i_flags & XFS_IRECLAIMABLE) {
 229		trace_xfs_iget_reclaim(ip);
 230
 231		/*
 232		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
 233		 * from stomping over us while we recycle the inode.  We can't
 234		 * clear the radix tree reclaimable tag yet as it requires
 235		 * pag_ici_lock to be held exclusive.
 236		 */
 237		ip->i_flags |= XFS_IRECLAIM;
 238
 239		spin_unlock(&ip->i_flags_lock);
 240		rcu_read_unlock();
 241
 242		error = xfs_reinit_inode(mp, inode);
 243		if (error) {
 244			/*
 245			 * Re-initializing the inode failed, and we are in deep
 246			 * trouble.  Try to re-add it to the reclaim list.
 247			 */
 248			rcu_read_lock();
 249			spin_lock(&ip->i_flags_lock);
 250
 251			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
 252			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
 253			trace_xfs_iget_reclaim_fail(ip);
 254			goto out_error;
 255		}
 256
 257		spin_lock(&pag->pag_ici_lock);
 258		spin_lock(&ip->i_flags_lock);
 259
 260		/*
 261		 * Clear the per-lifetime state in the inode as we are now
 262		 * effectively a new inode and need to return to the initial
 263		 * state before reuse occurs.
 264		 */
 265		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
 266		ip->i_flags |= XFS_INEW;
 267		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
 268		inode->i_state = I_NEW;
 269
 270		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
 271		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
 272
 273		spin_unlock(&ip->i_flags_lock);
 274		spin_unlock(&pag->pag_ici_lock);
 275	} else {
 276		/* If the VFS inode is being torn down, pause and try again. */
 277		if (!igrab(inode)) {
 278			trace_xfs_iget_skip(ip);
 279			error = -EAGAIN;
 280			goto out_error;
 281		}
 282
 283		/* We've got a live one. */
 284		spin_unlock(&ip->i_flags_lock);
 285		rcu_read_unlock();
 286		trace_xfs_iget_hit(ip);
 287	}
 288
 289	if (lock_flags != 0)
 290		xfs_ilock(ip, lock_flags);
 291
 292	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
 
 293	XFS_STATS_INC(mp, xs_ig_found);
 294
 295	return 0;
 296
 297out_error:
 298	spin_unlock(&ip->i_flags_lock);
 299	rcu_read_unlock();
 300	return error;
 301}
 302
 303
 304static int
 305xfs_iget_cache_miss(
 306	struct xfs_mount	*mp,
 307	struct xfs_perag	*pag,
 308	xfs_trans_t		*tp,
 309	xfs_ino_t		ino,
 310	struct xfs_inode	**ipp,
 311	int			flags,
 312	int			lock_flags)
 313{
 314	struct xfs_inode	*ip;
 315	int			error;
 316	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
 317	int			iflags;
 318
 319	ip = xfs_inode_alloc(mp, ino);
 320	if (!ip)
 321		return -ENOMEM;
 322
 323	error = xfs_iread(mp, tp, ip, flags);
 324	if (error)
 325		goto out_destroy;
 326
 327	trace_xfs_iget_miss(ip);
 328
 329	if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
 330		error = -ENOENT;
 331		goto out_destroy;
 332	}
 333
 334	/*
 335	 * Preload the radix tree so we can insert safely under the
 336	 * write spinlock. Note that we cannot sleep inside the preload
 337	 * region. Since we can be called from transaction context, don't
 338	 * recurse into the file system.
 339	 */
 340	if (radix_tree_preload(GFP_NOFS)) {
 341		error = -EAGAIN;
 342		goto out_destroy;
 343	}
 344
 345	/*
 346	 * Because the inode hasn't been added to the radix-tree yet it can't
 347	 * be found by another thread, so we can do the non-sleeping lock here.
 348	 */
 349	if (lock_flags) {
 350		if (!xfs_ilock_nowait(ip, lock_flags))
 351			BUG();
 352	}
 353
 354	/*
 355	 * These values must be set before inserting the inode into the radix
 356	 * tree as the moment it is inserted a concurrent lookup (allowed by the
 357	 * RCU locking mechanism) can find it and that lookup must see that this
 358	 * is an inode currently under construction (i.e. that XFS_INEW is set).
 359	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
 360	 * memory barrier that ensures this detection works correctly at lookup
 361	 * time.
 362	 */
 363	iflags = XFS_INEW;
 364	if (flags & XFS_IGET_DONTCACHE)
 365		iflags |= XFS_IDONTCACHE;
 366	ip->i_udquot = NULL;
 367	ip->i_gdquot = NULL;
 368	ip->i_pdquot = NULL;
 369	xfs_iflags_set(ip, iflags);
 370
 371	/* insert the new inode */
 372	spin_lock(&pag->pag_ici_lock);
 373	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
 374	if (unlikely(error)) {
 375		WARN_ON(error != -EEXIST);
 376		XFS_STATS_INC(mp, xs_ig_dup);
 377		error = -EAGAIN;
 378		goto out_preload_end;
 379	}
 380	spin_unlock(&pag->pag_ici_lock);
 381	radix_tree_preload_end();
 382
 383	*ipp = ip;
 384	return 0;
 385
 386out_preload_end:
 387	spin_unlock(&pag->pag_ici_lock);
 388	radix_tree_preload_end();
 389	if (lock_flags)
 390		xfs_iunlock(ip, lock_flags);
 391out_destroy:
 392	__destroy_inode(VFS_I(ip));
 393	xfs_inode_free(ip);
 394	return error;
 395}
 396
 397/*
 398 * Look up an inode by number in the given file system.
 399 * The inode is looked up in the cache held in each AG.
 400 * If the inode is found in the cache, initialise the vfs inode
 401 * if necessary.
 402 *
 403 * If it is not in core, read it in from the file system's device,
 404 * add it to the cache and initialise the vfs inode.
 405 *
 406 * The inode is locked according to the value of the lock_flags parameter.
 407 * This flag parameter indicates how and if the inode's IO lock and inode lock
 408 * should be taken.
 409 *
 410 * mp -- the mount point structure for the current file system.  It points
 411 *       to the inode hash table.
 412 * tp -- a pointer to the current transaction if there is one.  This is
 413 *       simply passed through to the xfs_iread() call.
 414 * ino -- the number of the inode desired.  This is the unique identifier
 415 *        within the file system for the inode being requested.
 416 * lock_flags -- flags indicating how to lock the inode.  See the comment
 417 *		 for xfs_ilock() for a list of valid values.
 418 */
 419int
 420xfs_iget(
 421	xfs_mount_t	*mp,
 422	xfs_trans_t	*tp,
 423	xfs_ino_t	ino,
 424	uint		flags,
 425	uint		lock_flags,
 426	xfs_inode_t	**ipp)
 427{
 428	xfs_inode_t	*ip;
 429	int		error;
 430	xfs_perag_t	*pag;
 431	xfs_agino_t	agino;
 432
 433	/*
 434	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
 435	 * doesn't get freed while it's being referenced during a
 436	 * radix tree traversal here.  It assumes this function
  437	 * acquires only the ILOCK (and therefore it has no need to
 438	 * involve the IOLOCK in this synchronization).
 439	 */
 440	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
 441
 442	/* reject inode numbers outside existing AGs */
 443	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
 444		return -EINVAL;
 445
 446	XFS_STATS_INC(mp, xs_ig_attempts);
 447
 448	/* get the perag structure and ensure that it's inode capable */
 449	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
 450	agino = XFS_INO_TO_AGINO(mp, ino);
 451
 452again:
 453	error = 0;
 454	rcu_read_lock();
 455	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 456
 457	if (ip) {
 458		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
 459		if (error)
 460			goto out_error_or_again;
 461	} else {
 462		rcu_read_unlock();
 463		XFS_STATS_INC(mp, xs_ig_missed);
 464
 465		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
 466							flags, lock_flags);
 467		if (error)
 468			goto out_error_or_again;
 469	}
 470	xfs_perag_put(pag);
 471
 472	*ipp = ip;
 473
 474	/*
 475	 * If we have a real type for an on-disk inode, we can setup the inode
 476	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
 
 477	 */
 478	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
 479		xfs_setup_existing_inode(ip);
 480	return 0;
 481
 482out_error_or_again:
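	/*
	 * -EAGAIN means the lookup raced with another thread instantiating,
	 * recycling or reclaiming this inode (or hit a transient radix tree
	 * failure); back off briefly and retry.
	 */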
 483	if (error == -EAGAIN) {
 484		delay(1);
 485		goto again;
 486	}
 487	xfs_perag_put(pag);
 488	return error;
 489}
 490
 491/*
 492 * The inode lookup is done in batches to keep the amount of lock traffic and
 493 * radix tree lookups to a minimum. The batch size is a trade off between
 494 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 495 * be too greedy.
 496 */
 497#define XFS_LOOKUP_BATCH	32
 498
 499STATIC int
 500xfs_inode_ag_walk_grab(
 501	struct xfs_inode	*ip)
 502{
 503	struct inode		*inode = VFS_I(ip);
 504
 505	ASSERT(rcu_read_lock_held());
 506
 507	/*
 508	 * check for stale RCU freed inode
 509	 *
 510	 * If the inode has been reallocated, it doesn't matter if it's not in
 511	 * the AG we are walking - we are walking for writeback, so if it
 512	 * passes all the "valid inode" checks and is dirty, then we'll write
 513	 * it back anyway.  If it has been reallocated and still being
 514	 * initialised, the XFS_INEW check below will catch it.
 515	 */
 516	spin_lock(&ip->i_flags_lock);
 517	if (!ip->i_ino)
 518		goto out_unlock_noent;
 519
 520	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
 521	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
 522		goto out_unlock_noent;
 523	spin_unlock(&ip->i_flags_lock);
 524
 525	/* nothing to sync during shutdown */
 526	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 527		return -EFSCORRUPTED;
 528
  529	/* If we can't grab the inode, it must be on its way to reclaim. */
 530	if (!igrab(inode))
 531		return -ENOENT;
 532
 533	/* inode is valid */
 534	return 0;
 535
 536out_unlock_noent:
 537	spin_unlock(&ip->i_flags_lock);
 538	return -ENOENT;
 539}
 540
 541STATIC int
 542xfs_inode_ag_walk(
 543	struct xfs_mount	*mp,
 544	struct xfs_perag	*pag,
 545	int			(*execute)(struct xfs_inode *ip, int flags,
 546					   void *args),
 547	int			flags,
 548	void			*args,
 549	int			tag)
 550{
 
 551	uint32_t		first_index;
 552	int			last_error = 0;
 553	int			skipped;
 554	int			done;
 555	int			nr_found;
 556
 557restart:
 558	done = 0;
 559	skipped = 0;
 560	first_index = 0;
 561	nr_found = 0;
 562	do {
 563		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
 564		int		error = 0;
 565		int		i;
 566
 567		rcu_read_lock();
 568
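		/*
		 * A tag of -1 walks every inode in the AG; otherwise the gang
		 * lookup is restricted to inodes carrying the given radix
		 * tree tag.
		 */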
 569		if (tag == -1)
 570			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
 571					(void **)batch, first_index,
 572					XFS_LOOKUP_BATCH);
 573		else
 574			nr_found = radix_tree_gang_lookup_tag(
 575					&pag->pag_ici_root,
 576					(void **) batch, first_index,
 577					XFS_LOOKUP_BATCH, tag);
 578
 579		if (!nr_found) {
 
 580			rcu_read_unlock();
 581			break;
 582		}
 583
 584		/*
  585		 * Grab the inodes before we drop the lock. If we found
 586		 * nothing, nr == 0 and the loop will be skipped.
 587		 */
 588		for (i = 0; i < nr_found; i++) {
 589			struct xfs_inode *ip = batch[i];
 590
 591			if (done || xfs_inode_ag_walk_grab(ip))
 592				batch[i] = NULL;
 593
 594			/*
 595			 * Update the index for the next lookup. Catch
 596			 * overflows into the next AG range which can occur if
 597			 * we have inodes in the last block of the AG and we
 598			 * are currently pointing to the last inode.
 599			 *
 600			 * Because we may see inodes that are from the wrong AG
 601			 * due to RCU freeing and reallocation, only update the
 602			 * index if it lies in this AG. It was a race that lead
  603			 * index if it lies in this AG. It was a race that led
 604			 * same index will not find it again.
 605			 */
 606			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
 607				continue;
 608			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
 609			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
 610				done = 1;
 611		}
 612
 613		/* unlock now we've grabbed the inodes. */
 614		rcu_read_unlock();
 615
 616		for (i = 0; i < nr_found; i++) {
 617			if (!batch[i])
 618				continue;
 619			error = execute(batch[i], flags, args);
 620			IRELE(batch[i]);
 621			if (error == -EAGAIN) {
 622				skipped++;
 623				continue;
 624			}
 625			if (error && last_error != -EFSCORRUPTED)
 626				last_error = error;
 627		}
 628
 629		/* bail out if the filesystem is corrupted.  */
 630		if (error == -EFSCORRUPTED)
 631			break;
 632
 633		cond_resched();
 634
 635	} while (nr_found && !done);
 636
 637	if (skipped) {
 638		delay(1);
 639		goto restart;
 640	}
 641	return last_error;
 642}
 643
 644/*
 645 * Background scanning to trim post-EOF preallocated space. This is queued
 646 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 647 */
 648STATIC void
 649xfs_queue_eofblocks(
 650	struct xfs_mount *mp)
 651{
 652	rcu_read_lock();
 653	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
 654		queue_delayed_work(mp->m_eofblocks_workqueue,
 655				   &mp->m_eofblocks_work,
 656				   msecs_to_jiffies(xfs_eofb_secs * 1000));
 657	rcu_read_unlock();
 658}
 659
 660void
 661xfs_eofblocks_worker(
 662	struct work_struct *work)
 663{
 664	struct xfs_mount *mp = container_of(to_delayed_work(work),
 665				struct xfs_mount, m_eofblocks_work);
 666	xfs_icache_free_eofblocks(mp, NULL);
 667	xfs_queue_eofblocks(mp);
 668}
 669
 670int
 671xfs_inode_ag_iterator(
 672	struct xfs_mount	*mp,
 673	int			(*execute)(struct xfs_inode *ip, int flags,
 674					   void *args),
 675	int			flags,
 676	void			*args)
 677{
 678	struct xfs_perag	*pag;
 679	int			error = 0;
 680	int			last_error = 0;
 681	xfs_agnumber_t		ag;
 682
 683	ag = 0;
 684	while ((pag = xfs_perag_get(mp, ag))) {
 685		ag = pag->pag_agno + 1;
 686		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
 687		xfs_perag_put(pag);
 688		if (error) {
 689			last_error = error;
 690			if (error == -EFSCORRUPTED)
 
 691				break;
 
 692		}
 693	}
 694	return last_error;
 
 695}
 696
 697int
 698xfs_inode_ag_iterator_tag(
 699	struct xfs_mount	*mp,
 700	int			(*execute)(struct xfs_inode *ip, int flags,
 701					   void *args),
 702	int			flags,
 703	void			*args,
 704	int			tag)
 705{
 706	struct xfs_perag	*pag;
 707	int			error = 0;
 708	int			last_error = 0;
 709	xfs_agnumber_t		ag;
 710
 711	ag = 0;
 712	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
 713		ag = pag->pag_agno + 1;
 714		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
 715		xfs_perag_put(pag);
 716		if (error) {
 717			last_error = error;
 718			if (error == -EFSCORRUPTED)
 719				break;
 720		}
 721	}
 722	return last_error;
 723}
 724
 725/*
 726 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 727 * isn't a reclaim pass already in progress. By default it runs every 5s based
  728 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 729 * tunable, but that can be done if this method proves to be ineffective or too
 730 * aggressive.
 731 */
 732static void
 733xfs_reclaim_work_queue(
 734	struct xfs_mount        *mp)
 735{
 
 
 736
 737	rcu_read_lock();
 738	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
 739		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
 740			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
 741	}
 742	rcu_read_unlock();
 743}
 744
 745/*
 746 * This is a fast pass over the inode cache to try to get reclaim moving on as
 747 * many inodes as possible in a short period of time. It kicks itself every few
 748 * seconds, as well as being kicked by the inode cache shrinker when memory
 749 * goes low. It scans as quickly as possible avoiding locked inodes or those
 750 * already being flushed, and once done schedules a future pass.
 751 */
 752void
 753xfs_reclaim_worker(
 754	struct work_struct *work)
 755{
 756	struct xfs_mount *mp = container_of(to_delayed_work(work),
 757					struct xfs_mount, m_reclaim_work);
 758
 759	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
 760	xfs_reclaim_work_queue(mp);
 761}
 762
 763static void
 764__xfs_inode_set_reclaim_tag(
 765	struct xfs_perag	*pag,
 766	struct xfs_inode	*ip)
 767{
 768	radix_tree_tag_set(&pag->pag_ici_root,
 769			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
 770			   XFS_ICI_RECLAIM_TAG);
 771
 772	if (!pag->pag_ici_reclaimable) {
 773		/* propagate the reclaim tag up into the perag radix tree */
 774		spin_lock(&ip->i_mount->m_perag_lock);
 775		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
 776				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
 777				XFS_ICI_RECLAIM_TAG);
 778		spin_unlock(&ip->i_mount->m_perag_lock);
 779
 780		/* schedule periodic background inode reclaim */
 781		xfs_reclaim_work_queue(ip->i_mount);
 782
 783		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
 784							-1, _RET_IP_);
 785	}
 786	pag->pag_ici_reclaimable++;
 
 787}
 788
 789/*
 790 * We set the inode flag atomically with the radix tree tag.
 791 * Once we get tag lookups on the radix tree, this inode flag
 792 * can go away.
 793 */
 794void
 795xfs_inode_set_reclaim_tag(
 796	xfs_inode_t	*ip)
 797{
 798	struct xfs_mount *mp = ip->i_mount;
 799	struct xfs_perag *pag;
 800
 801	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
 802	spin_lock(&pag->pag_ici_lock);
 803	spin_lock(&ip->i_flags_lock);
 804	__xfs_inode_set_reclaim_tag(pag, ip);
 805	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
 806	spin_unlock(&ip->i_flags_lock);
 807	spin_unlock(&pag->pag_ici_lock);
 808	xfs_perag_put(pag);
 809}
 810
 811STATIC void
 812__xfs_inode_clear_reclaim(
 813	xfs_perag_t	*pag,
 814	xfs_inode_t	*ip)
 815{
 816	pag->pag_ici_reclaimable--;
 817	if (!pag->pag_ici_reclaimable) {
 818		/* clear the reclaim tag from the perag radix tree */
 819		spin_lock(&ip->i_mount->m_perag_lock);
 820		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
 821				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
 822				XFS_ICI_RECLAIM_TAG);
 823		spin_unlock(&ip->i_mount->m_perag_lock);
 824		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
 825							-1, _RET_IP_);
 826	}
 827}
 828
 829STATIC void
 830__xfs_inode_clear_reclaim_tag(
 831	xfs_mount_t	*mp,
 832	xfs_perag_t	*pag,
 833	xfs_inode_t	*ip)
 834{
 835	radix_tree_tag_clear(&pag->pag_ici_root,
 836			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
 837	__xfs_inode_clear_reclaim(pag, ip);
 838}
 839
 840/*
 841 * Grab the inode for reclaim exclusively.
 842 * Return 0 if we grabbed it, non-zero otherwise.
 843 */
 844STATIC int
 845xfs_reclaim_inode_grab(
 846	struct xfs_inode	*ip,
 847	int			flags)
 848{
 849	ASSERT(rcu_read_lock_held());
 
 850
 851	/* quick check for stale RCU freed inode */
 852	if (!ip->i_ino)
 853		return 1;
 854
 855	/*
 856	 * If we are asked for non-blocking operation, do unlocked checks to
 857	 * see if the inode already is being flushed or in reclaim to avoid
 858	 * lock traffic.
 859	 */
 860	if ((flags & SYNC_TRYLOCK) &&
 861	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
 862		return 1;
 863
 864	/*
 865	 * The radix tree lock here protects a thread in xfs_iget from racing
 866	 * with us starting reclaim on the inode.  Once we have the
 867	 * XFS_IRECLAIM flag set it will not touch us.
 868	 *
 869	 * Due to RCU lookup, we may find inodes that have been freed and only
 870	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
 871	 * aren't candidates for reclaim at all, so we must check the
 872	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
 873	 */
 874	spin_lock(&ip->i_flags_lock);
 875	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
 876	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
 877		/* not a reclaim candidate. */
 878		spin_unlock(&ip->i_flags_lock);
 879		return 1;
 880	}
 881	__xfs_iflags_set(ip, XFS_IRECLAIM);
 882	spin_unlock(&ip->i_flags_lock);
 883	return 0;
 884}
 885
 886/*
 887 * Inodes in different states need to be treated differently. The following
 888 * table lists the inode states and the reclaim actions necessary:
 889 *
 890 *	inode state	     iflush ret		required action
 891 *      ---------------      ----------         ---------------
 892 *	bad			-		reclaim
 893 *	shutdown		EIO		unpin and reclaim
 894 *	clean, unpinned		0		reclaim
 895 *	stale, unpinned		0		reclaim
 896 *	clean, pinned(*)	0		requeue
 897 *	stale, pinned		EAGAIN		requeue
 898 *	dirty, async		-		requeue
 899 *	dirty, sync		0		reclaim
 900 *
 901 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 902 * handled anyway given the order of checks implemented.
 903 *
 904 * Also, because we get the flush lock first, we know that any inode that has
 905 * been flushed delwri has had the flush completed by the time we check that
 906 * the inode is clean.
 907 *
 908 * Note that because the inode is flushed delayed write by AIL pushing, the
 909 * flush lock may already be held here and waiting on it can result in very
 910 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 911 * the caller should push the AIL first before trying to reclaim inodes to
 912 * minimise the amount of time spent waiting.  For background relaim, we only
 913 * bother to reclaim clean inodes anyway.
 914 *
 915 * Hence the order of actions after gaining the locks should be:
 916 *	bad		=> reclaim
 917 *	shutdown	=> unpin and reclaim
 918 *	pinned, async	=> requeue
 919 *	pinned, sync	=> unpin
 920 *	stale		=> reclaim
 921 *	clean		=> reclaim
 922 *	dirty, async	=> requeue
 923 *	dirty, sync	=> flush, wait and reclaim
 924 */
 925STATIC int
 926xfs_reclaim_inode(
 927	struct xfs_inode	*ip,
 928	struct xfs_perag	*pag,
 929	int			sync_mode)
 930{
 931	struct xfs_buf		*bp = NULL;
 932	int			error;
 933
 934restart:
 935	error = 0;
 936	xfs_ilock(ip, XFS_ILOCK_EXCL);
 937	if (!xfs_iflock_nowait(ip)) {
 938		if (!(sync_mode & SYNC_WAIT))
 939			goto out;
 940		xfs_iflock(ip);
 941	}
 942
 943	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 944		xfs_iunpin_wait(ip);
 945		xfs_iflush_abort(ip, false);
 946		goto reclaim;
 947	}
 948	if (xfs_ipincount(ip)) {
 949		if (!(sync_mode & SYNC_WAIT))
 950			goto out_ifunlock;
 951		xfs_iunpin_wait(ip);
 952	}
 953	if (xfs_iflags_test(ip, XFS_ISTALE))
 954		goto reclaim;
 955	if (xfs_inode_clean(ip))
 956		goto reclaim;
 957
 958	/*
 959	 * Never flush out dirty data during non-blocking reclaim, as it would
 960	 * just contend with AIL pushing trying to do the same job.
 961	 */
 962	if (!(sync_mode & SYNC_WAIT))
 963		goto out_ifunlock;
 964
 965	/*
 966	 * Now we have an inode that needs flushing.
 967	 *
 968	 * Note that xfs_iflush will never block on the inode buffer lock, as
 969	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
 970	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
 971	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
 972	 * result in an ABBA deadlock with xfs_ifree_cluster().
 973	 *
  974	 * As xfs_ifree_cluster() must gather all inodes that are active in the
 975	 * cache to mark them stale, if we hit this case we don't actually want
 976	 * to do IO here - we want the inode marked stale so we can simply
 977	 * reclaim it.  Hence if we get an EAGAIN error here,  just unlock the
 978	 * inode, back off and try again.  Hopefully the next pass through will
 979	 * see the stale flag set on the inode.
 980	 */
 981	error = xfs_iflush(ip, &bp);
 982	if (error == -EAGAIN) {
 983		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 984		/* backoff longer than in xfs_ifree_cluster */
 985		delay(2);
 986		goto restart;
 987	}
 988
 989	if (!error) {
 990		error = xfs_bwrite(bp);
 991		xfs_buf_relse(bp);
 992	}
 993
 994	xfs_iflock(ip);
 995reclaim:
 996	xfs_ifunlock(ip);
 997	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 998
 999	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1000	/*
1001	 * Remove the inode from the per-AG radix tree.
1002	 *
1003	 * Because radix_tree_delete won't complain even if the item was never
 1004	 * added to the tree, assert that it's been there before to catch
 1005	 * problems with the inode lifetime early on.
1006	 */
1007	spin_lock(&pag->pag_ici_lock);
1008	if (!radix_tree_delete(&pag->pag_ici_root,
1009				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
1010		ASSERT(0);
1011	__xfs_inode_clear_reclaim(pag, ip);
1012	spin_unlock(&pag->pag_ici_lock);
1013
1014	/*
1015	 * Here we do an (almost) spurious inode lock in order to coordinate
1016	 * with inode cache radix tree lookups.  This is because the lookup
1017	 * can reference the inodes in the cache without taking references.
1018	 *
1019	 * We make that OK here by ensuring that we wait until the inode is
1020	 * unlocked after the lookup before we go ahead and free it.
1021	 */
1022	xfs_ilock(ip, XFS_ILOCK_EXCL);
1023	xfs_qm_dqdetach(ip);
1024	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1025
1026	xfs_inode_free(ip);
1027	return error;
1028
1029out_ifunlock:
1030	xfs_ifunlock(ip);
1031out:
1032	xfs_iflags_clear(ip, XFS_IRECLAIM);
1033	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1034	/*
1035	 * We could return -EAGAIN here to make reclaim rescan the inode tree in
1036	 * a short while. However, this just burns CPU time scanning the tree
1037	 * waiting for IO to complete and the reclaim work never goes back to
1038	 * the idle state. Instead, return 0 to let the next scheduled
1039	 * background reclaim attempt to reclaim the inode again.
1040	 */
1041	return 0;
1042}
1043
1044/*
1045 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
1046 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 1047 * then a shutdown during the filesystem unmount reclaim walk will leak all the
1048 * unreclaimed inodes.
 
1049 */
1050STATIC int
1051xfs_reclaim_inodes_ag(
1052	struct xfs_mount	*mp,
1053	int			flags,
1054	int			*nr_to_scan)
1055{
1056	struct xfs_perag	*pag;
1057	int			error = 0;
1058	int			last_error = 0;
1059	xfs_agnumber_t		ag;
1060	int			trylock = flags & SYNC_TRYLOCK;
1061	int			skipped;
1062
1063restart:
1064	ag = 0;
1065	skipped = 0;
1066	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1067		unsigned long	first_index = 0;
1068		int		done = 0;
1069		int		nr_found = 0;
1070
1071		ag = pag->pag_agno + 1;
1072
1073		if (trylock) {
1074			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
1075				skipped++;
1076				xfs_perag_put(pag);
1077				continue;
1078			}
1079			first_index = pag->pag_ici_reclaim_cursor;
1080		} else
1081			mutex_lock(&pag->pag_ici_reclaim_lock);
1082
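		/* Scan this AG's reclaim-tagged inodes in XFS_LOOKUP_BATCH-sized chunks. */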
1083		do {
1084			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1085			int	i;
1086
1087			rcu_read_lock();
1088			nr_found = radix_tree_gang_lookup_tag(
1089					&pag->pag_ici_root,
1090					(void **)batch, first_index,
1091					XFS_LOOKUP_BATCH,
1092					XFS_ICI_RECLAIM_TAG);
1093			if (!nr_found) {
1094				done = 1;
1095				rcu_read_unlock();
1096				break;
1097			}
1098
1099			/*
 1100			 * Grab the inodes before we drop the lock. If we found
1101			 * nothing, nr == 0 and the loop will be skipped.
1102			 */
1103			for (i = 0; i < nr_found; i++) {
1104				struct xfs_inode *ip = batch[i];
1105
1106				if (done || xfs_reclaim_inode_grab(ip, flags))
1107					batch[i] = NULL;
1108
1109				/*
1110				 * Update the index for the next lookup. Catch
1111				 * overflows into the next AG range which can
1112				 * occur if we have inodes in the last block of
1113				 * the AG and we are currently pointing to the
1114				 * last inode.
1115				 *
1116				 * Because we may see inodes that are from the
1117				 * wrong AG due to RCU freeing and
1118				 * reallocation, only update the index if it
 1119				 * lies in this AG. It was a race that led us
1120				 * to see this inode, so another lookup from
1121				 * the same index will not find it again.
1122				 */
1123				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1124								pag->pag_agno)
1125					continue;
1126				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1127				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1128					done = 1;
1129			}
1130
1131			/* unlock now we've grabbed the inodes. */
1132			rcu_read_unlock();
1133
1134			for (i = 0; i < nr_found; i++) {
1135				if (!batch[i])
1136					continue;
1137				error = xfs_reclaim_inode(batch[i], pag, flags);
1138				if (error && last_error != -EFSCORRUPTED)
1139					last_error = error;
1140			}
1141
1142			*nr_to_scan -= XFS_LOOKUP_BATCH;
 
1143
1144			cond_resched();
 
1145
1146		} while (nr_found && !done && *nr_to_scan > 0);
 
1147
1148		if (trylock && !done)
1149			pag->pag_ici_reclaim_cursor = first_index;
1150		else
1151			pag->pag_ici_reclaim_cursor = 0;
1152		mutex_unlock(&pag->pag_ici_reclaim_lock);
1153		xfs_perag_put(pag);
1154	}
1155
1156	/*
 1157	 * If we skipped any AG, and we still have scan count remaining, do
 1158	 * another pass this time using blocking reclaim semantics (i.e.
 1159	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
 1160	 * ensures that when we get more reclaimers than AGs we block rather
1161	 * than spin trying to execute reclaim.
1162	 */
1163	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
1164		trylock = 0;
1165		goto restart;
1166	}
1167	return last_error;
1168}
1169
1170int
1171xfs_reclaim_inodes(
1172	xfs_mount_t	*mp,
1173	int		mode)
1174{
1175	int		nr_to_scan = INT_MAX;
1176
1177	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
1178}
1179
1180/*
1181 * Scan a certain number of inodes for reclaim.
1182 *
1183 * When called we make sure that there is a background (fast) inode reclaim in
 1184 * progress, while we throttle the speed of reclaim by doing synchronous
1185 * reclaim of inodes. That means if we come across dirty inodes, we wait for
1186 * them to be cleaned, which we hope will not be very long due to the
1187 * background walker having already kicked the IO off on those dirty inodes.
1188 */
1189long
1190xfs_reclaim_inodes_nr(
1191	struct xfs_mount	*mp,
1192	int			nr_to_scan)
1193{
1194	/* kick background reclaimer and push the AIL */
1195	xfs_reclaim_work_queue(mp);
1196	xfs_ail_push_all(mp->m_ail);
1197
1198	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
1199}
1200
1201/*
1202 * Return the number of reclaimable inodes in the filesystem for
1203 * the shrinker to determine how much to reclaim.
1204 */
1205int
1206xfs_reclaim_inodes_count(
1207	struct xfs_mount	*mp)
1208{
1209	struct xfs_perag	*pag;
1210	xfs_agnumber_t		ag = 0;
1211	int			reclaimable = 0;
1212
1213	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1214		ag = pag->pag_agno + 1;
1215		reclaimable += pag->pag_ici_reclaimable;
1216		xfs_perag_put(pag);
1217	}
1218	return reclaimable;
1219}
1220
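/*
 * Intersection-based inode id filter: the inode must match every criterion
 * that is set in the passed-in xfs_eofblocks structure.  Compare
 * xfs_inode_match_id_union() below, which matches on any single criterion.
 */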
1221STATIC int
1222xfs_inode_match_id(
1223	struct xfs_inode	*ip,
1224	struct xfs_eofblocks	*eofb)
1225{
1226	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1227	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1228		return 0;
1229
1230	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1231	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1232		return 0;
1233
1234	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1235	    xfs_get_projid(ip) != eofb->eof_prid)
1236		return 0;
1237
1238	return 1;
1239}
1240
1241/*
1242 * A union-based inode filtering algorithm. Process the inode if any of the
1243 * criteria match. This is for global/internal scans only.
 
1244 */
1245STATIC int
1246xfs_inode_match_id_union(
1247	struct xfs_inode	*ip,
1248	struct xfs_eofblocks	*eofb)
1249{
1250	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1251	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1252		return 1;
1253
1254	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1255	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1256		return 1;
1257
1258	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1259	    xfs_get_projid(ip) == eofb->eof_prid)
1260		return 1;
1261
1262	return 0;
1263}
1264
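/*
 * Trim post-EOF speculative preallocations from a single inode, honouring
 * any filtering criteria passed in through the xfs_eofblocks argument.
 */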
1265STATIC int
1266xfs_inode_free_eofblocks(
1267	struct xfs_inode	*ip,
1268	int			flags,
1269	void			*args)
1270{
1271	int ret;
1272	struct xfs_eofblocks *eofb = args;
1273	bool need_iolock = true;
1274	int match;
1275
1276	ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));
1277
1278	if (!xfs_can_free_eofblocks(ip, false)) {
1279		/* inode could be preallocated or append-only */
1280		trace_xfs_inode_free_eofblocks_invalid(ip);
1281		xfs_inode_clear_eofblocks_tag(ip);
1282		return 0;
1283	}
1284
1285	/*
1286	 * If the mapping is dirty the operation can block and wait for some
1287	 * time. Unless we are waiting, skip it.
1288	 */
1289	if (!(flags & SYNC_WAIT) &&
1290	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1291		return 0;
1292
1293	if (eofb) {
1294		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1295			match = xfs_inode_match_id_union(ip, eofb);
1296		else
1297			match = xfs_inode_match_id(ip, eofb);
1298		if (!match)
1299			return 0;
1300
1301		/* skip the inode if the file size is too small */
1302		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1303		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
1304			return 0;
1305
1306		/*
1307		 * A scan owner implies we already hold the iolock. Skip it in
1308		 * xfs_free_eofblocks() to avoid deadlock. This also eliminates
1309		 * the possibility of EAGAIN being returned.
1310		 */
1311		if (eofb->eof_scan_owner == ip->i_ino)
1312			need_iolock = false;
1313	}
1314
1315	ret = xfs_free_eofblocks(ip->i_mount, ip, need_iolock);
 
1316
1317	/* don't revisit the inode if we're not waiting */
1318	if (ret == -EAGAIN && !(flags & SYNC_WAIT))
1319		ret = 0;
1320
1321	return ret;
1322}
1323
1324int
1325xfs_icache_free_eofblocks(
1326	struct xfs_mount	*mp,
1327	struct xfs_eofblocks	*eofb)
1328{
1329	int flags = SYNC_TRYLOCK;
1330
1331	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
1332		flags = SYNC_WAIT;
1333
1334	return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
1335					 eofb, XFS_ICI_EOFBLOCKS_TAG);
1336}
1337
1338/*
1339 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
1340 * multiple quotas, we don't know exactly which quota caused an allocation
1341 * failure. We make a best effort by including each quota under low free space
1342 * conditions (less than 1% free space) in the scan.
1343 */
1344int
1345xfs_inode_free_quota_eofblocks(
1346	struct xfs_inode *ip)
1347{
1348	int scan = 0;
1349	struct xfs_eofblocks eofb = {0};
1350	struct xfs_dquot *dq;
1351
1352	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1353
1354	/*
1355	 * Set the scan owner to avoid a potential livelock. Otherwise, the scan
1356	 * can repeatedly trylock on the inode we're currently processing. We
1357	 * run a sync scan to increase effectiveness and use the union filter to
1358	 * cover all applicable quotas in a single scan.
1359	 */
1360	eofb.eof_scan_owner = ip->i_ino;
1361	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
1362
1363	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
1364		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
1365		if (dq && xfs_dquot_lowsp(dq)) {
1366			eofb.eof_uid = VFS_I(ip)->i_uid;
1367			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1368			scan = 1;
1369		}
1370	}
1371
1372	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
1373		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
1374		if (dq && xfs_dquot_lowsp(dq)) {
1375			eofb.eof_gid = VFS_I(ip)->i_gid;
1376			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1377			scan = 1;
1378		}
1379	}
1380
1381	if (scan)
1382		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
1383
1384	return scan;
1385}
1386
1387void
1388xfs_inode_set_eofblocks_tag(
1389	xfs_inode_t	*ip)
1390{
1391	struct xfs_mount *mp = ip->i_mount;
1392	struct xfs_perag *pag;
1393	int tagged;
1394
1395	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1396	spin_lock(&pag->pag_ici_lock);
1397	trace_xfs_inode_set_eofblocks_tag(ip);
1398
1399	tagged = radix_tree_tagged(&pag->pag_ici_root,
1400				   XFS_ICI_EOFBLOCKS_TAG);
1401	radix_tree_tag_set(&pag->pag_ici_root,
1402			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
1403			   XFS_ICI_EOFBLOCKS_TAG);
1404	if (!tagged) {
1405		/* propagate the eofblocks tag up into the perag radix tree */
1406		spin_lock(&ip->i_mount->m_perag_lock);
1407		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1408				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1409				   XFS_ICI_EOFBLOCKS_TAG);
1410		spin_unlock(&ip->i_mount->m_perag_lock);
1411
1412		/* kick off background trimming */
1413		xfs_queue_eofblocks(ip->i_mount);
1414
1415		trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
1416					      -1, _RET_IP_);
1417	}
1418
1419	spin_unlock(&pag->pag_ici_lock);
1420	xfs_perag_put(pag);
1421}
1422
1423void
1424xfs_inode_clear_eofblocks_tag(
1425	xfs_inode_t	*ip)
1426{
1427	struct xfs_mount *mp = ip->i_mount;
1428	struct xfs_perag *pag;
1429
1430	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1431	spin_lock(&pag->pag_ici_lock);
1432	trace_xfs_inode_clear_eofblocks_tag(ip);
1433
1434	radix_tree_tag_clear(&pag->pag_ici_root,
1435			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
1436			     XFS_ICI_EOFBLOCKS_TAG);
1437	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
1438		/* clear the eofblocks tag from the perag radix tree */
1439		spin_lock(&ip->i_mount->m_perag_lock);
1440		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1441				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1442				     XFS_ICI_EOFBLOCKS_TAG);
1443		spin_unlock(&ip->i_mount->m_perag_lock);
1444		trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
1445					       -1, _RET_IP_);
1446	}
1447
1448	spin_unlock(&pag->pag_ici_lock);
1449	xfs_perag_put(pag);
1450}
1451